python_code | repo_name | file_path |
---|---|---|
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example use of the CFR algorithm on Kuhn Poker."""
from absl import app
from absl import flags
from open_spiel.python.algorithms import cfr
from open_spiel.python.algorithms import exploitability
import pyspiel
FLAGS = flags.FLAGS
flags.DEFINE_integer("iterations", 100, "Number of iterations")
flags.DEFINE_string("game", "kuhn_poker", "Name of the game")
flags.DEFINE_integer("players", 2, "Number of players")
flags.DEFINE_integer("print_freq", 10, "How often to print the exploitability")
def main(_):
game = pyspiel.load_game(FLAGS.game, {"players": FLAGS.players})
cfr_solver = cfr.CFRSolver(game)
for i in range(FLAGS.iterations):
cfr_solver.evaluate_and_update_policy()
if i % FLAGS.print_freq == 0:
conv = exploitability.exploitability(game, cfr_solver.average_policy())
print("Iteration {} exploitability {}".format(i, conv))
if __name__ == "__main__":
app.run(main)
| open_spiel-master | open_spiel/python/examples/cfr_example.py |
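The example above only reports exploitability over training. As a complement, here is a minimal sketch (not part of the repository file; it assumes the object returned by `average_policy()` follows OpenSpiel's standard `Policy` interface with `action_probabilities(state)`) showing how to inspect the strategy CFR actually learns at one information state.
```
# Illustrative sketch: run a few CFR iterations on Kuhn poker and print the
# average-policy action probabilities at the first decision node.
from open_spiel.python.algorithms import cfr
import pyspiel

game = pyspiel.load_game("kuhn_poker")
solver = cfr.CFRSolver(game)
for _ in range(100):
  solver.evaluate_and_update_policy()
avg_policy = solver.average_policy()

# Walk past the two chance nodes (the deal) to player 0's first decision.
state = game.new_initial_state()
while state.is_chance_node():
  state.apply_action(state.legal_actions()[0])
print(state.information_state_string())
print(avg_policy.action_probabilities(state))
```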
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Joint Policy-Space Response Oracles.
An implementation of JPSRO, described in https://arxiv.org/abs/2106.09435.
Bibtex / Cite:
```
@misc{marris2021multiagent,
title={Multi-Agent Training beyond Zero-Sum with Correlated Equilibrium
Meta-Solvers},
author={Luke Marris and Paul Muller and Marc Lanctot and Karl Tuyls and
Thore Graepel},
year={2021},
eprint={2106.09435},
archivePrefix={arXiv},
primaryClass={cs.MA}
}
```
"""
from absl import app
from absl import flags
from open_spiel.python.algorithms import jpsro
import pyspiel
GAMES = (
"kuhn_poker_2p",
"kuhn_poker_3p",
"kuhn_poker_4p",
"leduc_poker_2p",
"leduc_poker_3p",
"leduc_poker_4p",
"trade_comm_2p_2i",
"trade_comm_2p_3i",
"trade_comm_2p_4i",
"trade_comm_2p_5i",
"tiny_bridge_2p",
"tiny_bridge_4p",
"sheriff_2p_1r",
"sheriff_2p_2r",
"sheriff_2p_3r",
"sheriff_2p_gabriele",
"goofspiel_2p_3c_total",
"goofspiel_2p_4c_total",
"goofspiel_2p_5c_total",
"goofspiel_2p_5c_total",
"goofspiel_2p_5c_dsc_total",
"goofspiel_2p_5c_dsc_pt_diff",
)
FLAGS = flags.FLAGS
# Game.
flags.DEFINE_string(
"game", "kuhn_poker_3p",
"Game and settings name.")
# JPSRO - General.
flags.DEFINE_integer(
"iterations", 40,
"Number of JPSRO iterations.",
lower_bound=0)
flags.DEFINE_integer(
"seed", 1,
"Pseduo random number generator seed.")
flags.DEFINE_enum(
"policy_init", "uniform", jpsro.INIT_POLICIES,
"Initial policy sampling strategy.")
flags.DEFINE_enum(
"update_players_strategy", "all", jpsro.UPDATE_PLAYERS_STRATEGY,
"Which player's policies to update at each iteration.")
# JPSRO - Best Response.
flags.DEFINE_enum(
"target_equilibrium", "cce", jpsro.BRS,
"The target equilibrium, either ce or cce.")
flags.DEFINE_enum(
"br_selection", "largest_gap", jpsro.BR_SELECTIONS,
"The best response operator. Primarily used with CE target equilibrium.")
# JPSRO - Meta-Solver.
flags.DEFINE_enum(
"train_meta_solver", "mgcce", jpsro.META_SOLVERS,
"Meta-solver to use for training.")
flags.DEFINE_enum(
"eval_meta_solver", "mwcce", jpsro.META_SOLVERS,
"Meta-solver to use for evaluation.")
flags.DEFINE_bool(
"ignore_repeats", False,
"Whether to ignore policy repeats when calculating meta distribution. "
"This is relevant for some meta-solvers (such as Maximum Gini) that will "
"spread weight over repeats. This may or may not be a desireable property "
"depending on how one wishes to search the game space. A uniform "
"meta-solver requires this to be False.")
flags.DEFINE_float(
"action_value_tolerance", -1.0,
"If non-negative, use max-entropy best-responses with specified tolerance "
"on action-value. If negative, the best-response operator will return a "
"best-response policy that deterministically chooses the first action with "
"maximum action-value in each state.")
def get_game(game_name):
"""Returns the game."""
if game_name == "kuhn_poker_2p":
game_name = "kuhn_poker"
game_kwargs = {"players": int(2)}
elif game_name == "kuhn_poker_3p":
game_name = "kuhn_poker"
game_kwargs = {"players": int(3)}
elif game_name == "kuhn_poker_4p":
game_name = "kuhn_poker"
game_kwargs = {"players": int(4)}
elif game_name == "leduc_poker_2p":
game_name = "leduc_poker"
game_kwargs = {"players": int(2)}
elif game_name == "leduc_poker_3p":
game_name = "leduc_poker"
game_kwargs = {"players": int(3)}
elif game_name == "leduc_poker_4p":
game_name = "leduc_poker"
game_kwargs = {"players": int(4)}
elif game_name == "trade_comm_2p_2i":
game_name = "trade_comm"
game_kwargs = {"num_items": int(2)}
elif game_name == "trade_comm_2p_3i":
game_name = "trade_comm"
game_kwargs = {"num_items": int(3)}
elif game_name == "trade_comm_2p_4i":
game_name = "trade_comm"
game_kwargs = {"num_items": int(4)}
elif game_name == "trade_comm_2p_5i":
game_name = "trade_comm"
game_kwargs = {"num_items": int(5)}
elif game_name == "tiny_bridge_2p":
game_name = "tiny_bridge_2p"
game_kwargs = {}
elif game_name == "tiny_bridge_4p":
game_name = "tiny_bridge_4p"
game_kwargs = {}  # Note: this game is very large.
elif game_name == "sheriff_2p_1r":
game_name = "sheriff"
game_kwargs = {"num_rounds": int(1)}
elif game_name == "sheriff_2p_2r":
game_name = "sheriff"
game_kwargs = {"num_rounds": int(2)}
elif game_name == "sheriff_2p_3r":
game_name = "sheriff"
game_kwargs = {"num_rounds": int(3)}
elif game_name == "sheriff_2p_gabriele":
game_name = "sheriff"
game_kwargs = {
"item_penalty": float(1.0),
"item_value": float(5.0),
"max_bribe": int(2),
"max_items": int(10),
"num_rounds": int(2),
"sheriff_penalty": float(1.0),
}
elif game_name == "goofspiel_2p_3c_total":
game_name = "goofspiel"
game_kwargs = {
"players": int(2),
"returns_type": "total_points",
"num_cards": int(3)}
elif game_name == "goofspiel_2p_4c_total":
game_name = "goofspiel"
game_kwargs = {
"players": int(2),
"returns_type": "total_points",
"num_cards": int(4)}
elif game_name == "goofspiel_2p_5c_total":
game_name = "goofspiel"
game_kwargs = {
"imp_info": True,
"egocentric": True,
"players": int(2),
"returns_type": "total_points",
"num_cards": int(5)
}
elif game_name == "goofspiel_2p_5c_dsc_total":
game_name = "goofspiel"
game_kwargs = {
"imp_info": True,
"egocentric": True,
"points_order": "descending",
"players": int(2),
"returns_type": "total_points",
"num_cards": int(5)
}
elif game_name == "goofspiel_2p_5c_dsc_pt_diff":
game_name = "goofspiel"
game_kwargs = {
"imp_info": True,
"egocentric": True,
"points_order": "descending",
"players": int(2),
"returns_type": "point_difference",
"num_cards": int(5)
}
else:
raise ValueError("Unrecognised game: %s" % game_name)
return pyspiel.load_game_as_turn_based(game_name, game_kwargs)
def main(argv):
if len(argv) > 1:
raise app.UsageError("Too many command-line arguments.")
game = get_game(FLAGS.game)
jpsro.run_loop(
game=game,
game_name=FLAGS.game,
seed=FLAGS.seed,
iterations=FLAGS.iterations,
policy_init=FLAGS.policy_init,
update_players_strategy=FLAGS.update_players_strategy,
target_equilibrium=FLAGS.target_equilibrium,
br_selection=FLAGS.br_selection,
train_meta_solver=FLAGS.train_meta_solver,
eval_meta_solver=FLAGS.eval_meta_solver,
action_value_tolerance=FLAGS.action_value_tolerance,
ignore_repeats=FLAGS.ignore_repeats)
if __name__ == "__main__":
app.run(main)
| open_spiel-master | open_spiel/python/examples/jpsro.py |
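The long `if/elif` chain in `get_game` only maps a settings name to a game name plus keyword arguments; any of the listed settings can equivalently be loaded directly with the same loader the script uses. A minimal sketch:
```
# Illustrative: load the "kuhn_poker_3p" setting without going through get_game.
import pyspiel

game = pyspiel.load_game_as_turn_based("kuhn_poker", {"players": 3})
print(game.num_players())  # 3
```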
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Find Nash equilibria for constant- or general-sum 2-player games.
Non-matrix games are handled by computing the normal (bimatrix) form.
The algorithms used are:
* direct computation of pure equilibria.
* linear programming to find equilibria for constant-sum games.
* iterated dominance to reduce the action space.
* reverse search vertex enumeration (if using lrsnash) to find all general-sum
equilibria.
* support enumeration (if using nashpy) to find all general-sum equilibria.
* Lemke-Howson enumeration (if using nashpy) to find one general-sum
equilibrium.
The general-sum mixed-equilibrium algorithms are likely to work well for tens of
actions, but less likely to scale beyond that.
Example usage:
```
matrix_nash_example --game kuhn_poker
```
"""
import itertools
from absl import app
from absl import flags
import nashpy
import numpy as np
from open_spiel.python.algorithms import lp_solver
from open_spiel.python.algorithms import matrix_nash
from open_spiel.python.egt import utils
import pyspiel
FLAGS = flags.FLAGS
flags.DEFINE_string("game", "first_sealed_auction(max_value=6)",
"Game (short name plus optional parameters).")
flags.DEFINE_float("tol", 1e-7, "Tolerance for determining dominance.")
flags.DEFINE_enum(
"mode", "all", ["all", "pure", "one"], "Whether to find all extreme "
"equilibria, all pure equilibria, or just one equilibrium.")
flags.DEFINE_enum(
"solver", "nashpy", ["nashpy", "lrsnash", "linear"],
"Solver to use for finding mixed equilibria. (lrsnash needs to"
" be installed separately to work.)")
flags.DEFINE_string("lrsnash_path", None,
"Full path to lrsnash solver (searches PATH by default).")
flags.DEFINE_integer(
"lrsnash_max_denom", 1000, "Maximum denominator to use "
"when converting payoffs to rationals for lrsnash solver.")
def main(_):
game = pyspiel.load_game(FLAGS.game)
print("loaded game")
# convert game to matrix form if it isn't already a matrix game
if not isinstance(game, pyspiel.MatrixGame):
game = pyspiel.extensive_to_matrix_game(game)
num_rows, num_cols = game.num_rows(), game.num_cols()
print("converted to matrix form with shape (%d, %d)" % (num_rows, num_cols))
# use iterated dominance to reduce the space unless the solver is LP (fast)
if FLAGS.solver != "linear":
if FLAGS.mode == "all":
game, _ = lp_solver.iterated_dominance(
game, tol=FLAGS.tol, mode=lp_solver.DOMINANCE_STRICT)
num_rows, num_cols = game.num_rows(), game.num_cols()
print("discarded strictly dominated actions yielding shape (%d, %d)" %
(num_rows, num_cols))
if FLAGS.mode == "one":
game, _ = lp_solver.iterated_dominance(
game, tol=FLAGS.tol, mode=lp_solver.DOMINANCE_VERY_WEAK)
num_rows, num_cols = game.num_rows(), game.num_cols()
print("discarded very weakly dominated actions yielding shape (%d, %d)" %
(num_rows, num_cols))
# game is now finalized
num_rows, num_cols = game.num_rows(), game.num_cols()
row_actions = [game.row_action_name(row) for row in range(num_rows)]
col_actions = [game.col_action_name(col) for col in range(num_cols)]
row_payoffs, col_payoffs = utils.game_payoffs_array(game)
pure_nash = list(
zip(*((row_payoffs >= row_payoffs.max(0, keepdims=True) - FLAGS.tol)
& (col_payoffs >= col_payoffs.max(1, keepdims=True) - FLAGS.tol)
).nonzero()))
if pure_nash:
print("found %d pure equilibria" % len(pure_nash))
if FLAGS.mode == "pure":
if not pure_nash:
print("found no pure equilibria")
return
print("pure equilibria:")
for row, col in pure_nash:
print("payoffs %f, %f:" % (row_payoffs[row, col], col_payoffs[row, col]))
print("row action:")
print(row_actions[row])
print("col action:")
print(col_actions[col])
print("")
return
if FLAGS.mode == "one" and pure_nash:
print("pure equilibrium:")
row, col = pure_nash[0]
print("payoffs %f, %f:" % (row_payoffs[row, col], col_payoffs[row, col]))
print("row action:")
print(row_actions[row])
print("col action:")
print(col_actions[col])
print("")
return
for row, action in enumerate(row_actions):
print("row action %s:" % row)
print(action)
print("--")
for col, action in enumerate(col_actions):
print("col action %s:" % col)
print(action)
print("--")
if num_rows == 1 or num_cols == 1:
equilibria = itertools.product(np.eye(num_rows), np.eye(num_cols))
elif FLAGS.solver == "linear":
if FLAGS.mode != "one" or (row_payoffs + col_payoffs).max() > (
row_payoffs + col_payoffs).min() + FLAGS.tol:
raise ValueError("can't use linear solver for non-constant-sum game or "
"for finding all optima!")
print("using linear solver")
def gen():
p0_sol, p1_sol, _, _ = lp_solver.solve_zero_sum_matrix_game(
pyspiel.create_matrix_game(row_payoffs - col_payoffs,
col_payoffs - row_payoffs))
yield (np.squeeze(p0_sol, 1), np.squeeze(p1_sol, 1))
equilibria = gen()
elif FLAGS.solver == "lrsnash":
print("using lrsnash solver")
equilibria = matrix_nash.lrs_solve(row_payoffs, col_payoffs,
FLAGS.lrsnash_max_denom,
FLAGS.lrsnash_path)
elif FLAGS.solver == "nashpy":
if FLAGS.mode == "all":
print("using nashpy vertex enumeration")
equilibria = nashpy.Game(row_payoffs, col_payoffs).vertex_enumeration()
else:
print("using nashpy Lemke-Howson solver")
equilibria = matrix_nash.lemke_howson_solve(row_payoffs, col_payoffs)
print("equilibria:" if FLAGS.mode == "all" else "an equilibrium:")
equilibria = iter(equilibria)
# check that there's at least one equilibrium
try:
equilibria = itertools.chain([next(equilibria)], equilibria)
except StopIteration:
print("not found!")
for row_mixture, col_mixture in equilibria:
print("payoffs %f, %f for %s, %s" %
(row_mixture.dot(row_payoffs.dot(col_mixture)),
row_mixture.dot(
col_payoffs.dot(col_mixture)), row_mixture, col_mixture))
if FLAGS.mode == "one":
return
if __name__ == "__main__":
app.run(main)
| open_spiel-master | open_spiel/python/examples/matrix_nash_example.py |
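The pure-equilibrium search in `main` is a single vectorised test: a cell (r, c) is a pure Nash equilibrium when the row payoff is maximal within its column and the column payoff is maximal within its row, both up to `tol`. A self-contained numpy illustration on a prisoner's-dilemma bimatrix (values chosen here purely for illustration):
```
import numpy as np

tol = 1e-7
# Row/column payoffs for (Cooperate, Defect) x (Cooperate, Defect).
row_payoffs = np.array([[3.0, 0.0], [5.0, 1.0]])
col_payoffs = np.array([[3.0, 5.0], [0.0, 1.0]])
pure_nash = list(
    zip(*((row_payoffs >= row_payoffs.max(0, keepdims=True) - tol)
          & (col_payoffs >= col_payoffs.max(1, keepdims=True) - tol)
          ).nonzero()))
print(pure_nash)  # [(1, 1)]: mutual defection is the unique pure equilibrium
```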
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tabular Q-Learning on Lewis Signaling Game."""
import copy
from absl import app
from absl import flags
import numpy as np
from open_spiel.python import rl_environment
from open_spiel.python import rl_tools
from open_spiel.python.algorithms import tabular_qlearner
FLAGS = flags.FLAGS
# Env parameters
flags.DEFINE_integer("num_states", 3, "Number of states and actions")
flags.DEFINE_integer("num_messages", 3, "Number of messages")
flags.DEFINE_string("payoffs", "1, 0, 0, 0, 1, 0, 0, 0, 1",
"Payoffs to use ('random' for random [0, 1) payoffs)")
# Alg parameters
flags.DEFINE_bool("centralized", False, "Set to use centralized learning")
flags.DEFINE_integer("num_episodes", 2000, "Number of train episodes")
flags.DEFINE_float("step_size", 0.1, "Step size for updates")
flags.DEFINE_float("eps_init", 1.0, "Initial value of epsilon")
flags.DEFINE_float("eps_final", 0.0, "Final value of epsilon")
flags.DEFINE_integer("eps_decay_steps", 1900,
"Number of episodes to decay epsilon")
# Misc parameters
flags.DEFINE_integer("num_runs", 100, "Number of repetitions")
flags.DEFINE_integer("log_interval", 10,
"Number of episodes between each logging")
flags.DEFINE_bool("plot", False, "Set to plot the graphs")
flags.DEFINE_bool("compare", False,
"Set to compare centralized vs decentralized")
def run_experiment(num_players, env, payoffs, centralized):
"""Run the experiments."""
num_states = FLAGS.num_states
num_messages = FLAGS.num_messages
num_actions = env.action_spec()["num_actions"]
# Results to store
num_runs = FLAGS.num_runs
training_episodes = FLAGS.num_episodes
log_interval = FLAGS.log_interval
rewards = np.zeros((num_runs, training_episodes // log_interval))
opts = np.zeros((num_runs, training_episodes // log_interval))
converge_point = np.zeros((num_states, num_states))
percent_opt = 0
# Repeat the experiment num_runs times
for i in range(num_runs):
eps_schedule = rl_tools.LinearSchedule(
FLAGS.eps_init, FLAGS.eps_final, FLAGS.eps_decay_steps *
2) # *2 since there are 2 agent steps per episode
agents = [
# pylint: disable=g-complex-comprehension
tabular_qlearner.QLearner(
player_id=idx,
num_actions=num_actions,
step_size=FLAGS.step_size,
epsilon_schedule=eps_schedule,
centralized=centralized) for idx in range(num_players)
]
# 1. Train the agents
for cur_episode in range(training_episodes):
time_step = env.reset()
# Find cur_state for logging. See lewis_signaling.cc for info_state
# details.
cur_state = time_step.observations["info_state"][0][3:].index(1)
while not time_step.last():
player_id = time_step.observations["current_player"]
agent_output = agents[player_id].step(time_step)
time_step = env.step([agent_output.action])
# Episode is over, step all agents with final info state.
for agent in agents:
agent.step(time_step)
# Store rewards
reward = time_step.rewards[0]
max_reward = payoffs[cur_state].max()
cur_idx = (i, cur_episode // log_interval)
rewards[cur_idx] += reward / log_interval
opts[cur_idx] += np.isclose(reward, max_reward) / log_interval
base_info_state0 = [1.0, 0.0, 0.0] + [0.0] * num_states
base_info_state1 = [0.0, 1.0, 0.0] + [0.0] * num_states
if centralized:
base_info_state0 = [base_info_state0, base_info_state0.copy()]
base_info_state1 = [base_info_state1, base_info_state1.copy()]
for s in range(num_states):
info_state0 = copy.deepcopy(base_info_state0)
if centralized:
info_state0[0][3 + s] = 1.0
else:
info_state0[3 + s] = 1.0
# pylint: disable=protected-access
m, _ = agents[0]._epsilon_greedy(
str(info_state0), np.arange(num_messages), 0)
info_state1 = copy.deepcopy(base_info_state1)
if centralized:
info_state1[0][3 + s] = 1.0
info_state1[1][3 + m] = 1.0
else:
info_state1[3 + m] = 1.0
a, _ = agents[1]._epsilon_greedy(
str(info_state1), np.arange(num_states), 0)
converge_point[s, a] += 1
best_act = payoffs[s].argmax()
percent_opt += int(a == best_act) / num_runs / num_states
return rewards, opts, converge_point, percent_opt
def main(_):
game = "lewis_signaling"
num_players = 2
num_states = FLAGS.num_states
num_messages = FLAGS.num_messages
if FLAGS.payoffs == "random":
payoffs = np.random.random((num_states, num_states))
payoffs_str = ",".join([str(x) for x in payoffs.flatten()])
elif FLAGS.payoffs == "climbing":
# This is a particular payoff matrix that is hard for decentralized
# algorithms. Introduced in C. Claus and C. Boutilier, "The dynamics of
# reinforcement learning in cooperative multiagent systems", 1998, for
# simultaneous action games, but it is difficult even in the case of
# signaling games.
payoffs = np.array([[11, -30, 0], [-30, 7, 6], [0, 0, 5]]) / 30
payoffs_str = ",".join([str(x) for x in payoffs.flatten()])
else:
payoffs_str = FLAGS.payoffs
try:
payoffs_list = [float(x) for x in payoffs_str.split(",")]
payoffs = np.array(payoffs_list).reshape((num_states, num_states))
except ValueError:
raise ValueError(
"There should be {} (states * actions) elements in payoff. Found {} elements"
.format(num_states * num_states, len(payoffs_list))) from None
env_configs = {
"num_states": num_states,
"num_messages": num_messages,
"payoffs": payoffs_str
}
env = rl_environment.Environment(game, **env_configs)
if FLAGS.compare:
rewards_list = []
opts_list = []
converge_point_list = []
percent_opt_list = []
for centralized in [True, False]:
rewards, opts, converge_point, percent_opt = run_experiment(
num_players, env, payoffs, centralized)
rewards_list += [rewards]
opts_list += [opts]
converge_point_list += [converge_point]
percent_opt_list += [percent_opt]
else:
rewards, opts, converge_point, percent_opt = run_experiment(
num_players, env, payoffs, FLAGS.centralized)
rewards_list = [rewards]
opts_list = [opts]
converge_point_list = [converge_point]
percent_opt_list = [percent_opt]
if FLAGS.plot:
# pylint: disable=g-import-not-at-top
import matplotlib as mpl
import matplotlib.pyplot as plt
from scipy import stats
params = {
"font.size": 12,
"axes.labelsize": 12,
"xtick.labelsize": 11,
"ytick.labelsize": 11,
}
mpl.rcParams.update(params)
def init_fig():
fig, ax = plt.subplots(1, 1)
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
return fig, ax
def plot_scalars(scalars,
repetition_axis=0,
scalar_labels=None,
title=None,
ax_labels=None):
"""Plots scalar on ax by filling 1 standard error.
Args:
scalars: List of scalars to plot (mean taken over repetition
axis)
repetition_axis: Axis to take the mean over
scalar_labels: Labels for the scalars (for legend)
title: Figure title
ax_labels: Labels for x and y axis (list of 2 strings)
"""
if not all([len(s.shape) == 2 for s in scalars]):
raise ValueError("Only 2D arrays supported for plotting")
if scalar_labels is None:
scalar_labels = [None] * len(scalars)
if len(scalars) != len(scalar_labels):
raise ValueError(
"Wrong number of scalar labels, expected {} but received {}".format(
len(scalars), len(scalar_labels)))
_, plot_axis = init_fig()
for i, scalar in enumerate(scalars):
xs = np.arange(scalar.shape[1 - repetition_axis]) * FLAGS.log_interval
mean = scalar.mean(axis=repetition_axis)
sem = stats.sem(scalar, axis=repetition_axis)
plot_axis.plot(xs, mean, label=scalar_labels[i])
plot_axis.fill_between(xs, mean - sem, mean + sem, alpha=0.5)
if title is not None:
plot_axis.set_title(title)
if ax_labels is not None:
plot_axis.set_xlabel(ax_labels[0])
plot_axis.set_ylabel(ax_labels[1])
def plot_confusion_matrix(cm, cmap=plt.cm.Blues, title=None):
"""Plots the confusion matrix.
Args:
cm (np.ndarray): Confusion matrix to plot
cmap: Color map to be used in matplotlib's imshow
title: Figure title
Returns:
Figure and axis on which the confusion matrix is plotted
"""
fig, ax = plt.subplots()
ax.imshow(cm, interpolation="nearest", cmap=cmap)
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlabel("Receiver's action", fontsize=14)
ax.set_ylabel("Sender's state", fontsize=14)
# Loop over data dimensions and create text annotations.
fmt = "d"
thresh = cm.max() / 2.
for i in range(cm.shape[0]):
for j in range(cm.shape[1]):
ax.text(
j,
i,
format(cm[i, j], fmt),
ha="center",
va="center",
color="white" if cm[i, j] > thresh else "black")
fig.tight_layout()
if title is not None:
ax.set_title(title)
return fig, ax
if FLAGS.compare:
labels = ["Centralized", "Decentralized"]
else:
labels = ["Centralized"] if FLAGS.centralized else ["Decentralized"]
plot_scalars(
rewards_list,
scalar_labels=labels,
title="Reward graph (Tabular Q-Learning)",
ax_labels=["Episodes", "Reward per episode"])
plt.legend()
plot_scalars(
opts_list,
scalar_labels=labels,
title="Percentage of optimal actions (Tabular Q-Learning)",
ax_labels=["Episodes", "% optimal actions"])
plt.legend()
for i, cp in enumerate(converge_point_list):
plot_confusion_matrix(
cp.astype(int),
title="Final policy (Tabular {})".format(labels[i]))
plt.show()
return percent_opt_list
if __name__ == "__main__":
app.run(main)
| open_spiel-master | open_spiel/python/examples/lewis_signaling_qlearner.py |
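The `payoffs` flag is parsed into a `num_states` x `num_states` matrix whose entry (s, a) is the shared reward when the sender observes state s and the receiver plays action a. A short, purely illustrative sketch of the default value (identity payoffs, so only the matching action is rewarded):
```
import numpy as np

num_states = 3
payoffs_str = "1, 0, 0, 0, 1, 0, 0, 0, 1"  # the default flag value
payoffs = np.array([float(x) for x in payoffs_str.split(",")]).reshape(
    (num_states, num_states))
print(payoffs)              # identity matrix
print(payoffs[0].argmax())  # optimal receiver action for sender state 0 -> 0
```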
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| open_spiel-master | open_spiel/python/examples/__init__.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Train a policy net on bridge bidding based on a dataset of trajectories.
Suitable data for training, generated by WBridge5, may be downloaded from:
https://console.cloud.google.com/storage/browser/openspiel-data/bridge
"""
import os
import pickle
from typing import Any, Tuple
from absl import app
from absl import flags
import haiku as hk
import jax
from jax import numpy as jnp
import numpy as np
import optax
import pyspiel
OptState = Any
Params = Any
FLAGS = flags.FLAGS
GAME = pyspiel.load_game('bridge(use_double_dummy_result=false)')
NUM_ACTIONS = 38
MIN_ACTION = 52
NUM_CARDS = 52
NUM_PLAYERS = 4
TOP_K_ACTIONS = 5 # How many alternative actions to display
flags.DEFINE_integer('iterations', 100000, 'Number of iterations')
flags.DEFINE_string('data_path', None, 'Location for data')
flags.DEFINE_integer('eval_every', 10000, 'How often to evaluate the policy')
flags.DEFINE_integer('num_examples', 3,
'How many examples to print per evaluation')
flags.DEFINE_integer('train_batch', 128, 'Batch size for training step')
flags.DEFINE_integer('eval_batch', 10000, 'Batch size when evaluating')
flags.DEFINE_integer('rng_seed', 42, 'Seed for initial network weights')
flags.DEFINE_string('save_path', None, 'Location for saved networks')
def _no_play_trajectory(line: str):
"""Returns the deal and bidding actions only given a text trajectory."""
actions = [int(x) for x in line.split(' ')]
# Usually a trajectory is NUM_CARDS chance events for the deal, plus one
# action for every bid of the auction, plus NUM_CARDS actions for the play
# phase. Exceptionally, if all NUM_PLAYERS players Pass, there is no play
# phase and the trajectory is just of length NUM_CARDS + NUM_PLAYERS.
if len(actions) == NUM_CARDS + NUM_PLAYERS:
return tuple(actions)
else:
return tuple(actions[:-NUM_CARDS])
def make_dataset(file: str):
"""Creates dataset as a generator of single examples."""
all_trajectories = [_no_play_trajectory(line) for line in open(file)]
while True:
np.random.shuffle(all_trajectories)
for trajectory in all_trajectories:
action_index = np.random.randint(52, len(trajectory))
state = GAME.new_initial_state()
for action in trajectory[:action_index]:
state.apply_action(action)
yield (state.observation_tensor(), trajectory[action_index] - MIN_ACTION)
def batch(dataset, batch_size: int):
"""Creates a batched dataset from a one-at-a-time dataset."""
observations = np.zeros([batch_size] + GAME.observation_tensor_shape(),
np.float32)
labels = np.zeros(batch_size, dtype=np.int32)
while True:
for batch_index in range(batch_size):
observations[batch_index], labels[batch_index] = next(dataset)
yield observations, labels
def one_hot(x, k):
"""Returns a one-hot encoding of `x` of size `k`."""
return jnp.array(x[..., jnp.newaxis] == jnp.arange(k), dtype=np.float32)
def net_fn(x):
"""Haiku module for our network."""
net = hk.Sequential([
hk.Linear(1024),
jax.nn.relu,
hk.Linear(1024),
jax.nn.relu,
hk.Linear(1024),
jax.nn.relu,
hk.Linear(1024),
jax.nn.relu,
hk.Linear(NUM_ACTIONS),
jax.nn.log_softmax,
])
return net(x)
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
# Make the network.
net = hk.without_apply_rng(hk.transform(net_fn))
# Make the optimiser.
opt = optax.adam(1e-4)
@jax.jit
def loss(
params: Params,
inputs: np.ndarray,
targets: np.ndarray,
) -> jax.Array:
"""Cross-entropy loss."""
assert targets.dtype == np.int32
log_probs = net.apply(params, inputs)
return -jnp.mean(one_hot(targets, NUM_ACTIONS) * log_probs)
@jax.jit
def accuracy(
params: Params,
inputs: np.ndarray,
targets: np.ndarray,
) -> jax.Array:
"""Classification accuracy."""
predictions = net.apply(params, inputs)
return jnp.mean(jnp.argmax(predictions, axis=-1) == targets)
@jax.jit
def update(
params: Params,
opt_state: OptState,
inputs: np.ndarray,
targets: np.ndarray,
) -> Tuple[Params, OptState]:
"""Learning rule (stochastic gradient descent)."""
_, gradient = jax.value_and_grad(loss)(params, inputs, targets)
updates, opt_state = opt.update(gradient, opt_state)
new_params = optax.apply_updates(params, updates)
return new_params, opt_state
def output_samples(params: Params, max_samples: int):
"""Output some cases where the policy disagrees with the dataset action."""
if max_samples == 0:
return
count = 0
with open(os.path.join(FLAGS.data_path, 'test.txt')) as f:
lines = list(f)
np.random.shuffle(lines)
for line in lines:
state = GAME.new_initial_state()
actions = _no_play_trajectory(line)
for action in actions:
if not state.is_chance_node():
observation = np.array(state.observation_tensor(), np.float32)
policy = np.exp(net.apply(params, observation))
probs_actions = [(p, a + MIN_ACTION) for a, p in enumerate(policy)]
pred = max(probs_actions)[1]
if pred != action:
print(state)
for p, a in reversed(sorted(probs_actions)[-TOP_K_ACTIONS:]):
print('{:7} {:.2f}'.format(state.action_to_string(a), p))
print('Ground truth {}\n'.format(state.action_to_string(action)))
count += 1
break
state.apply_action(action)
if count >= max_samples:
return
# Make datasets.
if FLAGS.data_path is None:
raise app.UsageError(
'Please generate your own supervised training data or download from '
'https://console.cloud.google.com/storage/browser/openspiel-data/bridge'
' and supply the local location as --data_path')
train = batch(
make_dataset(os.path.join(FLAGS.data_path, 'train.txt')),
FLAGS.train_batch)
test = batch(
make_dataset(os.path.join(FLAGS.data_path, 'test.txt')), FLAGS.eval_batch)
# Initialize network and optimiser.
rng = jax.random.PRNGKey(FLAGS.rng_seed) # seed used for network weights
inputs, unused_targets = next(train)
params = net.init(rng, inputs)
opt_state = opt.init(params)
# Train/eval loop.
for step in range(FLAGS.iterations):
# Do SGD on a batch of training examples.
inputs, targets = next(train)
params, opt_state = update(params, opt_state, inputs, targets)
# Periodically evaluate classification accuracy on the test set.
if (1 + step) % FLAGS.eval_every == 0:
inputs, targets = next(test)
test_accuracy = accuracy(params, inputs, targets)
print(f'After {1+step} steps, test accuracy: {test_accuracy}.')
if FLAGS.save_path:
filename = os.path.join(FLAGS.save_path, f'params-{1 + step}.pkl')
with open(filename, 'wb') as pkl_file:
pickle.dump(params, pkl_file)
output_samples(params, FLAGS.num_examples)
if __name__ == '__main__':
app.run(main)
| open_spiel-master | open_spiel/python/examples/bridge_supervised_learning.py |
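The loss above is plain cross-entropy between the network's log-softmax outputs and one-hot targets, averaged over every entry of the batch x action matrix. A tiny numeric sketch of the same computation (no Haiku or optax required; the probabilities are made up for illustration):
```
import jax.numpy as jnp
import numpy as np

def one_hot(x, k):
  return jnp.array(x[..., jnp.newaxis] == jnp.arange(k), dtype=np.float32)

log_probs = jnp.log(jnp.array([[0.7, 0.2, 0.1],
                               [0.1, 0.8, 0.1]]))  # pretend network outputs
targets = np.array([0, 1], dtype=np.int32)
loss = -jnp.mean(one_hot(targets, 3) * log_probs)
print(loss)  # mean over all batch x class entries, as in loss() above
```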
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DQN agents trained on Breakthrough by independent Q-learning."""
from absl import app
from absl import flags
from absl import logging
import numpy as np
import tensorflow.compat.v1 as tf
from open_spiel.python import rl_environment
from open_spiel.python.algorithms import dqn
from open_spiel.python.algorithms import random_agent
FLAGS = flags.FLAGS
# Training parameters
flags.DEFINE_string("checkpoint_dir", "/tmp/dqn_test",
"Directory to save/load the agent models.")
flags.DEFINE_integer(
"save_every", int(1e4),
"Episode frequency at which the DQN agent models are saved.")
flags.DEFINE_integer("num_train_episodes", int(1e6),
"Number of training episodes.")
flags.DEFINE_integer(
"eval_every", 1000,
"Episode frequency at which the DQN agents are evaluated.")
# DQN model hyper-parameters
flags.DEFINE_list("hidden_layers_sizes", [64, 64],
"Number of hidden units in the Q-Network MLP.")
flags.DEFINE_integer("replay_buffer_capacity", int(1e5),
"Size of the replay buffer.")
flags.DEFINE_integer("batch_size", 32,
"Number of transitions to sample at each learning step.")
def eval_against_random_bots(env, trained_agents, random_agents, num_episodes):
"""Evaluates `trained_agents` against `random_agents` for `num_episodes`."""
num_players = len(trained_agents)
sum_episode_rewards = np.zeros(num_players)
for player_pos in range(num_players):
cur_agents = random_agents[:]
cur_agents[player_pos] = trained_agents[player_pos]
for _ in range(num_episodes):
time_step = env.reset()
episode_rewards = 0
while not time_step.last():
player_id = time_step.observations["current_player"]
if env.is_turn_based:
agent_output = cur_agents[player_id].step(
time_step, is_evaluation=True)
action_list = [agent_output.action]
else:
agents_output = [
agent.step(time_step, is_evaluation=True) for agent in cur_agents
]
action_list = [agent_output.action for agent_output in agents_output]
time_step = env.step(action_list)
episode_rewards += time_step.rewards[player_pos]
sum_episode_rewards[player_pos] += episode_rewards
return sum_episode_rewards / num_episodes
def main(_):
game = "breakthrough"
num_players = 2
env_configs = {"columns": 5, "rows": 5}
env = rl_environment.Environment(game, **env_configs)
info_state_size = env.observation_spec()["info_state"][0]
num_actions = env.action_spec()["num_actions"]
# random agents for evaluation
random_agents = [
random_agent.RandomAgent(player_id=idx, num_actions=num_actions)
for idx in range(num_players)
]
with tf.Session() as sess:
hidden_layers_sizes = [int(l) for l in FLAGS.hidden_layers_sizes]
# pylint: disable=g-complex-comprehension
agents = [
dqn.DQN(
session=sess,
player_id=idx,
state_representation_size=info_state_size,
num_actions=num_actions,
hidden_layers_sizes=hidden_layers_sizes,
replay_buffer_capacity=FLAGS.replay_buffer_capacity,
batch_size=FLAGS.batch_size) for idx in range(num_players)
]
sess.run(tf.global_variables_initializer())
for ep in range(FLAGS.num_train_episodes):
if (ep + 1) % FLAGS.eval_every == 0:
r_mean = eval_against_random_bots(env, agents, random_agents, 1000)
logging.info("[%s] Mean episode rewards %s", ep + 1, r_mean)
if (ep + 1) % FLAGS.save_every == 0:
for agent in agents:
agent.save(FLAGS.checkpoint_dir)
time_step = env.reset()
while not time_step.last():
player_id = time_step.observations["current_player"]
if env.is_turn_based:
agent_output = agents[player_id].step(time_step)
action_list = [agent_output.action]
else:
agents_output = [agent.step(time_step) for agent in agents]
action_list = [agent_output.action for agent_output in agents_output]
time_step = env.step(action_list)
# Episode is over, step all agents with final info state.
for agent in agents:
agent.step(time_step)
if __name__ == "__main__":
app.run(main)
| open_spiel-master | open_spiel/python/examples/breakthrough_dqn.py |
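The evaluation helper above rotates the trained agent through every seat against random opponents and averages the returns. The sketch below is illustrative only; it uses random agents on both sides so that it runs without a TensorFlow session, but the episode loop is the same.
```
import numpy as np
from open_spiel.python import rl_environment
from open_spiel.python.algorithms import random_agent

env = rl_environment.Environment("breakthrough", columns=5, rows=5)
num_actions = env.action_spec()["num_actions"]
agents = [random_agent.RandomAgent(player_id=i, num_actions=num_actions)
          for i in range(2)]

num_episodes = 10
rewards = np.zeros(2)
for _ in range(num_episodes):
  time_step = env.reset()
  while not time_step.last():
    pid = time_step.observations["current_player"]
    agent_output = agents[pid].step(time_step, is_evaluation=True)
    time_step = env.step([agent_output.action])
  rewards += time_step.rewards
print(rewards / num_episodes)
```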
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python spiel example."""
import logging
from absl import app
from absl import flags
from open_spiel.python import rl_environment
from open_spiel.python.algorithms import random_agent
FLAGS = flags.FLAGS
flags.DEFINE_string("game", "kuhn_poker", "Name of the game.")
flags.DEFINE_integer("num_players", 2, "Number of players.")
flags.DEFINE_integer("num_episodes", 2, "Number of episodes.")
def print_iteration(time_step, player_id, action=None):
"""Print TimeStep information."""
obs = time_step.observations
logging.info("Player: %s", player_id)
if time_step.first():
logging.info("Info state: %s, - - %s", obs["info_state"][player_id],
time_step.step_type)
else:
logging.info("Info state: %s, %s %s %s", obs["info_state"][player_id],
time_step.rewards[player_id], time_step.discounts[player_id],
time_step.step_type)
if action is not None:
logging.info("Action taken: %s", action)
logging.info("-" * 80)
def main_loop(unused_arg):
"""RL main loop example."""
logging.info("Registered games: %s", rl_environment.registered_games())
logging.info("Creating game %s", FLAGS.game)
env_configs = {"players": FLAGS.num_players} if FLAGS.num_players else {}
env = rl_environment.Environment(FLAGS.game, **env_configs)
num_actions = env.action_spec()["num_actions"]
agents = [
random_agent.RandomAgent(player_id=i, num_actions=num_actions)
for i in range(FLAGS.num_players)
]
logging.info("Env specs: %s", env.observation_spec())
logging.info("Action specs: %s", env.action_spec())
for cur_episode in range(FLAGS.num_episodes):
logging.info("Starting episode %s", cur_episode)
time_step = env.reset()
while not time_step.last():
pid = time_step.observations["current_player"]
if env.is_turn_based:
agent_output = agents[pid].step(time_step)
action_list = [agent_output.action]
else:
agents_output = [agent.step(time_step) for agent in agents]
action_list = [agent_output.action for agent_output in agents_output]
print_iteration(time_step, pid, action_list)
time_step = env.step(action_list)
# Episode is over, step all agents with final state.
for agent in agents:
agent.step(time_step)
# Print final state of end game.
for pid in range(env.num_players):
print_iteration(time_step, pid)
if __name__ == "__main__":
app.run(main_loop)
| open_spiel-master | open_spiel/python/examples/rl_main_loop.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Solving matrix games with LP solver."""
from absl import app
from open_spiel.python.algorithms import lp_solver
import pyspiel
def main(_):
# lp_solver.solve_zero_sum_matrix_game(pyspiel.load_matrix_game("matrix_mp"))
# lp_solver.solve_zero_sum_matrix_game(pyspiel.load_matrix_game("matrix_rps"))
p0_sol, p1_sol, p0_sol_val, p1_sol_val = lp_solver.solve_zero_sum_matrix_game(
pyspiel.create_matrix_game(
[[0.0, -0.25, 0.5], [0.25, 0.0, -0.05], [-0.5, 0.05, 0.0]],
[[0.0, 0.25, -0.5], [-0.25, 0.0, 0.05], [0.5, -0.05, 0.0]]))
print("p0 val = {}, policy = {}".format(p0_sol_val, p0_sol))
print("p1 val = {}, policy = {}".format(p1_sol_val, p1_sol))
payoff_matrix = [[1., 1., 1.], [2., 0., 1.], [0., 2., 2.]]
mixture = lp_solver.is_dominated(
0, payoff_matrix, 0, lp_solver.DOMINANCE_WEAK, return_mixture=True)
print("mixture strategy : {}".format(mixture))
print("payoff vector : {}".format(mixture.dot(payoff_matrix)))
if __name__ == "__main__":
app.run(main)
| open_spiel-master | open_spiel/python/examples/lp_solve_example.py |
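The commented-out call at the top of `main` can be run on the built-in matching-pennies game; both LP solutions should be approximately uniform with game value 0. A minimal sketch:
```
from open_spiel.python.algorithms import lp_solver
import pyspiel

p0_sol, p1_sol, p0_val, p1_val = lp_solver.solve_zero_sum_matrix_game(
    pyspiel.load_matrix_game("matrix_mp"))
print(p0_sol.flatten(), p0_val)  # approximately [0.5, 0.5], 0.0
print(p1_sol.flatten(), p1_val)  # approximately [0.5, 0.5], 0.0
```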
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python nfg_writer example."""
from absl import app
from absl import flags
import pyspiel
FLAGS = flags.FLAGS
flags.DEFINE_string("game", "matrix_rps", "Name of the game")
flags.DEFINE_string("outfile", None, "File to send the output to.")
def main(_):
game = pyspiel.load_game(FLAGS.game)
nfg_text = pyspiel.game_to_nfg_string(game)
if FLAGS.outfile is None:
print(nfg_text)
else:
print("Exporting to {}".format(FLAGS.outfile))
outfile = open(FLAGS.outfile, "w")
outfile.write(nfg_text)
outfile.close()
if __name__ == "__main__":
app.run(main)
| open_spiel-master | open_spiel/python/examples/nfg_writer_example.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python spiel example to use value iteration to solve a game."""
from absl import app
from absl import flags
from open_spiel.python.algorithms import value_iteration
import pyspiel
FLAGS = flags.FLAGS
flags.DEFINE_string("game", "tic_tac_toe", "Name of the game")
def play_tic_tac_toe():
"""Solves tic tac toe."""
game = pyspiel.load_game("tic_tac_toe")
print("Solving the game; depth_limit = {}".format(-1))
values = value_iteration.value_iteration(game, -1, 0.01)
for state, value in values.items():
print("")
print(str(state))
print("Value = {}".format(value))
initial_state = "...\n...\n..."
cross_win_state = "...\n...\n.ox"
naught_win_state = "x..\noo.\nxx."
assert values[initial_state] == 0, "State should be drawn: \n" + initial_state
assert values[cross_win_state] == 1, ("State should be won by player 0: \n" +
cross_win_state)
assert values[naught_win_state] == -1, (
"State should be won by player 1: \n" + cross_win_state)
def main(argv):
del argv
if FLAGS.game == "tic_tac_toe":
play_tic_tac_toe()
else:
raise NotImplementedError("This example only works for Tic-Tac-Toe.")
if __name__ == "__main__":
app.run(main)
| open_spiel-master | open_spiel/python/examples/value_iteration.py |
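The dictionary returned by `value_iteration` is keyed by `str(state)`, so any reachable position can be looked up after applying actions, not only the hard-coded board strings above. A short sketch using the same API:
```
from open_spiel.python.algorithms import value_iteration
import pyspiel

game = pyspiel.load_game("tic_tac_toe")
values = value_iteration.value_iteration(game, -1, 0.01)
state = game.new_initial_state()
state.apply_action(4)  # x plays the centre cell
print(values[str(state)])  # 0: still a draw under perfect play
```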
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python spiel example."""
import random
from absl import app
import numpy as np
import pyspiel
from open_spiel.python.utils import file_utils
def _manually_create_game():
"""Creates the game manually from the spiel building blocks."""
game_type = pyspiel.GameType(
"matching_pennies",
"Matching Pennies",
pyspiel.GameType.Dynamics.SIMULTANEOUS,
pyspiel.GameType.ChanceMode.DETERMINISTIC,
pyspiel.GameType.Information.ONE_SHOT,
pyspiel.GameType.Utility.ZERO_SUM,
pyspiel.GameType.RewardModel.TERMINAL,
2, # max num players
2, # min_num_players
True, # provides_information_state
True, # provides_information_state_tensor
False, # provides_observation
False, # provides_observation_tensor
dict() # parameter_specification
)
game = pyspiel.MatrixGame(
game_type,
{}, # game_parameters
["Heads", "Tails"], # row_action_names
["Heads", "Tails"], # col_action_names
[[-1, 1], [1, -1]], # row player utilities
[[1, -1], [-1, 1]] # col player utilities
)
return game
def _easy_create_game():
"""Uses the helper function to create the same game as above."""
return pyspiel.create_matrix_game("matching_pennies", "Matching Pennies",
["Heads", "Tails"], ["Heads", "Tails"],
[[-1, 1], [1, -1]], [[1, -1], [-1, 1]])
def _even_easier_create_game():
"""Leave out the names too, if you prefer."""
return pyspiel.create_matrix_game([[-1, 1], [1, -1]], [[1, -1], [-1, 1]])
def _import_data_create_game():
"""Creates a game via imported payoff data."""
payoff_file = file_utils.find_file(
"open_spiel/data/paper_data/response_graph_ucb/soccer.txt", 2)
payoffs = np.loadtxt(payoff_file)*2-1
return pyspiel.create_matrix_game(payoffs, payoffs.T)
def main(_):
games_list = pyspiel.registered_games()
print("Registered games:")
print(games_list)
# Load a two-player normal-form game as a two-player matrix game.
blotto_matrix_game = pyspiel.load_matrix_game("blotto")
print("Number of rows in 2-player Blotto with default settings is {}".format(
blotto_matrix_game.num_rows()))
# Several ways to load/create the same game of matching pennies.
print("Creating matrix game...")
game = pyspiel.load_matrix_game("matrix_mp")
game = _manually_create_game()
game = _import_data_create_game()
game = _easy_create_game()
game = _even_easier_create_game()
# Quick test: inspect top-left utility values:
print("Values for joint action ({},{}) is {},{}".format(
game.row_action_name(0), game.col_action_name(0),
game.player_utility(0, 0, 0), game.player_utility(1, 0, 0)))
state = game.new_initial_state()
# Print the initial state
print("State:")
print(str(state))
assert state.is_simultaneous_node()
# Simultaneous node: sample actions for all players.
chosen_actions = [
random.choice(state.legal_actions(pid))
for pid in range(game.num_players())
]
print("Chosen actions: ", [
state.action_to_string(pid, action)
for pid, action in enumerate(chosen_actions)
])
state.apply_actions(chosen_actions)
assert state.is_terminal()
# Game is now done. Print utilities for each player
returns = state.returns()
for pid in range(game.num_players()):
print("Utility for player {} is {}".format(pid, returns[pid]))
if __name__ == "__main__":
app.run(main)
| open_spiel-master | open_spiel/python/examples/matrix_game_example.py |
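Because `player_utility(player, row, col)` exposes every cell, the payoff tables of the game built above can be reconstructed and sanity-checked; for matching pennies the two tables must sum to zero everywhere. An illustrative sketch:
```
import numpy as np
import pyspiel

game = pyspiel.create_matrix_game([[-1, 1], [1, -1]], [[1, -1], [-1, 1]])
row_u = np.array([[game.player_utility(0, r, c) for c in range(game.num_cols())]
                  for r in range(game.num_rows())])
col_u = np.array([[game.player_utility(1, r, c) for c in range(game.num_cols())]
                  for r in range(game.num_rows())])
print(row_u + col_u)  # all zeros: the game is zero-sum
```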
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as python3
r"""Two BlueChip bridge bots bid with each other.
The bot_cmd FLAG should contain a command-line to launch an external bot, e.g.
`Wbridge5 Autoconnect {port}`.
"""
# pylint: enable=line-too-long
import re
import socket
import subprocess
from absl import app
from absl import flags
import numpy as np
from open_spiel.python.bots import bluechip_bridge_uncontested_bidding
import pyspiel
FLAGS = flags.FLAGS
flags.DEFINE_float("timeout_secs", 60, "Seconds to wait for bot to respond")
flags.DEFINE_integer("rng_seed", 1234, "Seed to use to generate hands")
flags.DEFINE_integer("num_deals", 10, "How many deals to play")
flags.DEFINE_string(
"bot_cmd", None,
"Command to launch the external bot; must include {port} which will be "
"replaced by the port number to attach to.")
def _run_once(state, bots):
"""Plays bots with each other, returns terminal utility for each player."""
for bot in bots:
bot.restart_at(state)
while not state.is_terminal():
if state.is_chance_node():
outcomes, probs = zip(*state.chance_outcomes())
state.apply_action(np.random.choice(outcomes, p=probs))
else:
state.apply_action(bots[state.current_player()].step(state)[1])
return state
def main(argv):
if len(argv) > 1:
raise app.UsageError("Too many command-line arguments.")
game = pyspiel.load_game("bridge_uncontested_bidding", {
"relative_scoring": True,
"rng_seed": FLAGS.rng_seed,
})
bots = [
bluechip_bridge_uncontested_bidding.BlueChipBridgeBot(
game, 0, _WBridge5Client(FLAGS.bot_cmd)),
bluechip_bridge_uncontested_bidding.BlueChipBridgeBot(
game, 1, _WBridge5Client(FLAGS.bot_cmd)),
]
results = []
for i_deal in range(FLAGS.num_deals):
state = _run_once(game.new_initial_state(), bots)
print("Deal #{}; final state:\n{}".format(i_deal, state))
results.append(state.returns())
stats = np.array(results)
mean = np.mean(stats, axis=0)
stderr = np.std(stats, axis=0, ddof=1) / np.sqrt(FLAGS.num_deals)
print(u"Absolute score: {:+.1f}\u00b1{:.1f}".format(mean[0], stderr[0]))
print(u"Relative score: {:+.1f}\u00b1{:.1f}".format(mean[1], stderr[1]))
class _WBridge5Client(object):
"""Manages the connection to a WBridge5 bot."""
def __init__(self, command):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.bind(("", 0))
self.port = self.sock.getsockname()[1]
self.sock.listen(1)
self.process = None
self.command = command.format(port=self.port)
def start(self):
if self.process is not None:
self.process.kill()
self.process = subprocess.Popen(self.command.split(" "))
self.conn, self.addr = self.sock.accept()
def read_line(self):
line = ""
while True:
self.conn.settimeout(FLAGS.timeout_secs)
data = self.conn.recv(1024)
if not data:
raise EOFError("Connection closed")
line += data.decode("ascii")
if line.endswith("\n"):
return re.sub(r"\s+", " ", line).strip()
def send_line(self, line):
self.conn.send((line + "\r\n").encode("ascii"))
if __name__ == "__main__":
app.run(main)
| open_spiel-master | open_spiel/python/examples/bridge_uncontested_bidding_bluechip.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as python3
"""Play a game, selecting random moves, and save what we see.
This can be used to check by hand the behaviour of a game, and also
as the basis for test cases.
Example usage:
```
playthrough --game kuhn_poker --params players=3
```
"""
from absl import app
from absl import flags
from absl import logging
from open_spiel.python.algorithms import generate_playthrough
FLAGS = flags.FLAGS
flags.DEFINE_string(
"game", "kuhn_poker", "Name of the game, with optional parameters, e.g. "
"'kuhn_poker' or 'go(komi=4.5,board_size=19)'.")
flags.DEFINE_string("output_file", None, "Where to write the data to.")
flags.DEFINE_list("actions", None,
"A (possibly partial) list of action choices to make.")
flags.DEFINE_string("update_path", None,
"If set, regenerates all playthroughs in the path.")
flags.DEFINE_bool(
"alsologtostdout", False,
"If True, the trace will be written to std-out while it "
"is being constructed (in addition to the usual behavior).")
flags.DEFINE_integer("shard", 0, "The shard to update.")
flags.DEFINE_integer("num_shards", 1, "How many shards to use for updates.")
def main(unused_argv):
if FLAGS.update_path:
generate_playthrough.update_path(FLAGS.update_path, FLAGS.shard,
FLAGS.num_shards)
else:
if not FLAGS.game:
raise ValueError("Must specify game")
actions = FLAGS.actions
if actions is not None:
actions = [int(x) for x in actions]
text = generate_playthrough.playthrough(
FLAGS.game, actions, alsologtostdout=FLAGS.alsologtostdout)
if FLAGS.output_file:
with open(FLAGS.output_file, "w") as f:
f.write(text)
else:
logging.info(text)
if __name__ == "__main__":
app.run(main)
| open_spiel-master | open_spiel/python/examples/playthrough.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python spiel example to get all the states in the game."""
from absl import app
from absl import flags
# pylint: disable=unused-import
from open_spiel.python import games
from open_spiel.python.algorithms import get_all_states
from open_spiel.python.mfg import games as mfg_games
import pyspiel
FLAGS = flags.FLAGS
flags.DEFINE_string("game", "tic_tac_toe", "Name of the game")
flags.DEFINE_integer("players", None, "Number of players")
flags.DEFINE_integer("depth_limit", -1, "Depth limit to stop at")
flags.DEFINE_bool("include_terminals", True, "Include terminal states?")
flags.DEFINE_bool("include_chance_states", True, "Include chance states?")
def main(_):
games_list = pyspiel.registered_games()
print("Registered games:")
for game in games_list:
print(" ", game.short_name)
print()
print("Creating game:", FLAGS.game)
params = {}
if FLAGS.players is not None:
params["players"] = FLAGS.players
game = pyspiel.load_game(FLAGS.game, params)
print("Getting all states; depth_limit = {}".format(FLAGS.depth_limit))
all_states = get_all_states.get_all_states(game, FLAGS.depth_limit,
FLAGS.include_terminals,
FLAGS.include_chance_states)
count = 0
for state in all_states:
print(state)
count += 1
print()
print("Total: {} states.".format(count))
if __name__ == "__main__":
app.run(main)
| open_spiel-master | open_spiel/python/examples/get_all_states.py |
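Called directly, `get_all_states` returns a mapping keyed by state strings (the loop above prints those keys), so the reported total is just its length. A minimal sketch for Kuhn poker, excluding chance nodes:
```
from open_spiel.python.algorithms import get_all_states
import pyspiel

game = pyspiel.load_game("kuhn_poker")
# Arguments: depth_limit, include_terminals, include_chance_states.
all_states = get_all_states.get_all_states(game, -1, True, False)
print(len(all_states))
```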
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example use of the CFR algorithm on Kuhn Poker."""
import pickle
from absl import app
from absl import flags
import pyspiel
FLAGS = flags.FLAGS
flags.DEFINE_enum("solver", "cfr", ["cfr", "cfrplus", "cfrbr"], "CFR solver")
flags.DEFINE_integer("iterations", 20, "Number of iterations")
flags.DEFINE_string("game", "kuhn_poker", "Name of the game")
flags.DEFINE_integer("players", 2, "Number of players")
def main(_):
game = pyspiel.load_game(
FLAGS.game,
{"players": FLAGS.players},
)
if FLAGS.solver == "cfr":
solver = pyspiel.CFRSolver(game)
elif FLAGS.solver == "cfrplus":
solver = pyspiel.CFRPlusSolver(game)
elif FLAGS.solver == "cfrbr":
solver = pyspiel.CFRBRSolver(game)
for i in range(int(FLAGS.iterations / 2)):
solver.evaluate_and_update_policy()
print("Iteration {} exploitability: {:.6f}".format(
i, pyspiel.exploitability(game, solver.average_policy())))
print("Persisting the model...")
with open("{}_solver.pickle".format(FLAGS.solver), "wb") as file:
pickle.dump(solver, file, pickle.HIGHEST_PROTOCOL)
print("Loading the model...")
with open("{}_solver.pickle".format(FLAGS.solver), "rb") as file:
loaded_solver = pickle.load(file)
print("Exploitability of the loaded model: {:.6f}".format(
pyspiel.exploitability(game, loaded_solver.average_policy())))
for i in range(int(FLAGS.iterations / 2)):
loaded_solver.evaluate_and_update_policy()
print("Iteration {} exploitability: {:.6f}".format(
int(FLAGS.iterations / 2) + i,
pyspiel.exploitability(game, loaded_solver.average_policy())))
if __name__ == "__main__":
app.run(main)
| open_spiel-master | open_spiel/python/examples/cfr_cpp_example.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Train a policy net on Hearts actions based given a dataset of trajectories.
Trajectories from the Hearts bot Xinxin can be generated using
open_spiel/bots/xinxin/xinxin_game_generator.cc.
"""
import os
import pickle
from typing import Any, Tuple
from absl import app
from absl import flags
import haiku as hk
import jax
from jax import numpy as jnp
import numpy as np
import optax
import pyspiel
OptState = Any
Params = Any
FLAGS = flags.FLAGS
GAME = pyspiel.load_game('hearts')
NUM_CARDS = 52
NUM_ACTIONS = NUM_CARDS
NUM_PLAYERS = 4
TOP_K_ACTIONS = 5 # How many alternative actions to display
DEFAULT_LAYER_SIZES = [1024, 1024, 1024, 1024]
flags.DEFINE_integer('iterations', 100000, 'Number of iterations')
flags.DEFINE_string('data_path', None, 'Location for data')
flags.DEFINE_integer('eval_every', 10000, 'How often to evaluate the policy')
flags.DEFINE_integer('num_examples', 3,
'How many examples to print per evaluation')
flags.DEFINE_integer('train_batch', 128, 'Batch size for training step')
flags.DEFINE_integer('eval_batch', 10000, 'Batch size when evaluating')
flags.DEFINE_float('step_size', 1e-4, 'Step size for training')
flags.DEFINE_list('hidden_layer_sizes', None,
'Number of hidden units and layers in the network')
flags.DEFINE_integer('rng_seed', 42, 'Seed for initial network weights')
flags.DEFINE_string('save_path', None, 'Location for saved networks')
flags.DEFINE_string('checkpoint_file', None,
                    'Provides weights and optimizer state to resume training')
def _trajectory(line: str):
"""Returns parsed action trajectory."""
actions = [int(x) for x in line.split(' ')]
return tuple(actions)
def make_dataset(file: str):
"""Creates dataset as a generator of single examples."""
lines = [line for line in open(file)]
while True:
np.random.shuffle(lines)
for line in lines:
trajectory = _trajectory(line)
# skip pass_dir and deal actions
action_index = np.random.randint(NUM_CARDS + 1, len(trajectory))
state = GAME.new_initial_state()
for action in trajectory[:action_index]:
state.apply_action(action)
yield (state.information_state_tensor(), trajectory[action_index])
def batch(dataset, batch_size: int):
"""Creates a batched dataset from a one-at-a-time dataset."""
observations = np.zeros([batch_size] + GAME.information_state_tensor_shape(),
np.float32)
labels = np.zeros(batch_size, dtype=np.int32)
while True:
for batch_index in range(batch_size):
observations[batch_index], labels[batch_index] = next(dataset)
yield observations, labels
def one_hot(x, k):
"""Returns a one-hot encoding of `x` of size `k`."""
return jnp.array(x[..., jnp.newaxis] == jnp.arange(k), dtype=np.float32)
def net_fn(x):
"""Haiku module for our network."""
layers = []
for layer_size in FLAGS.hidden_layer_sizes:
layers.append(hk.Linear(int(layer_size)))
layers.append(jax.nn.relu)
layers.append(hk.Linear(NUM_ACTIONS))
layers.append(jax.nn.log_softmax)
net = hk.Sequential(layers)
return net(x)
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
if FLAGS.hidden_layer_sizes is None:
# Cannot pass default arguments as lists due to style requirements, so we
# override it here if they are not set.
FLAGS.hidden_layer_sizes = DEFAULT_LAYER_SIZES
# Make the network.
net = hk.without_apply_rng(hk.transform(net_fn))
# Make the optimiser.
opt = optax.adam(FLAGS.step_size)
@jax.jit
def loss(
params: Params,
inputs: np.ndarray,
targets: np.ndarray,
) -> jax.Array:
"""Cross-entropy loss."""
assert targets.dtype == np.int32
log_probs = net.apply(params, inputs)
return -jnp.mean(one_hot(targets, NUM_ACTIONS) * log_probs)
@jax.jit
def accuracy(
params: Params,
inputs: np.ndarray,
targets: np.ndarray,
) -> jax.Array:
"""Classification accuracy."""
predictions = net.apply(params, inputs)
return jnp.mean(jnp.argmax(predictions, axis=-1) == targets)
@jax.jit
def update(
params: Params,
opt_state: OptState,
inputs: np.ndarray,
targets: np.ndarray,
) -> Tuple[Params, OptState]:
"""Learning rule (stochastic gradient descent)."""
_, gradient = jax.value_and_grad(loss)(params, inputs, targets)
updates, opt_state = opt.update(gradient, opt_state)
new_params = optax.apply_updates(params, updates)
return new_params, opt_state
def output_samples(params: Params, max_samples: int):
"""Output some cases where the policy disagrees with the dataset action."""
if max_samples == 0:
return
count = 0
with open(os.path.join(FLAGS.data_path, 'test.txt')) as f:
lines = list(f)
np.random.shuffle(lines)
for line in lines:
state = GAME.new_initial_state()
actions = _trajectory(line)
for action in actions:
if not state.is_chance_node():
observation = np.array(state.information_state_tensor(), np.float32)
policy = np.exp(net.apply(params, observation))
probs_actions = [(p, a) for a, p in enumerate(policy)]
pred = max(probs_actions)[1]
if pred != action:
print(state)
for p, a in reversed(sorted(probs_actions)[-TOP_K_ACTIONS:]):
print('{:7} {:.2f}'.format(state.action_to_string(a), p))
print('Ground truth {}\n'.format(state.action_to_string(action)))
count += 1
break
state.apply_action(action)
if count >= max_samples:
return
# Store what we need to rebuild the Haiku net.
if FLAGS.save_path:
filename = os.path.join(FLAGS.save_path, 'layers.txt')
with open(filename, 'w') as layer_def_file:
for s in FLAGS.hidden_layer_sizes:
layer_def_file.write(f'{s} ')
layer_def_file.write('\n')
# Make datasets.
if FLAGS.data_path is None:
raise app.UsageError(
        'Please generate your own supervised training data and supply the '
        'local location as --data_path')
train = batch(
make_dataset(os.path.join(FLAGS.data_path, 'train.txt')),
FLAGS.train_batch)
test = batch(
make_dataset(os.path.join(FLAGS.data_path, 'test.txt')), FLAGS.eval_batch)
# Initialize network and optimiser.
if FLAGS.checkpoint_file:
with open(FLAGS.checkpoint_file, 'rb') as pkl_file:
params, opt_state = pickle.load(pkl_file)
else:
rng = jax.random.PRNGKey(FLAGS.rng_seed) # seed used for network weights
inputs, unused_targets = next(train)
params = net.init(rng, inputs)
opt_state = opt.init(params)
# Train/eval loop.
for step in range(FLAGS.iterations):
# Do SGD on a batch of training examples.
inputs, targets = next(train)
params, opt_state = update(params, opt_state, inputs, targets)
# Periodically evaluate classification accuracy on the test set.
if (1 + step) % FLAGS.eval_every == 0:
inputs, targets = next(test)
test_accuracy = accuracy(params, inputs, targets)
print(f'After {1+step} steps, test accuracy: {test_accuracy}.')
if FLAGS.save_path:
filename = os.path.join(FLAGS.save_path, f'checkpoint-{1 + step}.pkl')
with open(filename, 'wb') as pkl_file:
pickle.dump((params, opt_state), pkl_file)
output_samples(params, FLAGS.num_examples)
if __name__ == '__main__':
app.run(main)
| open_spiel-master | open_spiel/python/examples/hearts_supervised_learning.py |
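# Added usage sketch (not part of the repository): the Haiku MLP plus
# cross-entropy recipe from the script above, reduced to one SGD step on
# random dummy data (the 16-dim inputs and 52-way labels are placeholders,
# not real Hearts observations).
import haiku as hk
import jax
from jax import numpy as jnp
import numpy as np
import optax
NUM_ACTIONS = 52
def net_fn(x):
  net = hk.Sequential(
      [hk.Linear(64), jax.nn.relu, hk.Linear(NUM_ACTIONS), jax.nn.log_softmax])
  return net(x)
net = hk.without_apply_rng(hk.transform(net_fn))
inputs = np.random.rand(8, 16).astype(np.float32)     # dummy observations
targets = np.random.randint(0, NUM_ACTIONS, size=8)   # dummy action labels
params = net.init(jax.random.PRNGKey(0), inputs)
opt = optax.adam(1e-4)
opt_state = opt.init(params)
def loss(params, inputs, targets):
  log_probs = net.apply(params, inputs)
  return -jnp.mean(jax.nn.one_hot(targets, NUM_ACTIONS) * log_probs)
grads = jax.grad(loss)(params, inputs, targets)
updates, opt_state = opt.update(grads, opt_state)
params = optax.apply_updates(params, updates)
print("Loss after one update:", loss(params, inputs, targets))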
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python spiel example."""
import random
from absl import app
from absl import flags
import numpy as np
from open_spiel.python import games # pylint: disable=unused-import
import pyspiel
FLAGS = flags.FLAGS
flags.DEFINE_string("game", "tic_tac_toe", "Name of the game")
flags.DEFINE_integer("players", None, "Number of players")
flags.DEFINE_string("load_state", None,
"A file containing a string to load a specific state")
def main(_):
games_list = pyspiel.registered_games()
print("Registered games:")
print(games_list)
action_string = None
print("Creating game: " + FLAGS.game)
if FLAGS.players is not None:
game = pyspiel.load_game(FLAGS.game, {"players": FLAGS.players})
else:
game = pyspiel.load_game(FLAGS.game)
# Get a new state
if FLAGS.load_state is not None:
# Load a specific state
state_string = ""
with open(FLAGS.load_state, encoding="utf-8") as input_file:
for line in input_file:
state_string += line
state_string = state_string.rstrip()
print("Loading state:")
print(state_string)
print("")
state = game.deserialize_state(state_string)
else:
state = game.new_initial_state()
# Print the initial state
print(str(state))
while not state.is_terminal():
# The state can be three different types: chance node,
# simultaneous node, or decision node
if state.is_chance_node():
# Chance node: sample an outcome
outcomes = state.chance_outcomes()
num_actions = len(outcomes)
print("Chance node, got " + str(num_actions) + " outcomes")
action_list, prob_list = zip(*outcomes)
action = np.random.choice(action_list, p=prob_list)
print("Sampled outcome: ",
state.action_to_string(state.current_player(), action))
state.apply_action(action)
elif state.is_simultaneous_node():
# Simultaneous node: sample actions for all players.
random_choice = lambda a: np.random.choice(a) if a else [0]
chosen_actions = [
random_choice(state.legal_actions(pid))
for pid in range(game.num_players())
]
print("Chosen actions: ", [
state.action_to_string(pid, action)
for pid, action in enumerate(chosen_actions)
])
state.apply_actions(chosen_actions)
else:
# Decision node: sample action for the single current player
action = random.choice(state.legal_actions(state.current_player()))
action_string = state.action_to_string(state.current_player(), action)
print("Player ", state.current_player(), ", randomly sampled action: ",
action_string)
state.apply_action(action)
print(str(state))
# Game is now done. Print utilities for each player
returns = state.returns()
for pid in range(game.num_players()):
print("Utility for player {} is {}".format(pid, returns[pid]))
if __name__ == "__main__":
app.run(main)
| open_spiel-master | open_spiel/python/examples/example.py |
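# Added usage sketch (not part of the repository): the chance-node handling
# above in isolation. chance_outcomes() returns (action, probability) pairs,
# which zip(*...) splits into parallel sequences for np.random.choice.
import numpy as np
import pyspiel
state = pyspiel.load_game("kuhn_poker").new_initial_state()
assert state.is_chance_node()
outcomes = state.chance_outcomes()
action_list, prob_list = zip(*outcomes)
action = np.random.choice(action_list, p=prob_list)
state.apply_action(action)
print("Sampled chance outcome:", action)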
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TD Learning with N-Tuple Networks for 2048."""
from absl import app
from absl import flags
from absl import logging
import numpy as np
import pyspiel
FLAGS = flags.FLAGS
flags.DEFINE_string("game", "2048", "Name of the game.")
flags.DEFINE_integer("num_train_episodes", 15000,
"Number of training episodes.")
flags.DEFINE_integer("eval_every", 1000,
"Episode frequency at which the agent is evaluated.")
flags.DEFINE_float("alpha", 0.02, "Learning rate")
class NTupleNetwork:
"""An N-tuple Network class.
N-Tuple Networks are an effective way of reducing the storage requirement for
evaluating and learning state values. This is accomplished by defining a
collection of N-Tuples that represent various segments in a game's
ObservationTensor.
The value of a given state is defined as the sum of values of each N-Tuple,
  which are stored in a lookup table. The policy of the agent is to choose an
  action that maximises the value of the after-state. After each episode, all
  the states that were reached in that episode are used for updating the state
values using Temporal Difference Learning.
References:
[1] Szubert, Marcin and Wojciech Jaśkowski. "Temporal difference learning of
n-tuple networks for the game 2048." Computational Intelligence and Games
(CIG), 2014 IEEE Conference on. IEEE, 2014.
"""
def __init__(self, n_tuple_size, max_tuple_index, n_tuples):
for tuples in n_tuples:
if len(tuples) != n_tuple_size:
raise ValueError("n_tuple_size does not match size of tuples")
n_tuple_network_size = len(n_tuples)
look_up_table_shape = (n_tuple_network_size,) + (
max_tuple_index,
) * n_tuple_size
self.n_tuples = n_tuples
self.look_up_table = np.zeros(look_up_table_shape)
def learn(self, states):
target = 0
while states:
state = states.pop()
error = target - self.value(state)
target = state.rewards()[0] + self.update(state, FLAGS.alpha * error)
def update(self, state, adjust):
v = 0
for idx, n_tuple in enumerate(self.n_tuples):
v += self.update_tuple(idx, n_tuple, state, adjust)
return v
def update_tuple(self, idx, n_tuple, state, adjust):
observation_tensor = state.observation_tensor(0)
index = (idx,) + tuple(
[
0
if observation_tensor[tile] == 0
else int(np.log2(observation_tensor[tile]))
for tile in n_tuple
]
)
self.look_up_table[index] += adjust
return self.look_up_table[index]
def evaluator(self, state, action):
working_state = state.clone()
working_state.apply_action(action)
return working_state.rewards()[0] + self.value(working_state)
def value(self, state):
"""Returns the value of this state."""
observation_tensor = state.observation_tensor(0)
v = 0
for idx, n_tuple in enumerate(self.n_tuples):
lookup_tuple_index = [
0
if observation_tensor[tile] == 0
else int(np.log2(observation_tensor[tile]))
for tile in n_tuple
]
lookup_index = (idx,) + tuple(lookup_tuple_index)
v += self.look_up_table[lookup_index]
return v
def main(_):
n_tuple_network = NTupleNetwork(
6,
15,
[
[0, 1, 2, 3, 4, 5],
[4, 5, 6, 7, 8, 9],
[0, 1, 2, 4, 5, 6],
[4, 5, 6, 8, 9, 10],
],
)
game = pyspiel.load_game(FLAGS.game)
sum_rewards = 0
largest_tile = 0
max_score = 0
for ep in range(FLAGS.num_train_episodes):
state = game.new_initial_state()
states_in_episode = []
while not state.is_terminal():
if state.is_chance_node():
outcomes = state.chance_outcomes()
action_list, prob_list = zip(*outcomes)
action = np.random.choice(action_list, p=prob_list)
state.apply_action(action)
else:
legal_actions = state.legal_actions(state.current_player())
# pylint: disable=cell-var-from-loop
best_action = max(
legal_actions,
key=lambda action: n_tuple_network.evaluator(state, action),
)
state.apply_action(best_action)
states_in_episode.append(state.clone())
sum_rewards += state.returns()[0]
largest_tile_from_episode = max(state.observation_tensor(0))
if largest_tile_from_episode > largest_tile:
largest_tile = largest_tile_from_episode
if state.returns()[0] > max_score:
max_score = state.returns()[0]
n_tuple_network.learn(states_in_episode)
if (ep + 1) % FLAGS.eval_every == 0:
logging.info(
"[%s] Average Score: %s, Max Score: %s, Largest Tile Reached: %s",
ep + 1,
int(sum_rewards / FLAGS.eval_every),
int(max_score),
int(largest_tile),
)
sum_rewards = 0
largest_tile = 0
max_score = 0
if __name__ == "__main__":
app.run(main)
| open_spiel-master | open_spiel/python/examples/twenty_forty_eight_td_n_tuple_network.py |
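# Added sketch (not part of the repository) of the indexing scheme described
# in the NTupleNetwork docstring above: each tile value maps to its log2
# exponent (0 for an empty tile), and the tuple of exponents indexes a lookup
# table. The 2-tile tuple below is a toy example, not one of the tuples above.
import numpy as np
observation = [2, 8]
exponents = [0 if v == 0 else int(np.log2(v)) for v in observation]  # [1, 3]
look_up_table = np.zeros((15, 15))        # one table per n-tuple
look_up_table[tuple(exponents)] += 0.5    # a TD update adjusts this one cell
print(exponents, look_up_table[tuple(exponents)])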
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python spiel example."""
import logging
from absl import app
from absl import flags
import numpy as np
from open_spiel.python import rl_environment
FLAGS = flags.FLAGS
flags.DEFINE_string("game", "tic_tac_toe", "Name of the game")
flags.DEFINE_integer("num_players", None, "Number of players")
def select_actions(observations, cur_player):
cur_legal_actions = observations["legal_actions"][cur_player]
actions = [np.random.choice(cur_legal_actions)]
return actions
def print_iteration(time_step, actions, player_id):
"""Print TimeStep information."""
obs = time_step.observations
logging.info("Player: %s", player_id)
if time_step.step_type.first():
logging.info("Info state: %s, - - %s", obs["info_state"][player_id],
time_step.step_type)
else:
logging.info("Info state: %s, %s %s %s", obs["info_state"][player_id],
time_step.rewards[player_id], time_step.discounts[player_id],
time_step.step_type)
logging.info("Action taken: %s", actions)
logging.info("-" * 80)
def turn_based_example(unused_arg):
"""Example usage of the RL environment for turn-based games."""
# `rl_main_loop.py` contains more details and simultaneous move examples.
logging.info("Registered games: %s", rl_environment.registered_games())
logging.info("Creating game %s", FLAGS.game)
env_configs = {"players": FLAGS.num_players} if FLAGS.num_players else {}
env = rl_environment.Environment(FLAGS.game, **env_configs)
logging.info("Env specs: %s", env.observation_spec())
logging.info("Action specs: %s", env.action_spec())
time_step = env.reset()
while not time_step.step_type.last():
pid = time_step.observations["current_player"]
actions = select_actions(time_step.observations, pid)
print_iteration(time_step, actions, pid)
time_step = env.step(actions)
# Print final state of end game.
for pid in range(env.num_players):
print_iteration(time_step, actions, pid)
if __name__ == "__main__":
app.run(turn_based_example)
| open_spiel-master | open_spiel/python/examples/rl_example.py |
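# Added usage sketch (not part of the repository): the bare Environment loop
# from the script above, choosing uniformly random legal actions until the
# episode ends.
import numpy as np
from open_spiel.python import rl_environment
env = rl_environment.Environment("tic_tac_toe")
time_step = env.reset()
while not time_step.last():
  player = time_step.observations["current_player"]
  action = np.random.choice(time_step.observations["legal_actions"][player])
  time_step = env.step([action])
print("Final rewards:", time_step.rewards)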
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python spiel example."""
from absl import app
from absl import flags
import numpy as np
from open_spiel.python.bots import human
from open_spiel.python.bots import uniform_random
import pyspiel
FLAGS = flags.FLAGS
flags.DEFINE_integer("seed", 12761381, "The seed to use for the RNG.")
# Supported types of players: "random", "human", "check_call", "fold"
flags.DEFINE_string("player0", "random", "Type of the agent for player 0.")
flags.DEFINE_string("player1", "random", "Type of the agent for player 1.")
def LoadAgent(agent_type, game, player_id, rng):
"""Return a bot based on the agent type."""
if agent_type == "random":
return uniform_random.UniformRandomBot(player_id, rng)
elif agent_type == "human":
return human.HumanBot()
elif agent_type == "check_call":
policy = pyspiel.PreferredActionPolicy([1, 0])
return pyspiel.make_policy_bot(game, player_id, FLAGS.seed, policy)
elif agent_type == "fold":
policy = pyspiel.PreferredActionPolicy([0, 1])
return pyspiel.make_policy_bot(game, player_id, FLAGS.seed, policy)
else:
raise RuntimeError("Unrecognized agent type: {}".format(agent_type))
def main(_):
rng = np.random.RandomState(FLAGS.seed)
# Make sure poker is compiled into the library, as it requires an optional
# dependency: the ACPC poker code. To ensure it is compiled in, prepend both
# the install.sh and build commands with OPEN_SPIEL_BUILD_WITH_ACPC=ON.
# See here:
# https://github.com/deepmind/open_spiel/blob/master/docs/install.md#configuration-conditional-dependencies
# for more details on optional dependencies.
games_list = pyspiel.registered_names()
assert "universal_poker" in games_list
fcpa_game_string = pyspiel.hunl_game_string("fcpa")
print("Creating game: {}".format(fcpa_game_string))
game = pyspiel.load_game(fcpa_game_string)
agents = [
LoadAgent(FLAGS.player0, game, 0, rng),
LoadAgent(FLAGS.player1, game, 1, rng)
]
state = game.new_initial_state()
# Print the initial state
print("INITIAL STATE")
print(str(state))
while not state.is_terminal():
# The state can be three different types: chance node,
# simultaneous node, or decision node
current_player = state.current_player()
if state.is_chance_node():
# Chance node: sample an outcome
outcomes = state.chance_outcomes()
num_actions = len(outcomes)
print("Chance node with " + str(num_actions) + " outcomes")
action_list, prob_list = zip(*outcomes)
action = rng.choice(action_list, p=prob_list)
print("Sampled outcome: ",
state.action_to_string(state.current_player(), action))
state.apply_action(action)
else:
# Decision node: sample action for the single current player
legal_actions = state.legal_actions()
for action in legal_actions:
print("Legal action: {} ({})".format(
state.action_to_string(current_player, action), action))
action = agents[current_player].step(state)
action_string = state.action_to_string(current_player, action)
print("Player ", current_player, ", chose action: ",
action_string)
state.apply_action(action)
print("")
print("NEXT STATE:")
print(str(state))
# Game is now done. Print utilities for each player
returns = state.returns()
for pid in range(game.num_players()):
print("Utility for player {} is {}".format(pid, returns[pid]))
if __name__ == "__main__":
app.run(main)
| open_spiel-master | open_spiel/python/examples/poker_fcpa_example.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example: using MMD with dilated entropy to solve for QRE in a matrix Game."""
from absl import app
from absl import flags
from open_spiel.python.algorithms import mmd_dilated
import pyspiel
FLAGS = flags.FLAGS
flags.DEFINE_integer("iterations", 1000, "Number of iterations")
flags.DEFINE_float(
"alpha", 0.1, "QRE parameter, larger value amounts to more regularization")
flags.DEFINE_integer("print_freq", 100, "How often to print the gap")
# create pyspiel perturbed RPS matrix game
game = pyspiel.create_matrix_game([[0, -1, 3], [1, 0, -3], [-3, 3, 0]],
[[0, 1, -3], [-1, 0, 3], [3, -3, 0]])
game = pyspiel.convert_to_turn_based(game)
def main(_):
mmd = mmd_dilated.MMDDilatedEnt(game, FLAGS.alpha)
for i in range(FLAGS.iterations):
mmd.update_sequences()
if i % FLAGS.print_freq == 0:
conv = mmd.get_gap()
print("Iteration {} gap {}".format(i, conv))
# Extract policies for both players
print(mmd.get_policies().action_probability_array)
# Note the sequence form and behavioural-form coincide
# for a normal-form game (sequence form has extra root value of 1)
print(mmd.current_sequences())
if __name__ == "__main__":
app.run(main)
| open_spiel-master | open_spiel/python/examples/mmd_matrix_example.py |
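# Added usage sketch (not part of the repository): the same preprocessing the
# script above applies to perturbed RPS, here for matching pennies. The two
# payoff matrices are for the row and column player respectively.
import pyspiel
matching_pennies = pyspiel.create_matrix_game([[1, -1], [-1, 1]],
                                              [[-1, 1], [1, -1]])
turn_based = pyspiel.convert_to_turn_based(matching_pennies)
print(turn_based.num_players(), turn_based.num_distinct_actions())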
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple AlphaZero tic tac toe example.
Take a look at the log-learner.txt in the output directory.
If you want more control, check out `alpha_zero.py`.
"""
from absl import app
from absl import flags
from open_spiel.python.algorithms.alpha_zero import alpha_zero
from open_spiel.python.utils import spawn
flags.DEFINE_string("path", None, "Where to save checkpoints.")
FLAGS = flags.FLAGS
def main(unused_argv):
config = alpha_zero.Config(
game="tic_tac_toe",
path=FLAGS.path,
learning_rate=0.01,
weight_decay=1e-4,
train_batch_size=128,
replay_buffer_size=2**14,
replay_buffer_reuse=4,
max_steps=25,
checkpoint_freq=25,
actors=4,
evaluators=4,
uct_c=1,
max_simulations=20,
policy_alpha=0.25,
policy_epsilon=1,
temperature=1,
temperature_drop=4,
evaluation_window=50,
eval_levels=7,
nn_model="resnet",
nn_width=128,
nn_depth=2,
observation_shape=None,
output_size=None,
quiet=True,
)
alpha_zero.alpha_zero(config)
if __name__ == "__main__":
with spawn.main_handler():
app.run(main)
| open_spiel-master | open_spiel/python/examples/tic_tac_toe_alpha_zero.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DQN agents trained on Skat by independent Q-learning."""
import os
import random
from absl import app
from absl import flags
from absl import logging
import numpy as np
import tensorflow.compat.v1 as tf
from open_spiel.python import rl_environment
from open_spiel.python.algorithms import dqn
from open_spiel.python.algorithms import random_agent
FLAGS = flags.FLAGS
# Training parameters
flags.DEFINE_string("checkpoint_dir", "/tmp/skat_dqn/",
"Directory to save/load the agent.")
flags.DEFINE_integer("num_train_episodes", int(1e6),
"Number of training episodes.")
flags.DEFINE_integer(
"eval_every", 1000,
"Episode frequency at which the DQN agents are evaluated.")
flags.DEFINE_integer(
"num_eval_games", 1000,
"How many games to play during each evaluation.")
# DQN model hyper-parameters
flags.DEFINE_list("hidden_layers_sizes", [64, 64],
"Number of hidden units in the Q-Network MLP.")
flags.DEFINE_integer("replay_buffer_capacity", int(1e5),
"Size of the replay buffer.")
flags.DEFINE_integer("batch_size", 32,
"Number of transitions to sample at each learning step.")
flags.DEFINE_bool("randomize_positions", True,
"Randomize the position of each agent before every game.")
def eval_against_random_bots(env, trained_agents, random_agents, num_episodes):
"""Evaluates `trained_agents` against `random_agents` for `num_episodes`."""
num_players = len(trained_agents)
sum_episode_rewards = np.zeros(num_players)
for player_pos in range(num_players):
for _ in range(num_episodes):
cur_agents = random_agents[:]
if FLAGS.randomize_positions:
eval_player_pos = random.randrange(num_players)
else:
eval_player_pos = player_pos
cur_agents[eval_player_pos] = trained_agents[player_pos]
cur_agents[eval_player_pos].player_id = eval_player_pos
time_step = env.reset()
episode_rewards = 0
while not time_step.last():
player_id = time_step.observations["current_player"]
agent_output = cur_agents[player_id].step(
time_step, is_evaluation=True)
action_list = [agent_output.action]
time_step = env.step(action_list)
episode_rewards += time_step.rewards[eval_player_pos]
sum_episode_rewards[player_pos] += episode_rewards
return sum_episode_rewards / num_episodes
def main(_):
game = "skat"
num_players = 3
env_configs = {}
env = rl_environment.Environment(game, **env_configs)
observation_tensor_size = env.observation_spec()["info_state"][0]
num_actions = env.action_spec()["num_actions"]
# random agents for evaluation
random_agents = [
random_agent.RandomAgent(player_id=idx, num_actions=num_actions)
for idx in range(num_players)
]
with tf.Session() as sess:
summaries_dir = os.path.join(FLAGS.checkpoint_dir, "random_eval")
summary_writer = tf.summary.FileWriter(
summaries_dir, tf.get_default_graph())
hidden_layers_sizes = [int(l) for l in FLAGS.hidden_layers_sizes]
# pylint: disable=g-complex-comprehension
agents = [
dqn.DQN(
session=sess,
player_id=idx,
state_representation_size=observation_tensor_size,
num_actions=num_actions,
hidden_layers_sizes=hidden_layers_sizes,
replay_buffer_capacity=FLAGS.replay_buffer_capacity,
batch_size=FLAGS.batch_size) for idx in range(num_players)
]
saver = tf.train.Saver()
sess.run(tf.global_variables_initializer())
for ep in range(FLAGS.num_train_episodes):
if (ep + 1) % FLAGS.eval_every == 0:
r_mean = eval_against_random_bots(env, agents, random_agents,
FLAGS.num_eval_games)
logging.info("[%s] Mean episode rewards %s", ep + 1, r_mean)
for i in range(num_players):
summary = tf.Summary()
summary.value.add(tag="mean_reward/random_{}".format(i),
simple_value=r_mean[i])
summary_writer.add_summary(summary, ep)
summary_writer.flush()
saver.save(sess, FLAGS.checkpoint_dir, ep)
time_step = env.reset()
# Randomize position.
if FLAGS.randomize_positions:
positions = random.sample(range(len(agents)), len(agents))
while not time_step.last():
player_id = time_step.observations["current_player"]
if FLAGS.randomize_positions:
position = positions[player_id]
agents[position].player_id = player_id
else:
position = player_id
agent_output = agents[position].step(time_step)
action_list = [agent_output.action]
time_step = env.step(action_list)
# Episode is over, step all agents with final info state.
for agent in agents:
agent.step(time_step)
if __name__ == "__main__":
app.run(main)
| open_spiel-master | open_spiel/python/examples/skat_dqn.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Ephemeral Value Adjustment example: https://arxiv.org/abs/1810.08163."""
from absl import app
from absl import flags
from absl import logging
import tensorflow.compat.v1 as tf
from open_spiel.python import policy
from open_spiel.python import rl_environment
from open_spiel.python.algorithms import eva
from open_spiel.python.algorithms import exploitability
import pyspiel
FLAGS = flags.FLAGS
flags.DEFINE_integer("num_episodes", 1000, "Number of iterations")
flags.DEFINE_string("game_name", "kuhn_poker", "Name of the game")
class JointPolicy(policy.Policy):
"""Joint policy to be evaluated."""
def __init__(self, agents):
self._agents = agents
def action_probabilities(self, state, player_id=None):
cur_player = state.current_player()
return self._agents[cur_player].action_probabilities(state)
def main(unused_argv):
logging.info("Loading %s", FLAGS.game_name)
env = rl_environment.Environment(FLAGS.game_name)
num_players = env.num_players
num_actions = env.action_spec()["num_actions"]
state_size = env.observation_spec()["info_state"][0]
eva_agents = []
with tf.Session() as sess:
for player in range(num_players):
eva_agents.append(
eva.EVAAgent(
sess,
env,
player,
state_size,
num_actions,
embedding_network_layers=(64, 32),
embedding_size=12,
learning_rate=1e-4,
mixing_parameter=0.5,
memory_capacity=int(1e6),
discount_factor=1.0,
epsilon_start=1.0,
epsilon_end=0.1,
epsilon_decay_duration=int(1e6)))
sess.run(tf.global_variables_initializer())
for _ in range(FLAGS.num_episodes):
time_step = env.reset()
while not time_step.last():
current_player = time_step.observations["current_player"]
current_agent = eva_agents[current_player]
step_out = current_agent.step(time_step)
time_step = env.step([step_out.action])
for agent in eva_agents:
agent.step(time_step)
game = pyspiel.load_game(FLAGS.game_name)
joint_policy = JointPolicy(eva_agents)
conv = exploitability.nash_conv(game, joint_policy)
logging.info("EVA in '%s' - NashConv: %s", FLAGS.game_name, conv)
if __name__ == "__main__":
app.run(main)
| open_spiel-master | open_spiel/python/examples/eva.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example use of the CFR algorithm on Kuhn Poker."""
from absl import app
from open_spiel.python.algorithms import cfr
from open_spiel.python.algorithms import expected_game_score
import pyspiel
def main(_):
game = pyspiel.load_game("kuhn_poker")
cfr_solver = cfr.CFRSolver(game)
iterations = 1000
for i in range(iterations):
cfr_value = cfr_solver.evaluate_and_update_policy()
print("Game util at iteration {}: {}".format(i, cfr_value))
average_policy = cfr_solver.average_policy()
average_policy_values = expected_game_score.policy_value(
game.new_initial_state(), [average_policy] * 2)
print("Computed player 0 value: {}".format(average_policy_values[0]))
print("Expected player 0 value: {}".format(-1 / 18))
if __name__ == "__main__":
app.run(main)
| open_spiel-master | open_spiel/python/examples/kuhn_poker_cfr.py |
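# Added usage sketch (not part of the repository): the same value computation
# as above, applied to the uniform-random policy as a baseline against the
# CFR average policy's expected value of roughly -1/18 for player 0.
from open_spiel.python import policy
from open_spiel.python.algorithms import expected_game_score
import pyspiel
game = pyspiel.load_game("kuhn_poker")
uniform_policy = policy.UniformRandomPolicy(game)
values = expected_game_score.policy_value(game.new_initial_state(),
                                           [uniform_policy] * 2)
print("Uniform-random player 0 value: {}".format(values[0]))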
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example: MMD with dilated entropy to compute approx. Nash in Kuhn poker."""
from absl import app
from absl import flags
from open_spiel.python.algorithms import exploitability
from open_spiel.python.algorithms import mmd_dilated
import pyspiel
FLAGS = flags.FLAGS
flags.DEFINE_integer("iterations", 1000, "Number of iterations")
flags.DEFINE_string("game", "kuhn_poker", "Name of the game")
flags.DEFINE_integer("print_freq", 100, "How often to print the exploitability")
def main(_):
game = pyspiel.load_game(FLAGS.game)
# need to manually set stepsize if alpha = 0
mmd = mmd_dilated.MMDDilatedEnt(game, alpha=0, stepsize=1)
for i in range(FLAGS.iterations):
mmd.update_sequences()
if i % FLAGS.print_freq == 0:
conv = exploitability.exploitability(game, mmd.get_avg_policies())
print("Iteration {} exploitability {}".format(i, conv))
if __name__ == "__main__":
app.run(main)
| open_spiel-master | open_spiel/python/examples/mmd_nash_example.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python Deep CFR example."""
from absl import app
from absl import flags
from absl import logging
from open_spiel.python import policy
from open_spiel.python.algorithms import expected_game_score
from open_spiel.python.algorithms import exploitability
from open_spiel.python.jax import deep_cfr
import pyspiel
FLAGS = flags.FLAGS
flags.DEFINE_integer("num_iterations", 100, "Number of iterations")
flags.DEFINE_integer("num_traversals", 1500, "Number of traversals/games")
flags.DEFINE_string("game_name", "leduc_poker", "Name of the game")
def main(unused_argv):
logging.info("Loading %s", FLAGS.game_name)
game = pyspiel.load_game(FLAGS.game_name)
deep_cfr_solver = deep_cfr.DeepCFRSolver(
game,
policy_network_layers=(64, 64, 64),
advantage_network_layers=(64, 64, 64),
num_iterations=FLAGS.num_iterations,
num_traversals=FLAGS.num_traversals,
learning_rate=1e-3,
batch_size_advantage=2048,
batch_size_strategy=2048,
memory_capacity=1e7,
policy_network_train_steps=5000,
advantage_network_train_steps=750,
reinitialize_advantage_networks=True)
_, advantage_losses, policy_loss = deep_cfr_solver.solve()
for player, losses in advantage_losses.items():
logging.info("Advantage for player %d: %s", player,
losses[:2] + ["..."] + losses[-2:])
logging.info("Advantage Buffer Size for player %s: '%s'", player,
len(deep_cfr_solver.advantage_buffers[player]))
logging.info("Strategy Buffer Size: '%s'",
len(deep_cfr_solver.strategy_buffer))
logging.info("Final policy loss: '%s'", policy_loss)
average_policy = policy.tabular_policy_from_callable(
game, deep_cfr_solver.action_probabilities)
conv = exploitability.nash_conv(game, average_policy)
logging.info("Deep CFR in '%s' - NashConv: %s", FLAGS.game_name, conv)
average_policy_values = expected_game_score.policy_value(
game.new_initial_state(), [average_policy] * 2)
print("Computed player 0 value: {}".format(average_policy_values[0]))
print("Computed player 1 value: {}".format(average_policy_values[1]))
if __name__ == "__main__":
app.run(main)
| open_spiel-master | open_spiel/python/examples/deep_cfr_jax.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example use of JAX policy gradient implementatiom on catch environment."""
import logging
from absl import app
from absl import flags
from open_spiel.python.environments import catch
from open_spiel.python.jax import policy_gradient
FLAGS = flags.FLAGS
flags.DEFINE_integer("num_episodes", int(1e5), "Number of train episodes.")
flags.DEFINE_integer("eval_every", int(1e3),
"'How often to evaluate the policy.")
flags.DEFINE_enum("algorithm", "a2c", ["rpg", "qpg", "rm", "a2c"],
"Algorithms to run.")
def _eval_agent(env, agent, num_episodes):
"""Evaluates `agent` for `num_episodes`."""
rewards = 0.0
for _ in range(num_episodes):
time_step = env.reset()
episode_reward = 0
while not time_step.last():
agent_output = agent.step(time_step, is_evaluation=True)
time_step = env.step([agent_output.action])
episode_reward += time_step.rewards[0]
rewards += episode_reward
return rewards / num_episodes
def main_loop(unused_arg):
"""Trains a Policy Gradient agent in the catch environment."""
env = catch.Environment()
info_state_size = env.observation_spec()["info_state"][0]
num_actions = env.action_spec()["num_actions"]
train_episodes = FLAGS.num_episodes
agent = policy_gradient.PolicyGradient(
player_id=0,
info_state_size=info_state_size,
num_actions=num_actions,
loss_str=FLAGS.algorithm,
hidden_layers_sizes=[128, 128],
lambda_=1.0,
entropy_cost=0.01,
critic_learning_rate=0.1,
pi_learning_rate=0.1,
num_critic_before_pi=3)
# Train agent
for ep in range(train_episodes):
time_step = env.reset()
while not time_step.last():
agent_output = agent.step(time_step)
action_list = [agent_output.action]
time_step = env.step(action_list)
# Episode is over, step agent with final info state.
agent.step(time_step)
if ep and ep % FLAGS.eval_every == 0:
logging.info("-" * 80)
logging.info("Episode %s", ep)
logging.info("Loss: %s", agent.loss)
avg_return = _eval_agent(env, agent, 100)
logging.info("Avg return: %s", avg_return)
if __name__ == "__main__":
app.run(main_loop)
| open_spiel-master | open_spiel/python/examples/catch_jax_policy_gradient.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Game tree visualization example."""
from absl import app
from absl import flags
from absl import logging
import pyspiel
from open_spiel.python.visualizations import treeviz
FLAGS = flags.FLAGS
flags.DEFINE_string("game", "kuhn_poker", "Name of the game")
flags.DEFINE_string("out", "/tmp/gametree.png", "Name of output file, e.g., "
"[*.png|*.pdf].")
flags.DEFINE_enum("prog", "dot", ["dot", "neato", "circo"], "Graphviz layout.")
flags.DEFINE_boolean("group_infosets", False, "Whether to group infosets.")
flags.DEFINE_boolean("group_terminal", False,
"Whether to group terminal nodes.")
flags.DEFINE_boolean("group_pubsets", False, "Whether to group public states.")
flags.DEFINE_string("target_pubset", "*",
"Limit grouping of public states only to specified state.")
flags.DEFINE_boolean("verbose", False, "Whether to print verbose output.")
def _zero_sum_node_decorator(state):
"""Custom node decorator that only shows the return of the first player."""
attrs = treeviz.default_node_decorator(state) # get default attributes
if state.is_terminal():
attrs["label"] = str(int(state.returns()[0]))
return attrs
def main(argv):
del argv
game = pyspiel.load_game(FLAGS.game)
game_type = game.get_type()
if game_type.dynamics == pyspiel.GameType.Dynamics.SIMULTANEOUS:
logging.warn("%s is not turn-based. Trying to reload game as turn-based.",
FLAGS.game)
game = pyspiel.load_game_as_turn_based(FLAGS.game)
game_type = game.get_type()
if game_type.dynamics != pyspiel.GameType.Dynamics.SEQUENTIAL:
raise ValueError("Game must be sequential, not {}".format(
game_type.dynamics))
if (game_type.utility == pyspiel.GameType.Utility.ZERO_SUM and
game.num_players() == 2):
logging.info("Game is zero-sum: only showing first-player's returns.")
gametree = treeviz.GameTree(
game,
node_decorator=_zero_sum_node_decorator,
group_infosets=FLAGS.group_infosets,
group_terminal=FLAGS.group_terminal,
group_pubsets=FLAGS.group_pubsets,
target_pubset=FLAGS.target_pubset)
else:
# use default decorators
gametree = treeviz.GameTree(
game,
group_infosets=FLAGS.group_infosets,
group_terminal=FLAGS.group_terminal,
group_pubsets=FLAGS.group_pubsets,
target_pubset=FLAGS.target_pubset)
if FLAGS.verbose:
logging.info("Game tree:\n%s", gametree.to_string())
gametree.draw(FLAGS.out, prog=FLAGS.prog)
logging.info("Game tree saved to file: %s", FLAGS.out)
if __name__ == "__main__":
app.run(main)
| open_spiel-master | open_spiel/python/examples/treeviz_example.py |
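# Added usage sketch (not part of the repository): treeviz with the default
# decorators and no grouping options. Rendering requires graphviz/pygraphviz
# to be installed, as with the script above.
import pyspiel
from open_spiel.python.visualizations import treeviz
game = pyspiel.load_game("kuhn_poker")
gametree = treeviz.GameTree(game)   # default node and edge decorators
gametree.draw("/tmp/kuhn_tree.png", prog="dot")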
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python Exploitability Descent example.
This example uses a neural network to approximate the policy. For a simple
tabular example, see the unit tests for the exploitability_descent algorithm:
```
solver = exploitability_descent.Solver(game)
with tf.Session() as session:
for step in range(num_steps):
nash_conv = solver.Step(session, learning_rate)
```
"""
import time
from absl import app
from absl import flags
from absl import logging
import numpy as np
import tensorflow.compat.v1 as tf
from open_spiel.python.algorithms import exploitability_descent
import pyspiel
# Temporarily disable TF2 until we update the code.
tf.disable_v2_behavior()
FLAGS = flags.FLAGS
flags.DEFINE_integer("num_steps", 10, "Number of iterations")
flags.DEFINE_string("game_name", "kuhn_poker", "Name of the game")
flags.DEFINE_integer("print_freq", 100, "Log progress every this many steps")
flags.DEFINE_float("init_lr", 0.1, "The initial learning rate")
flags.DEFINE_float("regularizer_scale", 0.001,
"Scale for L2 regularization of NN weights")
flags.DEFINE_integer("num_hidden", 64, "Hidden units.")
flags.DEFINE_integer("num_layers", 1, "Hidden layers.")
def main(argv):
del argv
# Create the game to use, and a loss calculator for it
logging.info("Loading %s", FLAGS.game_name)
game = pyspiel.load_game(FLAGS.game_name)
loss_calculator = exploitability_descent.LossCalculator(game)
# Build the network
num_hidden = FLAGS.num_hidden
num_layers = FLAGS.num_layers
layer = tf.constant(loss_calculator.tabular_policy.state_in, tf.float64)
for _ in range(num_layers):
regularizer = (tf.keras.regularizers.l2(l=FLAGS.regularizer_scale))
layer = tf.layers.dense(
layer, num_hidden, activation=tf.nn.relu,
kernel_regularizer=regularizer)
regularizer = (tf.keras.regularizers.l2(l=FLAGS.regularizer_scale))
layer = tf.layers.dense(
layer, game.num_distinct_actions(), kernel_regularizer=regularizer)
tabular_policy = loss_calculator.masked_softmax(layer)
# Build the loss - exploitability descent loss plus regularizer loss
nash_conv, loss = loss_calculator.loss(tabular_policy)
loss += tf.losses.get_regularization_loss()
# Use a simple gradient descent optimizer
learning_rate = tf.placeholder(tf.float64, (), name="learning_rate")
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
optimizer_step = optimizer.minimize(loss)
# Training loop
with tf.train.MonitoredTrainingSession() as sess:
for step in range(FLAGS.num_steps):
t0 = time.time()
nash_conv_value, _ = sess.run(
[nash_conv, optimizer_step],
feed_dict={
learning_rate: FLAGS.init_lr / np.sqrt(1 + step),
})
t1 = time.time()
# Optionally log our progress
if step % FLAGS.print_freq == 0:
logging.info("step=%d nash_conv=%g time per step=%.4f", step,
nash_conv_value, t1 - t0)
if __name__ == "__main__":
app.run(main)
| open_spiel-master | open_spiel/python/examples/exploitability_descent.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python XFP example."""
import sys
from absl import app
from absl import flags
from open_spiel.python.algorithms import exploitability
from open_spiel.python.algorithms import fictitious_play
import pyspiel
FLAGS = flags.FLAGS
flags.DEFINE_integer("iterations", 100, "Number of iterations")
flags.DEFINE_string("game", "leduc_poker", "Name of the game")
flags.DEFINE_integer("players", 2, "Number of players")
flags.DEFINE_integer("print_freq", 10, "How often to print the exploitability")
def main(_):
game = pyspiel.load_game(FLAGS.game, {"players": FLAGS.players})
xfp_solver = fictitious_play.XFPSolver(game)
for i in range(FLAGS.iterations):
xfp_solver.iteration()
conv = exploitability.exploitability(game, xfp_solver.average_policy())
if i % FLAGS.print_freq == 0:
print("Iteration: {} Conv: {}".format(i, conv))
sys.stdout.flush()
if __name__ == "__main__":
app.run(main)
| open_spiel-master | open_spiel/python/examples/fictitious_play_example.py |
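# Added usage sketch (not part of the repository): run a few XFP iterations on
# Kuhn poker and evaluate the average policy's expected value directly, rather
# than only its exploitability as in the script above. This assumes the object
# returned by average_policy() can be passed to policy_value, just as it is
# passed to exploitability above.
from open_spiel.python.algorithms import expected_game_score
from open_spiel.python.algorithms import fictitious_play
import pyspiel
game = pyspiel.load_game("kuhn_poker")
xfp_solver = fictitious_play.XFPSolver(game)
for _ in range(100):
  xfp_solver.iteration()
avg_policy = xfp_solver.average_policy()
values = expected_game_score.policy_value(game.new_initial_state(),
                                           [avg_policy] * 2)
print("Player 0 value under the XFP average policy: {}".format(values[0]))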
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example use of the C++ MCCFR algorithms on Kuhn Poker.
This example calls the underlying C++ implementations via the Python bindings.
Note that there are some pure Python implementations of some of these algorithms
in python/algorithms as well.
"""
import pickle
from absl import app
from absl import flags
import pyspiel
FLAGS = flags.FLAGS
flags.DEFINE_enum(
"sampling",
"external",
["external", "outcome"],
"Sampling for the MCCFR solver",
)
flags.DEFINE_integer("iterations", 50, "Number of iterations")
flags.DEFINE_string("game", "kuhn_poker", "Name of the game")
flags.DEFINE_integer("players", 2, "Number of players")
MODEL_FILE_NAME = "{}_sampling_mccfr_solver.pickle"
def run_iterations(game, solver, start_iteration=0):
"""Run iterations of MCCFR."""
for i in range(int(FLAGS.iterations / 2)):
solver.run_iteration()
policy = solver.average_policy()
exploitability = pyspiel.exploitability(game, policy)
# We also compute NashConv to highlight an important API feature:
# when using Monte Carlo sampling, the policy
# may not have a table entry for every info state.
# Therefore, when calling nash_conv, ensure the third argument,
# "use_state_get_policy" is set to True
# See https://github.com/deepmind/open_spiel/issues/500
nash_conv = pyspiel.nash_conv(game, policy, True)
print("Iteration {} nashconv: {:.6f} exploitability: {:.6f}".format(
start_iteration + i, nash_conv, exploitability))
def main(_):
game = pyspiel.load_game(
FLAGS.game,
{"players": FLAGS.players},
)
if FLAGS.sampling == "external":
solver = pyspiel.ExternalSamplingMCCFRSolver(
game,
avg_type=pyspiel.MCCFRAverageType.FULL,
)
elif FLAGS.sampling == "outcome":
solver = pyspiel.OutcomeSamplingMCCFRSolver(game)
run_iterations(game, solver)
print("Persisting the model...")
with open(MODEL_FILE_NAME.format(FLAGS.sampling), "wb") as file:
pickle.dump(solver, file, pickle.HIGHEST_PROTOCOL)
print("Loading the model...")
with open(MODEL_FILE_NAME.format(FLAGS.sampling), "rb") as file:
loaded_solver = pickle.load(file)
print("Exploitability of the loaded model: {:.6f}".format(
pyspiel.exploitability(game, loaded_solver.average_policy())))
run_iterations(game, solver, start_iteration=int(FLAGS.iterations / 2))
if __name__ == "__main__":
app.run(main)
| open_spiel-master | open_spiel/python/examples/mccfr_cpp_example.py |
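# Added usage sketch (not part of the repository): the outcome-sampling C++
# solver on its own. The third argument to nash_conv is needed because a
# sampled average policy may not have an entry for every information state.
import pyspiel
game = pyspiel.load_game("kuhn_poker")
solver = pyspiel.OutcomeSamplingMCCFRSolver(game)
for _ in range(1000):
  solver.run_iteration()
avg_policy = solver.average_policy()
print("NashConv: {:.6f}".format(pyspiel.nash_conv(game, avg_policy, True)))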
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example for policy_aggregator_example.
Example.
"""
from absl import app
from absl import flags
import numpy as np
from open_spiel.python import policy
from open_spiel.python import rl_environment
from open_spiel.python.algorithms import exploitability
from open_spiel.python.algorithms import policy_aggregator
FLAGS = flags.FLAGS
flags.DEFINE_string("game_name", "kuhn_poker", "Game name")
class TestPolicy(policy.Policy):
def __init__(self, action_int):
self._action_int = action_int
def action_probabilities(self, state, player_id=None):
return {self._action_int: 1.0}
def main(unused_argv):
env = rl_environment.Environment(FLAGS.game_name)
policies = [[ # pylint: disable=g-complex-comprehension
policy.TabularPolicy(env.game).copy_with_noise(alpha=float(i), beta=1.0)
for i in range(2)
] for _ in range(2)]
probabilities = [
list(np.ones(len(policies[i])) / len(policies[i])) for i in range(2)
]
pol_ag = policy_aggregator.PolicyAggregator(env.game)
aggr_policies = pol_ag.aggregate([0, 1], policies, probabilities)
exploitabilities = exploitability.nash_conv(env.game, aggr_policies)
print("Exploitability : {}".format(exploitabilities))
print(policies[0][0].action_probability_array)
print(policies[0][1].action_probability_array)
print(aggr_policies.policy)
print("\nCopy Example")
mother_policy = policy.TabularPolicy(env.game).copy_with_noise(1, 10)
policies = [[mother_policy.__copy__() for _ in range(2)] for _ in range(2)]
probabilities = [
list(np.ones(len(policies)) / len(policies)) for _ in range(2)
]
pol_ag = policy_aggregator.PolicyAggregator(env.game)
aggr_policy = pol_ag.aggregate([0], policies, probabilities)
for state, value in aggr_policy.policy[0].items():
polici = mother_policy.policy_for_key(state)
value_normal = {
action: probability
for action, probability in enumerate(polici)
if probability > 0
}
for key in value.keys():
print("State : {}. Key : {}. Aggregated : {}. Real : {}. Passed : {}"
.format(state, key, value[key], value_normal[key],
np.abs(value[key] - value_normal[key]) < 1e-8))
if __name__ == "__main__":
app.run(main)
| open_spiel-master | open_spiel/python/examples/policy_aggregator_example.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Compute exploitability of a uniform policy."""
from absl import app
from absl import flags
from open_spiel.python import policy
from open_spiel.python.algorithms import exploitability
import pyspiel
FLAGS = flags.FLAGS
flags.DEFINE_string("game", "kuhn_poker", "Name of the game")
def main(_):
game = pyspiel.load_game(FLAGS.game)
expl = exploitability.exploitability(game, policy.UniformRandomPolicy(game))
print("Exploitability: {}".format(expl))
if __name__ == "__main__":
app.run(main)
| open_spiel-master | open_spiel/python/examples/uniform_policy_exploitability.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python example of multiagent Nash Q-learners."""
import enum
import logging
from absl import app
from open_spiel.python import rl_environment
from open_spiel.python.algorithms.tabular_multiagent_qlearner import MAQLearner
from open_spiel.python.algorithms.tabular_multiagent_qlearner import TwoPlayerNashSolver
from open_spiel.python.algorithms.tabular_qlearner import QLearner
class Action(enum.IntEnum):
STAY = 0
LEFT = 1
UP = 2
RIGHT = 3
DOWN = 4
def print_iteration(actions, state):
"""Print actions and state."""
logging.info("Action taken by agent 0: %s", Action(actions[0]).name)
logging.info("Action taken by agent 1: %s", Action(actions[1]).name)
logging.info("Board state:\n %s", state)
logging.info("-" * 80)
def marl_path_finding_example(_):
"""Example usage of multiagent Nash Q-learner.
Based on https://www.jmlr.org/papers/volume4/hu03a/hu03a.pdf
"""
logging.info("Creating the Grid Game")
env = rl_environment.Environment(
"pathfinding", grid="B.A\n...\na.b", players=2, step_reward=-1.)
qlearner = QLearner(0, env.game.num_distinct_actions())
nashqlearner = MAQLearner(1, 2, [env.game.num_distinct_actions()] * 2,
TwoPlayerNashSolver())
time_step = env.reset()
actions = [None, None]
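  # The Nash Q-learner conditions its update on the previous *joint* action,
  # so last round's actions are carried along (None before the first step).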
while not time_step.last():
actions = [
qlearner.step(time_step).action,
nashqlearner.step(time_step, actions).action
]
time_step = env.step(actions)
print_iteration(actions, env.get_state)
if __name__ == "__main__":
app.run(marl_path_finding_example)
| open_spiel-master | open_spiel/python/examples/marl_nashq_example.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example use of the CFR algorithm on Kuhn Poker."""
from absl import app
from absl import flags
from open_spiel.python.algorithms import discounted_cfr
from open_spiel.python.algorithms import exploitability
import pyspiel
FLAGS = flags.FLAGS
flags.DEFINE_integer("iterations", 500, "Number of iterations")
flags.DEFINE_string(
"game",
"turn_based_simultaneous_game(game=goofspiel(imp_info=True,num_cards=4,players=2,points_order=descending))",
"Name of the game")
flags.DEFINE_integer("players", 2, "Number of players")
flags.DEFINE_integer("print_freq", 10, "How often to print the exploitability")
def main(_):
game = pyspiel.load_game(FLAGS.game)
discounted_cfr_solver = discounted_cfr.DCFRSolver(game)
for i in range(FLAGS.iterations):
discounted_cfr_solver.evaluate_and_update_policy()
if i % FLAGS.print_freq == 0:
conv = exploitability.exploitability(
game, discounted_cfr_solver.average_policy())
print("Iteration {} exploitability {}".format(i, conv))
if __name__ == "__main__":
app.run(main)
| open_spiel-master | open_spiel/python/examples/discounted_cfr.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example use of the MCCFR algorithm on Kuhn Poker."""
from absl import app
from absl import flags
from open_spiel.python.algorithms import exploitability
from open_spiel.python.algorithms import external_sampling_mccfr as external_mccfr
from open_spiel.python.algorithms import outcome_sampling_mccfr as outcome_mccfr
import pyspiel
FLAGS = flags.FLAGS
flags.DEFINE_enum(
"sampling",
"outcome",
["external", "outcome"],
"Sampling for the MCCFR solver",
)
flags.DEFINE_integer("iterations", 10000, "Number of iterations")
flags.DEFINE_string("game", "kuhn_poker", "Name of the game")
flags.DEFINE_integer("players", 2, "Number of players")
flags.DEFINE_integer("print_freq", 1000,
"How often to print the exploitability")
def main(_):
game = pyspiel.load_game(FLAGS.game, {"players": FLAGS.players})
if FLAGS.sampling == "external":
cfr_solver = external_mccfr.ExternalSamplingSolver(
game, external_mccfr.AverageType.SIMPLE)
else:
cfr_solver = outcome_mccfr.OutcomeSamplingSolver(game)
for i in range(FLAGS.iterations):
cfr_solver.iteration()
if i % FLAGS.print_freq == 0:
conv = exploitability.nash_conv(game, cfr_solver.average_policy())
print("Iteration {} exploitability {}".format(i, conv))
if __name__ == "__main__":
app.run(main)
| open_spiel-master | open_spiel/python/examples/mccfr_example.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python Deep CFR example."""
from absl import app
from absl import flags
from absl import logging
from open_spiel.python import policy
from open_spiel.python.algorithms import deep_cfr_tf2
from open_spiel.python.algorithms import expected_game_score
from open_spiel.python.algorithms import exploitability
import pyspiel
FLAGS = flags.FLAGS
flags.DEFINE_integer("num_iterations", 100, "Number of iterations")
flags.DEFINE_integer("num_traversals", 150, "Number of traversals/games")
flags.DEFINE_string("game_name", "leduc_poker", "Name of the game")
def main(unused_argv):
logging.info("Loading %s", FLAGS.game_name)
game = pyspiel.load_game(FLAGS.game_name)
deep_cfr_solver = deep_cfr_tf2.DeepCFRSolver(
game,
policy_network_layers=(64, 64, 64, 64),
advantage_network_layers=(64, 64, 64, 64),
num_iterations=FLAGS.num_iterations,
num_traversals=FLAGS.num_traversals,
learning_rate=1e-3,
batch_size_advantage=2048,
batch_size_strategy=2048,
memory_capacity=1e6,
policy_network_train_steps=5000,
advantage_network_train_steps=500,
reinitialize_advantage_networks=True,
infer_device="cpu",
train_device="cpu")
_, advantage_losses, policy_loss = deep_cfr_solver.solve()
for player, losses in advantage_losses.items():
logging.info("Advantage for player %d: %s", player,
losses[:2] + ["..."] + losses[-2:])
logging.info("Advantage Buffer Size for player %s: '%s'", player,
len(deep_cfr_solver.advantage_buffers[player]))
logging.info("Strategy Buffer Size: '%s'",
len(deep_cfr_solver.strategy_buffer))
logging.info("Final policy loss: '%s'", policy_loss)
average_policy = policy.tabular_policy_from_callable(
game, deep_cfr_solver.action_probabilities)
conv = exploitability.nash_conv(game, average_policy)
logging.info("Deep CFR in '%s' - NashConv: %s", FLAGS.game_name, conv)
average_policy_values = expected_game_score.policy_value(
game.new_initial_state(), [average_policy] * 2)
print("Computed player 0 value: {}".format(average_policy_values[0]))
print("Computed player 1 value: {}".format(average_policy_values[1]))
if __name__ == "__main__":
app.run(main)
| open_spiel-master | open_spiel/python/examples/deep_cfr_tf2.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Plays a round of Tarok with actions from user input."""
import pyspiel
def play_tarok_game():
game = pyspiel.load_game("tarok(players=3)")
state = game.new_initial_state()
while not state.is_terminal():
print_info(game, state)
state.apply_action(int(input("Enter action: ")))
print("-" * 70, "\n")
print(state.current_game_phase())
print("Players' scores: {}".format(state.rewards()))
def print_info(unused_game, state):
"""Print information about the game state."""
print("Game phase: {}".format(state.current_game_phase()))
print("Selected contract: {}".format(state.selected_contract()))
print("Current player: {}".format(state.current_player()))
player_cards = state.player_cards(state.current_player())
action_names = [state.card_action_to_string(a) for a in player_cards]
print("\nPlayer cards: {}".format(
list(zip(action_names, player_cards))))
if state.current_game_phase() == pyspiel.TarokGamePhase.TALON_EXCHANGE:
print_talon_exchange_info(state)
elif state.current_game_phase() == pyspiel.TarokGamePhase.TRICKS_PLAYING:
print_tricks_playing_info(state)
else:
print()
legal_actions = state.legal_actions()
action_names = [state.action_to_string(a) for a in state.legal_actions()]
print("Legal actions: {}\n".format(
list(zip(action_names, legal_actions))))
def print_talon_exchange_info(state):
talon = [[state.card_action_to_string(x) for x in talon_set]
for talon_set in state.talon_sets()]
print("\nTalon: {}\n".format(talon))
def print_tricks_playing_info(state):
trick_cards = state.trick_cards()
action_names = [state.card_action_to_string(a) for a in trick_cards]
print("\nTrick cards: {}\n".format(
list(zip(action_names, trick_cards))))
if __name__ == "__main__":
play_tarok_game()
| open_spiel-master | open_spiel/python/examples/play_tarok_game.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python spiel example."""
import logging
from absl import app
from absl import flags
from open_spiel.python.environments import catch
from open_spiel.python.pytorch import policy_gradient
FLAGS = flags.FLAGS
flags.DEFINE_integer("num_episodes", int(1e5), "Number of train episodes.")
flags.DEFINE_integer("eval_every", int(1e3),
"'How often to evaluate the policy.")
flags.DEFINE_enum("algorithm", "a2c", ["rpg", "qpg", "rm", "a2c"],
"Algorithms to run.")
def _eval_agent(env, agent, num_episodes):
"""Evaluates `agent` for `num_episodes`."""
rewards = 0.0
for _ in range(num_episodes):
time_step = env.reset()
episode_reward = 0
while not time_step.last():
agent_output = agent.step(time_step, is_evaluation=True)
time_step = env.step([agent_output.action])
episode_reward += time_step.rewards[0]
rewards += episode_reward
return rewards / num_episodes
def main_loop(unused_arg):
"""Trains a Policy Gradient agent in the catch environment."""
env = catch.Environment()
info_state_size = env.observation_spec()["info_state"][0]
num_actions = env.action_spec()["num_actions"]
train_episodes = FLAGS.num_episodes
agent = policy_gradient.PolicyGradient(
player_id=0,
info_state_size=info_state_size,
num_actions=num_actions,
loss_str=FLAGS.algorithm,
hidden_layers_sizes=[128, 128],
batch_size=128,
entropy_cost=0.01,
critic_learning_rate=0.1,
pi_learning_rate=0.1,
num_critic_before_pi=3)
# Train agent
for ep in range(train_episodes):
time_step = env.reset()
while not time_step.last():
agent_output = agent.step(time_step)
action_list = [agent_output.action]
time_step = env.step(action_list)
# Episode is over, step agent with final info state.
agent.step(time_step)
if ep and ep % FLAGS.eval_every == 0:
logging.info("-" * 80)
logging.info("Episode %s", ep)
logging.info("Loss: %s", agent.loss)
avg_return = _eval_agent(env, agent, 100)
logging.info("Avg return: %s", avg_return)
if __name__ == "__main__":
app.run(main_loop)
| open_spiel-master | open_spiel/python/examples/catch_pytorch_policy_gradient.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tabular Q-Learner self-play example.
Two Q-Learning agents are trained by playing against each other.
"""
import sys
from absl import app
from absl import flags
import numpy as np
from open_spiel.python import rl_environment
from open_spiel.python import rl_tools
from open_spiel.python.algorithms import tabular_qlearner
FLAGS = flags.FLAGS
flags.DEFINE_integer("num_train_episodes", int(1e6),
"Number of training episodes.")
flags.DEFINE_integer("num_eval_episodes", int(1e4),
"Number of episodes to use during each evaluation.")
flags.DEFINE_integer("eval_freq", int(1e4),
"The frequency (in episodes) to run evaluation.")
flags.DEFINE_string(
"epsilon_schedule", None,
"Epsilon schedule: e.g. 'linear,init,final,num_steps' or "
"'constant,0.2'")
flags.DEFINE_string("game", "tic_tac_toe", "Game to load.")
def eval_agents(env, agents, num_episodes):
"""Evaluate the agents, returning a numpy array of average returns."""
rewards = np.array([0] * env.num_players, dtype=np.float64)
for _ in range(num_episodes):
time_step = env.reset()
while not time_step.last():
player_id = time_step.observations["current_player"]
agent_output = agents[player_id].step(time_step, is_evaluation=True)
time_step = env.step([agent_output.action])
for i in range(env.num_players):
rewards[i] += time_step.rewards[i]
rewards /= num_episodes
return rewards
def create_epsilon_schedule(sched_str):
"""Creates an epsilon schedule from the string as desribed in the flags."""
values = FLAGS.epsilon_schedule.split(",")
if values[0] == "linear":
assert len(values) == 4
return rl_tools.LinearSchedule(
float(values[1]), float(values[2]), int(values[3]))
elif values[0] == "constant":
assert len(values) == 2
return rl_tools.ConstantSchedule(float(values[1]))
else:
print("Unrecognized schedule string: {}".format(sched_str))
sys.exit()
def main(_):
env = rl_environment.Environment(FLAGS.game)
num_players = env.num_players
num_actions = env.action_spec()["num_actions"]
agents = []
if FLAGS.epsilon_schedule is not None:
for idx in range(num_players):
agents.append(
tabular_qlearner.QLearner(
player_id=idx,
num_actions=num_actions,
epsilon_schedule=create_epsilon_schedule(FLAGS.epsilon_schedule)))
else:
agents = [
tabular_qlearner.QLearner(player_id=idx, num_actions=num_actions)
for idx in range(num_players)
]
# 1. Train the agents
training_episodes = FLAGS.num_train_episodes
for cur_episode in range(training_episodes):
if cur_episode % int(FLAGS.eval_freq) == 0:
avg_rewards = eval_agents(env, agents, FLAGS.num_eval_episodes)
print("Training episodes: {}, Avg rewards: {}".format(
cur_episode, avg_rewards))
time_step = env.reset()
while not time_step.last():
player_id = time_step.observations["current_player"]
agent_output = agents[player_id].step(time_step)
time_step = env.step([agent_output.action])
# Episode is over, step all agents with final info state.
for agent in agents:
agent.step(time_step)
if __name__ == "__main__":
app.run(main)
| open_spiel-master | open_spiel/python/examples/independent_tabular_qlearning.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""NFSP agents trained on Kuhn Poker."""
from absl import app
from absl import flags
from absl import logging
import tensorflow.compat.v1 as tf
from open_spiel.python import policy
from open_spiel.python import rl_environment
from open_spiel.python.algorithms import exploitability
from open_spiel.python.algorithms import nfsp
FLAGS = flags.FLAGS
flags.DEFINE_integer("num_train_episodes", int(3e6),
"Number of training episodes.")
flags.DEFINE_integer("eval_every", 10000,
"Episode frequency at which the agents are evaluated.")
flags.DEFINE_list("hidden_layers_sizes", [
128,
], "Number of hidden units in the avg-net and Q-net.")
flags.DEFINE_integer("replay_buffer_capacity", int(2e5),
"Size of the replay buffer.")
flags.DEFINE_integer("reservoir_buffer_capacity", int(2e6),
"Size of the reservoir buffer.")
flags.DEFINE_float("anticipatory_param", 0.1,
"Prob of using the rl best response as episode policy.")
class NFSPPolicies(policy.Policy):
"""Joint policy to be evaluated."""
def __init__(self, env, nfsp_policies, mode):
game = env.game
player_ids = [0, 1]
super(NFSPPolicies, self).__init__(game, player_ids)
self._policies = nfsp_policies
self._mode = mode
self._obs = {"info_state": [None, None], "legal_actions": [None, None]}
def action_probabilities(self, state, player_id=None):
cur_player = state.current_player()
legal_actions = state.legal_actions(cur_player)
self._obs["current_player"] = cur_player
self._obs["info_state"][cur_player] = (
state.information_state_tensor(cur_player))
self._obs["legal_actions"][cur_player] = legal_actions
info_state = rl_environment.TimeStep(
observations=self._obs, rewards=None, discounts=None, step_type=None)
with self._policies[cur_player].temp_mode_as(self._mode):
p = self._policies[cur_player].step(info_state, is_evaluation=True).probs
prob_dict = {action: p[action] for action in legal_actions}
return prob_dict
def main(unused_argv):
game = "kuhn_poker"
num_players = 2
env_configs = {"players": num_players}
env = rl_environment.Environment(game, **env_configs)
info_state_size = env.observation_spec()["info_state"][0]
num_actions = env.action_spec()["num_actions"]
hidden_layers_sizes = [int(l) for l in FLAGS.hidden_layers_sizes]
kwargs = {
"replay_buffer_capacity": FLAGS.replay_buffer_capacity,
"epsilon_decay_duration": FLAGS.num_train_episodes,
"epsilon_start": 0.06,
"epsilon_end": 0.001,
}
with tf.Session() as sess:
# pylint: disable=g-complex-comprehension
agents = [
nfsp.NFSP(sess, idx, info_state_size, num_actions, hidden_layers_sizes,
FLAGS.reservoir_buffer_capacity, FLAGS.anticipatory_param,
**kwargs) for idx in range(num_players)
]
expl_policies_avg = NFSPPolicies(env, agents, nfsp.MODE.average_policy)
sess.run(tf.global_variables_initializer())
for ep in range(FLAGS.num_train_episodes):
if (ep + 1) % FLAGS.eval_every == 0:
losses = [agent.loss for agent in agents]
logging.info("Losses: %s", losses)
expl = exploitability.exploitability(env.game, expl_policies_avg)
logging.info("[%s] Exploitability AVG %s", ep + 1, expl)
logging.info("_____________________________________________")
time_step = env.reset()
while not time_step.last():
player_id = time_step.observations["current_player"]
agent_output = agents[player_id].step(time_step)
action_list = [agent_output.action]
time_step = env.step(action_list)
# Episode is over, step all agents with final info state.
for agent in agents:
agent.step(time_step)
if __name__ == "__main__":
app.run(main)
| open_spiel-master | open_spiel/python/examples/kuhn_nfsp.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MCTS example."""
import collections
import random
import sys
from absl import app
from absl import flags
import numpy as np
from open_spiel.python.algorithms import mcts
from open_spiel.python.algorithms.alpha_zero import evaluator as az_evaluator
from open_spiel.python.algorithms.alpha_zero import model as az_model
from open_spiel.python.bots import gtp
from open_spiel.python.bots import human
from open_spiel.python.bots import uniform_random
import pyspiel
_KNOWN_PLAYERS = [
# A generic Monte Carlo Tree Search agent.
"mcts",
# A generic random agent.
"random",
# You'll be asked to provide the moves.
"human",
# Run an external program that speaks the Go Text Protocol.
# Requires the gtp_path flag.
"gtp",
# Run an alpha_zero checkpoint with MCTS. Uses the specified UCT/sims.
# Requires the az_path flag.
"az"
]
flags.DEFINE_string("game", "tic_tac_toe", "Name of the game.")
flags.DEFINE_enum("player1", "mcts", _KNOWN_PLAYERS, "Who controls player 1.")
flags.DEFINE_enum("player2", "random", _KNOWN_PLAYERS, "Who controls player 2.")
flags.DEFINE_string("gtp_path", None, "Where to find a binary for gtp.")
flags.DEFINE_multi_string("gtp_cmd", [], "GTP commands to run at init.")
flags.DEFINE_string("az_path", None,
"Path to an alpha_zero checkpoint. Needed by an az player.")
flags.DEFINE_integer("uct_c", 2, "UCT's exploration constant.")
flags.DEFINE_integer("rollout_count", 1, "How many rollouts to do.")
flags.DEFINE_integer("max_simulations", 1000, "How many simulations to run.")
flags.DEFINE_integer("num_games", 1, "How many games to play.")
flags.DEFINE_integer("seed", None, "Seed for the random number generator.")
flags.DEFINE_bool("random_first", False, "Play the first move randomly.")
flags.DEFINE_bool("solve", True, "Whether to use MCTS-Solver.")
flags.DEFINE_bool("quiet", False, "Don't show the moves as they're played.")
flags.DEFINE_bool("verbose", False, "Show the MCTS stats of possible moves.")
FLAGS = flags.FLAGS
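# Illustrative invocations (games and paths below are examples only):
#   python mcts.py --game=tic_tac_toe --player1=mcts --player2=random
#   python mcts.py --game=tic_tac_toe --player1=human --player2=az \
#       --az_path=/path/to/az/checkpoint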
def _opt_print(*args, **kwargs):
if not FLAGS.quiet:
print(*args, **kwargs)
def _init_bot(bot_type, game, player_id):
"""Initializes a bot by type."""
rng = np.random.RandomState(FLAGS.seed)
if bot_type == "mcts":
evaluator = mcts.RandomRolloutEvaluator(FLAGS.rollout_count, rng)
return mcts.MCTSBot(
game,
FLAGS.uct_c,
FLAGS.max_simulations,
evaluator,
random_state=rng,
solve=FLAGS.solve,
verbose=FLAGS.verbose)
if bot_type == "az":
model = az_model.Model.from_checkpoint(FLAGS.az_path)
evaluator = az_evaluator.AlphaZeroEvaluator(game, model)
return mcts.MCTSBot(
game,
FLAGS.uct_c,
FLAGS.max_simulations,
evaluator,
random_state=rng,
child_selection_fn=mcts.SearchNode.puct_value,
solve=FLAGS.solve,
verbose=FLAGS.verbose)
if bot_type == "random":
return uniform_random.UniformRandomBot(player_id, rng)
if bot_type == "human":
return human.HumanBot()
if bot_type == "gtp":
bot = gtp.GTPBot(game, FLAGS.gtp_path)
for cmd in FLAGS.gtp_cmd:
bot.gtp_cmd(cmd)
return bot
raise ValueError("Invalid bot type: %s" % bot_type)
def _get_action(state, action_str):
for action in state.legal_actions():
if action_str == state.action_to_string(state.current_player(), action):
return action
return None
def _play_game(game, bots, initial_actions):
"""Plays one game."""
state = game.new_initial_state()
_opt_print("Initial state:\n{}".format(state))
history = []
if FLAGS.random_first:
assert not initial_actions
initial_actions = [state.action_to_string(
state.current_player(), random.choice(state.legal_actions()))]
for action_str in initial_actions:
action = _get_action(state, action_str)
if action is None:
sys.exit("Invalid action: {}".format(action_str))
history.append(action_str)
for bot in bots:
bot.inform_action(state, state.current_player(), action)
state.apply_action(action)
_opt_print("Forced action", action_str)
_opt_print("Next state:\n{}".format(state))
while not state.is_terminal():
current_player = state.current_player()
# The state can be three different types: chance node,
# simultaneous node, or decision node
if state.is_chance_node():
# Chance node: sample an outcome
outcomes = state.chance_outcomes()
num_actions = len(outcomes)
_opt_print("Chance node, got " + str(num_actions) + " outcomes")
action_list, prob_list = zip(*outcomes)
action = np.random.choice(action_list, p=prob_list)
action_str = state.action_to_string(current_player, action)
_opt_print("Sampled action: ", action_str)
elif state.is_simultaneous_node():
raise ValueError("Game cannot have simultaneous nodes.")
else:
# Decision node: sample action for the single current player
bot = bots[current_player]
action = bot.step(state)
action_str = state.action_to_string(current_player, action)
_opt_print("Player {} sampled action: {}".format(current_player,
action_str))
for i, bot in enumerate(bots):
if i != current_player:
bot.inform_action(state, current_player, action)
history.append(action_str)
state.apply_action(action)
_opt_print("Next state:\n{}".format(state))
# Game is now done. Print return for each player
returns = state.returns()
print("Returns:", " ".join(map(str, returns)), ", Game actions:",
" ".join(history))
for bot in bots:
bot.restart()
return returns, history
def main(argv):
game = pyspiel.load_game(FLAGS.game)
if game.num_players() > 2:
sys.exit("This game requires more players than the example can handle.")
bots = [
_init_bot(FLAGS.player1, game, 0),
_init_bot(FLAGS.player2, game, 1),
]
histories = collections.defaultdict(int)
overall_returns = [0, 0]
overall_wins = [0, 0]
game_num = 0
try:
for game_num in range(FLAGS.num_games):
returns, history = _play_game(game, bots, argv[1:])
histories[" ".join(history)] += 1
for i, v in enumerate(returns):
overall_returns[i] += v
if v > 0:
overall_wins[i] += 1
except (KeyboardInterrupt, EOFError):
game_num -= 1
print("Caught a KeyboardInterrupt, stopping early.")
print("Number of games played:", game_num + 1)
print("Number of distinct games played:", len(histories))
print("Players:", FLAGS.player1, FLAGS.player2)
print("Overall wins", overall_wins)
print("Overall returns", overall_returns)
if __name__ == "__main__":
app.run(main)
| open_spiel-master | open_spiel/python/examples/mcts.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DQN example on Lewis Signaling Game."""
import copy
from absl import app
from absl import flags
import numpy as np
import tensorflow.compat.v1 as tf
from open_spiel.python import rl_environment
from open_spiel.python.algorithms import dqn
FLAGS = flags.FLAGS
# Env parameters
flags.DEFINE_integer("num_states", 3, "Number of states and actions")
flags.DEFINE_integer("num_messages", 3, "Number of messages")
flags.DEFINE_string("payoffs", "1, 0, 0, 0, 1, 0, 0, 0, 1",
"Payoffs to use ('random' for random [0, 1) payoffs)")
# Alg parameters
flags.DEFINE_integer("num_episodes", 50000, "Number of train episodes")
flags.DEFINE_float("step_size", 0.1, "Step size for updates")
flags.DEFINE_float("eps_init", 1.0, "Initial value of epsilon")
flags.DEFINE_float("eps_final", 0.0, "Final value of epsilon")
flags.DEFINE_integer("eps_decay_steps", 49000,
"Number of episodes to decay epsilon")
flags.DEFINE_integer("replay_buffer_capacity", int(1e4),
"Size of replay buffer")
# Misc parameters
flags.DEFINE_integer("num_runs", 10, "Number of repetitions")
flags.DEFINE_integer("log_interval", 100,
"Number of episodes between each logging")
flags.DEFINE_bool("plot", False, "Set to plot the graphs")
def main(_):
game = "lewis_signaling"
num_players = 2
num_states = FLAGS.num_states
num_messages = FLAGS.num_messages
if FLAGS.payoffs == "random":
payoffs = np.random.random((num_states, num_states))
payoffs_str = ",".join([str(x) for x in payoffs.flatten()])
elif FLAGS.payoffs == "climbing":
# This is a particular payoff matrix that is hard for decentralized
# algorithms. Introduced in C. Claus and C. Boutilier, "The dynamics of
# reinforcement learning in cooperative multiagent systems", 1998, for
# simultaneous action games, but it is difficult even in the case of
# signaling games.
payoffs = np.array([[11, -30, 0], [-30, 7, 6], [0, 0, 5]]) / 30
payoffs_str = ",".join([str(x) for x in payoffs.flatten()])
else:
payoffs_str = FLAGS.payoffs
try:
payoffs_list = [float(x) for x in payoffs_str.split(",")]
payoffs = np.array(payoffs_list).reshape((num_states, num_states))
except ValueError:
raise ValueError(
"There should be {} (states * actions) elements in payoff. Found {} elements"
        .format(num_states * num_states,
                len(payoffs_str.split(",")))) from None
env_configs = {
"num_states": num_states,
"num_messages": num_messages,
"payoffs": payoffs_str
}
env = rl_environment.Environment(game, **env_configs)
state_size = env.observation_spec()["info_state"][0]
num_actions = env.action_spec()["num_actions"]
replay_buffer_capacity = FLAGS.replay_buffer_capacity
# Results to store
num_runs = FLAGS.num_runs
training_episodes = FLAGS.num_episodes
log_interval = FLAGS.log_interval
rewards = np.zeros((num_runs, training_episodes // log_interval))
opts = np.zeros((num_runs, training_episodes // log_interval))
converge_point = np.zeros((num_states, num_states))
percent_opt = 0
# Repeat the experiment num_runs times
for i in range(num_runs):
with tf.Session() as sess:
# pylint: disable=g-complex-comprehension
agents = [
dqn.DQN(
sess,
player_id=idx,
state_representation_size=state_size,
num_actions=num_actions,
learning_rate=FLAGS.step_size,
replay_buffer_capacity=replay_buffer_capacity,
epsilon_start=FLAGS.eps_init,
epsilon_end=FLAGS.eps_final,
epsilon_decay_duration=FLAGS.eps_decay_steps * 2)
for idx in range(num_players)
]
# 1. Train the agents
for cur_episode in range(training_episodes):
time_step = env.reset()
# Find cur_state for logging. See lewis_signaling.cc for info_state
# details
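        # (The slice below assumes the first three info-state entries encode
        # the player's role/turn and the remaining num_states entries one-hot
        # encode the sampled state.)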
cur_state = time_step.observations["info_state"][0][3:].index(1)
while not time_step.last():
player_id = time_step.observations["current_player"]
agent_output = agents[player_id].step(time_step)
time_step = env.step([agent_output.action])
# Episode is over, step all agents with final info state.
for agent in agents:
agent.step(time_step)
# Store rewards
reward = time_step.rewards[0]
max_reward = payoffs[cur_state].max()
cur_idx = (i, cur_episode // log_interval)
rewards[cur_idx] += reward / log_interval
opts[cur_idx] += np.isclose(reward, max_reward) / log_interval
base_info_state0 = [1.0, 0.0, 0.0] + [0.0] * num_states
base_info_state1 = [0.0, 1.0, 0.0] + [0.0] * num_states
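      # Evaluate the learned greedy policies once per state: the sender maps
      # state s to a message m, the receiver maps m to an action a, and the
      # resulting (s, a) pairs are tallied in converge_point.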
for s in range(num_states):
info_state0 = copy.deepcopy(base_info_state0)
info_state0[3 + s] = 1.0
# pylint: disable=protected-access
m, _ = agents[0]._epsilon_greedy(info_state0, np.arange(num_messages),
0)
info_state1 = copy.deepcopy(base_info_state1)
info_state1[3 + m] = 1.0
a, _ = agents[1]._epsilon_greedy(info_state1, np.arange(num_states), 0)
converge_point[s, a] += 1
best_act = payoffs[s].argmax()
percent_opt += int(a == best_act) / num_runs / num_states
if FLAGS.plot:
# pylint: disable=g-import-not-at-top
import matplotlib as mpl
import matplotlib.pyplot as plt
from scipy import stats
params = {
"font.size": 13,
"axes.labelsize": 13,
"xtick.labelsize": 13,
"ytick.labelsize": 13,
}
mpl.rcParams.update(params)
def init_fig():
fig, ax = plt.subplots(1, 1)
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
return fig, ax
def plot_scalars(scalars,
repetition_axis=0,
scalar_labels=None,
title=None,
ax_labels=None):
"""Plots scalar on ax by filling 1 standard error.
Args:
scalars: List of scalars to plot (mean taken over repetition
axis)
repetition_axis: Axis to take the mean over
scalar_labels: Labels for the scalars (for legend)
title: Figure title
ax_labels: Labels for x and y axis (list of 2 strings)
"""
if not all([len(s.shape) == 2 for s in scalars]):
raise ValueError("Only 2D arrays supported for plotting")
if scalar_labels is None:
scalar_labels = [None] * len(scalars)
if len(scalars) != len(scalar_labels):
raise ValueError(
"Wrong number of scalar labels, expected {} but received {}".format(
len(scalars), len(scalar_labels)))
_, plot_axis = init_fig()
for i, scalar in enumerate(scalars):
xs = np.arange(scalar.shape[1 - repetition_axis]) * FLAGS.log_interval
mean = scalar.mean(axis=repetition_axis)
sem = stats.sem(scalar, axis=repetition_axis)
plot_axis.plot(xs, mean, label=scalar_labels[i])
plot_axis.fill_between(xs, mean - sem, mean + sem, alpha=0.5)
if title is not None:
plot_axis.set_title(title)
if ax_labels is not None:
plot_axis.set_xlabel(ax_labels[0])
plot_axis.set_ylabel(ax_labels[1])
def plot_confusion_matrix(cm, cmap=plt.cm.Blues, title=None):
"""Plot the confusion matrix.
Args:
cm (np.ndarray): Confusion matrix to plot
cmap: Color map to be used in matplotlib's imshow
title: Figure title
Returns:
Figure and axis on which the confusion matrix is plotted.
"""
fig, ax = plt.subplots()
ax.imshow(cm, interpolation="nearest", cmap=cmap)
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlabel("Receiver's action", fontsize=14)
ax.set_ylabel("Sender's state", fontsize=14)
# Loop over data dimensions and create text annotations.
fmt = "d"
thresh = cm.max() / 2.
for i in range(cm.shape[0]):
for j in range(cm.shape[1]):
ax.text(
j,
i,
format(cm[i, j], fmt),
ha="center",
va="center",
color="white" if cm[i, j] > thresh else "black")
fig.tight_layout()
if title is not None:
ax.set_title(title)
return fig, ax
plot_scalars([rewards],
title="Reward graph (DQN)",
ax_labels=["Episodes", "Reward per episode"])
plot_scalars([opts],
title="Percentage of optimal actions (DQN)",
ax_labels=["Episodes", "% optimal actions"])
plot_confusion_matrix(
converge_point.astype(int), title="Final policy (DQN)")
plt.show()
return percent_opt
if __name__ == "__main__":
app.run(main)
| open_spiel-master | open_spiel/python/examples/lewis_signaling_dqn.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An example of use of PPO.
Note: code adapted (with permission) from
https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/ppo.py and
https://github.com/vwxyzjn/ppo-implementation-details/blob/main/ppo_atari.py
"""
# pylint: disable=g-importing-member
import collections
from datetime import datetime
import logging
import os
import random
import sys
import time
from absl import app
from absl import flags
import numpy as np
import pandas as pd
import torch
from torch.utils.tensorboard import SummaryWriter
import pyspiel
from open_spiel.python.pytorch.ppo import PPO
from open_spiel.python.pytorch.ppo import PPOAgent
from open_spiel.python.pytorch.ppo import PPOAtariAgent
from open_spiel.python.rl_environment import ChanceEventSampler
from open_spiel.python.rl_environment import Environment
from open_spiel.python.rl_environment import ObservationType
from open_spiel.python.vector_env import SyncVectorEnv
FLAGS = flags.FLAGS
flags.DEFINE_string("exp_name",
                    os.path.splitext(os.path.basename(__file__))[0],
"the name of this experiment")
flags.DEFINE_string("game_name", "atari", "the id of the OpenSpiel game")
flags.DEFINE_float("learning_rate", 2.5e-4,
"the learning rate of the optimizer")
flags.DEFINE_integer("seed", 1, "seed of the experiment")
flags.DEFINE_integer("total_timesteps", 10_000_000,
"total timesteps of the experiments")
flags.DEFINE_integer("eval_every", 10, "evaluate the policy every N updates")
flags.DEFINE_bool("torch_deterministic", True,
"if toggled, `torch.backends.cudnn.deterministic=False`")
flags.DEFINE_bool("cuda", True, "if toggled, cuda will be enabled by default")
# Atari specific arguments
flags.DEFINE_string("gym_id", "BreakoutNoFrameskip-v4",
"the id of the environment")
flags.DEFINE_bool(
"capture_video", False,
"whether to capture videos of the agent performances (check out `videos` folder)"
)
# Algorithm specific arguments
flags.DEFINE_integer("num_envs", 8, "the number of parallel game environments")
flags.DEFINE_integer(
"num_steps", 128,
"the number of steps to run in each environment per policy rollout")
flags.DEFINE_bool(
"anneal_lr", True,
"Toggle learning rate annealing for policy and value networks")
flags.DEFINE_bool("gae", True, "Use GAE for advantage computation")
flags.DEFINE_float("gamma", 0.99, "the discount factor gamma")
flags.DEFINE_float("gae_lambda", 0.95,
"the lambda for the general advantage estimation")
flags.DEFINE_integer("num_minibatches", 4, "the number of mini-batches")
flags.DEFINE_integer("update_epochs", 4, "the K epochs to update the policy")
flags.DEFINE_bool("norm_adv", True, "Toggles advantages normalization")
flags.DEFINE_float("clip_coef", 0.1, "the surrogate clipping coefficient")
flags.DEFINE_bool(
"clip_vloss", True,
"Toggles whether or not to use a clipped loss for the value function, as per the paper"
)
flags.DEFINE_float("ent_coef", 0.01, "coefficient of the entropy")
flags.DEFINE_float("vf_coef", 0.5, "coefficient of the value function")
flags.DEFINE_float("max_grad_norm", 0.5,
"the maximum norm for the gradient clipping")
flags.DEFINE_float("target_kl", None, "the target KL divergence threshold")
def setup_logging():
root = logging.getLogger()
root.setLevel(logging.DEBUG)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter(
"%(asctime)s - %(name)s - %(levelname)s - %(message)s")
handler.setFormatter(formatter)
root.addHandler(handler)
def make_single_atari_env(gym_id,
seed,
idx,
capture_video,
run_name,
use_episodic_life_env=True):
"""Make the single-agent Atari environment."""
def gen_env():
game = pyspiel.load_game(
"atari", {
"gym_id": gym_id,
"seed": seed,
"idx": idx,
"capture_video": capture_video,
"run_name": run_name,
"use_episodic_life_env": use_episodic_life_env
})
return Environment(
game,
chance_event_sampler=ChanceEventSampler(seed=seed),
observation_type=ObservationType.OBSERVATION)
return gen_env
def make_single_env(game_name, seed):
def gen_env():
game = pyspiel.load_game(game_name)
return Environment(game, chance_event_sampler=ChanceEventSampler(seed=seed))
return gen_env
def main(_):
setup_logging()
batch_size = int(FLAGS.num_envs * FLAGS.num_steps)
if FLAGS.game_name == "atari":
# pylint: disable=unused-import
# pylint: disable=g-import-not-at-top
import open_spiel.python.games.atari
current_day = datetime.now().strftime("%d")
current_month_text = datetime.now().strftime("%h")
run_name = f"{FLAGS.game_name}__{FLAGS.exp_name}__"
if FLAGS.game_name == "atari":
run_name += f"{FLAGS.gym_id}__"
run_name += f"{FLAGS.seed}__{current_month_text}__{current_day}__{int(time.time())}"
writer = SummaryWriter(f"runs/{run_name}")
writer.add_text(
"hyperparameters",
"|param|value|\n|-|-|\n%s" %
("\n".join([f"|{key}|{value}|" for key, value in vars(FLAGS).items()])),
)
random.seed(FLAGS.seed)
np.random.seed(FLAGS.seed)
torch.manual_seed(FLAGS.seed)
torch.backends.cudnn.deterministic = FLAGS.torch_deterministic
device = torch.device(
"cuda" if torch.cuda.is_available() and FLAGS.cuda else "cpu")
logging.info("Using device: %s", str(device))
if FLAGS.game_name == "atari":
envs = SyncVectorEnv([
make_single_atari_env(FLAGS.gym_id, FLAGS.seed + i, i, False,
run_name)() for i in range(FLAGS.num_envs)
])
agent_fn = PPOAtariAgent
else:
envs = SyncVectorEnv([
make_single_env(FLAGS.game_name, FLAGS.seed + i)()
for i in range(FLAGS.num_envs)
])
agent_fn = PPOAgent
game = envs.envs[0]._game # pylint: disable=protected-access
info_state_shape = game.observation_tensor_shape()
num_updates = FLAGS.total_timesteps // batch_size
agent = PPO(
input_shape=info_state_shape,
num_actions=game.num_distinct_actions(),
num_players=game.num_players(),
player_id=0,
num_envs=FLAGS.num_envs,
steps_per_batch=FLAGS.num_steps,
num_minibatches=FLAGS.num_minibatches,
update_epochs=FLAGS.update_epochs,
learning_rate=FLAGS.learning_rate,
gae=FLAGS.gae,
gamma=FLAGS.gamma,
gae_lambda=FLAGS.gae_lambda,
normalize_advantages=FLAGS.norm_adv,
clip_coef=FLAGS.clip_coef,
clip_vloss=FLAGS.clip_vloss,
entropy_coef=FLAGS.ent_coef,
value_coef=FLAGS.vf_coef,
max_grad_norm=FLAGS.max_grad_norm,
target_kl=FLAGS.target_kl,
device=device,
writer=writer,
agent_fn=agent_fn,
)
n_reward_window = 50
recent_rewards = collections.deque(maxlen=n_reward_window)
time_step = envs.reset()
for update in range(num_updates):
for _ in range(FLAGS.num_steps):
agent_output = agent.step(time_step)
time_step, reward, done, unreset_time_steps = envs.step(
agent_output, reset_if_done=True)
if FLAGS.game_name == "atari":
# Get around the fact that
# stable_baselines3.common.atari_wrappers.EpisodicLifeEnv will modify
# rewards at the LIFE and not GAME level by only counting
# rewards of finished episodes
for ts in unreset_time_steps:
info = ts.observations.get("info")
if info and "episode" in info:
real_reward = info["episode"]["r"]
writer.add_scalar("charts/player_0_training_returns", real_reward,
agent.total_steps_done)
recent_rewards.append(real_reward)
else:
for ts in unreset_time_steps:
if ts.last():
real_reward = ts.rewards[0]
writer.add_scalar("charts/player_0_training_returns", real_reward,
agent.total_steps_done)
recent_rewards.append(real_reward)
agent.post_step(reward, done)
if FLAGS.anneal_lr:
agent.anneal_learning_rate(update, num_updates)
agent.learn(time_step)
if update % FLAGS.eval_every == 0:
logging.info("-" * 80)
logging.info("Step %s", agent.total_steps_done)
logging.info("Summary of past %i rewards\n %s",
n_reward_window,
pd.Series(recent_rewards).describe())
writer.close()
logging.info("All done. Have a pleasant day :)")
if __name__ == "__main__":
app.run(main)
| open_spiel-master | open_spiel/python/examples/ppo_example.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example computing ResponseGraphUCB sample complexity results."""
import itertools
from absl import app
from absl import flags
import matplotlib.pyplot as plt
import numpy as np
from open_spiel.python.algorithms import response_graph_ucb
from open_spiel.python.algorithms import response_graph_ucb_utils as utils
FLAGS = flags.FLAGS
flags.DEFINE_string('game_name', 'soccer', 'Name of the game.')
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
# Parameters to run
deltas = [0.01, 0.025, 0.05, 0.1, 0.25, 0.5]
sampling_methods = [
'uniform-exhaustive', 'uniform', 'valence-weighted', 'count-weighted'
]
conf_methods = [
'ucb-standard', 'ucb-standard-relaxed', 'clopper-pearson-ucb',
'clopper-pearson-ucb-relaxed'
]
methods = list(itertools.product(sampling_methods, conf_methods))
mean_counts = {m: [[] for _ in range(len(deltas))] for m in methods}
edge_errs = {m: [[] for _ in range(len(deltas))] for m in methods}
if FLAGS.game_name == 'bernoulli':
max_total_interactions = 50000
repetitions = 20
elif FLAGS.game_name == 'soccer':
max_total_interactions = 100000
repetitions = 5
elif FLAGS.game_name == 'kuhn_poker_3p':
max_total_interactions = 100000
repetitions = 5
else:
raise ValueError(
'game_name must be "bernoulli", "soccer", or "kuhn_poker_3p".')
for r in range(repetitions):
print('Iteration {}'.format(r + 1))
G = utils.get_game_for_sampler(FLAGS.game_name) # pylint: disable=invalid-name
for m in methods:
print(' Method: {}'.format(m))
for ix, d in enumerate(deltas):
print(' Delta: {}'.format(d))
r_ucb = response_graph_ucb.ResponseGraphUCB(
G,
exploration_strategy=m[0],
confidence_method=m[1],
delta=d,
ucb_eps=1e-1)
results = r_ucb.run(max_total_iterations=max_total_interactions)
# Updated
mean_counts[m][ix].append(results['interactions'])
real_graph = r_ucb.construct_real_graph()
edge_errs[m][ix].append(
utils.digraph_edge_hamming_dist(real_graph, results['graph']))
# Plotting
_, axes = plt.subplots(1, 2, figsize=(10, 4))
max_mean_count = 0
for m in methods:
utils.plot_timeseries(
axes,
id_ax=0,
data=np.asarray(mean_counts[m]).T,
xticks=deltas,
xlabel=r'$\delta$',
ylabel='Interactions required',
label=utils.get_method_tuple_acronym(m),
logx=True,
logy=True,
linespecs=utils.get_method_tuple_linespecs(m))
if np.max(mean_counts[m]) > max_mean_count:
max_mean_count = np.max(mean_counts[m])
plt.xlim(left=np.min(deltas), right=np.max(deltas))
plt.ylim(top=max_mean_count * 1.05)
max_error = 0
for m in methods:
utils.plot_timeseries(
axes,
id_ax=1,
data=np.asarray(edge_errs[m]).T,
xticks=deltas,
xlabel=r'$\delta$',
ylabel='Response graph errors',
label=utils.get_method_tuple_acronym(m),
logx=True,
logy=False,
linespecs=utils.get_method_tuple_linespecs(m))
if np.max(edge_errs[m]) > max_error:
max_error = np.max(edge_errs[m])
plt.xlim(left=np.min(deltas), right=np.max(deltas))
plt.ylim(bottom=0, top=max_error*1.05)
# Shared legend
plt.figure(figsize=(1, 6))
plt.figlegend(
*axes[0].get_legend_handles_labels(),
loc='center right',
bbox_to_anchor=(0.8, 0.5),
bbox_transform=plt.gcf().transFigure,
ncol=1,
handlelength=1.7)
plt.tight_layout()
plt.show()
if __name__ == '__main__':
app.run(main)
| open_spiel-master | open_spiel/python/examples/response_graph_ucb_sample_complexity.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of ResponseGraphUCB run on a 2x2 game."""
from absl import app
import matplotlib.pyplot as plt
import numpy as np
from open_spiel.python.algorithms import response_graph_ucb
from open_spiel.python.algorithms import response_graph_ucb_utils
def get_example_2x2_payoffs():
mean_payoffs = np.random.uniform(-1, 1, size=(2, 2, 2))
mean_payoffs[0, :, :] = np.asarray([[0.5, 0.85], [0.15, 0.5]])
mean_payoffs[1, :, :] = 1 - mean_payoffs[0, :, :]
return mean_payoffs
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
mean_payoffs = get_example_2x2_payoffs()
game = response_graph_ucb_utils.BernoulliGameSampler(
[2, 2], mean_payoffs, payoff_bounds=[-1., 1.])
game.p_max = mean_payoffs
game.means = mean_payoffs
print('Game means:\n', game.means)
exploration_strategy = 'uniform-exhaustive'
confidence_method = 'ucb-standard'
r_ucb = response_graph_ucb.ResponseGraphUCB(
game,
exploration_strategy=exploration_strategy,
confidence_method=confidence_method,
delta=0.1)
results = r_ucb.run()
# Plotting
print('Number of total samples: {}'.format(np.sum(r_ucb.count[0])))
r_ucb.visualise_2x2x2(real_values=game.means, graph=results['graph'])
r_ucb.visualise_count_history(figsize=(5, 3))
plt.gca().xaxis.label.set_fontsize(15)
plt.gca().yaxis.label.set_fontsize(15)
# Compare to ground truth graph
real_graph = r_ucb.construct_real_graph()
r_ucb.plot_graph(real_graph)
plt.show()
if __name__ == '__main__':
app.run(main)
| open_spiel-master | open_spiel/python/examples/response_graph_ucb_2x2_game.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An example that computes the Nash bargaining score from negotiations.
This uses the bargaining game that was introduced in:
[1] Lewis et al., Deal or no deal? End-to-end learning of negotiation
dialogues, 2017. https://arxiv.org/abs/1706.05125
[2] David DeVault, Johnathan Mell, and Jonathan Gratch.
2015. Toward Natural Turn-taking in a Virtual Human Negotiation Agent
It computes the empirical Nash bargaining score (NBS) from three sources:
- Human play
- IS-MCTS in self-play
- A theoretical maximum NBS if the players had full information and can see
each other's utilities and then maximize their NBS.
These are all run on a data set extracted from the Lewis et al. '17 data set:
https://github.com/facebookresearch/end-to-end-negotiator/blob/master/src/data/negotiate/data.txt
This example is inspired by the paper (Iwasa and Fujita, "Prediction of Nash
Bargaining Solution in Negotiation Dialogue", 2018).
"""
from absl import app
from absl import flags
import numpy as np
from open_spiel.python import games # pylint: disable=unused-import
import pyspiel
FLAGS = flags.FLAGS
flags.DEFINE_string("data_file", None, "Lewis et al. '17 data set file")
flags.DEFINE_string("instances_file", "/tmp/instances.txt",
"Filename for the temp instances database file.")
class Instance(object):
"""An instance of a bargaining problem."""
def __init__(self, pool, p1values, p2values):
self.pool = np.array(pool)
self.p1values = np.array(p1values)
self.p2values = np.array(p2values)
assert 5 <= sum(pool) <= 7
assert np.dot(pool, p1values) == 10
assert np.dot(pool, p2values) == 10
def __str__(self):
return (",".join([str(x) for x in self.pool]) + " " +
",".join([str(x) for x in self.p1values]) + " " +
",".join([str(x) for x in self.p2values]))
class Negotiation(object):
"""An instance of a bargaining game."""
def __init__(self, instance, outcome, rewards):
self.instance = instance
self.outcome = outcome
self.rewards = rewards
def __str__(self):
return (str(self.instance) + " " + str(self.outcome) + " " +
str(self.rewards))
def dialogue_matches_prev_line(line1, line2):
"""Checks if the dialogue matches the previous line's."""
parts1 = line1.split(" ")
parts2 = line2.split(" ")
for i in range(6, min(len(parts1), len(parts2))):
if parts1[i] == "YOU:" or parts1[i] == "THEM:":
if parts1[i] == "YOU:" and parts2[i] != "THEM:":
return False
if parts1[i] == "THEM:" and parts2[i] != "YOU:":
return False
elif parts1[i] != parts2[i]:
return False
if parts1[i] == "<selection>":
break
return True
# pylint: disable=line-too-long
def parse_dataset(filename):
"""Parse the Lewis et al. '17 data file."""
# book, hat, ball
# Example format
# 1 0 4 2 1 2 YOU: i would like 4 hats and you can have the rest . <eos> THEM: deal <eos> YOU: <selection> item0=0 item1=4 item2=0 <eos> reward=8 agree 1 4 4 1 1 2
# 1 4 4 1 1 2 THEM: i would like 4 hats and you can have the rest . <eos> YOU: deal <eos> THEM: <selection> item0=1 item1=0 item2=1 <eos> reward=6 agree 1 0 4 2 1 2
# 1 6 3 0 2 2 YOU: you can have all the hats if i get the book and basketballs . <eos> THEM: <selection> item0=1 item1=3 item2=2 <eos> reward=10 disagree 1 2 3 2 2 1
# 1 10 3 0 1 0 YOU: hi i would like the book and ball and you can have the hats <eos> THEM: i can give you either the book or the ball <eos> YOU: ill take the book <eos> THEM: ok i will take the hats and ball <eos> YOU: deal <eos> THEM: <selection> item0=1 item1=0 item2=0 <eos> reward=10 agree 1 2 3 2 1 2
# 1 2 3 2 1 2 THEM: hi i would like the book and ball and you can have the hats <eos> YOU: i can give you either the book or the ball <eos> THEM: ill take the book <eos> YOU: ok i will take the hats and ball <eos> THEM: deal <eos> YOU: <selection> item0=0 item1=3 item2=1 <eos> reward=8 agree 1 10 3 0 1 0
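  # Field layout, as consumed below: the first six whitespace-separated fields
  # interleave item counts with the speaker's values (count, value for each of
  # book/hat/ball), the last six repeat the counts interleaved with the
  # partner's values, and the dialogue plus the "reward=..."/agreement fields
  # sit in between.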
contents = pyspiel.read_contents_from_file(filename, "r")
lines = contents.split("\n")
cur_nego = None
negotiations = []
instances = []
for line_no in range(len(lines)):
line = lines[line_no]
if line:
parts = line.split(" ")
# parse the line to add a new negotiation
pool = [int(parts[0]), int(parts[2]), int(parts[4])]
my_values = [int(parts[1]), int(parts[3]), int(parts[5])]
pool2 = [int(parts[-6]), int(parts[-4]), int(parts[-2])]
other_values = [int(parts[-5]), int(parts[-3]), int(parts[-1])]
assert pool == pool2
rewards = [0, 0]
add_nego = False
outcome_str = parts[-7] # this will be "agree" or "disagree"
if parts[6] == "YOU:":
player_id = 0
instance = Instance(pool, my_values, other_values)
elif parts[6] == "THEM:":
player_id = 1
instance = Instance(pool, other_values, my_values)
else:
assert False, parts[6]
outcome = False
my_reward = 0
instances.append(instance)
if "disconnect" in line:
continue
# sometimes there is a "no agreement" in the rewards section
if (outcome_str == "disagree" or
(parts[-9] + " " + parts[-8]) == "reward=no agreement" or
parts[-8] == "reward=disconnect"):
# do not parse the reward, but must still parse the next line
add_nego = False
elif outcome_str == "agree":
outcome = True
reward_parts = parts[-8].split("=")
assert len(reward_parts) == 2, f"reward parts str: {parts[-8]}"
assert reward_parts[0] == "reward"
my_reward = int(reward_parts[1])
else:
assert False, f"Bad outcome: {outcome_str}"
if cur_nego is None:
rewards[player_id] = my_reward
        cur_nego = Negotiation(instance, outcome, rewards)
else:
# There are some in the data set that are incomplete (i.e. are missing the second perspective).
# We should not count these.
if dialogue_matches_prev_line(line, lines[line_no - 1]):
assert list(cur_nego.instance.pool) == pool
if player_id == 1:
assert list(cur_nego.instance.p2values) == my_values
assert list(cur_nego.instance.p1values) == other_values
elif player_id == 0:
assert list(cur_nego.instance.p1values) == my_values
assert list(cur_nego.instance.p2values) == other_values
cur_nego.rewards[player_id] = my_reward
add_nego = True
else:
# not matching, treat as new negotiation
rewards[player_id] = my_reward
          cur_nego = Negotiation(instance, outcome, rewards)
add_nego = False
if add_nego or outcome_str == "disagree":
negotiations.append(cur_nego)
print(str(cur_nego))
print(len(negotiations))
cur_nego = None
if outcome_str != "disagree":
# same instance was added twice, so remove the last one
instances.pop()
return instances, negotiations
def write_instances_file(negotiations, filename):
contents = ""
for nego in negotiations:
contents += str(nego.instance) + "\n"
pyspiel.write_contents_to_file(filename, "w", contents)
def compute_nbs_from_simulations(game, num_games, bots):
"""Compute empirical NBS from simulations."""
avg_returns = np.zeros(game.num_players())
for _ in range(num_games):
state = game.new_initial_state()
while not state.is_terminal():
if state.is_chance_node():
# Chance node: sample an outcome
outcomes = state.chance_outcomes()
action_list, prob_list = zip(*outcomes)
action = np.random.choice(action_list, p=prob_list)
state.apply_action(action)
else:
player = state.current_player()
action = bots[player].step(state)
state.apply_action(action)
returns = np.asarray(state.returns())
avg_returns += returns
avg_returns /= num_games
return np.prod(avg_returns)
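# Illustrative sketch (not part of the original script): the empirical NBS
# above is the product of the players' *average* returns, not the average of
# the per-episode products. The unused helper below shows the arithmetic on
# hypothetical average returns.
def _nbs_arithmetic_sketch():
  avg_returns = np.asarray([6.0, 4.0])  # Hypothetical per-player averages.
  return np.prod(avg_returns)  # -> 24.0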
class MaxBot(object):
"""Finds the single (deterministic) trade offer that maximizes the NBS."""
def __init__(self):
pass
def step(self, state):
"""Returns the NBS-maximizing action.
    If I'm player 0, then search over all possible moves, assume player 1
    takes the agree action, and choose the action that maximizes the NBS.
    Player 1 always agrees.
Args:
state: the OpenSpiel state to act from.
"""
player = state.current_player()
if player == 1:
return state.agree_action()
max_nbs = -1
max_action = -1
for action in state.legal_actions():
state_clone = state.clone()
state_clone.apply_action(action)
state_clone.apply_action(state.agree_action())
returns = state_clone.returns()
nbs = np.prod(returns)
if nbs > max_nbs:
max_nbs = nbs
max_action = action
assert max_action >= 0
return max_action
def main(_):
assert FLAGS.data_file is not None
_, negotiations = parse_dataset(FLAGS.data_file)
print(f"Writing instances database: {FLAGS.instances_file}")
write_instances_file(negotiations, FLAGS.instances_file)
# Human averages + NBS
human_rewards = np.zeros(2, dtype=np.float64)
avg_human_nbs = 0
for neg in negotiations:
human_rewards += neg.rewards
human_rewards /= len(negotiations)
avg_human_nbs += np.prod(human_rewards)
print(f"Average human rewards: {human_rewards}")
print(f"Average human NBS: {avg_human_nbs}")
game = pyspiel.load_game("bargaining",
{"instances_file": FLAGS.instances_file})
# Max bot
bots = [MaxBot(), MaxBot()]
avg_max_nbs = compute_nbs_from_simulations(game, 6796, bots)
print(f"Average max NBS: {avg_max_nbs}")
# Uniform random NBS
bots = [
pyspiel.make_uniform_random_bot(0, np.random.randint(0, 1000000)),
pyspiel.make_uniform_random_bot(1, np.random.randint(0, 1000000)),
]
avg_uniform_nbs = compute_nbs_from_simulations(game, 6796, bots)
print(f"Average uniform NBS: {avg_uniform_nbs}")
# IS-MCTS NBS
evaluator = pyspiel.RandomRolloutEvaluator(1, np.random.randint(0, 1000000))
bots = [
pyspiel.ISMCTSBot(
np.random.randint(0, 1000000), evaluator, 10.0, 1000, -1,
pyspiel.ISMCTSFinalPolicyType.MAX_VISIT_COUNT, False, False),
pyspiel.ISMCTSBot(
np.random.randint(0, 1000000), evaluator, 10.0, 1000, -1,
pyspiel.ISMCTSFinalPolicyType.MAX_VISIT_COUNT, False, False)
]
avg_ismcts_nbs = compute_nbs_from_simulations(game, 6796, bots)
print(f"Average IS-MCTS NBS: {avg_ismcts_nbs}")
if __name__ == "__main__":
app.run(main)
| open_spiel-master | open_spiel/python/examples/nego_nbs_example.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example use of the RCFR algorithm on Kuhn Poker."""
from absl import app
from absl import flags
import tensorflow.compat.v1 as tf
from open_spiel.python.algorithms import rcfr
import pyspiel
tf.enable_eager_execution()
FLAGS = flags.FLAGS
flags.DEFINE_integer("iterations", 100, "Number of iterations")
flags.DEFINE_string("game", "kuhn_poker", "Name of the game")
flags.DEFINE_integer("players", 2, "Number of players")
flags.DEFINE_integer("print_freq", 10, "How often to print the exploitability")
flags.DEFINE_boolean("bootstrap", False,
"Whether or not to use bootstrap targets")
flags.DEFINE_boolean("truncate_negative", False,
"Whether or not to truncate negative targets to zero")
flags.DEFINE_integer(
"buffer_size", -1,
"A reservoir buffer size. A non-positive size implies an effectively "
"infinite buffer.")
flags.DEFINE_integer("num_hidden_layers", 1,
"The number of hidden layers in the regret model.")
flags.DEFINE_integer("num_hidden_units", 13,
"The number of hidden layers in the regret model.")
flags.DEFINE_integer(
"num_hidden_factors", 8,
"The number of factors in each hidden layer in the regret model.")
flags.DEFINE_boolean(
"use_skip_connections", True,
"Whether or not to use skip connections in the regret model.")
flags.DEFINE_integer(
"num_epochs", 200,
"The number of epochs to run between each iterations to update the regret "
"models.")
flags.DEFINE_integer("batch_size", 100, "The regret model training batch size.")
flags.DEFINE_float("step_size", 0.01, "The ADAM (AMSGrad) optimizer step size.")
def main(_):
game = pyspiel.load_game(FLAGS.game, {"players": FLAGS.players})
models = []
for _ in range(game.num_players()):
models.append(
rcfr.DeepRcfrModel(
game,
num_hidden_layers=FLAGS.num_hidden_layers,
num_hidden_units=FLAGS.num_hidden_units,
num_hidden_factors=FLAGS.num_hidden_factors,
use_skip_connections=FLAGS.use_skip_connections))
if FLAGS.buffer_size > 0:
solver = rcfr.ReservoirRcfrSolver(
game,
models,
FLAGS.buffer_size,
truncate_negative=FLAGS.truncate_negative)
else:
solver = rcfr.RcfrSolver(
game,
models,
truncate_negative=FLAGS.truncate_negative,
bootstrap=FLAGS.bootstrap)
def _train_fn(model, data):
"""Train `model` on `data`."""
data = data.shuffle(FLAGS.batch_size * 10)
data = data.batch(FLAGS.batch_size)
data = data.repeat(FLAGS.num_epochs)
optimizer = tf.keras.optimizers.Adam(lr=FLAGS.step_size, amsgrad=True)
@tf.function
def _train():
for x, y in data:
optimizer.minimize(
lambda: tf.losses.huber_loss(y, model(x), delta=0.01), # pylint: disable=cell-var-from-loop
model.trainable_variables)
_train()
# End of _train_fn
for i in range(FLAGS.iterations):
solver.evaluate_and_update_policy(_train_fn)
if i % FLAGS.print_freq == 0:
conv = pyspiel.exploitability(game, solver.average_policy())
print("Iteration {} exploitability {}".format(i, conv))
if __name__ == "__main__":
app.run(main)
| open_spiel-master | open_spiel/python/examples/rcfr_example.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example that trains two agents using either LOLA or LOLA-DiCE.
An example that trains using LOLA (Foerster et al., 2017) or LOLA-DiCE
(Foerster et al., 2018) on iterated matrix games. Hyperparameters are
taken from the paper and https://github.com/alexis-jacq/LOLA_DiCE.
"""
import itertools
import os
import typing
from typing import List
from typing import Tuple
import warnings
from absl import app
from absl import flags
import distrax
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
import wandb
from open_spiel.python.environments.iterated_matrix_game import IteratedMatchingPennies
from open_spiel.python.environments.iterated_matrix_game import IteratedPrisonersDilemma
from open_spiel.python.jax.opponent_shaping import OpponentShapingAgent
from open_spiel.python.rl_environment import Environment
from open_spiel.python.rl_environment import TimeStep
warnings.simplefilter('ignore', FutureWarning)
FLAGS = flags.FLAGS
flags.DEFINE_string('exp_name', 'dice_1step_pytorchparams', 'Experiment name.')
flags.DEFINE_integer('seed', 42, 'Random seed.')
flags.DEFINE_string('game', 'ipd', 'Name of the game.')
flags.DEFINE_integer('epochs', 200, 'Number of training iterations.')
flags.DEFINE_integer('batch_size', 1024, 'Number of episodes in a batch.')
flags.DEFINE_integer(
'critic_mini_batches', 1, 'Number of minibatches for critic.'
)
flags.DEFINE_integer('game_iterations', 150, 'Number of iterated plays.')
flags.DEFINE_float('policy_lr', 0.2, 'Policy learning rate.')
flags.DEFINE_float('opp_policy_lr', 0.3, 'Opponent policy learning rate.')
flags.DEFINE_float('critic_lr', 0.1, 'Critic learning rate.')
flags.DEFINE_string('correction_type', 'lola', 'Either "lola", "dice" or None.')
flags.DEFINE_integer(
'n_lookaheads', 2, 'Number of lookaheads for LOLA correction.'
)
flags.DEFINE_float(
'correction_max_grad_norm',
None,
'Maximum gradient norm of LOLA correction.',
)
flags.DEFINE_float('discount', 0.96, 'Discount factor.')
flags.DEFINE_integer(
'policy_update_interval',
1,
    'Number of critic updates before the policy is updated.',
)
flags.DEFINE_integer('eval_batch_size', 1024,
                     'Number of episodes in an evaluation batch.')
flags.DEFINE_bool(
'use_jit', False, 'If true, JAX jit compilation will be enabled.'
)
flags.DEFINE_bool(
'use_opponent_modelling',
True,
'If false, ground truth opponent weights are used.',
)
flags.DEFINE_integer(
'opp_policy_mini_batches', 8, 'Number of minibatches for opponent policy.'
)
flags.DEFINE_float(
'opponent_model_learning_rate', 0.3, 'Learning rate for opponent model.'
)
flags.DEFINE_bool('debug', False, 'If true, debug mode is enabled.')
def get_action_probs(
agent: OpponentShapingAgent, game: str
) -> List[typing.Dict[str, typing.Any]]:
"""Returns the probability of cooperation and a string repr for each state.
Args:
agent: The agent.
game: The name of the game.
Returns:
    A list of dictionaries, each containing the probability of cooperation
    and a string representation of the corresponding state.
"""
actions = ['C', 'D'] if game == 'ipd' else ['H', 'T']
states = ['s0'] + [''.join(s) for s in itertools.product(actions, repeat=2)]
params = agent.train_state.policy_params[agent.player_id]
action_probs = []
for i, state_str in enumerate(states):
state = np.eye(len(states))[i]
prob = agent.policy_network.apply(params, state).prob(0)
action = actions[0]
action_probs.append(
{'prob': prob.item(), 'name': f'P({action}|{state_str})'}
)
return action_probs
def log_epoch_data(epoch: int, agents: List[OpponentShapingAgent], eval_batch):
"""Logs data to wandb and prints it to the console.
Args:
epoch: The current epoch.
agents: A list of agents.
eval_batch: A batch of episodes.
"""
logs = {}
for agent in agents:
avg_step_reward = np.mean(
[ts.rewards[agent.player_id] for ts in eval_batch]
)
probs = get_action_probs(agent, game=FLAGS.game)
for info in probs:
logs[f'agent_{agent.player_id}/{info["name"]}'] = info['prob']
probs = ', '.join([f'{info["name"]}: {info["prob"]:.2f}' for info in probs])
metrics = agent.metrics()
logs.update({
f'agent_{agent.player_id}/avg_step_reward': avg_step_reward,
**{
f'agent_{agent.player_id}/{k}': v.item() for k, v in metrics.items()
},
})
print(
f'[epoch {epoch}] Agent {agent.player_id}: {avg_step_reward:.2f} |'
f' {probs}'
)
wandb.log(logs)
def collect_batch(
env: Environment, agents: List[OpponentShapingAgent], eval_mode: bool
) -> List[TimeStep]:
"""Collects one episode.
Args:
env: The environment.
agents: A list of opponent shaping agents.
eval_mode: If true, the agents will be run in evaluation mode.
Returns:
A list of time steps.
"""
episode = []
time_step = env.reset()
episode.append(time_step)
while not time_step.last():
actions = []
for agent in agents:
action, _ = agent.step(time_step, is_evaluation=eval_mode)
if action is not None:
action = action.squeeze()
actions.append(action)
time_step = env.step(np.stack(actions, axis=1))
time_step.observations['actions'] = actions
episode.append(time_step)
for agent in agents:
agent.step(time_step, is_evaluation=eval_mode)
return episode
def make_agent(
key: jax.random.PRNGKey,
player_id: int,
env: Environment,
networks: Tuple[hk.Transformed, hk.Transformed],
) -> OpponentShapingAgent:
"""Creates an opponent shaping agent.
Args:
key: A random seed key.
player_id: The id of the player.
env: The environment.
networks: A tuple of policy and critic networks transformed by
hk.transform.
Returns:
An opponent shaping agent instance.
"""
policy_network, critic_network = networks
return OpponentShapingAgent(
player_id=player_id,
opponent_ids=[1 - player_id],
seed=key,
info_state_size=env.observation_spec()['info_state'][player_id],
num_actions=env.action_spec()['num_actions'][player_id],
policy=policy_network,
critic=critic_network,
batch_size=FLAGS.batch_size,
num_critic_mini_batches=FLAGS.critic_mini_batches,
pi_learning_rate=FLAGS.policy_lr,
opp_policy_learning_rate=FLAGS.opp_policy_lr,
num_opponent_updates=FLAGS.opp_policy_mini_batches,
critic_learning_rate=FLAGS.critic_lr,
opponent_model_learning_rate=FLAGS.opponent_model_learning_rate,
policy_update_interval=FLAGS.policy_update_interval,
discount=FLAGS.discount,
critic_discount=0, # Predict the imm. reward (for iterated matrix games)
correction_type=FLAGS.correction_type,
clip_grad_norm=FLAGS.correction_max_grad_norm,
use_jit=FLAGS.use_jit,
n_lookaheads=FLAGS.n_lookaheads,
env=env,
)
def make_agent_networks(
num_states: int, num_actions: int
) -> Tuple[hk.Transformed, hk.Transformed]:
"""Creates action weights for each state-action pair and values for each state.
Args:
num_states: The number of distinct states.
num_actions: The number of distinct actions.
Returns:
A tuple of policy and critic networks transformed by hk.transform.
"""
def policy(obs):
theta = hk.get_parameter(
'theta',
init=hk.initializers.Constant(0),
shape=(num_states, num_actions),
)
logits = jnp.select(obs, theta)
logits = jnp.nan_to_num(logits)
return distrax.Categorical(logits=logits)
def value_fn(obs):
w = hk.get_parameter(
'w', [num_states], init=jnp.zeros
) # @pylint: disable=invalid-name
return w[jnp.argmax(obs, axis=-1)].reshape(*obs.shape[:-1], 1)
return hk.without_apply_rng(hk.transform(policy)), hk.without_apply_rng(
hk.transform(value_fn)
)
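# Hedged illustration (not used by the training code above): for the tabular
# policy in make_agent_networks, `jnp.select(obs, theta)` picks the row of
# `theta` that matches the active entry of a one-hot observation, so every
# state carries its own independent action logits. The sizes below are made up.
def _tabular_policy_sketch():
  num_states, num_actions = 5, 2
  theta = jnp.zeros((num_states, num_actions))
  obs = jax.nn.one_hot(1, num_states) > 0  # Boolean one-hot for state index 1.
  return jnp.select(obs, theta)  # Equivalent to theta[1].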
def make_env(game: str, iterations: int, batch_size: int) -> Environment:
"""Creates an environment.
The environment is either iterated prisoners dilemma or iterated matching
pennies.
Args:
game: The game to play. Either 'ipd' or 'imp'.
iterations: The number of iterations to play.
batch_size: The batch size.
Returns:
An environment instance.
"""
if game == 'ipd':
env = IteratedPrisonersDilemma(iterations=iterations, batch_size=batch_size)
elif game == 'imp':
env = IteratedMatchingPennies(iterations=iterations, batch_size=batch_size)
else:
raise ValueError(f'Unknown game: {game}')
return env
def setup_agents(
env: Environment, rng: hk.PRNGSequence
) -> List[OpponentShapingAgent]:
"""Creates an opponent shaping agent for each player in the environment.
Args:
env: The environment.
rng: A random seed key.
Returns:
A list of opponent shaping agents.
"""
agents = []
num_actions = env.action_spec()['num_actions']
info_state_shape = env.observation_spec()['info_state']
for player_id in range(env.num_players):
networks = make_agent_networks(
num_states=info_state_shape[player_id][0],
num_actions=num_actions[player_id],
)
agent = make_agent(
key=next(rng), player_id=player_id, env=env, networks=networks
)
agents.append(agent)
return agents
def update_weights(agents: List[OpponentShapingAgent]):
"""Updates the weights of the opponent models.
Args:
agents: A list of opponent shaping agents.
Returns:
None
"""
agent: OpponentShapingAgent
for agent in agents:
for opp in [a for a in agents if a.player_id != agent.player_id]:
agent.update_params(state=opp.train_state, player_id=opp.player_id)
def main(_):
"""Main function. Runs the experiment."""
if FLAGS.exp_name is None:
FLAGS.exp_name = f'{FLAGS.game}_{FLAGS.seed}'
if not FLAGS.debug:
wandb.login(key=os.environ.get('WANDB_API_KEY', None))
wandb.init(
project='open-spiel-opponent-modelling',
group=FLAGS.exp_name,
config={
'game': FLAGS.game,
'seed': FLAGS.seed,
'epochs': FLAGS.epochs,
'batch_size': FLAGS.batch_size,
'critic_mini_batches': FLAGS.critic_mini_batches,
'game_iterations': FLAGS.game_iterations,
'policy_lr': FLAGS.policy_lr,
'opp_policy_lr': FLAGS.opp_policy_lr,
'critic_lr': FLAGS.critic_lr,
'correction_type': FLAGS.correction_type,
'n_lookaheads': FLAGS.n_lookaheads,
'correction_max_grad_norm': FLAGS.correction_max_grad_norm,
'discount': FLAGS.discount,
'policy_update_interval': FLAGS.policy_update_interval,
'use_opponent_modelling': FLAGS.use_opponent_modelling,
'opp_policy_mini_batches': FLAGS.opp_policy_mini_batches,
'opponent_model_learning_rate': FLAGS.opponent_model_learning_rate,
},
mode='disabled' if FLAGS.debug else 'online',
)
rng = hk.PRNGSequence(key_or_seed=FLAGS.seed)
env = make_env(
iterations=FLAGS.game_iterations,
batch_size=FLAGS.batch_size,
game=FLAGS.game,
)
agents = setup_agents(env=env, rng=rng)
if not FLAGS.use_opponent_modelling:
update_weights(agents)
batch = collect_batch(env=env, agents=agents, eval_mode=True)
log_epoch_data(epoch=0, agents=agents, eval_batch=batch)
for epoch in range(1, FLAGS.epochs + 1):
batch = collect_batch(env=env, agents=agents, eval_mode=False)
if not FLAGS.use_opponent_modelling:
update_weights(agents)
log_epoch_data(epoch=epoch, agents=agents, eval_batch=batch)
print('#' * 100)
wandb.finish()
if __name__ == '__main__':
app.run(main)
| open_spiel-master | open_spiel/python/examples/opponent_shaping/lola_iterated_matrix_games_jax.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluation."""
from absl import flags
import jax
import jax.numpy as jnp
import numpy as np
FLAGS = flags.FLAGS
@jax.jit
def compute_best_response_strategy(utility):
actions_count = utility.shape[-1]
opponent_action = jnp.argmin(utility, axis=-1)
opponent_strategy = jax.nn.one_hot(opponent_action, actions_count)
return opponent_strategy
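# Worked example (illustrative only): for a single utility row
# [[1.0, -2.0, 0.5]], argmin selects action 1, so the best-responding opponent
# strategy is the one-hot [[0., 1., 0.]].
def _best_response_sketch():
  utility = jnp.asarray([[1.0, -2.0, 0.5]])
  return compute_best_response_strategy(utility)  # -> [[0., 1., 0.]]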
@jax.jit
def compute_values_against_best_response(strategy, payoff):
utility = jnp.matmul(strategy, payoff)
br_strategy = compute_best_response_strategy(utility)
return jnp.matmul(payoff, jnp.transpose(br_strategy))
def evaluate_against_best_response(agent, payoff_batch, steps_count):
"""Evaluation against best response agent.
Args:
agent: Agent model.
payoff_batch: Payoff matrix.
steps_count: Number of steps.
"""
current_policy = agent.initial_policy()
values = jax.vmap(compute_values_against_best_response)(current_policy,
payoff_batch)
for step in range(steps_count):
current_policy = agent.next_policy(values)
values = jax.vmap(compute_values_against_best_response)(current_policy,
payoff_batch)
values = jnp.transpose(values, [0, 1, 2])
value = jnp.matmul(current_policy, values)
for i in range(value.shape[0]):
print(step, np.mean(np.asarray(value[i])))
def compute_regrets(payoff_batch, strategy_x, strategy_y):
values_y = -jnp.matmul(strategy_x, payoff_batch)
values_x = jnp.transpose(
jnp.matmul(payoff_batch, jnp.transpose(strategy_y, [0, 2, 1])), [0, 2, 1])
value_x = jnp.matmul(
jnp.matmul(strategy_x, payoff_batch),
jnp.transpose(strategy_y, [0, 2, 1]))
value_y = -value_x
regrets_x = values_x - value_x
regrets_y = values_y - value_y
return regrets_x, regrets_y
def evaluate_in_selfplay(agent_x, agent_y, payoff_batch, steps_count):
"""Evalute in selfplay.
Args:
agent_x: First agent.
agent_y: Second agent.
payoff_batch: Payoff matrix.
steps_count: Number of steps.
"""
payoff_batch_size = payoff_batch.shape[0]
regret_sum_x = np.zeros(shape=[payoff_batch_size, 1, FLAGS.num_actions])
regret_sum_y = np.zeros(shape=[payoff_batch_size, 1, FLAGS.num_actions])
strategy_x = agent_x.initial_policy()
strategy_y = agent_y.initial_policy()
regrets_x, regrets_y = compute_regrets(payoff_batch, strategy_x, strategy_y)
regret_sum_x += regrets_x
regret_sum_y += regrets_y
for s in range(steps_count):
values_y = -jnp.matmul(strategy_x, payoff_batch)
values_x = jnp.transpose(
jnp.matmul(payoff_batch, jnp.transpose(strategy_y, [0, 2, 1])),
[0, 2, 1])
values_x = jnp.transpose(values_x, [0, 2, 1])
values_y = jnp.transpose(values_y, [0, 2, 1])
strategy_x = agent_x.next_policy(values_x)
strategy_y = agent_y.next_policy(values_y)
regrets_x, regrets_y = compute_regrets(payoff_batch, strategy_x, strategy_y)
regret_sum_x += regrets_x
regret_sum_y += regrets_y
print(
jnp.mean(
jnp.max(
jnp.concatenate([regret_sum_x, regret_sum_y], axis=2),
axis=[1, 2]) / (s + 1)))
| open_spiel-master | open_spiel/python/examples/meta_cfr/matrix_games/evaluation.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RNN meta-regret matching with self-play agents."""
from typing import List
from absl import flags
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
import optax
from open_spiel.python.examples.meta_cfr.matrix_games.rnn_model import RNNModel
FLAGS = flags.FLAGS
def _make_network(lstm_hidden_sizes: List[int],
mlp_hidden_sizes: List[int],
output_dim: int) -> hk.RNNCore:
"""set up the network."""
layers = []
for k, hidden_size in enumerate(lstm_hidden_sizes):
layers += [hk.LSTM(hidden_size, name=f'lstm_layer_{k}'), jax.nn.relu]
layers += [hk.nets.MLP(mlp_hidden_sizes + [output_dim], name='mlp')]
return RNNModel(layers)
def _make_forwards(lstm_hidden_sizes: List[int], mlp_hidden_sizes: List[int],
output_dim: int, batch_size: int) -> hk.Transformed:
"""Forward pass."""
def forward_fn(inputs):
rnn = _make_network(lstm_hidden_sizes, mlp_hidden_sizes, output_dim)
initial_state = rnn.initial_state(batch_size=batch_size)
outputs, _ = hk.dynamic_unroll(rnn, inputs, initial_state, time_major=False)
return outputs
network = hk.transform(forward_fn)
return network
def meta_loss(opt_params, net_apply, payoff, steps, rng):
"""Meta loss function."""
regret_sum_x = np.zeros(shape=[FLAGS.batch_size, 1, FLAGS.num_actions])
regret_sum_y = np.zeros(shape=[FLAGS.batch_size, 1, FLAGS.num_actions])
total_loss = 0
@jax.jit
def body_fun(s, total_loss):
nonlocal regret_sum_x
nonlocal regret_sum_y
x = net_apply(opt_params, rng, regret_sum_x / (s + 1))
y = net_apply(opt_params, rng, regret_sum_y / (s + 1))
strategy_x = jax.nn.softmax(x)
strategy_y = jnp.transpose(jax.nn.softmax(y), [0, 2, 1])
values_x = jnp.matmul(payoff, strategy_y)
values_y = -jnp.matmul(strategy_x, payoff)
value_x = jnp.matmul(jnp.matmul(strategy_x, payoff), strategy_y)
value_y = -value_x
curren_regret_x = values_x - value_x
curren_regret_y = values_y - value_y
curren_regret_x = jnp.transpose(curren_regret_x, [0, 2, 1])
regret_sum_x += curren_regret_x
regret_sum_y += curren_regret_y
current_loss = jnp.max(
jax.numpy.concatenate([curren_regret_x, curren_regret_y], axis=2),
axis=[1, 2])
total_loss += current_loss
return total_loss
def fori_loop(lower, steps, body_fun, total_loss):
val = total_loss
for i in range(lower, steps):
      val = body_fun(i, val)  # Feed the running loss back in so it accumulates.
return val
total_loss = fori_loop(0, steps, body_fun, total_loss)
return jnp.mean(total_loss)
class OptimizerModel:
"""Optimizer model."""
def __init__(self, learning_rate):
self.learning_rate = learning_rate
self.model = _make_forwards(
lstm_hidden_sizes=[20],
mlp_hidden_sizes=[],
output_dim=3,
batch_size=FLAGS.batch_size)
self.net_apply = self.model.apply
self.net_init = self.model.init
self.opt_update, self.net_params, self.opt_state = None, None, None
def lr_scheduler(self, init_value):
schedule_fn = optax.polynomial_schedule(
init_value=init_value, end_value=0.05, power=1., transition_steps=50)
return schedule_fn
def get_optimizer_model(self):
schedule_fn = self.lr_scheduler(self.learning_rate)
opt_init, self.opt_update = optax.chain(
optax.scale_by_adam(), optax.scale_by_schedule(schedule_fn),
optax.scale(-self.learning_rate))
rng = jax.random.PRNGKey(10)
dummy_input = np.random.normal(
loc=0, scale=10., size=(FLAGS.batch_size, 1, FLAGS.num_actions))
self.net_params = self.net_init(rng, dummy_input)
self.opt_state = opt_init(self.net_params)
class MetaSelfplayAgent:
"""Meta player agent."""
def __init__(self, repeats, training_epochs, data_loader):
self.repeats = repeats
self.training_epochs = training_epochs
self.net_apply = None
self.net_params = None
self.regret_sum = None
self.step = 0
self.data_loader = data_loader
self._rng = hk.PRNGSequence(10)
def train(self):
self.training_optimizer()
self.regret_sum = jnp.zeros(shape=[FLAGS.batch_size, 1, FLAGS.num_actions])
def initial_policy(self):
x = self.net_apply(self.net_params, next(self._rng), self.regret_sum)
self.last_policy = jax.nn.softmax(x)
self.step += 1
return self.last_policy
def next_policy(self, last_values):
value = jnp.matmul(self.last_policy, last_values)
curren_regret = jnp.transpose(last_values, [0, 2, 1]) - value
self.regret_sum += curren_regret
x = self.net_apply(self.net_params, next(self._rng),
self.regret_sum / (self.step + 1))
self.last_policy = jax.nn.softmax(x)
self.step += 1
return self.last_policy
def training_optimizer(self):
"""Train optimizer."""
optimizer = OptimizerModel(0.01)
optimizer.get_optimizer_model()
for _ in range(FLAGS.num_batches):
batch_payoff = next(self.data_loader)
for _ in range(self.repeats):
grads = jax.grad(
meta_loss, has_aux=False)(optimizer.net_params, optimizer.net_apply,
batch_payoff, self.training_epochs,
next(self._rng))
updates, optimizer.opt_state = optimizer.opt_update(
grads, optimizer.opt_state)
optimizer.net_params = optax.apply_updates(optimizer.net_params,
updates)
self.net_apply = optimizer.net_apply
self.net_params = optimizer.net_params
| open_spiel-master | open_spiel/python/examples/meta_cfr/matrix_games/rnn_meta_selfplay_agent.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataset for structured payoff matrices."""
from absl import flags
import numpy as np
FLAGS = flags.FLAGS
class Dataset:
"""Dataset class."""
def __init__(self, base_matrix, num_training_batches, minval, maxval):
self._base_matrix = base_matrix
self._num_training_batches = num_training_batches
self._minval, self._maxval = minval, maxval
    # Working copy of the base matrix, reused for the single-problem case.
self._new_matrix = np.copy(self._base_matrix)
def get_training_batch(self):
"""Get training data."""
while True:
if not FLAGS.single_problem:
random_vec = np.random.randint(
low=self._minval, high=self._maxval, size=FLAGS.batch_size)
self._new_matrix = np.copy(self._base_matrix)
for i in range(FLAGS.batch_size):
self._new_matrix[self._new_matrix > 0] += random_vec[i]
self._new_matrix[self._new_matrix < 0] -= random_vec[i]
yield self._new_matrix
def get_eval_batch(self):
"""Get eval dataset."""
if not FLAGS.single_problem:
random_vec = np.random.randint(
low=self._minval, high=self._maxval, size=FLAGS.batch_size)
self._new_matrix = np.copy(self._base_matrix)
for i in range(FLAGS.batch_size):
self._new_matrix[self._new_matrix > 0] += random_vec[i]
self._new_matrix[self._new_matrix < 0] -= random_vec[i]
return self._new_matrix
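# Usage sketch (hedged; mirrors main.py in this directory). Calling this
# requires the absl flags (batch_size, single_problem) to have been parsed;
# the helper is never invoked here and exists only as an illustration.
def _dataset_usage_sketch():
  base = np.array([[[0, -1, 1], [1, 0, -1], [-1, 1, 0]]] * FLAGS.batch_size)
  dataset = Dataset(base, num_training_batches=1, minval=0, maxval=10)
  train_iter = dataset.get_training_batch()  # Infinite generator.
  return next(train_iter), dataset.get_eval_batch()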
| open_spiel-master | open_spiel/python/examples/meta_cfr/matrix_games/matrix_dataset.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Meta-regret matching with self-play agents."""
from typing import List
from absl import flags
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
import optax
from open_spiel.python.examples.meta_cfr.matrix_games import utils
FLAGS = flags.FLAGS
def opponent_best_response_strategy(utility):
opponent_action = jnp.argmin(utility, axis=-1)
opponent_strategy = jax.nn.one_hot(opponent_action, FLAGS.num_actions)
return opponent_strategy
def _mlp_forwards(mlp_hidden_sizes: List[int]) -> hk.Transformed:
"""Returns a haiku transformation of the MLP model to be used in optimizer.
Args:
mlp_hidden_sizes: List containing size of linear layers.
Returns:
    Haiku transformation of the MLP network.
"""
def forward_fn(inputs):
mlp = hk.nets.MLP(mlp_hidden_sizes, activation=jax.nn.relu, name="mlp")
return mlp(inputs)
return hk.transform(forward_fn)
class OptimizerModel:
"""Optimizer model."""
def __init__(self, learning_rate):
self.learning_rate = learning_rate
self.model = _mlp_forwards([64, 16, FLAGS.num_actions])
self._net_init = self.model.init
self.net_apply = self.model.apply
self.opt_update, self.net_params, self.opt_state = None, None, None
def lr_scheduler(self, init_value):
schedule_fn = optax.polynomial_schedule(
init_value=init_value, end_value=0.05, power=1., transition_steps=50)
return schedule_fn
def get_optimizer_model(self):
schedule_fn = self.lr_scheduler(self.learning_rate)
opt_init, self.opt_update = optax.chain(
optax.scale_by_adam(), optax.scale_by_schedule(schedule_fn),
optax.scale(-self.learning_rate))
rng = jax.random.PRNGKey(10)
dummy_input = np.random.normal(
loc=0, scale=10., size=(FLAGS.batch_size, 1, FLAGS.num_actions))
self.net_params = self._net_init(rng, dummy_input)
self.opt_state = opt_init(self.net_params)
class MetaSelfplayAgent:
"""Meta player."""
def __init__(self, repeats, training_epochs, data_loader):
self.repeats = repeats
self.training_epochs = training_epochs
self.net_apply = None
self.net_params = None
self.regret_sum = None
self.step = 0
self.data_loader = data_loader
def train(self):
self.training_optimizer()
self.regret_sum = jnp.zeros(shape=[FLAGS.batch_size, 1, FLAGS.num_actions])
def initial_policy(self):
x = self.net_apply(self.net_params, None, self.regret_sum)
self.last_policy = jax.nn.softmax(x)
self.step += 1
return self.last_policy
def next_policy(self, last_values):
value = jnp.matmul(self.last_policy, last_values)
curren_regret = jnp.transpose(last_values, [0, 2, 1]) - value
self.regret_sum += curren_regret
x = self.net_apply(self.net_params, None, self.regret_sum / (self.step + 1))
self.last_policy = jax.nn.softmax(x)
self.step += 1
return self.last_policy
def training_optimizer(self):
"""Training optimizer."""
optimizer = OptimizerModel(0.01)
optimizer.get_optimizer_model()
for _ in range(FLAGS.num_batches):
batch_payoff = next(self.data_loader)
# for _ in range(self.repeats):
grads = jax.grad(
utils.meta_loss,
has_aux=False)(optimizer.net_params, optimizer.net_apply,
batch_payoff, self.training_epochs)
updates, optimizer.opt_state = optimizer.opt_update(
grads, optimizer.opt_state)
optimizer.net_params = optax.apply_updates(optimizer.net_params, updates)
self.net_apply = optimizer.net_apply
self.net_params = optimizer.net_params
| open_spiel-master | open_spiel/python/examples/meta_cfr/matrix_games/meta_selfplay_agent.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for meta learning for regret minimization."""
from absl import flags
import jax
import jax.numpy as jnp
import numpy as np
FLAGS = flags.FLAGS
def meta_loss(opt_params, net_apply, payoff, steps):
"""Returns the meta learning loss value.
Args:
opt_params: Optimizer parameters.
net_apply: Apply function.
payoff: Payoff matrix.
steps: Number of steps.
Returns:
Accumulated loss value over number of steps.
"""
regret_sum_x = np.zeros(shape=[FLAGS.batch_size, 1, FLAGS.num_actions])
regret_sum_y = np.zeros(shape=[FLAGS.batch_size, 1, FLAGS.num_actions])
total_loss = 0
step = 0
@jax.jit
def scan_body(carry, x):
nonlocal regret_sum_x
nonlocal regret_sum_y
regret_sum_x, regret_sum_y, current_step, total_loss = carry
x = net_apply(opt_params, None, regret_sum_x / (current_step + 1))
y = net_apply(opt_params, None, regret_sum_y / (current_step + 1))
strategy_x = jax.nn.softmax(x)
strategy_y = jnp.transpose(jax.nn.softmax(y), [0, 2, 1])
values_x = jnp.matmul(payoff, strategy_y) # val_x = payoff * st_y
values_y = -jnp.matmul(strategy_x, payoff) # val_y = -1 * payoff * st_x
value_x = jnp.matmul(jnp.matmul(strategy_x, payoff), strategy_y)
value_y = -value_x
curren_regret_x = values_x - value_x
curren_regret_y = values_y - value_y
curren_regret_x = jnp.transpose(curren_regret_x, [0, 2, 1])
regret_sum_x += curren_regret_x
regret_sum_y += curren_regret_y
current_loss = jnp.mean(jnp.max(
jax.numpy.concatenate([curren_regret_x, curren_regret_y], axis=2),
axis=[1, 2]), axis=-1)
total_loss += current_loss
current_step += 1
return (regret_sum_x, regret_sum_y, current_step, total_loss), None
(regret_sum_x, regret_sum_y, step, total_loss), _ = jax.lax.scan(
scan_body,
(regret_sum_x, regret_sum_y, step, total_loss),
None,
length=steps,
)
return total_loss
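# Usage sketch (hedged; mirrors meta_selfplay_agent.training_optimizer in this
# directory). `optimizer` is assumed to be an OptimizerModel whose
# get_optimizer_model() has been called, and `payoff` a batch of payoff
# matrices of shape [batch_size, num_actions, num_actions]:
#
#   grads = jax.grad(meta_loss)(optimizer.net_params, optimizer.net_apply,
#                               payoff, steps)
#   updates, optimizer.opt_state = optimizer.opt_update(grads,
#                                                       optimizer.opt_state)
#   optimizer.net_params = optax.apply_updates(optimizer.net_params, updates)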
| open_spiel-master | open_spiel/python/examples/meta_cfr/matrix_games/utils.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RNN model."""
from typing import Callable, List, Union, Optional
import haiku as hk
import jax.numpy as jnp
class RNNModel(hk.RNNCore):
"""RNN model."""
def __init__(self,
layers: List[Union[hk.Module, Callable[[jnp.ndarray],
jnp.ndarray]]],
name: Optional[str] = 'RNN'):
super().__init__(name=name)
self._layers = layers
def __call__(self, inputs, prev_state):
x = inputs
curr_state = [None] * len(prev_state)
for k, layer in enumerate(self._layers):
if isinstance(layer, hk.RNNCore):
x, curr_state[k] = layer(x, prev_state[k])
else:
x = layer(x)
return x, tuple(curr_state)
def initial_state(self, batch_size: Optional[int]):
layerwise_init_state = []
for layer in self._layers:
if isinstance(layer, hk.RNNCore):
layerwise_init_state.append(layer.initial_state(batch_size))
else:
layerwise_init_state.append(None)
return tuple(layerwise_init_state)
| open_spiel-master | open_spiel/python/examples/meta_cfr/matrix_games/rnn_model.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Regret matching."""
from absl import flags
import jax
import jax.numpy as jnp
import numpy as np
FLAGS = flags.FLAGS
class RegretMatchingAgent:
"""Regret matching agent."""
def __init__(self, num_actions, data_loader):
self.num_actions = num_actions
# self.regret_sum = jax.numpy.array(np.zeros(self.num_actions))
self.regret_sum = jax.numpy.array(
np.zeros(shape=[FLAGS.batch_size, 1, self.num_actions]))
self.data_loader = data_loader
def train(self):
pass
def initial_policy(self):
self.last_policy = self.regret_matching_policy(self.regret_sum)
return self.last_policy
def next_policy(self, last_values):
value = jnp.matmul(self.last_policy, last_values)
last_values = jnp.transpose(last_values, [0, 2, 1])
current_regrets = last_values - value
self.regret_sum += current_regrets
self.last_policy = self.regret_matching_policy(self.regret_sum)
return self.last_policy
def regret_matching_policy(self, regret_sum):
"""Regret matching policy."""
strategy = np.copy(regret_sum)
strategy[strategy < 0] = 0
strategy_sum = np.sum(strategy, axis=-1)
for i in range(FLAGS.batch_size):
if strategy_sum[i] > 0:
strategy[i] /= strategy_sum[i]
else:
strategy[i] = np.repeat(1 / self.num_actions, self.num_actions)
return strategy
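# Worked example (illustrative only): regret matching keeps the positive part
# of the accumulated regrets and normalizes it. For regret sums [2., -1., 3.],
# the positive part is [2., 0., 3.] and the policy is [0.4, 0., 0.6]; if no
# regret is positive, regret_matching_policy above falls back to uniform.
def _regret_matching_sketch():
  regrets = np.array([[[2.0, -1.0, 3.0]]])  # Shape [batch=1, 1, num_actions].
  positive = np.maximum(regrets, 0)
  return positive / positive.sum(axis=-1, keepdims=True)  # [[[0.4, 0.0, 0.6]]]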
| open_spiel-master | open_spiel/python/examples/meta_cfr/matrix_games/regret_matching_agent.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main file to train and evaluate meta-regret and regret matching agents."""
from absl import app
from absl import flags
import numpy as np
from open_spiel.python.examples.meta_cfr.matrix_games import evaluation
from open_spiel.python.examples.meta_cfr.matrix_games import matrix_dataset
from open_spiel.python.examples.meta_cfr.matrix_games import meta_selfplay_agent
from open_spiel.python.examples.meta_cfr.matrix_games import regret_matching_agent
FLAGS = flags.FLAGS
flags.DEFINE_integer("batch_size", 1, "Batch size.")
flags.DEFINE_integer("evaluation_steps", 1000, "Number of evaluation steps.")
flags.DEFINE_integer("num_batches", 1,
"Number of batches to train a meta optimizer.")
flags.DEFINE_integer("repeats", 10,
"Number of training each batch in meta learning.")
flags.DEFINE_integer("seed", 10, "random seed.")
flags.DEFINE_integer("min_val", 0,
"minimum value for randomizing a payoff matrix.")
flags.DEFINE_integer("max_val", 10,
"maximum value for randomizing a payoff matrix.")
flags.DEFINE_integer("num_actions", 3, "Number of actions an agent can take.")
flags.DEFINE_bool("single_problem", False,
"If the matrix dataset generates only a single matrix.")
def selfplay_main(argv):
"""Self play."""
del argv
np.random.seed(FLAGS.seed)
  # Rock-paper-scissors base payoff matrix.
base_matrix = np.array([[[0, -1, 1], [1, 0, -1], [-1, 1, 0]]] *
FLAGS.batch_size)
dataset = matrix_dataset.Dataset(
base_matrix=base_matrix,
num_training_batches=FLAGS.num_batches,
minval=FLAGS.min_val,
maxval=FLAGS.max_val)
data_loader = dataset.get_training_batch()
eval_payoff_batch = dataset.get_eval_batch()
mr_agent = meta_selfplay_agent.MetaSelfplayAgent(
repeats=FLAGS.repeats,
training_epochs=FLAGS.evaluation_steps,
data_loader=data_loader)
mr_agent.train()
mr_agent2 = meta_selfplay_agent.MetaSelfplayAgent(
repeats=FLAGS.repeats,
training_epochs=FLAGS.evaluation_steps,
data_loader=data_loader)
mr_agent2.train()
rm_agent = regret_matching_agent.RegretMatchingAgent(
num_actions=FLAGS.num_actions, data_loader=data_loader)
rm_agent.train()
rm_agent2 = regret_matching_agent.RegretMatchingAgent(
num_actions=FLAGS.num_actions, data_loader=data_loader)
rm_agent2.train()
print("Regret matching")
evaluation.evaluate_in_selfplay(
agent_x=rm_agent,
agent_y=rm_agent2,
payoff_batch=eval_payoff_batch,
steps_count=FLAGS.evaluation_steps)
print("Meta regret matching")
evaluation.evaluate_in_selfplay(
agent_x=mr_agent,
agent_y=mr_agent2,
payoff_batch=eval_payoff_batch,
steps_count=FLAGS.evaluation_steps)
if __name__ == "__main__":
app.run(selfplay_main)
| open_spiel-master | open_spiel/python/examples/meta_cfr/matrix_games/main.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for meta CFR Algorithm."""
from absl import flags
from absl.testing import absltest
from absl.testing import parameterized
import haiku as hk
import jax
import mock
import numpy as np
import optax
from open_spiel.python.examples.meta_cfr.sequential_games import meta_learning
from open_spiel.python.examples.meta_cfr.sequential_games import models
from open_spiel.python.examples.meta_cfr.sequential_games import openspiel_api
FLAGS = flags.FLAGS
def meta_cfr_agent(game_name='kuhn_poker'):
return meta_learning.MetaCFRRegretAgent(
training_epochs=1,
meta_learner_training_epochs=1,
game_name=game_name,
game_config={'players': 2},
perturbation=False,
seed=0,
model_type='MLP',
best_response=True)
class MetaLearningTest(parameterized.TestCase):
def setup_optimizer(self, num_actions, num_infostates):
if FLAGS.use_infostate_representation:
dummy_input = np.zeros(
shape=[FLAGS.batch_size, 1, num_actions + num_infostates])
else:
dummy_input = np.zeros(shape=[FLAGS.batch_size, 1, num_actions])
def mlp_forward(dummy_input):
mlp = hk.nets.MLP([10, num_actions])
return mlp(dummy_input)
forward = hk.transform(mlp_forward)
rng_seq = jax.random.PRNGKey(10)
params = forward.init(rng_seq, dummy_input)
lr_scheduler_fn = optax.polynomial_schedule(
init_value=0.2, end_value=0.0001, power=1., transition_steps=100)
opt_init, opt_update = optax.chain(
optax.scale_by_adam(), optax.scale_by_schedule(lr_scheduler_fn),
optax.scale(-0.2))
net_apply = forward.apply
opt_state = opt_init(params)
return params, net_apply, opt_state, opt_update
@parameterized.named_parameters(('kuhn_poker_game', 'kuhn_poker'),
('leduc_poker_game', 'leduc_poker'))
def test_worldstate_initialization(self, game_name):
self._world_state = openspiel_api.WorldState(
game_name, {'players': 2}, perturbation=False, random_seed=0)
self._all_actions = self._world_state.get_distinct_actions()
self.assertNotEmpty(self._all_actions,
                        'Number of distinct actions should be greater than 0.')
@parameterized.named_parameters(('kuhn_poker_game', 'kuhn_poker'),
('leduc_poker_game', 'leduc_poker'))
def test_meta_cfr_agent_initialization(self, game_name):
with mock.patch.object(meta_learning.MetaCFRRegretAgent,
'get_num_infostates') as mock_get_num_infostates:
mock_get_num_infostates.return_value = (mock.MagicMock(),
mock.MagicMock())
meta_learning.MetaCFRRegretAgent(
training_epochs=1,
meta_learner_training_epochs=1,
game_name=game_name,
game_config={'players': 2},
perturbation=False,
seed=0,
model_type='MLP',
best_response=True)
mock_get_num_infostates.assert_called_once_with()
@parameterized.named_parameters(('kuhn_poker_game', 'kuhn_poker'),
('leduc_poker_game', 'leduc_poker'))
def test_meta_learning_training(self, game_name):
agent = meta_learning.MetaCFRRegretAgent(
training_epochs=1,
meta_learner_training_epochs=1,
game_name=game_name,
game_config={'players': 2},
perturbation=False,
seed=0,
model_type=models.ModelType.MLP.value,
best_response=True)
num_infostates, _ = agent.get_num_infostates()
num_actions = len(agent._all_actions)
params, net_apply, opt_state, opt_update = self.setup_optimizer(
num_actions, num_infostates)
agent.training_optimizer()
agent.optimizer.net_apply = net_apply
agent.optimizer.opt_state = opt_state
agent.optimizer.net_params = params
agent.optimizer.opt_update = opt_update
world_state = openspiel_api.WorldState(
game_name, {'players': 2}, perturbation=False, random_seed=0)
best_response_val_player_2 = agent.next_policy(world_state)
self.assertGreater(best_response_val_player_2[-1], 0)
if __name__ == '__main__':
absltest.main()
| open_spiel-master | open_spiel/python/examples/meta_cfr/sequential_games/meta_learning_test.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model definitions for optimizer network."""
import enum
from typing import Any, Callable, List, Optional, Union
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
import optax
class ModelType(enum.Enum):
MLP = "MLP"
RNN = "RNN"
def _mlp_forwards(mlp_hidden_sizes: List[int]) -> hk.Transformed:
"""Returns a haiku transformation of the MLP model to be used in optimizer.
Args:
mlp_hidden_sizes: List containing size of linear layers.
Returns:
    Haiku transformation of the MLP network.
"""
def forward_fn(inputs):
mlp = hk.nets.MLP(mlp_hidden_sizes, activation=jax.nn.relu, name="mlp")
return mlp(inputs)
return hk.transform(forward_fn)
def _make_rnn_network(lstm_hidden_sizes: List[int],
mlp_hidden_sizes: List[int]) -> hk.RNNCore:
"""Returns the RNN network.
Args:
lstm_hidden_sizes: List containing size of lstm layers.
mlp_hidden_sizes: List containing size of linear layers.
Returns:
Returns an instance of RNN model.
"""
layers = []
for k, hidden_size in enumerate(lstm_hidden_sizes):
layers += [hk.LSTM(hidden_size, name=f"lstm_layer_{k}"), jax.nn.relu]
layers += [hk.nets.MLP(mlp_hidden_sizes, name="mlp")]
return RNNModel(layers)
def _rnn_forwards(lstm_hidden_sizes: List[int], mlp_hidden_sizes: List[int],
batch_size: int) -> hk.Transformed:
"""Returns a haiku transformation of the RNN model to be used in optimizer.
Args:
lstm_hidden_sizes: List containing size of lstm layers.
mlp_hidden_sizes: List containing size of linear layers.
batch_size: Batch size.
Returns:
Haiku transformation of the RNN network.
"""
def forward_fn(inputs):
rnn = _make_rnn_network(lstm_hidden_sizes, mlp_hidden_sizes)
initial_state = rnn.initial_state(batch_size=batch_size)
outputs, _ = hk.dynamic_unroll(rnn, inputs, initial_state, time_major=False)
return outputs
return hk.transform(forward_fn)
class RNNModel(hk.RNNCore):
"""RNN model."""
def __init__(self,
layers: List[Union[hk.Module, Callable[[jnp.ndarray],
jnp.ndarray]]],
name: Optional[str] = None):
super().__init__(name=name)
self._layers = layers
def __call__(self, inputs, prev_state):
x = inputs
curr_state = [None] * len(prev_state)
for k, layer in enumerate(self._layers):
if isinstance(layer, hk.RNNCore):
x, curr_state[k] = layer(x, prev_state[k])
else:
x = layer(x)
return x, tuple(curr_state)
def initial_state(self, batch_size: Optional[int]) -> Any:
layerwise_init_state = []
for layer in self._layers:
if isinstance(layer, hk.RNNCore):
layerwise_init_state.append(layer.initial_state(batch_size))
else:
layerwise_init_state.append(None)
return tuple(layerwise_init_state)
class OptimizerModel:
"""Optimizer model in l2l paradigm to learn update rules of regret minimizers.
Attributes:
    mlp_sizes: Size of MLP layers. This is a string containing a sequence of
      numbers; each number indicates the size of a linear layer.
    lstm_sizes: Size of LSTM layers. This is a string containing a sequence of
      numbers; each number indicates the size of an LSTM layer.
initial_learning_rate: Initial value of learning rate used in learning
rate scheduler.
batch_size: Batch size.
num_actions: Number of possible actions.
num_infostates: Total number of information states.
model_type: Type of model. For now it can be either MLP or RNN.
use_infostate_representation: Boolean value to indicate if we use
information state information as part of model input or not.
rng: Jax pseudo random number generator.
model: Neural network model we want to optimize.
opt_update: Optax optimizer update function.
net_params: Network parameters.
opt_state: Optax optimizer state.
net_apply: Network apply function.
"""
def __init__(self,
mlp_sizes: str,
lstm_sizes: str,
initial_learning_rate: float,
batch_size: int,
num_actions: int,
num_infostates: int,
model_type: str = "MLP",
use_infostate_representation: bool = True):
self.num_actions = num_actions
self.num_infostates = num_infostates
self.initial_learning_rate = initial_learning_rate
self.batch_size = batch_size
self.use_infostate_representation = use_infostate_representation
self.rng = jax.random.PRNGKey(10)
mlp_sizes_list = [
int(mlp_size.strip()) for mlp_size in mlp_sizes.split(",")
]
mlp_sizes_list.append(self.num_actions)
lstm_sizes_list = [
int(lstm_size.strip()) for lstm_size in lstm_sizes.split(",")
]
if model_type == ModelType.MLP.value:
self.model = _mlp_forwards(mlp_sizes_list)
elif model_type == ModelType.RNN.value:
self.model = _rnn_forwards(lstm_sizes_list, mlp_sizes_list,
self.batch_size)
else:
raise ValueError(
f"{model_type} is not a valid model, model_type should be MLP or RNN."
)
self.net_apply = self.model.apply
self._net_init = self.model.init
self.opt_update, self.net_params, self.opt_state = None, None, None
def lr_scheduler(self, init_value: float) -> optax.Schedule:
schedule_fn = optax.polynomial_schedule(
init_value=init_value, end_value=0.0001, power=1., transition_steps=100)
return schedule_fn
def initialize_optimizer_model(self):
"""Initializes the optax optimizer and neural network model."""
lr_scheduler_fn = self.lr_scheduler(self.initial_learning_rate)
opt_init, self.opt_update = optax.chain(
optax.scale_by_adam(), optax.scale_by_schedule(lr_scheduler_fn),
optax.scale(-self.initial_learning_rate))
input_size = self.num_actions
if self.use_infostate_representation:
input_size += self.num_infostates
dummy_input = np.zeros(shape=[self.batch_size, 1, input_size])
self.net_params = self._net_init(self.rng, dummy_input)
self.opt_state = opt_init(self.net_params)
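# Usage sketch (hedged; the parameter values below are made up for
# illustration and the helper is never called by this module):
def _optimizer_model_sketch():
  opt = OptimizerModel(
      mlp_sizes="20, 20",
      lstm_sizes="64",
      initial_learning_rate=0.2,
      batch_size=250,
      num_actions=3,
      num_infostates=12,
      model_type=ModelType.MLP.value,
      use_infostate_representation=True)
  opt.initialize_optimizer_model()  # Builds net_params, opt_state, opt_update.
  return opt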
| open_spiel-master | open_spiel/python/examples/meta_cfr/sequential_games/models.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests counterfactual regret minimization."""
from absl.testing import absltest
from absl.testing import parameterized
from open_spiel.python.examples.meta_cfr.sequential_games import cfr
from open_spiel.python.examples.meta_cfr.sequential_games import game_tree_utils as trees
from open_spiel.python.examples.meta_cfr.sequential_games import openspiel_api
def _uniform_policy(size):
if size > 0:
return [1./size]*size
return []
class CfrTest(parameterized.TestCase):
@parameterized.named_parameters(('kuhn_poker_test', 'kuhn_poker'),
('leduc_poker_test', 'leduc_poker'))
def test_zero_policy_is_uniform(self, game):
config = {'players': 2}
cfr_game_tree = trees.build_game_tree(
openspiel_api.WorldState(
game_name=game, config=config, perturbation=False))
cfr.compute_cfr_values(cfr_game_tree, 1)
infostates_p1 = list(cfr_game_tree.all_infostates_map[1].values())
infostates_p2 = list(cfr_game_tree.all_infostates_map[2].values())
with self.subTest('player_1_initial_policy'):
for i in range(len(infostates_p1)):
self.assertListEqual(
list(infostates_p1[i].policy.values()),
_uniform_policy(len(infostates_p1[i].policy.values())))
with self.subTest('player_2_initial_policy'):
for i in range(len(infostates_p2)):
self.assertListEqual(
list(infostates_p2[i].policy.values()),
_uniform_policy(len(infostates_p2[i].policy.values())))
def test_cfr_leduc_poker(self):
config = {'players': 2}
exploitability_error = 0.2
cfr_game_tree = trees.build_game_tree(
openspiel_api.WorldState(
game_name='leduc_poker', config=config, perturbation=False))
best_response_value_p1, best_response_value_p2 = cfr.compute_cfr_values(
cfr_game_tree, 20)
last_best_response_value_player_1 = best_response_value_p1[-1]
last_best_response_value_player_2 = best_response_value_p2[-1]
exploitability = (last_best_response_value_player_1 +
last_best_response_value_player_2) / 2
# Exploitability values are computed using OpenSpiel cfr
self.assertLessEqual(exploitability, 0.59 + exploitability_error)
def test_cfr_kuhn_poker(self):
config = {'players': 2}
exploitability_error = 0.2
cfr_game_tree = trees.build_game_tree(
openspiel_api.WorldState(
game_name='kuhn_poker', config=config, perturbation=False))
best_response_value_p1, best_response_value_p2 = cfr.compute_cfr_values(
cfr_game_tree, 20)
last_best_response_value_player_1 = best_response_value_p1[-1]
last_best_response_value_player_2 = best_response_value_p2[-1]
exploitability = (last_best_response_value_player_1 +
last_best_response_value_player_2) / 2
# Exploitability values are computed using OpenSpiel cfr
self.assertLessEqual(exploitability, 0.06 + exploitability_error)
if __name__ == '__main__':
absltest.main()
| open_spiel-master | open_spiel/python/examples/meta_cfr/sequential_games/cfr_test.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluation of a CFR best response agent given the world state."""
from absl import flags
FLAGS = flags.FLAGS
def CFRBREvaluation(agent, world_state):
  """Returns the agent's best-response policy for the given world state."""
  return agent.next_policy(world_state)
| open_spiel-master | open_spiel/python/examples/meta_cfr/sequential_games/evaluation.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Meta learning algorithm."""
import os
from typing import Dict, List, Any
from absl import flags
from absl import logging
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
import optax
from open_spiel.python.examples.meta_cfr.sequential_games import cfr
from open_spiel.python.examples.meta_cfr.sequential_games import dataset_generator
from open_spiel.python.examples.meta_cfr.sequential_games import game_tree_utils
from open_spiel.python.examples.meta_cfr.sequential_games import models
from open_spiel.python.examples.meta_cfr.sequential_games import openspiel_api
from open_spiel.python.examples.meta_cfr.sequential_games import typing
from open_spiel.python.examples.meta_cfr.sequential_games import utils
FLAGS = flags.FLAGS
flags.DEFINE_integer("batch_size", 250, "Batch size.")
flags.DEFINE_integer("num_batches", 1, "Number of batches.")
flags.DEFINE_integer("meta_learner_training_epochs", 1,
"Number of meta_learner_training_epochs")
flags.DEFINE_integer("num_tasks", 1, "Number tasks to train meta learner.")
flags.DEFINE_integer("random_seed", 2, "Random seed.")
flags.DEFINE_integer("checkpoint_interval", 50,
"Checkpoint every checkpoint_interval.")
flags.DEFINE_string("game", "leduc_poker", "Name of the game")
flags.DEFINE_integer("players", 2, "Number of players")
flags.DEFINE_bool("perturbation", True, "Random perturbation of the game.")
flags.DEFINE_bool(
"use_infostate_representation", True,
"Use infostate representation as extra input to meta network.")
flags.DEFINE_float("init_lr", 0.2, "Initial learning rate")
flags.DEFINE_string("lstm_sizes", "64", "Size of lstm layers.")
flags.DEFINE_string("mlp_sizes", "20, 20", "Size of mlp layers.")
flags.DEFINE_string("model_type", "MLP", "Model type.")
os.environ["XLA_PYTHON_CLIENT_PREALLOCATE"] = "false"
os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "1.5"
def append_counterfactual_values(
infostates: List[typing.InfostateNode],
counterfactual_values: Dict[str, List[List[float]]]):
for infostate in infostates:
counterfactual_values[infostate.infostate_string].append([
infostate.counterfactual_action_values[a]
for a in infostate.get_actions()
])
def compute_next_policy_invariants(
infostates: typing.InfostateMapping, all_actions: List[int],
infostate_map: typing.InfostateMapping
) -> tuple[Dict[str, jnp.ndarray], Dict[str, List[int]]]:
"""Computes information needed to calculate next policy.
This function computes one hot encodings of infostates and returns mappings
from infostate strings to one hot representations of infostates as well as
illegal actions.
Args:
infostates: List of infostate mappings.
all_actions: List of actions.
infostate_map: Mapping from infostate string to infostate.
Returns:
Returns mappings of infostate strings to one hot representation for
    infostates and illegal actions.
"""
one_hot_representations = {}
illegal_actions = {}
for (infostate_str, infostate) in infostates.items():
if infostate.is_terminal():
continue
legal_actions = infostate.get_actions()
if len(legal_actions) == 1:
infostate.policy[infostate.get_actions()[0]] = 1
continue
infostate_str_one_hot = jax.nn.one_hot(infostate_map[infostate_str],
len(infostates))
one_hot_representations[infostate_str] = infostate_str_one_hot
illegal_actions[infostate_str] = [
i for i, a in enumerate(all_actions) if a not in legal_actions
]
return one_hot_representations, illegal_actions
def compute_next_policy(infostates: typing.InfostateMapping,
net_apply: typing.ApplyFn, net_params: typing.Params,
epoch: int, all_actions: List[int],
one_hot_representations: Dict[str, jnp.ndarray],
illegal_actions: Dict[str,
List[int]], key: hk.PRNGSequence):
"""Computes next step policy from output of the model.
Args:
infostates: List of infostate mappings.
net_apply: Apply function.
net_params: Model params.
    epoch: Current epoch number, used to average the cumulative regrets.
all_actions: List of actions.
    one_hot_representations: Dictionary from infostate string to the one hot
      representation of the infostate.
illegal_actions: Dictionary from infostate string to the list of illegal
actions.
key: Haiku Pseudo random number generator.
"""
infostate_lst = []
input_lst = []
illegal_action_lst = []
batched_net_output = []
for (infostate_str, infostate) in infostates.items():
if infostate.is_terminal():
continue
legal_actions = infostate.get_actions()
if len(legal_actions) == 1:
infostate.policy[infostate.get_actions()[0]] = 1
continue
regret_vec = np.array([
infostate.regret[a] /
(epoch + 1) if a in infostate.get_actions() else 0
for a in all_actions
])
if FLAGS.use_infostate_representation:
one_hot_representation = one_hot_representations[infostate_str]
net_input = jnp.concatenate([regret_vec, one_hot_representation])
else:
net_input = regret_vec
input_lst.append(net_input)
infostate_lst.append(infostate)
illegal_action_lst.append(illegal_actions[infostate_str])
batched_inputs, output_mappings, relevant_illegal_actions = (
utils.get_batched_input(
input_lst, infostate_lst, illegal_action_lst, FLAGS.batch_size
)
)
idx = 0
for _ in range(int(len(batched_inputs) / FLAGS.batch_size)):
batched_input, output_mapping, relevant_illegal_action = batched_inputs[
idx:idx + FLAGS.batch_size], output_mappings[
idx:idx +
FLAGS.batch_size], relevant_illegal_actions[idx:idx +
FLAGS.batch_size]
idx += FLAGS.batch_size
batched_input_jnp = jnp.array(
np.expand_dims(np.array(batched_input), axis=1))
batched_net_output = utils.get_network_output_batched(
net_apply, net_params,
batched_input_jnp,
relevant_illegal_action, key)
for i, infostate in enumerate(output_mapping):
net_output = jnp.squeeze(batched_net_output[i])
for ai, action in enumerate(infostate.get_actions()):
infostate.policy[action] = float(net_output[ai])
def cfr_br_meta_data(
history_tree_node: typing.HistoryNode,
infostate_nodes: List[typing.InfostateNode],
all_infostates_map: List[typing.InfostateMapping], epochs: int,
net_apply: typing.ApplyFn, net_params: typing.Params,
all_actions: List[int], infostate_map: typing.InfostateMapping,
key: hk.PRNGSequence
) -> tuple[Dict[str, jnp.ndarray], Dict[str, jnp.ndarray], List[float]]:
"""Collects counterfactual values for both players and best response for player_2.
Args:
history_tree_node: Game tree HistoryTreeNode which is the root of the game
tree.
infostate_nodes: Infostates.
all_infostates_map: List of mappings from infostate strings to infostates.
epochs: Number of epochs.
net_apply: Apply function.
net_params: Network parameters.
all_actions: List of all actions.
infostate_map: A mapping from infostate strings to infostates.
key: Haiku pseudo random number generator.
Returns:
Returns counterfactual values for player_1, counterfactual values for
player_2 and best response values for player_2.
"""
counterfactual_values_player1 = {
infostate.infostate_string: []
for infostate in list(all_infostates_map[1].values())
}
counterfactual_values_player2 = {
infostate.infostate_string: []
for infostate in list(all_infostates_map[2].values())
}
non_terminal_infostates_map_player1 = utils.filter_terminal_infostates(
all_infostates_map[1]
)
one_hot_representations_player1, illegal_actions_player1 = (
compute_next_policy_invariants(
non_terminal_infostates_map_player1, all_actions, infostate_map
)
)
player_2_last_best_response_values = []
for epoch in range(epochs):
compute_next_policy(non_terminal_infostates_map_player1, net_apply,
net_params, epoch, all_actions,
one_hot_representations_player1,
illegal_actions_player1, key)
cfr.compute_reach_probabilities(history_tree_node, all_infostates_map)
cfr.cumulate_average_policy(list(all_infostates_map[1].values()))
cfr.compute_best_response_policy(infostate_nodes[2])
cfr.compute_reach_probabilities(history_tree_node, all_infostates_map)
cfr.compute_counterfactual_values(infostate_nodes[1])
cfr.update_regrets(list(all_infostates_map[1].values()))
append_counterfactual_values(
list(all_infostates_map[1].values()), counterfactual_values_player1)
cfr.normalize_average_policy(all_infostates_map[1].values())
cfr.compute_reach_probabilities(history_tree_node, all_infostates_map)
player_2_last_best_response_values.append(
float(cfr.compute_best_response_values(infostate_nodes[2]))
)
logging.info(
"Epoch %d: player_2 best response value is %f",
epoch,
player_2_last_best_response_values[-1],
)
return ( # pytype: disable=bad-return-type # jax-ndarray
counterfactual_values_player1,
counterfactual_values_player2,
player_2_last_best_response_values,
)
class MetaCFRRegretAgent:
"""Meta regret minimizer agent.
Attributes:
training_epochs: Number of training epochs.
meta_learner_training_epochs: Number of epochs for meta learner.
game_name: Name of the game.
game_config: Game configuration.
perturbation: Binary variable to specify perturbation.
seed: Random seed.
model_type: Type of NN model for meta learner.
best_response: Binary variable to specify if using best response.
optimizer: Optimizer model.
"""
def __init__(self,
training_epochs,
meta_learner_training_epochs,
game_name,
game_config,
perturbation,
seed,
model_type="MLP",
best_response=True):
self._training_epochs = training_epochs
self._meta_learner_training_epochs = meta_learner_training_epochs
self._game_name = game_name
self._model_type = model_type
self._perturbation = perturbation
self._game_config = game_config
self._best_response = best_response
self._seed = seed
self._rng = hk.PRNGSequence(100)
self._world_state = openspiel_api.WorldState(self._game_name,
self._game_config,
self._perturbation,
self._seed)
self._all_actions = self._world_state.get_distinct_actions()
self._num_infostates, self._infostate_map = self.get_num_infostates()
self._step = 0
def get_num_infostates(self):
"""Returns number of infostates and infostate mapping.
Returns:
Returns sum of number of infostates for both players and a mapping from
infostate string to infostates.
"""
all_infostates_map = [{}, {}, {}]
_, _ = game_tree_utils.build_tree_dfs(
self._world_state, all_infostates_map)
non_terminal_infostates_map_player1 = utils.filter_terminal_infostates(
all_infostates_map[1])
non_terminal_infostates_map_player2 = utils.filter_terminal_infostates(
all_infostates_map[2])
if self._best_response:
infostate_map = {
infostate_str: infostate_node
for (infostate_node, infostate_str
) in enumerate(list(non_terminal_infostates_map_player1.keys()))
}
return len(non_terminal_infostates_map_player1), infostate_map
nont_terminal_infostates_map_both_players = list(
non_terminal_infostates_map_player1.keys()) + list(
non_terminal_infostates_map_player2.keys())
infostate_map = {
infostate_str: infostate_node
for (infostate_node, infostate_str
) in enumerate(nont_terminal_infostates_map_both_players)
}
return len(non_terminal_infostates_map_player1) + len(
non_terminal_infostates_map_player2), infostate_map
def train(self):
self.training_optimizer()
def next_policy(self, world_state: openspiel_api.WorldState):
"""Computes best reponses for the next step of cfr.
Args:
world_state: Current state of the world.
Returns:
Returns best response values for player_2.
"""
all_infostates_map = [{}, {}, {}]
first_history_node, infostate_nodes = game_tree_utils.build_tree_dfs(
world_state, all_infostates_map)
_, _, player_2_best_response_values = cfr_br_meta_data(
history_tree_node=first_history_node,
infostate_nodes=infostate_nodes,
all_infostates_map=all_infostates_map,
epochs=self._meta_learner_training_epochs,
net_apply=self.optimizer.net_apply,
net_params=self.optimizer.net_params,
all_actions=self._all_actions,
infostate_map=self._infostate_map,
key=self._rng)
return player_2_best_response_values
def optimize_infoset(self, cfvalues: Any, infoset: List[typing.InfostateNode],
infostate_map: typing.InfostateMapping,
rng: hk.PRNGSequence):
"""Apply updates to optimizer state.
Args:
cfvalues: Counterfactual values.
infoset: Infostates.
infostate_map: Mapping from infostate string to infostate.
rng: Next random seed.
"""
grads = jax.grad(
utils.meta_loss, has_aux=False)(self.optimizer.net_params, cfvalues,
self.optimizer.net_apply,
self._meta_learner_training_epochs,
len(self._all_actions), infoset,
infostate_map, FLAGS.batch_size,
next(rng),
FLAGS.use_infostate_representation)
updates, self.optimizer.opt_state = self.optimizer.opt_update(
grads, self.optimizer.opt_state)
self.optimizer.net_params = optax.apply_updates(self.optimizer.net_params,
updates)
def training_optimizer(self):
"""Train an optimizer for meta learner."""
self.optimizer = models.OptimizerModel(
mlp_sizes=FLAGS.mlp_sizes,
lstm_sizes=FLAGS.lstm_sizes,
initial_learning_rate=FLAGS.init_lr,
batch_size=FLAGS.batch_size,
num_actions=len(self._all_actions),
num_infostates=self._num_infostates,
model_type=self._model_type,
use_infostate_representation=FLAGS.use_infostate_representation)
self.optimizer.initialize_optimizer_model()
while self._step < FLAGS.num_tasks:
if self._perturbation:
self._seed = np.random.choice(np.array(list(range(100))))
self._world_state = openspiel_api.WorldState(
self._game_name,
self._game_config,
perturbation=self._perturbation,
random_seed=self._seed)
for epoch in range(self._training_epochs):
logging.info("Training epoch %d", epoch)
all_infostates_map = [{}, {}, {}]
first_history_node, infostate_nodes = game_tree_utils.build_tree_dfs(
self._world_state, all_infostates_map)
cfr_values_player1, cfr_values_player2, _ = cfr_br_meta_data(
history_tree_node=first_history_node,
infostate_nodes=infostate_nodes,
all_infostates_map=all_infostates_map,
epochs=self._meta_learner_training_epochs,
net_apply=self.optimizer.net_apply,
net_params=self.optimizer.net_params,
all_actions=self._all_actions,
infostate_map=self._infostate_map,
key=self._rng)
train_dataset = []
cfvalues_per_player = [
cfr_values_player1, cfr_values_player2
]
# for CFRBR we consider player 0.
player_ix = 0
infosets = [
infoset for infoset in all_infostates_map[player_ix + 1].values()
if len(infoset.get_actions()) >= 2
]
for infoset in infosets:
cfvalues = cfvalues_per_player[player_ix][infoset.infostate_string]
train_dataset.append((cfvalues, infoset))
dataset = dataset_generator.Dataset(train_dataset, FLAGS.batch_size) # pytype: disable=wrong-arg-types # jax-ndarray
data_loader = dataset.get_batch()
for _ in range(FLAGS.num_batches):
batch = next(data_loader)
cfvalues, infoset = zip(*batch)
cfvalues = np.array(list(cfvalues), dtype=object)
cfvalues = utils.mask(cfvalues, infoset, len(self._all_actions),
FLAGS.batch_size)
self.optimize_infoset(cfvalues, infoset, self._infostate_map,
self._rng)
logging.info("Game: %d", self._step)
self._step += 1
| open_spiel-master | open_spiel/python/examples/meta_cfr/sequential_games/meta_learning.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OpenSpiel API."""
import random
from typing import Any, List, Text, Tuple, Dict
from open_spiel.python.examples.meta_cfr.sequential_games import world_representation
import pyspiel
class WorldState(world_representation.WorldState):
"""World state representation for openspiel games.
This class implements world_representation class for openspiel games.
Attributes:
game_name: Name of openspiel game we want to initialize.
config: Config containing game parameters to initialize the game.
    state: Initial state of an openspiel game.
chance_policy: The policy of the chance node in the game tree.
"""
def __init__(self, game_name: str, config: Dict[str, Any],
perturbation: bool, random_seed: int = 100):
self._perturbation = perturbation
self._history = []
self._random_seed = random_seed
self.game_name = game_name
self.config = config
self._game = pyspiel.load_game(self.game_name, self.config)
if str(self._game.get_type().dynamics) == "Dynamics.SIMULTANEOUS":
self._game = pyspiel.convert_to_turn_based(self._game)
# initial_state
self.state = self._game.new_initial_state()
self.chance_policy = self.get_chance_policy()
random.seed(self._random_seed)
def get_distinct_actions(self) -> List[int]:
"""See base class."""
return list(range(self._game.num_distinct_actions()))
def is_terminal(self) -> bool:
"""See base class."""
return self.state.is_terminal()
def get_actions(self) -> List[Any]:
"""See base class."""
if self.is_terminal():
return [[], [], []]
actions = [[0], [0], [0]]
if self.state.is_chance_node():
legal_actions = [
action for (action, prob) in self.state.chance_outcomes()
]
else:
legal_actions = self.state.legal_actions()
actions[self.state.current_player() + 1] = legal_actions
return actions
def get_infostate_string(self, player: int) -> Text:
"""See base class."""
infostate = self.state.information_state_string(player - 1)
return str(len(self._history)) + "|" + str(infostate)
def apply_actions(self, actions: Tuple[int, int, int]) -> None:
"""See base class."""
self.state.apply_action(actions[self.state.current_player() + 1])
self.chance_policy = self.get_chance_policy()
self._history.append(actions)
def get_utility(self, player: int) -> float:
"""See base class."""
assert self.is_terminal()
return float(self.state.returns()[player - 1])
def get_chance_policy(self) -> Dict[int, float]:
"""See base class."""
if self.is_terminal():
return {}
if not self.state.is_chance_node():
return {0: 1}
chance_policy = {
action: prob for (action, prob) in self.state.chance_outcomes()
}
if self._perturbation:
probs = [random.random() for _ in self.state.chance_outcomes()]
chance_policy = {
action: probs[i] / sum(probs)
for i, (action, prob) in enumerate(self.state.chance_outcomes())
}
return chance_policy
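# Illustrative usage (a minimal sketch; the game name and player count below
# mirror the accompanying tests and are arbitrary example values, not
# requirements of this module):
if __name__ == "__main__":
  example_state = WorldState(
      game_name="kuhn_poker", config={"players": 2}, perturbation=False)
  print("Distinct actions:", example_state.get_distinct_actions())
  print("Root chance policy:", example_state.chance_policy)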
| open_spiel-master | open_spiel/python/examples/meta_cfr/sequential_games/openspiel_api.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Game tree structure for imperfect information games."""
import copy
from typing import Any, Dict, List, Text, Tuple
from open_spiel.python.examples.meta_cfr.sequential_games import cfr
from open_spiel.python.examples.meta_cfr.sequential_games import openspiel_api
class HistoryTreeNode:
"""Tree node to build game tree in cfr and do DFS traverse on game tree.
Attributes:
world_state: Current world state representation.
reach_probs: Reach probability of tree node for each player. We consider
reach probability for chance player, player 1 and player 2.
action_probs: Probability of actions taken by each player. We consider
actions taken by chance player, player 1 and player 2. Keys of this
dictionary are tuples of (action_chance, action_player_1,
action_player_2).
children: A dictionary from a taken action from this node to the
HistoryTreeNode of the child we derive in the game tree by taking an
action.
"""
def __init__(self, world_state: openspiel_api.WorldState):
self.world_state = world_state
self.reach_probs = [1.0, 1.0, 1.0]
self.action_probs = {}
self._value_p1 = 0
self.children = {}
def add_child(self, child_world_state: 'HistoryTreeNode',
actions: Tuple[int, int, int]) -> None:
"""Adds the child world state to dictionary of children of this node."""
self.children[actions] = child_world_state
def get_child(self, actions: Tuple[int, int, int]) -> 'HistoryTreeNode':
"""Returns a child world state that can be derived from an action."""
return self.children[actions]
class InfoState:
"""Information state class.
Attributes:
history_nodes: History of game as players play.
player: Index of current player.
    infostate_string: String representation of current information state.
world_state: Current game world state.
children: Children nodes of information states. The keys are actions, and
values are dictionary from information state string to information state
node.
    counterfactual_reach_prob: Counterfactual value of reach probability for
      the current information state.
player_reach_prob: Reach probability of information state for the acting
player.
counterfactual_action_values: Counterfactual values for each action in this
information state. This is a dictionary from action to counterfactual
value of this action in this information state.
counterfactual_value: Counterfactual value of this information state.
regret: Regret of each action for all player's actions in this information
state.
policy: Policy of player in this information state.
average_policy: Average policy for all player's actions in this information
state.
average_policy_weight_sum: Sum of weighted average policy. This is used to
normalize average policy and derive policy in this information state.
"""
def __init__(self, world_state: openspiel_api.WorldState, player: int,
infostate_string: Text):
self.history_nodes = []
self.player = player
self.infostate_string = infostate_string
self.world_state = world_state
self._actions = world_state.get_actions()
self.children = {a: {} for a in self._actions[player]}
self.counterfactual_reach_prob = 0.
self.player_reach_prob = 0.
self.counterfactual_action_values = {}
self.counterfactual_value = 0
self.regret = {a: 0. for a in self._actions[player]}
actions_count = len(self._actions[player])
self.policy = {
a: 1.0 / actions_count for a in world_state.get_actions()[player]
}
self.average_policy = {a: 0. for a in self._actions[player]}
self.average_policy_weight_sum = 0.
def add_history_node(self, history_node: HistoryTreeNode) -> None:
"""Updates history nodes with a given(last) history node."""
self.history_nodes.append(history_node)
def add_child_infostate(self, action: int,
infostate_child: Any) -> None:
"""Adds child infostate derived from taking an action to self.children."""
self.children[action][infostate_child.infostate_string] = infostate_child
def get_actions(self) -> List[int]:
"""Returns legal actions in current information state for current player."""
return self.history_nodes[0].world_state.get_actions()[self.player]
def is_terminal(self) -> bool:
"""Returns True if information state is terminal, False otherwise."""
return self.history_nodes[0].world_state.is_terminal()
class GameTree:
"""Game tree class to build for CFR-based algorithms.
Attributes:
first_history_node: Root node of game tree.
infostate_nodes: List of information state nodes for each player (including
chance player).
all_infostates_map: List of dictionaries (mapping from information state
      string representation to information state object) for each player
(including chance player).
"""
def __init__(self, first_history_node: HistoryTreeNode,
infostate_nodes: List[InfoState],
all_infostates_map: List[Dict[str, InfoState]]):
self.first_history_node = first_history_node
self.infostate_nodes = infostate_nodes
self.all_infostates_map = all_infostates_map
def build_tree_dfs(
world_state: openspiel_api.WorldState,
all_infostates_map: List[Dict[str, InfoState]]
) -> Tuple[HistoryTreeNode, List[InfoState]]:
"""Builds the game tree by DFS traversal.
Args:
world_state: An openspiel game world state representation that will be the
root of game tree.
all_infostates_map: List of dictionaries (mapping from information state
      string representation to information state object) for each player
      (including chance player). This list will be empty when this function is
      called, and it will be populated during the DFS tree traversal.
Returns:
tree_node: Root of the game tree built in DFS traversal.
infostate_nodes: List of information state (root) tree node for each player
(including chance player).
"""
tree_node = HistoryTreeNode(world_state)
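  # Index 0 is an unused placeholder for the chance player; entries 1 and 2 are
  # replaced below with the players' infostate nodes.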
infostate_nodes = [
InfoState(world_state, 1, world_state.get_infostate_string(1)),
InfoState(world_state, 1, world_state.get_infostate_string(1)),
InfoState(world_state, 2, world_state.get_infostate_string(2))
]
for p in [cfr.Players.PLAYER_1, cfr.Players.PLAYER_2]:
infostate_string = world_state.get_infostate_string(p)
if infostate_string not in all_infostates_map[p]:
all_infostates_map[p][infostate_string] = InfoState(
world_state, p, infostate_string)
infostate = all_infostates_map[p][infostate_string]
infostate.add_history_node(tree_node)
infostate_nodes[p] = infostate
actions = world_state.get_actions()
actions_chance, actions_p1, actions_p2 = actions
for action_chance in actions_chance:
for action_p1 in actions_p1:
for action_p2 in actions_p2:
child_state = copy.deepcopy(world_state)
child_state.apply_actions((action_chance, action_p1, action_p2))
child_tree_node, child_infostates = build_tree_dfs(
child_state, all_infostates_map)
tree_node.add_child(child_tree_node,
(action_chance, action_p1, action_p2))
infostate_nodes[1].add_child_infostate(action_p1, child_infostates[1])
infostate_nodes[2].add_child_infostate(action_p2, child_infostates[2])
return tree_node, infostate_nodes
def build_game_tree(world_state: openspiel_api.WorldState) -> GameTree:
"""Builds game tree for CFR-based algorithms.
Args:
world_state: An openspiel game world state representation that will be the
root of game tree.
Returns:
Calls GameTree function which returns the following:
tree_node: Root of the game tree built in DFS traversal.
infostate_nodes: List of information state (root) tree node for each player
(including chance player).
"""
all_infostates_map = [{}, {}, {}]
first_history_node, infostate_nodes = build_tree_dfs(world_state,
all_infostates_map)
return GameTree(first_history_node, infostate_nodes, all_infostates_map)
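# Illustrative usage (a minimal sketch mirroring the accompanying tests; the
# game choice is an arbitrary example):
if __name__ == "__main__":
  example_tree = build_game_tree(
      openspiel_api.WorldState(
          game_name="kuhn_poker", config={"players": 2}, perturbation=False))
  print("Player 1 infostates:", len(example_tree.all_infostates_map[1]))
  print("Player 2 infostates:", len(example_tree.all_infostates_map[2]))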
| open_spiel-master | open_spiel/python/examples/meta_cfr/sequential_games/game_tree_utils.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for meta-cfr algorithm."""
import functools
from typing import List
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
from open_spiel.python.examples.meta_cfr.sequential_games.typing import ApplyFn
from open_spiel.python.examples.meta_cfr.sequential_games.typing import InfostateMapping
from open_spiel.python.examples.meta_cfr.sequential_games.typing import InfostateNode
from open_spiel.python.examples.meta_cfr.sequential_games.typing import Params
def get_batched_input(input_list: List[jax.Array],
infostate_list: List[InfostateNode],
illegal_action_list: List[List[int]], batch_size: int):
"""Returns list of function arguments extended to be consistent with batch size.
Args:
input_list: List of DeviceArrays.
infostate_list: List of information state nodes.
illegal_action_list: List of List of illegal actions. Each internal list
contains illegal actions in each information state.
batch_size: Batch size.
Returns:
input_list, infostate_list, and illegal_action_list with a size consistent
    with the batch size (the sizes of the returned lists are multiples of the
    batch size).
"""
items_to_sample = batch_size * (int(len(input_list) / batch_size) +
1) - len(input_list)
idx_sample = np.random.choice(len(input_list), items_to_sample)
input_zip = np.array(
list(zip(input_list, infostate_list, illegal_action_list)),
dtype=object)
input_lst_sample = input_zip[idx_sample]
input_sample, infostate_sample, illegal_action_sample = zip(*input_lst_sample)
input_list.extend(list(input_sample))
infostate_list.extend(list(infostate_sample))
illegal_action_list.extend(list(illegal_action_sample))
return input_list, infostate_list, illegal_action_list
def mask(cfvalues: np.ndarray, infoset: List[InfostateNode], num_actions: int,
batch_size: int) -> np.ndarray:
"""Returns counterfactual values of legal actions and put 0 for illegal ones.
Args:
cfvalues: Numpy array of counterfactual values.
infoset: List of information states.
num_actions: Number of possible actions to take.
batch_size: Batch size.
Returns:
    Masked counterfactual values. Counterfactual values of legal actions are
    kept as passed in, while illegal actions are assigned a counterfactual
    value of 0.
"""
legal_actions = [[infoset[i].world_state.state.legal_actions()] *
cfvalues.shape[1] for i in range(batch_size)]
masked_cfvalues = np.zeros(shape=[batch_size, cfvalues.shape[1], num_actions])
for i in range(cfvalues.shape[0]):
for j in range(cfvalues.shape[1]):
np.put(masked_cfvalues[i][j], legal_actions[i][j], cfvalues[i][j])
return np.stack(masked_cfvalues)
def filter_terminal_infostates(infostates_map: InfostateMapping):
"""Filter out terminal infostate_node values."""
return {
infostate_string: infostate_node
for infostate_string, infostate_node in infostates_map.items()
if not infostate_node.is_terminal()
}
def get_network_output(net_apply: ApplyFn, net_params: Params,
net_input: np.ndarray, illegal_actions: List[int],
key: hk.PRNGSequence) -> jax.Array:
"""Returns policy generated as output of model.
Args:
net_apply: Haiku apply function.
net_params: Haiku network parameters.
net_input: Input of the model.
illegal_actions: List of illegal actions we use to mask the model output.
key: Pseudo random number.
Returns:
Policy generated by model. Model output is filtered to mask illegal actions.
"""
net_output = jax.jit(net_apply)(net_params, key, net_input)
if illegal_actions:
net_output = jnp.delete(net_output, np.array(illegal_actions))
return jax.nn.softmax(net_output)
def get_network_output_batched(
net_apply: ApplyFn, net_params: Params, net_input: np.ndarray,
all_illegal_actions: List[List[int]],
key: hk.PRNGSequence) -> List[jax.Array]:
"""Returns policy of batched input generated as output of model.
Args:
net_apply: Haiku apply function.
net_params: Haiku network parameters.
net_input: Input of the model.
all_illegal_actions: Nested list of illegal actions we use to mask the model
output. Length of outer list is equal to the batch size.
key: Pseudo random number.
Returns:
List of policies generated by model. Model output is filtered to mask
illegal actions. Length of the returned list is equal to batch size.
"""
net_output_batched = net_apply(net_params, next(key), net_input)
batch_policies = []
for i, illegal_actions in enumerate(all_illegal_actions):
net_output = net_output_batched[i]
if illegal_actions:
net_output = jnp.expand_dims(
jnp.delete(net_output, jnp.array(illegal_actions)), axis=0)
batch_policies.append(jax.nn.softmax(net_output))
return batch_policies
@functools.partial(jax.jit, static_argnums=(2, 3, 4, 5, 7, 9))
def meta_loss(net_params: Params, cfvalues: np.ndarray,
net_apply: ApplyFn, steps: int, num_all_actions: int,
infosets: List[InfostateNode],
infostate_map: InfostateMapping,
batch_size: int,
key: hk.PRNGSequence,
use_infostate_representation: bool = True) -> float:
"""Meta learning loss function.
Args:
net_params: Network parameters.
cfvalues: Counterfactual values.
net_apply: Haiku apply function.
steps: Number of unrolling steps.
num_all_actions: Number of actions.
infosets: List of information states.
infostate_map: Mapping from information state string to information state
node.
batch_size: Batch size.
key: Pseudo random number.
use_infostate_representation: Boolean value indicating if information state
representation is used as part of input.
Returns:
Mean meta learning loss value.
"""
regret_sum = np.zeros(shape=[batch_size, 1, num_all_actions])
total_loss = 0
step = 0
infostate_str_one_hot = jnp.expand_dims(
jnp.array([
jax.nn.one_hot(infostate_map[infoset.infostate_string],
len(infostate_map)) for infoset in infosets
]),
axis=1)
def scan_body(carry, x):
del x # Unused
regret_sum, current_step, total_loss = carry
average_regret = regret_sum / (current_step + 1)
if use_infostate_representation:
net_input = jnp.concatenate((average_regret, infostate_str_one_hot),
axis=-1)
else:
net_input = average_regret
next_step_x = jax.jit(net_apply)(net_params, key, net_input)
strategy = jax.nn.softmax(next_step_x)
value = jnp.matmul(
jnp.array(cfvalues), jnp.transpose(strategy, axes=[0, 2, 1]))
    current_regret = jnp.array(cfvalues) - value
    regret_sum += jnp.expand_dims(jnp.mean(current_regret, axis=1), axis=1)
current_loss = jnp.mean(
jnp.max(
jax.numpy.concatenate(
[regret_sum,
jnp.zeros(shape=[batch_size, 1, 1])],
axis=-1),
axis=-1))
total_loss += current_loss
current_step += 1
return (regret_sum, current_step, total_loss), None
(regret_sum, step, total_loss), _ = jax.lax.scan(
scan_body, (regret_sum, step, total_loss), None, length=steps)
return total_loss
| open_spiel-master | open_spiel/python/examples/meta_cfr/sequential_games/utils.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataset generation for meta-CFR algorithm."""
from typing import List, Tuple
import numpy as np
from open_spiel.python.examples.meta_cfr.sequential_games.typing import InfostateNode
class Dataset:
"""Dataset class to generate data for training meta-CFR model."""
def __init__(self, train_dataset: List[Tuple[List[List[float]],
InfostateNode]],
batch_size: int):
self._train_dataset = np.array(train_dataset, dtype=object)
self._size = self._train_dataset.shape[0]
self._batch_size = batch_size
def get_batch(self):
while True:
np.random.shuffle(self._train_dataset)
idx_sample = np.random.choice(self._size, self._batch_size)
next_batch = self._train_dataset[idx_sample, :]
yield next_batch
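# Illustrative usage (a minimal sketch; the tuples below use placeholder data
# in place of real (counterfactual-values, infostate) pairs):
if __name__ == "__main__":
  placeholder_data = [([[0.1, 0.9]], None) for _ in range(10)]
  batch_iterator = Dataset(placeholder_data, batch_size=4).get_batch()
  print(next(batch_iterator).shape)  # (4, 2): four (cfvalues, infostate) pairs.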
| open_spiel-master | open_spiel/python/examples/meta_cfr/sequential_games/dataset_generator.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Counterfactual Regret Minimization."""
import copy
import enum
from typing import List, Tuple
from open_spiel.python.examples.meta_cfr.sequential_games.typing import GameTree
from open_spiel.python.examples.meta_cfr.sequential_games.typing import HistoryNode
from open_spiel.python.examples.meta_cfr.sequential_games.typing import InfostateMapping
from open_spiel.python.examples.meta_cfr.sequential_games.typing import InfostateNode
class Players(enum.IntEnum):
CHANCE_PLAYER = 0
PLAYER_1 = 1
PLAYER_2 = 2
def compute_reach_probabilities(
history_tree_node: HistoryNode,
all_infostates_map: List[InfostateMapping]) -> None:
"""Computes reach probabilities for game tree information states.
This function initializes counterfactual_reach_prob and player_reach_prob for
all information states in the game tree, and then these values will be
calculated in compute_reach_probability_dfs.
Args:
history_tree_node: Game tree HistoryTreeNode which is the root of the game
tree.
all_infostates_map: List of dictionaries (mapping from information state
      string representation to information state object) for each player
      (including chance player). This list will be empty when this function is
      called for the first time, and it will be populated during the DFS tree
      traversal.
"""
for infostate in (list(all_infostates_map[Players.PLAYER_1].values()) +
list(all_infostates_map[Players.PLAYER_2].values())):
infostate.counterfactual_reach_prob = 0.
infostate.player_reach_prob = 0.
compute_reach_probability_dfs(history_tree_node, all_infostates_map)
def compute_reach_probability_dfs(
history_tree_node: HistoryNode,
all_infostates_map: List[InfostateMapping]) -> None:
"""Calculate reach probability values in dfs tree.
This function is initially called by compute_reach_probabilities and it
computes reach probabilities for all information state nodes in the tree by
traversing the tree using DFS.
Args:
history_tree_node: Game tree HistoryTreeNode which is the root of the game
tree.
all_infostates_map: List of dictionaries (mapping from information state
      string representation to information state object) for each player
      (including chance player). This list will be empty when this function is
      called for the first time, and it will be populated during the DFS tree
      traversal.
"""
world_state = history_tree_node.world_state
infostate_p1 = all_infostates_map[Players.PLAYER_1][
world_state.get_infostate_string(Players.PLAYER_1)]
infostate_p2 = all_infostates_map[Players.PLAYER_2][
world_state.get_infostate_string(Players.PLAYER_2)]
infostate_p1.counterfactual_reach_prob += history_tree_node.reach_probs[
0] * history_tree_node.reach_probs[Players.PLAYER_2]
infostate_p2.counterfactual_reach_prob += history_tree_node.reach_probs[
0] * history_tree_node.reach_probs[Players.PLAYER_1]
if infostate_p1.player_reach_prob != 0.:
assert (infostate_p1.player_reach_prob == history_tree_node.reach_probs[
Players.PLAYER_1])
if infostate_p2.player_reach_prob != 0.:
assert (infostate_p2.player_reach_prob == history_tree_node.reach_probs[
Players.PLAYER_2])
infostate_p1.player_reach_prob = history_tree_node.reach_probs[
Players.PLAYER_1]
infostate_p2.player_reach_prob = history_tree_node.reach_probs[
Players.PLAYER_2]
policy_p1 = infostate_p1.policy
policy_p2 = infostate_p2.policy
policy_chance = world_state.chance_policy
actions_chance, actions_p1, actions_p2 = world_state.get_actions()
for action_chance in actions_chance:
for action_p1 in actions_p1:
for action_p2 in actions_p2:
history_tree_node.action_probs[(
action_chance, action_p1, action_p2)] = policy_chance[
action_chance] * policy_p1[action_p1] * policy_p2[action_p2]
child_node = history_tree_node.get_child(
(action_chance, action_p1, action_p2))
child_node.reach_probs[
Players.CHANCE_PLAYER] = history_tree_node.reach_probs[
Players.CHANCE_PLAYER] * policy_chance[action_chance]
child_node.reach_probs[
Players.PLAYER_1] = history_tree_node.reach_probs[
Players.PLAYER_1] * policy_p1[action_p1]
child_node.reach_probs[
Players.PLAYER_2] = history_tree_node.reach_probs[
Players.PLAYER_2] * policy_p2[action_p2]
compute_reach_probability_dfs(child_node, all_infostates_map)
def _get_opponent(player: int) -> int:
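  """Returns the opponent of `player`, mapping PLAYER_1 to PLAYER_2 and back."""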
return -1 * player + 3
def compute_best_response_values(infostate: InfostateNode) -> float:
"""Returns best response value for an infostate.
Args:
infostate: Information state.
Returns:
    Best response value, which is the maximum action value over all possible
    actions from infostate. If the information state is a terminal node in the
    game tree, this value is calculated from the history nodes' reach
    probabilities for the player and the opponent, and the utility of the
    terminal node. If infostate is not terminal, this value is calculated
    recursively.
"""
if infostate.is_terminal():
terminal_utility = 0
for history_node in infostate.history_nodes:
terminal_utility += history_node.reach_probs[
0] * history_node.reach_probs[_get_opponent(
infostate.player)] * history_node.world_state.get_utility(
infostate.player)
return terminal_utility
action_values = {action: 0 for action in infostate.get_actions()}
infostate_actions = infostate.get_actions()
for action in infostate_actions:
action_values[action] = 0
for child in infostate.children[action].values():
action_values[action] += compute_best_response_values(child)
return max(action_values.values())
def compute_best_response_policy(infostate: InfostateNode) -> float:
"""Calculate best response policy and returns best response value of infostate.
Args:
infostate: Information state.
Returns:
Best response value similar to what compute_best_response_values returns.
"""
if infostate.is_terminal():
terminal_utility = 0
for history_node in infostate.history_nodes:
terminal_utility += history_node.reach_probs[
0] * history_node.reach_probs[_get_opponent(
infostate.player)] * history_node.world_state.get_utility(
infostate.player)
return terminal_utility
action_values = {action: 0 for action in infostate.get_actions()}
infostate_actions = infostate.get_actions()
for action in infostate_actions:
action_values[action] = 0
for child in infostate.children[action].values():
action_values[action] += compute_best_response_policy(child)
infostate.policy = {action: 0 for action in infostate.get_actions()}
max_action_value = max(action_values.values())
for action in infostate_actions:
if action_values[action] == max_action_value:
infostate.policy[action] = 1
break
return max_action_value
def compute_counterfactual_values(infostate: InfostateNode) -> float:
"""Returns cfr value for an infostate.
Args:
infostate: Information state.
Returns:
    Counterfactual value for infostate. This value is calculated from the
    action values and the policy over all legal actions of the information
    state.
"""
if infostate.is_terminal():
terminal_utility = 0
for history_node in infostate.history_nodes:
terminal_utility += history_node.reach_probs[
0] * history_node.reach_probs[_get_opponent(
infostate.player)] * history_node.world_state.get_utility(
infostate.player)
return terminal_utility
infostate_actions = infostate.get_actions()
action_values = {action: 0 for action in infostate_actions}
for action in infostate_actions:
for child in infostate.children[action].values():
action_values[action] += compute_counterfactual_values(child)
infostate.counterfactual_action_values = action_values
counterfactual_value = 0
for action in infostate_actions:
counterfactual_value += infostate.policy[action] * action_values[action]
infostate.counterfactual_value = counterfactual_value
return counterfactual_value
def update_regrets(infostates: List[InfostateNode]) -> None:
"""Updates regret value for each infostate in infostates.
Args:
infostates: List of information states
"""
for infostate in infostates:
for action in infostate.get_actions():
current_regret = infostate.counterfactual_action_values[
action] - infostate.counterfactual_value
infostate.regret[action] += current_regret
def compute_next_policy(infostates: List[InfostateNode],
cfr_plus: bool = False) -> None:
"""Computes policy of next iteration for each infostate in infostates.
Args:
infostates: List of information states.
cfr_plus: A flag which specifies if we update policy according to CFR or
CFR-plus algorithm. True if we use CFR-plus, otherwise we use CFR.
"""
for infostate in infostates:
infostate_actions = infostate.get_actions()
if cfr_plus:
for action in infostate_actions:
infostate.regret[action] = max(infostate.regret[action], 0.0)
positive_regret_sum = 0
for action in infostate_actions:
if infostate.regret[action] > 0:
positive_regret_sum += infostate.regret[action]
actions_count = len(infostate_actions)
next_policy = {a: 1.0 / actions_count for a in infostate_actions}
if positive_regret_sum > 0:
for action in infostate_actions:
next_policy[action] = max(infostate.regret[action],
0) / positive_regret_sum
infostate.policy = next_policy
def cumulate_average_policy(infostates: List[InfostateNode],
weight: int = 1) -> None:
"""Cumulates policy values of each infostate in infostates.
For each infostate, we update average policy and the sum of weighted average
policy.
Args:
infostates: List of information states.
weight: The weight we use to update policy and sum of weighted average
policy. For CFR algorithm, weight is 1.
"""
for infostate in infostates:
for action in infostate.get_actions():
infostate.average_policy[
action] += infostate.player_reach_prob * infostate.policy[
action] * weight
infostate.average_policy_weight_sum += infostate.player_reach_prob * weight
def normalize_average_policy(infostates) -> None:
"""Updates infostate policy by normalizing average policy.
Args:
infostates: List of information states that their policies will be updated.
"""
for infostate in infostates:
for action in infostate.get_actions():
infostate.policy[action] = infostate.average_policy[
action] / infostate.average_policy_weight_sum
def best_response_counterfactual_regret_minimization_iteration(
history_tree_node: HistoryNode,
infostate_nodes: List[InfostateNode],
all_infostates_map: List[InfostateMapping]) -> None:
"""Calculates CFRBR values.
Args:
history_tree_node: Game tree HistoryTreeNode which is the root of the game
tree.
infostate_nodes: List of all information state nodes.
all_infostates_map: List of dictionaries (mapping from information state
      string representation to information state object) for each player
      (including chance player). This list will be empty when this function is
      called for the first time, and it will be populated during the DFS tree
      traversal.
"""
compute_next_policy(list(all_infostates_map[Players.PLAYER_1].values()))
compute_reach_probabilities(history_tree_node, all_infostates_map)
cumulate_average_policy(list(all_infostates_map[Players.PLAYER_1].values()))
compute_best_response_policy(infostate_nodes[Players.PLAYER_2])
compute_reach_probabilities(history_tree_node, all_infostates_map)
compute_counterfactual_values(infostate_nodes[Players.PLAYER_1])
update_regrets(list(all_infostates_map[Players.PLAYER_1].values()))
def counterfactual_regret_minimization_iteration(
cfr_game_tree: GameTree,
alternating_updates: bool,
cfr_plus: bool,
weight: int = 1) -> None:
"""Performs one iteration of CFR or CFR-plus.
Args:
cfr_game_tree: Game tree for an imperfect information game. This game tree
is game tree of an openspiel game.
alternating_updates: Boolean flag to do alternative update for players
policies or not. If True, alternative updates will be performed (meaning
we first calculate average policy, counterfactual values, regrets and next
policy for player 1 first and then calculate all of these for player 2),
otherwise both players average policies, counterfactual values and regrets
will be updated right after each other (meaning, for example we calculate
next_policy of player 1, and then next policy of player 2. Then, we
calculate average policy for player 1 and then average policy for player
2, and so on).
cfr_plus: Boolean flag indicating if we perform CFR algorithm or CFR-plus.
If True, we perform CFR-plus algorithm, otherwise we perform CFR
algorithm.
weight: The weight we use to update policy and sum of weighted average
policy.
"""
if alternating_updates:
compute_reach_probabilities(cfr_game_tree.first_history_node,
cfr_game_tree.all_infostates_map)
cumulate_average_policy(
list(cfr_game_tree.all_infostates_map[Players.PLAYER_1].values()),
weight)
compute_counterfactual_values(
cfr_game_tree.infostate_nodes[Players.PLAYER_1])
update_regrets(
list(cfr_game_tree.all_infostates_map[Players.PLAYER_1].values()))
compute_next_policy(
list(cfr_game_tree.all_infostates_map[Players.PLAYER_1].values()),
cfr_plus)
compute_reach_probabilities(cfr_game_tree.first_history_node,
cfr_game_tree.all_infostates_map)
cumulate_average_policy(
list(cfr_game_tree.all_infostates_map[Players.PLAYER_2].values()),
weight)
compute_counterfactual_values(
cfr_game_tree.infostate_nodes[Players.PLAYER_2])
update_regrets(
list(cfr_game_tree.all_infostates_map[Players.PLAYER_2].values()))
compute_next_policy(
list(cfr_game_tree.all_infostates_map[Players.PLAYER_2].values()),
cfr_plus)
else:
compute_next_policy(
list(cfr_game_tree.all_infostates_map[Players.PLAYER_1].values()),
cfr_plus)
compute_next_policy(
list(cfr_game_tree.all_infostates_map[Players.PLAYER_2].values()),
cfr_plus)
compute_reach_probabilities(cfr_game_tree.first_history_node,
cfr_game_tree.all_infostates_map)
cumulate_average_policy(
list(cfr_game_tree.all_infostates_map[Players.PLAYER_1].values()),
weight)
cumulate_average_policy(
list(cfr_game_tree.all_infostates_map[Players.PLAYER_2].values()),
weight)
compute_counterfactual_values(
cfr_game_tree.infostate_nodes[Players.PLAYER_1])
compute_counterfactual_values(
cfr_game_tree.infostate_nodes[Players.PLAYER_2])
update_regrets(
list(cfr_game_tree.all_infostates_map[Players.PLAYER_1].values()))
update_regrets(
list(cfr_game_tree.all_infostates_map[Players.PLAYER_2].values()))
def compute_cfr_plus_values(cfr_game_tree: GameTree,
steps: int) -> Tuple[List[float], List[float]]:
"""Performs CFR-plus algorithm for a given number of steps.
Args:
cfr_game_tree: Game tree for an imperfect information game. This game tree
is game tree of an openspiel game.
steps: Number of CFR-plus steps.
Returns:
best_response_values_p1: List of best response values for player 1. The
length of this list is equal to the number of steps.
best_response_values_p2: List of best response values for player 2. The
length of this list is equal to the number of steps.
"""
best_response_values_p1 = []
best_response_values_p2 = []
for i in range(steps):
counterfactual_regret_minimization_iteration(
cfr_game_tree=cfr_game_tree,
alternating_updates=True,
cfr_plus=True,
weight=i + 1)
game_tree_copy = copy.deepcopy(cfr_game_tree)
normalize_average_policy(
game_tree_copy.all_infostates_map[Players.PLAYER_1].values())
normalize_average_policy(
game_tree_copy.all_infostates_map[Players.PLAYER_2].values())
compute_reach_probabilities(game_tree_copy.first_history_node,
game_tree_copy.all_infostates_map)
best_response_values_p1.append(
compute_best_response_values(
game_tree_copy.infostate_nodes[Players.PLAYER_1]))
best_response_values_p2.append(
compute_best_response_values(
game_tree_copy.infostate_nodes[Players.PLAYER_2]))
return best_response_values_p1, best_response_values_p2
def compute_cfr_values(cfr_game_tree: GameTree,
steps: int) -> Tuple[List[float], List[float]]:
"""Performs CFR algorithm for a given number of steps.
Args:
cfr_game_tree: Game tree for an imperfect information game. This game tree
is game tree of an openspiel game.
    steps: Number of CFR steps.
Returns:
best_response_values_p1: List of best response values for player 1. The
length of this list is equal to the number of steps.
best_response_values_p2: List of best response values for player 2. The
length of this list is equal to the number of steps.
"""
best_response_values_p1 = []
best_response_values_p2 = []
for _ in range(steps):
counterfactual_regret_minimization_iteration(
cfr_game_tree=cfr_game_tree, alternating_updates=False, cfr_plus=False)
normalize_average_policy(
cfr_game_tree.all_infostates_map[Players.PLAYER_1].values())
normalize_average_policy(
cfr_game_tree.all_infostates_map[Players.PLAYER_2].values())
compute_reach_probabilities(cfr_game_tree.first_history_node,
cfr_game_tree.all_infostates_map)
best_response_values_p1.append(
compute_best_response_values(
cfr_game_tree.infostate_nodes[Players.PLAYER_1]))
best_response_values_p2.append(
compute_best_response_values(
cfr_game_tree.infostate_nodes[Players.PLAYER_2]))
return best_response_values_p1, best_response_values_p2
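# Illustrative usage (a minimal sketch mirroring the accompanying tests; the
# game and the number of iterations are arbitrary example values):
if __name__ == "__main__":
  from open_spiel.python.examples.meta_cfr.sequential_games import game_tree_utils
  from open_spiel.python.examples.meta_cfr.sequential_games import openspiel_api
  example_tree = game_tree_utils.build_game_tree(
      openspiel_api.WorldState(
          game_name="kuhn_poker", config={"players": 2}, perturbation=False))
  br_values_p1, br_values_p2 = compute_cfr_values(example_tree, 10)
  print("Approximate exploitability:",
        (br_values_p1[-1] + br_values_p2[-1]) / 2)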
| open_spiel-master | open_spiel/python/examples/meta_cfr/sequential_games/cfr.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Typing definitions."""
from typing import Any, Dict, Callable
import jax.numpy as jnp
import optax
from open_spiel.python.examples.meta_cfr.sequential_games import game_tree_utils
PyTree = Any
Params = PyTree
ApplyFn = Callable[..., jnp.ndarray]
OptState = optax.OptState
GameTree = game_tree_utils.GameTree
InfostateNode = game_tree_utils.InfoState
InfostateMapping = Dict[str, InfostateNode]
HistoryNode = game_tree_utils.HistoryTreeNode
| open_spiel-master | open_spiel/python/examples/meta_cfr/sequential_games/typing.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main file to train and evaluate meta-cfr agent, cfr and cfr-plus."""
from typing import Sequence
from absl import app
from absl import flags
import numpy as np
from open_spiel.python.examples.meta_cfr.sequential_games import cfr
from open_spiel.python.examples.meta_cfr.sequential_games import evaluation
from open_spiel.python.examples.meta_cfr.sequential_games import game_tree_utils
from open_spiel.python.examples.meta_cfr.sequential_games import meta_learning
from open_spiel.python.examples.meta_cfr.sequential_games import openspiel_api
FLAGS = flags.FLAGS
flags.DEFINE_integer("random_seed_size", 30, "Number of random seeds to use.")
def main(argv: Sequence[str]) -> None:
del argv
config = {"players": FLAGS.players}
random_seeds_eval = np.random.choice(
np.array(list(range(1000))), size=FLAGS.random_seed_size, replace=False)
# Train a meta-cfr agent
meta_cfr_agent = meta_learning.MetaCFRRegretAgent(
training_epochs=1,
meta_learner_training_epochs=FLAGS.meta_learner_training_epochs,
game_name=FLAGS.game,
game_config=config,
perturbation=FLAGS.perturbation,
seed=FLAGS.random_seed,
model_type=FLAGS.model_type,
best_response=True)
meta_cfr_agent.train()
cfr_vals = np.zeros((FLAGS.meta_learner_training_epochs,))
cfr_plus_vals = np.zeros((FLAGS.meta_learner_training_epochs,))
for seed in list(random_seeds_eval):
# Evaluate a meta-cfr agent
world_state = openspiel_api.WorldState(
FLAGS.game, config, perturbation=True, random_seed=seed)
meta_cfr_vals = evaluation.CFRBREvaluation(meta_cfr_agent, world_state)
# Evaluate a cfr plus agent
game_tree = game_tree_utils.build_game_tree(
openspiel_api.WorldState(
FLAGS.game,
config,
perturbation=FLAGS.perturbation,
random_seed=seed))
_, cfr_plus_vals = cfr.compute_cfr_plus_values(
game_tree, FLAGS.meta_learner_training_epochs)
# Evaluate a cfr agent
game_tree = game_tree_utils.build_game_tree(
openspiel_api.WorldState(
FLAGS.game,
config,
perturbation=FLAGS.perturbation,
random_seed=seed))
_, cfr_vals = cfr.compute_cfr_values(
game_tree, FLAGS.meta_learner_training_epochs)
print("Evaluation seed:", random_seeds_eval)
print("Meta_cfr agent:", meta_cfr_vals)
print("cfr_plus agent:", cfr_plus_vals)
print("cfr agent:", cfr_vals)
if __name__ == "__main__":
app.run(main)
| open_spiel-master | open_spiel/python/examples/meta_cfr/sequential_games/main.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API for world state representation."""
import abc
from typing import Any, List, Text, Tuple
class WorldState(abc.ABC):
"""Base class for world state representation.
We can implement this class for world state representations in both
sequential and matrix games.
Attributes:
chance_policy: Policy of the chance node in the game tree.
"""
def __init__(self):
self.chance_policy = {0: 1.0}
self._history = []
@abc.abstractmethod
def get_distinct_actions(self) -> List[int]:
"""Returns all possible distinct actions in the game."""
pass
@abc.abstractmethod
def is_terminal(self) -> bool:
"""Returns if the current state of the game is a terminal or not."""
pass
@abc.abstractmethod
def get_actions(self) -> List[Any]:
"""Returns the list of legal actions from the current state of the game."""
pass
@abc.abstractmethod
def get_infostate_string(self, player: int) -> Text:
"""Returns the string form of infostate representation of a given player.
Args:
player: Index of player.
Returns:
      The string representation of the given player's infostate.
"""
pass
@abc.abstractmethod
def apply_actions(self, actions: Tuple[int, int, int]) -> None:
"""Applies the current player's action to change state of the world.
At each timestep of the game, the state of the world is changing by the
current player's action. At the same time, we should update self._history
with actions, by appending actions to self._history.
Args:
      actions: Tuple of actions for the chance node, player 1, and player 2.
"""
pass
@abc.abstractmethod
def get_utility(self, player: int) -> float:
"""Returns player's utility when the game reaches to a terminal state.
Args:
player: Index of player.
Returns:
      The utility the player receives at a terminal state of the game.
"""
pass
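# Illustrative sketch only, not part of the original API: a tiny matching
# pennies implementation of the abstract WorldState above. The class name, the
# payoff convention, and the exact shape returned by `get_actions` are
# assumptions made for this example rather than established conventions.
class ExampleMatchingPenniesState(WorldState):
  """Minimal concrete WorldState used purely to illustrate the interface."""
  def get_distinct_actions(self) -> List[int]:
    return [0, 1]  # 0 = heads, 1 = tails.
  def is_terminal(self) -> bool:
    return bool(self._history)  # A single joint action ends the game.
  def get_actions(self) -> List[Any]:
    # A dummy chance action followed by both players' legal actions.
    return [[0], [0, 1], [0, 1]]
  def get_infostate_string(self, player: int) -> Text:
    return "matching_pennies_player_%d" % player
  def apply_actions(self, actions: Tuple[int, int, int]) -> None:
    self._history.append(actions)
  def get_utility(self, player: int) -> float:
    _, p1_action, p2_action = self._history[-1]
    payoff = 1.0 if p1_action == p2_action else -1.0
    return payoff if player == 0 else -payoff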
| open_spiel-master | open_spiel/python/examples/meta_cfr/sequential_games/world_representation.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements Deep CFR Algorithm.
See https://arxiv.org/abs/1811.00164.
The algorithm defines `advantage` and `strategy` networks that compute
advantages used to do regret matching across information sets and to
approximate the strategy profile of the game. To train these networks, a
fixed-capacity memory (a reservoir buffer in this implementation, though other
data structures may be used) accumulates samples.
"""
import collections
import math
import random
import numpy as np
from scipy import stats
import torch
from torch import nn
import torch.nn.functional as F
from open_spiel.python import policy
import pyspiel
AdvantageMemory = collections.namedtuple(
"AdvantageMemory", "info_state iteration advantage action")
StrategyMemory = collections.namedtuple(
"StrategyMemory", "info_state iteration strategy_action_probs")
class SonnetLinear(nn.Module):
"""A Sonnet linear module.
Always includes biases and only supports ReLU activations.
"""
def __init__(self, in_size, out_size, activate_relu=True):
"""Creates a Sonnet linear layer.
Args:
in_size: (int) number of inputs
out_size: (int) number of outputs
activate_relu: (bool) whether to include a ReLU activation layer
"""
super(SonnetLinear, self).__init__()
self._activate_relu = activate_relu
self._in_size = in_size
self._out_size = out_size
# stddev = 1.0 / math.sqrt(self._in_size)
# mean = 0
# lower = (-2 * stddev - mean) / stddev
# upper = (2 * stddev - mean) / stddev
# # Weight initialization inspired by Sonnet's Linear layer,
# # which cites https://arxiv.org/abs/1502.03167v3
# # pytorch default: initialized from
# # uniform(-sqrt(1/in_features), sqrt(1/in_features))
self._weight = None
self._bias = None
self.reset()
def forward(self, tensor):
y = F.linear(tensor, self._weight, self._bias)
return F.relu(y) if self._activate_relu else y
def reset(self):
stddev = 1.0 / math.sqrt(self._in_size)
mean = 0
lower = (-2 * stddev - mean) / stddev
upper = (2 * stddev - mean) / stddev
# Weight initialization inspired by Sonnet's Linear layer,
# which cites https://arxiv.org/abs/1502.03167v3
# pytorch default: initialized from
# uniform(-sqrt(1/in_features), sqrt(1/in_features))
self._weight = nn.Parameter(
torch.Tensor(
stats.truncnorm.rvs(
lower,
upper,
loc=mean,
scale=stddev,
size=[self._out_size, self._in_size])))
self._bias = nn.Parameter(torch.zeros([self._out_size]))
class MLP(nn.Module):
"""A simple network built from nn.linear layers."""
def __init__(self,
input_size,
hidden_sizes,
output_size,
activate_final=False):
"""Create the MLP.
Args:
input_size: (int) number of inputs
hidden_sizes: (list) sizes (number of units) of each hidden layer
output_size: (int) number of outputs
      activate_final: (bool) whether the final layer should include a ReLU
"""
super(MLP, self).__init__()
self._layers = []
# Hidden layers
for size in hidden_sizes:
self._layers.append(SonnetLinear(in_size=input_size, out_size=size))
input_size = size
# Output layer
self._layers.append(
SonnetLinear(
in_size=input_size,
out_size=output_size,
activate_relu=activate_final))
self.model = nn.ModuleList(self._layers)
def forward(self, x):
for layer in self.model:
x = layer(x)
return x
def reset(self):
for layer in self._layers:
layer.reset()
class ReservoirBuffer(object):
"""Allows uniform sampling over a stream of data.
This class supports the storage of arbitrary elements, such as observation
tensors, integer actions, etc.
See https://en.wikipedia.org/wiki/Reservoir_sampling for more details.
"""
def __init__(self, reservoir_buffer_capacity):
self._reservoir_buffer_capacity = reservoir_buffer_capacity
self._data = []
self._add_calls = 0
def add(self, element):
"""Potentially adds `element` to the reservoir buffer.
Args:
element: data to be added to the reservoir buffer.
"""
if len(self._data) < self._reservoir_buffer_capacity:
self._data.append(element)
else:
idx = np.random.randint(0, self._add_calls + 1)
if idx < self._reservoir_buffer_capacity:
self._data[idx] = element
self._add_calls += 1
def sample(self, num_samples):
"""Returns `num_samples` uniformly sampled from the buffer.
Args:
num_samples: `int`, number of samples to draw.
Returns:
An iterable over `num_samples` random elements of the buffer.
Raises:
      ValueError: If there are fewer than `num_samples` elements in the buffer.
"""
if len(self._data) < num_samples:
raise ValueError("{} elements could not be sampled from size {}".format(
num_samples, len(self._data)))
return random.sample(self._data, num_samples)
def clear(self):
self._data = []
self._add_calls = 0
def __len__(self):
return len(self._data)
def __iter__(self):
return iter(self._data)
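# Illustrative sketch only (not part of the original module): shows how
# `ReservoirBuffer` maintains a uniform sample over a stream larger than its
# capacity. The function name and the stream/capacity sizes are arbitrary
# choices made for this example.
def _reservoir_buffer_example(stream_length=1000, capacity=100):
  """Feeds a long stream into a small buffer and draws a uniform sample."""
  buffer = ReservoirBuffer(capacity)
  for element in range(stream_length):
    # After the full stream has been added, each element remains in the
    # buffer with probability capacity / stream_length.
    buffer.add(element)
  return buffer.sample(10)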
class DeepCFRSolver(policy.Policy):
"""Implements a solver for the Deep CFR Algorithm with PyTorch.
See https://arxiv.org/abs/1811.00164.
Define all networks and sampling buffers/memories. Derive losses & learning
steps. Initialize the game state and algorithmic variables.
Note: batch sizes default to `None` implying that training over the full
dataset in memory is done by default. To sample from the memories you
may set these values to something less than the full capacity of the
memory.
"""
def __init__(self,
game,
policy_network_layers=(256, 256),
advantage_network_layers=(128, 128),
num_iterations: int = 100,
num_traversals: int = 20,
learning_rate: float = 1e-4,
batch_size_advantage=None,
batch_size_strategy=None,
memory_capacity: int = int(1e6),
policy_network_train_steps: int = 1,
advantage_network_train_steps: int = 1,
reinitialize_advantage_networks: bool = True):
"""Initialize the Deep CFR algorithm.
Args:
game: Open Spiel game.
policy_network_layers: (list[int]) Layer sizes of strategy net MLP.
advantage_network_layers: (list[int]) Layer sizes of advantage net MLP.
num_iterations: (int) Number of training iterations.
num_traversals: (int) Number of traversals per iteration.
learning_rate: (float) Learning rate.
batch_size_advantage: (int or None) Batch size to sample from advantage
memories.
batch_size_strategy: (int or None) Batch size to sample from strategy
memories.
      memory_capacity: Number of samples that can be stored in memory.
policy_network_train_steps: Number of policy network training steps (per
iteration).
advantage_network_train_steps: Number of advantage network training steps
(per iteration).
reinitialize_advantage_networks: Whether to re-initialize the advantage
network before training on each iteration.
"""
all_players = list(range(game.num_players()))
super(DeepCFRSolver, self).__init__(game, all_players)
self._game = game
if game.get_type().dynamics == pyspiel.GameType.Dynamics.SIMULTANEOUS:
# `_traverse_game_tree` does not take into account this option.
raise ValueError("Simulatenous games are not supported.")
self._batch_size_advantage = batch_size_advantage
self._batch_size_strategy = batch_size_strategy
self._policy_network_train_steps = policy_network_train_steps
self._advantage_network_train_steps = advantage_network_train_steps
self._num_players = game.num_players()
self._root_node = self._game.new_initial_state()
self._embedding_size = len(self._root_node.information_state_tensor(0))
self._num_iterations = num_iterations
self._num_traversals = num_traversals
self._reinitialize_advantage_networks = reinitialize_advantage_networks
self._num_actions = game.num_distinct_actions()
self._iteration = 1
# Define strategy network, loss & memory.
self._strategy_memories = ReservoirBuffer(memory_capacity)
self._policy_network = MLP(self._embedding_size,
list(policy_network_layers),
self._num_actions)
# Illegal actions are handled in the traversal code where expected payoff
# and sampled regret is computed from the advantage networks.
self._policy_sm = nn.Softmax(dim=-1)
self._loss_policy = nn.MSELoss()
self._optimizer_policy = torch.optim.Adam(
self._policy_network.parameters(), lr=learning_rate)
# Define advantage network, loss & memory. (One per player)
self._advantage_memories = [
ReservoirBuffer(memory_capacity) for _ in range(self._num_players)
]
self._advantage_networks = [
MLP(self._embedding_size, list(advantage_network_layers),
self._num_actions) for _ in range(self._num_players)
]
self._loss_advantages = nn.MSELoss(reduction="mean")
self._optimizer_advantages = []
for p in range(self._num_players):
self._optimizer_advantages.append(
torch.optim.Adam(
self._advantage_networks[p].parameters(), lr=learning_rate))
self._learning_rate = learning_rate
@property
def advantage_buffers(self):
return self._advantage_memories
@property
def strategy_buffer(self):
return self._strategy_memories
def clear_advantage_buffers(self):
for p in range(self._num_players):
self._advantage_memories[p].clear()
def reinitialize_advantage_network(self, player):
self._advantage_networks[player].reset()
self._optimizer_advantages[player] = torch.optim.Adam(
self._advantage_networks[player].parameters(), lr=self._learning_rate)
def reinitialize_advantage_networks(self):
for p in range(self._num_players):
self.reinitialize_advantage_network(p)
def solve(self):
"""Solution logic for Deep CFR.
Traverses the game tree, while storing the transitions for training
advantage and policy networks.
Returns:
1. (nn.Module) Instance of the trained policy network for inference.
2. (list of floats) Advantage network losses for
each player during each iteration.
3. (float) Policy loss.
"""
advantage_losses = collections.defaultdict(list)
for _ in range(self._num_iterations):
for p in range(self._num_players):
for _ in range(self._num_traversals):
self._traverse_game_tree(self._root_node, p)
if self._reinitialize_advantage_networks:
# Re-initialize advantage network for player and train from scratch.
self.reinitialize_advantage_network(p)
# Re-initialize advantage networks and train from scratch.
advantage_losses[p].append(self._learn_advantage_network(p))
self._iteration += 1
# Train policy network.
policy_loss = self._learn_strategy_network()
return self._policy_network, advantage_losses, policy_loss
def _traverse_game_tree(self, state, player):
"""Performs a traversal of the game tree.
Over a traversal the advantage and strategy memories are populated with
computed advantage values and matched regrets respectively.
Args:
state: Current OpenSpiel game state.
player: (int) Player index for this traversal.
Returns:
(float) Recursively returns expected payoffs for each action.
"""
expected_payoff = collections.defaultdict(float)
if state.is_terminal():
      # At terminal states, return this player's return.
return state.returns()[player]
elif state.is_chance_node():
# If this is a chance node, sample an action
chance_outcome, chance_proba = zip(*state.chance_outcomes())
action = np.random.choice(chance_outcome, p=chance_proba)
return self._traverse_game_tree(state.child(action), player)
elif state.current_player() == player:
sampled_regret = collections.defaultdict(float)
# Update the policy over the info set & actions via regret matching.
_, strategy = self._sample_action_from_advantage(state, player)
for action in state.legal_actions():
expected_payoff[action] = self._traverse_game_tree(
state.child(action), player)
cfv = 0
for a_ in state.legal_actions():
cfv += strategy[a_] * expected_payoff[a_]
for action in state.legal_actions():
sampled_regret[action] = expected_payoff[action]
sampled_regret[action] -= cfv
sampled_regret_arr = [0] * self._num_actions
for action in sampled_regret:
sampled_regret_arr[action] = sampled_regret[action]
self._advantage_memories[player].add(
AdvantageMemory(state.information_state_tensor(), self._iteration,
sampled_regret_arr, action))
return cfv
else:
other_player = state.current_player()
_, strategy = self._sample_action_from_advantage(state, other_player)
# Recompute distribution for numerical errors.
probs = np.array(strategy)
probs /= probs.sum()
sampled_action = np.random.choice(range(self._num_actions), p=probs)
self._strategy_memories.add(
StrategyMemory(
state.information_state_tensor(other_player), self._iteration,
strategy))
return self._traverse_game_tree(state.child(sampled_action), player)
def _sample_action_from_advantage(self, state, player):
"""Returns an info state policy by applying regret-matching.
Args:
state: Current OpenSpiel game state.
player: (int) Player index over which to compute regrets.
Returns:
1. (list) Advantage values for info state actions indexed by action.
2. (list) Matched regrets, prob for actions indexed by action.
"""
info_state = state.information_state_tensor(player)
legal_actions = state.legal_actions(player)
with torch.no_grad():
state_tensor = torch.FloatTensor(np.expand_dims(info_state, axis=0))
raw_advantages = self._advantage_networks[player](state_tensor)[0].numpy()
advantages = [max(0., advantage) for advantage in raw_advantages]
cumulative_regret = np.sum([advantages[action] for action in legal_actions])
matched_regrets = np.array([0.] * self._num_actions)
if cumulative_regret > 0.:
for action in legal_actions:
matched_regrets[action] = advantages[action] / cumulative_regret
else:
matched_regrets[max(legal_actions, key=lambda a: raw_advantages[a])] = 1
return advantages, matched_regrets
def action_probabilities(self, state):
"""Computes action probabilities for the current player in state.
Args:
state: (pyspiel.State) The state to compute probabilities for.
Returns:
(dict) action probabilities for a single batch.
"""
cur_player = state.current_player()
legal_actions = state.legal_actions(cur_player)
info_state_vector = np.array(state.information_state_tensor())
if len(info_state_vector.shape) == 1:
info_state_vector = np.expand_dims(info_state_vector, axis=0)
with torch.no_grad():
logits = self._policy_network(torch.FloatTensor(info_state_vector))
probs = self._policy_sm(logits).numpy()
return {action: probs[0][action] for action in legal_actions}
def _learn_advantage_network(self, player):
"""Compute the loss on sampled transitions and perform a Q-network update.
If there are not enough elements in the buffer, no loss is computed and
`None` is returned instead.
Args:
player: (int) player index.
Returns:
(float) The average loss over the advantage network.
"""
for _ in range(self._advantage_network_train_steps):
if self._batch_size_advantage:
if self._batch_size_advantage > len(self._advantage_memories[player]):
## Skip if there aren't enough samples
return None
samples = self._advantage_memories[player].sample(
self._batch_size_advantage)
else:
samples = self._advantage_memories[player]
info_states = []
advantages = []
iterations = []
for s in samples:
info_states.append(s.info_state)
advantages.append(s.advantage)
iterations.append([s.iteration])
# Ensure some samples have been gathered.
if not info_states:
return None
self._optimizer_advantages[player].zero_grad()
advantages = torch.FloatTensor(np.array(advantages))
iters = torch.FloatTensor(np.sqrt(np.array(iterations)))
outputs = self._advantage_networks[player](
torch.FloatTensor(np.array(info_states)))
loss_advantages = self._loss_advantages(iters * outputs,
iters * advantages)
loss_advantages.backward()
self._optimizer_advantages[player].step()
return loss_advantages.detach().numpy()
def _learn_strategy_network(self):
"""Compute the loss over the strategy network.
Returns:
(float) The average loss obtained on this batch of transitions or `None`.
"""
for _ in range(self._policy_network_train_steps):
if self._batch_size_strategy:
if self._batch_size_strategy > len(self._strategy_memories):
## Skip if there aren't enough samples
return None
samples = self._strategy_memories.sample(self._batch_size_strategy)
else:
samples = self._strategy_memories
info_states = []
action_probs = []
iterations = []
for s in samples:
info_states.append(s.info_state)
action_probs.append(s.strategy_action_probs)
iterations.append([s.iteration])
self._optimizer_policy.zero_grad()
iters = torch.FloatTensor(np.sqrt(np.array(iterations)))
ac_probs = torch.FloatTensor(np.array(np.squeeze(action_probs)))
logits = self._policy_network(torch.FloatTensor(np.array(info_states)))
outputs = self._policy_sm(logits)
loss_strategy = self._loss_policy(iters * outputs, iters * ac_probs)
loss_strategy.backward()
self._optimizer_policy.step()
return loss_strategy.detach().numpy()
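# A minimal end-to-end usage sketch (not part of the original module). It
# assumes the `kuhn_poker` game and deliberately tiny network and iteration
# settings so that it runs quickly; real experiments would use larger values.
def _deep_cfr_example():
  """Trains a small DeepCFRSolver on Kuhn poker and returns its losses."""
  game = pyspiel.load_game("kuhn_poker")
  solver = DeepCFRSolver(
      game,
      policy_network_layers=(16, 16),
      advantage_network_layers=(16, 16),
      num_iterations=2,
      num_traversals=2,
      learning_rate=1e-3)
  _, advantage_losses, policy_loss = solver.solve()
  # The trained average policy can then be queried per state through
  # `solver.action_probabilities(state)`.
  return advantage_losses, policy_loss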
| open_spiel-master | open_spiel/python/pytorch/deep_cfr.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.pytorch.deep_cfr."""
from absl import app
from absl import logging
from absl.testing import absltest
from absl.testing import parameterized
import torch
from open_spiel.python import policy
import pyspiel
from open_spiel.python.pytorch import deep_cfr
SEED = 24984617
class DeepCFRPyTorchTest(parameterized.TestCase):
@parameterized.parameters('leduc_poker', 'kuhn_poker', 'liars_dice')
def test_deep_cfr_runs(self, game_name):
game = pyspiel.load_game(game_name)
deep_cfr_solver = deep_cfr.DeepCFRSolver(
game,
policy_network_layers=(8, 4),
advantage_network_layers=(4, 2),
num_iterations=2,
num_traversals=2,
learning_rate=1e-3,
batch_size_advantage=None,
batch_size_strategy=None,
memory_capacity=1e7)
deep_cfr_solver.solve()
def test_matching_pennies_3p(self):
game = pyspiel.load_game_as_turn_based('matching_pennies_3p')
deep_cfr_solver = deep_cfr.DeepCFRSolver(
game,
policy_network_layers=(16, 8),
advantage_network_layers=(32, 16),
num_iterations=2,
num_traversals=2,
learning_rate=1e-3,
batch_size_advantage=None,
batch_size_strategy=None,
memory_capacity=1e7)
deep_cfr_solver.solve()
conv = pyspiel.nash_conv(
game,
policy.python_policy_to_pyspiel_policy(
policy.tabular_policy_from_callable(
game, deep_cfr_solver.action_probabilities)))
logging.info('Deep CFR in Matching Pennies 3p. NashConv: %.2f', conv)
def main(_):
torch.manual_seed(SEED)
absltest.main()
if __name__ == '__main__':
# Necessary to run main via app.run for internal tests.
app.run(main)
| open_spiel-master | open_spiel/python/pytorch/deep_cfr_pytorch_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.algorithms.dqn."""
import random
from absl.testing import absltest
import numpy as np
import torch
from open_spiel.python import rl_environment
import pyspiel
from open_spiel.python.pytorch import dqn
# A simple two-action game encoded as an EFG game. Going left gets -1, going
# right gets a +1.
SIMPLE_EFG_DATA = """
EFG 2 R "Simple single-agent problem" { "Player 1" } ""
p "ROOT" 1 1 "ROOT" { "L" "R" } 0
t "L" 1 "Outcome L" { -1.0 }
t "R" 2 "Outcome R" { 1.0 }
"""
SEED = 24261711
class DQNTest(absltest.TestCase):
def test_simple_game(self):
game = pyspiel.load_efg_game(SIMPLE_EFG_DATA)
env = rl_environment.Environment(game=game)
agent = dqn.DQN(
0,
state_representation_size=game.information_state_tensor_shape()[0],
num_actions=game.num_distinct_actions(),
min_buffer_size_to_learn=10,
hidden_layers_sizes=[16],
replay_buffer_capacity=1000,
update_target_network_every=100,
learn_every=10,
discount_factor=0.99,
epsilon_decay_duration=1000,
batch_size=32,
epsilon_start=0.5,
epsilon_end=0.01)
total_eval_reward = 0
for _ in range(1000):
time_step = env.reset()
while not time_step.last():
agent_output = agent.step(time_step)
time_step = env.step([agent_output.action])
agent.step(time_step)
for _ in range(1000):
time_step = env.reset()
while not time_step.last():
agent_output = agent.step(time_step, is_evaluation=True)
time_step = env.step([agent_output.action])
total_eval_reward += time_step.rewards[0]
self.assertGreaterEqual(total_eval_reward, 250)
def test_run_tic_tac_toe(self):
env = rl_environment.Environment("tic_tac_toe")
state_size = env.observation_spec()["info_state"][0]
num_actions = env.action_spec()["num_actions"]
agents = [
dqn.DQN( # pylint: disable=g-complex-comprehension
player_id,
state_representation_size=state_size,
num_actions=num_actions,
hidden_layers_sizes=[16],
replay_buffer_capacity=10,
batch_size=5) for player_id in [0, 1]
]
time_step = env.reset()
while not time_step.last():
current_player = time_step.observations["current_player"]
current_agent = agents[current_player]
agent_output = current_agent.step(time_step)
time_step = env.step([agent_output.action])
for agent in agents:
agent.step(time_step)
def test_run_hanabi(self):
# Hanabi is an optional game, so check we have it before running the test.
game = "hanabi"
if game not in pyspiel.registered_names():
return
num_players = 3
env_configs = {
"players": num_players,
"max_life_tokens": 1,
"colors": 2,
"ranks": 3,
"hand_size": 2,
"max_information_tokens": 3,
"discount": 0.
}
env = rl_environment.Environment(game, **env_configs)
state_size = env.observation_spec()["info_state"][0]
num_actions = env.action_spec()["num_actions"]
agents = [
dqn.DQN( # pylint: disable=g-complex-comprehension
player_id,
state_representation_size=state_size,
num_actions=num_actions,
hidden_layers_sizes=[16],
replay_buffer_capacity=10,
batch_size=5) for player_id in range(num_players)
]
time_step = env.reset()
while not time_step.last():
current_player = time_step.observations["current_player"]
agent_output = [agent.step(time_step) for agent in agents]
time_step = env.step([agent_output[current_player].action])
for agent in agents:
agent.step(time_step)
if __name__ == "__main__":
random.seed(SEED)
torch.manual_seed(SEED)
np.random.seed(SEED)
absltest.main()
| open_spiel-master | open_spiel/python/pytorch/dqn_pytorch_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
import torch
import torch.nn as nn
# Note: this import needs to come before Tensorflow to fix a malloc error.
import pyspiel # pylint: disable=g-bad-import-order
from open_spiel.python.pytorch import rcfr
_GAME = pyspiel.load_game('kuhn_poker')
_BOOLEANS = [False, True]
_BATCH_SIZE = 12
SEED = 24984617
def _new_model():
return rcfr.DeepRcfrModel(
_GAME,
num_hidden_layers=1,
num_hidden_units=13,
num_hidden_factors=1,
use_skip_connections=True)
class RcfrTest(parameterized.TestCase, absltest.TestCase):
def setUp(self):
# pylint: disable=useless-super-delegation
super(RcfrTest, self).setUp()
def assertListAlmostEqual(self, list1, list2, delta=1e-06):
self.assertEqual(len(list1), len(list2))
for a, b in zip(list1, list2):
self.assertAlmostEqual(a, b, delta=delta)
def test_with_one_hot_action_features_single_state_vector(self):
information_state_features = [1., 2., 3.]
features = rcfr.with_one_hot_action_features(
information_state_features,
legal_actions=[0, 1],
num_distinct_actions=3)
np.testing.assert_array_equal([1., 2., 3., 1., 0., 0.], features[0])
np.testing.assert_array_equal([1., 2., 3., 0., 1., 0.], features[1])
features = rcfr.with_one_hot_action_features(
information_state_features,
legal_actions=[1, 2],
num_distinct_actions=3)
np.testing.assert_array_equal([1., 2., 3., 0., 1., 0.], features[0])
np.testing.assert_array_equal([1., 2., 3., 0., 0., 1.], features[1])
def test_sequence_features(self):
state = _GAME.new_initial_state()
while state.is_chance_node():
state.apply_action(state.legal_actions()[0])
assert len(state.legal_actions()) == 2
features = rcfr.sequence_features(state, 3)
x = state.information_state_tensor()
np.testing.assert_array_equal(x + [1., 0., 0.], features[0])
np.testing.assert_array_equal(x + [0., 1., 0.], features[1])
def test_num_features(self):
assert rcfr.num_features(_GAME) == 13
def test_root_state_wrapper_num_sequences(self):
root_state_wrapper = rcfr.RootStateWrapper(_GAME.new_initial_state())
assert root_state_wrapper.num_player_sequences[0] == 12
assert root_state_wrapper.num_player_sequences[1] == 12
def test_root_state_wrapper_sequence_indices(self):
root_state_wrapper = rcfr.RootStateWrapper(_GAME.new_initial_state())
self.assertEqual(
{
# Info state string -> initial sequence index map for player 1.
'0': 0,
'0pb': 2,
'1': 4,
'1pb': 6,
'2': 8,
'2pb': 10,
# Info state string -> initial sequence index map for player 2.
'1p': 0,
'1b': 2,
'2p': 4,
'2b': 6,
'0p': 8,
'0b': 10,
},
root_state_wrapper.info_state_to_sequence_idx)
def test_root_state_wrapper_sequence_features(self):
root_state_wrapper = rcfr.RootStateWrapper(_GAME.new_initial_state())
p1_info_state_features = [
[1., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 0., 1., 0., 0., 1., 0., 0., 1., 0., 0.],
[1., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0.],
[1., 0., 0., 1., 0., 1., 0., 0., 1., 0., 0.],
[1., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0.],
[1., 0., 0., 0., 1., 1., 0., 0., 1., 0., 0.],
]
p2_info_state_features = [
[0., 1., 0., 1., 0., 1., 0., 0., 0., 0., 0.],
[0., 1., 0., 1., 0., 0., 1., 0., 0., 0., 0.],
[0., 1., 0., 0., 1., 1., 0., 0., 0., 0., 0.],
[0., 1., 0., 0., 1., 0., 1., 0., 0., 0., 0.],
[0., 1., 1., 0., 0., 1., 0., 0., 0., 0., 0.],
[0., 1., 1., 0., 0., 0., 1., 0., 0., 0., 0.],
]
action_features = [[1., 0.], [0., 1.]]
expected_p1_sequence_features = [
p1_info_state_features[0] + action_features[0],
p1_info_state_features[0] + action_features[1],
p1_info_state_features[1] + action_features[0],
p1_info_state_features[1] + action_features[1],
p1_info_state_features[2] + action_features[0],
p1_info_state_features[2] + action_features[1],
p1_info_state_features[3] + action_features[0],
p1_info_state_features[3] + action_features[1],
p1_info_state_features[4] + action_features[0],
p1_info_state_features[4] + action_features[1],
p1_info_state_features[5] + action_features[0],
p1_info_state_features[5] + action_features[1],
]
expected_p2_sequence_features = [
p2_info_state_features[0] + action_features[0],
p2_info_state_features[0] + action_features[1],
p2_info_state_features[1] + action_features[0],
p2_info_state_features[1] + action_features[1],
p2_info_state_features[2] + action_features[0],
p2_info_state_features[2] + action_features[1],
p2_info_state_features[3] + action_features[0],
p2_info_state_features[3] + action_features[1],
p2_info_state_features[4] + action_features[0],
p2_info_state_features[4] + action_features[1],
p2_info_state_features[5] + action_features[0],
p2_info_state_features[5] + action_features[1],
]
np.testing.assert_array_equal(expected_p1_sequence_features,
root_state_wrapper.sequence_features[0])
np.testing.assert_array_equal(expected_p2_sequence_features,
root_state_wrapper.sequence_features[1])
def test_root_state_wrapper_sequence_terminal_values(self):
root_state_wrapper = rcfr.RootStateWrapper(_GAME.new_initial_state())
expected_terminal_values = {}
no_call_histories_p1_win = [
'2, 0, 0, 0', '2, 0, 1, 0', '0, 1, 1, 0', '1, 2, 1, 0', '1, 0, 1, 0',
'1, 0, 0, 0', '2, 1, 1, 0', '2, 1, 0, 0', '0, 2, 1, 0'
]
for h in no_call_histories_p1_win:
expected_terminal_values[h] = [1., -1.]
no_call_histories_p2_win = [
'0, 2, 0, 1, 0', '0, 1, 0, 0', '0, 1, 0, 1, 0', '0, 2, 0, 0',
'1, 2, 0, 0', '2, 0, 0, 1, 0', '1, 2, 0, 1, 0', '2, 1, 0, 1, 0',
'1, 0, 0, 1, 0'
]
for h in no_call_histories_p2_win:
expected_terminal_values[h] = [-1., 1.]
call_histories_p1_win = [
'1, 0, 1, 1', '2, 1, 1, 1', '2, 1, 0, 1, 1', '2, 0, 0, 1, 1',
'1, 0, 0, 1, 1', '2, 0, 1, 1'
]
for h in call_histories_p1_win:
expected_terminal_values[h] = [2., -2.]
call_histories_p2_win = [
'0, 2, 0, 1, 1', '0, 1, 0, 1, 1', '0, 1, 1, 1', '1, 2, 1, 1',
'1, 2, 0, 1, 1', '0, 2, 1, 1'
]
for h in call_histories_p2_win:
expected_terminal_values[h] = [-2., 2.]
self.assertEqual(
expected_terminal_values,
{k: v.tolist() for k, v in root_state_wrapper.terminal_values.items()})
def test_normalized_by_sum(self):
self.assertListAlmostEqual(
rcfr.normalized_by_sum([1., 2., 3., 4.]), [0.1, 0.2, 0.3, 0.4])
def test_counterfactual_regrets_and_reach_weights_value_error(self):
root = rcfr.RootStateWrapper(_GAME.new_initial_state())
# Initialize arbitrary weights to generate an arbitrary profile.
sequence_weights1_with_a_missing_sequence = [
0.4967141530112327,
0.0,
0.6476885381006925,
1.5230298564080254,
0.0,
0.0,
1.5792128155073915,
0.7674347291529088,
0.0,
0.5425600435859647,
0.0,
# 0.0,
]
# Ensure this player's policy is fully mixed so that each of player 1's
# information states are reached.
sequence_weights2 = [
0.24196227156603412,
0.1,
0.1,
0.1,
0.1,
0.3142473325952739,
0.1,
0.1,
1.465648768921554,
0.1,
0.06752820468792384,
0.1,
]
with self.assertRaises(ValueError):
root.counterfactual_regrets_and_reach_weights(
0, 1, sequence_weights1_with_a_missing_sequence, sequence_weights2)
def test_counterfactual_regrets_and_reach_weights(self):
root = rcfr.RootStateWrapper(_GAME.new_initial_state())
# Initialize arbitrary weights to generate an arbitrary profile.
sequence_weights1 = [
0.4967141530112327,
0.0,
0.6476885381006925,
1.5230298564080254,
0.0,
0.0,
1.5792128155073915,
0.7674347291529088,
0.0,
0.5425600435859647,
0.0,
0.0,
]
sequence_weights2 = [
0.24196227156603412,
0.0,
0.0,
0.0,
0.0,
0.3142473325952739,
0.0,
0.0,
1.465648768921554,
0.0,
0.06752820468792384,
0.0,
]
# These expected regrets and sequence weights were computed for the given
# sequence weights.
expected_regrets_given_sequence_weights = [
0.,
0.283604,
0.116937,
-0.049729,
-0.06892,
0.06892,
0.054506,
-0.112161,
-0.083333,
0.,
0.,
0.,
]
expected_reach_weights_given_sequence_weights = [
2.,
0.,
1.,
1.,
0.,
2.,
1.,
1.,
2.,
0.,
2.,
0.,
]
regrets, weights = root.counterfactual_regrets_and_reach_weights(
0, 1, sequence_weights1, sequence_weights2)
self.assertListAlmostEqual(
regrets,
expected_regrets_given_sequence_weights)
self.assertListAlmostEqual(
weights,
expected_reach_weights_given_sequence_weights)
def test_all_states(self):
states = rcfr.all_states(
_GAME.new_initial_state(),
depth_limit=-1,
include_terminals=False,
include_chance_states=False)
self.assertLen(list(states), 24)
states = rcfr.all_states(
_GAME.new_initial_state(),
depth_limit=-1,
include_terminals=True,
include_chance_states=False)
self.assertLen(list(states), 54)
states = rcfr.all_states(
_GAME.new_initial_state(),
depth_limit=-1,
include_terminals=False,
include_chance_states=True)
self.assertLen(list(states), 28)
states = rcfr.all_states(
_GAME.new_initial_state(),
depth_limit=-1,
include_terminals=True,
include_chance_states=True)
self.assertLen(list(states), 58)
def test_sequence_weights_to_tabular_profile(self):
root = rcfr.RootStateWrapper(_GAME.new_initial_state())
def policy_fn(state):
"""Generates a policy profile by treating sequence indices as weights."""
info_state = state.information_state_string()
sequence_offset = root.info_state_to_sequence_idx[info_state]
num_actions = len(state.legal_actions())
return rcfr.normalized_by_sum(
list(range(sequence_offset, sequence_offset + num_actions)))
profile = rcfr.sequence_weights_to_tabular_profile(root.root, policy_fn)
expected_profile = {
# Player 1
'0': [(0, 0.), (1, 1.)], # Sequences 0 and 1 (sums to 1)
'0pb': [(0, 0.4), (1, 0.6)], # Sequences 2 and 3 (sums to 5)
# Sequences 4 and 5 (sums to 9)
'1': [(0, 0.44444444444444442), (1, 0.55555555555555558)],
# Sequences 6 and 7 (sums to 13)
'1pb': [(0, 0.46153846153846156), (1, 0.53846153846153844)],
# Sequences 8 and 9 (sums to 17)
'2': [(0, 0.47058823529411764), (1, 0.52941176470588236)],
# Sequences 10 and 11 (sums to 21)
'2pb': [(0, 0.47619047619047616), (1, 0.52380952380952384)],
# Player 2
'1p': [(0, 0.), (1, 1.)], # Sequences 0 and 1 (sums to 1)
'1b': [(0, 0.4), (1, 0.6)], # Sequences 2 and 3 (sums to 5)
# Sequences 4 and 5 (sums to 9)
'2p': [(0, 0.44444444444444442), (1, 0.55555555555555558)],
# Sequences 6 and 7 (sums to 13)
'2b': [(0, 0.46153846153846156), (1, 0.53846153846153844)],
# Sequences 8 and 9 (sums to 17)
'0p': [(0, 0.47058823529411764), (1, 0.52941176470588236)],
# Sequences 10 and 11 (sums to 21)
'0b': [(0, 0.47619047619047616), (1, 0.52380952380952384)],
}
self.assertAlmostEqual(profile, expected_profile, delta=1e-06)
def test_cfr(self):
root = rcfr.RootStateWrapper(_GAME.new_initial_state())
num_half_iterations = 6
cumulative_regrets = [np.zeros(n) for n in root.num_player_sequences]
cumulative_reach_weights = [np.zeros(n) for n in root.num_player_sequences]
average_profile = root.sequence_weights_to_tabular_profile(
cumulative_reach_weights)
# parameterized.TestCase
self.assertGreater(pyspiel.nash_conv(_GAME, average_profile), 0.91)
regret_player = 0
for _ in range(num_half_iterations):
reach_weights_player = 1 if regret_player == 0 else 0
regrets, reach = root.counterfactual_regrets_and_reach_weights(
regret_player, reach_weights_player, *rcfr.relu(cumulative_regrets))
cumulative_regrets[regret_player] += regrets
cumulative_reach_weights[reach_weights_player] += reach
regret_player = reach_weights_player
average_profile = root.sequence_weights_to_tabular_profile(
cumulative_reach_weights)
self.assertLess(pyspiel.nash_conv(_GAME, average_profile), 0.27)
def test_rcfr_functions(self):
models = [_new_model() for _ in range(_GAME.num_players())]
root = rcfr.RootStateWrapper(_GAME.new_initial_state())
num_half_iterations = 4
num_epochs = 100
cumulative_regrets = [np.zeros(n) for n in root.num_player_sequences]
cumulative_reach_weights = [np.zeros(n) for n in root.num_player_sequences]
average_profile = root.sequence_weights_to_tabular_profile(
cumulative_reach_weights)
self.assertGreater(pyspiel.nash_conv(_GAME, average_profile), 0.91)
regret_player = 0
sequence_weights = [
model(root.sequence_features[player]).detach().numpy()
for player, model in enumerate(models)
]
for _ in range(num_half_iterations):
reach_weights_player = 1 if regret_player == 0 else 0
sequence_weights[reach_weights_player] = models[reach_weights_player](
root.sequence_features[reach_weights_player]).detach().numpy()
regrets, seq_probs = root.counterfactual_regrets_and_reach_weights(
regret_player, reach_weights_player, *sequence_weights)
cumulative_regrets[regret_player] += regrets
cumulative_reach_weights[reach_weights_player] += seq_probs
data = torch.utils.data.TensorDataset(
root.sequence_features[regret_player],
torch.unsqueeze(
torch.Tensor(cumulative_regrets[regret_player]), axis=1))
data = torch.utils.data.DataLoader(
data, batch_size=_BATCH_SIZE, shuffle=True)
loss_fn = nn.SmoothL1Loss()
optimizer = torch.optim.Adam(
models[regret_player].parameters(), lr=0.005, amsgrad=True)
for _ in range(num_epochs):
for x, y in data:
optimizer.zero_grad()
output = models[regret_player](x)
loss = loss_fn(output, y)
loss.backward()
optimizer.step()
regret_player = reach_weights_player
average_profile = root.sequence_weights_to_tabular_profile(
cumulative_reach_weights)
self.assertLess(pyspiel.nash_conv(_GAME, average_profile), 0.91)
@parameterized.parameters(list(itertools.product(_BOOLEANS, _BOOLEANS)))
def test_rcfr(self, bootstrap, truncate_negative):
num_epochs = 100
num_iterations = 2
models = [_new_model() for _ in range(_GAME.num_players())]
patient = rcfr.RcfrSolver(
_GAME, models, bootstrap=bootstrap, truncate_negative=truncate_negative)
def _train(model, data):
data = torch.utils.data.DataLoader(
data, batch_size=_BATCH_SIZE, shuffle=True)
loss_fn = nn.SmoothL1Loss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.005, amsgrad=True)
for _ in range(num_epochs):
for x, y in data:
optimizer.zero_grad()
output = model(x)
loss = loss_fn(output, y)
loss.backward()
optimizer.step()
average_policy = patient.average_policy()
self.assertGreater(pyspiel.nash_conv(_GAME, average_policy), 0.91)
for _ in range(num_iterations):
patient.evaluate_and_update_policy(_train)
average_policy = patient.average_policy()
self.assertLess(pyspiel.nash_conv(_GAME, average_policy), 0.91)
def test_reservior_buffer_insert(self):
buffer_size = 10
patient = rcfr.ReservoirBuffer(buffer_size)
x_buffer = []
for i in range(buffer_size):
patient.insert(i)
x_buffer.append(i)
assert patient.num_elements == len(x_buffer)
np.testing.assert_array_equal(x_buffer, patient.buffer)
assert patient.num_available_spaces() == 0
for i in range(buffer_size):
patient.insert(buffer_size + i)
assert patient.num_elements == buffer_size
def test_reservior_buffer_insert_all(self):
buffer_size = 10
patient = rcfr.ReservoirBuffer(buffer_size)
x_buffer = list(range(buffer_size))
patient.insert_all(x_buffer)
assert patient.num_elements == buffer_size
np.testing.assert_array_equal(x_buffer, patient.buffer)
assert patient.num_available_spaces() == 0
x_buffer = list(range(buffer_size, 2 * buffer_size))
patient.insert_all(x_buffer)
assert patient.num_elements == buffer_size
def test_rcfr_with_buffer(self):
buffer_size = 12
num_epochs = 100
num_iterations = 2
models = [_new_model() for _ in range(_GAME.num_players())]
patient = rcfr.ReservoirRcfrSolver(_GAME, models, buffer_size=buffer_size)
def _train(model, data):
data = torch.utils.data.DataLoader(
data, batch_size=_BATCH_SIZE, shuffle=True)
loss_fn = nn.SmoothL1Loss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.005, amsgrad=True)
for _ in range(num_epochs):
for x, y in data:
optimizer.zero_grad()
output = model(x)
loss = loss_fn(output, y)
loss.backward()
optimizer.step()
average_policy = patient.average_policy()
self.assertGreater(pyspiel.nash_conv(_GAME, average_policy), 0.91)
for _ in range(num_iterations):
patient.evaluate_and_update_policy(_train)
average_policy = patient.average_policy()
self.assertLess(pyspiel.nash_conv(_GAME, average_policy), 0.91)
if __name__ == '__main__':
torch.manual_seed(SEED)
absltest.main()
| open_spiel-master | open_spiel/python/pytorch/rcfr_pytorch_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| open_spiel-master | open_spiel/python/pytorch/__init__.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.algorithms.rpg."""
import itertools
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
import torch
from open_spiel.python import rl_environment
from open_spiel.python.algorithms import exploitability
from open_spiel.python.examples import kuhn_policy_gradient
import pyspiel
from open_spiel.python.pytorch import policy_gradient
from open_spiel.python.pytorch.losses import rl_losses
SEED = 24984617
class PolicyGradientTest(parameterized.TestCase, absltest.TestCase):
@parameterized.parameters(
itertools.product(("rpg", "qpg", "rm", "a2c", "neurd"),
("kuhn_poker", "leduc_poker")))
def test_run_game(self, loss_str, game_name):
env = rl_environment.Environment(game_name)
env.seed(SEED)
info_state_size = env.observation_spec()["info_state"][0]
num_actions = env.action_spec()["num_actions"]
agents = [
policy_gradient.PolicyGradient( # pylint: disable=g-complex-comprehension
player_id=player_id,
info_state_size=info_state_size,
num_actions=num_actions,
loss_str=loss_str,
hidden_layers_sizes=[32, 32],
batch_size=16,
entropy_cost=0.001,
critic_learning_rate=0.01,
pi_learning_rate=0.01,
num_critic_before_pi=4) for player_id in [0, 1]
]
for _ in range(2):
time_step = env.reset()
while not time_step.last():
current_player = time_step.observations["current_player"]
current_agent = agents[current_player]
agent_output = current_agent.step(time_step)
time_step = env.step([agent_output.action])
for agent in agents:
agent.step(time_step)
def test_neurd_kuhn(self):
env = rl_environment.Environment("kuhn_poker")
env.seed(SEED)
info_state_size = env.observation_spec()["info_state"][0]
num_actions = env.action_spec()["num_actions"]
agents = [
policy_gradient.PolicyGradient( # pylint: disable=g-complex-comprehension
player_id=player_id,
info_state_size=info_state_size,
num_actions=num_actions,
loss_str="neurd",
hidden_layers_sizes=[32],
batch_size=16,
entropy_cost=0.001,
critic_learning_rate=0.01,
pi_learning_rate=0.01,
num_critic_before_pi=4) for player_id in [0, 1]
]
expl_policies_avg = kuhn_policy_gradient.PolicyGradientPolicies(env, agents)
for _ in range(100):
time_step = env.reset()
while not time_step.last():
current_player = time_step.observations["current_player"]
current_agent = agents[current_player]
agent_output = current_agent.step(time_step)
time_step = env.step([agent_output.action])
for agent in agents:
agent.step(time_step)
expl = exploitability.exploitability(env.game, expl_policies_avg)
# Check the exploitability is less than the target upper bound.
self.assertLess(expl, 0.7)
def test_run_hanabi(self):
# Hanabi is an optional game, so check we have it before running the test.
game = "hanabi"
if game not in pyspiel.registered_names():
return
num_players = 3
env_configs = {
"players": num_players,
"max_life_tokens": 1,
"colors": 2,
"ranks": 3,
"hand_size": 2,
"max_information_tokens": 3,
"discount": 0.99
}
env = rl_environment.Environment(game, **env_configs)
env.seed(SEED)
info_state_size = env.observation_spec()["info_state"][0]
num_actions = env.action_spec()["num_actions"]
agents = [
policy_gradient.PolicyGradient( # pylint: disable=g-complex-comprehension
player_id=player_id,
info_state_size=info_state_size,
num_actions=num_actions,
hidden_layers_sizes=[8, 8],
batch_size=16,
entropy_cost=0.001,
critic_learning_rate=0.001,
pi_learning_rate=0.001,
num_critic_before_pi=4) for player_id in range(num_players)
]
time_step = env.reset()
while not time_step.last():
current_player = time_step.observations["current_player"]
agent_output = [agent.step(time_step) for agent in agents]
time_step = env.step([agent_output[current_player].action])
for agent in agents:
agent.step(time_step)
def test_loss_modes(self):
loss_dict = {
"qpg": rl_losses.BatchQPGLoss,
"rpg": rl_losses.BatchRPGLoss,
"rm": rl_losses.BatchRMLoss,
"a2c": rl_losses.BatchA2CLoss,
"neurd": rl_losses.BatchNeuRDLoss,
}
for loss_str, loss_class in loss_dict.items():
agent_by_str = policy_gradient.PolicyGradient(
player_id=0,
info_state_size=32,
num_actions=2,
loss_str=loss_str,
loss_class=None)
agent_by_class = policy_gradient.PolicyGradient(
player_id=0,
info_state_size=32,
num_actions=2,
loss_str=None,
loss_class=loss_class)
self.assertEqual(agent_by_str._loss_class, agent_by_class._loss_class)
if __name__ == "__main__":
np.random.seed(SEED)
torch.manual_seed(SEED)
absltest.main()
| open_spiel-master | open_spiel/python/pytorch/policy_gradient_pytorch_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.algorithms.nfsp."""
import random
from absl.testing import absltest
import torch
from open_spiel.python import rl_environment
from open_spiel.python.pytorch import nfsp
SEED = 24984617
class NFSPTest(absltest.TestCase):
def test_run_kuhn(self):
env = rl_environment.Environment("kuhn_poker")
state_size = env.observation_spec()["info_state"][0]
num_actions = env.action_spec()["num_actions"]
agents = [
nfsp.NFSP( # pylint: disable=g-complex-comprehension
player_id,
state_representation_size=state_size,
num_actions=num_actions,
hidden_layers_sizes=[16],
reservoir_buffer_capacity=10,
anticipatory_param=0.1) for player_id in [0, 1]
]
for unused_ep in range(10):
time_step = env.reset()
while not time_step.last():
current_player = time_step.observations["current_player"]
current_agent = agents[current_player]
agent_output = current_agent.step(time_step)
time_step = env.step([agent_output.action])
for agent in agents:
agent.step(time_step)
class ReservoirBufferTest(absltest.TestCase):
def test_reservoir_buffer_add(self):
reservoir_buffer = nfsp.ReservoirBuffer(reservoir_buffer_capacity=10)
self.assertEmpty(reservoir_buffer)
reservoir_buffer.add("entry1")
self.assertLen(reservoir_buffer, 1)
reservoir_buffer.add("entry2")
self.assertLen(reservoir_buffer, 2)
self.assertIn("entry1", reservoir_buffer)
self.assertIn("entry2", reservoir_buffer)
def test_reservoir_buffer_max_capacity(self):
reservoir_buffer = nfsp.ReservoirBuffer(reservoir_buffer_capacity=2)
reservoir_buffer.add("entry1")
reservoir_buffer.add("entry2")
reservoir_buffer.add("entry3")
self.assertLen(reservoir_buffer, 2)
def test_reservoir_buffer_sample(self):
replay_buffer = nfsp.ReservoirBuffer(reservoir_buffer_capacity=3)
replay_buffer.add("entry1")
replay_buffer.add("entry2")
replay_buffer.add("entry3")
samples = replay_buffer.sample(3)
self.assertIn("entry1", samples)
self.assertIn("entry2", samples)
self.assertIn("entry3", samples)
if __name__ == "__main__":
random.seed(SEED)
torch.manual_seed(SEED)
absltest.main()
| open_spiel-master | open_spiel/python/pytorch/nfsp_pytorch_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
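"""Tests for open_spiel.python.pytorch.neurd."""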
from absl.testing import absltest
import torch
import torch.nn.functional as F
import pyspiel
from open_spiel.python.pytorch import neurd
_GAME = pyspiel.load_game('kuhn_poker')
def _new_model():
return neurd.DeepNeurdModel(
_GAME,
num_hidden_layers=1,
num_hidden_units=13,
num_hidden_factors=1,
use_skip_connections=True,
autoencode=True)
class NeurdTest(absltest.TestCase):
def setUp(self):
super(NeurdTest, self).setUp()
torch.manual_seed(42)
def test_neurd(self):
num_iterations = 2
models = [_new_model() for _ in range(_GAME.num_players())]
solver = neurd.CounterfactualNeurdSolver(_GAME, models)
average_policy = solver.average_policy()
self.assertGreater(pyspiel.nash_conv(_GAME, average_policy), 0.91)
def _train(model, data):
neurd.train(
model=model,
data=data,
batch_size=12,
step_size=10.0,
autoencoder_loss=F.huber_loss)
for _ in range(num_iterations):
solver.evaluate_and_update_policy(_train)
average_policy = solver.average_policy()
self.assertLess(pyspiel.nash_conv(_GAME, average_policy), 0.91)
if __name__ == '__main__':
absltest.main()
| open_spiel-master | open_spiel/python/pytorch/neurd_pytorch_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements an Ephemeral Value Adjustment Agent.
See https://arxiv.org/abs/1810.08163.
The algorithm queries trajectories from a replay buffer based on similarities
to embedding representations and uses a parametric model to compute values for
counterfactual state-action pairs when integrating across those trajectories.
Finally, a weighted average between the parametric (DQN in this case) and the
non-parametric model is used to compute the policy.
"""
# pylint: disable=protected-access
import collections
import copy
import numpy as np
import torch
from open_spiel.python import rl_agent
from open_spiel.python.pytorch import dqn
MEM_KEY_NAME = "embedding"
ValueBufferElement = collections.namedtuple("ValueElement", "embedding value")
ReplayBufferElement = collections.namedtuple(
"ReplayElement", "embedding info_state action reward next_info_state "
"is_final_step legal_actions_mask")
# TODO(author3) Refactor into data structures lib.
class QueryableFixedSizeRingBuffer(dqn.ReplayBuffer):
"""ReplayBuffer of fixed size with a FIFO replacement policy.
Stored transitions can be sampled uniformly. This extends the DQN replay
buffer by allowing the contents to be fetched by L2 proximity to a query
value.
  The underlying data structure is a ring buffer, allowing O(1) adding and
  sampling.
"""
def knn(self, key, key_name, k, trajectory_len=1):
"""Computes top-k neighbours based on L2 distance.
Args:
key: (np.array) key value to query memory.
key_name: (str) attribute name of key in memory elements.
k: (int) number of neighbours to fetch.
trajectory_len: (int) length of trajectory to fetch from replay buffer.
Returns:
List of tuples (L2 negative distance, BufferElement) sorted in increasing
      order by the negative L2 distances from the key.
"""
distances = [(np.linalg.norm(getattr(sample, key_name) - key, 2,
axis=0), sample) for sample in self._data]
return sorted(distances, key=lambda v: -v[0])[:k]
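# Illustrative sketch only (not part of the original module): shows how the
# queryable buffer above can be filled with embedded value entries and then
# queried by L2 distance. The embedding size, capacity, and number of stored
# elements are arbitrary choices made for this example.
def _queryable_buffer_example(num_elements=50, embedding_size=4):
  """Stores value entries with random embeddings and queries 5 of them."""
  buffer = QueryableFixedSizeRingBuffer(100)
  for _ in range(num_elements):
    buffer.add(
        ValueBufferElement(
            embedding=np.random.rand(embedding_size), value=0.0))
  query = np.random.rand(embedding_size)
  # Returns (distance, element) pairs for the k retrieved entries.
  return buffer.knn(query, MEM_KEY_NAME, k=5)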
class EVAAgent(object):
"""Implements a solver for Ephemeral VAlue Adjustment.
See https://arxiv.org/abs/1810.08163.
Define all networks and sampling buffers/memories. Derive losses & learning
steps. Initialize the game state and algorithmic variables.
"""
def __init__(self,
game,
player_id,
state_size,
num_actions,
embedding_network_layers=(128,),
embedding_size=16,
dqn_hidden_layers=(128, 128),
batch_size=16,
trajectory_len=10,
num_neighbours=5,
learning_rate=1e-4,
mixing_parameter=0.9,
memory_capacity=int(1e6),
discount_factor=1.0,
update_target_network_every=1000,
epsilon_start=1.0,
epsilon_end=0.1,
epsilon_decay_duration=int(1e4),
embedding_as_parametric_input=False):
"""Initialize the Ephemeral VAlue Adjustment algorithm.
Args:
game: (rl_environment.Environment) Open Spiel game.
player_id: (int) Player id for this player.
state_size: (int) Size of info state vector.
num_actions: (int) number of actions.
embedding_network_layers: (list[int]) Layer sizes of strategy net MLP.
embedding_size: (int) Size of memory embeddings.
dqn_hidden_layers: (list(int)) MLP layer sizes of DQN network.
batch_size: (int) Size of batches for DQN learning steps.
trajectory_len: (int) Length of trajectories from replay buffer.
num_neighbours: (int) Number of neighbours to fetch from replay buffer.
learning_rate: (float) Learning rate.
mixing_parameter: (float) Value mixing parameter between 0 and 1.
      memory_capacity: Number of samples that can be stored in memory.
discount_factor: (float) Discount factor for Q-Learning.
update_target_network_every: How often to update DQN target network.
epsilon_start: (float) Starting epsilon-greedy value.
epsilon_end: (float) Final epsilon-greedy value.
epsilon_decay_duration: (float) Number of steps over which epsilon decays.
embedding_as_parametric_input: (bool) Whether we use embeddings as input
to the parametric model.
"""
assert (mixing_parameter >= 0 and mixing_parameter <= 1)
self._game = game
self.player_id = player_id
self._env = game
self._num_actions = num_actions
self._info_state_size = state_size
self._embedding_size = embedding_size
self._lambda = mixing_parameter
self._trajectory_len = trajectory_len
self._num_neighbours = num_neighbours
self._discount = discount_factor
self._epsilon_start = epsilon_start
self._epsilon_end = epsilon_end
self._epsilon_decay_duration = epsilon_decay_duration
self._last_time_step = None
self._last_action = None
self._embedding_as_parametric_input = embedding_as_parametric_input
self._embedding_network = dqn.MLP(self._info_state_size,
list(embedding_network_layers),
embedding_size)
# The DQN agent requires this be an integer.
if not isinstance(memory_capacity, int):
raise ValueError("Memory capacity not an integer.")
# Initialize the parametric & non-parametric Q-networks.
self._agent = dqn.DQN(
player_id,
state_representation_size=self._info_state_size,
num_actions=self._num_actions,
hidden_layers_sizes=list(dqn_hidden_layers),
replay_buffer_capacity=memory_capacity,
replay_buffer_class=QueryableFixedSizeRingBuffer,
batch_size=batch_size,
learning_rate=learning_rate,
update_target_network_every=update_target_network_every,
learn_every=batch_size,
discount_factor=1.0,
epsilon_start=1.0,
epsilon_end=0.1,
epsilon_decay_duration=int(1e6))
# Initialize Value Buffers - Fetch Replay buffers from agents.
self._value_buffer = QueryableFixedSizeRingBuffer(memory_capacity)
self._replay_buffer = self._agent.replay_buffer
# Initialize non-parametric & EVA Q-values.
self._v_np = collections.defaultdict(float)
self._q_np = collections.defaultdict(lambda: [0] * self._num_actions)
self._q_eva = collections.defaultdict(lambda: [0] * self._num_actions)
@property
def env(self):
return self._env
@property
def loss(self):
return self._agent.loss
def _add_transition_value(self, infostate_embedding, value):
"""Adds the embedding and value to the ValueBuffer.
Args:
      infostate_embedding: (np.array) embedding vector.
      value: (float) Value associated with the state embedding.
"""
transition = ValueBufferElement(embedding=infostate_embedding, value=value)
self._value_buffer.add(transition)
def _add_transition_replay(self, infostate_embedding, time_step):
"""Adds the new transition using `time_step` to the replay buffer.
Adds the transition from `self._prev_timestep` to `time_step` by
`self._prev_action`.
Args:
      infostate_embedding: embedding vector.
time_step: an instance of rl_environment.TimeStep.
"""
prev_timestep = self._last_time_step
assert prev_timestep is not None
legal_actions = (
prev_timestep.observations["legal_actions"][self.player_id])
legal_actions_mask = np.zeros(self._num_actions)
legal_actions_mask[legal_actions] = 1.0
reward = time_step.rewards[self.player_id] if time_step.rewards else 0.0
transition = ReplayBufferElement(
embedding=infostate_embedding,
info_state=(prev_timestep.observations["info_state"][self.player_id]),
action=self._last_action,
reward=reward,
next_info_state=time_step.observations["info_state"][self.player_id],
is_final_step=float(time_step.last()),
legal_actions_mask=legal_actions_mask)
self._replay_buffer.add(transition)
def step(self, time_step, is_evaluation=False):
"""Returns the action to be taken and updates the value functions.
Args:
time_step: an instance of rl_environment.TimeStep.
is_evaluation: bool, whether this is a training or evaluation call.
Returns:
A `rl_agent.StepOutput` containing the action probs and chosen action.
"""
# Act step: don't act at terminal info states.
if not time_step.last():
info_state = time_step.observations["info_state"][self.player_id]
legal_actions = time_step.observations["legal_actions"][self.player_id]
epsilon = self._get_epsilon(self._agent.step_counter, is_evaluation)
# Sample an action from EVA via epsilon greedy policy.
action, probs = self._epsilon_greedy(self._q_eva[tuple(info_state)],
legal_actions, epsilon)
# Update Step: Only with transitions and not when evaluating.
if (not is_evaluation and self._last_time_step is not None):
info_state = self._last_time_step.observations["info_state"][
self.player_id]
legal_actions = self._last_time_step.observations["legal_actions"][
self.player_id]
epsilon = self._get_epsilon(self._agent.step_counter, is_evaluation)
# Get embedding.
self._info_state = torch.Tensor(np.expand_dims(info_state, axis=0))
infostate_embedding = self._embedding_network(
self._info_state).detach()[0]
neighbours_value = self._value_buffer.knn(infostate_embedding,
MEM_KEY_NAME,
self._num_neighbours, 1)
      # neighbours_value holds the stored non-parametric values Q_np(s_k) of
      # the k nearest neighbours from the value buffer L.
neighbours_replay = self._replay_buffer.knn(infostate_embedding,
MEM_KEY_NAME,
self._num_neighbours,
self._trajectory_len)
# Take a step with the parametric model and get q-values. Use embedding as
      # input to the parametric model.
# TODO(author6) Recompute embeddings for buffers on learning steps.
if self._embedding_as_parametric_input:
last_time_step_copy = copy.deepcopy(self._last_time_step)
last_time_step_copy.observations["info_state"][
self.player_id] = infostate_embedding
self._agent.step(last_time_step_copy, add_transition_record=False)
else:
self._agent.step(self._last_time_step, add_transition_record=False)
q_values = self._agent._q_network(self._info_state).detach()[0]
# Update EVA: Q_eva = lambda q_theta(s_t) + (1-lambda) sum(Q_np(s_k, .))/K
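      # For example, with the default mixing_parameter lambda = 0.9, a
      # parametric estimate q_theta = 1.0 and neighbour values averaging 4.0,
      # the mixed estimate is 0.9 * 1.0 + 0.1 * 4.0 = 1.3.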
for a in legal_actions:
q_theta = q_values[a]
self._q_eva[tuple(info_state)][a] = (
self._lambda * q_theta + (1 - self._lambda) *
sum([elem[1].value
for elem in neighbours_value]) / self._num_neighbours)
# Append (e,s,a,r,s') to Replay Buffer
self._add_transition_replay(infostate_embedding, time_step)
      # Update Q_np with the fetched traces using trajectory-centric planning.
self._trajectory_centric_planning(neighbours_replay)
# Append Q_np(s, a) to Value Buffer
self._add_transition_value(
infostate_embedding, self._q_np[tuple(info_state)][self._last_action])
# Prepare for the next episode.
if time_step.last():
self._last_time_step = None
self._last_action = None
return
self._last_time_step = time_step
self._last_action = action
return rl_agent.StepOutput(action=action, probs=probs)
def _trajectory_centric_planning(self, trajectories):
"""Performs trajectory centric planning.
Uses trajectories from the replay buffer to update the non-parametric values
while supplying counter-factual values with the parametric model.
Args:
      trajectories: A list of (distance, ReplayBufferElement) pairs fetched
        from the replay buffer via a knn query.
"""
# Calculate non-parametric values over the trajectories.
# Iterate backward through trajectories
for t in range(len(trajectories) - 1, 0, -1):
elem = trajectories[t][1]
s_tp1 = tuple(elem.next_info_state)
s_t = tuple(elem.info_state)
a_t = elem.action
r_t = elem.reward
legal_actions = elem.legal_actions_mask
if t < len(trajectories) - 1:
for action in range(len(legal_actions)):
if not legal_actions[action]:
continue
if action == elem.action:
self._q_np[s_t][a_t] = (r_t + self._discount * self._v_np[s_tp1])
else:
self._agent.info_state = torch.Tensor(
np.expand_dims(elem.info_state, axis=0))
q_values_parametric = self._agent._q_network(
self._agent.info_state).detach().numpy()
            self._q_np[s_t][action] = q_values_parametric[0][action]
# Set V(s_t)
if t == len(trajectories) - 1:
# Sample from the parametric model.
self._agent.info_state = torch.Tensor(
np.expand_dims(elem.info_state, axis=0))
q_values_parametric = self._agent._q_network(
self._agent.info_state).detach().numpy()
self._v_np[s_t] = np.max(q_values_parametric)
else:
self._v_np[s_t] = max(self._q_np[s_t])
def _epsilon_greedy(self, q_values, legal_actions, epsilon):
"""Returns a valid epsilon-greedy action and valid action probs.
Action probabilities are given by a softmax over legal q-values.
Args:
q_values: list of Q-values by action.
legal_actions: list of legal actions at `info_state`.
epsilon: float, probability of taking an exploratory action.
Returns:
A valid epsilon-greedy action and valid action probabilities.
"""
probs = np.zeros(self._num_actions)
q_values = np.array(q_values)
if np.random.rand() < epsilon:
action = np.random.choice(legal_actions)
probs[legal_actions] = 1.0 / len(legal_actions)
else:
legal_q_values = q_values[legal_actions]
action = legal_actions[np.argmax(legal_q_values)]
# Reduce max_q for numerical stability. Result is the same.
max_q = np.max(legal_q_values)
e_x = np.exp(legal_q_values - max_q)
probs[legal_actions] = e_x / e_x.sum(axis=0)
return action, probs
def _get_epsilon(self, step_counter, is_evaluation):
"""Returns the evaluation or decayed epsilon value."""
if is_evaluation:
return 0.0
decay_steps = min(step_counter, self._epsilon_decay_duration)
decayed_epsilon = (
self._epsilon_end + (self._epsilon_start - self._epsilon_end) *
(1 - decay_steps / self._epsilon_decay_duration))
return decayed_epsilon
def action_probabilities(self, state):
"""Returns action probabilites dict for a single batch."""
# TODO(author3, author6): Refactor this to expect pre-normalized form.
if hasattr(state, "information_state_tensor"):
state_rep = tuple(state.information_state_tensor(self.player_id))
elif hasattr(state, "observation_tensor"):
state_rep = tuple(state.observation_tensor(self.player_id))
else:
raise AttributeError("Unable to extract normalized state vector.")
legal_actions = state.legal_actions(self.player_id)
if legal_actions:
_, probs = self._epsilon_greedy(
self._q_eva[state_rep], legal_actions, epsilon=0.0)
return {a: probs[a] for a in range(self._num_actions)}
else:
raise ValueError("Node has no legal actions to take.")
| open_spiel-master | open_spiel/python/pytorch/eva.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.algorithms.eva."""
from absl.testing import absltest
from absl.testing import parameterized
import torch
from open_spiel.python import rl_environment
from open_spiel.python.pytorch import eva
SEED = 24984617
class EVATest(parameterized.TestCase):
@parameterized.parameters("tic_tac_toe", "kuhn_poker", "liars_dice")
def test_run_games(self, game):
env = rl_environment.Environment(game)
num_players = env.num_players
eva_agents = []
num_actions = env.action_spec()["num_actions"]
state_size = env.observation_spec()["info_state"][0]
for player in range(num_players):
eva_agents.append(
eva.EVAAgent(
env,
player,
state_size,
num_actions,
embedding_network_layers=(64, 32),
embedding_size=12,
learning_rate=1e-4,
mixing_parameter=0.5,
memory_capacity=int(1e6),
discount_factor=1.0,
epsilon_start=1.0,
epsilon_end=0.1,
epsilon_decay_duration=int(1e6)))
time_step = env.reset()
while not time_step.last():
current_player = time_step.observations["current_player"]
current_agent = eva_agents[current_player]
# 1. Step the agent.
# 2. Step the Environment.
agent_output = current_agent.step(time_step)
time_step = env.step([agent_output.action])
for agent in eva_agents:
agent.step(time_step)
class QueryableFixedSizeRingBufferTest(absltest.TestCase):
def test_replay_buffer_add(self):
replay_buffer = eva.QueryableFixedSizeRingBuffer(replay_buffer_capacity=10)
self.assertEmpty(replay_buffer)
replay_buffer.add("entry1")
self.assertLen(replay_buffer, 1)
replay_buffer.add("entry2")
self.assertLen(replay_buffer, 2)
self.assertIn("entry1", replay_buffer)
self.assertIn("entry2", replay_buffer)
def test_replay_buffer_max_capacity(self):
replay_buffer = eva.QueryableFixedSizeRingBuffer(replay_buffer_capacity=2)
replay_buffer.add("entry1")
replay_buffer.add("entry2")
replay_buffer.add("entry3")
self.assertLen(replay_buffer, 2)
self.assertIn("entry2", replay_buffer)
self.assertIn("entry3", replay_buffer)
def test_replay_buffer_sample(self):
replay_buffer = eva.QueryableFixedSizeRingBuffer(replay_buffer_capacity=3)
replay_buffer.add("entry1")
replay_buffer.add("entry2")
replay_buffer.add("entry3")
samples = replay_buffer.sample(3)
self.assertIn("entry1", samples)
self.assertIn("entry2", samples)
self.assertIn("entry3", samples)
# TODO(author6) Test knn query.
if __name__ == "__main__":
torch.manual_seed(SEED)
absltest.main()
| open_spiel-master | open_spiel/python/pytorch/eva_pytorch_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as python3.
r"""Policy Gradient based agents implemented in PyTorch.
This class is composed of three policy gradient (PG) algorithms:
- Q-based Policy Gradient (QPG): an "all-actions" advantage actor-critic
algorithm differing from A2C in that all action values are used to estimate the
policy gradient (as opposed to only using the action taken into account):
baseline = \sum_a pi_a * Q_a
loss = - \sum_a pi_a * (Q_a - baseline)
where (Q_a - baseline) is the usual advantage. QPG is also known as Mean
Actor-Critic (https://arxiv.org/abs/1709.00503).
- Regret policy gradient (RPG): a PG algorithm inspired by counterfactual regret
minimization (CFR). Unlike standard actor-critic methods (e.g. A2C), the loss is
defined purely in terms of thresholded regrets as follows:
baseline = \sum_a pi_a * Q_a
loss = regret = \sum_a relu(Q_a - baseline)
where gradients only flow through the action value (Q_a) part and are blocked on
the baseline part (which is trained separately by usual MSE loss).
The lack of negative sign in the front of the loss represents a switch from
gradient ascent on the score to descent on the loss.
- Regret Matching Policy Gradient (RMPG): inspired by regret-matching, the
policy gradient is by weighted by the thresholded regret:
baseline = \sum_a pi_a * Q_a
loss = - \sum_a pi_a * relu(Q_a - baseline)
These algorithms were published in NeurIPS 2018. Paper title: "Actor-Critic
Policy Optimization in Partially Observable Multiagent Environment", the paper
is available at: https://arxiv.org/abs/1810.09026.
- Advantage Actor Critic (A2C): The popular advantage actor critic (A2C)
algorithm. The algorithm uses the baseline (Value function) as a control variate
to reduce variance of the policy gradient. The loss is only computed for the
actions actually taken in the episode as opposed to a loss computed for all
actions in the variants above.
advantages = returns - baseline
loss = -log(pi_a) * advantages
The algorithm can be found in the textbook:
https://incompleteideas.net/book/RLbook2018.pdf under the chapter on
`Policy Gradients`.
See open_spiel/python/pytorch/losses/rl_losses_test.py for an example of the
loss computation.
"""
import collections
import os
from absl import logging
import numpy as np
import torch
from torch import nn
from torch import optim
import torch.nn.functional as F
from open_spiel.python import rl_agent
from open_spiel.python.pytorch.dqn import SonnetLinear
from open_spiel.python.pytorch.losses import rl_losses
Transition = collections.namedtuple(
"Transition", "info_state action reward discount legal_actions_mask")
class MLPTorso(nn.Module):
"""A specialized half-MLP module when constructing multiple heads.
Note that every layer includes a ReLU non-linearity activation.
"""
def __init__(self, input_size, hidden_sizes):
"""Create the MLPTorso.
Args:
input_size: (int) number of inputs
hidden_sizes: (list) sizes (number of units) of each hidden layer
"""
super(MLPTorso, self).__init__()
self._layers = []
# Hidden layers
for size in hidden_sizes:
self._layers.append(SonnetLinear(in_size=input_size, out_size=size))
input_size = size
self.model = nn.ModuleList(self._layers)
def forward(self, x):
for layer in self.model:
x = layer(x)
return x
class PolicyGradient(rl_agent.AbstractAgent):
"""RPG Agent implementation in PyTorch.
See open_spiel/python/examples/single_agent_catch.py for an usage example.
"""
def __init__(self,
player_id,
info_state_size,
num_actions,
loss_str="a2c",
loss_class=None,
hidden_layers_sizes=(128,),
batch_size=16,
critic_learning_rate=0.01,
pi_learning_rate=0.001,
entropy_cost=0.01,
num_critic_before_pi=8,
additional_discount_factor=1.0,
max_global_gradient_norm=None,
optimizer_str="sgd"):
"""Initialize the PolicyGradient agent.
Args:
player_id: int, player identifier. Usually its position in the game.
info_state_size: int, info_state vector size.
num_actions: int, number of actions per info state.
loss_str: string or None. If string, must be one of ["rpg", "qpg", "rm",
"a2c", "neurd"] and defined in `_get_loss_class`. If None, a loss class
must be passed through `loss_class`. Defaults to "a2c".
loss_class: Class or None. If Class, it must define the policy gradient
loss. If None a loss class in a string format must be passed through
`loss_str`. Defaults to None.
hidden_layers_sizes: iterable, defines the neural network layers. Defaults
to (128,), which produces a NN: [INPUT] -> [128] -> ReLU -> [OUTPUT].
      batch_size: int, batch size to use for Q and Pi learning. Defaults to 16.
critic_learning_rate: float, learning rate used for Critic (Q or V).
        Defaults to 0.01.
pi_learning_rate: float, learning rate used for Pi. Defaults to 0.001.
entropy_cost: float, entropy cost used to multiply the entropy loss. Can
        be set to None to skip entropy computation. Defaults to 0.01.
num_critic_before_pi: int, number of Critic (Q or V) updates before each
Pi update. Defaults to 8 (every 8th critic learning step, Pi also
learns).
additional_discount_factor: float, additional discount to compute returns.
        Defaults to 1.0, in which case, no extra discount is applied. Note that
users must provide *only one of* `loss_str` or `loss_class`.
max_global_gradient_norm: float or None, maximum global norm of a gradient
to which the gradient is shrunk if its value is larger.
optimizer_str: String defining which optimizer to use. Supported values
are {sgd, adam}
"""
assert bool(loss_str) ^ bool(loss_class), "Please provide only one option."
self._kwargs = locals()
loss_class = loss_class if loss_class else self._get_loss_class(loss_str)
self._loss_class = loss_class
self.player_id = player_id
self._num_actions = num_actions
self._layer_sizes = hidden_layers_sizes
self._batch_size = batch_size
self._extra_discount = additional_discount_factor
self._num_critic_before_pi = num_critic_before_pi
self._max_global_gradient_norm = max_global_gradient_norm
self._episode_data = []
self._dataset = collections.defaultdict(list)
self._prev_time_step = None
self._prev_action = None
# Step counters
self._step_counter = 0
self._episode_counter = 0
self._num_learn_steps = 0
# Keep track of the last training loss achieved in an update step.
self._last_loss_value = None
# Network
    # The torso output is ReLU-activated; the policy-logit and value heads
    # are plugged on top of it below.
self._net_torso = MLPTorso(info_state_size, self._layer_sizes)
torso_out_size = self._layer_sizes[-1]
self._policy_logits_layer = SonnetLinear(
torso_out_size, self._num_actions, activate_relu=False)
# Do not remove policy_logits_network. Even if it's not used directly here,
# other code outside this file refers to it.
self.policy_logits_network = nn.Sequential(self._net_torso,
self._policy_logits_layer)
self._savers = []
# Add baseline (V) head for A2C (or Q-head for QPG / RPG / RMPG / NeuRD)
if optimizer_str == "adam":
self._critic_optimizer = optim.Adam
elif optimizer_str == "sgd":
self._critic_optimizer = optim.SGD
else:
raise ValueError("Not implemented, choose from 'adam' and 'sgd'.")
if loss_class.__name__ == "BatchA2CLoss":
self._baseline_layer = SonnetLinear(
torso_out_size, 1, activate_relu=False)
self._critic_network = nn.Sequential(self._net_torso,
self._baseline_layer)
else:
self._q_values_layer = SonnetLinear(
torso_out_size, self._num_actions, activate_relu=False)
self._critic_network = nn.Sequential(self._net_torso,
self._q_values_layer)
self._critic_optimizer = self._critic_optimizer(
self._critic_network.parameters(), lr=critic_learning_rate)
# Pi loss
self.pg_class = loss_class(entropy_cost=entropy_cost)
self._pi_network = nn.Sequential(self._net_torso, self._policy_logits_layer)
if optimizer_str == "adam":
self._pi_optimizer = optim.Adam(
self._pi_network.parameters(), lr=pi_learning_rate)
elif optimizer_str == "sgd":
self._pi_optimizer = optim.SGD(
self._pi_network.parameters(), lr=pi_learning_rate)
self._loss_str = loss_str
def _get_loss_class(self, loss_str):
if loss_str == "rpg":
return rl_losses.BatchRPGLoss
elif loss_str == "qpg":
return rl_losses.BatchQPGLoss
elif loss_str == "rm":
return rl_losses.BatchRMLoss
elif loss_str == "a2c":
return rl_losses.BatchA2CLoss
elif loss_str == "neurd":
return rl_losses.BatchNeuRDLoss
def minimize_with_clipping(self, model, optimizer, loss):
optimizer.zero_grad()
loss.backward()
if self._max_global_gradient_norm is not None:
nn.utils.clip_grad_norm_(model.parameters(),
self._max_global_gradient_norm)
optimizer.step()
def _act(self, info_state, legal_actions):
# Make a singleton batch for NN compatibility: [1, info_state_size]
info_state = torch.Tensor(np.reshape(info_state, [1, -1]))
torso_out = self._net_torso(info_state)
self._policy_logits = self._policy_logits_layer(torso_out)
policy_probs = F.softmax(self._policy_logits, dim=1).detach()
# Remove illegal actions, re-normalize probs
probs = np.zeros(self._num_actions)
probs[legal_actions] = policy_probs[0][legal_actions]
if sum(probs) != 0:
probs /= sum(probs)
else:
probs[legal_actions] = 1 / len(legal_actions)
action = np.random.choice(len(probs), p=probs)
return action, probs
def step(self, time_step, is_evaluation=False):
"""Returns the action to be taken and updates the network if needed.
Args:
time_step: an instance of rl_environment.TimeStep.
is_evaluation: bool, whether this is a training or evaluation call.
Returns:
A `rl_agent.StepOutput` containing the action probs and chosen action.
"""
    # Act step: don't act at terminal info states or if it's not our turn.
if (not time_step.last()) and (
time_step.is_simultaneous_move() or
self.player_id == time_step.current_player()):
info_state = time_step.observations["info_state"][self.player_id]
legal_actions = time_step.observations["legal_actions"][self.player_id]
action, probs = self._act(info_state, legal_actions)
else:
action = None
probs = []
if not is_evaluation:
self._step_counter += 1
# Add data points to current episode buffer.
if self._prev_time_step:
self._add_transition(time_step)
# Episode done, add to dataset and maybe learn.
if time_step.last():
self._add_episode_data_to_dataset()
self._episode_counter += 1
if len(self._dataset["returns"]) >= self._batch_size:
self._critic_update()
self._num_learn_steps += 1
if self._num_learn_steps % self._num_critic_before_pi == 0:
self._pi_update()
self._dataset = collections.defaultdict(list)
self._prev_time_step = None
self._prev_action = None
return
else:
self._prev_time_step = time_step
self._prev_action = action
return rl_agent.StepOutput(action=action, probs=probs)
def _full_checkpoint_name(self, checkpoint_dir, name):
checkpoint_filename = "_".join(
[self._loss_str, name, "pid" + str(self.player_id)])
return os.path.join(checkpoint_dir, checkpoint_filename)
def _latest_checkpoint_filename(self, name):
checkpoint_filename = "_".join(
[self._loss_str, name, "pid" + str(self.player_id)])
return checkpoint_filename + "_latest"
def save(self, checkpoint_dir):
for name, model in self._savers:
path = self._full_checkpoint_name(checkpoint_dir, name)
torch.save(model.state_dict(), path)
logging.info("Saved to path: %s", path)
def has_checkpoint(self, checkpoint_dir):
for name, _ in self._savers:
path = self._full_checkpoint_name(checkpoint_dir, name)
if os.path.exists(path):
return True
return False
def restore(self, checkpoint_dir):
for name, model in self._savers:
full_checkpoint_dir = self._full_checkpoint_name(checkpoint_dir, name)
logging.info("Restoring checkpoint: %s", full_checkpoint_dir)
model.load_state_dict(torch.load(full_checkpoint_dir))
@property
def loss(self):
return (self._last_critic_loss_value, self._last_pi_loss_value)
def _add_episode_data_to_dataset(self):
"""Add episode data to the buffer."""
info_states = [data.info_state for data in self._episode_data]
rewards = [data.reward for data in self._episode_data]
discount = [data.discount for data in self._episode_data]
actions = [data.action for data in self._episode_data]
# Calculate returns
returns = np.array(rewards)
for idx in reversed(range(len(rewards[:-1]))):
returns[idx] = (
rewards[idx] +
discount[idx] * returns[idx + 1] * self._extra_discount)
# Add flattened data points to dataset
self._dataset["actions"].extend(actions)
self._dataset["returns"].extend(returns)
self._dataset["info_states"].extend(info_states)
self._episode_data = []
def _add_transition(self, time_step):
"""Adds intra-episode transition to the `_episode_data` buffer.
Adds the transition from `self._prev_time_step` to `time_step`.
Args:
time_step: an instance of rl_environment.TimeStep.
"""
assert self._prev_time_step is not None
legal_actions = (
self._prev_time_step.observations["legal_actions"][self.player_id])
legal_actions_mask = np.zeros(self._num_actions)
legal_actions_mask[legal_actions] = 1.0
transition = Transition(
info_state=(
self._prev_time_step.observations["info_state"][self.player_id][:]),
action=self._prev_action,
reward=time_step.rewards[self.player_id],
discount=time_step.discounts[self.player_id],
legal_actions_mask=legal_actions_mask)
self._episode_data.append(transition)
def _critic_update(self):
"""Compute the Critic loss on sampled transitions & perform a critic update.
Returns:
The average Critic loss obtained on this batch.
"""
# TODO(author3): illegal action handling.
info_state = torch.Tensor(self._dataset["info_states"])
action = torch.LongTensor(self._dataset["actions"])
return_ = torch.Tensor(self._dataset["returns"])
torso_out = self._net_torso(info_state)
# Critic loss
# Baseline loss in case of A2C
if self._loss_class.__name__ == "BatchA2CLoss":
baseline = torch.squeeze(self._baseline_layer(torso_out), dim=1)
critic_loss = torch.mean(F.mse_loss(baseline, return_))
self.minimize_with_clipping(self._baseline_layer, self._critic_optimizer,
critic_loss)
else:
# Q-loss otherwise.
q_values = self._q_values_layer(torso_out)
action_indices = torch.stack(
[torch.arange(q_values.shape[0], dtype=torch.long), action], dim=0)
value_predictions = q_values[list(action_indices)]
critic_loss = torch.mean(F.mse_loss(value_predictions, return_))
self.minimize_with_clipping(self._q_values_layer, self._critic_optimizer,
critic_loss)
self._last_critic_loss_value = critic_loss
return critic_loss
def _pi_update(self):
"""Compute the Pi loss on sampled transitions and perform a Pi update.
Returns:
The average Pi loss obtained on this batch.
"""
# TODO(author3): illegal action handling.
info_state = torch.Tensor(self._dataset["info_states"])
action = torch.LongTensor(self._dataset["actions"])
return_ = torch.Tensor(self._dataset["returns"])
torso_out = self._net_torso(info_state)
self._policy_logits = self._policy_logits_layer(torso_out)
if self._loss_class.__name__ == "BatchA2CLoss":
baseline = torch.squeeze(self._baseline_layer(torso_out), dim=1)
pi_loss = self.pg_class.loss(
policy_logits=self._policy_logits,
baseline=baseline,
actions=action,
returns=return_)
self.minimize_with_clipping(self._policy_logits_layer, self._pi_optimizer,
pi_loss)
else:
q_values = self._q_values_layer(torso_out)
pi_loss = self.pg_class.loss(
policy_logits=self._policy_logits, action_values=q_values)
self.minimize_with_clipping(self._policy_logits_layer, self._pi_optimizer,
pi_loss)
self._last_pi_loss_value = pi_loss
return pi_loss
def get_weights(self):
variables = [m.weight for m in self._net_torso.model]
variables.append(self._policy_logits_layer.weight)
if self._loss_class.__name__ == "BatchA2CLoss":
variables.append(self._baseline_layer.weight)
else:
variables.append(self._q_values_layer.weight)
return variables
def copy_with_noise(self, sigma=0.0, copy_weights=True):
"""Copies the object and perturbates its network's weights with noise.
Args:
sigma: gaussian dropout variance term : Multiplicative noise following
(1+sigma*epsilon), epsilon standard gaussian variable, multiplies each
model weight. sigma=0 means no perturbation.
copy_weights: Boolean determining whether to copy model weights (True) or
just model hyperparameters.
Returns:
      A perturbed copy of the model.
"""
_ = self._kwargs.pop("self", None)
copied_object = PolicyGradient(**self._kwargs)
net_torso = getattr(copied_object, "_net_torso")
policy_logits_layer = getattr(copied_object, "_policy_logits_layer")
if hasattr(copied_object, "_q_values_layer"):
q_values_layer = getattr(copied_object, "_q_values_layer")
if hasattr(copied_object, "_baseline_layer"):
baseline_layer = getattr(copied_object, "_baseline_layer")
if copy_weights:
with torch.no_grad():
for layer in net_torso.model:
layer.weight *= (1 + sigma * torch.randn(layer.weight.shape))
policy_logits_layer.weight *= (
1 + sigma * torch.randn(policy_logits_layer.weight.shape))
if hasattr(copied_object, "_q_values_layer"):
q_values_layer.weight *= (
1 + sigma * torch.randn(q_values_layer.weight.shape))
if hasattr(copied_object, "_baseline_layer"):
baseline_layer.weight *= (
1 + sigma * torch.randn(baseline_layer.weight.shape))
return copied_object
| open_spiel-master | open_spiel/python/pytorch/policy_gradient.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Neural Fictitious Self-Play (NFSP) agent implemented in PyTorch.
See the paper https://arxiv.org/abs/1603.01121 for more details.
"""
import collections
import contextlib
import enum
import os
import random
from absl import logging
import numpy as np
import torch
import torch.nn.functional as F
from open_spiel.python import rl_agent
from open_spiel.python.pytorch import dqn
Transition = collections.namedtuple(
"Transition", "info_state action_probs legal_actions_mask")
MODE = enum.Enum("mode", "best_response average_policy")
class NFSP(rl_agent.AbstractAgent):
"""NFSP Agent implementation in PyTorch.
  See open_spiel/python/examples/kuhn_nfsp.py for a usage example.
"""
def __init__(self,
player_id,
state_representation_size,
num_actions,
hidden_layers_sizes,
reservoir_buffer_capacity,
anticipatory_param,
batch_size=128,
rl_learning_rate=0.01,
sl_learning_rate=0.01,
min_buffer_size_to_learn=1000,
learn_every=64,
optimizer_str="sgd",
**kwargs):
"""Initialize the `NFSP` agent."""
self.player_id = player_id
self._num_actions = num_actions
self._layer_sizes = hidden_layers_sizes
self._batch_size = batch_size
self._learn_every = learn_every
self._anticipatory_param = anticipatory_param
self._min_buffer_size_to_learn = min_buffer_size_to_learn
self._reservoir_buffer = ReservoirBuffer(reservoir_buffer_capacity)
self._prev_timestep = None
self._prev_action = None
# Step counter to keep track of learning.
self._step_counter = 0
# Inner RL agent
kwargs.update({
"batch_size": batch_size,
"learning_rate": rl_learning_rate,
"learn_every": learn_every,
"min_buffer_size_to_learn": min_buffer_size_to_learn,
"optimizer_str": optimizer_str,
})
self._rl_agent = dqn.DQN(player_id, state_representation_size,
num_actions, hidden_layers_sizes, **kwargs)
# Keep track of the last training loss achieved in an update step.
self._last_rl_loss_value = lambda: self._rl_agent.loss
self._last_sl_loss_value = None
# Average policy network.
self._avg_network = dqn.MLP(state_representation_size,
self._layer_sizes, num_actions)
self._savers = [
("q_network", self._rl_agent._q_network),
("avg_network", self._avg_network)
]
if optimizer_str == "adam":
self.optimizer = torch.optim.Adam(
self._avg_network.parameters(), lr=sl_learning_rate)
elif optimizer_str == "sgd":
self.optimizer = torch.optim.SGD(
self._avg_network.parameters(), lr=sl_learning_rate)
else:
raise ValueError("Not implemented. Choose from ['adam', 'sgd'].")
self._sample_episode_policy()
@contextlib.contextmanager
def temp_mode_as(self, mode):
"""Context manager to temporarily overwrite the mode."""
previous_mode = self._mode
self._mode = mode
yield
self._mode = previous_mode
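  # The anticipatory parameter controls the mixture at the episode level: with
  # anticipatory_param=0.1, for instance, roughly one episode in ten is played
  # (and its targets stored) in best-response mode, while the remaining
  # episodes follow the average policy.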
def _sample_episode_policy(self):
if np.random.rand() < self._anticipatory_param:
self._mode = MODE.best_response
else:
self._mode = MODE.average_policy
def _act(self, info_state, legal_actions):
info_state = np.reshape(info_state, [1, -1])
action_values = self._avg_network(torch.Tensor(info_state))
action_probs = F.softmax(action_values, dim=1).detach()
self._last_action_values = action_values[0]
# Remove illegal actions, normalize probs
probs = np.zeros(self._num_actions)
probs[legal_actions] = action_probs[0][legal_actions]
probs /= sum(probs)
action = np.random.choice(len(probs), p=probs)
return action, probs
@property
def mode(self):
return self._mode
@property
def loss(self):
return (self._last_sl_loss_value, self._last_rl_loss_value().detach())
def step(self, time_step, is_evaluation=False):
"""Returns the action to be taken and updates the Q-networks if needed.
Args:
time_step: an instance of rl_environment.TimeStep.
is_evaluation: bool, whether this is a training or evaluation call.
Returns:
A `rl_agent.StepOutput` containing the action probs and chosen action.
"""
if self._mode == MODE.best_response:
agent_output = self._rl_agent.step(time_step, is_evaluation)
if not is_evaluation and not time_step.last():
self._add_transition(time_step, agent_output)
elif self._mode == MODE.average_policy:
# Act step: don't act at terminal info states.
if not time_step.last():
info_state = time_step.observations["info_state"][self.player_id]
legal_actions = time_step.observations["legal_actions"][self.player_id]
action, probs = self._act(info_state, legal_actions)
agent_output = rl_agent.StepOutput(action=action, probs=probs)
if self._prev_timestep and not is_evaluation:
self._rl_agent.add_transition(self._prev_timestep, self._prev_action,
time_step)
else:
raise ValueError("Invalid mode ({})".format(self._mode))
if not is_evaluation:
self._step_counter += 1
if self._step_counter % self._learn_every == 0:
self._last_sl_loss_value = self._learn()
# If learn step not triggered by rl policy, learn.
if self._mode == MODE.average_policy:
self._rl_agent.learn()
# Prepare for the next episode.
if time_step.last():
self._sample_episode_policy()
self._prev_timestep = None
self._prev_action = None
return
else:
self._prev_timestep = time_step
self._prev_action = agent_output.action
return agent_output
def _add_transition(self, time_step, agent_output):
"""Adds the new transition using `time_step` to the reservoir buffer.
Transitions are in the form (time_step, agent_output.probs, legal_mask).
Args:
time_step: an instance of rl_environment.TimeStep.
agent_output: an instance of rl_agent.StepOutput.
"""
legal_actions = time_step.observations["legal_actions"][self.player_id]
legal_actions_mask = np.zeros(self._num_actions)
legal_actions_mask[legal_actions] = 1.0
transition = Transition(
info_state=(time_step.observations["info_state"][self.player_id][:]),
action_probs=agent_output.probs,
legal_actions_mask=legal_actions_mask)
self._reservoir_buffer.add(transition)
def _learn(self):
"""Compute the loss on sampled transitions and perform a avg-network update.
If there are not enough elements in the buffer, no loss is computed and
`None` is returned instead.
Returns:
The average loss obtained on this batch of transitions or `None`.
"""
if (len(self._reservoir_buffer) < self._batch_size or
len(self._reservoir_buffer) < self._min_buffer_size_to_learn):
return None
transitions = self._reservoir_buffer.sample(self._batch_size)
info_states = torch.Tensor([t.info_state for t in transitions])
action_probs = torch.Tensor([t.action_probs for t in transitions])
self.optimizer.zero_grad()
loss = F.cross_entropy(self._avg_network(info_states),
torch.max(action_probs, dim=1)[1])
loss.backward()
self.optimizer.step()
return loss.detach()
def _full_checkpoint_name(self, checkpoint_dir, name):
checkpoint_filename = "_".join([name, "pid" + str(self.player_id)])
return os.path.join(checkpoint_dir, checkpoint_filename)
def _latest_checkpoint_filename(self, name):
checkpoint_filename = "_".join([name, "pid" + str(self.player_id)])
return checkpoint_filename + "_latest"
def save(self, checkpoint_dir):
"""Saves the average policy network and the inner RL agent's q-network.
Note that this does not save the experience replay buffers and should
only be used to restore the agent's policy, not resume training.
Args:
checkpoint_dir: directory where checkpoints will be saved.
"""
for name, model in self._savers:
path = self._full_checkpoint_name(checkpoint_dir, name)
torch.save(model.state_dict(), path)
logging.info("Saved to path: %s", path)
def has_checkpoint(self, checkpoint_dir):
for name, _ in self._savers:
path = self._full_checkpoint_name(checkpoint_dir, name)
if os.path.exists(path):
return True
return False
def restore(self, checkpoint_dir):
"""Restores the average policy network and the inner RL agent's q-network.
Note that this does not restore the experience replay buffers and should
only be used to restore the agent's policy, not resume training.
Args:
checkpoint_dir: directory from which checkpoints will be restored.
"""
for name, model in self._savers:
full_checkpoint_dir = self._full_checkpoint_name(checkpoint_dir, name)
logging.info("Restoring checkpoint: %s", full_checkpoint_dir)
model.load_state_dict(torch.load(full_checkpoint_dir))
class ReservoirBuffer(object):
"""Allows uniform sampling over a stream of data.
This class supports the storage of arbitrary elements, such as observation
tensors, integer actions, etc.
See https://en.wikipedia.org/wiki/Reservoir_sampling for more details.
"""
def __init__(self, reservoir_buffer_capacity):
self._reservoir_buffer_capacity = reservoir_buffer_capacity
self._data = []
self._add_calls = 0
def add(self, element):
"""Potentially adds `element` to the reservoir buffer.
Args:
element: data to be added to the reservoir buffer.
"""
if len(self._data) < self._reservoir_buffer_capacity:
self._data.append(element)
else:
idx = np.random.randint(0, self._add_calls + 1)
if idx < self._reservoir_buffer_capacity:
self._data[idx] = element
self._add_calls += 1
def sample(self, num_samples):
"""Returns `num_samples` uniformly sampled from the buffer.
Args:
num_samples: `int`, number of samples to draw.
Returns:
An iterable over `num_samples` random elements of the buffer.
Raises:
      ValueError: If there are fewer than `num_samples` elements in the buffer.
"""
if len(self._data) < num_samples:
raise ValueError("{} elements could not be sampled from size {}".format(
num_samples, len(self._data)))
return random.sample(self._data, num_samples)
def clear(self):
self._data = []
self._add_calls = 0
def __len__(self):
return len(self._data)
def __iter__(self):
return iter(self._data)
| open_spiel-master | open_spiel/python/pytorch/nfsp.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.algorithms.ppo."""
import random
from absl.testing import absltest
import numpy as np
import torch
from open_spiel.python import rl_environment
import pyspiel
from open_spiel.python.pytorch.ppo import PPO
from open_spiel.python.pytorch.ppo import PPOAgent
from open_spiel.python.vector_env import SyncVectorEnv
# A simple two-action game encoded as an EFG game. Going left gets -1, going
# right gets a +1.
SIMPLE_EFG_DATA = """
EFG 2 R "Simple single-agent problem" { "Player 1" } ""
p "ROOT" 1 1 "ROOT" { "L" "R" } 0
t "L" 1 "Outcome L" { -1.0 }
t "R" 2 "Outcome R" { 1.0 }
"""
SEED = 24261711
class PPOTest(absltest.TestCase):
def test_simple_game(self):
game = pyspiel.load_efg_game(SIMPLE_EFG_DATA)
env = rl_environment.Environment(game=game)
envs = SyncVectorEnv([env])
agent_fn = PPOAgent
anneal_lr = True
info_state_shape = tuple(
np.array(env.observation_spec()["info_state"]).flatten())
total_timesteps = 1000
steps_per_batch = 8
batch_size = int(len(envs) * steps_per_batch)
num_updates = total_timesteps // batch_size
agent = PPO(
input_shape=info_state_shape,
num_actions=game.num_distinct_actions(),
num_players=game.num_players(),
player_id=0,
num_envs=1,
agent_fn=agent_fn,
)
time_step = envs.reset()
for update in range(num_updates):
for _ in range(steps_per_batch):
agent_output = agent.step(time_step)
time_step, reward, done, _ = envs.step(
agent_output, reset_if_done=True)
agent.post_step(reward, done)
if anneal_lr:
agent.anneal_learning_rate(update, num_updates)
agent.learn(time_step)
total_eval_reward = 0
n_total_evaluations = 1000
n_evaluations = 0
time_step = envs.reset()
while n_evaluations < n_total_evaluations:
agent_output = agent.step(time_step, is_evaluation=True)
time_step, reward, done, _ = envs.step(
agent_output, reset_if_done=True)
total_eval_reward += reward[0][0]
n_evaluations += sum(done)
self.assertGreaterEqual(total_eval_reward, 900)
if __name__ == "__main__":
random.seed(SEED)
torch.manual_seed(SEED)
np.random.seed(SEED)
absltest.main()
| open_spiel-master | open_spiel/python/pytorch/ppo_pytorch_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DQN agent implemented in PyTorch."""
import collections
import math
import numpy as np
from scipy import stats
import torch
from torch import nn
import torch.nn.functional as F
from open_spiel.python import rl_agent
from open_spiel.python.utils.replay_buffer import ReplayBuffer
Transition = collections.namedtuple(
"Transition",
"info_state action reward next_info_state is_final_step legal_actions_mask")
# Penalty assigned to the Q-values of illegal actions. It must be very
# negative so that a masked action can never win the max over target Q-values.
ILLEGAL_ACTION_LOGITS_PENALTY = -1e9
class SonnetLinear(nn.Module):
"""A Sonnet linear module.
Always includes biases and only supports ReLU activations.
"""
def __init__(self, in_size, out_size, activate_relu=True):
"""Creates a Sonnet linear layer.
Args:
in_size: (int) number of inputs
out_size: (int) number of outputs
activate_relu: (bool) whether to include a ReLU activation layer
"""
super(SonnetLinear, self).__init__()
self._activate_relu = activate_relu
stddev = 1.0 / math.sqrt(in_size)
mean = 0
lower = (-2 * stddev - mean) / stddev
upper = (2 * stddev - mean) / stddev
# Weight initialization inspired by Sonnet's Linear layer,
# which cites https://arxiv.org/abs/1502.03167v3
# pytorch default: initialized from
# uniform(-sqrt(1/in_features), sqrt(1/in_features))
self._weight = nn.Parameter(
torch.Tensor(
stats.truncnorm.rvs(
lower, upper, loc=mean, scale=stddev, size=[out_size,
in_size])))
self._bias = nn.Parameter(torch.zeros([out_size]))
def forward(self, tensor):
y = F.linear(tensor, self._weight, self._bias)
return F.relu(y) if self._activate_relu else y
class MLP(nn.Module):
"""A simple network built from nn.linear layers."""
def __init__(self,
input_size,
hidden_sizes,
output_size,
activate_final=False):
"""Create the MLP.
Args:
input_size: (int) number of inputs
hidden_sizes: (list) sizes (number of units) of each hidden layer
output_size: (int) number of outputs
      activate_final: (bool) whether the final layer should include a ReLU
"""
super(MLP, self).__init__()
self._layers = []
# Hidden layers
for size in hidden_sizes:
self._layers.append(SonnetLinear(in_size=input_size, out_size=size))
input_size = size
# Output layer
self._layers.append(
SonnetLinear(
in_size=input_size,
out_size=output_size,
activate_relu=activate_final))
self.model = nn.ModuleList(self._layers)
def forward(self, x):
for layer in self.model:
x = layer(x)
return x
class DQN(rl_agent.AbstractAgent):
"""DQN Agent implementation in PyTorch.
  See open_spiel/python/examples/breakthrough_dqn.py for a usage example.
"""
def __init__(self,
player_id,
state_representation_size,
num_actions,
hidden_layers_sizes=128,
replay_buffer_capacity=10000,
batch_size=128,
replay_buffer_class=ReplayBuffer,
learning_rate=0.01,
update_target_network_every=1000,
learn_every=10,
discount_factor=1.0,
min_buffer_size_to_learn=1000,
epsilon_start=1.0,
epsilon_end=0.1,
epsilon_decay_duration=int(1e6),
optimizer_str="sgd",
loss_str="mse"):
"""Initialize the DQN agent."""
# This call to locals() is used to store every argument used to initialize
# the class instance, so it can be copied with no hyperparameter change.
self._kwargs = locals()
self.player_id = player_id
self._num_actions = num_actions
if isinstance(hidden_layers_sizes, int):
hidden_layers_sizes = [hidden_layers_sizes]
self._layer_sizes = hidden_layers_sizes
self._batch_size = batch_size
self._update_target_network_every = update_target_network_every
self._learn_every = learn_every
self._min_buffer_size_to_learn = min_buffer_size_to_learn
self._discount_factor = discount_factor
self._epsilon_start = epsilon_start
self._epsilon_end = epsilon_end
self._epsilon_decay_duration = epsilon_decay_duration
# TODO(author6) Allow for optional replay buffer config.
if not isinstance(replay_buffer_capacity, int):
raise ValueError("Replay buffer capacity not an integer.")
self._replay_buffer = replay_buffer_class(replay_buffer_capacity)
self._prev_timestep = None
self._prev_action = None
# Step counter to keep track of learning, eps decay and target network.
self._step_counter = 0
# Keep track of the last training loss achieved in an update step.
self._last_loss_value = None
# Create the Q-network instances
self._q_network = MLP(state_representation_size, self._layer_sizes,
num_actions)
self._target_q_network = MLP(state_representation_size, self._layer_sizes,
num_actions)
if loss_str == "mse":
self.loss_class = F.mse_loss
elif loss_str == "huber":
self.loss_class = F.smooth_l1_loss
else:
raise ValueError("Not implemented, choose from 'mse', 'huber'.")
if optimizer_str == "adam":
self._optimizer = torch.optim.Adam(
self._q_network.parameters(), lr=learning_rate)
elif optimizer_str == "sgd":
self._optimizer = torch.optim.SGD(
self._q_network.parameters(), lr=learning_rate)
else:
raise ValueError("Not implemented, choose from 'adam' and 'sgd'.")
def step(self, time_step, is_evaluation=False, add_transition_record=True):
"""Returns the action to be taken and updates the Q-network if needed.
Args:
time_step: an instance of rl_environment.TimeStep.
is_evaluation: bool, whether this is a training or evaluation call.
add_transition_record: Whether to add to the replay buffer on this step.
Returns:
A `rl_agent.StepOutput` containing the action probs and chosen action.
"""
    # Act step: don't act at terminal info states or if it's not our turn.
if (not time_step.last()) and (
time_step.is_simultaneous_move() or
self.player_id == time_step.current_player()):
info_state = time_step.observations["info_state"][self.player_id]
legal_actions = time_step.observations["legal_actions"][self.player_id]
epsilon = self._get_epsilon(is_evaluation)
action, probs = self._epsilon_greedy(info_state, legal_actions, epsilon)
else:
action = None
probs = []
    # Don't modify the agent's internal state during evaluation.
if not is_evaluation:
self._step_counter += 1
if self._step_counter % self._learn_every == 0:
self._last_loss_value = self.learn()
if self._step_counter % self._update_target_network_every == 0:
# state_dict method returns a dictionary containing a whole state of the
# module.
self._target_q_network.load_state_dict(self._q_network.state_dict())
if self._prev_timestep and add_transition_record:
# We may omit record adding here if it's done elsewhere.
self.add_transition(self._prev_timestep, self._prev_action, time_step)
if time_step.last(): # prepare for the next episode.
self._prev_timestep = None
self._prev_action = None
return
else:
self._prev_timestep = time_step
self._prev_action = action
return rl_agent.StepOutput(action=action, probs=probs)
def add_transition(self, prev_time_step, prev_action, time_step):
"""Adds the new transition using `time_step` to the replay buffer.
Adds the transition from `self._prev_timestep` to `time_step` by
`self._prev_action`.
Args:
prev_time_step: prev ts, an instance of rl_environment.TimeStep.
prev_action: int, action taken at `prev_time_step`.
time_step: current ts, an instance of rl_environment.TimeStep.
"""
assert prev_time_step is not None
legal_actions = (time_step.observations["legal_actions"][self.player_id])
legal_actions_mask = np.zeros(self._num_actions)
legal_actions_mask[legal_actions] = 1.0
transition = Transition(
info_state=(
prev_time_step.observations["info_state"][self.player_id][:]),
action=prev_action,
reward=time_step.rewards[self.player_id],
next_info_state=time_step.observations["info_state"][self.player_id][:],
is_final_step=float(time_step.last()),
legal_actions_mask=legal_actions_mask)
self._replay_buffer.add(transition)
def _epsilon_greedy(self, info_state, legal_actions, epsilon):
"""Returns a valid epsilon-greedy action and valid action probs.
    The greedy action receives probability 1; when exploring, the probability
    is uniform over the legal actions.
Args:
info_state: hashable representation of the information state.
legal_actions: list of legal actions at `info_state`.
epsilon: float, probability of taking an exploratory action.
Returns:
A valid epsilon-greedy action and valid action probabilities.
"""
probs = np.zeros(self._num_actions)
if np.random.rand() < epsilon:
action = np.random.choice(legal_actions)
probs[legal_actions] = 1.0 / len(legal_actions)
else:
info_state = torch.Tensor(np.reshape(info_state, [1, -1]))
q_values = self._q_network(info_state).detach()[0]
legal_q_values = q_values[legal_actions]
action = legal_actions[torch.argmax(legal_q_values)]
probs[action] = 1.0
return action, probs
def _get_epsilon(self, is_evaluation, power=1.0):
"""Returns the evaluation or decayed epsilon value."""
if is_evaluation:
return 0.0
decay_steps = min(self._step_counter, self._epsilon_decay_duration)
decayed_epsilon = (
self._epsilon_end + (self._epsilon_start - self._epsilon_end) *
(1 - decay_steps / self._epsilon_decay_duration)**power)
return decayed_epsilon
def learn(self):
"""Compute the loss on sampled transitions and perform a Q-network update.
If there are not enough elements in the buffer, no loss is computed and
`None` is returned instead.
Returns:
The average loss obtained on this batch of transitions or `None`.
"""
if (len(self._replay_buffer) < self._batch_size or
len(self._replay_buffer) < self._min_buffer_size_to_learn):
return None
transitions = self._replay_buffer.sample(self._batch_size)
info_states = torch.Tensor([t.info_state for t in transitions])
actions = torch.LongTensor([t.action for t in transitions])
rewards = torch.Tensor([t.reward for t in transitions])
next_info_states = torch.Tensor([t.next_info_state for t in transitions])
are_final_steps = torch.Tensor([t.is_final_step for t in transitions])
legal_actions_mask = torch.Tensor(
np.array([t.legal_actions_mask for t in transitions]))
self._q_values = self._q_network(info_states)
self._target_q_values = self._target_q_network(next_info_states).detach()
    illegal_actions_mask = 1 - legal_actions_mask
    legal_target_q_values = self._target_q_values.masked_fill(
        illegal_actions_mask.bool(), ILLEGAL_ACTION_LOGITS_PENALTY)
max_next_q = torch.max(legal_target_q_values, dim=1)[0]
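    # One-step TD target: r + (1 - done) * gamma * max_a' Q_target(s', a'),
    # where illegal actions were masked out of the max above.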
target = (
rewards + (1 - are_final_steps) * self._discount_factor * max_next_q)
action_indices = torch.stack([
torch.arange(self._q_values.shape[0], dtype=torch.long), actions
],
dim=0)
predictions = self._q_values[list(action_indices)]
loss = self.loss_class(predictions, target)
self._optimizer.zero_grad()
loss.backward()
self._optimizer.step()
return loss
@property
def q_values(self):
return self._q_values
@property
def replay_buffer(self):
return self._replay_buffer
@property
def loss(self):
return self._last_loss_value
@property
def prev_timestep(self):
return self._prev_timestep
@property
def prev_action(self):
return self._prev_action
@property
def step_counter(self):
return self._step_counter
def get_weights(self):
variables = [m.weight for m in self._q_network.model]
variables.append([m.weight for m in self._target_q_network.model])
return variables
def copy_with_noise(self, sigma=0.0, copy_weights=True):
"""Copies the object and perturbates it with noise.
Args:
sigma: gaussian dropout variance term : Multiplicative noise following
(1+sigma*epsilon), epsilon standard gaussian variable, multiplies each
model weight. sigma=0 means no perturbation.
copy_weights: Boolean determining whether to copy model weights (True) or
just model hyperparameters.
Returns:
      A perturbed copy of the model.
"""
_ = self._kwargs.pop("self", None)
copied_object = DQN(**self._kwargs)
q_network = getattr(copied_object, "_q_network")
target_q_network = getattr(copied_object, "_target_q_network")
if copy_weights:
with torch.no_grad():
for q_model in q_network.model:
q_model.weight *= (1 + sigma * torch.randn(q_model.weight.shape))
for tq_model in target_q_network.model:
tq_model.weight *= (1 + sigma * torch.randn(tq_model.weight.shape))
return copied_object
def save(self, data_path, optimizer_data_path=None):
"""Save checkpoint/trained model and optimizer.
Args:
data_path: Path for saving model. It can be relative or absolute but the
filename should be included. For example: q_network.pt or
/path/to/q_network.pt
optimizer_data_path: Path for saving the optimizer states. It can be
relative or absolute but the filename should be included. For example:
optimizer.pt or /path/to/optimizer.pt
"""
torch.save(self._q_network, data_path)
if optimizer_data_path is not None:
torch.save(self._optimizer, optimizer_data_path)
def load(self, data_path, optimizer_data_path=None):
"""Load checkpoint/trained model and optimizer.
Args:
data_path: Path for loading model. It can be relative or absolute but the
filename should be included. For example: q_network.pt or
/path/to/q_network.pt
optimizer_data_path: Path for loading the optimizer states. It can be
relative or absolute but the filename should be included. For example:
optimizer.pt or /path/to/optimizer.pt
"""
    self._q_network = torch.load(data_path)
    self._target_q_network = torch.load(data_path)
    if optimizer_data_path is not None:
      self._optimizer = torch.load(optimizer_data_path)
| open_spiel-master | open_spiel/python/pytorch/dqn.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An implementation of PPO.
Note: code adapted (with permission) from
https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/ppo.py and
https://github.com/vwxyzjn/ppo-implementation-details/blob/main/ppo_atari.py.
Currently only supports the single-agent case.
"""
import time
import numpy as np
import torch
from torch import nn
from torch import optim
from torch.distributions.categorical import Categorical
from open_spiel.python.rl_agent import StepOutput
INVALID_ACTION_PENALTY = -1e6
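# Logits of illegal actions are replaced by this large negative constant (see
# CategoricalMasked below), so their softmax probability is effectively zero.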
def layer_init(layer, std=np.sqrt(2), bias_const=0.0):
torch.nn.init.orthogonal_(layer.weight, std)
torch.nn.init.constant_(layer.bias, bias_const)
return layer
class CategoricalMasked(Categorical):
"""A masked categorical."""
# pylint: disable=dangerous-default-value
def __init__(self,
probs=None,
logits=None,
validate_args=None,
masks=[],
mask_value=None):
logits = torch.where(masks.bool(), logits, mask_value)
super(CategoricalMasked, self).__init__(probs, logits, validate_args)
class PPOAgent(nn.Module):
"""A PPO agent module."""
def __init__(self, num_actions, observation_shape, device):
super().__init__()
self.critic = nn.Sequential(
layer_init(nn.Linear(np.array(observation_shape).prod(), 64)),
nn.Tanh(),
layer_init(nn.Linear(64, 64)),
nn.Tanh(),
layer_init(nn.Linear(64, 1), std=1.0),
)
self.actor = nn.Sequential(
layer_init(nn.Linear(np.array(observation_shape).prod(), 64)),
nn.Tanh(),
layer_init(nn.Linear(64, 64)),
nn.Tanh(),
layer_init(nn.Linear(64, num_actions), std=0.01),
)
self.device = device
self.num_actions = num_actions
self.register_buffer("mask_value", torch.tensor(INVALID_ACTION_PENALTY))
def get_value(self, x):
return self.critic(x)
def get_action_and_value(self, x, legal_actions_mask=None, action=None):
if legal_actions_mask is None:
legal_actions_mask = torch.ones((len(x), self.num_actions)).bool()
logits = self.actor(x)
probs = CategoricalMasked(
logits=logits, masks=legal_actions_mask, mask_value=self.mask_value)
if action is None:
action = probs.sample()
return action, probs.log_prob(action), probs.entropy(), self.critic(
x), probs.probs
class PPOAtariAgent(nn.Module):
"""A PPO Atari agent module."""
def __init__(self, num_actions, observation_shape, device):
super(PPOAtariAgent, self).__init__()
# Note: this network is intended for atari games, taken from
# https://github.com/vwxyzjn/ppo-implementation-details/blob/main/ppo_atari.py
self.network = nn.Sequential(
layer_init(nn.Conv2d(4, 32, 8, stride=4)),
nn.ReLU(),
layer_init(nn.Conv2d(32, 64, 4, stride=2)),
nn.ReLU(),
layer_init(nn.Conv2d(64, 64, 3, stride=1)),
nn.ReLU(),
nn.Flatten(),
layer_init(nn.Linear(64 * 7 * 7, 512)),
nn.ReLU(),
)
self.actor = layer_init(nn.Linear(512, num_actions), std=0.01)
self.critic = layer_init(nn.Linear(512, 1), std=1)
self.num_actions = num_actions
self.device = device
self.register_buffer("mask_value", torch.tensor(INVALID_ACTION_PENALTY))
def get_value(self, x):
return self.critic(self.network(x / 255.0))
def get_action_and_value(self, x, legal_actions_mask=None, action=None):
if legal_actions_mask is None:
legal_actions_mask = torch.ones((len(x), self.num_actions)).bool()
hidden = self.network(x / 255.0)
logits = self.actor(hidden)
probs = CategoricalMasked(
logits=logits, masks=legal_actions_mask, mask_value=self.mask_value)
if action is None:
action = probs.sample()
return action, probs.log_prob(action), probs.entropy(), self.critic(
hidden), probs.probs
def legal_actions_to_mask(legal_actions_list, num_actions):
"""Converts a list of legal actions to a mask.
  The mask has one row per entry of `legal_actions_list` and `num_actions`
  columns, with a 1 in each legal position.
Args:
legal_actions_list: the list of legal actions
num_actions: number of actions (width of mask)
Returns:
legal actions mask.
"""
legal_actions_mask = torch.zeros((len(legal_actions_list), num_actions),
dtype=torch.bool)
for i, legal_actions in enumerate(legal_actions_list):
legal_actions_mask[i, legal_actions] = 1
return legal_actions_mask
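# Illustrative sketch added for exposition: two environments with legal action
# sets {0, 2} and {1} over four actions produce the boolean mask
# [[True, False, True, False], [False, True, False, False]].
def _example_legal_actions_to_mask():
  return legal_actions_to_mask([[0, 2], [1]], num_actions=4)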
class PPO(nn.Module):
"""PPO Agent implementation in PyTorch.
  See open_spiel/python/examples/ppo_example.py for a usage example.
Note that PPO runs multiple environments concurrently on each step (see
open_spiel/python/vector_env.py). In practice, this tends to improve PPO's
performance. The number of parallel environments is controlled by the
num_envs argument.
"""
def __init__(
self,
input_shape,
num_actions,
num_players,
player_id=0,
num_envs=1,
steps_per_batch=128,
num_minibatches=4,
update_epochs=4,
learning_rate=2.5e-4,
gae=True,
gamma=0.99,
gae_lambda=0.95,
normalize_advantages=True,
clip_coef=0.2,
clip_vloss=True,
entropy_coef=0.01,
value_coef=0.5,
max_grad_norm=0.5,
target_kl=None,
device="cpu",
writer=None, # Tensorboard SummaryWriter
agent_fn=PPOAtariAgent,
):
super().__init__()
self.input_shape = input_shape
self.num_actions = num_actions
self.num_players = num_players
self.player_id = player_id
self.device = device
# Training settings
self.num_envs = num_envs
self.steps_per_batch = steps_per_batch
self.batch_size = self.num_envs * self.steps_per_batch
self.num_minibatches = num_minibatches
self.minibatch_size = self.batch_size // self.num_minibatches
self.update_epochs = update_epochs
self.learning_rate = learning_rate
# Loss function
self.gae = gae
self.gamma = gamma
self.gae_lambda = gae_lambda
self.normalize_advantages = normalize_advantages
self.clip_coef = clip_coef
self.clip_vloss = clip_vloss
self.entropy_coef = entropy_coef
self.value_coef = value_coef
self.max_grad_norm = max_grad_norm
self.target_kl = target_kl
# Logging
self.writer = writer
# Initialize networks
self.network = agent_fn(self.num_actions, self.input_shape,
device).to(device)
self.optimizer = optim.Adam(
self.parameters(), lr=self.learning_rate, eps=1e-5)
# Initialize training buffers
self.legal_actions_mask = torch.zeros(
(self.steps_per_batch, self.num_envs, self.num_actions),
dtype=torch.bool).to(device)
self.obs = torch.zeros((self.steps_per_batch, self.num_envs) +
self.input_shape).to(device)
self.actions = torch.zeros((self.steps_per_batch, self.num_envs)).to(device)
self.logprobs = torch.zeros(
(self.steps_per_batch, self.num_envs)).to(device)
self.rewards = torch.zeros((self.steps_per_batch, self.num_envs)).to(device)
self.dones = torch.zeros((self.steps_per_batch, self.num_envs)).to(device)
self.values = torch.zeros((self.steps_per_batch, self.num_envs)).to(device)
# Initialize counters
self.cur_batch_idx = 0
self.total_steps_done = 0
self.updates_done = 0
self.start_time = time.time()
def get_value(self, x):
return self.network.get_value(x)
def get_action_and_value(self, x, legal_actions_mask=None, action=None):
return self.network.get_action_and_value(x, legal_actions_mask, action)
def step(self, time_step, is_evaluation=False):
if is_evaluation:
with torch.no_grad():
legal_actions_mask = legal_actions_to_mask([
ts.observations["legal_actions"][self.player_id] for ts in time_step
], self.num_actions).to(self.device)
obs = torch.Tensor(
np.array([
np.reshape(ts.observations["info_state"][self.player_id],
self.input_shape) for ts in time_step
])).to(self.device)
action, _, _, value, probs = self.get_action_and_value(
obs, legal_actions_mask=legal_actions_mask)
return [
StepOutput(action=a.item(), probs=p)
for (a, p) in zip(action, probs)
]
else:
with torch.no_grad():
# act
obs = torch.Tensor(
np.array([
np.reshape(ts.observations["info_state"][self.player_id],
self.input_shape) for ts in time_step
])).to(self.device)
legal_actions_mask = legal_actions_to_mask([
ts.observations["legal_actions"][self.player_id] for ts in time_step
], self.num_actions).to(self.device)
action, logprob, _, value, probs = self.get_action_and_value(
obs, legal_actions_mask=legal_actions_mask)
# store
self.legal_actions_mask[self.cur_batch_idx] = legal_actions_mask
self.obs[self.cur_batch_idx] = obs
self.actions[self.cur_batch_idx] = action
self.logprobs[self.cur_batch_idx] = logprob
self.values[self.cur_batch_idx] = value.flatten()
agent_output = [
StepOutput(action=a.item(), probs=p)
for (a, p) in zip(action, probs)
]
return agent_output
def post_step(self, reward, done):
self.rewards[self.cur_batch_idx] = torch.tensor(reward).to(
self.device).view(-1)
self.dones[self.cur_batch_idx] = torch.tensor(done).to(self.device).view(-1)
self.total_steps_done += self.num_envs
self.cur_batch_idx += 1
def learn(self, time_step):
next_obs = torch.Tensor(
np.array([
np.reshape(ts.observations["info_state"][self.player_id],
self.input_shape) for ts in time_step
])).to(self.device)
# bootstrap value if not done
with torch.no_grad():
next_value = self.get_value(next_obs).reshape(1, -1)
if self.gae:
advantages = torch.zeros_like(self.rewards).to(self.device)
lastgaelam = 0
for t in reversed(range(self.steps_per_batch)):
nextvalues = next_value if t == self.steps_per_batch - 1 else self.values[
t + 1]
nextnonterminal = 1.0 - self.dones[t]
delta = self.rewards[
t] + self.gamma * nextvalues * nextnonterminal - self.values[t]
advantages[
t] = lastgaelam = delta + self.gamma * self.gae_lambda * nextnonterminal * lastgaelam
returns = advantages + self.values
else:
returns = torch.zeros_like(self.rewards).to(self.device)
for t in reversed(range(self.steps_per_batch)):
next_return = next_value if t == self.steps_per_batch - 1 else returns[
t + 1]
nextnonterminal = 1.0 - self.dones[t]
returns[
t] = self.rewards[t] + self.gamma * nextnonterminal * next_return
advantages = returns - self.values
# flatten the batch
b_legal_actions_mask = self.legal_actions_mask.reshape(
(-1, self.num_actions))
b_obs = self.obs.reshape((-1,) + self.input_shape)
b_logprobs = self.logprobs.reshape(-1)
b_actions = self.actions.reshape(-1)
b_advantages = advantages.reshape(-1)
b_returns = returns.reshape(-1)
b_values = self.values.reshape(-1)
# Optimizing the policy and value network
b_inds = np.arange(self.batch_size)
clipfracs = []
for _ in range(self.update_epochs):
np.random.shuffle(b_inds)
for start in range(0, self.batch_size, self.minibatch_size):
end = start + self.minibatch_size
mb_inds = b_inds[start:end]
_, newlogprob, entropy, newvalue, _ = self.get_action_and_value(
b_obs[mb_inds],
legal_actions_mask=b_legal_actions_mask[mb_inds],
action=b_actions.long()[mb_inds])
logratio = newlogprob - b_logprobs[mb_inds]
ratio = logratio.exp()
with torch.no_grad():
# calculate approx_kl http://joschu.net/blog/kl-approx.html
old_approx_kl = (-logratio).mean()
approx_kl = ((ratio - 1) - logratio).mean()
clipfracs += [
((ratio - 1.0).abs() > self.clip_coef).float().mean().item()
]
mb_advantages = b_advantages[mb_inds]
if self.normalize_advantages:
mb_advantages = (mb_advantages - mb_advantages.mean()) / (
mb_advantages.std() + 1e-8)
# Policy loss
pg_loss1 = -mb_advantages * ratio
pg_loss2 = -mb_advantages * torch.clamp(ratio, 1 - self.clip_coef,
1 + self.clip_coef)
pg_loss = torch.max(pg_loss1, pg_loss2).mean()
# Value loss
newvalue = newvalue.view(-1)
if self.clip_vloss:
v_loss_unclipped = (newvalue - b_returns[mb_inds])**2
v_clipped = b_values[mb_inds] + torch.clamp(
newvalue - b_values[mb_inds],
-self.clip_coef,
self.clip_coef,
)
v_loss_clipped = (v_clipped - b_returns[mb_inds])**2
v_loss_max = torch.max(v_loss_unclipped, v_loss_clipped)
v_loss = 0.5 * v_loss_max.mean()
else:
v_loss = 0.5 * ((newvalue - b_returns[mb_inds])**2).mean()
entropy_loss = entropy.mean()
loss = pg_loss - self.entropy_coef * entropy_loss + v_loss * self.value_coef
self.optimizer.zero_grad()
loss.backward()
nn.utils.clip_grad_norm_(self.parameters(), self.max_grad_norm)
self.optimizer.step()
if self.target_kl is not None:
if approx_kl > self.target_kl:
break
y_pred, y_true = b_values.cpu().numpy(), b_returns.cpu().numpy()
var_y = np.var(y_true)
explained_var = np.nan if var_y == 0 else 1 - np.var(y_true -
y_pred) / var_y
# TRY NOT TO MODIFY: record rewards for plotting purposes
if self.writer is not None:
self.writer.add_scalar("charts/learning_rate",
self.optimizer.param_groups[0]["lr"],
self.total_steps_done)
self.writer.add_scalar("losses/value_loss", v_loss.item(),
self.total_steps_done)
self.writer.add_scalar("losses/policy_loss", pg_loss.item(),
self.total_steps_done)
self.writer.add_scalar("losses/entropy", entropy_loss.item(),
self.total_steps_done)
self.writer.add_scalar("losses/old_approx_kl", old_approx_kl.item(),
self.total_steps_done)
self.writer.add_scalar("losses/approx_kl", approx_kl.item(),
self.total_steps_done)
self.writer.add_scalar("losses/clipfrac", np.mean(clipfracs),
self.total_steps_done)
self.writer.add_scalar("losses/explained_variance", explained_var,
self.total_steps_done)
self.writer.add_scalar(
"charts/SPS",
int(self.total_steps_done / (time.time() - self.start_time)),
self.total_steps_done)
# Update counters
self.updates_done += 1
self.cur_batch_idx = 0
def anneal_learning_rate(self, update, num_total_updates):
# Annealing the rate
frac = 1.0 - (update / num_total_updates)
if frac <= 0:
raise ValueError("Annealing learning rate to <= 0")
lrnow = frac * self.learning_rate
self.optimizer.param_groups[0]["lr"] = lrnow
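# Illustrative training-loop skeleton added for exposition. Environment
# construction and stepping are intentionally elided; see
# open_spiel/python/examples/ppo_example.py for a complete, runnable loop.
# `agent` is assumed to be a PPO instance and `num_total_updates` the number
# of calls to learn() planned for the run.
def _example_training_skeleton(agent, num_total_updates):
  for update in range(num_total_updates):
    agent.anneal_learning_rate(update, num_total_updates)
    # 1) Collect agent.steps_per_batch transitions: for each environment step,
    #    call agent.step(time_step) to act, then agent.post_step(reward, done).
    # 2) Once the batch is full, call agent.learn(final_time_step) to run the
    #    clipped-surrogate PPO update over the collected batch.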
| open_spiel-master | open_spiel/python/pytorch/ppo.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Regression counterfactual regret minimization (RCFR) [Waugh et al., 2015; Morrill, 2016].
In contrast to (tabular) counterfactual regret minimization (CFR)
[Zinkevich et al., 2007], RCFR replaces the table of regrets that generate the
current policy profile with a profile of regression models. The average
policy is still tracked exactly with a full game-size table. The exploitability
of the average policy in zero-sum games decreases as the model accuracy and
the number of iterations increase [Waugh et al., 2015; Morrill, 2016]. As long
as the regression model errors decrease across iterations, the average policy
converges toward a Nash equilibrium in zero-sum games.
# References
Dustin Morrill. Using Regret Estimation to Solve Games Compactly.
M.Sc. thesis, Computing Science Department, University of Alberta,
Apr 1, 2016, Edmonton Alberta, Canada.
Kevin Waugh, Dustin Morrill, J. Andrew Bagnell, and Michael Bowling.
Solving Games with Functional Regret Estimation. At the Twenty-Ninth AAAI
Conference on Artificial Intelligence, January 25-29, 2015, Austin Texas,
USA. Pages 2138-2145.
Martin Zinkevich, Michael Johanson, Michael Bowling, and Carmelo Piccione.
Regret Minimization in Games with Incomplete Information.
At Advances in Neural Information Processing Systems 20 (NeurIPS). 2007.
"""
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
def tensor_to_matrix(tensor):
"""Converts `tensor` to a matrix (a rank-2 tensor) or raises an exception.
Args:
tensor: The tensor to convert.
Returns:
A PyTorch matrix (rank-2 `torch.Tensor`).
Raises:
ValueError: If `tensor` cannot be trivially converted to a matrix, i.e.
`tensor` has a rank > 2.
"""
tensor = torch.Tensor(tensor)
rank = tensor.ndim
# rank = len(list(tensor.shape))
if rank > 2:
raise ValueError(
("Tensor {} cannot be converted into a matrix as it is rank "
"{} > 2.").format(tensor, rank))
elif rank < 2:
num_columns = 1 if rank == 0 else tensor.shape[0]
tensor = torch.reshape(tensor, [1, num_columns])
return tensor
def with_one_hot_action_features(state_features, legal_actions,
num_distinct_actions):
"""Constructs features for each sequence by extending state features.
Sequences features are constructed by concatenating one-hot features
indicating each action to the information state features and stacking them.
Args:
state_features: The features for the information state alone. Must be a
`torch.Tensor` with a rank less than or equal to (if batched) 2.
legal_actions: The list of legal actions in this state. Determines the
number of rows in the returned feature matrix.
num_distinct_actions: The number of globally distinct actions in the game.
Determines the length of the action feature vector concatenated onto the
state features.
Returns:
    A `torch.Tensor` feature matrix with one row for each sequence and
    (number of state features + `num_distinct_actions`) columns.
Raises:
ValueError: If `state_features` has a rank > 2.
"""
state_features = tensor_to_matrix(state_features)
with_action_features = []
for action in legal_actions:
action_features = F.one_hot(
torch.tensor([action]), num_classes=num_distinct_actions)
all_features = torch.cat([state_features, action_features], axis=1)
with_action_features.append(all_features)
return torch.cat(with_action_features, axis=0)
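# Illustrative sketch added for exposition: a 2-feature state with legal
# actions {0, 1, 3} out of 4 distinct actions yields a 3 x (2 + 4) feature
# matrix, one row per legal action.
def _example_one_hot_action_features():
  features = with_one_hot_action_features(
      torch.tensor([1.0, 0.0]), legal_actions=[0, 1, 3],
      num_distinct_actions=4)
  return features.shape  # torch.Size([3, 6])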
def sequence_features(state, num_distinct_actions):
"""The sequence features at `state`.
Features are constructed by concatenating `state`'s normalized feature
vector with one-hot vectors indicating each action (see
`with_one_hot_action_features`).
Args:
state: An OpenSpiel `State`.
num_distinct_actions: The number of globally distinct actions in `state`'s
game.
Returns:
A `torch.Tensor` feature matrix with one row for each sequence.
"""
return with_one_hot_action_features(state.information_state_tensor(),
state.legal_actions(),
num_distinct_actions)
def num_features(game):
"""Returns the number of features returned by `sequence_features`.
Args:
game: An OpenSpiel `Game`.
"""
return game.information_state_tensor_size() + game.num_distinct_actions()
class RootStateWrapper(object):
"""Analyzes the subgame at a given root state.
It enumerates features for each player sequence, creates a mapping between
information states to sequence index offsets, and caches terminal values
in a dictionary with history string keys.
Properties:
root: An OpenSpiel `State`.
sequence_features: A `list` of sequence feature matrices, one for each
player. This list uses depth-first, information state-major ordering, so
sequences are grouped by information state. I.e. the first legal action
in the first state has index 0, the second action in the same information
      state has index 1, the third action will have index 2, and so on.
Sequences in the next information state descendant of the first action
will begin indexing its sequences at the number of legal actions in the
ancestor information state.
num_player_sequences: The number of sequences for each player.
info_state_to_sequence_idx: A `dict` mapping each information state string
to the `sequence_features` index of the first sequence in the
corresponding information state.
terminal_values: A `dict` mapping history strings to terminal values for
each player.
"""
def __init__(self, state):
self.root = state
self._num_distinct_actions = len(state.legal_actions_mask(0))
self.sequence_features = [[] for _ in range(state.num_players())]
self.num_player_sequences = [0] * state.num_players()
self.info_state_to_sequence_idx = {}
self.terminal_values = {}
self._walk_descendants(state)
self.sequence_features = [
torch.cat(rows, axis=0) for rows in self.sequence_features
]
def _walk_descendants(self, state):
"""Records information about `state` and its descendants."""
if state.is_terminal():
self.terminal_values[state.history_str()] = np.array(state.returns())
return
elif state.is_chance_node():
for action, _ in state.chance_outcomes():
self._walk_descendants(state.child(action))
return
player = state.current_player()
info_state = state.information_state_string(player)
actions = state.legal_actions()
if info_state not in self.info_state_to_sequence_idx:
n = self.num_player_sequences[player]
self.info_state_to_sequence_idx[info_state] = n
self.sequence_features[player].append(
sequence_features(state, self._num_distinct_actions))
self.num_player_sequences[player] += len(actions)
for action in actions:
self._walk_descendants(state.child(action))
def sequence_weights_to_policy(self, sequence_weights, state):
"""Returns a behavioral policy at `state` from sequence weights.
Args:
sequence_weights: An array of non-negative weights, one for each of
`state.current_player()`'s sequences in `state`'s game.
state: An OpenSpiel `State` that represents an information state in an
alternating-move game.
Returns:
A `np.array<double>` probability distribution representing the policy in
`state` encoded by `sequence_weights`. Weights corresponding to actions
in `state` are normalized by their sum.
Raises:
ValueError: If there are too few sequence weights at `state`.
"""
info_state = state.information_state_string()
sequence_offset = self.info_state_to_sequence_idx[info_state]
actions = state.legal_actions()
sequence_idx_end = sequence_offset + len(actions)
weights = sequence_weights[sequence_offset:sequence_idx_end]
if len(weights) < len(actions):
raise ValueError(
("Invalid policy: Policy {player} at sequence offset "
"{sequence_offset} has only {policy_len} elements but there "
"are {num_actions} legal actions.").format(
player=state.current_player(),
sequence_offset=sequence_offset,
policy_len=len(weights),
num_actions=len(actions)))
return normalized_by_sum(weights)
def sequence_weights_to_policy_fn(self, player_sequence_weights):
"""Returns a policy function based on sequence weights for each player.
Args:
player_sequence_weights: A list of weight arrays, one for each player.
Each array should have a weight for each of that player's sequences in
`state`'s game.
Returns:
A `State` -> `np.array<double>` function. The output of this function is
a probability distribution that represents the policy at the given
`State` encoded by `player_sequence_weights` according to
`sequence_weights_to_policy`.
"""
def policy_fn(state):
player = state.current_player()
return self.sequence_weights_to_policy(player_sequence_weights[player],
state)
return policy_fn
def sequence_weights_to_tabular_profile(self, player_sequence_weights):
"""Returns the tabular profile-form of `player_sequence_weights`."""
return sequence_weights_to_tabular_profile(
self.root, self.sequence_weights_to_policy_fn(player_sequence_weights))
def counterfactual_regrets_and_reach_weights(self, regret_player,
reach_weight_player,
*sequence_weights):
"""Returns counterfactual regrets and reach weights as a tuple.
Args:
regret_player: The player for whom counterfactual regrets are computed.
reach_weight_player: The player for whom reach weights are computed.
*sequence_weights: A list of non-negative sequence weights for each player
determining the policy profile. Behavioral policies are generated by
normalizing sequence weights corresponding to actions in each
information state by their sum.
Returns:
The counterfactual regrets and reach weights as an `np.array`-`np.array`
tuple.
Raises:
ValueError: If there are too few sequence weights at any information state
for any player.
"""
num_players = len(sequence_weights)
regrets = np.zeros(self.num_player_sequences[regret_player])
reach_weights = np.zeros(self.num_player_sequences[reach_weight_player])
def _walk_descendants(state, reach_probabilities, chance_reach_probability):
"""Compute `state`'s counterfactual regrets and reach weights.
Args:
state: An OpenSpiel `State`.
reach_probabilities: The probability that each player plays to reach
`state`'s history.
chance_reach_probability: The probability that all chance outcomes in
`state`'s history occur.
Returns:
The counterfactual value of `state`'s history.
Raises:
ValueError if there are too few sequence weights at any information
state for any player.
"""
if state.is_terminal():
player_reach = (
np.prod(reach_probabilities[:regret_player]) *
np.prod(reach_probabilities[regret_player + 1:]))
counterfactual_reach_prob = player_reach * chance_reach_probability
u = self.terminal_values[state.history_str()]
return u[regret_player] * counterfactual_reach_prob
elif state.is_chance_node():
v = 0.0
for action, action_prob in state.chance_outcomes():
v += _walk_descendants(
state.child(action), reach_probabilities,
chance_reach_probability * action_prob)
return v
player = state.current_player()
info_state = state.information_state_string(player)
sequence_idx_offset = self.info_state_to_sequence_idx[info_state]
actions = state.legal_actions(player)
sequence_idx_end = sequence_idx_offset + len(actions)
my_sequence_weights = sequence_weights[player][
sequence_idx_offset:sequence_idx_end]
if len(my_sequence_weights) < len(actions):
raise ValueError(
("Invalid policy: Policy {player} at sequence offset "
"{sequence_idx_offset} has only {policy_len} elements but there "
"are {num_actions} legal actions.").format(
player=player,
sequence_idx_offset=sequence_idx_offset,
policy_len=len(my_sequence_weights),
num_actions=len(actions)))
policy = normalized_by_sum(my_sequence_weights)
action_values = np.zeros(len(actions))
state_value = 0.0
is_reach_weight_player_node = player == reach_weight_player
is_regret_player_node = player == regret_player
reach_prob = reach_probabilities[player]
for action_idx, action in enumerate(actions):
action_prob = policy[action_idx]
next_reach_prob = reach_prob * action_prob
if is_reach_weight_player_node:
reach_weight_player_plays_down_this_line = next_reach_prob > 0
if not reach_weight_player_plays_down_this_line:
continue
sequence_idx = sequence_idx_offset + action_idx
reach_weights[sequence_idx] += next_reach_prob
reach_probabilities[player] = next_reach_prob
action_value = _walk_descendants(
state.child(action), reach_probabilities, chance_reach_probability)
if is_regret_player_node:
state_value = state_value + action_prob * action_value
else:
state_value = state_value + action_value
action_values[action_idx] = action_value
reach_probabilities[player] = reach_prob
if is_regret_player_node:
regrets[sequence_idx_offset:sequence_idx_end] += (
action_values - state_value)
return state_value
# End of _walk_descendants
_walk_descendants(self.root, np.ones(num_players), 1.0)
return regrets, reach_weights
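# Illustrative sketch added for exposition: wraps Kuhn poker's root state and
# computes player 0's counterfactual regrets and player 1's reach weights
# under uniform (all-ones) sequence weights. `pyspiel` is imported locally
# because this module does not otherwise depend on it.
def _example_root_state_wrapper():
  import pyspiel
  game = pyspiel.load_game("kuhn_poker")
  wrapper = RootStateWrapper(game.new_initial_state())
  uniform_weights = [np.ones(n) for n in wrapper.num_player_sequences]
  return wrapper.counterfactual_regrets_and_reach_weights(
      0, 1, *uniform_weights)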
def normalized_by_sum(v, axis=0, mutate=False):
"""Divides each element of `v` along `axis` by the sum of `v` along `axis`.
Assumes `v` is non-negative. Sets of `v` elements along `axis` that sum to
zero are normalized to `1 / v.shape[axis]` (a uniform distribution).
Args:
v: Non-negative array of values.
axis: An integer axis.
mutate: Whether or not to store the result in `v`.
Returns:
The normalized array.
"""
v = np.asarray(v)
denominator = v.sum(axis=axis, keepdims=True)
denominator_is_zero = denominator == 0
# Every element of `denominator_is_zero` that is true corresponds to a
# set of elements in `v` along `axis` that are all zero. By setting these
# denominators to `v.shape[axis]` and adding 1 to each of the corresponding
# elements in `v`, these elements are normalized to `1 / v.shape[axis]`
# (a uniform distribution).
denominator += v.shape[axis] * denominator_is_zero
if mutate:
v += denominator_is_zero
v /= denominator
else:
v = (v + denominator_is_zero) / denominator
return v
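# Illustrative worked example added for exposition: rows are normalized along
# axis 1 and the all-zero row falls back to the uniform distribution, giving
# [[0.5, 0.5, 0.0], [1/3, 1/3, 1/3]].
def _example_normalized_by_sum():
  v = np.array([[2.0, 2.0, 0.0], [0.0, 0.0, 0.0]])
  return normalized_by_sum(v, axis=1)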
def relu(v):
"""Returns the element-wise maximum between `v` and 0."""
return np.maximum(v, 0)
def _descendant_states(state, depth_limit, depth, include_terminals,
include_chance_states):
"""Recursive descendant state generator.
Decision states are always yielded.
Args:
state: The current state.
depth_limit: The descendant depth limit. Zero will ensure only
`initial_state` is generated and negative numbers specify the absence of a
limit.
depth: The current descendant depth.
include_terminals: Whether or not to include terminal states.
include_chance_states: Whether or not to include chance states.
Yields:
`State`, a state that is `initial_state` or one of its descendants.
"""
if state.is_terminal():
if include_terminals:
yield state
return
if depth > depth_limit >= 0:
return
if not state.is_chance_node() or include_chance_states:
yield state
for action in state.legal_actions():
state_for_search = state.child(action)
for substate in _descendant_states(state_for_search, depth_limit, depth + 1,
include_terminals,
include_chance_states):
yield substate
def all_states(initial_state,
depth_limit=-1,
include_terminals=False,
include_chance_states=False):
"""Generates states from `initial_state`.
Generates the set of states that includes only the `initial_state` and its
descendants that satisfy the inclusion criteria specified by the remaining
parameters. Decision states are always included.
Args:
initial_state: The initial state from which to generate states.
depth_limit: The descendant depth limit. Zero will ensure only
`initial_state` is generated and negative numbers specify the absence of a
limit. Defaults to no limit.
include_terminals: Whether or not to include terminal states. Defaults to
`False`.
include_chance_states: Whether or not to include chance states. Defaults to
`False`.
Returns:
A generator that yields the `initial_state` and its descendants that
satisfy the inclusion criteria specified by the remaining parameters.
"""
return _descendant_states(
state=initial_state,
depth_limit=depth_limit,
depth=0,
include_terminals=include_terminals,
include_chance_states=include_chance_states)
def sequence_weights_to_tabular_profile(root, policy_fn):
"""Returns the `dict` of `list`s of action-prob pairs-form of `policy_fn`."""
tabular_policy = {}
players = list(range(root.num_players()))
for state in all_states(root):
for player in players:
legal_actions = state.legal_actions(player)
if len(legal_actions) < 1:
continue
info_state = state.information_state_string(player)
if info_state in tabular_policy:
continue
my_policy = policy_fn(state)
tabular_policy[info_state] = list(zip(legal_actions, my_policy))
return tabular_policy
def feedforward_evaluate(layers,
x,
use_skip_connections=False,
hidden_are_factored=False,
hidden_activation=nn.ReLU):
"""Evaluates `layers` as a feedforward neural network on `x`.
Args:
layers: The neural network layers (`torch.Tensor` -> `torch.Tensor`
callables).
x: The array-like input to evaluate. Must be trivially convertible to a
matrix (tensor rank <= 2).
use_skip_connections: Whether or not to use skip connections between layers.
If the layer input has too few features to be added to the layer output,
then the end of input is padded with zeros. If it has too many features,
then the input is truncated.
hidden_are_factored: Whether or not hidden logical layers are factored into
two separate linear transformations stored as adjacent elements of
`layers`.
hidden_activation: the activation function following the hidden layers.
Returns:
The `torch.Tensor` evaluation result.
Raises:
ValueError: If `x` has a rank greater than 2.
"""
x = tensor_to_matrix(x)
i = 0
while i < len(layers) - 1:
if isinstance(layers[i], hidden_activation):
x = layers[i](x)
i += 1
continue
y = layers[i](x)
i += 1
if hidden_are_factored:
y = layers[i](y)
i += 1
if use_skip_connections:
my_num_features = x.shape[1]
padding = y.shape[1] - my_num_features
if padding > 0:
zeros = torch.zeros([x.shape[0], padding])
x = torch.cat([x, zeros], axis=1)
elif padding < 0:
x = x[0:x.shape[0], 0:y.shape[1]]
y = x + y
x = y
return layers[-1](x)
class DeepRcfrModel(nn.Module):
"""A flexible deep feedforward RCFR model class.
Properties:
    layers: The `torch.nn.Module` layers describing this model.
"""
def __init__(self,
game,
num_hidden_units,
num_hidden_layers=1,
num_hidden_factors=0,
hidden_activation=nn.ReLU,
use_skip_connections=False,
regularizer=None):
"""Creates a new `DeepRcfrModel.
Args:
game: The OpenSpiel game being solved.
num_hidden_units: The number of units in each hidden layer.
num_hidden_layers: The number of hidden layers. Defaults to 1.
num_hidden_factors: The number of hidden factors or the matrix rank of the
layer. If greater than zero, hidden layers will be split into two
separate linear transformations, the first with
`num_hidden_factors`-columns and the second with
`num_hidden_units`-columns. The result is that the logical hidden layer
        is a rank-`num_hidden_factors` matrix instead of a
        rank-`num_hidden_units` matrix. When
        `num_hidden_factors < num_hidden_units`, this effectively implements
        weight sharing. Defaults to 0.
hidden_activation: The activation function to apply over hidden layers.
Defaults to `torch.nn.ReLU`.
use_skip_connections: Whether or not to apply skip connections (layer
output = layer(x) + x) on hidden layers. Zero padding or truncation is
used to match the number of columns on layer inputs and outputs.
regularizer: A regularizer to apply to each layer. Defaults to `None`.
"""
super(DeepRcfrModel, self).__init__()
self._use_skip_connections = use_skip_connections
self._hidden_are_factored = num_hidden_factors > 0
self._hidden_activation = hidden_activation
input_size = num_features(game)
self.layers = []
for _ in range(num_hidden_layers):
if self._hidden_are_factored:
self.layers.append(nn.Linear(input_size, num_hidden_factors, bias=True))
self.layers.append(
nn.Linear(
num_hidden_factors if self._hidden_are_factored else input_size,
num_hidden_units,
bias=True))
if hidden_activation:
self.layers.append(hidden_activation())
self.layers.append(nn.Linear(num_hidden_units, 1, bias=True))
self.layers = nn.ModuleList(self.layers)
# Construct variables for all layers by exercising the network.
x = torch.zeros([1, num_features(game)])
for layer in self.layers:
x = layer(x)
def __call__(self, x):
"""Evaluates this model on `x`."""
return feedforward_evaluate(
layers=self.layers,
x=x,
use_skip_connections=self._use_skip_connections,
hidden_are_factored=self._hidden_are_factored,
hidden_activation=self._hidden_activation)
class _RcfrSolver(object):
"""An abstract RCFR solver class.
Requires that subclasses implement `evaluate_and_update_policy`.
"""
def __init__(self, game, models, truncate_negative=False):
"""Creates a new `_RcfrSolver`.
Args:
game: An OpenSpiel `Game`.
models: Current policy models (optimizable array-like -> `torch.Tensor`
callables) for both players.
truncate_negative: Whether or not to truncate negative (approximate)
cumulative regrets to zero to implement RCFR+. Defaults to `False`.
"""
self._game = game
self._models = models
self._truncate_negative = truncate_negative
self._root_wrapper = RootStateWrapper(game.new_initial_state())
self._cumulative_seq_probs = [
np.zeros(n) for n in self._root_wrapper.num_player_sequences
]
def _sequence_weights(self, player=None):
"""Returns regret-like weights for each sequence as an `np.array`.
Negative weights are truncated to zero.
Args:
player: The player to compute weights for, or both if `player` is `None`.
Defaults to `None`.
"""
if player is None:
return [
self._sequence_weights(player)
for player in range(self._game.num_players())
]
else:
tensor = F.relu(
torch.squeeze(self._models[player](
self._root_wrapper.sequence_features[player])))
return tensor.detach().numpy()
def evaluate_and_update_policy(self, train_fn):
"""Performs a single step of policy evaluation and policy improvement.
Args:
train_fn: A (model, `torch.data.Dataset`) function that trains the given
regression model to accurately reproduce the x to y mapping given x-y
data.
Raises:
NotImplementedError: If not overridden by child class.
"""
raise NotImplementedError()
def current_policy(self):
"""Returns the current policy profile.
Returns:
A `dict<info state, list<Action, probability>>` that maps info state
strings to `Action`-probability pairs describing each player's policy.
"""
return self._root_wrapper.sequence_weights_to_tabular_profile(
self._sequence_weights())
def average_policy(self):
"""Returns the average of all policies iterated.
This average policy converges toward a Nash policy as the number of
iterations increases as long as the regret prediction error decreases
continually [Morrill, 2016].
The policy is computed using the accumulated policy probabilities computed
using `evaluate_and_update_policy`.
Returns:
A `dict<info state, list<Action, probability>>` that maps info state
strings to (Action, probability) pairs describing each player's policy.
"""
return self._root_wrapper.sequence_weights_to_tabular_profile(
self._cumulative_seq_probs)
def _previous_player(self, player):
"""The previous player in the turn ordering."""
return player - 1 if player > 0 else self._game.num_players() - 1
def _average_policy_update_player(self, regret_player):
"""The player for whom the average policy should be updated."""
return self._previous_player(regret_player)
class RcfrSolver(_RcfrSolver):
"""RCFR with an effectively infinite regret data buffer.
  Exact or bootstrapped cumulative regrets are stored as if in an infinitely
large data buffer. The average strategy is updated and stored in a full
game-size table. Reproduces the RCFR versions used in experiments by
Waugh et al. [2015] and Morrill [2016] except that this class does not
restrict the user to regression tree models.
"""
def __init__(self, game, models, bootstrap=None, truncate_negative=False):
self._bootstrap = bootstrap
super(RcfrSolver, self).__init__(
game, models, truncate_negative=truncate_negative)
self._regret_targets = [
np.zeros(n) for n in self._root_wrapper.num_player_sequences
]
def evaluate_and_update_policy(self, train_fn):
"""Performs a single step of policy evaluation and policy improvement.
Args:
train_fn: A (model, `torch.data.Dataset`) function that trains the given
regression model to accurately reproduce the x to y mapping given x-y
data.
"""
sequence_weights = self._sequence_weights()
player_seq_features = self._root_wrapper.sequence_features
for regret_player in range(self._game.num_players()):
seq_prob_player = self._average_policy_update_player(regret_player)
regrets, seq_probs = (
self._root_wrapper.counterfactual_regrets_and_reach_weights(
regret_player, seq_prob_player, *sequence_weights))
if self._bootstrap:
self._regret_targets[regret_player][:] = sequence_weights[regret_player]
if self._truncate_negative:
regrets = np.maximum(-relu(self._regret_targets[regret_player]),
regrets)
self._regret_targets[regret_player] += regrets
self._cumulative_seq_probs[seq_prob_player] += seq_probs
targets = torch.unsqueeze(
torch.Tensor(self._regret_targets[regret_player]), axis=1)
data = torch.utils.data.TensorDataset(player_seq_features[regret_player],
targets)
regret_player_model = self._models[regret_player]
train_fn(regret_player_model, data)
sequence_weights[regret_player] = self._sequence_weights(regret_player)
class ReservoirBuffer(object):
"""A generic reservoir buffer data structure.
After every insertion, its contents represents a `size`-size uniform
random sample from the stream of candidates that have been encountered.
"""
def __init__(self, size):
self.size = size
self.num_elements = 0
self._buffer = np.full([size], None, dtype=object)
self._num_candidates = 0
@property
def buffer(self):
return self._buffer[:self.num_elements]
def insert(self, candidate):
"""Consider this `candidate` for inclusion in this sampling buffer."""
self._num_candidates += 1
if self.num_elements < self.size:
self._buffer[self.num_elements] = candidate
self.num_elements += 1
return
idx = np.random.choice(self._num_candidates)
if idx < self.size:
self._buffer[idx] = candidate
def insert_all(self, candidates):
"""Consider all `candidates` for inclusion in this sampling buffer."""
for candidate in candidates:
self.insert(candidate)
def num_available_spaces(self):
"""The number of freely available spaces in this buffer."""
return self.size - self.num_elements
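# Illustrative sketch added for exposition: after offering 100 candidates to a
# size-10 reservoir, the buffer holds a uniform random sample of 10 of them.
def _example_reservoir_buffer():
  buffer = ReservoirBuffer(10)
  buffer.insert_all(range(100))
  return buffer.buffer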
class ReservoirRcfrSolver(_RcfrSolver):
"""RCFR with a reservoir buffer for storing regret data.
The average strategy is updated and stored in a full game-size table.
"""
def __init__(self, game, models, buffer_size, truncate_negative=False):
self._buffer_size = buffer_size
super(ReservoirRcfrSolver, self).__init__(
game, models, truncate_negative=truncate_negative)
self._reservoirs = [
ReservoirBuffer(self._buffer_size) for _ in range(game.num_players())
]
def evaluate_and_update_policy(self, train_fn):
"""Performs a single step of policy evaluation and policy improvement.
Args:
train_fn: A (model, `torch.data.Dataset`) function that trains the given
regression model to accurately reproduce the x to y mapping given x-y
data.
"""
sequence_weights = self._sequence_weights()
player_seq_features = self._root_wrapper.sequence_features
for regret_player in range(self._game.num_players()):
seq_prob_player = self._average_policy_update_player(regret_player)
regrets, seq_probs = (
self._root_wrapper.counterfactual_regrets_and_reach_weights(
regret_player, seq_prob_player, *sequence_weights))
if self._truncate_negative:
regrets = np.maximum(-relu(sequence_weights[regret_player]), regrets)
next_data = list(
zip(player_seq_features[regret_player],
torch.unsqueeze(torch.Tensor(regrets), axis=1)))
self._reservoirs[regret_player].insert_all(next_data)
self._cumulative_seq_probs[seq_prob_player] += seq_probs
my_buffer = list(
torch.stack(a) for a in zip(*self._reservoirs[regret_player].buffer))
data = torch.utils.data.TensorDataset(*my_buffer)
regret_player_model = self._models[regret_player]
train_fn(regret_player_model, data)
sequence_weights[regret_player] = self._sequence_weights(regret_player)
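# Illustrative end-to-end sketch added for exposition: trains RcfrSolver on
# Kuhn poker with two DeepRcfrModel regressors. The optimizer, learning rate,
# batch size, and epoch count below are arbitrary placeholders rather than
# tuned settings; see open_spiel/python/examples for reference configurations.
def _example_rcfr_training(num_iterations=2):
  import pyspiel
  game = pyspiel.load_game("kuhn_poker")
  models = [DeepRcfrModel(game, num_hidden_units=13) for _ in range(2)]
  def train_fn(model, dataset):
    data = torch.utils.data.DataLoader(dataset, batch_size=32, shuffle=True)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.005)
    for _ in range(10):  # A few epochs over the regret targets.
      for x, y in data:
        optimizer.zero_grad()
        F.mse_loss(model(x), y).backward()
        optimizer.step()
  solver = RcfrSolver(game, models)
  for _ in range(num_iterations):
    solver.evaluate_and_update_policy(train_fn)
  return solver.average_policy()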
| open_spiel-master | open_spiel/python/pytorch/rcfr.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Neural Replicator Dynamics [Omidshafiei et al, 2019].
A policy gradient-like extension to replicator dynamics and the hedge algorithm
that incorporates function approximation.
# References
Shayegan Omidshafiei, Daniel Hennes, Dustin Morrill, Remi Munos,
Julien Perolat, Marc Lanctot, Audrunas Gruslys, Jean-Baptiste Lespiau,
Karl Tuyls. Neural Replicator Dynamics. https://arxiv.org/abs/1906.00190.
2019.
"""
import numpy as np
import torch
from torch import nn
from open_spiel.python.pytorch import rcfr
def thresholded(logits, regrets, threshold=2.0):
"""Zeros out `regrets` where `logits` are too negative or too large."""
can_decrease = torch.gt(logits, -threshold).float()
can_increase = torch.lt(logits, threshold).float()
regrets_negative = torch.minimum(regrets, torch.Tensor([0.0]))
regrets_positive = torch.maximum(regrets, torch.Tensor([0.0]))
return can_decrease * regrets_negative + can_increase * regrets_positive
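# Illustrative worked example added for exposition: with the default threshold
# of 2, positive regret is zeroed where the logit is already above the
# threshold and negative regret is zeroed where the logit is already below
# the negative threshold, giving [[0., 0., 1.]].
def _example_thresholded():
  logits = torch.Tensor([[2.5, -2.5, 0.0]])
  regrets = torch.Tensor([[1.0, -1.0, 1.0]])
  return thresholded(logits, regrets)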
def train(model,
data,
batch_size,
step_size=1.0,
threshold=2.0,
autoencoder_loss=None):
"""Train NeuRD `model` on `data`."""
data = torch.utils.data.DataLoader(data, batch_size=batch_size, shuffle=True)
for x, regrets in data:
output = model(x, training=True)
logits = output[:, :1]
logits = logits - torch.mean(logits)
regrets = thresholded(logits, regrets, threshold=threshold).detach()
utility = torch.mean(logits * regrets)
if autoencoder_loss is not None:
utility = utility - autoencoder_loss(x, output[:, 1:])
model.zero_grad()
utility.backward()
with torch.no_grad():
for var in model.layers.parameters():
new_var = var + step_size * var.grad
var.copy_(new_var)
class DeepNeurdModel(nn.Module):
"""A flexible deep feedforward NeuRD model class.
Properties:
layers: The `torch.nn.Linear` layers describing this model.
"""
def __init__(self,
game,
num_hidden_units,
num_hidden_layers=1,
num_hidden_factors=0,
hidden_activation=nn.ReLU,
use_skip_connections=False,
autoencode=False):
"""Creates a new `DeepNeurdModel.
Args:
game: The OpenSpiel game being solved.
num_hidden_units: The number of units in each hidden layer.
num_hidden_layers: The number of hidden layers. Defaults to 1.
num_hidden_factors: The number of hidden factors or the matrix rank of the
layer. If greater than zero, hidden layers will be split into two
separate linear transformations, the first with
`num_hidden_factors`-columns and the second with
`num_hidden_units`-columns. The result is that the logical hidden layer
        is a rank-`num_hidden_factors` matrix instead of a
        rank-`num_hidden_units` matrix. When
        `num_hidden_factors < num_hidden_units`, this effectively implements
        weight sharing. Defaults to 0.
hidden_activation: The activation function to apply over hidden layers.
        Defaults to `torch.nn.ReLU`.
use_skip_connections: Whether or not to apply skip connections (layer
output = layer(x) + x) on hidden layers. Zero padding or truncation is
used to match the number of columns on layer inputs and outputs.
autoencode: Whether or not to output a reconstruction of the inputs upon
being called. Defaults to `False`.
"""
super(DeepNeurdModel, self).__init__()
self._autoencode = autoencode
self._use_skip_connections = use_skip_connections
self._hidden_are_factored = num_hidden_factors > 0
self.layers = nn.ModuleList()
self.input_size = rcfr.num_features(game)
for _ in range(num_hidden_layers):
if self._hidden_are_factored:
self.layers.append(
nn.Linear(self.input_size, num_hidden_factors, bias=True))
self.input_size = num_hidden_factors
self.layers.append(
nn.Linear(self.input_size, num_hidden_units, bias=True))
if hidden_activation:
self.layers.append(hidden_activation())
self.input_size = num_hidden_units
self.layers.append(
nn.Linear(
self.input_size,
1 + self._autoencode * rcfr.num_features(game),
bias=True))
def forward(self, x, training=False):
"""Evaluates this model on x.
Args:
x: Model input.
training: Whether or not this is being called during training. If
`training` and the constructor argument `autoencode` was `True`, then
the output will contain the estimated regrets concatenated with a
reconstruction of the input, otherwise only regrets will be returned.
Defaults to `False`.
Returns:
The `torch.Tensor` resulting from evaluating this model on `x`. If
`training` and the constructor argument `autoencode` was `True`, then
it will contain the estimated regrets concatenated with a
reconstruction of the input, otherwise only regrets will be returned.
"""
y = rcfr.feedforward_evaluate(
layers=self.layers,
x=x,
use_skip_connections=self._use_skip_connections,
hidden_are_factored=self._hidden_are_factored)
return y if training else y[:, :1]
class CounterfactualNeurdSolver(object):
"""All-actions, strong NeuRD on counterfactual regrets.
No regularization bonus is applied, so the current policy likely will not
converge. The average policy profile is updated and stored in a full
game-size table and may converge to an approximate Nash equilibrium in
two-player, zero-sum games.
"""
def __init__(self, game, models):
"""Creates a new `CounterfactualNeurdSolver`.
Args:
game: An OpenSpiel `Game`.
models: Current policy models (optimizable array-like -> `torch.Tensor`
callables) for both players.
"""
self._game = game
self._models = models
self._root_wrapper = rcfr.RootStateWrapper(game.new_initial_state())
self._cumulative_seq_probs = [
np.zeros(n) for n in self._root_wrapper.num_player_sequences
]
def _sequence_weights(self, player=None):
"""Returns exponentiated weights for each sequence as an `np.array`."""
if player is None:
return [
self._sequence_weights(player)
for player in range(self._game.num_players())
]
else:
tensor = torch.squeeze(self._models[player](
self._root_wrapper.sequence_features[player]))
tensor = tensor - torch.max(tensor, dim=0)[0]
tensor = torch.exp(tensor)
return tensor.detach().numpy()
def current_policy(self):
"""Returns the current policy profile.
Returns:
A `dict<info state, list<Action, probability>>` that maps info state
strings to `Action`-probability pairs describing each player's policy.
"""
return self._root_wrapper.sequence_weights_to_tabular_profile(
self._sequence_weights())
def average_policy(self):
"""Returns the average of all policies iterated.
The policy is computed using the accumulated policy probabilities computed
using `evaluate_and_update_policy`.
Returns:
A `dict<info state, list<Action, probability>>` that maps info state
strings to (Action, probability) pairs describing each player's policy.
"""
return self._root_wrapper.sequence_weights_to_tabular_profile(
self._cumulative_seq_probs)
def _previous_player(self, player):
"""The previous player in the turn ordering."""
return player - 1 if player > 0 else self._game.num_players() - 1
def _average_policy_update_player(self, regret_player):
"""The player for whom the average policy should be updated."""
return self._previous_player(regret_player)
def evaluate_and_update_policy(self, train_fn):
"""Performs a single step of policy evaluation and policy improvement.
Args:
train_fn: A (model, `torch.utils.data.TensorDataset`) function that trains
the given regression model to accurately reproduce the x to y mapping
given x-y data.
"""
sequence_weights = self._sequence_weights()
player_seq_features = self._root_wrapper.sequence_features
for regret_player in range(self._game.num_players()):
seq_prob_player = self._average_policy_update_player(regret_player)
regrets, seq_probs = (
self._root_wrapper.counterfactual_regrets_and_reach_weights(
regret_player, seq_prob_player, *sequence_weights))
self._cumulative_seq_probs[seq_prob_player] += seq_probs
targets = torch.unsqueeze(torch.Tensor(regrets), axis=1)
data = torch.utils.data.TensorDataset(player_seq_features[regret_player],
targets)
regret_player_model = self._models[regret_player]
train_fn(regret_player_model, data)
sequence_weights[regret_player] = self._sequence_weights(regret_player)
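# Illustrative end-to-end sketch added for exposition: runs a few NeuRD
# iterations on Kuhn poker using this module's `train` as the regression step.
# The hidden size, batch size, and step size are arbitrary placeholders.
def _example_neurd_training(num_iterations=2):
  import pyspiel
  game = pyspiel.load_game("kuhn_poker")
  models = [
      DeepNeurdModel(game, num_hidden_units=13, num_hidden_layers=1)
      for _ in range(2)
  ]
  solver = CounterfactualNeurdSolver(game, models)
  def train_fn(model, data):
    train(model, data, batch_size=100, step_size=1.0)
  for _ in range(num_iterations):
    solver.evaluate_and_update_policy(train_fn)
  return solver.average_policy()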
| open_spiel-master | open_spiel/python/pytorch/neurd.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| open_spiel-master | open_spiel/python/pytorch/losses/__init__.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Reinforcement learning loss functions.
All the loss functions implemented here compute the loss for the policy (actor).
The critic loss functions are typically regression losses and are omitted
here due to their simplicity.
For the batch QPG, RM and RPG loss, please refer to the paper:
https://papers.nips.cc/paper/7602-actor-critic-policy-optimization-in-partially-observable-multiagent-environments.pdf
The BatchA2C loss uses code from the `TRFL` library:
https://github.com/deepmind/trfl/blob/master/trfl/discrete_policy_gradient_ops.py
"""
import torch
import torch.nn.functional as F
def _assert_rank_and_shape_compatibility(tensors, rank):
if not tensors:
raise ValueError("List of tensors cannot be empty")
tmp_shape = tensors[0].shape
for tensor in tensors:
if tensor.ndim != rank:
raise ValueError("Shape %s must have rank %d" % (tensor.ndim, rank))
if tensor.shape != tmp_shape:
raise ValueError("Shapes %s and %s are not compatible" %
(tensor.shape, tmp_shape))
def thresholded(logits, regrets, threshold=2.0):
"""Zeros out `regrets` where `logits` are too negative or too large."""
can_decrease = logits.gt(-threshold).float()
can_increase = logits.lt(threshold).float()
regrets_negative = regrets.clamp(max=0.0)
regrets_positive = regrets.clamp(min=0.0)
return can_decrease * regrets_negative + can_increase * regrets_positive
def compute_baseline(policy, action_values):
# V = pi * Q, backprop through pi but not Q.
return torch.sum(torch.mul(policy, action_values.detach()), dim=1)
def compute_regrets(policy_logits, action_values):
"""Compute regrets using pi and Q."""
# Compute regret.
policy = F.softmax(policy_logits, dim=1)
# Avoid computing gradients for action_values.
action_values = action_values.detach()
baseline = compute_baseline(policy, action_values)
regrets = torch.sum(
F.relu(action_values - torch.unsqueeze(baseline, 1)), dim=1)
return regrets
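# Illustrative worked example added for exposition: with uniform logits the
# policy is uniform, the baseline is the mean Q-value (0 here), and the regret
# is the summed positive advantage, so the result is tensor([1.]).
def _example_compute_regrets():
  policy_logits = torch.Tensor([[1.0, 1.0, 1.0]])
  action_values = torch.Tensor([[0.0, -1.0, 1.0]])
  return compute_regrets(policy_logits, action_values)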
def compute_advantages(policy_logits,
action_values,
use_relu=False,
threshold_fn=None):
"""Compute advantages using pi and Q."""
# Compute advantage.
policy = F.softmax(policy_logits, dim=1)
# Avoid computing gradients for action_values.
action_values = action_values.detach()
baseline = compute_baseline(policy, action_values)
advantages = action_values - torch.unsqueeze(baseline, 1)
if use_relu:
advantages = F.relu(advantages)
if threshold_fn:
    # Compute thresholded advantages weighted by policy logits for NeuRD.
policy_logits = policy_logits - policy_logits.mean(-1, keepdim=True)
advantages = threshold_fn(policy_logits, advantages)
policy_advantages = -torch.mul(policy_logits, advantages.detach())
else:
# Compute advantage weighted by policy.
policy_advantages = -torch.mul(policy, advantages.detach())
return torch.sum(policy_advantages, dim=1)
def compute_a2c_loss(policy_logits, actions, advantages):
cross_entropy = F.cross_entropy(policy_logits, actions, reduction="none")
advantages = advantages.detach()
if advantages.ndim != cross_entropy.ndim:
raise ValueError("Shapes %s and %s are not compatible" %
(advantages.ndim, cross_entropy.ndim))
return torch.mul(cross_entropy, advantages)
def compute_entropy(policy_logits):
return torch.sum(
-F.softmax(policy_logits, dim=1) * F.log_softmax(policy_logits, dim=1),
dim=-1)
class BatchQPGLoss(object):
"""Defines the batch QPG loss op."""
def __init__(self, entropy_cost=None, name="batch_qpg_loss"):
self._entropy_cost = entropy_cost
self._name = name
def loss(self, policy_logits, action_values):
"""Constructs a PyTorch Crierion that computes the QPG loss for batches.
Args:
policy_logits: `B x A` tensor corresponding to policy logits.
action_values: `B x A` tensor corresponding to Q-values.
Returns:
loss: A 0-D `float` tensor corresponding the loss.
"""
_assert_rank_and_shape_compatibility([policy_logits, action_values], 2)
advantages = compute_advantages(policy_logits, action_values)
_assert_rank_and_shape_compatibility([advantages], 1)
total_adv = torch.mean(advantages, dim=0)
total_loss = total_adv
if self._entropy_cost:
policy_entropy = torch.mean(compute_entropy(policy_logits))
entropy_loss = torch.mul(float(self._entropy_cost), policy_entropy)
total_loss = torch.add(total_loss, entropy_loss)
return total_loss
class BatchNeuRDLoss(object):
"""Defines the batch NeuRD loss op."""
def __init__(self, entropy_cost=None, name="batch_neurd_loss"):
self._entropy_cost = entropy_cost
self._name = name
def loss(self, policy_logits, action_values):
"""Constructs a PyTorch Crierion that computes the NeuRD loss for batches.
Args:
policy_logits: `B x A` tensor corresponding to policy logits.
action_values: `B x A` tensor corresponding to Q-values.
Returns:
loss: A 0-D `float` tensor corresponding the loss.
"""
_assert_rank_and_shape_compatibility([policy_logits, action_values], 2)
advantages = compute_advantages(
policy_logits, action_values, threshold_fn=thresholded)
_assert_rank_and_shape_compatibility([advantages], 1)
    total_adv = torch.mean(advantages, dim=0)
total_loss = total_adv
if self._entropy_cost:
policy_entropy = torch.mean(compute_entropy(policy_logits))
entropy_loss = torch.mul(float(self._entropy_cost), policy_entropy)
total_loss = torch.add(total_loss, entropy_loss)
return total_loss
class BatchRMLoss(object):
"""Defines the batch RM loss op."""
def __init__(self, entropy_cost=None, name="batch_rm_loss"):
self._entropy_cost = entropy_cost
self._name = name
def loss(self, policy_logits, action_values):
"""Constructs a PyTorch Crierion that computes the RM loss for batches.
Args:
policy_logits: `B x A` tensor corresponding to policy logits.
action_values: `B x A` tensor corresponding to Q-values.
Returns:
loss: A 0-D `float` tensor corresponding the loss.
"""
_assert_rank_and_shape_compatibility([policy_logits, action_values], 2)
advantages = compute_advantages(policy_logits, action_values, use_relu=True)
_assert_rank_and_shape_compatibility([advantages], 1)
total_adv = torch.mean(advantages, dim=0)
total_loss = total_adv
if self._entropy_cost:
policy_entropy = torch.mean(compute_entropy(policy_logits))
entropy_loss = torch.mul(float(self._entropy_cost), policy_entropy)
total_loss = torch.add(total_loss, entropy_loss)
return total_loss
class BatchRPGLoss(object):
"""Defines the batch RPG loss op."""
def __init__(self, entropy_cost=None, name="batch_rpg_loss"):
self._entropy_cost = entropy_cost
self._name = name
def loss(self, policy_logits, action_values):
"""Constructs a PyTorch Crierion that computes the RPG loss for batches.
Args:
policy_logits: `B x A` tensor corresponding to policy logits.
action_values: `B x A` tensor corresponding to Q-values.
Returns:
loss: A 0-D `float` tensor corresponding the loss.
"""
_assert_rank_and_shape_compatibility([policy_logits, action_values], 2)
regrets = compute_regrets(policy_logits, action_values)
_assert_rank_and_shape_compatibility([regrets], 1)
total_regret = torch.mean(regrets, dim=0)
total_loss = total_regret
if self._entropy_cost:
policy_entropy = torch.mean(compute_entropy(policy_logits))
entropy_loss = torch.mul(float(self._entropy_cost), policy_entropy)
total_loss = torch.add(total_loss, entropy_loss)
return total_loss
class BatchA2CLoss(object):
"""Defines the batch A2C loss op."""
def __init__(self, entropy_cost=None, name="batch_a2c_loss"):
self._entropy_cost = entropy_cost
self._name = name
def loss(self, policy_logits, baseline, actions, returns):
"""Constructs a PyTorch Crierion that computes the A2C loss for batches.
Args:
policy_logits: `B x A` tensor corresponding to policy logits.
baseline: `B` tensor corresponding to baseline (V-values).
actions: `B` tensor corresponding to actions taken.
returns: `B` tensor corresponds to returns accumulated.
Returns:
loss: A 0-D `float` tensor corresponding the loss.
"""
_assert_rank_and_shape_compatibility([policy_logits], 2)
_assert_rank_and_shape_compatibility([baseline, actions, returns], 1)
advantages = returns - baseline
policy_loss = compute_a2c_loss(policy_logits, actions, advantages)
total_loss = torch.mean(policy_loss, dim=0)
if self._entropy_cost:
policy_entropy = torch.mean(compute_entropy(policy_logits))
entropy_loss = torch.mul(float(self._entropy_cost), policy_entropy)
total_loss = torch.add(total_loss, entropy_loss)
return total_loss
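# Illustrative usage sketch (not part of the library API; the tensors below
# are made-up placeholders): each loss object is constructed once and its
# `loss` method is called on batched tensors inside a training step, e.g.
#
#   a2c_loss = BatchA2CLoss(entropy_cost=0.01)
#   policy_logits = torch.randn(32, 4, requires_grad=True)  # B=32, A=4
#   baseline = torch.randn(32, requires_grad=True)          # V-value estimates
#   actions = torch.randint(0, 4, (32,))                    # actions taken
#   returns = torch.randn(32)                               # accumulated returns
#   total_loss = a2c_loss.loss(policy_logits, baseline, actions, returns)
#   total_loss.backward()                                   # then step the optimizer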
| open_spiel-master | open_spiel/python/pytorch/losses/rl_losses.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.pytorch.losses.rl_losses."""
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
import torch
from open_spiel.python.pytorch.losses import rl_losses
SEED = 24984617
class RLLossesTest(parameterized.TestCase, absltest.TestCase):
@parameterized.named_parameters(('no_entropy_cost', 0.),
('with_entropy_cost', 1.))
def test_batch_qpg_loss_with_entropy_cost(self, entropy_cost):
batch_qpg_loss = rl_losses.BatchQPGLoss(entropy_cost=entropy_cost)
q_values = torch.FloatTensor([[0., -1., 1.], [1., -1., 0]])
policy_logits = torch.FloatTensor([[1., 1., 1.], [1., 1., 4.]])
total_loss = batch_qpg_loss.loss(policy_logits, q_values)
# Compute expected quantities.
expected_policy_entropy = (1.0986 + 0.3665) / 2
# baseline = \sum_a pi_a * Q_a = 0.
# -\sum_a pi_a * (Q_a - baseline)
expected_policy_loss = (0.0 + 0.0) / 2
expected_total_loss = (
expected_policy_loss + entropy_cost * expected_policy_entropy)
np.testing.assert_allclose(total_loss, expected_total_loss, atol=1e-4)
@parameterized.named_parameters(('no_entropy_cost', 0.),
('with_entropy_cost', 1.))
def test_batch_rm_loss_with_entropy_cost(self, entropy_cost):
batch_rm_loss = rl_losses.BatchRMLoss(entropy_cost=entropy_cost)
q_values = torch.FloatTensor([[0., -1., 1.], [1., -1., 0]])
policy_logits = torch.FloatTensor([[1., 1., 1.], [1., 1., 4.]])
total_loss = batch_rm_loss.loss(policy_logits, q_values)
# Compute expected quantities.
expected_policy_entropy = (1.0986 + 0.3665) / 2
# baseline = \sum_a pi_a * Q_a = 0.
# -\sum_a pi_a * relu(Q_a - baseline)
# negative sign as it's a loss term and loss needs to be minimized.
expected_policy_loss = -(.3333 + .0452) / 2
expected_total_loss = (
expected_policy_loss + entropy_cost * expected_policy_entropy)
np.testing.assert_allclose(total_loss, expected_total_loss, atol=1e-4)
@parameterized.named_parameters(('no_entropy_cost', 0.),
('with_entropy_cost', 1.))
def test_batch_rpg_loss_with_entropy_cost(self, entropy_cost):
batch_rpg_loss = rl_losses.BatchRPGLoss(entropy_cost=entropy_cost)
q_values = torch.FloatTensor([[0., -1., 1.], [1., -1., 0]])
policy_logits = torch.FloatTensor([[1., 1., 1.], [1., 1., 4.]])
total_loss = batch_rpg_loss.loss(policy_logits, q_values)
# Compute expected quantities.
expected_policy_entropy = (1.0986 + 0.3665) / 2
# baseline = \sum_a pi_a * Q_a = 0.
# \sum_a relu(Q_a - baseline)
expected_policy_loss = (1.0 + 1.0) / 2
expected_total_loss = (
expected_policy_loss + entropy_cost * expected_policy_entropy)
np.testing.assert_allclose(total_loss, expected_total_loss, atol=1e-4)
@parameterized.named_parameters(('no_entropy_cost', 0.),
('with_entropy_cost', 1.))
def test_batch_a2c_loss_with_entropy_cost(self, entropy_cost):
batch_a2c_loss = rl_losses.BatchA2CLoss(entropy_cost=entropy_cost)
policy_logits = torch.FloatTensor([[1., 1., 1.], [1., 1., 4.]])
baseline = torch.FloatTensor([1. / 3, 0.5])
actions = torch.LongTensor([1, 2])
returns = torch.FloatTensor([0., 1.])
total_loss = batch_a2c_loss.loss(policy_logits, baseline, actions, returns)
# Compute expected quantities.
# advantages = returns - baseline = [-1./3, 0.5]
# cross_entropy = [-log(e^1/(e^1+e^1+e^1)), -log(e^4/(e^4+e^1+e^1))]
# = [1.0986, 0.09492]
# policy_loss = cross_entropy * advantages = [-0.3662, 0.04746]
expected_policy_entropy = (1.0986 + 0.3665) / 2
expected_policy_loss = (-0.3662 + 0.04746) / 2
expected_total_loss = (
expected_policy_loss + entropy_cost * expected_policy_entropy)
np.testing.assert_allclose(total_loss, expected_total_loss, atol=1e-4)
if __name__ == '__main__':
torch.manual_seed(SEED)
absltest.main()
| open_spiel-master | open_spiel/python/pytorch/losses/rl_losses_pytorch_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.jax.dqn."""
from absl.testing import absltest
from open_spiel.python import rl_environment
from open_spiel.python.jax import dqn
import pyspiel
# A simple two-action game encoded as an EFG game. Going left gets -1, going
# right gets +1.
SIMPLE_EFG_DATA = """
EFG 2 R "Simple single-agent problem" { "Player 1" } ""
p "ROOT" 1 1 "ROOT" { "L" "R" } 0
t "L" 1 "Outcome L" { -1.0 }
t "R" 2 "Outcome R" { 1.0 }
"""
class DQNTest(absltest.TestCase):
def test_simple_game(self):
game = pyspiel.load_efg_game(SIMPLE_EFG_DATA)
env = rl_environment.Environment(game=game)
agent = dqn.DQN(0,
state_representation_size=
game.information_state_tensor_shape()[0],
num_actions=game.num_distinct_actions(),
hidden_layers_sizes=[16],
replay_buffer_capacity=100,
batch_size=5,
epsilon_start=0.02,
epsilon_end=0.01,
gradient_clipping=1.0)
total_reward = 0
for _ in range(100):
time_step = env.reset()
while not time_step.last():
agent_output = agent.step(time_step)
time_step = env.step([agent_output.action])
total_reward += time_step.rewards[0]
agent.step(time_step)
self.assertGreaterEqual(total_reward, -100)
def test_run_tic_tac_toe(self):
env = rl_environment.Environment("tic_tac_toe")
state_size = env.observation_spec()["info_state"][0]
num_actions = env.action_spec()["num_actions"]
agents = [
dqn.DQN( # pylint: disable=g-complex-comprehension
player_id,
state_representation_size=state_size,
num_actions=num_actions,
hidden_layers_sizes=[16],
replay_buffer_capacity=10,
batch_size=5) for player_id in [0, 1]
]
time_step = env.reset()
while not time_step.last():
current_player = time_step.observations["current_player"]
current_agent = agents[current_player]
agent_output = current_agent.step(time_step)
time_step = env.step([agent_output.action])
for agent in agents:
agent.step(time_step)
def test_run_hanabi(self):
# Hanabi is an optional game, so check we have it before running the test.
game = "hanabi"
if game not in pyspiel.registered_names():
return
num_players = 3
env_configs = {
"players": num_players,
"max_life_tokens": 1,
"colors": 2,
"ranks": 3,
"hand_size": 2,
"max_information_tokens": 3,
"discount": 0.
}
env = rl_environment.Environment(game, **env_configs)
state_size = env.observation_spec()["info_state"][0]
num_actions = env.action_spec()["num_actions"]
agents = [
dqn.DQN( # pylint: disable=g-complex-comprehension
player_id,
state_representation_size=state_size,
num_actions=num_actions,
hidden_layers_sizes=[16],
replay_buffer_capacity=10,
batch_size=5) for player_id in range(num_players)
]
time_step = env.reset()
while not time_step.last():
current_player = time_step.observations["current_player"]
agent_output = [agent.step(time_step) for agent in agents]
time_step = env.step([agent_output[current_player].action])
for agent in agents:
agent.step(time_step)
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/jax/dqn_jax_test.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""JAX implementation of LOLA and LOLA-DiCE (Foerster et al. 2018).
The DiCE implementation is also based on the PyTorch implementation from
https://github.com/alexis-jacq/LOLA_DiCE by Alexis David Jacq.
Both algorithm implementations, LOLA and LOLA-DiCE, currently support only
two-player simultaneous-move games and assume access to the opponent's
actions (the observation field in the time step must contain a key
'actions' with the opponent's actions).
"""
# pylint: disable=g-importing-member
# pylint: disable=g-bare-generic
from copy import deepcopy
from functools import partial
import typing
import chex
import distrax
import haiku as hk
import jax
from jax import grad
from jax import vmap
import jax.numpy as jnp
import numpy as np
import optax
import rlax
from open_spiel.python import rl_agent
from open_spiel.python import rl_environment
from open_spiel.python.rl_environment import TimeStep
@chex.dataclass
class TransitionBatch: # pylint: disable=too-few-public-methods
"""A transition batch is a collection of transitions.
Each item in the batch is a numpy array.
"""
info_state: np.ndarray
action: np.ndarray
reward: np.ndarray
discount: np.ndarray = None
terminal: np.ndarray = None
legal_actions_mask: np.ndarray = None
values: np.ndarray = None
@chex.dataclass
class TrainState: # pylint: disable=too-few-public-methods
"""TrainState class.
The training state contains the parameters and optimizer states of the
policy and critic networks for each agent. The parameters are stored in a
dictionary with the agent id as key.
"""
policy_params: typing.Dict[typing.Any, hk.Params]
policy_opt_states: typing.Dict[typing.Any, optax.OptState]
critic_params: typing.Dict[typing.Any, hk.Params]
critic_opt_states: typing.Dict[typing.Any, optax.OptState]
# A function that takes the current train state and a transition batch and
# returns the new train state and a dictionary of metrics.
UpdateFn = typing.Callable[
[TrainState, TransitionBatch], typing.Tuple[TrainState, typing.Dict]
]
def get_minibatches(
batch: TransitionBatch, num_minibatches: int
) -> typing.Iterator[TransitionBatch]:
"""Yields an iterator over minibatches of the given batch.
Args:
batch: A transition batch.
num_minibatches: The number of minibatches to return.
Yields:
An iterator over minibatches of the given batch.
"""
def get_minibatch(x, start, end):
return x[:, start:end] if len(x.shape) > 2 else x
for i in range(num_minibatches):
start, end = i * (batch.reward.shape[1] // num_minibatches), (i + 1) * (
batch.reward.shape[1] // num_minibatches
)
mini_batch = jax.tree_util.tree_map(
partial(get_minibatch, start=start, end=end), batch
)
yield mini_batch
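# Note: minibatches are sliced along axis 1 (the episode/batch axis) of every
# array with more than two dimensions; rank-2 arrays such as `discount` and
# `terminal` are passed through unchanged. For example, with 8 episodes and
# num_minibatches=2, the first minibatch covers episodes [0:4) and the second
# covers episodes [4:8).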
def get_critic_update_fn(
agent_id: int,
critic_network: hk.Transformed,
optimizer: optax.TransformUpdateFn,
num_minibatches: int = 8,
gamma: float = 0.99,
) -> UpdateFn:
"""Returns the update function for the critic parameters.
Args:
agent_id: The id of the agent that will be updated.
critic_network: A transformed haiku function.
optimizer: Optimizer update function.
num_minibatches: the number of minibatches.
gamma: the discount factor.
Returns:
An update function that takes the current train state together with a
transition batch and returns the new train state and a dictionary of
metrics.
"""
def loss_fn(params, batch: TransitionBatch):
info_states, rewards = batch.info_state[agent_id], batch.reward[agent_id]
discounts = jnp.ones_like(rewards) * gamma
values = critic_network.apply(params, info_states).squeeze()
v_t = values[:, :-1].reshape(-1)
v_tp1 = values[:, 1:].reshape(-1)
r_t = rewards[:, :-1].reshape(-1)
d_t = discounts[:, 1:].reshape(-1)
td_error = jax.lax.stop_gradient(r_t + d_t * v_tp1) - v_t
return jnp.mean(td_error**2)
def update(train_state: TrainState, batch: TransitionBatch):
"""The critic update function.
Updates the critic parameters of the train state with the given
transition batch.
Args:
train_state: The current train state.
batch: A transition batch.
Returns:
The updated train state with the new critic params and a dictionary
with the critic loss
"""
losses = []
critic_params = train_state.critic_params[agent_id]
opt_state = train_state.critic_opt_states[agent_id]
for mini_batch in get_minibatches(batch, num_minibatches):
loss, grads = jax.value_and_grad(loss_fn)(critic_params, mini_batch)
updates, opt_state = optimizer(grads, opt_state)
critic_params = optax.apply_updates(critic_params, updates)
losses.append(loss)
train_state = deepcopy(train_state)
state = TrainState(
policy_params=train_state.policy_params,
policy_opt_states=train_state.policy_opt_states,
critic_params={**train_state.critic_params, agent_id: critic_params},
critic_opt_states={
**train_state.critic_opt_states,
agent_id: opt_state,
},
)
return state, {'loss': jnp.mean(jnp.array(losses))}
return update
def get_dice_update_fn(
agent_id: int,
rng: hk.PRNGSequence,
policy_network: hk.Transformed,
critic_network: hk.Transformed,
optimizer: optax.TransformUpdateFn,
opp_pi_lr: float,
env: rl_environment.Environment,
n_lookaheads: int = 1,
gamma: float = 0.99,
):
"""Get the DiCE update function."""
def magic_box(x):
return jnp.exp(x - jax.lax.stop_gradient(x))
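# The "magic box" operator from DiCE (Foerster et al., 2018): its forward
# value is exp(0) = 1, so multiplying by it leaves the objective's value
# unchanged, while its derivative w.r.t. x equals magic_box(x) (which also
# evaluates to 1), so the gradient of x is reintroduced. This is what turns
# the surrogate objective below into a correct score-function gradient
# estimator that remains valid under repeated differentiation.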
@jax.jit
@partial(jax.vmap, in_axes=(None, 0, 0))
def get_action(params, s, rng_key):
pi = policy_network.apply(params, s)
action = pi.sample(seed=rng_key)
return action
def rollout(params, other_params):
states, rewards, actions = [], [], []
step = env.reset()
batch_size = (
step.observations['batch_size']
if 'batch_size' in step.observations
else 1
)
while not step.last():
obs = step.observations
s_1, s_2 = jnp.array(obs['info_state'][0]), jnp.array(
obs['info_state'][1]
)
if batch_size == 1:
s_1, s_2 = s_1[None, :], s_2[None, :]
a_1 = get_action(params, s_1, jax.random.split(next(rng), num=batch_size))
a_2 = get_action(
other_params, s_2, jax.random.split(next(rng), num=batch_size)
)
a = jnp.stack([a_1, a_2], axis=1)
step = env.step(a.squeeze())
r_1, r_2 = jnp.array(step.rewards[0]), jnp.array(step.rewards[1])
if batch_size == 1:
r_1, r_2 = r_1[None], r_2[None]
actions.append(a.T)
states.append(jnp.stack([s_1, s_2], axis=0))
rewards.append(jnp.stack([r_1, r_2], axis=0))
return {
'states': jnp.stack(states, axis=2),
'rewards': jnp.stack(rewards, axis=2),
'actions': jnp.stack(actions, axis=2),
}
def dice_correction(train_state: TrainState):
"""Computes the dice update for the given train state.
Args:
train_state: The current train state.
Returns:
The updated train state with the new policy params and metrics dict.
"""
@jax.jit
def dice_objective(params, other_params, states, actions, rewards, values):
self_logprobs = vmap(
vmap(lambda s, a: policy_network.apply(params, s).log_prob(a))
)(states[0], actions[0])
other_logprobs = vmap(
vmap(lambda s, a: policy_network.apply(other_params, s).log_prob(a))
)(states[1], actions[1])
# apply discount:
cum_discount = jnp.cumprod(gamma * jnp.ones_like(rewards), axis=1) / gamma
discounted_rewards = rewards * cum_discount
discounted_values = values.squeeze() * cum_discount
# stochastic nodes involved in reward dependencies:
dependencies = jnp.cumsum(self_logprobs + other_logprobs, axis=1)
# log-probability of each stochastic node:
stochastic_nodes = self_logprobs + other_logprobs
# dice objective:
dice_objective = jnp.mean(
jnp.sum(magic_box(dependencies) * discounted_rewards, axis=1)
)
baseline_term = jnp.mean(
jnp.sum((1 - magic_box(stochastic_nodes)) * discounted_values, axis=1)
)
dice_objective = dice_objective + baseline_term
return -dice_objective # want to minimize -objective
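# In summary, the (negated) surrogate objective above is
#   E[ sum_t magic_box(sum_{t'<=t} log pi_1(a_t') + log pi_2(a_t'))
#          * gamma^t * r_t ]
# plus a baseline term whose value is zero but which reduces gradient
# variance, so differentiating it w.r.t. either player's parameters yields
# the DiCE gradient estimator.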
def outer_update(params, opp_params, agent_id, opp_id):
other_theta = opp_params
for _ in range(n_lookaheads):
trajectories = rollout(other_theta, params)
other_grad = jax.grad(dice_objective)(
other_theta,
other_params=params,
states=trajectories['states'],
actions=trajectories['actions'],
rewards=trajectories['rewards'][0],
values=critic_network.apply(
train_state.critic_params[opp_id], trajectories['states'][0]
),
)
# Update the other player's policy:
other_theta = jax.tree_util.tree_map(
lambda param, grad: param - opp_pi_lr * grad,
other_theta,
other_grad,
)
trajectories = rollout(params, other_theta)
values = critic_network.apply(
train_state.critic_params[agent_id], trajectories['states'][0]
)
loss = dice_objective(
params=params,
other_params=other_theta,
states=trajectories['states'],
actions=trajectories['actions'],
rewards=trajectories['rewards'][0],
values=values,
)
return loss, {'loss': loss}
opp = 1 - agent_id
grads, metrics = grad(outer_update, has_aux=True)(
train_state.policy_params[agent_id],
opp_params=train_state.policy_params[opp],
agent_id=agent_id,
opp_id=opp,
)
return grads, metrics
def update(
train_state: TrainState, batch: TransitionBatch
) -> typing.Tuple[TrainState, typing.Dict]:
"""Updates the policy parameters in train_state.
The DiCE objective is computed from fresh rollouts, so the incoming batch
is unused.
Args:
train_state: the agent's train state.
batch: a transition batch
Returns:
A tuple (new_train_state, metrics)
"""
del batch
grads, metrics = dice_correction(train_state)
updates, opt_state = optimizer(
grads, train_state.policy_opt_states[agent_id]
)
policy_params = optax.apply_updates(
train_state.policy_params[agent_id], updates
)
train_state = TrainState(
policy_params={**train_state.policy_params, agent_id: policy_params},
policy_opt_states={
**train_state.policy_opt_states,
agent_id: opt_state,
},
critic_params=deepcopy(train_state.critic_params),
critic_opt_states=deepcopy(train_state.critic_opt_states),
)
return train_state, metrics
return update
def get_lola_update_fn(
agent_id: int,
policy_network: hk.Transformed,
optimizer: optax.TransformUpdateFn,
pi_lr: float,
gamma: float = 0.99,
lola_weight: float = 1.0,
) -> UpdateFn:
"""Get the LOLA update function.
Returns a function that updates the policy parameters using the LOLA
correction formula.
Args:
agent_id: the agent's id
policy_network: A haiku transformed policy network.
optimizer: An optax optimizer.
pi_lr: Policy learning rate.
gamma: Discount factor.
lola_weight: The LOLA correction weight to scale the correction term.
Returns:
A UpdateFn function that updates the policy parameters.
"""
def flat_params(
params,
) -> typing.Tuple[
typing.Dict[str, jnp.ndarray], typing.Dict[typing.Any, typing.Callable]
]:
"""Flattens the policy parameters.
Flattens the parameters of the policy network into a single vector and
returns the unravel function.
Args:
params: The policy parameters.
Returns:
A tuple (flat_params, unravel_fn)
"""
flat_param_dict = {
agent_id: jax.flatten_util.ravel_pytree(p)
for agent_id, p in params.items()
}
params = dict((k, flat_param_dict[k][0]) for k in flat_param_dict)
unravel_fns = dict((k, flat_param_dict[k][1]) for k in flat_param_dict)
return params, unravel_fns
def lola_correction(
train_state: TrainState, batch: TransitionBatch
) -> hk.Params:
"""Computes the LOLA correction term.
Args:
train_state: The agent's current train state.
batch: A transition batch.
Returns:
The LOLA correction term.
"""
a_t, o_t, r_t, values = (
batch.action,
batch.info_state,
batch.reward,
batch.values,
)
params, unravel_fns = flat_params(train_state.policy_params)
compute_returns = partial(rlax.lambda_returns, lambda_=0.0)
g_t = vmap(vmap(compute_returns))(
r_t=r_t, v_t=values, discount_t=jnp.full_like(r_t, gamma)
)
g_t = (g_t - g_t.mean()) / (g_t.std() + 1e-8)
def log_pi(params, i, a_t, o_t):
return policy_network.apply(unravel_fns[i](params), o_t).log_prob(a_t)
opp_id = 1 - agent_id
def cross_term(a_t, o_t, r_t):
"""Computes the second order correction term of the LOLA update.
Args:
a_t: actions of both players
o_t: observations of both players
r_t: rewards of both players
Returns:
The second order correction term.
"""
grad_log_pi = vmap(jax.value_and_grad(log_pi), in_axes=(None, None, 0, 0))
log_probs, grads = grad_log_pi(
params[agent_id], agent_id, a_t[agent_id], o_t[agent_id]
)
opp_logprobs, opp_grads = grad_log_pi(
params[opp_id], opp_id, a_t[opp_id], o_t[opp_id]
)
grads = grads.cumsum(axis=0)
opp_grads = opp_grads.cumsum(axis=0)
log_probs = log_probs.cumsum(axis=0)
opp_logprobs = opp_logprobs.cumsum(axis=0)
cross_term = 0.0
for t in range(0, len(a_t[agent_id])):
discounted_reward = r_t[opp_id, t] * jnp.power(gamma, t)
cross_term += (
discounted_reward
* jnp.outer(grads[t], opp_grads[t])
* jnp.exp(log_probs[t] + opp_logprobs[t])
)
return cross_term  # * jnp.exp(log_probs.sum() + opp_logprobs.sum())
def policy_gradient(a_t, o_t, g_t):
grad_log_pi = vmap(grad(log_pi), in_axes=(None, None, 0, 0))
opp_grads = grad_log_pi(params[opp_id], opp_id, a_t[opp_id], o_t[opp_id])
pg = g_t[agent_id] @ opp_grads
return pg
cross = vmap(cross_term, in_axes=(1, 1, 1))(a_t, o_t, r_t).mean(axis=0)
pg = vmap(policy_gradient, in_axes=(1, 1, 1))(a_t, o_t, g_t).mean(axis=0)
correction = -pi_lr * (pg @ cross)
return unravel_fns[agent_id](correction)
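# The vector returned above is a sample-based estimate of the LOLA
# second-order correction of Foerster et al. (2018): `pg` estimates the
# gradient of this agent's return w.r.t. the opponent's parameters, `cross`
# estimates the mixed second-order term of the opponent's return, and their
# product, scaled by the learning rate pi_lr, anticipates the opponent's
# next gradient step.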
def policy_loss(params, agent_id, batch):
"""Computes the policy gradient loss.
Args:
params: The policy parameters.
agent_id: The agent's id.
batch: A transition batch.
Returns:
The policy gradient loss.
"""
a_t, o_t, r_t, values = (
batch.action[agent_id],
batch.info_state[agent_id],
batch.reward[agent_id],
batch.values[agent_id],
)
logits_t = vmap(vmap(lambda s: policy_network.apply(params, s).logits))(o_t)
discount = jnp.full(r_t.shape, gamma)
returns = vmap(rlax.lambda_returns)(
r_t=r_t,
v_t=values,
discount_t=discount,
lambda_=jnp.ones_like(discount),
)
adv_t = returns - values
loss = vmap(rlax.policy_gradient_loss)(
logits_t=logits_t, a_t=a_t, adv_t=adv_t, w_t=jnp.ones_like(adv_t)
)
return loss.mean()
def update(
train_state: TrainState, batch: TransitionBatch
) -> typing.Tuple[TrainState, typing.Dict]:
"""Updates the policy parameters in train_state.
If lola_weight > 0, the correction term by Foerster et al. will be applied.
Args:
train_state: the agent's train state.
batch: a transition batch
Returns:
A tuple (new_train_state, metrics)
"""
loss, policy_grads = jax.value_and_grad(policy_loss)(
train_state.policy_params[agent_id], agent_id, batch
)
correction = lola_correction(train_state, batch)
policy_grads = jax.tree_util.tree_map(
lambda grad, corr: grad - lola_weight * corr, policy_grads, correction
)
updates, opt_state = optimizer(
policy_grads, train_state.policy_opt_states[agent_id]
)
policy_params = optax.apply_updates(
train_state.policy_params[agent_id], updates
)
train_state = TrainState(
policy_params={**train_state.policy_params, agent_id: policy_params},
policy_opt_states={
**train_state.policy_opt_states,
agent_id: opt_state,
},
critic_params=deepcopy(train_state.critic_params),
critic_opt_states=deepcopy(train_state.critic_opt_states),
)
return train_state, {'loss': loss}
return update
def get_opponent_update_fn(
agent_id: int,
policy_network: hk.Transformed,
optimizer: optax.TransformUpdateFn,
num_minibatches: int = 1,
) -> UpdateFn:
"""Get the opponent update function."""
def loss_fn(params, batch: TransitionBatch):
def loss(p, states, actions):
log_prob = policy_network.apply(p, states).log_prob(actions)
return log_prob
log_probs = vmap(vmap(loss, in_axes=(None, 0, 0)), in_axes=(None, 0, 0))(
params, batch.info_state[agent_id], batch.action[agent_id]
)
return -log_probs.sum(axis=-1).mean()
def update(
train_state: TrainState, batch: TransitionBatch
) -> typing.Tuple[TrainState, typing.Dict]:
policy_params = train_state.policy_params[agent_id]
opt_state = train_state.policy_opt_states[agent_id]
loss = 0
for mini_batch in get_minibatches(batch, num_minibatches):
loss, policy_grads = jax.value_and_grad(loss_fn)(
policy_params, mini_batch
)
updates, opt_state = optimizer(policy_grads, opt_state)
policy_params = optax.apply_updates(
train_state.policy_params[agent_id], updates
)
train_state = TrainState(
policy_params={**train_state.policy_params, agent_id: policy_params},
policy_opt_states={
**train_state.policy_opt_states,
agent_id: opt_state,
},
critic_params=deepcopy(train_state.critic_params),
critic_opt_states=deepcopy(train_state.critic_opt_states),
)
return train_state, {'loss': loss}
return update
class OpponentShapingAgent(rl_agent.AbstractAgent):
"""Opponent Shaping Agent.
This agent uses either LOLA or LOLA-DiCE to influence the parameter updates
of the opponent policies.
"""
def __init__(
self,
player_id: int,
opponent_ids: typing.List[int],
info_state_size: chex.Shape,
num_actions: int,
policy: hk.Transformed,
critic: hk.Transformed,
batch_size: int = 16,
critic_learning_rate: typing.Union[float, optax.Schedule] = 0.01,
pi_learning_rate: typing.Union[float, optax.Schedule] = 0.001,
opp_policy_learning_rate: typing.Union[float, optax.Schedule] = 0.001,
opponent_model_learning_rate: typing.Union[float, optax.Schedule] = 0.001,
clip_grad_norm: float = 0.5,
policy_update_interval: int = 8,
discount: float = 0.99,
critic_discount: float = 0.99,
seed: jax.random.PRNGKey = 42,
fit_opponent_model=True,
correction_type: str = 'dice',
use_jit: bool = False,
n_lookaheads: int = 1,
num_critic_mini_batches: int = 1,
num_opponent_updates: int = 1,
env: typing.Optional[rl_environment.Environment] = None,
):
self.player_id = player_id
self._num_actions = num_actions
self._batch_size = batch_size
self._policy_update_interval = policy_update_interval
self._discount = discount
self._num_opponent_updates = num_opponent_updates
self._num_mini_batches = num_critic_mini_batches
self._prev_time_step = None
self._prev_action = None
self._data = []
self._metrics = []
self._fit_opponent_model = fit_opponent_model
self._opponent_ids = opponent_ids
self._rng = hk.PRNGSequence(seed)
# Step counters
self._step_counter = 0
self._episode_counter = 0
self._num_learn_steps = 0
self._pi_network = policy
self._critic_network = critic
self._critic_opt = optax.sgd(learning_rate=critic_learning_rate)
self._opponent_opt = optax.adam(opponent_model_learning_rate)
self._policy_opt = optax.chain(
optax.clip_by_global_norm(clip_grad_norm)
if clip_grad_norm
else optax.identity(),
optax.sgd(learning_rate=pi_learning_rate),
)
self._train_state = self._init_train_state(info_state_size=info_state_size)
self._current_policy = self.get_policy(return_probs=True)
if correction_type == 'dice':
policy_update_fn = get_dice_update_fn(
agent_id=player_id,
rng=self._rng,
policy_network=policy,
critic_network=critic,
optimizer=self._policy_opt.update,
opp_pi_lr=opp_policy_learning_rate,
gamma=discount,
n_lookaheads=n_lookaheads,
env=env,
)
# pylint: disable=consider-using-in
elif correction_type == 'lola' or correction_type == 'none':
# if correction_type is none, use policy gradient without corrections
lola_weight = 1.0 if correction_type == 'lola' else 0.0
update_fn = get_lola_update_fn(
agent_id=player_id,
policy_network=policy,
pi_lr=pi_learning_rate,
optimizer=self._policy_opt.update,
lola_weight=lola_weight,
)
policy_update_fn = jax.jit(update_fn) if use_jit else update_fn
else:
raise ValueError(f'Unknown correction type: {correction_type}')
critic_update_fn = get_critic_update_fn(
agent_id=player_id,
critic_network=critic,
optimizer=self._critic_opt.update,
num_minibatches=num_critic_mini_batches,
gamma=critic_discount,
)
self._policy_update_fns = {player_id: policy_update_fn}
self._critic_update_fns = {
player_id: jax.jit(critic_update_fn) if use_jit else critic_update_fn
}
for opponent in opponent_ids:
opp_update_fn = get_opponent_update_fn(
agent_id=opponent,
policy_network=policy,
optimizer=self._opponent_opt.update,
num_minibatches=num_opponent_updates,
)
opp_critic_update_fn = get_critic_update_fn(
agent_id=opponent,
critic_network=critic,
optimizer=self._critic_opt.update,
num_minibatches=num_critic_mini_batches,
gamma=critic_discount,
)
self._policy_update_fns[opponent] = (
jax.jit(opp_update_fn) if use_jit else opp_update_fn
)
self._critic_update_fns[opponent] = (
jax.jit(opp_critic_update_fn) if use_jit else opp_critic_update_fn
)
@property
def train_state(self):
return deepcopy(self._train_state)
@property
def policy_network(self):
return self._pi_network
@property
def critic_network(self):
return self._critic_network
def metrics(self, return_last_only: bool = True):
if not self._metrics:
return {}
metrics = self._metrics[-1] if return_last_only else self._metrics
return metrics
def update_params(self, state: TrainState, player_id: int) -> None:
"""Updates the parameters of the other agents.
Args:
state: the train state of the other agent.
player_id: id of the other agent
Returns:
"""
self._train_state.policy_params[player_id] = deepcopy(
state.policy_params[player_id]
)
self._train_state.critic_params[player_id] = deepcopy(
state.critic_params[player_id]
)
def get_value_fn(self) -> typing.Callable:
def value_fn(obs: jnp.ndarray):
obs = jnp.array(obs)
return self._critic_network.apply(
self.train_state.critic_params[self.player_id], obs
).squeeze(-1)
return jax.jit(value_fn)
def get_policy(self, return_probs=True) -> typing.Callable:
"""Get the policy.
Returns a function that takes a random key, an observation and
optionally an action mask. The function produces actions which are
sampled from the current policy. Additionally, if return_probs is true,
it also returns the action probabilities.
Args:
return_probs: if true, the policy returns a tuple (action,
action_probs).
Returns:
A function that maps observations to actions
"""
def _policy(key: jax.random.PRNGKey, obs: jnp.ndarray, action_mask=None):
"""The actual policy function.
Takes a random key, the current observation and optionally an action
mask.
Args:
key: a random key for sampling
obs: numpy array of observations
action_mask: optional numpy array to mask out illegal actions
Returns:
Either the sampled actions or, if return_probs is true, a tuple
(actions, action_probs).
"""
params = self._train_state.policy_params[self.player_id]
pi = self._pi_network.apply(params, obs)
if action_mask is not None:
probs = pi.probs * action_mask
probs = probs / probs.sum()
pi = distrax.Categorical(probs=probs)
actions = pi.sample(seed=key)
if return_probs:
return actions, pi.prob(actions)
else:
return actions
return jax.jit(_policy)
def step(self, time_step: TimeStep, is_evaluation=False):
"""Produces an action and possibly triggers a parameter update.
LOLA agents depend on having access to previous actions made by the
opponent. Assumes that the field 'observations' of time_step contains a
field 'actions' and its first axis is indexed by the player id. Similarly, the
fields 'rewards' and 'legal_actions' are assumed to be of shape
(num_players,).
Args:
time_step: a TimeStep instance which has a field 'actions' in the
observations dict.
is_evaluation: if true, the agent will not update.
Returns:
A tuple containing the action that was taken and its probability
under the current policy.
"""
do_step = (
time_step.is_simultaneous_move()
or self.player_id == time_step.current_player()
)
action, probs = None, []
batch_policy = vmap(self._current_policy, in_axes=(0, 0, None))
if not time_step.last() and do_step:
info_state = time_step.observations['info_state'][self.player_id]
legal_actions = time_step.observations['legal_actions'][self.player_id]
action_mask = np.zeros(self._num_actions)
action_mask[legal_actions] = 1
# If we are not in a batched environment, we need to add a batch dimension
if 'batch_size' not in time_step.observations:
info_state = jnp.array(info_state)[None]
batch_size = 1
else:
batch_size = time_step.observations['batch_size']
sample_keys = jax.random.split(next(self._rng), batch_size)
action, probs = batch_policy(sample_keys, info_state, action_mask)
if not is_evaluation:
self._store_time_step(time_step=time_step, action=action)
if time_step.last() and self._should_update():
self._train_step()
return rl_agent.StepOutput(action=action, probs=probs)
def _init_train_state(self, info_state_size: chex.Shape):
init_inputs = jnp.ones(info_state_size)
agent_ids = self._opponent_ids + [self.player_id]
policy_params, policy_opt_states = {}, {}
critic_params, critic_opt_states = {}, {}
for agent_id in agent_ids:
policy_params[agent_id] = self._pi_network.init(
next(self._rng), init_inputs
)
if agent_id == self.player_id:
policy_opt_state = self._policy_opt.init(policy_params[agent_id])
else:
policy_opt_state = self._opponent_opt.init(policy_params[agent_id])
policy_opt_states[agent_id] = policy_opt_state
critic_params[agent_id] = self._critic_network.init(
next(self._rng), init_inputs
)
critic_opt_states[agent_id] = self._critic_opt.init(
critic_params[agent_id]
)
return TrainState(
policy_params=policy_params,
critic_params=critic_params,
policy_opt_states=policy_opt_states,
critic_opt_states=critic_opt_states,
)
def _store_time_step(self, time_step: TimeStep, action: np.ndarray):
"""Store the time step.
Converts the timestep and the action into a transition and steps the
counters.
Args:
time_step: the current time step.
action: the action that was taken before observing time_step
Returns: None
"""
self._step_counter += (
time_step.observations['batch_size']
if 'batch_size' in time_step.observations
else 1
)
if self._prev_time_step:
transition = self._make_transition(time_step)
self._data.append(transition)
if time_step.last():
self._prev_time_step = None
self._prev_action = None
self._episode_counter += 1
else:
obs = time_step.observations['info_state']
time_step.observations['values'] = jnp.stack(
[
self._critic_network.apply(
self.train_state.critic_params[id], jnp.array(obs[id])
).squeeze(-1)
for id in sorted(self.train_state.critic_params.keys())
]
)
self._prev_time_step = time_step
self._prev_action = action
def _train_step(self):
"""Updates the critic and the policy parameters.
After the update, the data buffer is cleared.
Returns: None
"""
batch = self._construct_episode_batches(self._data)
update_metrics = self._update_agent(batch)
self._metrics.append(update_metrics)
self._data.clear()
def _should_update(self) -> bool:
"""Indicates whether to update or not.
Returns:
True, if the total number of collected environment steps has reached
`batch_size * (num_learn_steps + 1)` and at least one episode has
finished. False otherwise.
"""
return (
self._step_counter >= self._batch_size * (self._num_learn_steps + 1)
and self._episode_counter > 0
)
def _update_agent(self, batch: TransitionBatch) -> typing.Dict:
"""Updates the critic and policy parameters of the agent.
Args:
batch: A batch of training episodes.
Dimensions (N=player, B=batch_size, T=timesteps, S=state_dim):
action: (N, B, T),
discount: (B, T),
info_state: (N, B, T, *S),
legal_actions_mask: (N, B, T),
reward: (N, B, T),
terminal: (B, T),
values: (N, B, T)
Returns:
A dictionary that contains relevant training metrics.
"""
metrics = {}
self._num_learn_steps += 1
# if we do opponent modelling, we update the opponents first
if self._fit_opponent_model:
opponent_update_metrics = self._update_opponents(batch)
metrics.update(
(f'opp_models/{k}', v) for k, v in opponent_update_metrics.items()
)
# then we update the critic
critic_update_metrics = self._update_critic(batch)
metrics.update((f'critic/{k}', v) for k, v in critic_update_metrics.items())
# and finally we update the policy
if self._num_learn_steps % self._policy_update_interval == 0:
policy_update_metrics = self._update_policy(batch)
metrics.update(
(f'policy/{k}', v) for k, v in policy_update_metrics.items()
)
return metrics
def _construct_episode_batches(
self, transitions: typing.List[TransitionBatch]
) -> TransitionBatch:
"""Constructs a list of transitions into a single transition batch instance.
The fields 'info_state', 'rewards', 'legal_action_mask' and 'actions' of the
produced transition batch have shape (num_agents, batch_size,
sequence_length, *shape). The fields 'discount' and 'terminal' have shape
(batch_size, sequence_length).
Args:
transitions: a list of single step transitions
Returns:
A transition batch instance with items of according shape.
"""
episode, batches = [], []
max_episode_length = 0
for transition in transitions:
episode.append(transition)
if transition.terminal.any():
max_episode_length = max(max_episode_length, len(episode))
# pylint: disable=no-value-for-parameter
batch = jax.tree_map(lambda *xs: jnp.stack(xs), *episode)
batch = batch.replace(
info_state=batch.info_state.transpose(1, 2, 0, 3),
action=batch.action.transpose(1, 2, 0),
legal_actions_mask=batch.legal_actions_mask.T,
reward=batch.reward.transpose(1, 2, 0),
values=batch.values.transpose(1, 2, 0),
discount=batch.discount.transpose(1, 2, 0),
terminal=batch.terminal.transpose(1, 2, 0),
)
batches.append(batch)
episode.clear()
return batches[0]
def _update_policy(self, batch: TransitionBatch):
self._train_state, metrics = self._policy_update_fns[self.player_id](
self._train_state, batch
)
self._current_policy = self.get_policy(return_probs=True)
return metrics
def _update_critic(self, batch: TransitionBatch):
self._train_state, metrics = self._critic_update_fns[self.player_id](
self._train_state, batch
)
return metrics
def _update_opponents(self, batch: TransitionBatch):
update_metrics = {}
for opponent in self._opponent_ids:
self._train_state, metrics = self._critic_update_fns[opponent](
self._train_state, batch
)
update_metrics.update(
{f'agent_{opponent}/critic/{k}': v for k, v in metrics.items()}
)
self._train_state, metrics = self._policy_update_fns[opponent](
self._train_state, batch
)
update_metrics.update(
{f'agent_{opponent}/policy/{k}': v for k, v in metrics.items()}
)
return update_metrics
def _make_transition(self, time_step: TimeStep):
assert self._prev_time_step is not None
legal_actions = self._prev_time_step.observations['legal_actions'][
self.player_id
]
legal_actions_mask = np.zeros((self._batch_size, self._num_actions))
legal_actions_mask[..., legal_actions] = 1
actions = np.array(time_step.observations['actions'])
rewards = np.array(time_step.rewards)
discounts = self._discount * (1 - time_step.last()) * np.ones_like(rewards)
terminal = time_step.last() * np.ones_like(rewards)
obs = np.array(self._prev_time_step.observations['info_state'])
transition = TransitionBatch(
info_state=obs,
action=actions,
reward=rewards,
discount=discounts,
terminal=terminal,
legal_actions_mask=legal_actions_mask,
values=self._prev_time_step.observations['values'],
)
if len(rewards.shape) < 2: # if not a batch, add a batch dimension
transition = jax.tree_map(lambda x: x[None], transition)
return transition
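# Illustrative construction sketch (assumptions only; see
# opponent_shaping_jax_test.py for the canonical setup). The policy network
# must return a distrax distribution, the critic a scalar value, and both
# must be haiku-transformed without an apply rng. The environment must expose
# the opponent's actions in its observations.
#
#   def policy_fn(obs):
#     logits = hk.nets.MLP([8, 8, num_actions])(obs)
#     return distrax.Categorical(logits=logits)
#
#   def value_fn(obs):
#     return hk.nets.MLP([8, 8, 1])(obs)
#
#   policy = hk.without_apply_rng(hk.transform(policy_fn))
#   critic = hk.without_apply_rng(hk.transform(value_fn))
#   agent = OpponentShapingAgent(
#       player_id=0, opponent_ids=[1], info_state_size=(info_state_size,),
#       num_actions=num_actions, policy=policy, critic=critic,
#       correction_type='dice', env=env)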
| open_spiel-master | open_spiel/python/jax/opponent_shaping.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements Deep CFR Algorithm.
See https://arxiv.org/abs/1811.00164.
The algorithm defines `advantage` and `strategy` networks that compute
advantages used to do regret matching across information sets and to
approximate the strategy profile of the game. To train these networks, a
reservoir buffer (other data structures may be used) accumulates samples.
This implementation uses skip connections as described in the paper if two
consecutive layers of the advantage or policy network have the same number
of units, except for the last connection. Before the last hidden layer
a layer normalization is applied.
"""
import collections
import random
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
import optax
# tensorflow is only used for data processing
import tensorflow as tf
import tensorflow_datasets as tfds
from open_spiel.python import policy
import pyspiel
# The size of the shuffle buffer used to reshuffle part of the data each
# epoch within one training iteration
ADVANTAGE_TRAIN_SHUFFLE_SIZE = 100000
STRATEGY_TRAIN_SHUFFLE_SIZE = 1000000
# TODO(author3) Refactor into data structures lib.
class ReservoirBuffer(object):
"""Allows uniform sampling over a stream of data.
This class supports the storage of arbitrary elements, such as observation
tensors, integer actions, etc.
See https://en.wikipedia.org/wiki/Reservoir_sampling for more details.
"""
def __init__(self, reservoir_buffer_capacity):
self._reservoir_buffer_capacity = reservoir_buffer_capacity
self._data = []
self._add_calls = 0
def add(self, element):
"""Potentially adds `element` to the reservoir buffer.
Args:
element: data to be added to the reservoir buffer.
"""
if len(self._data) < self._reservoir_buffer_capacity:
self._data.append(element)
else:
idx = np.random.randint(0, self._add_calls + 1)
if idx < self._reservoir_buffer_capacity:
self._data[idx] = element
self._add_calls += 1
def sample(self, num_samples):
"""Returns `num_samples` uniformly sampled from the buffer.
Args:
num_samples: `int`, number of samples to draw.
Returns:
An iterable over `num_samples` random elements of the buffer.
Raises:
ValueError: If there are fewer than `num_samples` elements in the buffer.
"""
if len(self._data) < num_samples:
raise ValueError('{} elements could not be sampled from size {}'.format(
num_samples, len(self._data)))
return random.sample(self._data, num_samples)
def clear(self):
self._data = []
self._add_calls = 0
def __len__(self):
return len(self._data)
def __iter__(self):
return iter(self._data)
@property
def data(self):
return self._data
def shuffle_data(self):
random.shuffle(self._data)
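# Illustrative usage sketch (not used by the solver directly in this form):
#
#   buf = ReservoirBuffer(reservoir_buffer_capacity=100)
#   for x in range(10000):
#     buf.add(x)          # every element ever added is equally likely to be
#                         # retained, regardless of when it was added
#   samples = buf.sample(32)  # 32 elements drawn uniformly from the buffer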
class DeepCFRSolver(policy.Policy):
"""Implements a solver for the Deep CFR Algorithm.
See https://arxiv.org/abs/1811.00164.
Define all networks and sampling buffers/memories. Derive losses & learning
steps. Initialize the game state and algorithmic variables.
"""
def __init__(self,
game,
policy_network_layers=(256, 256),
advantage_network_layers=(128, 128),
num_iterations: int = 100,
num_traversals: int = 100,
learning_rate: float = 1e-3,
batch_size_advantage: int = 2048,
batch_size_strategy: int = 2048,
memory_capacity: int = int(1e6),
policy_network_train_steps: int = 5000,
advantage_network_train_steps: int = 750,
reinitialize_advantage_networks: bool = True):
"""Initialize the Deep CFR algorithm.
Args:
game: Open Spiel game.
policy_network_layers: (list[int]) Layer sizes of strategy net MLP.
advantage_network_layers: (list[int]) Layer sizes of advantage net MLP.
num_iterations: Number of iterations.
num_traversals: Number of traversals per iteration.
learning_rate: Learning rate.
batch_size_advantage: (int) Batch size to sample from advantage memories.
batch_size_strategy: (int) Batch size to sample from strategy memories.
memory_capacity: Number of samples that can be stored in memory.
policy_network_train_steps: Number of policy network training steps (one
policy training iteration at the end).
advantage_network_train_steps: Number of advantage network training steps
(per iteration).
reinitialize_advantage_networks: Whether to re-initialize the advantage
network before training on each iteration.
"""
all_players = list(range(game.num_players()))
super(DeepCFRSolver, self).__init__(game, all_players)
self._game = game
if game.get_type().dynamics == pyspiel.GameType.Dynamics.SIMULTANEOUS:
# `_traverse_game_tree` does not take into account this option.
raise ValueError('Simultaneous games are not supported.')
self._batch_size_advantage = batch_size_advantage
self._batch_size_strategy = batch_size_strategy
self._policy_network_train_steps = policy_network_train_steps
self._advantage_network_train_steps = advantage_network_train_steps
self._policy_network_layers = policy_network_layers
self._advantage_network_layers = advantage_network_layers
self._num_players = game.num_players()
self._root_node = self._game.new_initial_state()
self._embedding_size = len(self._root_node.information_state_tensor(0))
self._num_iterations = num_iterations
self._num_traversals = num_traversals
self._reinitialize_advantage_networks = reinitialize_advantage_networks
self._num_actions = game.num_distinct_actions()
self._iteration = 1
self._learning_rate = learning_rate
self._rngkey = jax.random.PRNGKey(42)
# Initialize networks
def base_network(x, layers):
x = hk.nets.MLP(layers[:-1], activate_final=True)(x)
x = hk.LayerNorm(axis=-1, create_scale=True, create_offset=True)(x)
x = hk.Linear(layers[-1])(x)
x = jax.nn.relu(x)
x = hk.Linear(self._num_actions)(x)
return x
def adv_network(x, mask):
x = base_network(x, advantage_network_layers)
x = mask * x
return x
def policy_network(x, mask):
x = base_network(x, policy_network_layers)
x = jnp.where(mask == 1, x, -10e20)
x = jax.nn.softmax(x)
return x
x, mask = (jnp.ones([1, self._embedding_size]),
jnp.ones([1, self._num_actions]))
self._hk_adv_network = hk.without_apply_rng(hk.transform(adv_network))
self._params_adv_network = [
self._hk_adv_network.init(self._next_rng_key(), x, mask)
for _ in range(self._num_players)
]
self._hk_policy_network = hk.without_apply_rng(hk.transform(policy_network))
self._params_policy_network = self._hk_policy_network.init(
self._next_rng_key(), x, mask)
# initialize losses and grads
self._adv_loss = optax.l2_loss
self._policy_loss = optax.l2_loss
self._adv_grads = jax.value_and_grad(self._loss_adv)
self._policy_grads = jax.value_and_grad(self._loss_policy)
# initialize optimizers
self._opt_adv_init, self._opt_adv_update = optax.adam(learning_rate)
self._opt_adv_state = [
self._opt_adv_init(params) for params in self._params_adv_network
]
self._opt_policy_init, self._opt_policy_update = optax.adam(learning_rate)
self._opt_policy_state = self._opt_policy_init(self._params_policy_network)
# initialize memories
self._create_memories(memory_capacity)
# jit param updates and matched regrets calculations
self._jitted_matched_regrets = self._get_jitted_matched_regrets()
self._jitted_adv_update = self._get_jitted_adv_update()
self._jitted_policy_update = self._get_jitted_policy_update()
def _get_jitted_adv_update(self):
"""get jitted advantage update function."""
@jax.jit
def update(params_adv, opt_state, info_states, samp_regrets, iterations,
masks, total_iterations):
main_loss, grads = self._adv_grads(params_adv, info_states, samp_regrets,
iterations, masks, total_iterations)
updates, new_opt_state = self._opt_adv_update(grads, opt_state)
new_params = optax.apply_updates(params_adv, updates)
return new_params, new_opt_state, main_loss
return update
def _get_jitted_policy_update(self):
"""get jitted policy update function."""
@jax.jit
def update(params_policy, opt_state, info_states, action_probs, iterations,
masks, total_iterations):
main_loss, grads = self._policy_grads(params_policy, info_states,
action_probs, iterations, masks,
total_iterations)
updates, new_opt_state = self._opt_policy_update(grads, opt_state)
new_params = optax.apply_updates(params_policy, updates)
return new_params, new_opt_state, main_loss
return update
def _get_jitted_matched_regrets(self):
"""get jitted regret matching function."""
@jax.jit
def get_matched_regrets(info_state, legal_actions_mask, params_adv):
advs = self._hk_adv_network.apply(params_adv, info_state,
legal_actions_mask)
advantages = jnp.maximum(advs, 0)
summed_regret = jnp.sum(advantages)
matched_regrets = jax.lax.cond(
summed_regret > 0, lambda _: advantages / summed_regret,
lambda _: jax.nn.one_hot( # pylint: disable=g-long-lambda
jnp.argmax(jnp.where(legal_actions_mask == 1, advs, -10e20)), self
._num_actions), None)
return advantages, matched_regrets
return get_matched_regrets
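# Regret matching, as implemented above: each action is played with
# probability proportional to its positive clipped advantage,
#   strategy(a) = max(adv(a), 0) / sum_b max(adv(b), 0),
# falling back to a one-hot distribution on the highest-advantage legal
# action when no advantage is positive.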
def _next_rng_key(self):
"""Get the next rng subkey from class rngkey."""
self._rngkey, subkey = jax.random.split(self._rngkey)
return subkey
def _reinitialize_policy_network(self):
"""Reinitalize policy network and optimizer for training."""
x, mask = (jnp.ones([1, self._embedding_size]),
jnp.ones([1, self._num_actions]))
self._params_policy_network = self._hk_policy_network.init(
self._next_rng_key(), x, mask)
self._opt_policy_state = self._opt_policy_init(self._params_policy_network)
def _reinitialize_advantage_network(self, player):
"""Reinitalize player's advantage network and optimizer for training."""
x, mask = (jnp.ones([1, self._embedding_size]),
jnp.ones([1, self._num_actions]))
self._params_adv_network[player] = self._hk_adv_network.init(
self._next_rng_key(), x, mask)
self._opt_adv_state[player] = self._opt_adv_init(
self._params_adv_network[player])
@property
def advantage_buffers(self):
return self._advantage_memories
@property
def strategy_buffer(self):
return self._strategy_memories
def clear_advantage_buffers(self):
for p in range(self._num_players):
self._advantage_memories[p].clear()
def _create_memories(self, memory_capacity):
"""Create memory buffers and associated feature descriptions."""
self._strategy_memories = ReservoirBuffer(memory_capacity)
self._advantage_memories = [
ReservoirBuffer(memory_capacity) for _ in range(self._num_players)
]
self._strategy_feature_description = {
'info_state': tf.io.FixedLenFeature([self._embedding_size], tf.float32),
'action_probs': tf.io.FixedLenFeature([self._num_actions], tf.float32),
'iteration': tf.io.FixedLenFeature([1], tf.float32),
'legal_actions': tf.io.FixedLenFeature([self._num_actions], tf.float32)
}
self._advantage_feature_description = {
'info_state': tf.io.FixedLenFeature([self._embedding_size], tf.float32),
'iteration': tf.io.FixedLenFeature([1], tf.float32),
'samp_regret': tf.io.FixedLenFeature([self._num_actions], tf.float32),
'legal_actions': tf.io.FixedLenFeature([self._num_actions], tf.float32)
}
def solve(self):
"""Solution logic for Deep CFR."""
advantage_losses = collections.defaultdict(list)
for _ in range(self._num_iterations):
for p in range(self._num_players):
for _ in range(self._num_traversals):
self._traverse_game_tree(self._root_node, p)
if self._reinitialize_advantage_networks:
# Re-initialize advantage network for p and train from scratch.
self._reinitialize_advantage_network(p)
advantage_losses[p].append(self._learn_advantage_network(p))
self._iteration += 1
# Train policy network.
policy_loss = self._learn_strategy_network()
return None, advantage_losses, policy_loss
def _serialize_advantage_memory(self, info_state, iteration, samp_regret,
legal_actions_mask):
"""Create serialized example to store an advantage entry."""
example = tf.train.Example(
features=tf.train.Features(
feature={
'info_state':
tf.train.Feature(
float_list=tf.train.FloatList(value=info_state)),
'iteration':
tf.train.Feature(
float_list=tf.train.FloatList(value=[iteration])),
'samp_regret':
tf.train.Feature(
float_list=tf.train.FloatList(value=samp_regret)),
'legal_actions':
tf.train.Feature(
float_list=tf.train.FloatList(value=legal_actions_mask))
}))
return example.SerializeToString()
def _deserialize_advantage_memory(self, serialized):
"""Deserializes a batch of advantage examples for the train step."""
tups = tf.io.parse_example(serialized, self._advantage_feature_description)
return (tups['info_state'], tups['samp_regret'], tups['iteration'],
tups['legal_actions'])
def _serialize_strategy_memory(self, info_state, iteration,
strategy_action_probs, legal_actions_mask):
"""Create serialized example to store a strategy entry."""
example = tf.train.Example(
features=tf.train.Features(
feature={
'info_state':
tf.train.Feature(
float_list=tf.train.FloatList(value=info_state)),
'action_probs':
tf.train.Feature(
float_list=tf.train.FloatList(
value=strategy_action_probs)),
'iteration':
tf.train.Feature(
float_list=tf.train.FloatList(value=[iteration])),
'legal_actions':
tf.train.Feature(
float_list=tf.train.FloatList(value=legal_actions_mask))
}))
return example.SerializeToString()
def _deserialize_strategy_memory(self, serialized):
"""Deserializes a batch of strategy examples for the train step."""
tups = tf.io.parse_example(serialized, self._strategy_feature_description)
return (tups['info_state'], tups['action_probs'], tups['iteration'],
tups['legal_actions'])
def _add_to_strategy_memory(self, info_state, iteration,
strategy_action_probs, legal_actions_mask):
# pylint: disable=g-doc-args
"""Adds the given strategy data to the memory.
Uses either a tfrecordsfile on disk if provided, or a reservoir buffer.
"""
serialized_example = self._serialize_strategy_memory(
info_state, iteration, strategy_action_probs, legal_actions_mask)
self._strategy_memories.add(serialized_example)
def _traverse_game_tree(self, state, player):
"""Performs a traversal of the game tree using external sampling.
Over a traversal the advantage and strategy memories are populated with
computed advantage values and matched regrets respectively.
Args:
state: Current OpenSpiel game state.
player: (int) Player index for this traversal.
Returns:
Recursively returns expected payoffs for each action.
"""
if state.is_terminal():
# Terminal state: return this player's utility.
return state.returns()[player]
elif state.is_chance_node():
# If this is a chance node, sample an action
chance_outcome, chance_proba = zip(*state.chance_outcomes())
action = np.random.choice(chance_outcome, p=chance_proba)
return self._traverse_game_tree(state.child(action), player)
elif state.current_player() == player:
# Update the policy over the info set & actions via regret matching.
_, strategy = self._sample_action_from_advantage(state, player)
strategy = np.array(strategy)
exp_payoff = 0 * strategy
for action in state.legal_actions():
exp_payoff[action] = self._traverse_game_tree(
state.child(action), player)
ev = np.sum(exp_payoff * strategy)
samp_regret = (exp_payoff - ev) * state.legal_actions_mask(player)
self._advantage_memories[player].add(
self._serialize_advantage_memory(state.information_state_tensor(),
self._iteration, samp_regret,
state.legal_actions_mask(player)))
return ev
else:
other_player = state.current_player()
_, strategy = self._sample_action_from_advantage(state, other_player)
# Renormalize the distribution to guard against numerical errors.
probs = np.array(strategy)
probs /= probs.sum()
sampled_action = np.random.choice(range(self._num_actions), p=probs)
self._add_to_strategy_memory(
state.information_state_tensor(other_player), self._iteration, probs,
state.legal_actions_mask(other_player))
return self._traverse_game_tree(state.child(sampled_action), player)
def _sample_action_from_advantage(self, state, player):
"""Returns an info state policy by applying regret-matching.
Args:
state: Current OpenSpiel game state.
player: (int) Player index over which to compute regrets.
Returns:
1. (np-array) Advantage values for info state actions indexed by action.
2. (np-array) Matched regrets, prob for actions indexed by action.
"""
info_state = jnp.array(
state.information_state_tensor(player), dtype=jnp.float32)
legal_actions_mask = jnp.array(
state.legal_actions_mask(player), dtype=jnp.float32)
advantages, matched_regrets = self._jitted_matched_regrets(
info_state, legal_actions_mask, self._params_adv_network[player])
return advantages, matched_regrets
def action_probabilities(self, state):
"""Returns action probabilities dict for a single batch."""
cur_player = state.current_player()
legal_actions = state.legal_actions(cur_player)
info_state_vector = jnp.array(
state.information_state_tensor(), dtype=jnp.float32)
legal_actions_mask = jnp.array(
state.legal_actions_mask(cur_player), dtype=jnp.float32)
probs = self._hk_policy_network.apply(self._params_policy_network,
info_state_vector, legal_actions_mask)
return {action: probs[action] for action in legal_actions}
def _get_advantage_dataset(self, player, nr_steps=1):
"""Returns the collected regrets for the given player as a dataset."""
self._advantage_memories[player].shuffle_data()
data = tf.data.Dataset.from_tensor_slices(
self._advantage_memories[player].data)
data = data.repeat()
data = data.shuffle(ADVANTAGE_TRAIN_SHUFFLE_SIZE)
data = data.batch(self._batch_size_advantage)
data = data.map(self._deserialize_advantage_memory)
data = data.prefetch(tf.data.experimental.AUTOTUNE)
data = data.take(nr_steps)
return iter(tfds.as_numpy(data))
def _get_strategy_dataset(self, nr_steps=1):
"""Returns the collected strategy memories as a dataset."""
self._strategy_memories.shuffle_data()
data = tf.data.Dataset.from_tensor_slices(self._strategy_memories.data)
data = data.repeat()
data = data.shuffle(STRATEGY_TRAIN_SHUFFLE_SIZE)
data = data.batch(self._batch_size_strategy)
data = data.map(self._deserialize_strategy_memory)
data = data.prefetch(tf.data.experimental.AUTOTUNE)
data = data.take(nr_steps)
return iter(tfds.as_numpy(data))
def _loss_adv(self, params_adv, info_states, samp_regrets, iterations, masks,
total_iterations):
"""Loss function for our advantage network."""
preds = self._hk_adv_network.apply(params_adv, info_states, masks)
loss_values = jnp.mean(self._adv_loss(preds, samp_regrets), axis=-1)
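    # Weight each sample by the iteration it was collected on (linear CFR
    # averaging); the 2 / total_iterations factor keeps the mean weight near 1.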
loss_values = loss_values * iterations * 2 / total_iterations
return jnp.mean(loss_values)
def _learn_advantage_network(self, player):
"""Compute the loss on sampled transitions and perform a Q-network update.
If there are not enough elements in the buffer, no loss is computed and
`None` is returned instead.
Args:
player: (int) player index.
Returns:
The average loss over the advantage network of the last batch.
"""
for data in self._get_advantage_dataset(
player, self._advantage_network_train_steps):
(self._params_adv_network[player], self._opt_adv_state[player],
main_loss) = self._jitted_adv_update(self._params_adv_network[player],
self._opt_adv_state[player],
*data, jnp.array(self._iteration))
return main_loss
def _loss_policy(self, params_policy, info_states, action_probs, iterations,
masks, total_iterations):
"""Loss function for our policy network."""
preds = self._hk_policy_network.apply(params_policy, info_states, masks)
loss_values = jnp.mean(self._policy_loss(preds, action_probs), axis=-1)
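    # Strategy samples are weighted linearly by collection iteration as well.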
loss_values = loss_values * iterations * 2 / total_iterations
return jnp.mean(loss_values)
def _learn_strategy_network(self):
"""Compute the loss over the strategy network.
Returns:
The average loss obtained on the last training batch of transitions
or `None`.
"""
for data in self._get_strategy_dataset(self._policy_network_train_steps):
(self._params_policy_network, self._opt_policy_state,
main_loss) = self._jitted_policy_update(self._params_policy_network,
self._opt_policy_state,
*data, self._iteration)
return main_loss
| open_spiel-master | open_spiel/python/jax/deep_cfr.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.jax.opponent_shaping."""
import typing
from typing import Tuple
from absl.testing import absltest
from absl.testing import parameterized
import distrax
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
from open_spiel.python import rl_environment
from open_spiel.python.jax.opponent_shaping import OpponentShapingAgent
import pyspiel
SEED = 24984617
def make_iterated_matrix_game(
game: str, iterations=5, batch_size=8
) -> rl_environment.Environment:
matrix_game = pyspiel.load_matrix_game(game)
config = {'num_repetitions': iterations, 'batch_size': batch_size}
game = pyspiel.create_repeated_game(matrix_game, config)
env = rl_environment.Environment(game)
return env
def make_agent_networks(
num_actions: int,
) -> Tuple[hk.Transformed, hk.Transformed]:
def policy(obs):
logits = hk.nets.MLP(output_sizes=[8, 8, num_actions], with_bias=True)(obs)
logits = jnp.nan_to_num(logits)
return distrax.Categorical(logits=logits)
def value_fn(obs):
values = hk.nets.MLP(output_sizes=[8, 8, 1], with_bias=True)(obs)
return values
return hk.without_apply_rng(hk.transform(policy)), hk.without_apply_rng(
hk.transform(value_fn)
)
def run_agents(
agents: typing.List[OpponentShapingAgent],
env: rl_environment.Environment,
num_steps=1000,
):
time_step = env.reset()
for _ in range(num_steps):
actions = []
for agent in agents:
action, _ = agent.step(time_step)
if action is not None:
action = action.squeeze()
actions.append(action)
if time_step.last():
time_step = env.reset()
else:
time_step = env.step(actions)
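      # Store the joint actions in the observations for use on the next step.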
time_step.observations['actions'] = np.array(actions)
class LolaPolicyGradientTest(parameterized.TestCase, absltest.TestCase):
@parameterized.parameters(['matrix_pd'])
def test_run_game(self, game_name):
batch_size = 8
iterations = 5
env = make_iterated_matrix_game(
game_name, batch_size=1, iterations=iterations
)
env.seed(SEED)
key = jax.random.PRNGKey(SEED)
num_actions = env.action_spec()['num_actions']
policy_network, critic_network = make_agent_networks(
num_actions=num_actions
)
# pylint: disable=g-complex-comprehension
agents = [
OpponentShapingAgent(
player_id=i,
opponent_ids=[1 - i],
seed=key,
correction_type='lola',
env=env,
n_lookaheads=1,
info_state_size=env.observation_spec()['info_state'],
num_actions=env.action_spec()['num_actions'],
policy=policy_network,
critic=critic_network,
batch_size=batch_size,
pi_learning_rate=0.005,
critic_learning_rate=1.0,
policy_update_interval=2,
discount=0.96,
use_jit=False,
)
for i in range(2)
]
run_agents(agents=agents, env=env, num_steps=batch_size * 10)
class DicePolicyGradientTest(parameterized.TestCase, absltest.TestCase):
@parameterized.parameters(['matrix_pd'])
def test_run_game(self, game_name):
batch_size = 8
iterations = 5
env = make_iterated_matrix_game(
game_name, batch_size=1, iterations=iterations
)
env.seed(SEED)
key = jax.random.PRNGKey(SEED)
num_actions = env.action_spec()['num_actions']
policy_network, critic_network = make_agent_networks(
num_actions=num_actions
)
# pylint: disable=g-complex-comprehension
agents = [
OpponentShapingAgent(
player_id=i,
opponent_ids=[1 - i],
seed=key,
correction_type='dice',
env=env,
n_lookaheads=2,
info_state_size=env.observation_spec()['info_state'],
num_actions=env.action_spec()['num_actions'],
policy=policy_network,
critic=critic_network,
batch_size=batch_size,
pi_learning_rate=0.005,
critic_learning_rate=1.0,
policy_update_interval=2,
discount=0.96,
use_jit=False,
)
for i in range(2)
]
run_agents(agents=agents, env=env, num_steps=batch_size * 10)
if __name__ == '__main__':
np.random.seed(SEED)
absltest.main()
| open_spiel-master | open_spiel/python/jax/opponent_shaping_jax_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| open_spiel-master | open_spiel/python/jax/__init__.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.jax.dqn."""
from absl.testing import absltest
from open_spiel.python import rl_agent_policy
from open_spiel.python import rl_environment
from open_spiel.python.jax import boltzmann_dqn
import pyspiel
# A simple two-action game encoded as an EFG game. Going left gets -1, going
# right gets a +1.
SIMPLE_EFG_DATA = """
EFG 2 R "Simple single-agent problem" { "Player 1" } ""
p "ROOT" 1 1 "ROOT" { "L" "R" } 0
t "L" 1 "Outcome L" { -1.0 }
t "R" 2 "Outcome R" { 1.0 }
"""
class DQNTest(absltest.TestCase):
def test_train(self):
game = pyspiel.load_efg_game(SIMPLE_EFG_DATA)
env = rl_environment.Environment(game=game)
agent = boltzmann_dqn.BoltzmannDQN(
0,
state_representation_size=game.information_state_tensor_shape()[0],
num_actions=game.num_distinct_actions(),
hidden_layers_sizes=[16],
replay_buffer_capacity=100,
batch_size=5,
epsilon_start=0.02,
epsilon_end=0.01,
eta=5.0)
total_reward = 0
# Training. This will use the epsilon-greedy actions.
for _ in range(100):
time_step = env.reset()
while not time_step.last():
agent_output = agent.step(time_step)
time_step = env.step([agent_output.action])
total_reward += time_step.rewards[0]
agent.step(time_step)
self.assertGreaterEqual(total_reward, -100)
# Update the previous Q-network.
agent.update_prev_q_network()
# This will use the soft-max actions.
policy = rl_agent_policy.RLAgentPolicy(game, agent, 0, False)
probs = policy.action_probabilities(game.new_initial_state())
self.assertAlmostEqual(probs[0], 0.54, places=2)
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/jax/boltzmann_dqn_jax_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.jax.deep_cfr."""
from absl.testing import absltest
from absl.testing import parameterized
from open_spiel.python import policy
from open_spiel.python.algorithms import exploitability
from open_spiel.python.jax import deep_cfr
import pyspiel
class DeepCFRTest(parameterized.TestCase):
@parameterized.parameters('leduc_poker', 'kuhn_poker', 'liars_dice')
def test_deep_cfr_runs(self, game_name):
game = pyspiel.load_game(game_name)
deep_cfr_solver = deep_cfr.DeepCFRSolver(
game,
policy_network_layers=(8, 4),
advantage_network_layers=(4, 2),
num_iterations=2,
num_traversals=2,
learning_rate=1e-3,
batch_size_advantage=8,
batch_size_strategy=8,
memory_capacity=1e7)
deep_cfr_solver.solve()
def test_matching_pennies_3p(self):
# We don't expect Deep CFR to necessarily converge on 3-player games but
# it's nonetheless interesting to see this result.
game = pyspiel.load_game_as_turn_based('matching_pennies_3p')
deep_cfr_solver = deep_cfr.DeepCFRSolver(
game,
policy_network_layers=(16, 8),
advantage_network_layers=(32, 16),
num_iterations=2,
num_traversals=2,
learning_rate=1e-3,
batch_size_advantage=8,
batch_size_strategy=8,
memory_capacity=1e7)
deep_cfr_solver.solve()
conv = exploitability.nash_conv(
game,
policy.tabular_policy_from_callable(
game, deep_cfr_solver.action_probabilities))
print('Deep CFR in Matching Pennies 3p. NashConv: {}'.format(conv))
if __name__ == '__main__':
absltest.main()
| open_spiel-master | open_spiel/python/jax/deep_cfr_jax_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Policy gradient methods implemented in JAX."""
import collections
import chex
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
import optax
import rlax
from open_spiel.python import rl_agent
Transition = collections.namedtuple(
"Transition",
"info_state action reward discount legal_actions_mask next_info_state")
class NetA2C(hk.Module):
"""A simple network with a policy head and a baseline value head."""
def __init__(self, num_actions, hidden_layers_sizes):
super().__init__()
self._num_actions = num_actions
self._hidden_layers_sizes = hidden_layers_sizes
def __call__(self, info_state):
"""Process a batch of observations."""
torso = hk.nets.MLP(self._hidden_layers_sizes, activate_final=True)
hidden = torso(info_state)
policy_logits = hk.Linear(self._num_actions)(hidden)
baseline = hk.Linear(1)(hidden)
return policy_logits, baseline
class NetPG(hk.Module):
"""A simple network with a policy head and an action-value head."""
def __init__(self, num_actions, hidden_layers_sizes):
super().__init__()
self._num_actions = num_actions
self._hidden_layers_sizes = hidden_layers_sizes
def __call__(self, info_state):
"""Process a batch of observations."""
torso = hk.nets.MLP(self._hidden_layers_sizes, activate_final=True)
hidden = torso(info_state)
policy_logits = hk.Linear(self._num_actions)(hidden)
q_values = hk.Linear(self._num_actions)(hidden)
return policy_logits, q_values
def generate_a2c_pi_loss(net_apply, loss_class, entropy_cost, l2_actor_weight,
lambda_):
"""A function generator generates loss function."""
def _a2c_pi_loss(net_params, batch):
info_states, actions, rewards, discounts = batch["info_states"], batch[
"actions"], batch["rewards"], batch["discounts"]
policy_logits, baselines = net_apply(net_params, info_states)
policy_logits = policy_logits[:-1]
baselines = jnp.squeeze(baselines, axis=1)
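    # Append a zero baseline for the terminal state so that baselines[1:]
    # provides the bootstrap values.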
baselines = jnp.concatenate([baselines[:-1], jnp.zeros(1)])
td_returns = rlax.lambda_returns(
rewards,
discounts,
baselines[1:],
lambda_=lambda_,
stop_target_gradients=True)
advantages = td_returns - baselines[:-1]
chex.assert_equal_shape([td_returns, actions, advantages])
pi_loss = loss_class(
logits_t=policy_logits,
a_t=actions,
adv_t=advantages,
w_t=jnp.ones(td_returns.shape))
ent_loss = rlax.entropy_loss(
logits_t=policy_logits, w_t=jnp.ones(td_returns.shape))
l2_loss = jnp.sum(jnp.square(jax.flatten_util.ravel_pytree(net_params)[0]))
return pi_loss + entropy_cost * ent_loss + l2_actor_weight * l2_loss
return _a2c_pi_loss
def generate_a2c_critic_loss(net_apply, l2_critic_weight, lambda_):
"""A function generator generates loss function."""
def _a2c_critic_loss(net_params, batch):
info_states, rewards, discounts = batch["info_states"], batch[
"rewards"], batch["discounts"]
_, baselines = net_apply(net_params, info_states)
baselines = jnp.squeeze(baselines, axis=1)
baselines = jnp.concatenate([baselines[:-1], jnp.zeros(1)])
td_lambda = rlax.td_lambda(
v_tm1=baselines[:-1],
r_t=rewards,
discount_t=discounts,
v_t=baselines[1:],
lambda_=lambda_,
stop_target_gradients=True)
l2_loss = jnp.sum(jnp.square(jax.flatten_util.ravel_pytree(net_params)[0]))
return jnp.mean(jnp.square(td_lambda)) + l2_critic_weight * l2_loss
return _a2c_critic_loss
def generate_pg_pi_loss(net_apply, loss_class, entropy_cost, l2_actor_weight):
"""A function generator generates loss function."""
def _pg_loss(net_params, batch):
info_states = batch["info_states"]
policy_logits, q_values = net_apply(net_params, info_states)
chex.assert_equal_shape([policy_logits, q_values])
pi_loss = loss_class(logits_t=policy_logits, q_t=q_values)
ent_loss = rlax.entropy_loss(
logits_t=policy_logits, w_t=jnp.ones(policy_logits.shape[:1]))
l2_loss = jnp.sum(jnp.square(jax.flatten_util.ravel_pytree(net_params)[0]))
return pi_loss + entropy_cost * ent_loss + l2_actor_weight * l2_loss
return _pg_loss
def generate_pg_critic_loss(net_apply, l2_critic_weight, lambda_):
"""A function generator generates loss function."""
def _critic_loss(net_params, batch):
info_states, actions, rewards, discounts = batch["info_states"], batch[
"actions"], batch["rewards"], batch["discounts"]
_, q_values = net_apply(net_params, info_states)
q_values = q_values[:-1]
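    # Pad with zeros (and a dummy action) for the terminal state so the SARSA
    # targets bootstrap from a zero value.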
q_values = jnp.concatenate(
[q_values, jnp.zeros(q_values[-1].reshape(1, -1).shape)])
actions = jnp.concatenate([actions, jnp.zeros(1, dtype=int)])
sarsa_lambda = rlax.sarsa_lambda(
q_tm1=q_values[:-1],
a_tm1=actions[:-1],
r_t=rewards,
discount_t=discounts,
q_t=q_values[1:],
a_t=actions[1:],
lambda_=lambda_,
stop_target_gradients=True)
l2_loss = jnp.sum(jnp.square(jax.flatten_util.ravel_pytree(net_params)[0]))
return jnp.mean(jnp.square(sarsa_lambda)) + l2_critic_weight * l2_loss
return _critic_loss
def generate_act_func(net_apply):
"""A function generator generates act function."""
def _act(net_params, info_state, action_mask, rng):
info_state = jnp.reshape(info_state, [1, -1])
policy_logits, _ = net_apply(net_params, info_state)
policy_probs = jax.nn.softmax(policy_logits, axis=1)
# Remove illegal actions, re-normalize probs
probs = policy_probs[0] * action_mask
probs /= jnp.sum(probs)
action = jax.random.choice(rng, len(probs), p=probs)
return action, probs
return _act
class PolicyGradient(rl_agent.AbstractAgent):
"""Policy Gradient Agent implementation in JAX."""
def __init__(self,
player_id,
info_state_size,
num_actions,
loss_str="a2c",
loss_class=None,
hidden_layers_sizes=(128,),
lambda_=1.0,
critic_learning_rate=0.01,
pi_learning_rate=0.001,
entropy_cost=0.01,
l2_weight_actor=0.0,
l2_weight_critic=0.0,
num_critic_before_pi=8,
additional_discount_factor=1.0,
max_global_gradient_norm=None,
optimizer_str="sgd",
seed=42):
"""Initialize the PolicyGradient agent.
Args:
player_id: int, player identifier. Usually its position in the game.
info_state_size: int, info_state vector size.
num_actions: int, number of actions per info state.
loss_str: string or None. If string, must be one of ["rpg", "qpg", "rm",
"a2c"] and defined in `_get_loss_class`. If None, a loss class must be
passed through `loss_class`. Defaults to "a2c".
loss_class: Class or None. If Class, it must define the policy gradient
loss. If None a loss class in a string format must be passed through
`loss_str`. Defaults to None.
hidden_layers_sizes: iterable, defines the neural network layers. Defaults
to (128,), which produces a NN: [INPUT] -> [128] -> ReLU -> [OUTPUT].
lambda_: float, lambda in TD(lambda) or SARSA(lambda). Defaults to 1.0.
critic_learning_rate: float, learning rate used for Critic (Q or V).
        Defaults to 0.01.
pi_learning_rate: float, learning rate used for Pi. Defaults to 0.001.
entropy_cost: float, entropy cost used to multiply the entropy loss. Can
        be set to None to skip entropy computation. Defaults to 0.01.
      l2_weight_actor: float, L2 penalty weight for the actor network.
        Defaults to 0.0.
      l2_weight_critic: float, L2 penalty weight for the critic network.
        Defaults to 0.0.
num_critic_before_pi: int, number of Critic (Q or V) updates before each
Pi update. Defaults to 8 (every 8th critic learning step, Pi also
learns).
additional_discount_factor: float, additional discount to compute returns.
        Defaults to 1.0, in which case no extra discount is applied. Note that
        users must provide *only one of* `loss_str` or `loss_class`.
max_global_gradient_norm: float or None, maximum global norm of a gradient
to which the gradient is shrunk if its value is larger.
optimizer_str: String defining which optimizer to use. Supported values
are {sgd, adam}
seed: random seed
"""
assert bool(loss_str) ^ bool(loss_class), "Please provide only one option."
self._kwargs = locals()
loss_class = loss_class if loss_class else self._get_loss_class(loss_str)
self.player_id = player_id
self._num_actions = num_actions
self._extra_discount = additional_discount_factor
self._num_critic_before_pi = num_critic_before_pi
self._episode_data = []
self._dataset = collections.defaultdict(list)
self._prev_time_step = None
self._prev_action = None
# Step counters
self._step_counter = 0
self._episode_counter = 0
self._num_learn_steps = 0
# Keep track of the last training loss achieved in an update step.
self._last_loss_value = None
self._loss_str = loss_str
# Network
    # The torso activates its final layer since the logit and value heads are
    # attached afterwards.
net_class = NetA2C if loss_str == "a2c" else NetPG
def net_func(info_input):
net = net_class(num_actions, hidden_layers_sizes)
return net(info_input)
hk_net = hk.without_apply_rng(hk.transform(net_func))
hk_net_apply = hk_net.apply
self.rng = jax.random.PRNGKey(seed)
init_inputs = jnp.ones((1, info_state_size))
self.hk_net_params = hk_net.init(self.rng, init_inputs)
self._act = jax.jit(generate_act_func(hk_net_apply))
if optimizer_str == "adam":
critic_optimizer = optax.adam(critic_learning_rate)
pi_optimizer = optax.adam(pi_learning_rate)
elif optimizer_str == "sgd":
critic_optimizer = optax.sgd(critic_learning_rate)
pi_optimizer = optax.sgd(pi_learning_rate)
else:
raise ValueError("Not implemented, choose from 'adam' and 'sgd'.")
if max_global_gradient_norm:
pi_optimizer = optax.chain(
pi_optimizer, optax.clip_by_global_norm(max_global_gradient_norm))
critic_optimizer = optax.chain(
critic_optimizer, optax.clip_by_global_norm(max_global_gradient_norm))
pi_opt_init, pi_opt_update = pi_optimizer.init, pi_optimizer.update
critic_opt_init, critic_opt_update = critic_optimizer.init, critic_optimizer.update
self._pi_opt_state = pi_opt_init(self.hk_net_params)
if loss_str == "a2c":
pi_loss_and_grad = jax.value_and_grad(
generate_a2c_pi_loss(hk_net_apply, loss_class, entropy_cost,
l2_weight_actor, lambda_))
critic_loss_and_grad = jax.value_and_grad(
generate_a2c_critic_loss(hk_net_apply, l2_weight_critic, lambda_))
self._critic_opt_state = critic_opt_init(self.hk_net_params)
else:
pi_loss_and_grad = jax.value_and_grad(
generate_pg_pi_loss(hk_net_apply, loss_class, entropy_cost,
l2_weight_actor))
critic_loss_and_grad = jax.value_and_grad(
generate_pg_critic_loss(hk_net_apply, l2_weight_critic, lambda_))
self._critic_opt_state = critic_opt_init(self.hk_net_params)
self._jit_pi_update = jax.jit(
self._get_update(pi_opt_update, pi_loss_and_grad))
self._jit_critic_update = jax.jit(
self._get_update(critic_opt_update, critic_loss_and_grad))
def _get_loss_class(self, loss_str):
if loss_str == "rpg":
return rlax.rpg_loss
elif loss_str == "qpg":
return rlax.qpg_loss
elif loss_str == "rm":
return rlax.rm_loss
elif loss_str == "a2c":
return rlax.policy_gradient_loss
def _get_update(self, opt_update, loss_fn):
def update(net_params, opt_state, batch):
loss_val, grad_val = loss_fn(net_params, batch)
updates, new_opt_state = opt_update(grad_val, opt_state)
new_net_params = optax.apply_updates(net_params, updates)
return new_net_params, new_opt_state, loss_val
return update
def step(self, time_step, is_evaluation=False):
"""Returns the action to be taken and updates the network if needed.
Args:
time_step: an instance of rl_environment.TimeStep.
is_evaluation: bool, whether this is a training or evaluation call.
Returns:
A `rl_agent.StepOutput` containing the action probs and chosen action.
"""
    # Act step: don't act at terminal info states or if it's not our turn.
if (not time_step.last()) and (time_step.is_simultaneous_move() or
self.player_id
== time_step.current_player()):
info_state = time_step.observations["info_state"][self.player_id]
legal_actions = time_step.observations["legal_actions"][self.player_id]
action_mask = np.zeros(self._num_actions)
action_mask[legal_actions] = 1
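      # Advance the PRNG key so each action sample uses fresh randomness.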
self.rng, _ = jax.random.split(self.rng)
action, probs = self._act(self.hk_net_params, np.asarray(info_state),
action_mask, self.rng)
else:
action = None
probs = []
if not is_evaluation:
self._step_counter += 1
# Add data points to current episode buffer.
if self._prev_time_step:
self._add_transition(time_step)
# Episode done, add to dataset and maybe learn.
if time_step.last():
self._episode_counter += 1
self._critic_update()
self._num_learn_steps += 1
if self._num_learn_steps % self._num_critic_before_pi == 0:
self._pi_update()
self._episode_data = []
self._prev_time_step = None
self._prev_action = None
return
else:
self._prev_time_step = time_step
self._prev_action = action
return rl_agent.StepOutput(action=action, probs=probs)
@property
def loss(self):
return (self._last_critic_loss_value, self._last_pi_loss_value)
def _add_transition(self, time_step):
"""Adds intra-episode transition to the `_episode_data` buffer.
Adds the transition from `self._prev_time_step` to `time_step`.
Args:
time_step: an instance of rl_environment.TimeStep.
"""
assert self._prev_time_step is not None
legal_actions = (
self._prev_time_step.observations["legal_actions"][self.player_id])
legal_actions_mask = np.zeros(self._num_actions)
legal_actions_mask[legal_actions] = 1.0
transition = Transition(
info_state=(
self._prev_time_step.observations["info_state"][self.player_id][:]),
action=self._prev_action,
reward=time_step.rewards[self.player_id],
discount=time_step.discounts[self.player_id],
legal_actions_mask=legal_actions_mask,
next_info_state=(
time_step.observations["info_state"][self.player_id][:]))
self._episode_data.append(transition)
def _critic_update(self):
"""Compute the Critic loss on sampled transitions & perform a critic update.
Returns:
The average Critic loss obtained on this batch.
"""
batch = {}
batch["info_states"] = jnp.asarray(
[transition.info_state for transition in self._episode_data] +
[self._episode_data[-1].next_info_state])
batch["rewards"] = jnp.asarray(
[transition.reward for transition in self._episode_data])
batch["discounts"] = jnp.asarray(
[transition.discount for transition in self._episode_data])
if self._loss_str != "a2c":
batch["actions"] = jnp.asarray(
[transition.action for transition in self._episode_data])
self.hk_net_params, self._critic_opt_state, self._last_critic_loss_value = self._jit_critic_update(
self.hk_net_params, self._critic_opt_state, batch)
return self._last_critic_loss_value
def _pi_update(self):
"""Compute the Pi loss on sampled transitions and perform a Pi update.
Returns:
The average Pi loss obtained on this batch.
"""
batch = {}
batch["info_states"] = jnp.asarray(
[transition.info_state for transition in self._episode_data] +
[self._episode_data[-1].next_info_state])
if self._loss_str == "a2c":
batch["discounts"] = jnp.asarray(
[transition.discount for transition in self._episode_data])
batch["actions"] = jnp.asarray(
[transition.action for transition in self._episode_data])
batch["rewards"] = jnp.asarray(
[transition.reward for transition in self._episode_data])
self.hk_net_params, self._pi_opt_state, self._last_pi_loss_value = self._jit_pi_update(
self.hk_net_params, self._pi_opt_state, batch)
return self._last_pi_loss_value
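# Example usage (a minimal sketch; the game name and hyperparameters are
# illustrative, not prescribed by this module):
#   env = rl_environment.Environment("kuhn_poker")
#   info_state_size = env.observation_spec()["info_state"][0]
#   num_actions = env.action_spec()["num_actions"]
#   agents = [
#       PolicyGradient(player_id=idx, info_state_size=info_state_size,
#                      num_actions=num_actions, loss_str="a2c")
#       for idx in range(2)
#   ]
#   time_step = env.reset()
#   while not time_step.last():
#     current_agent = agents[time_step.observations["current_player"]]
#     time_step = env.step([current_agent.step(time_step).action])
#   for agent in agents:
#     # The terminal step lets each agent learn from the finished episode.
#     agent.step(time_step)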
| open_spiel-master | open_spiel/python/jax/policy_gradient.py |