python_code | repo_name | file_path
---|---|---|
# Copyright 2018 The trfl Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for policy_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import tensorflow.compat.v1 as tf
from trfl import policy_ops
class EpsilonGreedyTest(tf.test.TestCase):
def testTieBreaking(self):
num_actions = 4
# Given some action values that are all equal:
action_values = [1.1] * num_actions
epsilon = 0.
# We expect the policy to be a uniform distribution.
expected = [1 / num_actions] * num_actions
result = policy_ops.epsilon_greedy(action_values, epsilon).probs
with self.test_session() as sess:
self.assertAllClose(sess.run(result), expected)
def testGreedy(self):
# Given some action values with one largest value:
action_values = [0.5, 0.99, 0.9, 1., 0.1, -0.1, -100.]
# And zero epsilon:
epsilon = 0.
# We expect a deterministic greedy policy that chooses one action.
expected = [0., 0., 0., 1., 0., 0., 0.]
result = policy_ops.epsilon_greedy(action_values, epsilon).probs
with self.test_session() as sess:
self.assertAllClose(sess.run(result), expected)
def testDistribution(self):
# Given some action values and non-zero epsilon:
action_values = [0.9, 1., 0.9, 0.1, -0.6]
epsilon = 0.1
# We expect a distribution that concentrates the right probabilities.
expected = [0.02, 0.92, 0.02, 0.02, 0.02]
result = policy_ops.epsilon_greedy(action_values, epsilon).probs
with self.test_session() as sess:
self.assertAllClose(sess.run(result), expected)
def testBatched(self):
# Given batched action values:
action_values = [[1., 2., 3.],
[4., 5., 6.],
[6., 5., 4.],
[3., 2., 1.]]
epsilon = 0.
# We expect batched probabilities.
expected = [[0., 0., 1.],
[0., 0., 1.],
[1., 0., 0.],
[1., 0., 0.]]
result = policy_ops.epsilon_greedy(action_values, epsilon).probs
with self.test_session() as sess:
self.assertAllClose(sess.run(result), expected)
def testFloat64(self):
# Given action values that are float64:
action_values = tf.convert_to_tensor([1., 2., 4., 3.], dtype=tf.float64)
epsilon = 0.1
expected = [0.025, 0.025, 0.925, 0.025]
result = policy_ops.epsilon_greedy(action_values, epsilon).probs
with self.test_session() as sess:
self.assertAllClose(sess.run(result), expected)
def testLegalActionsMask(self):
action_values = [0.9, 1., 0.9, 0.1, -0.6]
legal_actions_mask = [0., 1., 1., 1., 1.]
epsilon = 0.1
expected = [0.00, 0.925, 0.025, 0.025, 0.025]
result = policy_ops.epsilon_greedy(action_values, epsilon,
legal_actions_mask).probs
with self.test_session() as sess:
self.assertAllClose(sess.run(result), expected)
if __name__ == "__main__":
tf.test.main()
| trfl-master | trfl/policy_ops_test.py |
# Copyright 2018 The trfl Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tensorflow ops for common discrete-action value learning rules.
These ops define action value learning rules for discrete, scalar, action
spaces. Actions must be represented as indices in the range `[0, K)` where `K`
is the number of distinct actions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
# Dependency imports
import tensorflow.compat.v1 as tf
from trfl import base_ops
from trfl import indexing_ops
from trfl import sequence_ops
QExtra = collections.namedtuple(
"qlearning_extra", ["target", "td_error"])
DoubleQExtra = collections.namedtuple(
"double_qlearning_extra", ["target", "td_error", "best_action"])
def qlearning(q_tm1, a_tm1, r_t, pcont_t, q_t, name="QLearning"):
"""Implements the Q-learning loss as a TensorFlow op.
The loss is `0.5` times the squared difference between `q_tm1[a_tm1]` and
the target `r_t + pcont_t * max q_t`.
See "Reinforcement Learning: An Introduction" by Sutton and Barto.
(http://incompleteideas.net/book/ebook/node65.html).
Args:
q_tm1: Tensor holding Q-values for first timestep in a batch of
transitions, shape `[B x num_actions]`.
a_tm1: Tensor holding action indices, shape `[B]`.
r_t: Tensor holding rewards, shape `[B]`.
pcont_t: Tensor holding pcontinue values, shape `[B]`.
q_t: Tensor holding Q-values for second timestep in a batch of
transitions, shape `[B x num_actions]`.
name: name to prefix ops created within this op.
Returns:
A namedtuple with fields:
* `loss`: a tensor containing the batch of losses, shape `[B]`.
* `extra`: a namedtuple with fields:
* `target`: batch of target values for `q_tm1[a_tm1]`, shape `[B]`.
* `td_error`: batch of temporal difference errors, shape `[B]`.
"""
# Rank and compatibility checks.
base_ops.wrap_rank_shape_assert(
[[q_tm1, q_t], [a_tm1, r_t, pcont_t]], [2, 1], name)
# Q-learning op.
with tf.name_scope(name, values=[q_tm1, a_tm1, r_t, pcont_t, q_t]):
# Build target and select head to update.
with tf.name_scope("target"):
target = tf.stop_gradient(
r_t + pcont_t * tf.reduce_max(q_t, axis=1))
qa_tm1 = indexing_ops.batched_index(q_tm1, a_tm1)
# Temporal difference error and loss.
# Loss is MSE scaled by 0.5, so the gradient is equal to the TD error.
td_error = target - qa_tm1
loss = 0.5 * tf.square(td_error)
return base_ops.LossOutput(loss, QExtra(target, td_error))
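# --- Illustrative usage sketch (added for exposition; not part of the original
# trfl source). The shapes and numbers below are made-up assumptions: a batch
# of B=2 transitions with num_actions=3. It relies on this module's imports.
def _qlearning_usage_example():
  """Builds a toy Q-learning loss graph and returns the loss and extra ops."""
  q_tm1 = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])  # [B, num_actions]
  a_tm1 = tf.constant([2, 0])                              # [B] actions taken
  r_t = tf.constant([1.0, -1.0])                           # [B] rewards
  pcont_t = tf.constant([0.99, 0.0])                       # [B] discounts
  q_t = tf.constant([[1.5, 0.5, 0.0], [2.0, 2.0, 2.0]])    # [B, num_actions]
  loss, extra = qlearning(q_tm1, a_tm1, r_t, pcont_t, q_t)
  # `loss`, `extra.target` and `extra.td_error` all have shape [B]; the loss
  # would typically be reduced (e.g. tf.reduce_mean) before optimisation.
  return loss, extra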
def double_qlearning(
q_tm1, a_tm1, r_t, pcont_t, q_t_value, q_t_selector,
name="DoubleQLearning"):
"""Implements the double Q-learning loss as a TensorFlow op.
The loss is `0.5` times the squared difference between `q_tm1[a_tm1]` and
the target `r_t + pcont_t * q_t_value[argmax q_t_selector]`.
See "Double Q-learning" by van Hasselt.
(https://papers.nips.cc/paper/3964-double-q-learning.pdf).
Args:
q_tm1: Tensor holding Q-values for first timestep in a batch of
transitions, shape `[B x num_actions]`.
a_tm1: Tensor holding action indices, shape `[B]`.
r_t: Tensor holding rewards, shape `[B]`.
pcont_t: Tensor holding pcontinue values, shape `[B]`.
q_t_value: Tensor of Q-values for second timestep in a batch of transitions,
used to estimate the value of the best action, shape `[B x num_actions]`.
q_t_selector: Tensor of Q-values for second timestep in a batch of
transitions used to estimate the best action, shape `[B x num_actions]`.
name: name to prefix ops created within this op.
Returns:
A namedtuple with fields:
* `loss`: a tensor containing the batch of losses, shape `[B]`.
* `extra`: a namedtuple with fields:
* `target`: batch of target values for `q_tm1[a_tm1]`, shape `[B]`
* `td_error`: batch of temporal difference errors, shape `[B]`
* `best_action`: batch of greedy actions wrt `q_t_selector`, shape `[B]`
"""
# Rank and compatibility checks.
base_ops.wrap_rank_shape_assert(
[[q_tm1, q_t_value, q_t_selector], [a_tm1, r_t, pcont_t]], [2, 1], name)
# double Q-learning op.
with tf.name_scope(
name, values=[q_tm1, a_tm1, r_t, pcont_t, q_t_value, q_t_selector]):
# Build target and select head to update.
best_action = tf.argmax(q_t_selector, 1, output_type=tf.int32)
double_q_bootstrapped = indexing_ops.batched_index(q_t_value, best_action)
target = tf.stop_gradient(r_t + pcont_t * double_q_bootstrapped)
qa_tm1 = indexing_ops.batched_index(q_tm1, a_tm1)
# Temporal difference error and loss.
# Loss is MSE scaled by 0.5, so the gradient is equal to the TD error.
td_error = target - qa_tm1
loss = 0.5 * tf.square(td_error)
return base_ops.LossOutput(
loss, DoubleQExtra(target, td_error, best_action))
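# --- Illustrative usage sketch (added for exposition; not part of the original
# trfl source). The toy values are made-up assumptions. In a typical DQN-style
# setup `q_t_value` would come from the target network and `q_t_selector` from
# the online network, decoupling action selection from action evaluation.
def _double_qlearning_usage_example():
  q_tm1 = tf.constant([[1.0, 2.0], [3.0, 4.0]])         # online net at t-1
  a_tm1 = tf.constant([0, 1])
  r_t = tf.constant([1.0, 0.0])
  pcont_t = tf.constant([0.9, 0.9])
  q_t_value = tf.constant([[0.5, 1.5], [2.5, 0.5]])     # target net at t
  q_t_selector = tf.constant([[1.0, 0.0], [0.0, 1.0]])  # online net at t
  loss, extra = double_qlearning(
      q_tm1, a_tm1, r_t, pcont_t, q_t_value, q_t_selector)
  # `extra.best_action` is the argmax over q_t_selector, evaluated in q_t_value.
  return loss, extra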
def persistent_qlearning(
q_tm1, a_tm1, r_t, pcont_t, q_t, action_gap_scale=0.5,
name="PersistentQLearning"):
"""Implements the persistent Q-learning loss as a TensorFlow op.
The loss is `0.5` times the squared difference between `q_tm1[a_tm1]` and
`r_t + pcont_t * [(1-action_gap_scale) max q_t + action_gap_scale qa_t]`
See "Increasing the Action Gap: New Operators for Reinforcement Learning"
by Bellemare, Ostrovski, Guez et al. (https://arxiv.org/abs/1512.04860).
Args:
q_tm1: Tensor holding Q-values for first timestep in a batch of
transitions, shape `[B x num_actions]`.
a_tm1: Tensor holding action indices, shape `[B]`.
r_t: Tensor holding rewards, shape `[B]`.
pcont_t: Tensor holding pcontinue values, shape `[B]`.
q_t: Tensor holding Q-values for second timestep in a batch of
transitions, shape `[B x num_actions]`.
These values are used for estimating the value of the best action. In
DQN they come from the target network.
action_gap_scale: coefficient in [0, 1] for scaling the action gap term.
name: name to prefix ops created within this op.
Returns:
A namedtuple with fields:
* `loss`: a tensor containing the batch of losses, shape `[B]`.
* `extra`: a namedtuple with fields:
* `target`: batch of target values for `q_tm1[a_tm1]`, shape `[B]`.
* `td_error`: batch of temporal difference errors, shape `[B]`.
"""
# Rank and compatibility checks.
base_ops.wrap_rank_shape_assert(
[[q_tm1, q_t], [a_tm1, r_t, pcont_t]], [2, 1], name)
base_ops.assert_arg_bounded(action_gap_scale, 0, 1, name, "action_gap_scale")
# persistent Q-learning op.
with tf.name_scope(name, values=[q_tm1, a_tm1, r_t, pcont_t, q_t]):
# Build target and select head to update.
with tf.name_scope("target"):
max_q_t = tf.reduce_max(q_t, axis=1)
qa_t = indexing_ops.batched_index(q_t, a_tm1)
corrected_q_t = (1 - action_gap_scale) * max_q_t + action_gap_scale * qa_t
target = tf.stop_gradient(r_t + pcont_t * corrected_q_t)
qa_tm1 = indexing_ops.batched_index(q_tm1, a_tm1)
# Temporal difference error and loss.
# Loss is MSE scaled by 0.5, so the gradient is equal to the TD error.
td_error = target - qa_tm1
loss = 0.5 * tf.square(td_error)
return base_ops.LossOutput(loss, QExtra(target, td_error))
def sarsa(q_tm1, a_tm1, r_t, pcont_t, q_t, a_t, name="Sarsa"):
"""Implements the SARSA loss as a TensorFlow op.
The loss is `0.5` times the squared difference between `q_tm1[a_tm1]` and
the target `r_t + pcont_t * q_t[a_t]`.
See "Reinforcement Learning: An Introduction" by Sutton and Barto.
(http://incompleteideas.net/book/ebook/node64.html).
Args:
q_tm1: Tensor holding Q-values for first timestep in a batch of
transitions, shape `[B x num_actions]`.
a_tm1: Tensor holding action indices, shape `[B]`.
r_t: Tensor holding rewards, shape `[B]`.
pcont_t: Tensor holding pcontinue values, shape `[B]`.
q_t: Tensor holding Q-values for second timestep in a batch of
transitions, shape `[B x num_actions]`.
a_t: Tensor holding action indices for second timestep, shape `[B]`.
name: name to prefix ops created within this op.
Returns:
A namedtuple with fields:
* `loss`: a tensor containing the batch of losses, shape `[B]`.
* `extra`: a namedtuple with fields:
* `target`: batch of target values for `q_tm1[a_tm1]`, shape `[B]`.
* `td_error`: batch of temporal difference errors, shape `[B]`.
"""
# Rank and compatibility checks.
base_ops.wrap_rank_shape_assert(
[[q_tm1, q_t], [a_t, r_t, pcont_t]], [2, 1], name)
# SARSA op.
with tf.name_scope(name, values=[q_tm1, a_tm1, r_t, pcont_t, q_t, a_t]):
# Select head to update and build target.
qa_tm1 = indexing_ops.batched_index(q_tm1, a_tm1)
qa_t = indexing_ops.batched_index(q_t, a_t)
target = tf.stop_gradient(r_t + pcont_t * qa_t)
# Temporal difference error and loss.
# Loss is MSE scaled by 0.5, so the gradient is equal to the TD error.
td_error = target - qa_tm1
loss = 0.5 * tf.square(td_error)
return base_ops.LossOutput(loss, QExtra(target, td_error))
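# --- Illustrative usage sketch (added for exposition; not part of the original
# trfl source). SARSA bootstraps from the Q-value of the action actually taken
# at time t, so it also needs `a_t`. All values below are made-up assumptions.
def _sarsa_usage_example():
  q_tm1 = tf.constant([[1.0, 2.0], [3.0, 4.0]])
  a_tm1 = tf.constant([0, 1])
  r_t = tf.constant([1.0, 0.5])
  pcont_t = tf.constant([0.9, 0.9])
  q_t = tf.constant([[2.0, 1.0], [0.0, 3.0]])
  a_t = tf.constant([1, 0])
  loss, extra = sarsa(q_tm1, a_tm1, r_t, pcont_t, q_t, a_t)
  return loss, extra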
def sarse(
q_tm1, a_tm1, r_t, pcont_t, q_t, probs_a_t, debug=False, name="Sarse"):
"""Implements the SARSE (Expected SARSA) loss as a TensorFlow op.
The loss is `0.5` times the squared difference between `q_tm1[a_tm1]` and
the target `r_t + pcont_t * (sum_a probs_a_t[a] * q_t[a])`.
See "A Theoretical and Empirical Analysis of Expected Sarsa" by Seijen,
van Hasselt, Whiteson et al.
(http://www.cs.ox.ac.uk/people/shimon.whiteson/pubs/vanseijenadprl09.pdf).
Args:
q_tm1: Tensor holding Q-values for first timestep in a batch of
transitions, shape `[B x num_actions]`.
a_tm1: Tensor holding action indices, shape `[B]`.
r_t: Tensor holding rewards, shape `[B]`.
pcont_t: Tensor holding pcontinue values, shape `[B]`.
q_t: Tensor holding Q-values for second timestep in a batch of
transitions, shape `[B x num_actions]`.
probs_a_t: Tensor holding action probabilities for second timestep,
shape `[B x num_actions]`.
debug: Boolean flag, when set to True adds ops to check whether probs_a_t
is a batch of (approximately) valid probability distributions.
name: name to prefix ops created by this function.
Returns:
A namedtuple with fields:
* `loss`: a tensor containing the batch of losses, shape `[B]`.
* `extra`: a namedtuple with fields:
* `target`: batch of target values for `q_tm1[a_tm1]`, shape `[B]`.
* `td_error`: batch of temporal difference errors, shape `[B]`.
"""
# Rank and compatibility checks.
base_ops.wrap_rank_shape_assert(
[[q_tm1, q_t, probs_a_t], [a_tm1, r_t, pcont_t]], [2, 1], name)
# SARSE (Expected SARSA) op.
with tf.name_scope(name, values=[q_tm1, a_tm1, r_t, pcont_t, q_t, probs_a_t]):
# Debug ops.
deps = []
if debug:
cumulative_prob = tf.reduce_sum(probs_a_t, axis=1)
almost_prob = tf.less(tf.abs(tf.subtract(cumulative_prob, 1.0)), 1e-6)
deps.append(tf.Assert(
tf.reduce_all(almost_prob),
["probs_a_t tensor does not sum to 1", probs_a_t]))
# With dependency on possible debug ops.
with tf.control_dependencies(deps):
# Select head to update and build target.
qa_tm1 = indexing_ops.batched_index(q_tm1, a_tm1)
target = tf.stop_gradient(
r_t + pcont_t * tf.reduce_sum(tf.multiply(q_t, probs_a_t), axis=1))
# Temporal difference error and loss.
# Loss is MSE scaled by 0.5, so the gradient is equal to the TD error.
td_error = target - qa_tm1
loss = 0.5 * tf.square(td_error)
return base_ops.LossOutput(loss, QExtra(target, td_error))
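# --- Illustrative usage sketch (added for exposition; not part of the original
# trfl source). Expected SARSA bootstraps from the expectation of q_t under the
# policy's action probabilities; the probabilities below are made-up and sum to
# one per row, which is what the optional `debug=True` assertion verifies.
def _sarse_usage_example():
  q_tm1 = tf.constant([[1.0, 2.0], [3.0, 4.0]])
  a_tm1 = tf.constant([0, 1])
  r_t = tf.constant([1.0, 0.5])
  pcont_t = tf.constant([0.9, 0.9])
  q_t = tf.constant([[2.0, 1.0], [0.0, 3.0]])
  probs_a_t = tf.constant([[0.75, 0.25], [0.5, 0.5]])
  loss, extra = sarse(q_tm1, a_tm1, r_t, pcont_t, q_t, probs_a_t, debug=True)
  return loss, extra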
def qlambda(
q_tm1, a_tm1, r_t, pcont_t, q_t, lambda_, name="GeneralizedQLambda"):
"""Implements Peng's and Watkins' Q(lambda) loss as a TensorFlow op.
This function is general enough to implement both Peng's and Watkins'
Q-lambda algorithms.
See "Reinforcement Learning: An Introduction" by Sutton and Barto.
(http://incompleteideas.net/book/ebook/node78.html).
Args:
q_tm1: `Tensor` holding a sequence of Q-values starting at the first
timestep; shape `[T, B, num_actions]`
a_tm1: `Tensor` holding a sequence of action indices, shape `[T, B]`
r_t: Tensor holding a sequence of rewards, shape `[T, B]`
pcont_t: `Tensor` holding a sequence of pcontinue values, shape `[T, B]`
q_t: `Tensor` holding a sequence of Q-values for second timestep;
shape `[T, B, num_actions]`. In a target network setting,
this quantity is often supplied by the target network.
lambda_: a scalar or `Tensor` of shape `[T, B]`
specifying the ratio of mixing between bootstrapped and MC returns;
if lambda_ is the same for all time steps then the function implements
Peng's Q-learning algorithm; if lambda_ = 0 at every sub-optimal action
and a constant otherwise, then the function implements Watkins'
Q-learning algorithm. Generally lambda_ can be a Tensor of any values
in the range [0, 1] supplied by the user.
name: a name of the op.
Returns:
A namedtuple with fields:
* `loss`: a tensor containing the batch of losses, shape `[T, B]`.
* `extra`: a namedtuple with fields:
* `target`: batch of target values for `q_tm1[a_tm1]`, shape `[T, B]`.
* `td_error`: batch of temporal difference errors, shape `[T, B]`.
"""
# Rank and compatibility checks.
base_ops.wrap_rank_shape_assert([[q_tm1, q_t]], [3], name)
if isinstance(
lambda_, tf.Tensor
) and lambda_.get_shape().ndims is not None and lambda_.get_shape().ndims > 0:
base_ops.wrap_rank_shape_assert([[a_tm1, r_t, pcont_t, lambda_]], [2], name)
else:
base_ops.wrap_rank_shape_assert([[a_tm1, r_t, pcont_t]], [2], name)
# QLambda op.
with tf.name_scope(name, values=[q_tm1, a_tm1, r_t, pcont_t, q_t]):
# Build target and select head to update.
with tf.name_scope("target"):
state_values = tf.reduce_max(q_t, axis=2)
target = sequence_ops.multistep_forward_view(
r_t, pcont_t, state_values, lambda_, back_prop=False)
target = tf.stop_gradient(target)
qa_tm1 = indexing_ops.batched_index(q_tm1, a_tm1)
# Temporal difference error and loss.
# Loss is MSE scaled by 0.5, so the gradient is equal to the TD error.
td_error = target - qa_tm1
loss = 0.5 * tf.square(td_error)
return base_ops.LossOutput(loss, QExtra(target, td_error))
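# --- Illustrative usage sketch (added for exposition; not part of the original
# trfl source). Unlike the one-step ops above, qlambda consumes whole sequences
# shaped [T, B, ...]. The toy sequence below (T=3, B=2, num_actions=2) and the
# scalar lambda_ are made-up assumptions; a constant lambda_ corresponds to
# Peng's Q(lambda).
def _qlambda_usage_example():
  t, b, num_actions = 3, 2, 2
  q_tm1 = tf.ones([t, b, num_actions])
  a_tm1 = tf.zeros([t, b], dtype=tf.int32)
  r_t = tf.ones([t, b])
  pcont_t = 0.9 * tf.ones([t, b])
  q_t = tf.ones([t, b, num_actions])
  loss, extra = qlambda(q_tm1, a_tm1, r_t, pcont_t, q_t, lambda_=0.8)
  # `loss`, `extra.target` and `extra.td_error` all have shape [T, B].
  return loss, extra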
def sarsa_lambda(q_tm1,
a_tm1,
r_t,
pcont_t,
q_t,
a_t,
lambda_,
name="SarsaLambda"):
"""Implements SARSA(lambda) loss as a TensorFlow op.
See "Reinforcement Learning: An Introduction" by Sutton and Barto.
(http://incompleteideas.net/book/ebook/node77.html).
Args:
q_tm1: `Tensor` holding a sequence of Q-values starting at the first
timestep; shape `[T, B, num_actions]`
a_tm1: `Tensor` holding a sequence of action indices, shape `[T, B]`
r_t: Tensor holding a sequence of rewards, shape `[T, B]`
pcont_t: `Tensor` holding a sequence of pcontinue values, shape `[T, B]`
q_t: `Tensor` holding a sequence of Q-values for second timestep;
shape `[T, B, num_actions]`.
a_t: `Tensor` holding a sequence of action indices for second timestep;
shape `[T, B]`
lambda_: a scalar specifying the ratio of mixing between bootstrapped and
MC returns.
name: a name of the op.
Returns:
A namedtuple with fields:
* `loss`: a tensor containing the batch of losses, shape `[T, B]`.
* `extra`: a namedtuple with fields:
* `target`: batch of target values for `q_tm1[a_tm1]`, shape `[T, B]`.
* `td_error`: batch of temporal difference errors, shape `[T, B]`.
"""
# Rank and compatibility checks.
base_ops.wrap_rank_shape_assert(
[[q_tm1, q_t], [a_tm1, r_t, pcont_t, a_t]], [3, 2], name)
# SARSALambda op.
with tf.name_scope(name, values=[q_tm1, a_tm1, r_t, pcont_t, q_t, a_t]):
# Select head to update and build target.
qa_tm1 = indexing_ops.batched_index(q_tm1, a_tm1)
qa_t = indexing_ops.batched_index(q_t, a_t)
target = sequence_ops.multistep_forward_view(
r_t, pcont_t, qa_t, lambda_, back_prop=False)
target = tf.stop_gradient(target)
# Temporal difference error and loss.
# Loss is MSE scaled by 0.5, so the gradient is equal to the TD error.
td_error = target - qa_tm1
loss = 0.5 * tf.square(td_error)
return base_ops.LossOutput(loss, QExtra(target, td_error))
def qv_learning(q_tm1, a_tm1, r_t, pcont_t, v_t, name="QVLearning"):
"""Implements the QV loss as a TensorFlow op.
The loss is `0.5` times the squared difference between `q_tm1[a_tm1]` and
the target `r_t + pcont_t * v_t`, where `v_t` is separately learned through
temporal difference learning (c.f. `value_ops.td_learning`).
See "Two Novel On-policy Reinforcement Learning Algorithms based on
TD(lambda)-methods" by Wiering and van Hasselt
(https://ieeexplore.ieee.org/abstract/document/4220845).
Args:
q_tm1: Tensor holding Q-values for first timestep in a batch of
transitions, shape `[B x num_actions]`.
a_tm1: Tensor holding action indices, shape `[B]`.
r_t: Tensor holding rewards, shape `[B]`.
pcont_t: Tensor holding pcontinue values, shape `[B]`.
v_t: Tensor holding state-values for second timestep in a batch of
transitions, shape `[B]`.
name: name to prefix ops created within this op.
Returns:
A namedtuple with fields:
* `loss`: a tensor containing the batch of losses, shape `[B]`.
* `extra`: a namedtuple with fields:
* `target`: batch of target values for `q_tm1[a_tm1]`, shape `[B]`.
* `td_error`: batch of temporal difference errors, shape `[B]`.
"""
# Rank and compatibility checks.
base_ops.wrap_rank_shape_assert(
[[q_tm1], [a_tm1, r_t, pcont_t, v_t]], [2, 1], name)
# QV op.
with tf.name_scope(name, values=[q_tm1, a_tm1, r_t, pcont_t, v_t]):
# Build target and select head to update.
with tf.name_scope("target"):
target = tf.stop_gradient(r_t + pcont_t * v_t)
qa_tm1 = indexing_ops.batched_index(q_tm1, a_tm1)
# Temporal difference error and loss.
# Loss is MSE scaled by 0.5, so the gradient is equal to the TD error.
td_error = target - qa_tm1
loss = 0.5 * tf.square(td_error)
return base_ops.LossOutput(loss, QExtra(target, td_error))
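# --- Illustrative usage sketch (added for exposition; not part of the original
# trfl source). QV-learning bootstraps the Q update from a separately learned
# state-value estimate `v_t` rather than from q_t; all values are made-up.
def _qv_learning_usage_example():
  q_tm1 = tf.constant([[1.0, 2.0], [3.0, 4.0]])
  a_tm1 = tf.constant([1, 0])
  r_t = tf.constant([1.0, 0.0])
  pcont_t = tf.constant([0.9, 0.9])
  v_t = tf.constant([2.0, 1.0])
  loss, extra = qv_learning(q_tm1, a_tm1, r_t, pcont_t, v_t)
  return loss, extra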
| trfl-master | trfl/action_value_ops.py |
# Copyright 2018 The trfl Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tensorflow ops for updating target networks.
Tensorflow ops that are used to update a target network from a source network.
This is used in agents such as DQN or DPG, which use a target network that
changes more slowly than the online network, in order to improve stability.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import tensorflow.compat.v1 as tf
from trfl import periodic_ops
def update_target_variables(target_variables,
source_variables,
tau=1.0,
use_locking=False,
name="update_target_variables"):
"""Returns an op to update a list of target variables from source variables.
The update rule is:
`target_variable = (1 - tau) * target_variable + tau * source_variable`.
Args:
target_variables: a list of the variables to be updated.
source_variables: a list of the variables used for the update.
tau: weight used to gate the update. The permitted range is 0 < tau <= 1,
with small tau representing an incremental update, and tau == 1
representing a full update (that is, a straight copy).
use_locking: use `tf.Variable.assign`'s locking option when assigning
source variable values to target variables.
name: sets the `name_scope` for this op.
Raises:
TypeError: when tau is not a Python float
ValueError: when tau is out of range, or the source and target variables
have different numbers or shapes.
Returns:
An op that executes all the variable updates.
"""
if not isinstance(tau, float) and not tf.is_tensor(tau):
raise TypeError("Tau has wrong type (should be float) {}".format(tau))
if not tf.is_tensor(tau) and not 0.0 < tau <= 1.0:
raise ValueError("Invalid parameter tau {}".format(tau))
if len(target_variables) != len(source_variables):
raise ValueError("Number of target variables {} is not the same as "
"number of source variables {}".format(
len(target_variables), len(source_variables)))
same_shape = all(trg.get_shape() == src.get_shape()
for trg, src in zip(target_variables, source_variables))
if not same_shape:
raise ValueError("Target variables don't have the same shape as source "
"variables.")
def update_op(target_variable, source_variable, tau):
if tau == 1.0:
return target_variable.assign(source_variable, use_locking)
else:
return target_variable.assign(
tau * source_variable + (1.0 - tau) * target_variable, use_locking)
with tf.name_scope(name, values=target_variables + source_variables):
update_ops = [update_op(target_var, source_var, tau)
for target_var, source_var
in zip(target_variables, source_variables)]
return tf.group(name="update_all_variables", *update_ops)
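# --- Illustrative usage sketch (added for exposition; not part of the original
# trfl source). The variable shapes and the tau value are made-up assumptions.
def _update_target_variables_usage_example():
  source_w = tf.Variable(tf.random_normal([3, 2]), name="online_w")
  target_w = tf.Variable(tf.zeros([3, 2]), name="target_w")
  # Polyak-style soft update: target <- 0.99 * target + 0.01 * source.
  soft_update_op = update_target_variables([target_w], [source_w], tau=0.01)
  # Running `soft_update_op` in a session applies the update once.
  return soft_update_op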
def periodic_target_update(target_variables,
source_variables,
update_period,
tau=1.0,
use_locking=False,
counter=None,
name="periodic_target_update"):
"""Returns an op to periodically update a list of target variables.
The `update_target_variables` op is executed every `update_period`
executions of the `periodic_target_update` op.
The update rule is:
`target_variable = (1 - tau) * target_variable + tau * source_variable`.
Args:
target_variables: a list of the variables to be updated.
source_variables: a list of the variables used for the update.
update_period: inverse frequency with which to apply the update.
tau: weight used to gate the update. The permitted range is 0 < tau <= 1,
with small tau representing an incremental update, and tau == 1
representing a full update (that is, a straight copy).
use_locking: use `tf.Variable.assign`'s locking option when assigning
source variable values to target variables.
counter: an optional tensorflow variable to use as a counter relative to
`update_period`, which will be passed to `periodic_ops.periodically`. See
the description in `periodic_ops.periodically` for details.
name: sets the `name_scope` for this op.
Returns:
An op that periodically updates `target_variables` with `source_variables`.
"""
def update_op():
return update_target_variables(
target_variables, source_variables, tau, use_locking)
with tf.name_scope(name, values=target_variables + source_variables):
return periodic_ops.periodically(update_op, update_period, counter=counter)
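# --- Illustrative usage sketch (added for exposition; not part of the original
# trfl source). Every 100th execution of the returned op overwrites the target
# variable with a full copy (tau=1.0) of the source; values are assumptions.
def _periodic_target_update_usage_example():
  source_w = tf.Variable(tf.random_normal([4]), name="online_w")
  target_w = tf.Variable(tf.zeros([4]), name="target_w")
  update_op = periodic_target_update(
      [target_w], [source_w], update_period=100, tau=1.0)
  return update_op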
| trfl-master | trfl/target_update_ops.py |
# Copyright 2018 The trfl Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for dpg_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import tensorflow.compat.v1 as tf
from trfl import dpg_ops
class DpgTest(tf.test.TestCase):
"""Tests for DpgLearning.
"""
def setUp(self):
"""Sets up test scenario.
a_tm1_max = s_tm1 * w_s + b_s
q_tm1_max = a_tm1_max * w + b
"""
super(DpgTest, self).setUp()
self.s_tm1 = tf.constant([[0, 1, 0], [1, 1, 2]], dtype=tf.float32)
self.w_s = tf.Variable(tf.random_normal([3, 2]), dtype=tf.float32)
self.b_s = tf.Variable(tf.zeros([2]), dtype=tf.float32)
self.a_tm1_max = tf.matmul(self.s_tm1, self.w_s) + self.b_s
self.w = tf.Variable(tf.random_normal([2, 1]), dtype=tf.float32)
self.b = tf.Variable(tf.zeros([1]), dtype=tf.float32)
self.q_tm1_max = tf.matmul(self.a_tm1_max, self.w) + self.b
self.loss, self.dpg_extra = dpg_ops.dpg(self.q_tm1_max, self.a_tm1_max)
self.batch_size = self.a_tm1_max.get_shape()[0]
def testDpgNoGradient(self):
"""Test case: q_tm1_max does not depend on a_tm1_max => exception raised.
"""
with self.test_session():
a_tm1_max = tf.constant([[0, 1, 0], [1, 1, 2]])
q_tm1_max = tf.constant([[1], [0]])
self.assertRaises(ValueError, dpg_ops.dpg, q_tm1_max, a_tm1_max)
def testDpgDqda(self):
"""Tests the gradient qd/qa produced by the DPGLearner is correct."""
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
value_grad = np.transpose(self.w.eval())[0]
for i in range(int(self.batch_size)):
self.assertAllClose(self.dpg_extra.dqda.eval()[i], value_grad)
def testDpgGradient(self):
"""Gradient of loss w.r.t. actor network parameter w_s is correct."""
with self.test_session() as sess:
weight_gradient = tf.gradients(self.loss, self.w_s)
sess.run(tf.global_variables_initializer())
value_dpg_gradient, value_s_tm1, value_w = sess.run(
[weight_gradient[0], self.s_tm1, self.w])
true_grad = self.calculateTrueGradient(value_w, value_s_tm1)
self.assertAllClose(value_dpg_gradient, true_grad)
def testDpgNoOtherGradients(self):
"""No gradient of loss w.r.t. parameters other than that of actor network.
"""
with self.test_session():
gradients = tf.gradients([self.loss], [self.q_tm1_max, self.w, self.b])
self.assertListEqual(gradients, [None] * len(gradients))
def testDpgDqdaClippingError(self):
self.assertRaises(
ValueError, dpg_ops.dpg,
self.q_tm1_max, self.a_tm1_max, dqda_clipping=-10)
def testDpgGradientClipping(self):
"""Tests the gradient qd/qa are clipped."""
_, dpg_extra = dpg_ops.dpg(
self.q_tm1_max, self.a_tm1_max, dqda_clipping=0.01)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
value_grad = np.transpose(self.w.eval())[0]
for i in range(int(self.batch_size)):
self.assertAllClose(dpg_extra.dqda.eval()[i],
np.clip(value_grad, -0.01, 0.01))
self.assertTrue(np.greater(np.absolute(value_grad), 0.01).any())
def testDpgGradientNormClipping(self):
"""Tests the gradient qd/qa are clipped using norm clipping."""
_, dpg_extra = dpg_ops.dpg(
self.q_tm1_max, self.a_tm1_max, dqda_clipping=0.01, clip_norm=True)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
for i in range(int(self.batch_size)):
self.assertAllClose(np.linalg.norm(dpg_extra.dqda.eval()[i]), 0.01)
def testLossShape(self):
self.assertEqual(self.loss.shape.as_list(), [self.batch_size])
def calculateTrueGradient(self, value_w, value_s_tm1):
"""Calculates the true gradient over the batch.
sum_k dq/dw_s = sum_k dq/da * da/dw_s
= w * sum_k da/dw_s
Args:
value_w: numpy.ndarray containing weights of the linear layer.
value_s_tm1: state representation.
Returns:
The true_gradient of the test case.
"""
dadws = np.zeros((value_w.shape[0],
np.product(self.w_s.get_shape().as_list())))
for i in range(self.batch_size):
dadws += np.vstack((np.hstack((value_s_tm1[i], np.zeros(3))),
np.hstack((np.zeros(3), value_s_tm1[i]))))
true_grad = np.dot(np.transpose(value_w), dadws)
true_grad = -np.transpose(np.reshape(
true_grad, self.w_s.get_shape().as_list()[::-1]))
return true_grad
if __name__ == '__main__':
tf.test.main()
| trfl-master | trfl/dpg_ops_test.py |
# Copyright 2018 The trfl Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Periodic execution ops.
It is very common in Reinforcement Learning for certain ops to only need to be
executed periodically, for example: once every N agent steps. The ops below
support this common use-case by wrapping a subgraph as a periodic op that only
actually executes the underlying computation once every N evaluations of the op,
behaving as a no-op in all other calls.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import tensorflow.compat.v1 as tf
def periodically(body, period, counter=None, name="periodically"):
"""Periodically performs a tensorflow op.
The body tensorflow op will be executed every `period` times the periodically
op is executed. More specifically, with `n` the number of times the op has
been executed, the body will be executed when `n` is a positive multiple of
`period` (i.e. there exists an integer `k > 0` such that `k * period == n`).
If `period` is 0 or `None`, no op is performed and a `tf.no_op()` is returned.
Args:
body: callable that returns the tensorflow op to be performed every time
an internal counter is divisible by the period. The op must have no
output (for example, a tf.group()).
period: inverse frequency with which to perform the op.
counter: an optional tensorflow variable to use as a counter relative to the
period. It is incremented on each call and reset to 1 whenever the body runs.
To ensure that `body` runs on the first call, initialize the counter at a
value bigger than `period`. If not given, an internal counter will be
created in the graph (note that this is incompatible with TensorFlow 2
behavior).
name: name of the variable_scope.
Raises:
TypeError: if body is not a callable.
ValueError: if period is negative.
Returns:
An op that periodically performs the specified op.
"""
if not callable(body):
raise TypeError("body must be callable.")
if period is None:
return tf.no_op()
elif isinstance(period, (int, float)):
if period == 0:
return tf.no_op()
if period < 0:
raise ValueError("period cannot be less than 0.")
if period == 1:
return body()
if counter is None:
with tf.variable_scope(None, default_name=name):
counter = tf.get_variable(
"counter",
shape=[],
dtype=tf.int64,
trainable=False,
initializer=tf.constant_initializer(
np.iinfo(np.int64).max, dtype=tf.int64))
def _wrapped_body():
with tf.control_dependencies([body()]):
# Done the deed, resets the counter.
return counter.assign(1)
update = tf.cond(
tf.math.greater_equal(counter, tf.to_int64(period)),
_wrapped_body, lambda: counter.assign_add(1))
return update
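# --- Illustrative usage sketch (added for exposition; not part of the original
# trfl source). Every third evaluation of `tick` increments `step`; the other
# evaluations are no-ops. The variable name and period are made-up assumptions.
def _periodically_usage_example():
  step = tf.Variable(0, dtype=tf.int64, trainable=False, name="step")
  # `body` must return an op with no output, hence the tf.group wrapper.
  tick = periodically(lambda: tf.group(step.assign_add(1)), period=3)
  return step, tick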
| trfl-master | trfl/periodic_ops.py |
# Copyright 2018 The trfl Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Flattened namespace for trfl."""
from trfl.action_value_ops import double_qlearning
from trfl.action_value_ops import persistent_qlearning
from trfl.action_value_ops import qlambda
from trfl.action_value_ops import qlearning
from trfl.action_value_ops import qv_learning
from trfl.action_value_ops import sarsa
from trfl.action_value_ops import sarsa_lambda
from trfl.action_value_ops import sarse
from trfl.base_ops import assert_rank_and_shape_compatibility
from trfl.base_ops import best_effort_shape
from trfl.clipping_ops import huber_loss
from trfl.continuous_retrace_ops import retrace_from_action_log_probs
from trfl.continuous_retrace_ops import retrace_from_importance_weights
from trfl.discrete_policy_gradient_ops import discrete_policy_entropy_loss
from trfl.discrete_policy_gradient_ops import discrete_policy_gradient
from trfl.discrete_policy_gradient_ops import discrete_policy_gradient_loss
from trfl.discrete_policy_gradient_ops import sequence_advantage_actor_critic_loss
from trfl.dist_value_ops import categorical_dist_double_qlearning
from trfl.dist_value_ops import categorical_dist_qlearning
from trfl.dist_value_ops import categorical_dist_td_learning
from trfl.dpg_ops import dpg
from trfl.indexing_ops import batched_index
from trfl.periodic_ops import periodically
from trfl.pixel_control_ops import pixel_control_loss
from trfl.pixel_control_ops import pixel_control_rewards
from trfl.policy_gradient_ops import policy_entropy_loss
from trfl.policy_gradient_ops import policy_gradient
from trfl.policy_gradient_ops import policy_gradient_loss
from trfl.policy_gradient_ops import sequence_a2c_loss
from trfl.policy_ops import epsilon_greedy
from trfl.retrace_ops import retrace
from trfl.retrace_ops import retrace_core
from trfl.sequence_ops import multistep_forward_view
from trfl.sequence_ops import scan_discounted_sum
from trfl.target_update_ops import periodic_target_update
from trfl.target_update_ops import update_target_variables
from trfl.value_ops import generalized_lambda_returns
from trfl.value_ops import qv_max
from trfl.value_ops import td_lambda
from trfl.value_ops import td_learning
from trfl.vtrace_ops import vtrace_from_importance_weights
from trfl.vtrace_ops import vtrace_from_logits
__version__ = '1.2.0'
| trfl-master | trfl/__init__.py |
# Copyright 2018 The trfl Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""TensorFlow ops for various distribution projection operations.
All ops support multidimensional tensors. All dimensions except for the last
one can be considered as batch dimensions. They are processed in parallel
and are fully independent. The last dimension represents the number of bins.
The op supports broadcasting across all dimensions except for the last one.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
import tensorflow_probability as tfp
def l2_project(z_p, p, z_q):
"""Projects distribution (z_p, p) onto support z_q under L2-metric over CDFs.
The supports z_p and z_q are specified as tensors of distinct atoms (given
in ascending order).
Let Kq be len(z_q) and Kp be len(z_p). This projection works for any
support z_q, in particular Kq need not be equal to Kp.
Args:
z_p: Tensor holding support of distribution p, shape `[batch_size, Kp]`.
p: Tensor holding probability values p(z_p[i]), shape `[batch_size, Kp]`.
z_q: Tensor holding support to project onto, shape `[Kq]`.
Returns:
Projection of (z_p, p) onto support z_q under Cramer distance.
"""
# Broadcasting of tensors is used extensively in the code below. To avoid
# accidental broadcasting along unintended dimensions, tensors are defensively
# reshaped to have equal number of dimensions (3) throughout and intended
# shapes are indicated alongside tensor definitions. To reduce verbosity,
# extra dimensions of size 1 are inserted by indexing with `None` instead of
# `tf.expand_dims()` (e.g., `x[:, None, :]` reshapes a tensor of shape
# `[k, l]` to one of shape `[k, 1, l]`).
# Extract vmin and vmax and construct helper tensors from z_q
vmin, vmax = z_q[0], z_q[-1]
d_pos = tf.concat([z_q, vmin[None]], 0)[1:] # 1 x Kq x 1
d_neg = tf.concat([vmax[None], z_q], 0)[:-1] # 1 x Kq x 1
# Clip z_p to be in new support range (vmin, vmax).
z_p = tf.clip_by_value(z_p, vmin, vmax)[:, None, :] # B x 1 x Kp
# Get the distance between atom values in support.
d_pos = (d_pos - z_q)[None, :, None] # z_q[i+1] - z_q[i]. 1 x Kq x 1
d_neg = (z_q - d_neg)[None, :, None] # z_q[i] - z_q[i-1]. 1 x Kq x 1
z_q = z_q[None, :, None] # 1 x Kq x 1
# Ensure that we do not divide by zero, in case of atoms of identical value.
d_neg = tf.where(d_neg > 0, 1./d_neg, tf.zeros_like(d_neg)) # 1 x Kq x 1
d_pos = tf.where(d_pos > 0, 1./d_pos, tf.zeros_like(d_pos)) # 1 x Kq x 1
delta_qp = z_p - z_q # clip(z_p)[j] - z_q[i]. B x Kq x Kp
d_sign = tf.cast(delta_qp >= 0., dtype=p.dtype) # B x Kq x Kp
# Matrix of entries sgn(a_ij) * |a_ij|, with a_ij = clip(z_p)[j] - z_q[i].
# Shape B x Kq x Kp.
delta_hat = (d_sign * delta_qp * d_pos) - ((1. - d_sign) * delta_qp * d_neg)
p = p[:, None, :] # B x 1 x Kp.
return tf.reduce_sum(tf.clip_by_value(1. - delta_hat, 0., 1.) * p, 2)
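# --- Illustrative usage sketch (added for exposition; not part of the original
# trfl source). A batch of one categorical distribution supported on three
# atoms is projected onto a coarser two-atom support; the atom values and
# probabilities below are made-up assumptions.
def _l2_project_usage_example():
  z_p = tf.constant([[-1.0, 0.0, 1.0]])  # [batch_size=1, Kp=3] source support
  p = tf.constant([[0.2, 0.5, 0.3]])     # [1, Kp] probabilities on z_p
  z_q = tf.constant([-1.0, 1.0])         # [Kq=2] target support
  projected = l2_project(z_p, p, z_q)
  # `projected` has shape [1, Kq] and still sums to one along the last axis.
  return projected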
def factorised_kl_gaussian(dist1_mean,
dist1_covariance_or_scale,
dist2_mean,
dist2_covariance_or_scale,
both_diagonal=False):
"""Compute the KL divergence KL(dist1, dist2) between two Gaussians.
The KL is factorised into two terms - `kl_mean` and `kl_cov`. This
factorisation is specific to multivariate gaussian distributions and arises
from its analytic form.
Specifically, if we assume two multivariate Gaussian distributions with rank
k, means M1 and M2, and covariances S1 and S2, the analytic KL can be written
out as:
D_KL(N1 || N2) = 0.5 * (tr(inv(S2) * S1) + ln(det(S2)/det(S1)) - k +
(M2 - M1).T * inv(S2) * (M2 - M1))
The terms on the first row correspond to the covariance factor and the terms
on the second row correspond to the mean factor in the factorized KL.
These terms can thus be used to independently control how much the mean and
covariance between the two gaussians can vary.
This implementation ensures that gradient flow is equivalent to calling
`tfp.distributions.kl_divergence` once.
More details on the equation can be found here:
https://stats.stackexchange.com/questions/60680/kl-divergence-between-two-multivariate-gaussians
Args:
dist1_mean: The mean of the first Multivariate Gaussian distribution.
dist1_covariance_or_scale: The covariance or scale of the first Multivariate
Gaussian distribution. In cases where *both* distributions are Gaussians
with diagonal covariance matrices (for instance, if both are instances of
`tfp.distributions.MultivariateNormalDiag`), then the `scale` can be
passed in instead and the `both_diagonal` flag must be set to `True`.
A more efficient sparse computation path is used in this case. For all
other cases, the full covariance matrix must be passed in.
dist2_mean: The mean of the second Multivariate Gaussian distribution.
dist2_covariance_or_scale: The covariance or scale tensor of the second
Multivariate Gaussian distribution, as for `dist1_covariance_or_scale`.
both_diagonal: A `bool` indicating that both dist1 and dist2 are diagonal
matrices. A more efficient sparse computation is used in this case.
Returns:
A tuple consisting of (`kl_mean`, `kl_cov`) which correspond to the mean and
the covariance factorisation of the KL.
"""
if both_diagonal:
dist1_mean_rank = dist1_mean.get_shape().ndims
dist1_covariance_or_scale.get_shape().assert_has_rank(dist1_mean_rank)
dist2_mean_rank = dist2_mean.get_shape().ndims
dist2_covariance_or_scale.get_shape().assert_has_rank(dist2_mean_rank)
dist_type = tfp.distributions.MultivariateNormalDiag
else:
dist_type = tfp.distributions.MultivariateNormalFullCovariance
# Recreate the distributions but with stop gradients on the mean and cov.
dist1_stop_grad_mean = dist_type(
tf.stop_gradient(dist1_mean), dist1_covariance_or_scale)
dist2 = dist_type(dist2_mean, dist2_covariance_or_scale)
# Now create a third distribution with the mean of dist1 and the variance of
# dist2 and appropriate stop_gradients.
dist3 = dist_type(dist1_mean, dist2_covariance_or_scale)
dist3_stop_grad_mean = dist_type(
tf.stop_gradient(dist1_mean), dist2_covariance_or_scale)
# Finally get the two components of the KL between dist1 and dist2
# using dist3
kl_mean = tfp.distributions.kl_divergence(dist3, dist2)
kl_cov = tfp.distributions.kl_divergence(dist1_stop_grad_mean,
dist3_stop_grad_mean)
return kl_mean, kl_cov
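# --- Illustrative usage sketch (added for exposition; not part of the original
# trfl source). Both distributions are diagonal Gaussians, so per-dimension
# scales are passed directly and `both_diagonal=True` selects the cheaper
# computation path. All numbers below are made-up assumptions.
def _factorised_kl_gaussian_usage_example():
  mean1 = tf.constant([[0.0, 0.0]])   # batch of one 2-d Gaussian
  scale1 = tf.constant([[1.0, 1.0]])
  mean2 = tf.constant([[1.0, -1.0]])
  scale2 = tf.constant([[2.0, 0.5]])
  kl_mean, kl_cov = factorised_kl_gaussian(
      mean1, scale1, mean2, scale2, both_diagonal=True)
  # Per the factorisation above, kl_mean + kl_cov equals KL(dist1 || dist2).
  return kl_mean, kl_cov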
| trfl-master | trfl/distribution_ops.py |
# Copyright 2018 The trfl Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for value_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
from absl.testing import parameterized
import tensorflow.compat.v1 as tf
import tree as nest
from trfl import value_ops
class TDLearningTest(tf.test.TestCase):
"""Tests for ValueLearning."""
def setUp(self):
super(TDLearningTest, self).setUp()
self.v_tm1 = tf.constant([1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=tf.float32)
self.v_t = tf.constant([0, 0, 0, 1, 1, 1, 2, 2, 2], dtype=tf.float32)
self.pcont_t = tf.constant(
[0, 0.5, 1, 0, 0.5, 1, 0, 0.5, 1], dtype=tf.float32)
self.r_t = tf.constant(
[-1, -1, -1, -1, -1, -1, -1, -1, -1], dtype=tf.float32)
self.value_learning = value_ops.td_learning(
self.v_tm1, self.r_t, self.pcont_t, self.v_t)
def testRankCheck(self):
v_tm1 = tf.placeholder(tf.float32, [None, None])
with self.assertRaisesRegexp(
ValueError, 'TDLearning: Error in rank and/or compatibility check'):
self.value_learning = value_ops.td_learning(
v_tm1, self.r_t, self.pcont_t, self.v_t)
def testCompatibilityCheck(self):
pcont_t = tf.placeholder(tf.float32, [8])
with self.assertRaisesRegexp(
ValueError, 'TDLearning: Error in rank and/or compatibility check'):
self.value_learning = value_ops.td_learning(
self.v_tm1, self.r_t, pcont_t, self.v_t)
def testTarget(self):
"""Tests that target value == r_t + pcont_t * v_t."""
with self.test_session() as sess:
self.assertAllClose(
sess.run(self.value_learning.extra.target),
[-1, -1, -1, -1, -0.5, 0, -1, 0, 1])
def testTDError(self):
"""Tests that td_error == target_value - v_tm1."""
with self.test_session() as sess:
self.assertAllClose(
sess.run(self.value_learning.extra.td_error),
[-2, -2, -2, -2, -1.5, -1, -2, -1, 0])
def testLoss(self):
"""Tests that loss == 0.5 * td_error^2."""
with self.test_session() as sess:
# Loss is 0.5 * td_error^2
self.assertAllClose(
sess.run(self.value_learning.loss),
[2, 2, 2, 2, 1.125, 0.5, 2, 0.5, 0])
def testGradVtm1(self):
"""Tests that the gradients of negative loss are equal to the td_error."""
with self.test_session() as sess:
# Take gradients of the negative loss, so that the tests here check the
# values propagated during gradient _descent_, rather than _ascent_.
gradients = tf.gradients([-self.value_learning.loss], [self.v_tm1])
grad_v_tm1 = sess.run(gradients[0])
self.assertAllClose(grad_v_tm1, [-2, -2, -2, -2, -1.5, -1, -2, -1, 0])
def testNoOtherGradients(self):
"""Tests no gradient propagates through things other than v_tm1."""
# Gradients are only defined for v_tm1, not any other input.
gradients = tf.gradients([self.value_learning.loss],
[self.v_t, self.r_t, self.pcont_t])
self.assertEqual(gradients, [None] * len(gradients))
class TDLambdaTest(parameterized.TestCase, tf.test.TestCase):
def _setUp_td_loss(self, gae_lambda=1, sequence_length=4, batch_size=2):
t, b = sequence_length, batch_size
self._state_values = tf.placeholder(tf.float32, shape=(t, b))
self._rewards = tf.placeholder(tf.float32, shape=(t, b))
self._pcontinues = tf.placeholder(tf.float32, shape=(t, b))
self._bootstrap_value = tf.placeholder(tf.float32, shape=(b,))
loss, (td, discounted_returns) = value_ops.td_lambda(
state_values=self._state_values,
rewards=self._rewards,
pcontinues=self._pcontinues,
bootstrap_value=self._bootstrap_value,
lambda_=gae_lambda)
self._loss = loss
self._temporal_differences = td
self._discounted_returns = discounted_returns
@parameterized.parameters(
(1,),
(0.9,),)
def testShapeInference(self, gae_lambda):
sequence_length = 4
batch_size = 2
self._setUp_td_loss(
gae_lambda, sequence_length=sequence_length, batch_size=batch_size)
sequence_batch_shape = tf.TensorShape([sequence_length, batch_size])
batch_shape = tf.TensorShape(batch_size)
self.assertEqual(self._discounted_returns.get_shape(), sequence_batch_shape)
self.assertEqual(self._temporal_differences.get_shape(),
sequence_batch_shape)
self.assertEqual(self._loss.get_shape(), batch_shape)
@parameterized.named_parameters(
('Length', None, 4),
('Batch', 5, None),
('BatchAndLength', None, None),)
def testShapeInferenceDynamic(self, sequence_length, batch_size):
self._setUp_td_loss(
sequence_length=sequence_length, batch_size=batch_size, gae_lambda=1.)
t, b = sequence_length, batch_size
self.assertEqual(self._discounted_returns.get_shape().as_list(), [t, b])
self.assertEqual(self._temporal_differences.get_shape().as_list(), [t, b])
self.assertEqual(self._loss.get_shape().as_list(), [b])
@parameterized.parameters(
(1,),
(0.9,),)
def testInvalidGradients(self, gae_lambda):
self._setUp_td_loss(gae_lambda=gae_lambda)
ins = nest.flatten([self._rewards, self._pcontinues, self._bootstrap_value])
outs = [None] * len(ins)
self.assertAllEqual(tf.gradients(self._loss, ins), outs)
def testGradientsLoss(self):
self._setUp_td_loss()
gradient = tf.gradients(self._loss, self._state_values)[0]
self.assertEqual(gradient.get_shape(), self._state_values.get_shape())
class GeneralizedLambdaReturnsTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(0.25, 0.5, 1)
def testGeneralizedLambdaReturns(self, lambda_):
"""Tests the module-level function generalized_lambda_returns."""
# Sequence length 2, batch size 1.
state_values = tf.constant([[0.2], [0.3]], dtype=tf.float32)
rewards = tf.constant([[0.4], [0.5]], dtype=tf.float32)
pcontinues = tf.constant([[0.9], [0.8]], dtype=tf.float32)
bootstrap_value = tf.constant([0.1], dtype=tf.float32)
discounted_returns = value_ops.generalized_lambda_returns(
rewards, pcontinues, state_values, bootstrap_value, lambda_)
# Manually calculate the discounted returns.
return1 = 0.5 + 0.8 * 0.1
return0 = 0.4 + 0.9 * (lambda_ * return1 + (1 - lambda_) * 0.3)
with self.test_session() as sess:
self.assertAllClose(sess.run(discounted_returns), [[return0], [return1]])
class QVMAXTest(tf.test.TestCase):
"""Tests for the QVMAX loss."""
def setUp(self):
super(QVMAXTest, self).setUp()
self.v_tm1 = tf.constant([1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=tf.float32)
self.pcont_t = tf.constant(
[0, 0.5, 1, 0, 0.5, 1, 0, 0.5, 1], dtype=tf.float32)
self.r_t = tf.constant(
[-1, -1, -1, -1, -1, -1, -1, -1, -1], dtype=tf.float32)
self.q_t = tf.constant(
[[0, -1], [-2, 0], [0, -3], [1, 0], [1, 1],
[0, 1], [1, 2], [2, -2], [2, 2]], dtype=tf.float32)
self.loss_op, self.extra_ops = value_ops.qv_max(
self.v_tm1, self.r_t, self.pcont_t, self.q_t)
def testRankCheck(self):
v_tm1 = tf.placeholder(tf.float32, [None, None])
with self.assertRaisesRegexp(
ValueError, 'QVMAX: Error in rank and/or compatibility check'):
value_ops.qv_max(v_tm1, self.r_t, self.pcont_t, self.q_t)
def testCompatibilityCheck(self):
pcont_t = tf.placeholder(tf.float32, [8])
with self.assertRaisesRegexp(
ValueError, 'QVMAX: Error in rank and/or compatibility check'):
value_ops.qv_max(self.v_tm1, self.r_t, pcont_t, self.q_t)
def testTarget(self):
"""Tests that target value == r_t + pcont_t * max q_t."""
with self.test_session() as sess:
self.assertAllClose(
sess.run(self.extra_ops.target),
[-1, -1, -1, -1, -0.5, 0, -1, 0, 1])
def testTDError(self):
"""Tests that td_error == target_value - v_tm1."""
with self.test_session() as sess:
self.assertAllClose(
sess.run(self.extra_ops.td_error),
[-2, -2, -2, -2, -1.5, -1, -2, -1, 0])
def testLoss(self):
"""Tests that loss == 0.5 * td_error^2."""
with self.test_session() as sess:
# Loss is 0.5 * td_error^2
self.assertAllClose(
sess.run(self.loss_op),
[2, 2, 2, 2, 1.125, 0.5, 2, 0.5, 0])
def testGradVtm1(self):
"""Tests that the gradients of negative loss are equal to the td_error."""
with self.test_session() as sess:
# Take gradients of the negative loss, so that the tests here check the
# values propagated during gradient _descent_, rather than _ascent_.
gradients = tf.gradients([-self.loss_op], [self.v_tm1])
grad_v_tm1 = sess.run(gradients[0])
self.assertAllClose(grad_v_tm1, [-2, -2, -2, -2, -1.5, -1, -2, -1, 0])
def testNoOtherGradients(self):
"""Tests no gradient propagates through things other than v_tm1."""
# Gradients are only defined for v_tm1, not any other input.
gradients = tf.gradients([self.loss_op],
[self.q_t, self.r_t, self.pcont_t])
self.assertEqual(gradients, [None] * len(gradients))
if __name__ == '__main__':
tf.test.main()
| trfl-master | trfl/value_ops_test.py |
# Copyright 2018 The trfl Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Ops for computing v-trace learning targets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
# Dependency imports
import tensorflow.compat.v1 as tf
VTraceFromLogitsReturns = collections.namedtuple(
'VTraceFromLogitsReturns',
['vs', 'pg_advantages', 'log_rhos',
'behaviour_action_log_probs', 'target_action_log_probs'])
VTraceReturns = collections.namedtuple('VTraceReturns', 'vs pg_advantages')
def log_probs_from_logits_and_actions(policy_logits, actions):
"""Computes action log-probs from policy logits and actions.
In the notation used throughout documentation and comments, T refers to the
time dimension ranging from 0 to T-1. B refers to the batch size and
NUM_ACTIONS refers to the number of actions.
Args:
policy_logits: A float32 tensor of shape `[T, B, NUM_ACTIONS]` with
un-normalized log-probabilities parameterizing a softmax policy.
actions: An int32 tensor of shape `[T, B]` with actions.
Returns:
A float32 tensor of shape `[T, B]` corresponding to the sampling log
probability of the chosen action w.r.t. the policy.
"""
policy_logits = tf.convert_to_tensor(policy_logits, dtype=tf.float32)
actions = tf.convert_to_tensor(actions, dtype=tf.int32)
policy_logits.shape.assert_has_rank(3)
actions.shape.assert_has_rank(2)
return -tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=policy_logits, labels=actions)
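# --- Illustrative usage sketch (added for exposition; not part of the original
# trfl source). A rollout with T=2 steps, B=1 trajectory and 3 actions; the
# logits and sampled actions below are made-up assumptions.
def _log_probs_usage_example():
  policy_logits = tf.constant(
      [[[1.0, 0.0, -1.0]],
       [[0.5, 0.5, 0.5]]])               # [T=2, B=1, NUM_ACTIONS=3]
  actions = tf.constant([[0], [2]])      # [T=2, B=1]
  log_probs = log_probs_from_logits_and_actions(policy_logits, actions)
  # `log_probs` has shape [T, B] and holds log pi(a_t | x_t) under the policy.
  return log_probs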
def vtrace_from_logits(
behaviour_policy_logits, target_policy_logits, actions,
discounts, rewards, values, bootstrap_value,
clip_rho_threshold=1.0, clip_pg_rho_threshold=1.0,
name='vtrace_from_logits'):
r"""V-trace for softmax policies.
Calculates V-trace actor critic targets for softmax polices as described in
"IMPALA: Scalable Distributed Deep-RL with
Importance Weighted Actor-Learner Architectures"
by Espeholt, Soyer, Munos et al.
Target policy refers to the policy we are interested in improving and
behaviour policy refers to the policy that generated the given
rewards and actions.
In the notation used throughout documentation and comments, `T` refers to the
time dimension ranging from `0` to `T-1`. `B` refers to the batch size and
`NUM_ACTIONS` refers to the number of actions.
Args:
behaviour_policy_logits: A float32 tensor of shape `[T, B, NUM_ACTIONS]`
with un-normalized log-probabilities parametrizing the softmax behaviour
policy.
target_policy_logits: A float32 tensor of shape `[T, B, NUM_ACTIONS]` with
un-normalized log-probabilities parametrizing the softmax target policy.
actions: An int32 tensor of shape `[T, B]` of actions sampled from the
behaviour policy.
discounts: A float32 tensor of shape `[T, B]` with the discount encountered
when following the behaviour policy.
rewards: A float32 tensor of shape `[T, B]` with the rewards generated by
following the behaviour policy.
values: A float32 tensor of shape `[T, B]` with the value function estimates
wrt. the target policy.
bootstrap_value: A float32 of shape `[B]` with the value function estimate
at time T.
clip_rho_threshold: A scalar float32 tensor with the clipping threshold for
importance weights (rho) when calculating the baseline targets (vs).
rho^bar in the paper.
clip_pg_rho_threshold: A scalar float32 tensor with the clipping threshold
on rho_s in \rho_s \delta log \pi(a|x) (r + \gamma v_{s+1} - V(x_s)).
name: The name scope that all V-trace operations will be created in.
Returns:
A `VTraceFromLogitsReturns` namedtuple with the following fields:
vs: A float32 tensor of shape `[T, B]`. Can be used as target to train a
baseline (V(x_t) - vs_t)^2.
    pg_advantages: A float32 tensor of shape `[T, B]`. Can be used as an
estimate of the advantage in the calculation of policy gradients.
log_rhos: A float32 tensor of shape `[T, B]` containing the log importance
sampling weights (log rhos).
behaviour_action_log_probs: A float32 tensor of shape `[T, B]` containing
behaviour policy action log probabilities (log \mu(a_t)).
target_action_log_probs: A float32 tensor of shape `[T, B]` containing
      target policy action log probabilities (log \pi(a_t)).
"""
behaviour_policy_logits = tf.convert_to_tensor(
behaviour_policy_logits, dtype=tf.float32)
target_policy_logits = tf.convert_to_tensor(
target_policy_logits, dtype=tf.float32)
actions = tf.convert_to_tensor(actions, dtype=tf.int32)
# Make sure tensor ranks are as expected.
  # The rest will be checked by log_probs_from_logits_and_actions.
behaviour_policy_logits.shape.assert_has_rank(3)
target_policy_logits.shape.assert_has_rank(3)
actions.shape.assert_has_rank(2)
with tf.name_scope(name, values=[
behaviour_policy_logits, target_policy_logits, actions,
discounts, rewards, values, bootstrap_value]):
target_action_log_probs = log_probs_from_logits_and_actions(
target_policy_logits, actions)
behaviour_action_log_probs = log_probs_from_logits_and_actions(
behaviour_policy_logits, actions)
log_rhos = target_action_log_probs - behaviour_action_log_probs
vtrace_returns = vtrace_from_importance_weights(
log_rhos=log_rhos,
discounts=discounts,
rewards=rewards,
values=values,
bootstrap_value=bootstrap_value,
clip_rho_threshold=clip_rho_threshold,
clip_pg_rho_threshold=clip_pg_rho_threshold)
return VTraceFromLogitsReturns(
log_rhos=log_rhos,
behaviour_action_log_probs=behaviour_action_log_probs,
target_action_log_probs=target_action_log_probs,
**vtrace_returns._asdict()
)
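def _example_vtrace_from_logits():
  """Illustrative usage sketch only, not part of the trfl API; toy values."""
  # Toy trajectory with T=3, B=2 and 4 actions; all numbers are placeholders.
  t, b, num_actions = 3, 2, 4
  vtrace_returns = vtrace_from_logits(
      behaviour_policy_logits=tf.zeros([t, b, num_actions]),
      target_policy_logits=tf.ones([t, b, num_actions]),
      actions=tf.zeros([t, b], dtype=tf.int32),
      discounts=0.99 * tf.ones([t, b]),
      rewards=tf.ones([t, b]),
      values=tf.zeros([t, b]),
      bootstrap_value=tf.zeros([b]))
  # vtrace_returns.vs and vtrace_returns.pg_advantages both have shape [T, B].
  return vtrace_returns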
def vtrace_from_importance_weights(
log_rhos, discounts, rewards, values, bootstrap_value,
clip_rho_threshold=1.0, clip_pg_rho_threshold=1.0,
name='vtrace_from_importance_weights'):
r"""V-trace from log importance weights.
Calculates V-trace actor critic targets as described in
"IMPALA: Scalable Distributed Deep-RL with
Importance Weighted Actor-Learner Architectures"
by Espeholt, Soyer, Munos et al.
In the notation used throughout documentation and comments, T refers to the
time dimension ranging from 0 to T-1. B refers to the batch size. This code
also supports the case where all tensors have the same number of additional
dimensions, e.g., `rewards` is `[T, B, C]`, `values` is `[T, B, C]`,
`bootstrap_value` is `[B, C]`.
Args:
log_rhos: A float32 tensor of shape `[T, B]` representing the
log importance sampling weights, i.e.
log(target_policy(a) / behaviour_policy(a)). V-trace performs operations
on rhos in log-space for numerical stability.
discounts: A float32 tensor of shape `[T, B]` with discounts encountered
when following the behaviour policy.
rewards: A float32 tensor of shape `[T, B]` containing rewards generated by
following the behaviour policy.
values: A float32 tensor of shape `[T, B]` with the value function estimates
wrt. the target policy.
    bootstrap_value: A float32 tensor of shape `[B]` with the value function
      estimate at time T.
clip_rho_threshold: A scalar float32 tensor with the clipping threshold for
importance weights (rho) when calculating the baseline targets (vs).
rho^bar in the paper. If None, no clipping is applied.
clip_pg_rho_threshold: A scalar float32 tensor with the clipping threshold
on rho_s in \rho_s \delta log \pi(a|x) (r + \gamma v_{s+1} - V(x_s)). If
None, no clipping is applied.
name: The name scope that all V-trace operations will be created in.
Returns:
A VTraceReturns namedtuple (vs, pg_advantages) where:
vs: A float32 tensor of shape `[T, B]`. Can be used as target to
train a baseline (V(x_t) - vs_t)^2.
pg_advantages: A float32 tensor of shape `[T, B]`. Can be used as the
advantage in the calculation of policy gradients.
"""
log_rhos = tf.convert_to_tensor(log_rhos, dtype=tf.float32)
discounts = tf.convert_to_tensor(discounts, dtype=tf.float32)
rewards = tf.convert_to_tensor(rewards, dtype=tf.float32)
values = tf.convert_to_tensor(values, dtype=tf.float32)
bootstrap_value = tf.convert_to_tensor(bootstrap_value, dtype=tf.float32)
if clip_rho_threshold is not None:
clip_rho_threshold = tf.convert_to_tensor(clip_rho_threshold,
dtype=tf.float32)
if clip_pg_rho_threshold is not None:
clip_pg_rho_threshold = tf.convert_to_tensor(clip_pg_rho_threshold,
dtype=tf.float32)
# Make sure tensor ranks are consistent.
rho_rank = log_rhos.shape.ndims # Usually 2.
values.shape.assert_has_rank(rho_rank)
bootstrap_value.shape.assert_has_rank(rho_rank - 1)
discounts.shape.assert_has_rank(rho_rank)
rewards.shape.assert_has_rank(rho_rank)
if clip_rho_threshold is not None:
clip_rho_threshold.shape.assert_has_rank(0)
if clip_pg_rho_threshold is not None:
clip_pg_rho_threshold.shape.assert_has_rank(0)
with tf.name_scope(name, values=[
log_rhos, discounts, rewards, values, bootstrap_value]):
rhos = tf.exp(log_rhos)
if clip_rho_threshold is not None:
clipped_rhos = tf.minimum(clip_rho_threshold, rhos, name='clipped_rhos')
else:
clipped_rhos = rhos
cs = tf.minimum(1.0, rhos, name='cs')
# Append bootstrapped value to get [v1, ..., v_t+1]
values_t_plus_1 = tf.concat(
[values[1:], tf.expand_dims(bootstrap_value, 0)], axis=0)
deltas = clipped_rhos * (rewards + discounts * values_t_plus_1 - values)
# Note that all sequences are reversed, computation starts from the back.
sequences = (
tf.reverse(discounts, axis=[0]),
tf.reverse(cs, axis=[0]),
tf.reverse(deltas, axis=[0]),
)
# V-trace vs are calculated through a scan from the back to the beginning
# of the given trajectory.
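    # Each scan step implements the recursion
    #   (v_s - V(x_s)) = delta_s + discount_s * c_s * (v_{s+1} - V(x_{s+1})),
    # with the accumulator carrying (v_{s+1} - V(x_{s+1})) from the next step.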
def scanfunc(acc, sequence_item):
discount_t, c_t, delta_t = sequence_item
return delta_t + discount_t * c_t * acc
initial_values = tf.zeros_like(bootstrap_value)
vs_minus_v_xs = tf.scan(
fn=scanfunc,
elems=sequences,
initializer=initial_values,
parallel_iterations=1,
back_prop=False,
name='scan')
# Reverse the results back to original order.
vs_minus_v_xs = tf.reverse(vs_minus_v_xs, [0], name='vs_minus_v_xs')
# Add V(x_s) to get v_s.
vs = tf.add(vs_minus_v_xs, values, name='vs')
# Advantage for policy gradient.
vs_t_plus_1 = tf.concat([
vs[1:], tf.expand_dims(bootstrap_value, 0)], axis=0)
if clip_pg_rho_threshold is not None:
clipped_pg_rhos = tf.minimum(clip_pg_rho_threshold, rhos,
name='clipped_pg_rhos')
else:
clipped_pg_rhos = rhos
pg_advantages = (
clipped_pg_rhos * (rewards + discounts * vs_t_plus_1 - values))
# Make sure no gradients backpropagated through the returned values.
return VTraceReturns(vs=tf.stop_gradient(vs),
pg_advantages=tf.stop_gradient(pg_advantages))
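def _example_vtrace_from_importance_weights():
  """Illustrative usage sketch only, not part of the trfl API; toy values."""
  # Toy trajectory with T=3 and B=2. Zero log_rhos mean rho = 1 (on-policy),
  # in which case vs reduces to the usual bootstrapped n-step returns.
  t, b = 3, 2
  return vtrace_from_importance_weights(
      log_rhos=tf.zeros([t, b]),
      discounts=0.99 * tf.ones([t, b]),
      rewards=tf.ones([t, b]),
      values=tf.zeros([t, b]),
      bootstrap_value=tf.zeros([b]))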
| trfl-master | trfl/vtrace_ops.py |
# Copyright 2018 The trfl Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Unit tests for discrete-action Policy Gradient functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
from absl.testing import parameterized
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow.compat.v1 as tf
import tree as nest
from trfl import discrete_policy_gradient_ops as pg_ops
class EntropyCostTest(parameterized.TestCase, tf.test.TestCase):
"""Tests for discrete_policy_entropy op."""
@parameterized.named_parameters(('SingleAction', False),
('MultiActions', True))
def testEntropy(self, is_multi_actions):
with self.test_session() as sess:
# Large values check numerical stability through the logs
policy_logits_np = np.array([[0, 1], [1, 2], [0, 2], [1, 1], [0, -1000],
[0, 1000]])
if is_multi_actions:
num_action_components = 3
policy_logits_nest = [tf.constant(policy_logits_np, dtype=tf.float32)
for _ in xrange(num_action_components)]
else:
num_action_components = 1
policy_logits_nest = tf.constant(policy_logits_np, dtype=tf.float32)
entropy_op = pg_ops.discrete_policy_entropy_loss(policy_logits_nest)
entropy = entropy_op.extra.entropy
self.assertEqual(entropy.get_shape(), tf.TensorShape(6))
# Get these reference values in Torch with:
# c = nnd.EntropyCriterion()
# s = nn.LogSoftMax()
# result = c:forward(s:forward(logits))
expected_entropy = num_action_components * np.array(
[0.58220309, 0.58220309, 0.36533386, 0.69314718, 0, 0])
self.assertAllClose(sess.run(entropy),
expected_entropy,
atol=1e-4)
@parameterized.named_parameters(('SingleAction', False),
('MultiActions', True))
def testGradient(self, is_multi_actions):
with self.test_session() as sess:
policy_logits_np = np.array([[0, 1], [1, 2], [0, 2], [1, 1], [0, -1000],
[0, 1000]])
if is_multi_actions:
num_action_components = 3
policy_logits_nest = [tf.constant(policy_logits_np, dtype=tf.float32)
for _ in xrange(num_action_components)]
else:
num_action_components = 1
policy_logits_nest = tf.constant(policy_logits_np, dtype=tf.float32)
entropy_op = pg_ops.discrete_policy_entropy_loss(policy_logits_nest)
entropy = entropy_op.extra.entropy
      # Counterintuitively, the gradient->0 as policy->deterministic; that's why
# the gradients for the large logit cases are `[0, 0]`. They should
# strictly be >0, but they get truncated when we run out of precision.
expected_gradients = np.array([[0.1966119, -0.1966119],
[0.1966119, -0.1966119],
[0.2099872, -0.2099872],
[0, 0],
[0, 0],
[0, 0]])
for policy_logits in nest.flatten(policy_logits_nest):
gradients = tf.gradients(entropy, policy_logits)
grad_policy_logits = sess.run(gradients[0])
self.assertAllClose(grad_policy_logits,
expected_gradients,
atol=1e-4)
@parameterized.named_parameters(('TwoActions', 2),
('FiveActions', 5),
('TenActions', 10),
('MixedMultiActions', [2, 5, 10]))
def testNormalisation(self, num_actions):
with self.test_session() as sess:
if isinstance(num_actions, list):
policy_logits = [tf.constant([[1.0] * n], dtype=tf.float32)
for n in num_actions]
else:
policy_logits = tf.constant(
[[1.0] * num_actions], dtype=tf.float32)
entropy_op = pg_ops.discrete_policy_entropy_loss(
policy_logits, normalise=True)
self.assertAllClose(sess.run(entropy_op.loss), [-1.0])
@parameterized.named_parameters(
('Fixed', 5, 4, 3, False),
('DynamicLength', None, 4, 3, False),
('DynamicBatch', 5, None, 3, False),
('DynamicBatchAndLength', None, None, 3, False),
('DynamicAll', None, None, None, False),
('NormFixed', 5, 4, 3, True),
('NormDynamicLength', None, 4, 3, True),
('NormDynamicBatch', 5, None, 3, True),
('NormDynamicBatchAndLength', None, None, 3, True),
('NormDynamicAll', None, None, None, True))
def testShapeInference3D(self, sequence_length, batch_size, num_actions,
normalise):
T, B, A = sequence_length, batch_size, num_actions # pylint: disable=invalid-name
op = pg_ops.discrete_policy_entropy_loss(
policy_logits=tf.placeholder(tf.float32, shape=[T, B, A]),
normalise=normalise)
op.extra.entropy.get_shape().assert_is_compatible_with([T, B])
op.loss.get_shape().assert_is_compatible_with([T, B])
@parameterized.named_parameters(
('Fixed2D', 4, 3, False),
('DynamicBatch2D', None, 3, False),
('DynamicAll2D', None, None, False),
('NormFixed2D', 4, 3, True),
('NormDynamicBatch2D', None, 3, True),
('NormDynamicAll2D', None, None, True))
def testShapeInference2D(self, batch_size, num_actions, normalise):
policy_logits = tf.placeholder(tf.float32, shape=[batch_size, num_actions])
op = pg_ops.discrete_policy_entropy_loss(policy_logits, normalise=normalise)
op.extra.entropy.get_shape().assert_is_compatible_with([batch_size])
op.loss.get_shape().assert_is_compatible_with([batch_size])
@parameterized.named_parameters(('SingleAction', False),
('MultiActions', True))
class DiscretePolicyGradientLossTest(parameterized.TestCase, tf.test.TestCase):
"""Tests for discrete_policy_gradient_loss op."""
def _setUpLoss(self, is_multi_actions):
policy_logits_np = np.array([[[0, 1], [0, 1]],
[[1, 1], [0, 100]]])
actions_np = np.array([[0, 0],
[1, 1]], dtype=np.int32)
if is_multi_actions:
self._num_action_components = 3
self._policy_logits_nest = [
tf.constant(policy_logits_np, dtype=tf.float32)
for _ in xrange(self._num_action_components)]
self._actions_nest = [tf.constant(actions_np, dtype=tf.int32)
for _ in xrange(self._num_action_components)]
else:
self._num_action_components = 1
self._policy_logits_nest = tf.constant(policy_logits_np, dtype=tf.float32)
self._actions_nest = tf.constant(actions_np, dtype=tf.int32)
self._action_values = tf.constant([[0, 1], [2, 1]], dtype=tf.float32)
self._loss = pg_ops.discrete_policy_gradient_loss(
self._policy_logits_nest, self._actions_nest, self._action_values)
def testLoss(self, is_multi_actions):
self._setUpLoss(is_multi_actions)
with self.test_session() as sess:
self.assertEqual(self._loss.get_shape(), tf.TensorShape(2)) # [B]
self.assertAllClose(
sess.run(self._loss),
          # Computed by summing the expected losses from
          # DiscretePolicyGradientTest over the two length-two sequences into
          # which the batch is split:
self._num_action_components * np.array([1.386294, 1.313262]))
def testGradients(self, is_multi_actions):
self._setUpLoss(is_multi_actions)
with self.test_session() as sess:
total_loss = tf.reduce_sum(self._loss)
gradients = tf.gradients(
[total_loss], nest.flatten(self._policy_logits_nest))
grad_policy_logits_nest = sess.run(gradients)
for grad_policy_logits in grad_policy_logits_nest:
self.assertAllClose(grad_policy_logits,
[[[0, 0], [-0.731, 0.731]],
[[1, -1], [0, 0]]], atol=1e-4)
dead_grads = tf.gradients(
[total_loss],
nest.flatten(self._actions_nest) + [self._action_values])
for grad in dead_grads:
self.assertIsNone(grad)
class DiscretePolicyGradientTest(tf.test.TestCase):
"""Tests for discrete_policy_gradient op."""
def testLoss(self):
with self.test_session() as sess:
policy_logits = tf.constant([[0, 1], [0, 1], [1, 1], [0, 100]],
dtype=tf.float32)
action_values = tf.constant([0, 1, 2, 1], dtype=tf.float32)
actions = tf.constant([0, 0, 1, 1], dtype=tf.int32)
loss = pg_ops.discrete_policy_gradient(policy_logits, actions,
action_values)
self.assertEqual(loss.get_shape(), tf.TensorShape(4))
# Calculate the targets with:
# loss = action_value*(-logits[action] + log(sum_a(exp(logits[a]))))
# The final case (with large logits), runs out of precision and gets
# truncated to 0, but isn't `nan`.
self.assertAllClose(sess.run(loss), [0, 1.313262, 1.386294, 0])
def testGradients(self):
with self.test_session() as sess:
policy_logits = tf.constant([[0, 1], [0, 1], [1, 1], [0, 100]],
dtype=tf.float32)
action_values = tf.constant([0, 1, 2, 1], dtype=tf.float32)
actions = tf.constant([0, 0, 1, 1], dtype=tf.int32)
loss = pg_ops.discrete_policy_gradient(policy_logits, actions,
action_values)
total_loss = tf.reduce_sum(loss)
gradients = tf.gradients([total_loss], [policy_logits])
grad_policy_logits = sess.run(gradients[0])
# The final case (with large logits), runs out of precision and gets
# truncated to 0, but isn't `nan`.
self.assertAllClose(grad_policy_logits,
[[0, 0], [-0.731, 0.731], [1, -1], [0, 0]], atol=1e-4)
self.assertAllEqual(tf.gradients([total_loss], [actions, action_values]),
[None, None])
def testDynamicBatchSize(self):
policy_logits = tf.placeholder(tf.float32, shape=[None, 3])
action_values = tf.placeholder(tf.float32, shape=[None])
actions = tf.placeholder(tf.int32, shape=[None])
loss = pg_ops.discrete_policy_gradient(policy_logits, actions,
action_values)
self.assertEqual(loss.get_shape().as_list(), [None])
gradients = tf.gradients(tf.reduce_sum(loss), [policy_logits])
self.assertAllEqual(gradients[0].get_shape().as_list(), [None, 3])
class SequenceAdvantageActorCriticLossTest(parameterized.TestCase,
tf.test.TestCase):
@parameterized.named_parameters(
('SingleActionEntropyNormalise', False, True),
('SingleActionNoEntropyNormalise', False, False),
('MultiActionsEntropyNormalise', True, True),
('MultiActionsNoEntropyNormalise', True, False),
)
def testLossSequence(self, is_multi_actions, normalise_entropy):
# A sequence of length 2, batch size 1, 3 possible actions.
num_actions = 3
policy_logits = [[[0., 0., 1.]], [[0., 1., 0.]]]
actions = [[0], [1]]
baseline_values = [[0.2], [0.3]]
rewards = [[0.4], [0.5]]
pcontinues = [[0.9], [0.8]]
bootstrap_value = [0.1]
baseline_cost = 0.15
entropy_cost = 0.25
if is_multi_actions:
num_action_components = 3
policy_logits_nest = [tf.constant(policy_logits, dtype=tf.float32)
for _ in xrange(num_action_components)]
actions_nest = [tf.constant(actions, dtype=tf.int32)
for _ in xrange(num_action_components)]
else:
num_action_components = 1
policy_logits_nest = tf.constant(policy_logits, dtype=tf.float32)
actions_nest = tf.constant(actions, dtype=tf.int32)
loss, extra = pg_ops.sequence_advantage_actor_critic_loss(
policy_logits_nest,
tf.constant(baseline_values, dtype=tf.float32),
actions_nest,
tf.constant(rewards, dtype=tf.float32),
tf.constant(pcontinues, dtype=tf.float32),
tf.constant(bootstrap_value, dtype=tf.float32),
baseline_cost=baseline_cost,
entropy_cost=entropy_cost,
normalise_entropy=normalise_entropy)
# Manually calculate the discounted returns.
return1 = 0.5 + 0.8 * 0.1
return0 = 0.4 + 0.9 * return1
with self.test_session() as sess:
# Discounted returns
self.assertAllClose(sess.run(extra.discounted_returns),
[[return0], [return1]])
# Advantages
advantages = [return0 - baseline_values[0][0],
return1 - baseline_values[1][0]]
self.assertAllClose(sess.run(extra.advantages),
[[adv] for adv in advantages])
# Baseline
expected_baseline_loss = baseline_cost*sum([0.5 * adv**2 for adv in
advantages])
self.assertAllClose(
sess.run(extra.baseline_loss), [expected_baseline_loss])
# Policy Gradient loss
# loss = sum_t(action_value*(-logits[action] +
# log(sum_a(exp(logits[a])))))
#
# The below takes advantage of there only being one minibatch dim.
normalise = lambda logits: np.log(np.exp(logits).sum())
batch = 0
expected_policy_gradient_loss = num_action_components * sum([
advantages[0]*(-(policy_logits[0][batch][actions[0][batch]]) +
normalise(policy_logits[0])),
advantages[1]*(-(policy_logits[1][batch][actions[1][batch]]) +
normalise(policy_logits[1])),
])
self.assertAllClose(sess.run(extra.policy_gradient_loss),
[expected_policy_gradient_loss])
# Entropy, calculated as per discrete_policy_entropy tests.
expected_entropy = num_action_components*0.97533*2
expected_entropy_loss = -entropy_cost*expected_entropy
if normalise_entropy:
expected_entropy_loss /= (num_action_components * np.log(num_actions))
self.assertAllClose(sess.run(extra.entropy),
[expected_entropy], atol=1e-4)
self.assertAllClose(sess.run(extra.entropy_loss), [expected_entropy_loss],
atol=1e-4)
# Total loss
expected_loss = [expected_entropy_loss + expected_policy_gradient_loss +
expected_baseline_loss]
self.assertAllClose(sess.run(loss), expected_loss, atol=1e-4)
@parameterized.named_parameters(('Fixed', 5, 4, 3),
('DynamicLength', None, 4, 3),
('DynamicBatch', 5, None, 3),
('DynamicBatchAndLength', None, None, 3),
('DynamicAll', None, None, None))
def testShapeInference(self, sequence_length, batch_size, num_actions):
T, B, A = sequence_length, batch_size, num_actions # pylint: disable=invalid-name
loss, extra = pg_ops.sequence_advantage_actor_critic_loss(
policy_logits=tf.placeholder(tf.float32, shape=[T, B, A]),
baseline_values=tf.placeholder(tf.float32, shape=[T, B]),
actions=tf.placeholder(tf.int32, shape=[T, B]),
rewards=tf.placeholder(tf.float32, shape=[T, B]),
pcontinues=tf.placeholder(tf.float32, shape=[T, B]),
bootstrap_value=tf.placeholder(tf.float32, shape=[B]),
entropy_cost=1)
extra.discounted_returns.get_shape().assert_is_compatible_with([T, B])
extra.advantages.get_shape().assert_is_compatible_with([T, B])
extra.baseline_loss.get_shape().assert_is_compatible_with([B])
extra.policy_gradient_loss.get_shape().assert_is_compatible_with([B])
extra.entropy.get_shape().assert_is_compatible_with([B])
extra.entropy_loss.get_shape().assert_is_compatible_with([B])
loss.get_shape().assert_is_compatible_with([B])
@parameterized.named_parameters(('Fixed', 5, 4, 3),
('DynamicLength', None, 4, 3),
('DynamicBatch', 5, None, 3),
('DynamicBatchAndLength', None, None, 3),
('DynamicAll', None, None, None))
def testShapeInferenceGAE(self, sequence_length, batch_size, num_actions):
T, B, A = sequence_length, batch_size, num_actions # pylint: disable=invalid-name
loss, extra = pg_ops.sequence_advantage_actor_critic_loss(
policy_logits=tf.placeholder(tf.float32, shape=[T, B, A]),
baseline_values=tf.placeholder(tf.float32, shape=[T, B]),
actions=tf.placeholder(tf.int32, shape=[T, B]),
rewards=tf.placeholder(tf.float32, shape=[T, B]),
pcontinues=tf.placeholder(tf.float32, shape=[T, B]),
bootstrap_value=tf.placeholder(tf.float32, shape=[B]),
lambda_=0.9,
entropy_cost=1)
extra.discounted_returns.get_shape().assert_is_compatible_with([T, B])
extra.advantages.get_shape().assert_is_compatible_with([T, B])
extra.baseline_loss.get_shape().assert_is_compatible_with([B])
extra.policy_gradient_loss.get_shape().assert_is_compatible_with([B])
extra.entropy.get_shape().assert_is_compatible_with([B])
extra.entropy_loss.get_shape().assert_is_compatible_with([B])
loss.get_shape().assert_is_compatible_with([B])
class SequenceAdvantageActorCriticLossGradientTest(parameterized.TestCase,
tf.test.TestCase):
def setUp(self):
super(SequenceAdvantageActorCriticLossGradientTest, self).setUp()
self.num_actions = 3
self.num_action_components = 5
policy_logits_np = np.array([[[0., 0., 1.]], [[0., 1., 0.]]])
self.policy_logits = tf.constant(policy_logits_np, dtype=tf.float32)
self.multi_policy_logits = [tf.constant(policy_logits_np, dtype=tf.float32)
for _ in xrange(self.num_action_components)]
self.baseline_values = tf.constant([[0.2], [0.3]])
actions_np = np.array([[0], [1]])
actions = tf.constant(actions_np)
multi_actions = [tf.constant(actions_np)
for _ in xrange(self.num_action_components)]
rewards = tf.constant([[0.4], [0.5]])
pcontinues = tf.constant([[0.9], [0.8]])
bootstrap_value = tf.constant([0.1])
baseline_cost = 0.15
entropy_cost = 0.25
self.op = pg_ops.sequence_advantage_actor_critic_loss(
self.policy_logits, self.baseline_values, actions, rewards, pcontinues,
bootstrap_value, baseline_cost=baseline_cost, entropy_cost=entropy_cost)
self.multi_op = pg_ops.sequence_advantage_actor_critic_loss(
self.multi_policy_logits, self.baseline_values, multi_actions, rewards,
pcontinues, bootstrap_value, baseline_cost=baseline_cost,
entropy_cost=entropy_cost)
self.invalid_grad_inputs = [actions, rewards, pcontinues, bootstrap_value]
self.invalid_grad_outputs = [None]*len(self.invalid_grad_inputs)
@parameterized.named_parameters(('SingleAction', False),
('MultiActions', True))
def testPolicyGradients(self, is_multi_actions):
if is_multi_actions:
loss = self.multi_op.extra.policy_gradient_loss
policy_logits_nest = self.multi_policy_logits
else:
loss = self.op.extra.policy_gradient_loss
policy_logits_nest = self.policy_logits
grad_policy_list = [
tf.gradients(loss, policy_logits)[0] * self.num_actions
for policy_logits in nest.flatten(policy_logits_nest)]
for grad_policy in grad_policy_list:
self.assertEqual(grad_policy.get_shape(), tf.TensorShape([2, 1, 3]))
self.assertAllEqual(tf.gradients(loss, self.baseline_values), [None])
self.assertAllEqual(tf.gradients(loss, self.invalid_grad_inputs),
self.invalid_grad_outputs)
def testNonDifferentiableDiscountedReturns(self):
self.assertAllEqual(tf.gradients(self.op.extra.discounted_returns,
self.invalid_grad_inputs),
self.invalid_grad_outputs)
@parameterized.named_parameters(('SingleAction', False),
('MultiActions', True))
def testEntropyGradients(self, is_multi_actions):
if is_multi_actions:
loss = self.multi_op.extra.entropy_loss
policy_logits_nest = self.multi_policy_logits
else:
loss = self.op.extra.entropy_loss
policy_logits_nest = self.policy_logits
grad_policy_list = [
tf.gradients(loss, policy_logits)[0] * self.num_actions
for policy_logits in nest.flatten(policy_logits_nest)]
for grad_policy in grad_policy_list:
self.assertEqual(grad_policy.get_shape(), tf.TensorShape([2, 1, 3]))
self.assertAllEqual(tf.gradients(loss, self.baseline_values), [None])
self.assertAllEqual(tf.gradients(loss, self.invalid_grad_inputs),
self.invalid_grad_outputs)
def testBaselineGradients(self):
loss = self.op.extra.baseline_loss
grad_baseline = tf.gradients(loss, self.baseline_values)[0]
self.assertEqual(grad_baseline.get_shape(), tf.TensorShape([2, 1]))
self.assertAllEqual(tf.gradients(loss, self.policy_logits), [None])
self.assertAllEqual(tf.gradients(loss, self.invalid_grad_inputs),
self.invalid_grad_outputs)
@parameterized.named_parameters(('SingleAction', False),
('MultiActions', True))
def testTotalLossGradients(self, is_multi_actions):
with self.test_session() as sess:
if is_multi_actions:
total_loss = tf.reduce_sum(self.multi_op.loss)
policy_logits_nest = self.multi_policy_logits
else:
total_loss = tf.reduce_sum(self.op.loss)
policy_logits_nest = self.policy_logits
grad_policy_list = [
tf.gradients(total_loss, policy_logits)[0]
for policy_logits in nest.flatten(policy_logits_nest)]
grad_baseline = tf.gradients(total_loss, self.baseline_values)[0]
for grad_policy in grad_policy_list:
self.assertEqual(grad_policy.get_shape(), tf.TensorShape([2, 1, 3]))
# These values were just generated once and hard-coded here to check for
# regressions. Calculating by hand would be too time-consuming,
# error-prone and unreadable.
self.assertAllClose(sess.run(grad_policy),
[[[-0.5995, 0.1224, 0.4770]],
[[0.0288, -0.0576, 0.0288]]],
atol=1e-4)
self.assertEqual(grad_baseline.get_shape(), tf.TensorShape([2, 1]))
self.assertAllClose(sess.run(grad_baseline), [[-0.1083], [-0.0420]],
atol=1e-4)
self.assertAllEqual(tf.gradients(total_loss, self.invalid_grad_inputs),
self.invalid_grad_outputs)
if __name__ == '__main__':
tf.test.main()
| trfl-master | trfl/discrete_policy_gradient_ops_test.py |
# Copyright 2018 The trfl Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for target_update_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
from absl.testing import parameterized
import tensorflow.compat.v1 as tf
from trfl import target_update_ops
class UpdateTargetVariablesTest(tf.test.TestCase, parameterized.TestCase):
"""Tests function update_target_variables."""
@parameterized.parameters({'use_locking': True}, {'use_locking': False})
def testFullUpdate(self, use_locking):
"""Tests full update of the target variables from the source variables."""
target_variables = [
tf.Variable(tf.random_normal(shape=[1, 2])),
tf.Variable(tf.random_normal(shape=[3, 4])),
]
source_variables = [
tf.Variable(tf.random_normal(shape=[1, 2])),
tf.Variable(tf.random_normal(shape=[3, 4])),
]
updated = target_update_ops.update_target_variables(
target_variables, source_variables, use_locking=use_locking)
# Collect all the tensors and ops we want to evaluate in the session.
vars_ops = target_variables + source_variables
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(updated)
results = sess.run(vars_ops)
# First target variable is updated with first source variable.
self.assertAllClose(results[0], results[2])
# Second target variable is updated with second source variable.
self.assertAllClose(results[1], results[3])
@parameterized.parameters({'use_locking': True}, {'use_locking': False})
def testIncrementalUpdate(self, use_locking):
"""Tests incremental update of the target variables."""
target_variables = [tf.Variable(tf.random_normal(shape=[1, 2]))]
source_variables = [tf.Variable(tf.random_normal(shape=[1, 2]))]
updated = target_update_ops.update_target_variables(
target_variables, source_variables, tau=0.1, use_locking=use_locking)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
before_assign = sess.run(target_variables[0])
sess.run(updated)
results = sess.run([target_variables[0], source_variables[0]])
self.assertAllClose(results[0], 0.1 * results[1] + 0.9 * before_assign)
def testIncompatibleLength(self):
"""Tests error when variable lists have unequal length."""
with self.test_session():
target_variables = [tf.Variable(tf.random_normal(shape=[1, 2]))]
source_variables = [
tf.Variable(tf.random_normal(shape=[1, 2])),
tf.Variable(tf.random_normal(shape=[3, 4])),
]
self.assertRaises(ValueError, target_update_ops.update_target_variables,
target_variables, source_variables)
def testIncompatibleShape(self):
"""Tests error when variable lists have unequal shapes."""
with self.test_session():
target_variables = [
tf.Variable(tf.random_normal(shape=[1, 2])),
tf.Variable(tf.random_normal(shape=[1, 2])),
]
source_variables = [
tf.Variable(tf.random_normal(shape=[1, 2])),
tf.Variable(tf.random_normal(shape=[3, 4])),
]
self.assertRaises(ValueError, target_update_ops.update_target_variables,
target_variables, source_variables)
def testInvalidTypeTau(self):
"""Tests error when tau has wrong type."""
target_variables = [tf.Variable(tf.random_normal(shape=[1, 2]))]
source_variables = [tf.Variable(tf.random_normal(shape=[1, 2]))]
self.assertRaises(TypeError, target_update_ops.update_target_variables,
target_variables, source_variables, 1)
def testInvalidRangeTau(self):
"""Tests error when tau is outside permitted range."""
target_variables = [tf.Variable(tf.random_normal(shape=[1, 2]))]
source_variables = [tf.Variable(tf.random_normal(shape=[1, 2]))]
self.assertRaises(ValueError, target_update_ops.update_target_variables,
target_variables, source_variables, -0.1)
self.assertRaises(ValueError, target_update_ops.update_target_variables,
target_variables, source_variables, 1.1)
class PeriodicTargetUpdateTest(tf.test.TestCase, parameterized.TestCase):
"""Tests function period_target_update."""
@parameterized.parameters(
{'use_locking': True, 'update_period': 1},
{'use_locking': False, 'update_period': 1},
{'use_locking': True, 'update_period': 3},
{'use_locking': False, 'update_period': 3}
)
def testPeriodicTargetUpdate(self, use_locking, update_period):
"""Tests that the simple success case works as expected.
    This is an integration test. The periodic scheduling and the update itself
    are unit-tested in the preceding tests.
Args:
use_locking: value for `periodic_target_update`'s `use_locking` argument.
update_period: how often an update should happen.
"""
target_variables = [tf.Variable(tf.zeros([1, 2]))]
source_variables = [tf.Variable(tf.random_normal([1, 2]))]
increment = tf.ones([1, 2])
update_source_op = tf.assign_add(source_variables[0], increment)
updated = target_update_ops.periodic_target_update(
target_variables,
source_variables,
update_period=update_period,
use_locking=use_locking)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
for step in range(3 * update_period):
sess.run(update_source_op)
sess.run(updated)
targets, sources = sess.run([target_variables, source_variables])
if step % update_period == 0:
self.assertAllClose(targets, sources)
else:
self.assertNotAllClose(targets, sources)
if __name__ == '__main__':
tf.test.main()
| trfl-master | trfl/target_update_ops_test.py |
# Copyright 2018 The trfl Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""TensorFlow ops for implementing Pixel Control.
Pixel Control is an auxiliary task introduced in the UNREAL agent.
In Pixel Control an additional agent head is trained off-policy to predict
action-value functions for a host of pseudo rewards derived from the stream of
observations. This leads to better state representations and therefore improved
performance, both in terms of data efficiency and final performance.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
# Dependency imports
import tensorflow.compat.v1 as tf
from trfl import action_value_ops
from trfl import base_ops
PixelControlExtra = collections.namedtuple(
"pixel_control_extra", ["spatial_loss", "pseudo_rewards"])
def pixel_control_rewards(observations, cell_size):
"""Calculates pixel control task rewards from observation sequence.
The observations are first split in a grid of KxK cells. For each cell a
distinct pseudo reward is computed as the average absolute change in pixel
intensity for all pixels in the cell. The change in intensity is averaged
across both pixels and channels (e.g. RGB).
The `observations` provided to this function should be cropped suitably, to
ensure that the observations' height and width are a multiple of `cell_size`.
The values of the `observations` tensor should be rescaled to [0, 1]. In the
UNREAL agent observations are cropped to 80x80, and each cell is 4x4 in size.
See "Reinforcement Learning with Unsupervised Auxiliary Tasks" by Jaderberg,
Mnih, Czarnecki et al. (https://arxiv.org/abs/1611.05397).
Args:
observations: A tensor of shape `[T+1,B,H,W,C...]`, where
* `T` is the sequence length, `B` is the batch size.
* `H` is height, `W` is width.
* `C...` is at least one channel dimension (e.g., colour, stack).
* `T` and `B` can be statically unknown.
cell_size: The size of each cell.
Returns:
A tensor of pixel control rewards calculated from the observation. The
shape is `[T,B,H',W']`, where `H'` and `W'` are determined by the
    `cell_size`. If evenly divisible, `H' = H/cell_size`, and similarly for `W`.
"""
# Calculate the absolute differences across the sequence.
abs_diff = tf.abs(observations[1:] - observations[:-1])
# Average over cells. `abs_diff` has shape [T,B,H,W,C...], e.g.,
# [T,B,H,W,C] if we have a colour channel. We want to use the TF avg_pool3d
# op, but it expects 5D inputs so we collapse all channel dimensions.
# Merge remaining dimensions after W: [T,B,H,W,C'].
full_shape = tf.shape(abs_diff)
preserved_shape = full_shape[:4]
trailing_shape = (tf.reduce_prod(full_shape[4:]),)
shape = tf.concat([preserved_shape, trailing_shape], 0)
abs_diff = tf.reshape(abs_diff, shape)
# Apply the averaging using average pooling and reducing over channel.
avg_abs_diff = tf.nn.avg_pool3d(
abs_diff,
ksize=[1, 1, cell_size, cell_size, 1],
strides=[1, 1, cell_size, cell_size, 1],
padding="VALID") # [T,B,H',W',C'].
pseudo_rewards = tf.reduce_mean(
avg_abs_diff, axis=[4], name="pseudo_rewards") # [T,B,H',W'].
sequence_batch = abs_diff.get_shape()[:2]
new_height_width = avg_abs_diff.get_shape()[2:4]
pseudo_rewards.set_shape(sequence_batch.concatenate(new_height_width))
return pseudo_rewards
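def _example_pixel_control_rewards():
  """Illustrative usage sketch only, not part of the trfl API; toy values."""
  # T=4 steps, batch of 2, 8x8 single-channel observations rescaled to [0, 1].
  observations = tf.random_uniform([5, 2, 8, 8, 1])  # [T+1, B, H, W, C].
  # With 4x4 cells this yields pseudo-rewards of shape [T, B, 2, 2].
  return pixel_control_rewards(observations, cell_size=4)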
def pixel_control_loss(
observations, actions, action_values, cell_size, discount_factor,
scale, crop_height_dim=(None, None), crop_width_dim=(None, None)):
"""Calculate n-step Q-learning loss for pixel control auxiliary task.
For each pixel-based pseudo reward signal, the corresponding action-value
function is trained off-policy, using Q(lambda). A discount of 0.9 is
commonly used for learning the value functions.
Note that, since pseudo rewards have a spatial structure, with neighbouring
cells exhibiting strong correlations, it is convenient to predict the action
values for all the cells through a deconvolutional head.
See "Reinforcement Learning with Unsupervised Auxiliary Tasks" by Jaderberg,
Mnih, Czarnecki et al. (https://arxiv.org/abs/1611.05397).
Args:
observations: A tensor of shape `[T+1,B, ...]`; `...` is the observation
shape, `T` the sequence length, and `B` the batch size. `T` and `B` can
be statically unknown for `observations`, `actions` and `action_values`.
actions: A tensor, shape `[T,B]`, of the actions across each sequence.
action_values: A tensor, shape `[T+1,B,H,W,N]` of pixel control action
values, where `H`, `W` are the number of pixel control cells/tasks, and
`N` is the number of actions.
cell_size: size of the cells used to derive the pixel based pseudo-rewards.
discount_factor: discount used for learning the value function associated
to the pseudo rewards; must be a scalar or a Tensor of shape [T,B].
scale: scale factor for pixels in `observations`.
crop_height_dim: tuple (min_height, max_height) specifying how
to crop the input observations before computing the pseudo-rewards.
crop_width_dim: tuple (min_width, max_width) specifying how
to crop the input observations before computing the pseudo-rewards.
Returns:
A namedtuple with fields:
* `loss`: a tensor containing the batch of losses, shape [B].
* `extra`: a namedtuple with fields:
      * `spatial_loss`: the per-cell spatial loss, shape [T, B, H', W'].
      * `pseudo_rewards`: the pseudo-rewards, shape [T, B, H', W'].
Raises:
ValueError: if the shape of `action_values` is not compatible with that of
the pseudo-rewards derived from the observations.
"""
# Useful shapes.
sequence_length, batch_size = base_ops.best_effort_shape(actions)
num_actions = action_values.get_shape().as_list()[-1]
height_width_q = action_values.get_shape().as_list()[2:-1]
# Calculate rewards using the observations. Crop observations if appropriate.
if crop_height_dim[0] is not None:
h_low, h_high = crop_height_dim
observations = observations[:, :, h_low:h_high, :]
if crop_width_dim[0] is not None:
w_low, w_high = crop_width_dim
observations = observations[:, :, :, w_low:w_high]
# Rescale observations by a constant factor.
observations *= tf.constant(scale)
# Compute pseudo-rewards and get their shape.
pseudo_rewards = pixel_control_rewards(observations, cell_size)
height_width = pseudo_rewards.get_shape().as_list()[2:]
# Check that pseudo-rewards and Q-values are compatible in shape.
if height_width != height_width_q:
raise ValueError(
"Pixel Control values are not compatible with the shape of the"
"pseudo-rewards derived from the observation. Pseudo-rewards have shape"
"{}, while Pixel Control values have shape {}".format(
height_width, height_width_q))
# We now have Q(s,a) and rewards, so can calculate the n-step loss. The
# QLambda loss op expects inputs of shape [T,B,N] and [T,B], but our tensors
# are in a variety of incompatible shapes. The state-action values have
# shape [T,B,H,W,N] and rewards have shape [T,B,H,W]. We can think of the
# [H,W] dimensions as extra batch dimensions for the purposes of the loss
# calculation, so we first collapse [B,H,W] into a single dimension.
q_tm1 = tf.reshape(
action_values[:-1], # [T,B,H,W,N].
[sequence_length, -1, num_actions],
name="q_tm1") # [T,BHW,N].
r_t = tf.reshape(
pseudo_rewards, # [T,B,H,W].
[sequence_length, -1],
name="r_t") # [T,BHW].
q_t = tf.reshape(
action_values[1:], # [T,B,H,W,N].
[sequence_length, -1, num_actions],
name="q_t") # [T,BHW,N].
# The actions tensor is of shape [T,B], and is the same for each H and W.
# We thus expand it to be same shape as the reward tensor, [T,BHW].
expanded_actions = tf.expand_dims(tf.expand_dims(actions, -1), -1)
a_tm1 = tf.tile(
expanded_actions, multiples=[1, 1] + height_width) # [T,B,H,W].
a_tm1 = tf.reshape(a_tm1, [sequence_length, -1]) # [T,BHW].
# We similarly expand-and-tile the discount to [T,BHW].
discount_factor = tf.convert_to_tensor(discount_factor)
if discount_factor.shape.ndims == 0:
pcont_t = tf.reshape(discount_factor, [1, 1]) # [1,1].
pcont_t = tf.tile(pcont_t, tf.shape(a_tm1)) # [T,BHW].
elif discount_factor.shape.ndims == 2:
tiled_pcont = tf.tile(
tf.expand_dims(tf.expand_dims(discount_factor, -1), -1),
[1, 1] + height_width)
pcont_t = tf.reshape(tiled_pcont, [sequence_length, -1])
else:
raise ValueError(
"The discount_factor must be a scalar or a tensor of rank 2."
"instead is a tensor of shape {}".format(
discount_factor.shape.as_list()))
# Compute a QLambda loss of shape [T,BHW]
loss, _ = action_value_ops.qlambda(q_tm1, a_tm1, r_t, pcont_t, q_t, lambda_=1)
# Take sum over sequence, sum over cells.
expanded_shape = [sequence_length, batch_size] + height_width
spatial_loss = tf.reshape(loss, expanded_shape) # [T,B,H,W].
# Return.
extra = PixelControlExtra(
spatial_loss=spatial_loss, pseudo_rewards=pseudo_rewards)
return base_ops.LossOutput(
tf.reduce_sum(spatial_loss, axis=[0, 2, 3]), extra) # [B]
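def _example_pixel_control_loss():
  """Illustrative usage sketch only, not part of the trfl API; toy values."""
  # T=4, B=2, 8x8 observations, 4x4 cells (a 2x2 grid of tasks), 3 actions.
  t, b, num_actions = 4, 2, 3
  observations = tf.random_uniform([t + 1, b, 8, 8, 1])
  actions = tf.zeros([t, b], dtype=tf.int32)
  action_values = tf.random_uniform([t + 1, b, 2, 2, num_actions])
  loss, _ = pixel_control_loss(
      observations, actions, action_values,
      cell_size=4, discount_factor=0.9, scale=1.0)
  return loss  # Shape [B].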
| trfl-master | trfl/pixel_control_ops.py |
# Copyright 2018 The trfl Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""TensorFlow ops for continuous-action Policy Gradient algorithms."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
# Dependency imports
from six.moves import zip
import tensorflow.compat.v1 as tf
import tree as nest
from trfl import base_ops
from trfl import value_ops
PolicyEntropyExtra = collections.namedtuple("policy_entropy_extra", ["entropy"])
SequenceA2CExtra = collections.namedtuple(
"sequence_a2c_extra", ["entropy", "entropy_loss", "baseline_loss",
"policy_gradient_loss", "advantages",
"discounted_returns"])
def policy_gradient(policies, actions, action_values, policy_vars=None,
name="policy_gradient"):
"""Computes policy gradient losses for a batch of trajectories.
See `policy_gradient_loss` for more information on expected inputs and usage.
Args:
policies: A distribution over a batch supporting a `log_prob` method, e.g.
an instance of `tfp.distributions.Distribution`. For example, for
a diagonal gaussian policy:
`policies = tfp.distributions.MultivariateNormalDiag(mus, sigmas)`
actions: An action batch Tensor used as the argument for `log_prob`. Has
shape equal to the batch shape of the policies concatenated with the
event shape of the policies (which may be scalar, in which case
concatenation leaves shape just equal to batch shape).
action_values: A Tensor containing estimates of the values of the `actions`.
Has shape equal to the batch shape of the policies.
    policy_vars: An optional iterable of Tensors used by `policies`. If
      provided, it is used in scope checks. For the multivariate normal example
      above this would be `[mus, sigmas]`.
name: Customises the name_scope for this op.
Returns:
loss: Tensor with same shape as `actions` containing the total loss for each
element in the batch. Differentiable w.r.t the variables in `policies`
only.
"""
policy_vars = list(policy_vars) if policy_vars else list()
with tf.name_scope(values=policy_vars + [actions, action_values], name=name):
actions = tf.stop_gradient(actions)
action_values = tf.stop_gradient(action_values)
log_prob_actions = policies.log_prob(actions)
# Prevent accidental broadcasting if possible at construction time.
action_values.get_shape().assert_is_compatible_with(
log_prob_actions.get_shape())
return -tf.multiply(log_prob_actions, action_values)
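def _example_policy_gradient():
  """Illustrative usage sketch only, not part of the trfl API; toy values.
  Assumes `tensorflow_probability` is available; it is not a dependency of
  this module.
  """
  import tensorflow_probability as tfp  # Assumed available for illustration.
  # Diagonal Gaussian policy with batch_shape [T=2, B=3] and event_shape [4].
  mus, sigmas = tf.zeros([2, 3, 4]), tf.ones([2, 3, 4])
  policies = tfp.distributions.MultivariateNormalDiag(mus, sigmas)
  actions = tf.zeros([2, 3, 4])  # batch_shape + event_shape.
  action_values = tf.ones([2, 3])  # One value estimate per (t, b).
  # Result has shape [T, B]: -log pi(a | x) * action_value, per timestep.
  return policy_gradient(policies, actions, action_values,
                         policy_vars=[mus, sigmas])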
def policy_gradient_loss(policies, actions, action_values, policy_vars=None,
name="policy_gradient_loss"):
"""Computes policy gradient losses for a batch of trajectories.
This wraps `policy_gradient` to accept a possibly nested array of `policies`
and `actions` in order to allow for multiple action distribution types or
independent multivariate distributions if not directly available. It also sums
up losses along the time dimension, and is more restrictive about shapes,
assuming a [T, B] layout for the `batch_shape` of the policies and a
concatenate(`[T, B]`, `event_shape` of the policies) shape for the actions.
Args:
policies: A (possibly nested structure of) distribution(s) supporting
`batch_shape` and `event_shape` properties along with a `log_prob`
method (e.g. an instance of `tfp.distributions.Distribution`),
with `batch_shape` equal to `[T, B]`.
actions: A (possibly nested structure of) N-D Tensor(s) with shape
`[T, B, ...]` where the final dimensions are the `event_shape` of the
corresponding distribution in the nested structure (the shape can be
just `[T, B]` if the `event_shape` is scalar).
action_values: Tensor of shape `[T, B]` containing an estimate of the value
of the selected `actions`.
policy_vars: An optional (possibly nested structure of) iterable(s) of
      Tensors used by `policies`. If provided, it is used in scope checks.
name: Customises the name_scope for this op.
Returns:
loss: Tensor of shape `[B]` containing the total loss for each sequence
in the batch. Differentiable w.r.t `policy_logits` only.
"""
actions = nest.flatten(actions)
if policy_vars:
policy_vars = nest.flatten_up_to(policies, policy_vars)
else:
policy_vars = [list()] * len(actions)
policies = nest.flatten(policies)
# Check happens after flatten so that we can be more flexible on nest
# structures. This is equivalent to asserting that `len(policies) ==
# len(actions)`, which is sufficient for what we're doing here.
nest.assert_same_structure(policies, actions)
for policies_, actions_ in zip(policies, actions):
policies_.batch_shape.assert_has_rank(2)
actions_.get_shape().assert_is_compatible_with(
policies_.batch_shape.concatenate(policies_.event_shape))
scoped_values = policy_vars + actions + [action_values]
with tf.name_scope(name, values=scoped_values):
# Loss for the policy gradient. Doesn't push additional gradients through
# the action_values.
policy_gradient_loss_sequence = tf.add_n([
policy_gradient(policies_, actions_, action_values, pvars)
for policies_, actions_, pvars in zip(policies, actions, policy_vars)])
return tf.reduce_sum(
policy_gradient_loss_sequence, axis=[0],
name="policy_gradient_loss")
def policy_entropy_loss(policies,
policy_vars=None,
scale_op=None,
name="policy_entropy_loss"):
"""Calculates entropy 'loss' for policies represented by a distributions.
Given a (possible nested structure of) batch(es) of policies, this
calculates the total entropy and corrects the sign so that minimizing the
resulting loss op is equivalent to increasing entropy in the batch.
This function accepts a nested structure of `policies` in order to allow for
multiple distribution types or for multiple action dimensions in the case
  where there is no corresponding multivariate form available for a given
univariate distribution. In this case, the loss is `sum_i(H(p_i, p_i))`
where `p_i` are members of the `policies` nest. It can be shown that this is
equivalent to calculating the entropy loss on the Cartesian product space
over all the action dimensions, if the sampled actions are independent.
The entropy loss is optionally scaled by some function of the policies.
E.g. for Categorical distributions there exists such a scaling which maps
the entropy loss into the range `[-1, 0]` in order to make it invariant to
  the size of the action space (specifically, one can divide the loss by
`sum_i(log(A_i))` where `A_i` is the number of categories in the i'th
Categorical distribution in the `policies` nest).
Args:
policies: A (possibly nested structure of) batch distribution(s)
supporting an `entropy` method that returns an N-D Tensor with shape
equal to the `batch_shape` of the distribution, e.g. an instance of
`tfp.distributions.Distribution`.
policy_vars: An optional (possibly nested structure of) iterable(s) of
      Tensors used by `policies`. If provided, it is used in scope checks.
scale_op: An optional op that takes `policies` as its only argument and
returns a scalar Tensor that is used to scale the entropy loss.
E.g. for Diag(sigma) Gaussian policies dividing by the number of
dimensions makes entropy loss invariant to the action space dimension.
name: Optional, name of this op.
Returns:
A namedtuple with fields:
* `loss`: a tensor containing the batch of losses, shape `[B1, B2, ...]`.
* `extra`: a namedtuple with fields:
* `entropy`: entropy of the policy, shape `[B1, B2, ...]`.
where [B1, B2, ... ] == policy.batch_shape
"""
flat_policy_vars = nest.flatten(policy_vars) if policy_vars else list()
with tf.name_scope(name, values=flat_policy_vars):
# We want a value that we can minimize along with other losses, and where
# minimizing means driving the policy towards a uniform distribution over
# the actions. We thus scale it by negative one so that it can be simply
# added to other losses.
scale = tf.constant(-1.0, dtype=tf.float32)
if scale_op:
scale *= scale_op(policies)
policies = nest.flatten(policies)
entropy = tf.add_n(
[policy.entropy() for policy in policies], name="entropy")
loss = tf.multiply(scale, entropy, name="entropy_loss")
return base_ops.LossOutput(loss, PolicyEntropyExtra(entropy))
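def _example_policy_entropy_loss_scale_op():
  """Illustrative usage sketch only, not part of the trfl API; toy values.
  Shows one possible `scale_op` for Categorical policies: dividing the loss by
  sum_i(log(A_i)) maps it into `[-1, 0]`. Assumes `tensorflow_probability` is
  available; it is not a dependency of this module.
  """
  import tensorflow_probability as tfp  # Assumed available for illustration.
  logits = [tf.zeros([5, 2, 3]), tf.zeros([5, 2, 4])]  # [T, B, A_i].
  policies = [tfp.distributions.Categorical(logits=l) for l in logits]
  def scale_op(unused_policies):
    # 1 / sum_i(log(A_i)) with A_1 = 3 and A_2 = 4 categories.
    return 1. / (tf.log(3.) + tf.log(4.))
  return policy_entropy_loss(policies, scale_op=scale_op)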
def sequence_a2c_loss(policies,
baseline_values,
actions,
rewards,
pcontinues,
bootstrap_value,
policy_vars=None,
lambda_=1,
entropy_cost=None,
baseline_cost=1,
entropy_scale_op=None,
name="SequenceA2CLoss"):
"""Constructs a TensorFlow graph computing the A2C/GAE loss for sequences.
This loss jointly learns the policy and the baseline. Therefore, gradients
for this loss flow through each tensor in `policies` and through each tensor
in `baseline_values`, but no other input tensors. The policy is learnt with
the advantage actor-critic loss, plus an optional entropy term. The baseline
is regressed towards the n-step bootstrapped returns given by the
reward/pcontinue sequence. The `baseline_cost` parameter scales the
gradients w.r.t the baseline relative to the policy gradient, i.e.
  `d(loss) / d(baseline) = baseline_cost * (n_step_return - baseline)`.
This function is designed for batches of sequences of data. Tensors are
assumed to be time major (i.e. the outermost dimension is time, the second
outermost dimension is the batch dimension). We denote the sequence length in
the shapes of the arguments with the variable `T`, the batch size with the
variable `B`, neither of which needs to be known at construction time. Index
`0` of the time dimension is assumed to be the start of the sequence.
`rewards` and `pcontinues` are the sequences of data taken directly from the
environment, possibly modulated by a discount. `baseline_values` are the
sequences of (typically learnt) estimates of the values of the states
visited along a batch of trajectories as observed by the agent given the
sequences of one or more actions sampled from `policies`.
The sequences in the tensors should be aligned such that an agent in a state
with value `V` that takes an action `a` transitions into another state
with value `V'`, receiving reward `r` and pcontinue `p`. Then `V`, `a`, `r`
and `p` are all at the same index `i` in the corresponding tensors. `V'` is
at index `i+1`, or in the `bootstrap_value` tensor if `i == T`.
For n-dimensional action vectors, a multivariate distribution must be used
for `policies`. In case there is no multivariate version for the desired
univariate distribution, or in case the `actions` object is a nested
structure (e.g. for multiple action types), this function also accepts a
nested structure of `policies`. In this case, the loss is given by
`sum_i(loss(p_i, a_i))` where `p_i` are members of the `policies` nest, and
`a_i` are members of the `actions` nest. We assume that a single baseline is
used across all action dimensions for each timestep.
Args:
policies: A (possibly nested structure of) distribution(s) supporting
`batch_shape` and `event_shape` properties & `log_prob` and `entropy`
methods (e.g. an instance of `tfp.distributions.Distribution`),
with `batch_shape` equal to `[T, B]`. E.g. for a (non-nested) diagonal
multivariate gaussian with dimension `A` this would be:
`policies = tfp.distributions.MultivariateNormalDiag(mus, sigmas)`
where `mus` and `sigmas` have shape `[T, B, A]`.
baseline_values: 2-D Tensor containing an estimate of the state value with
shape `[T, B]`.
actions: A (possibly nested structure of) N-D Tensor(s) with shape
`[T, B, ...]` where the final dimensions are the `event_shape` of the
corresponding distribution in the nested structure (the shape can be
just `[T, B]` if the `event_shape` is scalar).
rewards: 2-D Tensor with shape `[T, B]`.
pcontinues: 2-D Tensor with shape `[T, B]`.
bootstrap_value: 1-D Tensor with shape `[B]`.
    policy_vars: An optional (possibly nested structure of) iterable(s) of
      Tensors used by `policies`. If provided, it is used in scope checks. For
the multivariate normal example above this would be `[mus, sigmas]`.
lambda_: an optional scalar or 2-D Tensor with shape `[T, B]` for
Generalised Advantage Estimation as per
https://arxiv.org/abs/1506.02438.
entropy_cost: optional scalar cost that pushes the policy to have high
entropy, larger values cause higher entropies.
baseline_cost: scalar cost that scales the derivatives of the baseline
relative to the policy gradient.
entropy_scale_op: An optional op that takes `policies` as its only
argument and returns a scalar Tensor that is used to scale the entropy
loss. E.g. for Diag(sigma) Gaussian policies dividing by the number of
dimensions makes entropy loss invariant to the action space dimension.
See `policy_entropy_loss` for more info.
name: Customises the name_scope for this op.
Returns:
A namedtuple with fields:
* `loss`: a tensor containing the total loss, shape `[B]`.
* `extra`: a namedtuple with fields:
* `entropy`: total loss per sequence, shape `[B]`.
* `entropy_loss`: scaled entropy loss per sequence, shape `[B]`.
* `baseline_loss`: scaled baseline loss per sequence, shape `[B]`.
* `policy_gradient_loss`: policy gradient loss per sequence,
shape `[B]`.
        * `advantages`: advantage estimates per timestep, shape `[T, B]`.
* `discounted_returns`: discounted returns per timestep,
shape `[T, B]`.
"""
flat_policy_vars = nest.flatten(policy_vars) if policy_vars else list()
scoped_values = (flat_policy_vars + nest.flatten(actions) +
[baseline_values, rewards, pcontinues, bootstrap_value])
with tf.name_scope(name, values=scoped_values):
# Loss for the baseline, summed over the time dimension.
baseline_loss_td, td_lambda = value_ops.td_lambda(
baseline_values, rewards, pcontinues, bootstrap_value, lambda_)
# The TD error provides an estimate of the advantages of the actions.
advantages = td_lambda.temporal_differences
baseline_loss = tf.multiply(
tf.convert_to_tensor(baseline_cost, dtype=tf.float32),
baseline_loss_td,
name="baseline_loss")
# Loss for the policy. Doesn't push additional gradients through
# the advantages.
pg_loss = policy_gradient_loss(
policies, actions, advantages, policy_vars,
name="policy_gradient_loss")
total_loss = tf.add(pg_loss, baseline_loss, name="total_loss")
if entropy_cost is not None:
loss, extra = policy_entropy_loss(policies, policy_vars, entropy_scale_op)
entropy = tf.reduce_sum(extra.entropy, axis=0, name="entropy") # [B].
entropy_loss = tf.multiply(
tf.convert_to_tensor(entropy_cost, dtype=tf.float32),
tf.reduce_sum(loss, axis=0),
name="scaled_entropy_loss") # [B].
total_loss = tf.add(total_loss, entropy_loss,
name="total_loss_with_entropy")
else:
entropy = None
entropy_loss = None
extra = SequenceA2CExtra(
entropy=entropy,
entropy_loss=entropy_loss,
baseline_loss=baseline_loss,
policy_gradient_loss=pg_loss,
advantages=advantages,
discounted_returns=td_lambda.discounted_returns)
return base_ops.LossOutput(total_loss, extra)
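def _example_sequence_a2c_loss():
  """Illustrative usage sketch only, not part of the trfl API; toy values.
  Assumes `tensorflow_probability` is available; it is not a dependency of
  this module.
  """
  import tensorflow_probability as tfp  # Assumed available for illustration.
  # T=5, B=2, 3-dimensional diagonal Gaussian policy; all numbers are toys.
  t, b, a = 5, 2, 3
  mus, sigmas = tf.zeros([t, b, a]), tf.ones([t, b, a])
  policies = tfp.distributions.MultivariateNormalDiag(mus, sigmas)
  loss, extra = sequence_a2c_loss(
      policies=policies,
      baseline_values=tf.zeros([t, b]),
      actions=tf.zeros([t, b, a]),
      rewards=tf.ones([t, b]),
      pcontinues=0.99 * tf.ones([t, b]),
      bootstrap_value=tf.zeros([b]),
      policy_vars=[mus, sigmas],
      entropy_cost=0.01)
  return loss, extra  # loss: [B]; extra.advantages: [T, B].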
| trfl-master | trfl/policy_gradient_ops.py |
# Copyright 2018 The trfl Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""TensorFlow ops for the Retrace algorithm and continuous actions.
Safe and Efficient Off-Policy Reinforcement Learning
R. Munos, T. Stepleton, A. Harutyunyan, M. G. Bellemare
https://arxiv.org/abs/1606.02647
This variant is commonly used to update the Q function in RS0, which
additionally uses SVG or an SVG variant to update the policy.
Learning by Playing - Solving Sparse Reward Tasks from Scratch
M. Riedmiller, R. Hafner, T. Lampe, M. Neunert, J. Degrave, T. Van de Wiele,
V. Mnih, N. Heess, J. T. Springenberg
https://arxiv.org/abs/1802.10567
Learning Continuous Control Policies by Stochastic Value Gradients
N. Heess, G. Wayne, D. Silver, T. Lillicrap, Y. Tassa, T. Erez
https://arxiv.org/abs/1510.09142
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import tensorflow.compat.v1 as tf
QTraceReturns = collections.namedtuple("QTraceReturns", [
"qs", "importance_weights", "log_importance_weights",
"truncated_importance_weights", "deltas", "vs_minus_q_xs"
])
def retrace_from_action_log_probs(
behaviour_action_log_probs,
target_action_log_probs,
discounts,
rewards,
q_values,
values,
bootstrap_value,
lambda_=1.,
name="retrace_from_action_log_probs"):
"""Constructs Q/Retrace ops.
  This is an implementation of Retrace. In the description of the arguments,
  `T` refers to the sequence length over which the return is calculated and
  `B` denotes the batch size.
Args:
behaviour_action_log_probs: Log-probabilities. Shape [T, B].
    target_action_log_probs: Log-probabilities for the target policy.
      Shape [T, B].
discounts: Also called pcontinues. Discount encountered when following
the behaviour policy. Shape [T, B].
rewards: A tensor containing rewards generated by following the behaviour
policy. Shape [T, B].
q_values: Q-function estimates wrt. the target policy. Shape [T, B].
values: Value function estimates wrt. the target policy. Shape [T, B].
bootstrap_value: Value function estimate at time `T`. Shape [B].
lambda_: Mix between 1-step (lambda_=0) and n-step (lambda_=1).
name: The name scope that all qtrace ops will be created in.
Returns:
A `QTraceReturns` namedtuple containing:
* qs: The Retrace regression/policy gradient targets.
Can be used to calculate estimates of the advantage for policy
gradients or as regression target for Q-value functions. Shape [T, B].
* importance_weights: Importance sampling weights. Shape [T, B].
    * log_importance_weights: Log importance sampling weights. Shape [T, B].
* truncated_importance_weights: Called c_t in the paper. Shape [T, B].
* deltas: Shape [T, B]
* vs_minus_q_xs: Q-Retrace targets - Q(x_s, u_s). Shape [T, B].
"""
# Turn arguments to tensors.
behaviour_action_log_probs = tf.convert_to_tensor(
behaviour_action_log_probs, dtype=tf.float32)
target_action_log_probs = tf.convert_to_tensor(
target_action_log_probs, dtype=tf.float32)
values = tf.convert_to_tensor(values, dtype=tf.float32)
q_values = tf.convert_to_tensor(q_values, dtype=tf.float32)
bootstrap_value = tf.convert_to_tensor(bootstrap_value, dtype=tf.float32)
discounts = tf.convert_to_tensor(discounts, dtype=tf.float32)
rewards = tf.convert_to_tensor(rewards, dtype=tf.float32)
# Make sure tensor ranks are as expected.
behaviour_action_log_probs.get_shape().assert_has_rank(2)
target_action_log_probs.get_shape().assert_has_rank(2)
values.get_shape().assert_has_rank(2)
q_values.get_shape().assert_has_rank(2)
bootstrap_value.get_shape().assert_has_rank(1)
discounts.get_shape().assert_has_rank(2)
rewards.get_shape().assert_has_rank(2)
with tf.name_scope(
name,
values=[
behaviour_action_log_probs, target_action_log_probs, discounts,
rewards, q_values, values, bootstrap_value
]):
log_rhos = target_action_log_probs - behaviour_action_log_probs
return retrace_from_importance_weights(
log_rhos=log_rhos,
discounts=discounts,
rewards=rewards,
q_values=q_values,
values=values,
bootstrap_value=bootstrap_value,
lambda_=lambda_)
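# A minimal, commented-out usage sketch with made-up numbers (T=2, B=1). The
# resulting retrace.qs would typically serve as the regression target for the
# Q-network (the squared-error loss below is illustrative only):
#
#   retrace = retrace_from_action_log_probs(
#       behaviour_action_log_probs=tf.constant([[-1.0], [-0.5]]),
#       target_action_log_probs=tf.constant([[-0.9], [-0.6]]),
#       discounts=tf.constant([[0.99], [0.99]]),
#       rewards=tf.constant([[1.0], [0.0]]),
#       q_values=tf.constant([[2.0], [1.5]]),
#       values=tf.constant([[1.8], [1.4]]),
#       bootstrap_value=tf.constant([1.3]))
#   q_loss = 0.5 * tf.square(retrace.qs - q_values)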
def retrace_from_importance_weights(log_rhos,
discounts,
rewards,
q_values,
values,
bootstrap_value,
lambda_=1.0,
name="retrace_from_importance_weights"):
"""Constructs Q/Retrace ops.
  This is an implementation of Retrace. In the description of the arguments,
  `T` refers to the sequence length over which the return is calculated and
  `B` denotes the batch size.
Args:
    log_rhos: Log importance sampling weights, i.e. log of the ratios between
      target and behaviour action probabilities. Shape [T, B].
discounts: Also called pcontinues. Discount encountered when following
the behaviour policy. Shape [T, B].
rewards: A tensor containing rewards generated by following the behaviour
policy. Shape [T, B].
q_values: Q-function estimates wrt. the target policy. Shape [T, B].
values: Value function estimates wrt. the target policy. Shape [T, B].
bootstrap_value: Value function estimate at time `T`. Shape [B].
lambda_: Mix between 1-step (lambda_=0) and n-step (lambda_=1).
name: The name scope that all qtrace ops will be created in.
Returns:
A `QTraceReturns` namedtuple containing:
* qs: The Retrace regression/policy gradient targets.
Can be used to calculate estimates of the advantage for policy
gradients or as regression target for Q-value functions. Shape [T, B].
* importance_weights: Importance sampling weights. Shape [T, B].
* log_importance_weights: Importance sampling weights. Shape [T, B].
* truncated_importance_weights: Called c_t in the paper. Shape [T, B].
* deltas: Shape [T, B]
* vs_minus_q_xs: Q-Retrace targets - Q(x_s, u_s). Shape [T, B].
  Raises:
    ValueError: If the ranks of the input tensors are inconsistent with the
      rank of `log_rhos`.
"""
# Make sure tensor ranks are consistent.
rho_rank = log_rhos.get_shape().ndims # Usually 2.
q_values.get_shape().assert_has_rank(rho_rank)
values.get_shape().assert_has_rank(rho_rank)
bootstrap_value.get_shape().assert_has_rank(rho_rank - 1)
discounts.get_shape().assert_has_rank(rho_rank)
rewards.get_shape().assert_has_rank(rho_rank)
lambda_ = tf.convert_to_tensor(lambda_, dtype=tf.float32)
with tf.name_scope(
name, values=[log_rhos, discounts, rewards, values, bootstrap_value]):
rhos = tf.exp(log_rhos)
cs = tf.minimum(1.0, rhos, name="cs")
    # Shift cs one step ahead (the recursion at time t uses c_{t+1});
    # the final position is padded with 1.
cs = tf.concat([cs[1:], tf.ones_like(cs[-1:])], axis=0)
cs *= lambda_
# Append bootstrapped value to get [v1, ..., v_t+1]
values_t_plus_1 = tf.concat(
[values[1:], tf.expand_dims(bootstrap_value, 0)], axis=0)
# delta_t = (r_t + discount * V(x_{t+1}) - Q(x_t, a_t))
deltas = (rewards + discounts * values_t_plus_1 - q_values)
# Note that all sequences are reversed, computation starts from the back.
sequences = (
tf.reverse(discounts, axis=[0]),
tf.reverse(cs, axis=[0]),
tf.reverse(deltas, axis=[0]),
)
    # The Retrace corrections (vs - Q) are calculated through a scan from the
    # back to the beginning of the given trajectory.
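    # Concretely, with the shifted cs (so the c used at step t is c_{t+1}):
    #   (vs - Q)_t = delta_t + discount_t * c_{t+1} * (vs - Q)_{t+1},
    # with (vs - Q)_T = 0, which the reversed scan below evaluates.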
def scanfunc(acc, sequence_item):
discount_t, c_t, delta_t = sequence_item
return delta_t + discount_t * c_t * acc
initial_values = tf.zeros_like(bootstrap_value)
vs_minus_q_xs = tf.scan(
fn=scanfunc,
elems=sequences,
initializer=initial_values,
parallel_iterations=1,
back_prop=False,
name="scan")
# Reverse the results back to original order.
vs_minus_q_xs = tf.reverse(vs_minus_q_xs, [0], name="vs_minus_q_xs")
    # Add Q(x_s, a_s) to get the Retrace q targets.
    qs = tf.add(vs_minus_q_xs, q_values, name="qs")
result = QTraceReturns(
qs=tf.stop_gradient(qs),
importance_weights=tf.stop_gradient(rhos),
log_importance_weights=tf.stop_gradient(log_rhos),
truncated_importance_weights=tf.stop_gradient(cs),
deltas=tf.stop_gradient(deltas),
vs_minus_q_xs=tf.stop_gradient(vs_minus_q_xs))
return result
| trfl-master | trfl/continuous_retrace_ops.py |
# Copyright 2018 The trfl Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for multistep_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow.compat.v1 as tf
from trfl import sequence_ops
def get_n_step_backup(rewards, pcontinues, state_values, start, n):
"""Evaluates a single n-step backup (return) starting at position start.
http://incompleteideas.net/sutton/book/ebook/node73.html (Eq. 7.1)
Args:
rewards: a list containing a sequence of rewards.
pcontinues: a list containing a sequence of discounts.
state_values: a list containing a sequence of state-values.
start: position at which the n-Step return has to be evaluated.
n: number of steps over which rewards are summed before adding the
respective bootstrapped state-value.
Returns:
Sum of discounted rewards plus discounted bootstrapped value.
"""
accumulator = 0.0
k = 1.0
for i in xrange(start, start + n):
accumulator += k * rewards[i]
k *= pcontinues[i]
accumulator += k * state_values[start + n - 1]
return accumulator
def get_complex_n_step_backup(rewards, pcontinues, state_values, start, n,
lambda_):
"""Evaluates a complex n=step backup (sum of lambda-weighted n-step backups).
http://incompleteideas.net/sutton/book/ebook/node74.html (Eq. 7.3)
Args:
rewards: a list containing rewards.
pcontinues: a list containing discounts.
    state_values: a list containing bootstrapped state values.
start: position at which the n-Step return has to be evaluated.
    n: number of steps over which rewards are summed before adding the
      respective bootstrapped state value.
lambda_: mixing parameter lambda.
Returns:
A single complex backup.
"""
accumulator = 0.0
for t in xrange(1, n):
value = get_n_step_backup(rewards, pcontinues, state_values, start, t)
weight = (1 - lambda_) * (lambda_ ** (t - 1))
    accumulator += value * weight
value = get_n_step_backup(rewards, pcontinues, state_values, start, n)
weight = lambda_ ** (n - 1)
accumulator += value * weight
return accumulator
def get_complex_n_step_backup_at_all_times(rewards, pcontinues, state_values,
lambda_):
"""Evaluates complex n-step backups at all time-points.
Args:
rewards: a list containing rewards.
pcontinues: a list containing discounts.
state_values: a list containing bootstrapped state values.
lambda_: mixing parameter lambda.
Returns:
A list containing complex backups at all times.
"""
res = []
length = len(rewards)
for i in xrange(0, length):
res.append(get_complex_n_step_backup(rewards, pcontinues, state_values, i,
length - i, lambda_))
return res
class ScanDiscountedSumTest(tf.test.TestCase):
def testScanSumShapeInference(self):
"""scan_discounted_sum should support static shape inference."""
# No session needed since we're not evaluating any ops.
sequence_in = tf.placeholder(tf.float32, shape=[1647, 2001])
decays_in = tf.placeholder(tf.float32, shape=[1647, 2001])
bootstrap = tf.placeholder(tf.float32, shape=[2001])
result = sequence_ops.scan_discounted_sum(sequence_in, decays_in,
bootstrap,
reverse=False)
self.assertAllEqual(result.get_shape(), [1647, 2001])
# Let's do it again with higher-dimensional inputs.
sequence_in = tf.placeholder(tf.float32, shape=[4, 8, 15, 16, 23, 42])
decays_in = tf.placeholder(tf.float32, shape=[4, 8, 15, 16, 23, 42])
bootstrap = tf.placeholder(tf.float32, shape=[8, 15, 16, 23, 42])
result = sequence_ops.scan_discounted_sum(sequence_in, decays_in,
bootstrap,
reverse=False)
self.assertAllEqual(result.get_shape(), [4, 8, 15, 16, 23, 42])
def testScanSumShapeInferenceWithSeqLen(self):
"""scan_discounted_sum should support static shape inference."""
# No session needed since we're not evaluating any ops.
    sequence_in = tf.placeholder(tf.float32, shape=[1647, 2001])
    decays_in = tf.placeholder(tf.float32, shape=[1647, 2001])
    bootstrap = tf.placeholder(tf.float32, shape=[2001])
    sequence_lengths = tf.placeholder(tf.float32, shape=[2001])
    result = sequence_ops.scan_discounted_sum(
        sequence_in, decays_in, bootstrap, reverse=False,
        sequence_lengths=sequence_lengths)
    self.assertAllEqual(result.get_shape(), [1647, 2001])
# Let's do it again with higher-dimensional inputs.
sequence_in = tf.placeholder(tf.float32, shape=[4, 8, 15, 16, 23, 42])
decays_in = tf.placeholder(tf.float32, shape=[4, 8, 15, 16, 23, 42])
bootstrap = tf.placeholder(tf.float32, shape=[8, 15, 16, 23, 42])
sequence_lengths = tf.placeholder(tf.float32, shape=[8])
result = sequence_ops.scan_discounted_sum(sequence_in, decays_in,
bootstrap,
reverse=False,
sequence_lengths=sequence_lengths)
self.assertAllEqual(result.get_shape(), [4, 8, 15, 16, 23, 42])
def testScanSumWithDecays(self):
with self.test_session() as sess:
sequence = [[3, 1, 5, 2, 1], [-1.7, 1.2, 2.3, 0, 1]]
decays = [[0.5, 0.9, 1.0, 0.1, 0.5], [0.9, 0.5, 0.0, 2, 0.8]]
# We use transpose because it is easier to define the input data in
# BxT (batch x time) form, while scan_discounted_sum assumes TxB form.
sequence_in = tf.transpose(tf.constant(sequence, dtype=tf.float32))
decays_in = tf.transpose(tf.constant(decays, dtype=tf.float32))
bootstrap = tf.constant([0, 1.5], dtype=tf.float32)
result = sequence_ops.scan_discounted_sum(sequence_in, decays_in,
bootstrap,
reverse=False)
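      # With reverse=False the op accumulates forwards in time, per batch
      # element:
      #   result[0] = sequence[0] + decays[0] * bootstrap
      #   result[t] = sequence[t] + decays[t] * result[t - 1]
      # which is exactly what expected_result spells out below.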
expected_result = tf.constant(
[[3,
3 * 0.9 + 1,
(3 * 0.9 + 1) * 1.0 + 5,
((3 * 0.9 + 1) * 1.0 + 5) * 0.1 + 2,
(((3 * 0.9 + 1) * 1.0 + 5) * 0.1 + 2) * 0.5 + 1],
[-1.7 + 1.5 * 0.9,
(-1.7 + 1.5 * 0.9) * 0.5 + 1.2,
((-1.7 + 1.5 * 0.9) * 0.5 + 1.2) * 0.0 + 2.3,
(((-1.7 + 1.5 * 0.9) * 0.5 + 1.2) * 0.0 + 2.3) * 2 + 0,
((((-1.7 + 1.5 * 0.9) * 0.5 + 1.2) * 0.0 + 2.3) * 2 + 0) * 0.8 + 1,
]], dtype=tf.float32)
self.assertAllClose(sess.run(result),
sess.run(tf.transpose(expected_result)))
def testScanSumWithDecaysWithSeqLen(self):
with self.test_session() as sess:
sequence = [[3, 1, 5, 2, 1], [-1.7, 1.2, 2.3, 0, 1]]
decays = [[0.5, 0.9, 1.0, 0.1, 0.5], [0.9, 0.5, 0.0, 2, 0.8]]
sequence_lengths = [0, 2]
# We use transpose because it is easier to define the input data in
# BxT (batch x time) form, while scan_discounted_sum assumes TxB form.
sequence_in = tf.transpose(tf.constant(sequence, dtype=tf.float32))
decays_in = tf.transpose(tf.constant(decays, dtype=tf.float32))
bootstrap = tf.constant([0, 1.5], dtype=tf.float32)
result = sequence_ops.scan_discounted_sum(
sequence_in, decays_in, bootstrap, reverse=False,
sequence_lengths=sequence_lengths)
expected_result = tf.constant(
[[0, 0, 0, 0, 0],
[-1.7 + 1.5 * 0.9, (-1.7 + 1.5 * 0.9) * 0.5 + 1.2, 0, 0, 0]],
dtype=tf.float32)
self.assertAllClose(sess.run(result),
sess.run(tf.transpose(expected_result)))
def testScanSumEquivalenceWithSeqLen(self):
with self.test_session() as sess:
sequence_lengths = [0, 2]
bootstrap = tf.constant([0.5, 1.5], dtype=tf.float32)
sequence = [[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]]
decays = [[.1, .2, .3, .4, .5], [.6, .7, .8, .9, .10]]
eq_sequence = [[0, 0, 0, 0, 0], [6, 7, 0, 0, 0]]
eq_decays = [[0, 0, 0, 0, 0], [.6, .7, 0, 0, 0]]
eq_reverse_sequence = [[0, 0, 0, 0, 0], [7, 6, 0, 0, 0]]
eq_reverse_decays = [[0, 0, 0, 0, 0], [.7, .6, 0, 0, 0]]
# We use transpose because it is easier to define the input data in
# BxT (batch x time) form, while scan_discounted_sum assumes TxB form.
sequence_in = tf.transpose(tf.constant(sequence, dtype=tf.float32))
decays_in = tf.transpose(tf.constant(decays, dtype=tf.float32))
eq_sequence_in = tf.transpose(tf.constant(eq_sequence, dtype=tf.float32))
eq_decays_in = tf.transpose(tf.constant(eq_decays, dtype=tf.float32))
eq_reverse_sequence_in = tf.transpose(
tf.constant(eq_reverse_sequence, dtype=tf.float32))
eq_reverse_decays_in = tf.transpose(
tf.constant(eq_reverse_decays, dtype=tf.float32))
eq_result = sequence_ops.scan_discounted_sum(
sequence_in, decays_in, bootstrap, reverse=False,
sequence_lengths=sequence_lengths)
exp_eq_result = sequence_ops.scan_discounted_sum(
eq_sequence_in, eq_decays_in, bootstrap)
eq_reverse_result = sequence_ops.scan_discounted_sum(
sequence_in, decays_in, bootstrap, reverse=True,
sequence_lengths=sequence_lengths)
exp_eq_reverse_result = sequence_ops.scan_discounted_sum(
eq_reverse_sequence_in, eq_reverse_decays_in, bootstrap)
exp_eq_reverse_result = tf.reverse_sequence(
exp_eq_reverse_result, sequence_lengths, seq_axis=0, batch_axis=1)
self.assertAllClose(sess.run(eq_result),
sess.run(exp_eq_result))
self.assertAllClose(sess.run(eq_reverse_result),
sess.run(exp_eq_reverse_result))
def testScanSumWithDecaysReverse(self):
with self.test_session() as sess:
sequence = [[3, 1, 5], [-1.7, 1.2, 2.3]]
decays = [[0.5, 0.9, 1.0], [0.9, 0.5, 0.3]]
# We use transpose because it is easier to define the input data in
# BxT (batch x time) form, while scan_discounted_sum assumes TxB form.
sequence_in = tf.transpose(tf.constant(sequence, dtype=tf.float32))
decays_in = tf.transpose(tf.constant(decays, dtype=tf.float32))
bootstrap = tf.constant([0, 1.5], dtype=tf.float32)
result = sequence_ops.scan_discounted_sum(sequence_in, decays_in,
bootstrap,
reverse=True)
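      # With reverse=True the accumulation runs backwards in time:
      #   result[T - 1] = sequence[T - 1] + decays[T - 1] * bootstrap
      #   result[t] = sequence[t] + decays[t] * result[t + 1]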
expected_result = tf.constant(
[[(5 * 0.9 + 1) * 0.5 + 3,
5 * 0.9 + 1,
5],
[((2.3 + 0.3 * 1.5) * 0.5 + 1.2) * 0.9 - 1.7,
(2.3 + 0.3 * 1.5) * 0.5 + 1.2,
2.3 + 0.3 * 1.5,
]], dtype=tf.float32)
self.assertAllClose(sess.run(result),
sess.run(tf.transpose(expected_result)))
def testScanSumWithDecaysReverseWithSeqLen(self):
with self.test_session() as sess:
sequence = [[3, 1, 5], [-1.7, 1.2, 2.3]]
decays = [[0.5, 0.9, 1.0], [0.9, 0.5, 0.3]]
sequence_lengths = [2, 0]
# We use transpose because it is easier to define the input data in
# BxT (batch x time) form, while scan_discounted_sum assumes TxB form.
sequence_in = tf.transpose(tf.constant(sequence, dtype=tf.float32))
decays_in = tf.transpose(tf.constant(decays, dtype=tf.float32))
bootstrap = tf.constant([2.5, 1.5], dtype=tf.float32)
result = sequence_ops.scan_discounted_sum(
sequence_in, decays_in, bootstrap, reverse=True,
sequence_lengths=sequence_lengths)
expected_result = tf.constant(
[[(0.9 * 2.5 + 1) * 0.5 + 3, (0.9 * 2.5 + 1), 0], [0, 0, 0]],
dtype=tf.float32)
self.assertAllClose(sess.run(result),
sess.run(tf.transpose(expected_result)))
def testScanSumWithDecaysReverse3D(self):
"""scan_discounted_sum vs. higher-dimensional arguments."""
with self.test_session() as sess:
sequence = [[[3, 33], [1, 11], [5, 55]],
[[-1.7, -17], [1.2, 12], [2.3, 23]]]
decays = [[[0.5, 5], [0.9, 9], [1.0, 10]],
[[0.9, 9], [0.5, 5], [0.3, 3]]]
# We use transpose because it is easier to define the input data in
# BxT (batch x time) form, while scan_discounted_sum assumes TxB form.
sequence_in = tf.transpose(tf.constant(sequence, dtype=tf.float32),
perm=[1, 0, 2])
decays_in = tf.transpose(tf.constant(decays, dtype=tf.float32),
perm=[1, 0, 2])
bootstrap = tf.constant([[0, 0], [1.5, 15]], dtype=tf.float32)
result = sequence_ops.scan_discounted_sum(sequence_in, decays_in,
bootstrap,
reverse=True)
expected_result = tf.constant(
[[[(5 * 0.9 + 1) * 0.5 + 3,
(55 * 9 + 11) * 5 + 33],
[5 * 0.9 + 1,
55 * 9 + 11],
[5,
55]],
[[((2.3 + 0.3 * 1.5) * 0.5 + 1.2) * 0.9 - 1.7,
((23 + 3 * 15) * 5 + 12) * 9 - 17],
[(2.3 + 0.3 * 1.5) * 0.5 + 1.2,
(23 + 3 * 15) * 5 + 12],
[2.3 + 0.3 * 1.5,
23 + 3 * 15]]],
dtype=tf.float32)
self.assertAllClose(sess.run(result),
sess.run(tf.transpose(expected_result,
perm=[1, 0, 2])))
def testScanSumWithDecaysReverse3DWithSeqLen(self):
"""scan_discounted_sum vs. higher-dimensional arguments."""
with self.test_session() as sess:
sequence = [[[3, 33], [1, 11], [5, 55]],
[[-1.7, -17], [1.2, 12], [2.3, 23]]]
decays = [[[0.5, 5], [0.9, 9], [1.0, 10]],
[[0.9, 9], [0.5, 5], [0.3, 3]]]
sequence_lengths = [2, 0]
# We use transpose because it is easier to define the input data in
# BxT (batch x time) form, while scan_discounted_sum assumes TxB form.
sequence_in = tf.transpose(tf.constant(sequence, dtype=tf.float32),
perm=[1, 0, 2])
decays_in = tf.transpose(tf.constant(decays, dtype=tf.float32),
perm=[1, 0, 2])
bootstrap = tf.constant([[0, 0], [1.5, 15]], dtype=tf.float32)
result = sequence_ops.scan_discounted_sum(
sequence_in, decays_in, bootstrap, reverse=True,
sequence_lengths=sequence_lengths)
expected_result = np.asarray(
[[[1 * 0.5 + 3, 11 * 5 + 33], [1, 11], [0, 0]],
[[0, 0], [0, 0], [0, 0]]], dtype=np.float32)
self.assertAllClose(sess.run(result),
np.transpose(expected_result, axes=[1, 0, 2]))
class MultistepForwardViewTest(tf.test.TestCase):
def testMultistepForwardView(self):
with self.test_session() as sess:
# Define input data.
rewards = [[1, 0, -1, 0, 1], [0.5, 0.8, -0.7, 0.0, 2.1]]
pcontinues = [[0.5, 0.9, 1.0, 0.5, 0.8], [0.9, 0.5, 0.3, 0.8, 0.7]]
state_values = [[3, 1, 5, -5, 3], [-1.7, 1.2, 2.3, 2.2, 2.7]]
lambda_ = 0.75
# Evaluate expected complex backups at all time-steps for both batches.
expected_result = []
for b in xrange(0, 2):
expected_result.append(
get_complex_n_step_backup_at_all_times(rewards[b], pcontinues[b],
state_values[b], lambda_))
# Only partially-specify the input shapes - verifies that the
# dynamically sized Tensors are handled correctly.
state_values_pl = tf.placeholder(tf.float32, shape=[None, None])
rewards_pl = tf.placeholder(tf.float32, shape=[None, None])
pcontinues_pl = tf.placeholder(tf.float32, shape=[None, None])
      # We use transpose because it is easier to define the input data in
      # BxT (batch x time) form, while multistep_forward_view assumes TxB form.
state_values_in = tf.transpose(state_values_pl)
rewards_in = tf.transpose(rewards_pl)
pcontinues_in = tf.transpose(pcontinues_pl)
expected = tf.transpose(tf.constant(expected_result, dtype=tf.float32))
# Evaluate complex backups.
result = sequence_ops.multistep_forward_view(rewards_in, pcontinues_in,
state_values_in, lambda_)
feed_dict = {state_values_pl: state_values,
rewards_pl: rewards,
pcontinues_pl: pcontinues}
self.assertAllClose(sess.run(result, feed_dict=feed_dict),
sess.run(expected))
if __name__ == "__main__":
tf.test.main()
| trfl-master | trfl/sequence_ops_test.py |
# Copyright 2018 The trfl Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for action_value_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import tensorflow.compat.v1 as tf
from trfl import action_value_ops as rl
class QLearningTest(tf.test.TestCase):
def setUp(self):
super(QLearningTest, self).setUp()
self.q_tm1 = tf.constant([[1, 1, 0], [1, 2, 0]], dtype=tf.float32)
self.q_t = tf.constant([[0, 1, 0], [1, 2, 0]], dtype=tf.float32)
self.a_tm1 = tf.constant([0, 1], dtype=tf.int32)
self.pcont_t = tf.constant([0, 1], dtype=tf.float32)
self.r_t = tf.constant([1, 1], dtype=tf.float32)
self.qlearning = rl.qlearning(self.q_tm1, self.a_tm1, self.r_t,
self.pcont_t, self.q_t)
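    # Hand-computed values exercised by the tests below:
    #   target = r_t + pcont_t * max_a q_t = [1 + 0 * 1, 1 + 1 * 2] = [1, 3]
    #   td_error = target - q_tm1[a_tm1] = [1 - 1, 3 - 2] = [0, 1]
    #   loss = 0.5 * td_error^2 = [0, 0.5]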
def testRankCheck(self):
q_tm1 = tf.placeholder(tf.float32, [None])
with self.assertRaisesRegexp(
ValueError, "QLearning: Error in rank and/or compatibility check"):
self.qlearning = rl.qlearning(q_tm1, self.a_tm1, self.r_t, self.pcont_t,
self.q_t)
def testCompatibilityCheck(self):
a_tm1 = tf.placeholder(tf.int32, [3])
with self.assertRaisesRegexp(
ValueError, "QLearning: Error in rank and/or compatibility check"):
self.qlearning = rl.qlearning(self.q_tm1, a_tm1, self.r_t, self.pcont_t,
self.q_t)
def testTarget(self):
with self.test_session() as sess:
self.assertAllClose(sess.run(self.qlearning.extra.target), [1, 3])
def testTDError(self):
with self.test_session() as sess:
self.assertAllClose(sess.run(self.qlearning.extra.td_error), [0, 1])
def testLoss(self):
with self.test_session() as sess:
# Loss is 0.5 * td_error^2
self.assertAllClose(sess.run(self.qlearning.loss), [0, 0.5])
def testGradQtm1(self):
with self.test_session() as sess:
# Take gradients of the negative loss, so that the tests here check the
      # values propagated during gradient _descent_, rather than _ascent_.
gradients = tf.gradients([-self.qlearning.loss], [self.q_tm1])
grad_q_tm1 = sess.run(gradients[0])
self.assertAllClose(grad_q_tm1, [[0, 0, 0], [0, 1, 0]])
def testNoOtherGradients(self):
# Gradients are only defined for q_tm1, not any other input.
# Bellman residual variants could potentially generate a gradient wrt q_t.
gradients = tf.gradients([self.qlearning.loss],
[self.q_t, self.r_t, self.a_tm1, self.pcont_t])
self.assertEqual(gradients, [None, None, None, None])
class DoubleQLearningTest(tf.test.TestCase):
def setUp(self):
super(DoubleQLearningTest, self).setUp()
self.q_tm1 = tf.constant([[1, 1, 0], [1, 2, 0]], dtype=tf.float32)
self.a_tm1 = tf.constant([0, 1], dtype=tf.int32)
self.pcont_t = tf.constant([0, 1], dtype=tf.float32)
self.r_t = tf.constant([1, 1], dtype=tf.float32)
# The test is written so that it calculates the same thing as QLearningTest:
    # The selector, despite having different values, selects the same actions,
self.q_t_selector = tf.constant([[2, 10, 1], [11, 20, 1]], dtype=tf.float32)
# whose values are unchanged. (Other values are changed and larger.)
self.q_t_value = tf.constant([[99, 1, 98], [91, 2, 66]], dtype=tf.float32)
self.double_qlearning = rl.double_qlearning(self.q_tm1, self.a_tm1,
self.r_t, self.pcont_t,
self.q_t_value,
self.q_t_selector)
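    # With these inputs the target works out as
    #   r_t + pcont_t * q_t_value[argmax(q_t_selector)]
    #   = [1 + 0 * 1, 1 + 1 * 2] = [1, 3],
    # i.e. the same numbers as in QLearningTest, as intended.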
def testRankCheck(self):
q_t_selector = tf.placeholder(tf.float32, [None])
with self.assertRaisesRegexp(
ValueError,
"DoubleQLearning: Error in rank and/or compatibility check"):
self.double_qlearning = rl.double_qlearning(self.q_tm1, self.a_tm1,
self.r_t, self.pcont_t,
self.q_t_value, q_t_selector)
def testCompatibilityCheck(self):
r_t = tf.placeholder(tf.float32, [3])
with self.assertRaisesRegexp(
ValueError,
"DoubleQLearning: Error in rank and/or compatibility check"):
self.double_qlearning = rl.double_qlearning(self.q_tm1, self.a_tm1, r_t,
self.pcont_t, self.q_t_value,
self.q_t_selector)
def testDoubleQLearningBestAction(self):
with self.test_session() as sess:
self.assertAllClose(
sess.run(self.double_qlearning.extra.best_action), [1, 1])
def testDoubleQLearningTarget(self):
with self.test_session() as sess:
self.assertAllClose(sess.run(self.double_qlearning.extra.target), [1, 3])
def testDoubleQLearningTDError(self):
with self.test_session() as sess:
self.assertAllClose(
sess.run(self.double_qlearning.extra.td_error), [0, 1])
def testDoubleQLearningLoss(self):
with self.test_session() as sess:
# Loss is 0.5 * td_error^2
self.assertAllClose(sess.run(self.double_qlearning.loss), [0, 0.5])
def testDoubleQLearningGradQtm1(self):
with self.test_session() as sess:
# Take gradients of the negative loss, so that the tests here check the
      # values propagated during gradient _descent_, rather than _ascent_.
gradients = tf.gradients([-self.double_qlearning.loss], [self.q_tm1])
grad_q_tm1 = sess.run(gradients[0])
self.assertAllClose(grad_q_tm1, [[0, 0, 0], [0, 1, 0]])
def testDoubleQLearningNoOtherGradients(self):
# Gradients are only defined for q_tm1, not any other input.
# Bellman residual variants could potentially generate a gradient wrt q_t.
no_grads = [
self.r_t, self.a_tm1, self.pcont_t, self.q_t_value, self.q_t_selector
]
gradients = tf.gradients([self.double_qlearning.loss], no_grads)
self.assertEqual(gradients, [None] * len(no_grads))
class PersistentQLearningTest(tf.test.TestCase):
def setUp(self):
super(PersistentQLearningTest, self).setUp()
self.q_tm1 = tf.constant([[1, 2], [3, 4], [5, 6]], dtype=tf.float32)
self.a_tm1 = tf.constant([0, 1, 1], dtype=tf.int32)
self.pcont_t = tf.constant([0, 1, 0.5], dtype=tf.float32)
self.r_t = tf.constant([3, 2, 7], dtype=tf.float32)
self.q_t = tf.constant([[11, 12], [20, 16], [-8, -4]], dtype=tf.float32)
self.action_gap_scale = 0.25
self.persistent_qlearning = rl.persistent_qlearning(self.q_tm1, self.a_tm1,
self.r_t, self.pcont_t,
self.q_t,
self.action_gap_scale)
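    # The expected targets below are consistent with the persistent Q-learning
    # backup (alpha = action_gap_scale):
    #   target = r_t + pcont_t
    #            * ((1 - alpha) * max_a q_t + alpha * q_t[a_tm1])
    #          = [3, 2 + (0.75 * 20 + 0.25 * 16), 7 + 0.5 * (-4)] = [3, 21, 5]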
def testScalarCheck(self):
action_gap_scale = 2
with self.assertRaisesRegexp(
ValueError,
r"PersistentQLearning: action_gap_scale has to lie in \[0, 1\]\."):
self.persistent_qlearning = rl.persistent_qlearning(
self.q_tm1, self.a_tm1, self.r_t, self.pcont_t, self.q_t,
action_gap_scale)
def testCompatibilityCheck(self):
r_t = tf.placeholder(tf.float32, [2])
with self.assertRaisesRegexp(
ValueError,
"PersistentQLearning: Error in rank and/or compatibility check"):
self.persistent_qlearning = rl.persistent_qlearning(
self.q_tm1, self.a_tm1, r_t, self.pcont_t, self.q_t,
self.action_gap_scale)
def testPersistentQLearningTarget(self):
with self.test_session() as sess:
self.assertAllClose(
sess.run(self.persistent_qlearning.extra.target), [3, 21, 5])
def testPersistentQLearningTDError(self):
with self.test_session() as sess:
self.assertAllClose(
sess.run(self.persistent_qlearning.extra.td_error), [2, 17, -1])
def testPersistentQLearningLoss(self):
with self.test_session() as sess:
# Loss is 0.5 * td_error^2
self.assertAllClose(
sess.run(self.persistent_qlearning.loss), [2, 144.5, 0.5])
def testPersistentQLearningGradQtm1(self):
with self.test_session() as sess:
# Take gradients of the negative loss, so that the tests here check the
      # values propagated during gradient _descent_, rather than _ascent_.
gradients = tf.gradients([-self.persistent_qlearning.loss], [self.q_tm1])
grad_q_tm1 = sess.run(gradients[0])
self.assertAllClose(grad_q_tm1, [[2, 0], [0, 17], [0, -1]])
def testPersistentQLearningNoOtherGradients(self):
# Gradients are only defined for q_tm1, not any other input.
# Bellman residual variants could potentially generate a gradient wrt q_t.
no_grads = [self.r_t, self.a_tm1, self.pcont_t, self.q_t]
gradients = tf.gradients([self.persistent_qlearning.loss], no_grads)
self.assertEqual(gradients, [None] * len(no_grads))
class SarsaTest(tf.test.TestCase):
"""Tests for Sarsa learner."""
def setUp(self):
super(SarsaTest, self).setUp()
self.q_tm1 = tf.constant([[1, 1, 0], [1, 1, 0]], dtype=tf.float32)
self.q_t = tf.constant([[0, 1, 0], [3, 2, 0]], dtype=tf.float32)
self.a_tm1 = tf.constant([0, 1], dtype=tf.int32)
self.a_t = tf.constant([1, 0], dtype=tf.int32)
self.pcont_t = tf.constant([0, 1], dtype=tf.float32)
self.r_t = tf.constant([1, 1], dtype=tf.float32)
self.sarsa = rl.sarsa(self.q_tm1, self.a_tm1, self.r_t, self.pcont_t,
self.q_t, self.a_t)
def testRankCheck(self):
q_tm1 = tf.placeholder(tf.float32, [None])
with self.assertRaisesRegexp(
ValueError, "Sarsa: Error in rank and/or compatibility check"):
self.sarsa = rl.sarsa(q_tm1, self.a_tm1, self.r_t, self.pcont_t, self.q_t,
self.a_t)
def testCompatibilityCheck(self):
a_t = tf.placeholder(tf.float32, [3])
with self.assertRaisesRegexp(
ValueError, "Sarsa: Error in rank and/or compatibility check"):
self.sarsa = rl.sarsa(self.q_tm1, self.a_tm1, self.r_t, self.pcont_t,
self.q_t, a_t)
def testVariableBatchSize(self):
q_tm1 = tf.placeholder(tf.float32, shape=[None, 3])
q_t = tf.placeholder(tf.float32, shape=[None, 3])
a_tm1 = tf.placeholder(tf.int32, shape=[None])
pcont_t = tf.placeholder(tf.float32, shape=[None])
a_t = tf.placeholder(tf.int32, shape=[None])
r_t = tf.placeholder(tf.float32, shape=[None])
sarsa = rl.sarsa(q_tm1, a_tm1, r_t, pcont_t, q_t, a_t)
# Check static shapes.
self.assertEqual(sarsa.loss.get_shape().as_list(), [None])
self.assertEqual(sarsa.extra.td_error.get_shape().as_list(), [None])
self.assertEqual(sarsa.extra.target.get_shape().as_list(), [None])
# Check runtime shapes.
batch_size = 11
feed_dict = {
q_tm1: np.random.random([batch_size, 3]),
q_t: np.random.random([batch_size, 3]),
a_tm1: np.random.randint(0, 3, [batch_size]),
pcont_t: np.random.random([batch_size]),
a_t: np.random.randint(0, 3, [batch_size]),
r_t: np.random.random(batch_size)
}
with self.test_session() as sess:
loss, td_error, target = sess.run(
[sarsa.loss, sarsa.extra.td_error, sarsa.extra.target],
feed_dict=feed_dict)
self.assertEqual(loss.shape, (batch_size,))
self.assertEqual(td_error.shape, (batch_size,))
self.assertEqual(target.shape, (batch_size,))
def testTarget(self):
"""Tests that target value == r_t + pcont_t * q_t[a_t]."""
with self.test_session() as sess:
self.assertAllClose(sess.run(self.sarsa.extra.target), [1, 4])
def testTDError(self):
"""Tests that td_error = target - q_tm1[a_tm1]."""
with self.test_session() as sess:
self.assertAllClose(sess.run(self.sarsa.extra.td_error), [0, 3])
def testLoss(self):
"""Tests that loss == 0.5 * td_error^2."""
with self.test_session() as sess:
# Loss is 0.5 * td_error^2
self.assertAllClose(sess.run(self.sarsa.loss), [0, 4.5])
def testGradQtm1(self):
"""Tests that the gradients of negative loss are equal to the td_error."""
with self.test_session() as sess:
# Take gradients of the negative loss, so that the tests here check the
      # values propagated during gradient _descent_, rather than _ascent_.
gradients = tf.gradients([-self.sarsa.loss], [self.q_tm1])
grad_q_tm1 = sess.run(gradients[0])
self.assertAllClose(grad_q_tm1, [[0, 0, 0], [0, 3, 0]])
def testNoOtherGradients(self):
"""Tests no gradient propagates through any tensors other than q_tm1."""
# Gradients are only defined for q_tm1, not any other input.
# Bellman residual variants could potentially generate a gradient wrt q_t.
gradients = tf.gradients(
[self.sarsa.loss],
[self.q_t, self.r_t, self.a_tm1, self.pcont_t, self.a_t])
self.assertEqual(gradients, [None, None, None, None, None])
class SarseTest(tf.test.TestCase):
"""Tests for Sarse learner."""
def setUp(self):
super(SarseTest, self).setUp()
self.q_tm1 = tf.constant([[1, 1, 0.5], [1, 1, 3]], dtype=tf.float32)
self.q_t = tf.constant([[1.5, 1, 2], [3, 2, 1]], dtype=tf.float32)
self.a_tm1 = tf.constant([0, 1], dtype=tf.int32)
self.probs_a_t = tf.constant([[0.2, 0.5, 0.3], [0.3, 0.4, 0.3]],
dtype=tf.float32)
self.pcont_t = tf.constant([1, 1], dtype=tf.float32)
self.r_t = tf.constant([4, 1], dtype=tf.float32)
self.sarse = rl.sarse(self.q_tm1, self.a_tm1, self.r_t, self.pcont_t,
self.q_t, self.probs_a_t)
def testRankCheck(self):
q_tm1 = tf.placeholder(tf.float32, [None])
with self.assertRaisesRegexp(
ValueError, "Sarse: Error in rank and/or compatibility check"):
self.sarse = rl.sarse(q_tm1, self.a_tm1, self.r_t, self.pcont_t, self.q_t,
self.probs_a_t)
def testCompatibilityCheck(self):
probs_a_t = tf.placeholder(tf.float32, [None, 2])
with self.assertRaisesRegexp(
ValueError, "Sarse: Error in rank and/or compatibility check"):
self.sarse = rl.sarse(self.q_tm1, self.a_tm1, self.r_t, self.pcont_t,
self.q_t, probs_a_t)
def testIncorrectProbsTensor(self):
probs_a_t = tf.constant([[0.2, 0.5, 0.3], [0.3, 0.5, 0.3]],
dtype=tf.float32)
with self.test_session() as sess:
with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
"probs_a_t tensor does not sum to 1"):
self.sarse = rl.sarse(
self.q_tm1,
self.a_tm1,
self.r_t,
self.pcont_t,
self.q_t,
probs_a_t,
debug=True)
sess.run(self.sarse.extra.target)
def testVariableBatchSize(self):
q_tm1 = tf.placeholder(tf.float32, shape=[None, 3])
q_t = tf.placeholder(tf.float32, shape=[None, 3])
a_tm1 = tf.placeholder(tf.int32, shape=[None])
pcont_t = tf.placeholder(tf.float32, shape=[None])
probs_a_t = tf.placeholder(tf.float32, shape=[None, 3])
r_t = tf.placeholder(tf.float32, shape=[None])
sarse = rl.sarse(q_tm1, a_tm1, r_t, pcont_t, q_t, probs_a_t)
# Check static shapes.
self.assertEqual(sarse.loss.get_shape().as_list(), [None])
self.assertEqual(sarse.extra.td_error.get_shape().as_list(), [None])
self.assertEqual(sarse.extra.target.get_shape().as_list(), [None])
# Check runtime shapes.
batch_size = 11
feed_dict = {
q_tm1: np.random.random([batch_size, 3]),
q_t: np.random.random([batch_size, 3]),
a_tm1: np.random.randint(0, 3, [batch_size]),
pcont_t: np.random.random([batch_size]),
r_t: np.random.random(batch_size),
probs_a_t: np.random.uniform(size=[batch_size, 3])
}
with self.test_session() as sess:
loss, td_error, target = sess.run(
[sarse.loss, sarse.extra.td_error, sarse.extra.target],
feed_dict=feed_dict)
self.assertEqual(loss.shape, (batch_size,))
self.assertEqual(td_error.shape, (batch_size,))
self.assertEqual(target.shape, (batch_size,))
def testTarget(self):
with self.test_session() as sess:
      # target is r_t + pcont_t * sum_a (probs_a_t[a] * q_t[a])
self.assertAllClose(sess.run(self.sarse.extra.target), [5.4, 3])
def testTDError(self):
"""Tests that td_error = target - q_tm1[a_tm1]."""
with self.test_session() as sess:
self.assertAllClose(sess.run(self.sarse.extra.td_error), [4.4, 2])
def testLoss(self):
"""Tests that loss == 0.5 * td_error^2."""
with self.test_session() as sess:
# Loss is 0.5 * td_error^2
self.assertAllClose(sess.run(self.sarse.loss), [9.68, 2])
def testGradQtm1(self):
"""Tests that the gradients of negative loss are equal to the td_error."""
with self.test_session() as sess:
# Take gradients of the negative loss, so that the tests here check the
      # values propagated during gradient _descent_, rather than _ascent_.
gradients = tf.gradients([-self.sarse.loss], [self.q_tm1])
grad_q_tm1 = sess.run(gradients[0])
self.assertAllClose(grad_q_tm1, [[4.4, 0, 0], [0, 2, 0]])
def testNoOtherGradients(self):
"""Tests no gradient propagates through any tensors other than q_tm1."""
# Gradients are only defined for q_tm1, not any other input.
# Bellman residual variants could potentially generate a gradient wrt q_t.
gradients = tf.gradients(
[self.sarse.loss],
[self.q_t, self.r_t, self.a_tm1, self.pcont_t, self.probs_a_t])
self.assertEqual(gradients, [None, None, None, None, None])
class QLambdaTest(tf.test.TestCase):
def setUp(self):
super(QLambdaTest, self).setUp()
# Tensor dimensions below: TxBxA (time, batch id, action).
self.q_tm1 = tf.constant(
[[[1.1, 2.1], [2.1, 3.1]], [[-1.1, 1.1], [-1.1, 0.1]],
[[3.1, -3.1], [-2.1, -1.1]]],
dtype=tf.float32)
self.q_t = tf.constant([[[1.2, 2.2], [4.2, 2.2]], [[-1.2, 0.2], [1.2, 1.2]],
[[2.2, -1.2], [-1.2, -2.2]]],
dtype=tf.float32)
# Tensor dimensions below: TxB (time, batch id).
self.a_tm1 = tf.constant([[0, 1], [1, 0], [0, 0]], dtype=tf.int32)
self.pcont_t = tf.constant([[0.00, 0.88], [0.89, 1.00], [0.85, 0.83]],
dtype=tf.float32)
self.r_t = tf.constant([[-1.3, 1.3], [-1.3, 5.3], [2.3, -3.3]],
dtype=tf.float32)
self.lambda_ = tf.constant([[0.67, 0.68], [0.65, 0.69], [0.66, 0.64]],
dtype=tf.float32)
self.qlearning = rl.qlambda(self.q_tm1, self.a_tm1, self.r_t, self.pcont_t,
self.q_t, self.lambda_)
# Evaluate target Q-values used for testing.
# t20 is Target for timestep 2, batch 0
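    # Each target follows the recursion
    #   G_t = r_t + pcont_t * (lambda_t * G_{t+1}
    #                          + (1 - lambda_t) * max_a q_t),
    # with the final step bootstrapping from max_a q_t alone.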
self.t20 = 2.2 * 0.85 + 2.3
self.t10 = (self.t20 * 0.65 + 0.2 * (1 - 0.65)) * 0.89 - 1.3
self.t00 = (self.t10 * 0.67 + 2.2 * (1 - 0.67)) * 0.00 - 1.3
self.t21 = -1.2 * 0.83 - 3.3
self.t11 = (self.t21 * 0.69 + 1.2 * (1 - 0.69)) * 1.00 + 5.3
self.t01 = (self.t11 * 0.68 + 4.2 * (1 - 0.68)) * 0.88 + 1.3
def testRankCheck(self):
lambda_ = tf.placeholder(tf.float32, [None, None, 2])
with self.assertRaisesRegexp(
ValueError, "QLambda: Error in rank and/or compatibility check"):
self.qlearning = rl.qlambda(self.q_tm1, self.a_tm1, self.r_t,
self.pcont_t, self.q_t, lambda_)
def testCompatibilityCheck(self):
r_t = tf.placeholder(tf.float32, [4, 2])
with self.assertRaisesRegexp(
ValueError, "QLambda: Error in rank and/or compatibility check"):
self.qlearning = rl.qlambda(self.q_tm1, self.a_tm1, r_t, self.pcont_t,
self.q_t, self.lambda_)
def testTarget(self):
# Please note: the last two values of lambda_ are effectively ignored as
# there is nothing to mix at the end of the sequence.
with self.test_session() as sess:
self.assertAllClose(
sess.run(self.qlearning.extra.target),
[[self.t00, self.t01], [self.t10, self.t11], [self.t20, self.t21]])
def testTDError(self):
with self.test_session() as sess:
self.assertAllClose(
sess.run(self.qlearning.extra.td_error),
[[self.t00 - 1.1, self.t01 - 3.1], [self.t10 - 1.1, self.t11 + 1.1],
[self.t20 - 3.1, self.t21 + 2.1]])
def testLoss(self):
with self.test_session() as sess:
self.assertAllClose(
sess.run(self.qlearning.loss),
[[0.5 * (self.t00 - 1.1)**2, 0.5 * (self.t01 - 3.1)**2],
[0.5 * (self.t10 - 1.1)**2, 0.5 * (self.t11 + 1.1)**2],
[0.5 * (self.t20 - 3.1)**2, 0.5 * (self.t21 + 2.1)**2]])
def testGradQtm1(self):
with self.test_session() as sess:
# Take gradients of the negative loss, so that the tests here check the
# values propagated during gradient _descent_, rather than _ascent_.
gradients = tf.gradients([-self.qlearning.loss], [self.q_tm1])
grad_q_tm1 = sess.run(gradients[0])
self.assertAllClose(grad_q_tm1,
[[[self.t00 - 1.1, 0], [0, self.t01 - 3.1]],
[[0, self.t10 - 1.1], [self.t11 + 1.1, 0]],
[[self.t20 - 3.1, 0], [self.t21 + 2.1, 0]]])
def testNoOtherGradients(self):
# Gradients are only defined for q_tm1, not any other input.
# Bellman residual variants could potentially generate a gradient wrt q_t.
gradients = tf.gradients(
[self.qlearning.loss],
[self.q_t, self.r_t, self.a_tm1, self.pcont_t, self.lambda_])
self.assertEqual(gradients, [None, None, None, None, None])
class PengsQLambdaTest(tf.test.TestCase):
  # These tests verify that `qlambda` operates as expected when the lambda_
  # parameter is a python scalar. We compare against results calculated by
  # `qlambda` when lambda_ is a tensor whose entries are all equal to the
  # scalar value. (The correct operation of that configuration is tested by
  # QLambdaTest.)
def setUp(self):
super(PengsQLambdaTest, self).setUp()
# Tensor dimensions below: TxBxA (time, batch id, action).
self.q_tm1 = tf.constant(
[[[1.1, 2.1], [2.1, 3.1]], [[-1.1, 1.1], [-1.1, 0.1]],
[[3.1, -3.1], [-2.1, -1.1]]],
dtype=tf.float32)
self.q_t = tf.constant([[[1.2, 2.2], [4.2, 2.2]], [[-1.2, 0.2], [1.2, 1.2]],
[[2.2, -1.2], [-1.2, -2.2]]],
dtype=tf.float32)
# Tensor dimensions below: TxB (time, batch id).
self.a_tm1 = tf.constant([[0, 1], [1, 0], [0, 0]], dtype=tf.int32)
self.pcont_t = tf.constant([[0.00, 0.88], [0.89, 1.00], [0.85, 0.83]],
dtype=tf.float32)
self.r_t = tf.constant([[-1.3, 1.3], [-1.3, 5.3], [2.3, -3.3]],
dtype=tf.float32)
self.lambda_scalar = 0.5
self.lambda_ = tf.constant([[self.lambda_scalar, self.lambda_scalar],
[self.lambda_scalar, self.lambda_scalar],
[self.lambda_scalar, self.lambda_scalar]],
dtype=tf.float32)
# Evaluate trusted values by defining lambda_ as a tensor.
self.qlearning_reference = rl.qlambda(self.q_tm1, self.a_tm1, self.r_t,
self.pcont_t, self.q_t, self.lambda_)
# Evaluate values by defining lambda_ as a python number.
self.qlearning = rl.qlambda(self.q_tm1, self.a_tm1, self.r_t, self.pcont_t,
self.q_t, self.lambda_scalar)
def testRankCheck(self):
q_tm1 = tf.placeholder(tf.float32, [None, 3])
with self.assertRaisesRegexp(
ValueError, "QLambda: Error in rank and/or compatibility check"):
self.qlearning = rl.qlambda(q_tm1, self.a_tm1, self.r_t, self.pcont_t,
self.q_t, self.lambda_scalar)
def testCompatibilityCheck(self):
a_tm1 = tf.placeholder(tf.int32, [5, 2])
with self.assertRaisesRegexp(
ValueError, "QLambda: Error in rank and/or compatibility check"):
self.qlearning = rl.qlambda(self.q_tm1, a_tm1, self.r_t, self.pcont_t,
self.q_t, self.lambda_scalar)
def testTarget(self):
with self.test_session() as sess:
self.assertAllClose(
sess.run(self.qlearning.extra.target),
sess.run(self.qlearning_reference.extra.target))
def testTDError(self):
with self.test_session() as sess:
self.assertAllClose(
sess.run(self.qlearning.extra.td_error),
sess.run(self.qlearning_reference.extra.td_error))
def testLoss(self):
with self.test_session() as sess:
self.assertAllClose(
sess.run(self.qlearning.loss),
sess.run(self.qlearning_reference.loss))
def testGradQtm1(self):
with self.test_session() as sess:
gradients = tf.gradients([-self.qlearning.loss], [self.q_tm1])
gradients_reference = tf.gradients([-self.qlearning_reference.loss],
[self.q_tm1])
self.assertAllClose(
sess.run(gradients[0]), sess.run(gradients_reference[0]))
class SarsaLambdaTest(tf.test.TestCase):
def setUp(self):
super(SarsaLambdaTest, self).setUp()
# Tensor dimensions below: TxBxA (time, batch id, action).
self.q_tm1 = tf.constant(
[[[1.1, 2.1], [2.1, 3.1]], [[-1.1, 1.1], [-1.1, 0.1]],
[[3.1, -3.1], [-2.1, -1.1]]],
dtype=tf.float32)
self.q_t = tf.constant([[[1.2, 2.2], [4.2, 2.2]], [[-1.2, 0.2], [1.2, 1.2]],
[[2.2, -1.2], [-1.2, -2.2]]],
dtype=tf.float32)
# Tensor dimensions below: TxB (time, batch id).
self.a_tm1 = tf.constant([[0, 1], [1, 0], [0, 0]], dtype=tf.int32)
self.pcont_t = tf.constant([[0.00, 0.88], [0.89, 1.00], [0.85, 0.83]],
dtype=tf.float32)
self.r_t = tf.constant([[-1.3, 1.3], [-1.3, 5.3], [2.3, -3.3]],
dtype=tf.float32)
self.lambda_ = 0.65
self.a_t = tf.constant([[1, 0], [0, 0], [0, 1]], dtype=tf.int32)
self.sarsa = rl.sarsa_lambda(self.q_tm1, self.a_tm1, self.r_t, self.pcont_t,
self.q_t, self.a_t, self.lambda_)
def testRankCheck(self):
q_tm1 = tf.placeholder(tf.float32, [None, 3])
with self.assertRaisesRegexp(
ValueError, "SarsaLambda: Error in rank and/or compatibility check"):
self.sarsa = rl.sarsa_lambda(q_tm1, self.a_tm1, self.r_t, self.pcont_t,
self.q_t, self.a_t, self.lambda_)
def testCompatibilityCheck(self):
r_t = tf.placeholder(tf.float32, [4, 2])
with self.assertRaisesRegexp(
ValueError, "SarsaLambda: Error in rank and/or compatibility check"):
self.sarsa = rl.sarsa_lambda(self.q_tm1, self.a_tm1, r_t, self.pcont_t,
self.q_t, self.a_t, self.lambda_)
def testNoOtherGradients(self):
# Gradients are only defined for q_tm1, not any other input.
# Bellman residual variants could potentially generate a gradient wrt q_t.
gradients = tf.gradients(
[self.sarsa.loss],
[self.q_t, self.r_t, self.a_tm1, self.pcont_t, self.a_t, self.lambda_])
self.assertEqual(gradients, [None, None, None, None, None, None])
class QVTest(tf.test.TestCase):
"""Tests for QV learner."""
def setUp(self):
super(QVTest, self).setUp()
self.q_tm1 = tf.constant([[1, 1, 0], [1, 1, 0]], dtype=tf.float32)
self.a_tm1 = tf.constant([0, 1], dtype=tf.int32)
self.pcont_t = tf.constant([0, 1], dtype=tf.float32)
self.r_t = tf.constant([1, 1], dtype=tf.float32)
self.v_t = tf.constant([1, 3], dtype=tf.float32)
self.loss_op, self.extra_ops = rl.qv_learning(self.q_tm1, self.a_tm1,
self.r_t, self.pcont_t,
self.v_t)
def testRankCheck(self):
q_tm1 = tf.placeholder(tf.float32, [None])
with self.assertRaisesRegexp(
ValueError, "QVLearning: Error in rank and/or compatibility check"):
rl.qv_learning(q_tm1, self.a_tm1, self.r_t, self.pcont_t, self.v_t)
def testVariableBatchSize(self):
q_tm1 = tf.placeholder(tf.float32, shape=[None, 3])
a_tm1 = tf.placeholder(tf.int32, shape=[None])
pcont_t = tf.placeholder(tf.float32, shape=[None])
r_t = tf.placeholder(tf.float32, shape=[None])
v_t = tf.placeholder(tf.float32, shape=[None])
loss_op, extra_ops = rl.qv_learning(q_tm1, a_tm1, r_t, pcont_t, v_t)
# Check static shapes.
self.assertEqual(loss_op.get_shape().as_list(), [None])
self.assertEqual(extra_ops.td_error.get_shape().as_list(), [None])
self.assertEqual(extra_ops.target.get_shape().as_list(), [None])
# Check runtime shapes.
batch_size = 11
feed_dict = {
q_tm1: np.random.random([batch_size, 3]),
a_tm1: np.random.randint(0, 3, [batch_size]),
pcont_t: np.random.random([batch_size]),
r_t: np.random.random(batch_size),
v_t: np.random.random(batch_size),
}
with self.test_session() as sess:
loss, td_error, target = sess.run(
[loss_op, extra_ops.td_error, extra_ops.target], feed_dict=feed_dict)
self.assertEqual(loss.shape, (batch_size,))
self.assertEqual(td_error.shape, (batch_size,))
self.assertEqual(target.shape, (batch_size,))
def testTarget(self):
"""Tests that target value == r_t + pcont_t * q_t[a_t]."""
with self.test_session() as sess:
self.assertAllClose(sess.run(self.extra_ops.target), [1, 4])
def testTDError(self):
"""Tests that td_error = target - q_tm1[a_tm1]."""
with self.test_session() as sess:
self.assertAllClose(sess.run(self.extra_ops.td_error), [0, 3])
def testLoss(self):
"""Tests that loss == 0.5 * td_error^2."""
with self.test_session() as sess:
# Loss is 0.5 * td_error^2
self.assertAllClose(sess.run(self.loss_op), [0, 4.5])
def testGradQtm1(self):
"""Tests that the gradients of negative loss are equal to the td_error."""
with self.test_session() as sess:
# Take gradients of the negative loss, so that the tests here check the
      # values propagated during gradient _descent_, rather than _ascent_.
gradients = tf.gradients([-self.loss_op], [self.q_tm1])
grad_q_tm1 = sess.run(gradients[0])
self.assertAllClose(grad_q_tm1, [[0, 0, 0], [0, 3, 0]])
def testNoOtherGradients(self):
"""Tests no gradient propagates through any tensors other than `q_tm1`."""
# Gradients are only defined for q_tm1, not any other input.
# Bellman residual variants could potentially generate a gradient wrt q_t.
gradients = tf.gradients([self.loss_op],
[self.r_t, self.a_tm1, self.pcont_t, self.v_t])
self.assertEqual(gradients, [None, None, None, None])
if __name__ == "__main__":
tf.test.main()
| trfl-master | trfl/action_value_ops_test.py |
# Copyright 2018 The trfl Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for dist_value_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import tensorflow.compat.v1 as tf
from trfl import dist_value_ops as rl
class CategoricalDistRLTest(tf.test.TestCase):
"""Abstract base class for Distributional RL value ops tests."""
def setUp(self):
super(CategoricalDistRLTest, self).setUp()
# Define both state- and action-value transitions here for the different
# learning rules tested in the subclasses.
self.atoms_tm1 = tf.constant([0.5, 1.0, 1.5], dtype=tf.float32)
self.atoms_t = tf.identity(self.atoms_tm1)
self.logits_q_tm1 = tf.constant(
[[[1, 1, 1], [0, 9, 9], [0, 9, 0], [0, 0, 0]],
[[9, 9, 0], [9, 0, 9], [0, 0, 0], [9, -9, 0]],
[[1, 1, 1], [0, 9, 9], [0, 0, 0], [0, 9, 0]],
[[9, 9, 0], [9, 0, 9], [0, 0, 0], [9, -9, 0]],
[[9, 9, 0], [9, 0, 9], [0, 0, 0], [9, -9, 0]]], dtype=tf.float32)
self.logits_q_t = tf.constant(
[[[1, 1, 1], [9, 0, 9], [1, 0, 0], [0, 0, 9]],
[[9, 9, 0], [9, 0, 0], [1, 1, 1], [9, -9, 0]],
[[1, 1, 1], [9, 0, 9], [0, 0, 9], [1, 0, 0]],
[[9, 9, 0], [9, 0, 0], [1, 1, 1], [9, -9, 0]],
[[9, 9, 0], [9, 0, 0], [0, 9, 9], [9, -9, 0]]], dtype=tf.float32)
# mean Q_t are approximately:
# 1.0 1.0 0.5 1.5
# 0.75 0.5 1.0 0.5
# 1.0 1.0 1.5 0.5
# 0.75 0.5 1.0 0.5
# 0.75 0.5 1.25 0.5
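    # (Mean Q here means sum_i softmax(logits)_i * atoms_i over the support
    # [0.5, 1.0, 1.5].)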
self.logits_v_tm1 = tf.constant(
[[0, 9, 0],
[9, 0, 9],
[0, 9, 0],
[9, 9, 0],
[9, 0, 9]], dtype=tf.float32)
self.logits_v_t = tf.constant(
[[0, 0, 9],
[1, 1, 1],
[0, 0, 9],
[1, 1, 1],
[0, 9, 9]], dtype=tf.float32)
self.a_tm1 = tf.constant([2, 1, 3, 0, 1], dtype=tf.int32)
self.r_t = tf.constant([0.5, 0., 0.5, 0.8, -0.1], dtype=tf.float32)
self.pcont_t = tf.constant([0.8, 1., 0.8, 0., 1.], dtype=tf.float32)
def assertEachInputRankAndCompatibilityChecked(self, nt, inputs,
invalid_inputs, nt_name):
"""Check class constructor raises exception if an input tensor is invalid.
Args:
nt: namedtuple to be tested.
inputs: list of (valid) inputs to class constructor.
invalid_inputs: list of invalid alternative inputs. Should be of same
length as `inputs`, so that each input can be swapped out for a broken
input individually.
nt_name: A string specifying the name of the namedtuple.
"""
for i, alt_input in enumerate(invalid_inputs):
broken_inputs = list(inputs)
broken_inputs[i] = alt_input
with self.assertRaisesRegexp(
ValueError,
"{}: Error in rank and/or compatibility check".format(nt_name)):
nt(*broken_inputs)
class CategoricalDistQLearningTest(CategoricalDistRLTest):
def setUp(self):
super(CategoricalDistQLearningTest, self).setUp()
self.inputs = [self.atoms_tm1, self.logits_q_tm1, self.a_tm1, self.r_t,
self.pcont_t, self.atoms_t, self.logits_q_t]
self.qlearning = rl.categorical_dist_qlearning(*self.inputs)
def testRankCheck(self):
alt_inputs = [tf.placeholder(tf.float32, ()) for _ in self.inputs]
self.assertEachInputRankAndCompatibilityChecked(
rl.categorical_dist_qlearning, self.inputs, alt_inputs,
"CategoricalDistQLearning")
def testCompatibilityCheck(self):
alt_inputs = [tf.placeholder(tf.float32, [1]) for _ in self.inputs]
self.assertEachInputRankAndCompatibilityChecked(
rl.categorical_dist_qlearning, self.inputs, alt_inputs,
"CategoricalDistQLearning")
def testTarget(self):
with self.test_session() as sess:
      # Target is the projection, onto the fixed support atoms_tm1, of the
      # distribution with support r_t + pcont_t * atoms_t and probabilities
      # from logits_q_t at the greedy action.
expected = np.array([[0.0, 0.0, 1.0],
[1/3, 1/3, 1/3],
[0.0, 0.0, 1.0],
[0.4, 0.6, 0.0],
[0.1, 0.5, 0.4]])
self.assertAllClose(
sess.run(self.qlearning.extra.target), expected, atol=1e-3)
def testLoss(self):
with self.test_session() as sess:
# Loss is CE between logits_q_tm1 [a_tm1] and target.
expected = np.array([9.0, 3.69, 9.0, 0.69, 5.19])
self.assertAllClose(sess.run(self.qlearning.loss), expected, atol=1e-2)
def testGradQtm1(self):
with self.test_session() as sess:
# Take gradients of the negative loss, so that the tests here check the
# values propagated during gradient _descent_, rather than _ascent_.
gradients = tf.gradients([-self.qlearning.loss], [self.logits_q_tm1])
grad_q_tm1 = sess.run(gradients[0])
# Correct gradient directions (including 0.0 for unused actions at t=tm1).
expected = np.zeros_like(grad_q_tm1)
expected[0, 2] = [-1, -1, 1]
expected[1, 1] = [-1, 1, -1]
expected[2, 3] = [-1, -1, 1]
expected[3, 0] = [-1, 1, -1]
expected[4, 1] = [-1, 1, -1]
self.assertAllClose(np.sign(grad_q_tm1), expected)
def testNoOtherGradients(self):
# Gradients are only defined for logits_q_tm1, not any other input.
# Bellman residual variants could potentially generate a gradient wrt q_t.
gradients = tf.gradients([self.qlearning.loss],
[self.logits_q_t, self.r_t, self.a_tm1,
self.pcont_t, self.atoms_t, self.atoms_tm1])
self.assertEqual(gradients, [None for _ in gradients])
class CategoricalDistDoubleQLearningTest(CategoricalDistRLTest):
def setUp(self):
super(CategoricalDistDoubleQLearningTest, self).setUp()
self.q_t_selector = tf.constant(
[[0, 2, 0, 5],
[0, 1, 2, 1],
[0, 2, 5, 0],
[0, 1, 2, 1],
[1, 2, 3, 1]], dtype=tf.float32)
self.inputs = [
self.atoms_tm1, self.logits_q_tm1, self.a_tm1, self.r_t, self.pcont_t,
self.atoms_t, self.logits_q_t, self.q_t_selector]
self.qlearning = rl.categorical_dist_double_qlearning(*self.inputs)
def testRankCheck(self):
alt_inputs = [tf.placeholder(tf.float32, ()) for _ in self.inputs]
self.assertEachInputRankAndCompatibilityChecked(
rl.categorical_dist_double_qlearning, self.inputs, alt_inputs,
"CategoricalDistDoubleQLearning")
def testCompatibilityCheck(self):
alt_inputs = [tf.placeholder(tf.float32, [1]) for _ in self.inputs]
self.assertEachInputRankAndCompatibilityChecked(
rl.categorical_dist_double_qlearning, self.inputs, alt_inputs,
"CategoricalDistDoubleQLearning")
def testTarget(self):
with self.test_session() as sess:
      # Target is the projection, onto the fixed support atoms_tm1, of the
      # distribution with support r_t + pcont_t * atoms_t and probabilities
      # from logits_q_t at the action selected by q_t_selector.
expected = np.array([[0.0, 0.0, 1.0],
[1/3, 1/3, 1/3],
[0.0, 0.0, 1.0],
[0.4, 0.6, 0.0],
[0.1, 0.5, 0.4]])
self.assertAllClose(
sess.run(self.qlearning.extra.target), expected, atol=1e-3)
def testLoss(self):
with self.test_session() as sess:
# Loss is CE between logits_q_tm1 [a_tm1] and target.
expected = np.array([9.0, 3.69, 9.0, 0.69, 5.19])
self.assertAllClose(sess.run(self.qlearning.loss), expected, atol=1e-2)
def testGradQtm1(self):
with self.test_session() as sess:
# Take gradients of the negative loss, so that the tests here check the
# values propagated during gradient _descent_, rather than _ascent_.
gradients = tf.gradients([-self.qlearning.loss], [self.logits_q_tm1])
grad_q_tm1 = sess.run(gradients[0])
# Correct gradient directions (including 0.0 for unused actions at t=tm1).
expected = np.zeros_like(grad_q_tm1)
expected[0, 2] = [-1, -1, 1]
expected[1, 1] = [-1, 1, -1]
expected[2, 3] = [-1, -1, 1]
expected[3, 0] = [-1, 1, -1]
expected[4, 1] = [-1, 1, -1]
self.assertAllClose(np.sign(grad_q_tm1), expected)
def testNoOtherGradients(self):
# Gradients are only defined for logits_q_tm1, not any other input.
# Bellman residual variants could potentially generate a gradient wrt q_t.
gradients = tf.gradients([self.qlearning.loss],
[self.logits_q_t, self.r_t, self.a_tm1,
self.pcont_t, self.atoms_t, self.atoms_tm1,
self.q_t_selector])
self.assertEqual(gradients, [None for _ in gradients])
class CategoricalDistTDLearningTest(CategoricalDistRLTest):
def setUp(self):
super(CategoricalDistTDLearningTest, self).setUp()
self.inputs = [self.atoms_tm1, self.logits_v_tm1, self.r_t, self.pcont_t,
self.atoms_t, self.logits_v_t]
self.tdlearning = rl.categorical_dist_td_learning(*self.inputs)
def testRankCheck(self):
alt_inputs = [tf.placeholder(tf.float32, ()) for _ in self.inputs]
self.assertEachInputRankAndCompatibilityChecked(
rl.categorical_dist_td_learning, self.inputs, alt_inputs,
"CategoricalDistTDLearning")
def testCompatibilityCheck(self):
alt_inputs = [tf.placeholder(tf.float32, [1]) for _ in self.inputs]
self.assertEachInputRankAndCompatibilityChecked(
rl.categorical_dist_td_learning, self.inputs, alt_inputs,
"CategoricalDistTDLearning")
def testTarget(self):
with self.test_session() as sess:
# Target is projected KL between r_t + pcont_t atoms_t and
# probabilities corresponding to logits_v_tm1.
expected = np.array([[0.0, 0.0, 1.0],
[1/3, 1/3, 1/3],
[0.0, 0.0, 1.0],
[0.4, 0.6, 0.0],
[0.1, 0.5, 0.4]])
self.assertAllClose(
sess.run(self.tdlearning.extra.target), expected, atol=1e-3)
def testLoss(self):
with self.test_session() as sess:
# Loss is CE between logits_v_tm1 and target.
expected = np.array([9.0, 3.69, 9.0, 0.69, 5.19])
self.assertAllClose(sess.run(self.tdlearning.loss), expected, atol=1e-2)
def testGradVtm1(self):
with self.test_session() as sess:
# Take gradients of the negative loss, so that the tests here check the
# values propagated during gradient _descent_, rather than _ascent_.
gradients = tf.gradients([-self.tdlearning.loss], [self.logits_v_tm1])
grad_v_tm1 = sess.run(gradients[0])
# Correct gradient directions.
expected = np.array([[-1, -1, 1],
[-1, 1, -1],
[-1, -1, 1],
[-1, 1, -1],
[-1, 1, -1]])
self.assertAllClose(np.sign(grad_v_tm1), expected)
def testNoOtherGradients(self):
# Gradients are only defined for logits_v_tm1, not any other input.
# Bellman residual variants could potentially generate a gradient wrt v_t.
gradients = tf.gradients([self.tdlearning.loss],
[self.logits_v_t, self.r_t, self.pcont_t,
self.atoms_t, self.atoms_tm1])
self.assertEqual(gradients, [None for _ in gradients])
if __name__ == "__main__":
tf.test.main()
| trfl-master | trfl/dist_value_ops_test.py |
# Copyright 2018 The trfl Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Deterministic Policy Gradient (DPG) ops.
These ops support training a value-based agent on control problems with
continuous action spaces. The agent's actions are assumed to be continuous
vectors of size `action_dimension`.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
# Dependency imports
import tensorflow.compat.v1 as tf
from trfl import base_ops
DPGExtra = collections.namedtuple("dpg_extra", ["q_max", "a_max", "dqda"])
def dpg(q_max, a_max, dqda_clipping=None, clip_norm=False, name="DpgLearning"):
"""Implements the Deterministic Policy Gradient (DPG) loss as a TensorFlow Op.
  This op implements the loss for the `actor` only; the `critic` can instead
  be updated by minimizing the `value_ops.td_learning` loss.
See "Deterministic Policy Gradient Algorithms" by Silver, Lever, Heess,
Degris, Wierstra, Riedmiller (http://proceedings.mlr.press/v32/silver14.pdf).
Args:
    q_max: Tensor holding Q-values generated by the Q network evaluated at the
      (state, a_max) pair, shape `[B]`.
a_max: Tensor holding the optimal action, shape `[B, action_dimension]`.
dqda_clipping: `int` or `float`, clips the gradient dqda element-wise
between `[-dqda_clipping, dqda_clipping]`.
    clip_norm: Whether to perform dqda clipping on the vector norm of the last
      dimension, or component-wise (default).
name: name to prefix ops created within this op.
Returns:
A namedtuple with fields:
* `loss`: a tensor containing the batch of losses, shape `[B]`.
* `extra`: a namedtuple with fields:
* `q_max`: Tensor holding the optimal Q values, `[B]`.
* `a_max`: Tensor holding the optimal action, `[B, action_dimension]`.
* `dqda`: Tensor holding the derivative dq/da, `[B, action_dimension]`.
Raises:
ValueError: If `q_max` doesn't depend on `a_max` or if `dqda_clipping <= 0`.
"""
# DPG op.
with tf.name_scope(name, values=[q_max, a_max]):
# Calculate the gradient dq/da.
dqda = tf.gradients([q_max], [a_max])[0]
# Check that `q_max` depends on `a_max`.
if dqda is None:
raise ValueError("q_max needs to be a function of a_max")
# Clipping the gradient dq/da.
if dqda_clipping is not None:
if dqda_clipping <= 0:
raise ValueError("dqda_clipping should be bigger than 0, {} found"
.format(dqda_clipping))
if clip_norm:
dqda = tf.clip_by_norm(dqda, dqda_clipping, axes=-1)
else:
dqda = tf.clip_by_value(dqda, -1. * dqda_clipping, dqda_clipping)
    # target_a ensures that the correct gradient is propagated during backprop.
    target_a = dqda + a_max
    # Stop the gradient from flowing back through the Q network.
    target_a = tf.stop_gradient(target_a)
    # Gradients therefore only flow through the actor network.
    loss = 0.5 * tf.reduce_sum(tf.square(target_a - a_max), axis=-1)
return base_ops.LossOutput(
loss, DPGExtra(q_max=q_max, a_max=a_max, dqda=dqda))
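# A minimal usage sketch: `actor_network` and `critic_network` are hypothetical
# user-supplied callables (not part of this module); only `dpg` and standard
# TensorFlow ops below are real APIs, and the output shapes are assumptions.
def _example_dpg_actor_loss(observations, actor_network, critic_network):
  """Builds a scalar DPG actor loss from hypothetical network callables."""
  a_max = actor_network(observations)          # assumed [B, action_dimension]
  q_max = critic_network(observations, a_max)  # assumed [B]
  # Gradients flow into the actor through a_max; the critic would be trained
  # separately, e.g. by minimizing value_ops.td_learning.
  loss, _ = dpg(q_max, a_max, dqda_clipping=1.0)
  return tf.reduce_mean(loss)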
| trfl-master | trfl/dpg_ops.py |
# Copyright 2018 The trfl Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for policy_gradient_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
from absl.testing import parameterized
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow.compat.v1 as tf
import tensorflow_probability as tfp
import tree as nest
from trfl import policy_gradient_ops as pg_ops
class MockDistribution(object):
"""A mock univariate distribution with a given batch shape."""
def __init__(self, batch_shape, parameter):
self.batch_shape = tf.TensorShape(batch_shape)
self.event_shape = tf.TensorShape([])
self._parameter = parameter
entropy = np.arange(np.prod(batch_shape)).reshape(batch_shape)
entropy *= parameter * parameter
self._entropy = tf.constant(entropy, dtype=tf.float32)
def log_prob(self, actions):
return tf.to_float(self._parameter * actions)
def entropy(self):
return self._entropy
def _setup_pgops_mock(sequence_length=3, batch_size=2, num_policies=3):
"""Setup ops using mock distribution for numerical tests."""
t, b = sequence_length, batch_size
policies = [MockDistribution((t, b), i + 1) for i in xrange(num_policies)]
actions = [tf.constant(np.arange(t * b).reshape((t, b)))
for i in xrange(num_policies)]
if num_policies == 1:
policies, actions = policies[0], actions[0]
entropy_scale_op = lambda policies: len(nest.flatten(policies))
return policies, actions, entropy_scale_op
def _setup_pgops(multi_actions=False,
normalise_entropy=False,
sequence_length=4,
batch_size=2,
num_mvn_actions=3,
num_discrete_actions=5):
"""Setup polices, actions, policy_vars and (optionally) entropy_scale_op."""
t = sequence_length
b = batch_size
a = num_mvn_actions
c = num_discrete_actions
# MVN actions
mu = tf.placeholder(tf.float32, shape=(t, b, a))
sigma = tf.placeholder(tf.float32, shape=(t, b, a))
mvn_policies = tfp.distributions.MultivariateNormalDiag(
loc=mu, scale_diag=sigma)
mvn_actions = tf.placeholder(tf.float32, shape=(t, b, a))
mvn_params = [mu, sigma]
if multi_actions:
# Create a list of n_cat Categorical distributions
n_cat = 2
cat_logits = [tf.placeholder(tf.float32, shape=(t, b, c))
for _ in xrange(n_cat)]
cat_policies = [tfp.distributions.Categorical(logits=logits)
for logits in cat_logits]
cat_actions = [tf.placeholder(tf.int32, shape=(t, b))
for _ in xrange(n_cat)]
cat_params = [[logits] for logits in cat_logits]
# Create an exponential distribution
exp_rate = tf.placeholder(tf.float32, shape=(t, b))
exp_policies = tfp.distributions.Exponential(rate=exp_rate)
exp_actions = tf.placeholder(tf.float32, shape=(t, b))
exp_params = [exp_rate]
# Nest all policies and nest corresponding actions and parameters
policies = [mvn_policies, cat_policies, exp_policies]
actions = [mvn_actions, cat_actions, exp_actions]
policy_vars = [mvn_params, cat_params, exp_params]
else:
# No nested policy structure
policies = mvn_policies
actions = mvn_actions
policy_vars = mvn_params
entropy_scale_op = None
if normalise_entropy:
# Scale op that divides by total action dims
def scale_op(policies):
policies = nest.flatten(policies)
num_dims = [tf.to_float(tf.reduce_prod(policy.event_shape_tensor()))
for policy in policies]
return 1. / tf.reduce_sum(tf.stack(num_dims))
entropy_scale_op = scale_op
return policies, actions, policy_vars, entropy_scale_op
class PolicyGradientTest(parameterized.TestCase, tf.test.TestCase):
"""Tests for policy_gradient op."""
def _setUp_loss(self, sequence_length, batch_size, action_dim):
# Use default single MVN action setup
policies, self._actions, self._policy_vars, _ = _setup_pgops(
sequence_length=sequence_length,
batch_size=batch_size,
num_mvn_actions=action_dim)
self._action_values = tf.placeholder(
tf.float32, shape=(sequence_length, batch_size))
self._loss = pg_ops.policy_gradient(
policies, self._actions, self._action_values)
@parameterized.named_parameters(('Fixed', 4, 2, 3),
('DynamicLength', None, 2, 3),
('DynamicBatch', 4, None, 3),
('DynamicBatchAndLength', None, None, 3),
('DynamicAll', None, None, None))
def testLoss(self, sequence_length, batch_size, action_dim):
self._setUp_loss(sequence_length, batch_size, action_dim)
expected_loss_shape = [sequence_length, batch_size]
self.assertEqual(self._loss.get_shape().as_list(), expected_loss_shape)
@parameterized.named_parameters(('Fixed', 4, 2, 3),
('DynamicLength', None, 2, 3),
('DynamicBatch', 4, None, 3),
('DynamicBatchAndLength', None, None, 3),
('DynamicAll', None, None, None))
def testGradients(self, sequence_length, batch_size, action_dim):
self._setUp_loss(sequence_length, batch_size, action_dim)
total_loss = tf.reduce_sum(self._loss)
for policy_var in self._policy_vars:
gradients = tf.gradients(total_loss, policy_var)
self.assertEqual(gradients[0].get_shape().as_list(),
policy_var.get_shape().as_list())
gradients = tf.gradients([total_loss], [self._actions, self._action_values])
self.assertEqual(gradients, [None, None])
def testRun(self):
policies, actions, _ = _setup_pgops_mock(
sequence_length=3, batch_size=2, num_policies=1)
action_values = tf.constant([[-0.5, 0.5], [-1.0, 0.5], [1.5, -0.5]])
loss = pg_ops.policy_gradient(policies, actions, action_values)
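    # With MockDistribution(parameter=1), log_prob(a) = a, so the expected
    # loss below is just -a * action_values elementwise.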
expected_loss = [[0., -0.5], [2., -1.5], [-6., 2.5]]
with self.test_session() as sess:
# Expected values are from manual calculation in a Colab.
self.assertAllEqual(sess.run(loss), expected_loss)
class PolicyGradientLossTest(parameterized.TestCase, tf.test.TestCase):
"""Tests for policy_gradient op."""
def _setUp_loss(self, multi_actions, batch_size=2):
# Use fixed sizes
sequence_length = 4
policies, self._actions, self._policy_vars, _ = _setup_pgops(
multi_actions=multi_actions,
sequence_length=sequence_length,
batch_size=batch_size,
num_mvn_actions=3,
num_discrete_actions=5)
self._action_values = tf.placeholder(
tf.float32, shape=(sequence_length, batch_size))
self._loss = pg_ops.policy_gradient_loss(
policies, self._actions, self._action_values, self._policy_vars)
@parameterized.named_parameters(('SingleAction', False),
('MultiActions', True))
def testLoss(self, multi_actions):
batch_size = 2
self._setUp_loss(multi_actions, batch_size=batch_size)
self.assertEqual(self._loss.get_shape(), tf.TensorShape(batch_size))
@parameterized.named_parameters(('SingleAction', False),
('MultiActions', True))
def testGradients(self, multi_actions):
self._setUp_loss(multi_actions)
total_loss = tf.reduce_sum(self._loss)
for policy_var in nest.flatten(self._policy_vars):
gradients = tf.gradients(total_loss, policy_var)
self.assertEqual(gradients[0].get_shape(), policy_var.get_shape())
def testRun(self):
policies, actions, _ = _setup_pgops_mock(
sequence_length=3, batch_size=2, num_policies=3)
action_values = tf.constant([[-0.5, 0.5], [-1.0, 0.5], [1.5, -0.5]])
loss = pg_ops.policy_gradient_loss(policies, actions, action_values)
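    # The three mock policies have parameters 1, 2 and 3, so their log_probs
    # sum to 6 * a; the expected loss below is sum_t(-6 * a * action_values).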
expected_loss = [-24., 3.]
with self.test_session() as sess:
# Expected values are from manual calculation in a Colab.
self.assertAllEqual(sess.run(loss), expected_loss)
class EntropyCostTest(parameterized.TestCase, tf.test.TestCase):
"""Tests for policy_entropy op."""
def _setUp_entropy(self, multi_actions, normalise,
sequence_length, batch_size, num_mvn_actions=3):
policies, _, self._policy_vars, scale_op = _setup_pgops(
multi_actions=multi_actions,
sequence_length=sequence_length,
normalise_entropy=normalise,
batch_size=batch_size,
num_mvn_actions=num_mvn_actions,
num_discrete_actions=5)
self._policy_entropy_loss = pg_ops.policy_entropy_loss(
policies, self._policy_vars, scale_op=scale_op)
@parameterized.named_parameters(('SingleAction', False, False),
('MultiActions', True, False),
('SingleActionNorm', False, True),
('MultiActionsNorm', True, True))
def testEntropyLoss(self, multi_actions, normalise):
sequence_length = 4
batch_size = 2
self._setUp_entropy(
multi_actions, normalise, sequence_length, batch_size)
entropy = self._policy_entropy_loss.extra.entropy
loss = self._policy_entropy_loss.loss
expected_shape = [sequence_length, batch_size]
self.assertEqual(entropy.get_shape(), expected_shape)
self.assertEqual(loss.get_shape(), expected_shape)
@parameterized.named_parameters(('Length', None, 2, 3, False),
('Batch', 4, None, 3, False),
('BatchAndLength', None, None, 3, False),
('All', None, None, None, False),
('LengthNorm', None, 2, 3, True),
('BatchNorm', 4, None, 3, True),
('BatchAndLengthNorm', None, None, 3, True),
('AllNorm', None, None, None, True))
def testEntropyLossMultiActionDynamic(self, sequence_length, batch_size,
action_dim, normalise):
self._setUp_entropy(
multi_actions=True,
normalise=normalise,
sequence_length=sequence_length,
batch_size=batch_size,
num_mvn_actions=action_dim)
entropy = self._policy_entropy_loss.extra.entropy
loss = self._policy_entropy_loss.loss
expected_shape = [sequence_length, batch_size]
self.assertEqual(entropy.get_shape().as_list(), expected_shape)
self.assertEqual(loss.get_shape().as_list(), expected_shape)
@parameterized.named_parameters(('SingleAction', False, False),
('MultiActions', True, False),
('SingleActionNorm', False, True),
('MultiActionsNorm', True, True))
def testGradient(self, multi_actions, normalise):
sequence_length = 4
batch_size = 2
self._setUp_entropy(
multi_actions, normalise, sequence_length, batch_size)
loss = self._policy_entropy_loss.loss
# MVN mu has None gradient
self.assertIsNone(tf.gradients(loss, nest.flatten(self._policy_vars)[0])[0])
for policy_var in nest.flatten(self._policy_vars)[1:]:
gradient = tf.gradients(loss, policy_var)[0]
self.assertEqual(gradient.get_shape(), policy_var.get_shape())
def testRun(self):
policies, _, scale_op = _setup_pgops_mock(
sequence_length=3, batch_size=2, num_policies=3)
loss, extra = pg_ops.policy_entropy_loss(policies, scale_op=scale_op)
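    # Each mock policy's entropy is arange(6).reshape(3, 2) * k**2 for
    # k = 1, 2, 3, which sums to arange(6).reshape(3, 2) * 14; the scale op
    # returns the number of policies (3).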
expected_entropy = [[0., 14.], [28., 42.], [56., 70.]]
expected_scaling = 3
expected_loss = (-expected_scaling * np.array(expected_entropy))
with self.test_session() as sess:
# Expected values are from manual calculation in a Colab.
self.assertAllEqual(sess.run(extra.entropy), expected_entropy)
self.assertAllEqual(sess.run(loss), expected_loss)
class SequenceA2CLossTest(parameterized.TestCase, tf.test.TestCase):
def _setUp_a2c_loss(self,
multi_actions=False,
normalise_entropy=False,
gae_lambda=1,
sequence_length=4,
batch_size=2,
num_mvn_actions=3,
num_discrete_actions=5):
policies, self._actions, self._policy_vars, entropy_scale_op = _setup_pgops(
multi_actions=multi_actions,
sequence_length=sequence_length,
normalise_entropy=normalise_entropy,
batch_size=batch_size,
num_mvn_actions=num_mvn_actions,
num_discrete_actions=num_discrete_actions)
t, b = sequence_length, batch_size
entropy_cost, baseline_cost = 0.1, 0.2
self._baseline_values = tf.placeholder(tf.float32, shape=(t, b))
self._rewards = tf.placeholder(tf.float32, shape=(t, b))
self._pcontinues = tf.placeholder(tf.float32, shape=(t, b))
self._bootstrap_value = tf.placeholder(tf.float32, shape=(b,))
self._loss, self._extra = pg_ops.sequence_a2c_loss(
policies=policies,
baseline_values=self._baseline_values,
actions=self._actions,
rewards=self._rewards,
pcontinues=self._pcontinues,
bootstrap_value=self._bootstrap_value,
policy_vars=self._policy_vars,
lambda_=gae_lambda,
entropy_cost=entropy_cost,
baseline_cost=baseline_cost,
entropy_scale_op=entropy_scale_op)
@parameterized.named_parameters(
('SingleActionEntropyNorm', False, True, 1),
('SingleActionNoEntropyNorm', False, False, 1),
('MultiActionsEntropyNorm', True, True, 1),
('MultiActionsNoEntropyNorm', True, False, 1),
('SingleActionEntropyNormGAE', False, True, 0.9),
('SingleActionNoEntropyNormGAE', False, False, 0.9),
('MultiActionsEntropyNormGAE', True, True, 0.9),
('MultiActionsNoEntropyNormGAE', True, False, 0.9),
)
def testShapeInference(self, multi_actions, normalise_entropy, gae_lambda):
sequence_length = 4
batch_size = 2
self._setUp_a2c_loss(multi_actions, normalise_entropy, gae_lambda,
sequence_length=sequence_length, batch_size=batch_size)
sequence_batch_shape = tf.TensorShape([sequence_length, batch_size])
batch_shape = tf.TensorShape(batch_size)
self.assertEqual(self._extra.discounted_returns.get_shape(),
sequence_batch_shape)
self.assertEqual(self._extra.advantages.get_shape(), sequence_batch_shape)
self.assertEqual(self._extra.policy_gradient_loss.get_shape(), batch_shape)
self.assertEqual(self._extra.baseline_loss.get_shape(), batch_shape)
self.assertEqual(self._extra.entropy.get_shape(), batch_shape)
self.assertEqual(self._extra.entropy_loss.get_shape(), batch_shape)
self.assertEqual(self._loss.get_shape(), batch_shape)
@parameterized.named_parameters(('Length', None, 4, 3),
('Batch', 5, None, 3),
('BatchAndLength', None, None, 3),
('All', None, None, None))
def testShapeInferenceSingleActionNoEntropyNormDynamic(
self, sequence_length, batch_size, num_actions):
self._setUp_a2c_loss(sequence_length=sequence_length,
batch_size=batch_size,
num_mvn_actions=num_actions,
num_discrete_actions=num_actions,
multi_actions=False,
normalise_entropy=False,
gae_lambda=1.)
t, b = sequence_length, batch_size
self.assertEqual(
self._extra.discounted_returns.get_shape().as_list(), [t, b])
self.assertEqual(self._extra.advantages.get_shape().as_list(), [t, b])
self.assertEqual(
self._extra.policy_gradient_loss.get_shape().as_list(), [b])
self.assertEqual(self._extra.entropy.get_shape().as_list(), [b])
self.assertEqual(self._extra.entropy_loss.get_shape().as_list(), [b])
self.assertEqual(self._loss.get_shape().as_list(), [b])
@parameterized.named_parameters(
('SingleAction', False, 1),
('MultiActions', True, 1),
('SingleActionGAE', False, 0.9),
('MultiActionsGAE', True, 0.9),
)
def testInvalidGradients(self, multi_actions, gae_lambda):
self._setUp_a2c_loss(multi_actions=multi_actions, gae_lambda=gae_lambda)
ins = nest.flatten(
[self._actions, self._rewards, self._pcontinues, self._bootstrap_value])
outs = [None] * len(ins)
self.assertAllEqual(tf.gradients(
self._extra.discounted_returns, ins), outs)
self.assertAllEqual(tf.gradients(
self._extra.policy_gradient_loss, ins), outs)
self.assertAllEqual(tf.gradients(self._extra.entropy_loss, ins), outs)
self.assertAllEqual(tf.gradients(self._extra.baseline_loss, ins), outs)
self.assertAllEqual(tf.gradients(self._loss, ins), outs)
@parameterized.named_parameters(
('SingleAction', False),
('MultiActions', True),
)
def testGradientsPolicyGradientLoss(self, multi_actions):
self._setUp_a2c_loss(multi_actions=multi_actions)
loss = self._extra.policy_gradient_loss
for policy_var in nest.flatten(self._policy_vars):
gradient = tf.gradients(loss, policy_var)[0]
self.assertEqual(gradient.get_shape(), policy_var.get_shape())
self.assertAllEqual(tf.gradients(loss, self._baseline_values), [None])
@parameterized.named_parameters(
('SingleActionNoEntropyNorm', False, False),
('MultiActionsNoEntropyNorm', True, False),
('SingleActionEntropyNorm', False, True),
('MultiActionsEntropyNorm', True, True),
)
def testGradientsEntropy(self, multi_actions, normalise_entropy):
self._setUp_a2c_loss(multi_actions=multi_actions,
normalise_entropy=normalise_entropy)
loss = self._extra.entropy_loss
# MVN mu has None gradient for entropy
self.assertIsNone(tf.gradients(loss, nest.flatten(self._policy_vars)[0])[0])
for policy_var in nest.flatten(self._policy_vars)[1:]:
gradient = tf.gradients(loss, policy_var)[0]
self.assertEqual(gradient.get_shape(), policy_var.get_shape())
self.assertAllEqual(tf.gradients(loss, self._baseline_values), [None])
def testGradientsBaselineLoss(self):
self._setUp_a2c_loss()
loss = self._extra.baseline_loss
gradient = tf.gradients(loss, self._baseline_values)[0]
self.assertEqual(gradient.get_shape(), self._baseline_values.get_shape())
policy_vars = nest.flatten(self._policy_vars)
self.assertAllEqual(tf.gradients(loss, policy_vars),
[None]*len(policy_vars))
@parameterized.named_parameters(
('SingleAction', False),
('MultiActions', True),
)
def testGradientsTotalLoss(self, multi_actions):
self._setUp_a2c_loss(multi_actions=multi_actions)
loss = self._loss
gradient = tf.gradients(loss, self._baseline_values)[0]
self.assertEqual(gradient.get_shape(), self._baseline_values.get_shape())
for policy_var in nest.flatten(self._policy_vars):
gradient = tf.gradients(loss, policy_var)[0]
self.assertEqual(gradient.get_shape(), policy_var.get_shape())
def testRun(self):
t, b = 3, 2
policies, actions, entropy_scale_op = _setup_pgops_mock(
sequence_length=t, batch_size=b, num_policies=3)
baseline_values = tf.constant(np.arange(-3, 3).reshape((t, b)),
dtype=tf.float32)
rewards = tf.constant(np.arange(-2, 4).reshape((t, b)), dtype=tf.float32)
pcontinues = tf.ones(shape=(t, b), dtype=tf.float32)
bootstrap_value = tf.constant([-2., 4.], dtype=tf.float32)
self._loss, self._extra = pg_ops.sequence_a2c_loss(
policies=policies,
baseline_values=baseline_values,
actions=actions,
rewards=rewards,
pcontinues=pcontinues,
bootstrap_value=bootstrap_value,
entropy_cost=0.5,
baseline_cost=2.,
entropy_scale_op=entropy_scale_op)
with self.test_session() as sess:
# Expected values are from manual calculation in a Colab.
self.assertAllEqual(
sess.run(self._extra.baseline_loss), [3., 170.])
self.assertAllEqual(
sess.run(self._extra.policy_gradient_loss), [12., -348.])
self.assertAllEqual(sess.run(self._extra.entropy_loss), [-126., -189.])
self.assertAllEqual(sess.run(self._loss), [-111., -367.])
if __name__ == '__main__':
tf.test.main()
| trfl-master | trfl/policy_gradient_ops_test.py |
# Copyright 2018 The trfl Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for clipping_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import tensorflow.compat.v1 as tf
from trfl import clipping_ops
class HuberLossTest(tf.test.TestCase):
def testValue(self):
with self.test_session():
quadratic_linear_boundary = 2
xs = np.array(
          [-3.5, -2.1, -2, -1.9, -1, -0.5, 0, 0.5, 1, 1.9, 2, 2.1, 3.5])
ys = clipping_ops.huber_loss(xs, quadratic_linear_boundary).eval()
d = quadratic_linear_boundary
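      # The Huber loss under test is piecewise:
      #   0.5 * x**2                  for |x| <= d,
      #   0.5 * d**2 + d * (|x| - d)  for |x| > d.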
# Check values for x <= -2
ys_lo = ys[xs <= -d]
xs_lo = xs[xs <= -d]
expected_ys_lo = [0.5 * d**2 + d * (-x - d) for x in xs_lo]
self.assertAllClose(ys_lo, expected_ys_lo)
# Check values for x >= 2
ys_hi = ys[xs >= d]
xs_hi = xs[xs >= d]
expected_ys_hi = [0.5 * d**2 + d * (x - d) for x in xs_hi]
self.assertAllClose(ys_hi, expected_ys_hi)
# Check values for x in (-2, 2)
ys_mid = ys[np.abs(xs) < d]
xs_mid = xs[np.abs(xs) < d]
expected_ys_mid = [0.5 * x**2 for x in xs_mid]
self.assertAllClose(ys_mid, expected_ys_mid)
def testGradient(self):
with self.test_session() as sess:
x = tf.placeholder(tf.float64)
quadratic_linear_boundary = 3
loss = clipping_ops.huber_loss(x, quadratic_linear_boundary)
      xs = np.array(
          [-5, -4, -3.1, -3, -2.9, -2, -1, 0, 1, 2, 2.9, 3, 3.1, 4, 5])
grads = sess.run(tf.gradients([loss], [x]), feed_dict={x: xs})[0]
self.assertTrue(np.all(np.abs(grads) <= quadratic_linear_boundary))
# Everything <= -3 should have gradient -3.
grads_lo = grads[xs <= -quadratic_linear_boundary]
self.assertAllEqual(grads_lo,
[-quadratic_linear_boundary] * grads_lo.shape[0])
# Everything >= 3 should have gradient 3.
grads_hi = grads[xs >= quadratic_linear_boundary]
self.assertAllEqual(grads_hi,
[quadratic_linear_boundary] * grads_hi.shape[0])
# x in (-3, 3) should have gradient x.
grads_mid = grads[np.abs(xs) <= quadratic_linear_boundary]
xs_mid = xs[np.abs(xs) <= quadratic_linear_boundary]
self.assertAllEqual(grads_mid, xs_mid)
if __name__ == "__main__":
tf.test.main()
| trfl-master | trfl/clipping_ops_test.py |
# Copyright 2018 The trfl Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tensorflow ops for common Distributional RL learning rules.
Distributions are taken to be categorical over a support of 'N' distinct atoms,
which are always specified in ascending order.
These ops define state/action value distribution learning rules for discrete,
scalar action spaces. Actions must be represented as indices in the range
`[0, K)` where `K` is the number of distinct actions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
# Dependency imports
import tensorflow.compat.v1 as tf
from trfl import base_ops
from trfl import distribution_ops
Extra = collections.namedtuple("dist_value_extra", ["target"])
_l2_project = distribution_ops.l2_project
def _slice_with_actions(embeddings, actions):
"""Slice a Tensor.
  Take embeddings of the form [batch_size, num_actions, embed_dim]
  and actions of the form [batch_size], and return the embeddings
  sliced at each batch element's action, i.e. embeddings[b, actions[b], :].
  Args:
    embeddings: Tensor of embeddings to index.
    actions: int Tensor to use as an index into embeddings.
  Returns:
    Tensor of embeddings indexed by actions.
"""
batch_size, num_actions = embeddings.get_shape()[:2]
# Values are the 'values' in a sparse tensor we will be setting
act_indx = tf.cast(actions, tf.int64)[:, None]
values = tf.reshape(tf.cast(tf.ones(tf.shape(actions)), tf.bool), [-1])
# Create a range for each index into the batch
act_range = tf.range(0, batch_size, dtype=tf.int64)[:, None]
# Combine this into coordinates with the action indices
indices = tf.concat([act_range, act_indx], 1)
actions_mask = tf.SparseTensor(indices, values, [batch_size, num_actions])
actions_mask = tf.stop_gradient(
tf.sparse_tensor_to_dense(actions_mask, default_value=False))
sliced_emb = tf.boolean_mask(embeddings, actions_mask)
return sliced_emb
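# For reference, a minimal sketch of the equivalent indexing (not used by the
# code above): with `embeddings` of shape [B, A, D] and `actions` of shape
# [B], `_slice_with_actions` returns embeddings[b, actions[b], :] for each
# batch element b, i.e. the same result as
#   batch_range = tf.range(tf.shape(embeddings)[0], dtype=actions.dtype)
#   tf.gather_nd(embeddings, tf.stack([batch_range, actions], axis=-1))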
def categorical_dist_qlearning(atoms_tm1,
logits_q_tm1,
a_tm1,
r_t,
pcont_t,
atoms_t,
logits_q_t,
name="CategoricalDistQLearning"):
"""Implements Distributional Q-learning as TensorFlow ops.
The function assumes categorical value distributions parameterized by logits.
See "A Distributional Perspective on Reinforcement Learning" by Bellemare,
Dabney and Munos. (https://arxiv.org/abs/1707.06887).
Args:
atoms_tm1: 1-D tensor containing atom values for first timestep,
shape `[num_atoms]`.
logits_q_tm1: Tensor holding logits for first timestep in a batch of
transitions, shape `[B, num_actions, num_atoms]`.
a_tm1: Tensor holding action indices, shape `[B]`.
r_t: Tensor holding rewards, shape `[B]`.
pcont_t: Tensor holding pcontinue values, shape `[B]`.
atoms_t: 1-D tensor containing atom values for second timestep,
shape `[num_atoms]`.
logits_q_t: Tensor holding logits for second timestep in a batch of
transitions, shape `[B, num_actions, num_atoms]`.
name: name to prefix ops created by this function.
Returns:
A namedtuple with fields:
* `loss`: a tensor containing the batch of losses, shape `[B]`.
* `extra`: a namedtuple with fields:
* `target`: a tensor containing the values that `q_tm1` at actions
`a_tm1` are regressed towards, shape `[B, num_atoms]`.
Raises:
ValueError: If the tensors do not have the correct rank or compatibility.
"""
# Rank and compatibility checks.
assertion_lists = [[logits_q_tm1, logits_q_t], [a_tm1, r_t, pcont_t],
[atoms_tm1, atoms_t]]
base_ops.wrap_rank_shape_assert(assertion_lists, [3, 1, 1], name)
# Categorical distributional Q-learning op.
with tf.name_scope(
name,
values=[
atoms_tm1, logits_q_tm1, a_tm1, r_t, pcont_t, atoms_t, logits_q_t
]):
with tf.name_scope("target"):
# Scale and shift time-t distribution atoms by discount and reward.
target_z = r_t[:, None] + pcont_t[:, None] * atoms_t[None, :]
# Convert logits to distribution, then find greedy action in state s_t.
q_t_probs = tf.nn.softmax(logits_q_t)
q_t_mean = tf.reduce_sum(q_t_probs * atoms_t, 2)
pi_t = tf.argmax(q_t_mean, 1, output_type=tf.int32)
# Compute distribution for greedy action.
p_target_z = _slice_with_actions(q_t_probs, pi_t)
# Project using the Cramer distance
target = tf.stop_gradient(_l2_project(target_z, p_target_z, atoms_tm1))
logit_qa_tm1 = _slice_with_actions(logits_q_tm1, a_tm1)
loss = tf.nn.softmax_cross_entropy_with_logits(
logits=logit_qa_tm1, labels=target)
return base_ops.LossOutput(loss, Extra(target))
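# A minimal usage sketch: the fixed support and the `num_atoms=51` default
# follow the common C51 convention; the logits, actions, rewards and
# pcontinues are assumed to come from a hypothetical categorical Q-network
# and replay batch, and only the trfl/TensorFlow calls below are real APIs.
def _example_categorical_dist_qlearning_loss(
    logits_q_tm1, a_tm1, r_t, pcont_t, logits_q_t,
    vmin=-10.0, vmax=10.0, num_atoms=51):
  """Builds a scalar categorical Q-learning loss over a fixed support."""
  atoms = tf.linspace(vmin, vmax, num_atoms)  # ascending support, [num_atoms]
  loss, unused_extra = categorical_dist_qlearning(
      atoms, logits_q_tm1, a_tm1, r_t, pcont_t, atoms, logits_q_t)
  return tf.reduce_mean(loss)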
def categorical_dist_double_qlearning(atoms_tm1,
logits_q_tm1,
a_tm1,
r_t,
pcont_t,
atoms_t,
logits_q_t,
q_t_selector,
name="CategoricalDistDoubleQLearning"):
"""Implements Distributional Double Q-learning as TensorFlow ops.
The function assumes categorical value distributions parameterized by logits,
and combines distributional RL with double Q-learning.
See "Rainbow: Combining Improvements in Deep Reinforcement Learning" by
Hessel, Modayil, van Hasselt, Schaul et al.
(https://arxiv.org/abs/1710.02298).
Args:
atoms_tm1: 1-D tensor containing atom values for first timestep,
shape `[num_atoms]`.
logits_q_tm1: Tensor holding logits for first timestep in a batch of
transitions, shape `[B, num_actions, num_atoms]`.
a_tm1: Tensor holding action indices, shape `[B]`.
r_t: Tensor holding rewards, shape `[B]`.
pcont_t: Tensor holding pcontinue values, shape `[B]`.
atoms_t: 1-D tensor containing atom values for second timestep,
shape `[num_atoms]`.
logits_q_t: Tensor holding logits for second timestep in a batch of
transitions, shape `[B, num_actions, num_atoms]`.
q_t_selector: Tensor holding another set of Q-values for second timestep
in a batch of transitions, shape `[B, num_actions]`.
These values are used for estimating the best action. In Double DQN they
come from the online network.
name: name to prefix ops created by this function.
Returns:
A namedtuple with fields:
* `loss`: Tensor containing the batch of losses, shape `[B]`.
* `extra`: a namedtuple with fields:
* `target`: Tensor containing the values that `q_tm1` at actions
`a_tm1` are regressed towards, shape `[B, num_atoms]` .
Raises:
ValueError: If the tensors do not have the correct rank or compatibility.
"""
# Rank and compatibility checks.
assertion_lists = [[logits_q_tm1, logits_q_t], [a_tm1, r_t, pcont_t],
[atoms_tm1, atoms_t], [q_t_selector]]
base_ops.wrap_rank_shape_assert(assertion_lists, [3, 1, 1, 2], name)
# Categorical distributional double Q-learning op.
with tf.name_scope(
name,
values=[
atoms_tm1, logits_q_tm1, a_tm1, r_t, pcont_t, atoms_t, logits_q_t,
q_t_selector
]):
with tf.name_scope("target"):
# Scale and shift time-t distribution atoms by discount and reward.
target_z = r_t[:, None] + pcont_t[:, None] * atoms_t[None, :]
# Convert logits to distribution, then find greedy policy action in
# state s_t.
q_t_probs = tf.nn.softmax(logits_q_t)
pi_t = tf.argmax(q_t_selector, 1, output_type=tf.int32)
# Compute distribution for greedy action.
p_target_z = _slice_with_actions(q_t_probs, pi_t)
# Project using the Cramer distance
target = tf.stop_gradient(_l2_project(target_z, p_target_z, atoms_tm1))
logit_qa_tm1 = _slice_with_actions(logits_q_tm1, a_tm1)
loss = tf.nn.softmax_cross_entropy_with_logits(
logits=logit_qa_tm1, labels=target)
return base_ops.LossOutput(loss, Extra(target))
def categorical_dist_td_learning(atoms_tm1,
logits_v_tm1,
r_t,
pcont_t,
atoms_t,
logits_v_t,
name="CategoricalDistTDLearning"):
"""Implements Distributional TD-learning as TensorFlow ops.
The function assumes categorical value distributions parameterized by logits.
See "A Distributional Perspective on Reinforcement Learning" by Bellemare,
Dabney and Munos. (https://arxiv.org/abs/1707.06887).
Args:
atoms_tm1: 1-D tensor containing atom values for first timestep,
shape `[num_atoms]`.
logits_v_tm1: Tensor holding logits for first timestep in a batch of
transitions, shape `[B, num_atoms]`.
r_t: Tensor holding rewards, shape `[B]`.
pcont_t: Tensor holding pcontinue values, shape `[B]`.
atoms_t: 1-D tensor containing atom values for second timestep,
shape `[num_atoms]`.
logits_v_t: Tensor holding logits for second timestep in a batch of
transitions, shape `[B, num_atoms]`.
name: name to prefix ops created by this function.
Returns:
A namedtuple with fields:
* `loss`: Tensor containing the batch of losses, shape `[B]`.
* `extra`: A namedtuple with fields:
* `target`: Tensor containing the values that `v_tm1` are
regressed towards, shape `[B, num_atoms]`.
Raises:
ValueError: If the tensors do not have the correct rank or compatibility.
"""
# Rank and compatibility checks.
assertion_lists = [[logits_v_tm1, logits_v_t], [r_t, pcont_t],
[atoms_tm1, atoms_t]]
base_ops.wrap_rank_shape_assert(assertion_lists, [2, 1, 1], name)
# Categorical distributional TD-learning op.
with tf.name_scope(
name, values=[atoms_tm1, logits_v_tm1, r_t, pcont_t, atoms_t,
logits_v_t]):
with tf.name_scope("target"):
# Scale and shift time-t distribution atoms by discount and reward.
target_z = r_t[:, None] + pcont_t[:, None] * atoms_t[None, :]
v_t_probs = tf.nn.softmax(logits_v_t)
# Project using the Cramer distance
target = tf.stop_gradient(_l2_project(target_z, v_t_probs, atoms_tm1))
loss = tf.nn.softmax_cross_entropy_with_logits(
logits=logits_v_tm1, labels=target)
return base_ops.LossOutput(loss, Extra(target))
| trfl-master | trfl/dist_value_ops.py |
# Copyright 2018 The trfl Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for pixel_control_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import tensorflow.compat.v1 as tf
from trfl import pixel_control_ops
class PixelControlRewardsTest(tf.test.TestCase):
"""Test the `pixel_control_rewards` op."""
def setUp(self):
"""Defines example data and expected result for the op."""
super(PixelControlRewardsTest, self).setUp()
# Configure.
self._cell = 2
obs_size = (5, 2, 4, 4, 3, 2)
y = obs_size[2] // self._cell
x = obs_size[3] // self._cell
channels = np.prod(obs_size[4:])
rew_size = (obs_size[0]-1, obs_size[1], x, y)
# Input data.
self._obs_np = np.random.uniform(size=obs_size)
self._obs_tf = tf.placeholder(tf.float32, obs_size)
# Expected pseudo-rewards.
abs_diff = np.absolute(self._obs_np[1:] - self._obs_np[:-1])
abs_diff = abs_diff.reshape((-1,) + obs_size[2:4] + (channels,))
abs_diff = abs_diff.reshape((-1, y, self._cell, x, self._cell, channels))
avg_abs_diff = abs_diff.mean(axis=(2, 4, 5))
self._expected_pseudo_rewards = avg_abs_diff.reshape(rew_size)
def testPixelControlRewards(self):
"""Compute pseudo rewards from observations."""
pseudo_rewards_tf = pixel_control_ops.pixel_control_rewards(
self._obs_tf, self._cell)
with self.test_session() as sess:
self.assertAllClose(
sess.run(pseudo_rewards_tf, feed_dict={self._obs_tf: self._obs_np}),
self._expected_pseudo_rewards)
class PixelControlLossTest(tf.test.TestCase):
"""Test the `pixel_control_loss` op."""
def setUp(self):
"""Defines example data and expected result for the op."""
super(PixelControlLossTest, self).setUp()
# Observation shape is (2,2,3) (i.e., height 2, width 2, and 3 channels).
# We will use no cropping, and a cell size of 1. We have num_actions = 3,
# meaning our Q values should be (2,2,3). We will set the Q value equal to
# the observation.
self.seq_length = 3
self.batch_size = 1
num_actions = 3
obs_shape = (2, 2, num_actions)
self.discount = 0.9
self.cell_size = 1
self.scale = 1.0
# Create ops to feed actions and rewards.
self.observations_ph = tf.placeholder(
shape=(self.seq_length+1, self.batch_size)+obs_shape, dtype=tf.float32)
self.action_values_ph = tf.placeholder(
shape=(self.seq_length+1, self.batch_size)+obs_shape, dtype=tf.float32)
self.actions_ph = tf.placeholder(
shape=(self.seq_length, self.batch_size), dtype=tf.int32)
# Observations.
obs1 = np.array([[[1, 2, 3], [3, 4, 5]], [[5, 6, 7], [7, 8, 9]]])
obs2 = np.array([[[7, 8, 9], [1, 2, 3]], [[3, 4, 5], [5, 6, 7]]])
obs3 = np.array([[[5, 6, 7], [7, 8, 9]], [[1, 2, 3], [3, 4, 5]]])
obs4 = np.array([[[3, 4, 5], [5, 6, 7]], [[7, 8, 9], [1, 2, 3]]])
# Actions.
action1 = 0
action2 = 1
action3 = 2
# Compute loss for constant discount.
qa_tm1 = obs3[:, :, action3]
reward3 = np.mean(np.abs(obs4 - obs3), axis=2)
qmax_t = np.amax(obs4, axis=2)
target = reward3 + self.discount * qmax_t
error3 = target - qa_tm1
qa_tm1 = obs2[:, :, action2]
reward2 = np.mean(np.abs(obs3 - obs2), axis=2)
target = reward2 + self.discount * target
error2 = target - qa_tm1
qa_tm1 = obs1[:, :, action1]
reward1 = np.mean(np.abs(obs2 - obs1), axis=2)
target = reward1 + self.discount * target
error1 = target - qa_tm1
# Compute loss for episode termination with discount 0.
qa_tm1 = obs1[:, :, action1]
reward1 = np.mean(np.abs(obs2 - obs1), axis=2)
target = reward1 + 0. * target
error1_term = target - qa_tm1
self.error = np.sum(
np.square(error1) + np.square(error2) + np.square(error3)) * 0.5
self.error_term = np.sum(
np.square(error1_term) + np.square(error2) + np.square(error3)) * 0.5
# Placeholder data.
self.observations = np.expand_dims(
np.stack([obs1, obs2, obs3, obs4], axis=0), axis=1)
self.action_values = self.observations
self.actions = np.stack(
[np.array([action1]), np.array([action2]), np.array([action3])], axis=0)
def testPixelControlLossScalarDiscount(self):
"""Compute loss for given observations, actions, values, scalar discount."""
loss, _ = pixel_control_ops.pixel_control_loss(
self.observations_ph, self.actions_ph, self.action_values_ph,
self.cell_size, self.discount, self.scale)
init = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init)
feed_dict = {
self.observations_ph: self.observations,
self.action_values_ph: self.action_values,
self.actions_ph: self.actions}
loss_np = sess.run(loss, feed_dict=feed_dict)
self.assertNear(loss_np, self.error, 1e-3)
def testPixelControlLossTensorDiscount(self):
"""Compute loss for given observations, actions, values, tensor discount."""
zero_discount = tf.zeros((1, self.batch_size))
non_zero_discount = tf.tile(
tf.reshape(self.discount, [1, 1]),
[self.seq_length - 1, self.batch_size])
tensor_discount = tf.concat([zero_discount, non_zero_discount], axis=0)
loss, _ = pixel_control_ops.pixel_control_loss(
self.observations_ph, self.actions_ph, self.action_values_ph,
self.cell_size, tensor_discount, self.scale)
init = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init)
feed_dict = {
self.observations_ph: self.observations,
self.action_values_ph: self.action_values,
self.actions_ph: self.actions}
loss_np = sess.run(loss, feed_dict=feed_dict)
self.assertNear(loss_np, self.error_term, 1e-3)
def testPixelControlLossShapes(self):
with self.assertRaisesRegexp(
ValueError, "Pixel Control values are not compatible"):
pixel_control_ops.pixel_control_loss(
self.observations_ph, self.actions_ph,
self.action_values_ph[:, :, :-1], self.cell_size, self.discount,
self.scale)
def testTensorDiscountShape(self):
with self.assertRaisesRegexp(
ValueError, "discount_factor must be a scalar or a tensor of rank 2"):
tensor_discount = tf.tile(
tf.reshape(self.discount, [1, 1, 1]),
[self.seq_length, self.batch_size, 1])
pixel_control_ops.pixel_control_loss(
self.observations_ph, self.actions_ph,
self.action_values_ph, self.cell_size, tensor_discount,
self.scale)
if __name__ == "__main__":
tf.test.main()
| trfl-master | trfl/pixel_control_ops_test.py |
# Copyright 2018 The trfl Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for vtrace_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
from absl.testing import parameterized
import numpy as np
import tensorflow.compat.v1 as tf
from trfl import vtrace_ops
def _shaped_arange(*shape):
"""Runs np.arange, converts to float and reshapes."""
return np.arange(np.prod(shape), dtype=np.float32).reshape(*shape)
def _softmax(logits):
"""Applies softmax non-linearity on inputs."""
return np.exp(logits) / np.sum(np.exp(logits), axis=-1, keepdims=True)
def _ground_truth_calculation(discounts, log_rhos, rewards, values,
bootstrap_value, clip_rho_threshold,
clip_pg_rho_threshold):
"""Calculates the ground truth for V-trace in Python/Numpy."""
vs = []
seq_len = len(discounts)
rhos = np.exp(log_rhos)
cs = np.minimum(rhos, 1.0)
clipped_rhos = rhos
if clip_rho_threshold:
clipped_rhos = np.minimum(rhos, clip_rho_threshold)
clipped_pg_rhos = rhos
if clip_pg_rho_threshold:
clipped_pg_rhos = np.minimum(rhos, clip_pg_rho_threshold)
# This is a very inefficient way to calculate the V-trace ground truth.
# We calculate it this way because it is close to the mathematical notation of
# V-trace.
# v_s = V(x_s)
# + \sum^{T-1}_{t=s} \gamma^{t-s}
# * \prod_{i=s}^{t-1} c_i
# * \rho_t (r_t + \gamma V(x_{t+1}) - V(x_t))
  # Note that when we take the product over c_i we write `s:t`, since the
  # paper's notation is inclusive of `t-1` while Python slicing is exclusive.
# Also note that np.prod([]) == 1.
values_t_plus_1 = np.concatenate([values, bootstrap_value[None, :]], axis=0)
for s in range(seq_len):
v_s = np.copy(values[s]) # Very important copy.
for t in range(s, seq_len):
v_s += (
          np.prod(discounts[s:t], axis=0) * np.prod(cs[s:t], axis=0) *
          clipped_rhos[t] *
(rewards[t] + discounts[t] * values_t_plus_1[t + 1] - values[t]))
vs.append(v_s)
vs = np.stack(vs, axis=0)
pg_advantages = (
clipped_pg_rhos * (rewards + discounts * np.concatenate(
[vs[1:], bootstrap_value[None, :]], axis=0) - values))
return vtrace_ops.VTraceReturns(vs=vs, pg_advantages=pg_advantages)
class LogProbsFromLogitsAndActionsTest(tf.test.TestCase,
parameterized.TestCase):
@parameterized.named_parameters(('Batch1', 1), ('Batch2', 2))
def testLogProbsFromLogitsAndActions(self, batch_size):
"""Tests log_probs_from_logits_and_actions."""
seq_len = 7
num_actions = 3
policy_logits = _shaped_arange(seq_len, batch_size, num_actions) + 10
actions = np.random.randint(
0, num_actions - 1, size=(seq_len, batch_size), dtype=np.int32)
action_log_probs_tensor = vtrace_ops.log_probs_from_logits_and_actions(
policy_logits, actions)
# Ground Truth
# Using broadcasting to create a mask that indexes action logits
action_index_mask = actions[..., None] == np.arange(num_actions)
def index_with_mask(array, mask):
return array[mask].reshape(*array.shape[:-1])
# Note: Normally log(softmax) is not a good idea because it's not
# numerically stable. However, in this test we have well-behaved values.
ground_truth_v = index_with_mask(
np.log(_softmax(policy_logits)), action_index_mask)
with self.test_session() as session:
self.assertAllClose(ground_truth_v, session.run(action_log_probs_tensor))
class VtraceTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(('Batch1', 1), ('Batch5', 5))
def testVTrace(self, batch_size):
"""Tests V-trace against ground truth data calculated in python."""
seq_len = 5
values = {
# Note that this is only for testing purposes using well-formed inputs.
# In practice we'd be more careful about taking log() of arbitrary
# quantities.
'log_rhos':
np.log((_shaped_arange(seq_len, batch_size)) / batch_size /
seq_len + 1),
# T, B where B_i: [0.9 / (i+1)] * T
'discounts':
np.array([[0.9 / (b + 1)
for b in range(batch_size)]
for _ in range(seq_len)]),
'rewards':
_shaped_arange(seq_len, batch_size),
'values':
_shaped_arange(seq_len, batch_size) / batch_size,
'bootstrap_value':
_shaped_arange(batch_size) + 1.0,
'clip_rho_threshold':
3.7,
'clip_pg_rho_threshold':
2.2,
}
output = vtrace_ops.vtrace_from_importance_weights(**values)
with self.test_session() as session:
output_v = session.run(output)
ground_truth_v = _ground_truth_calculation(**values)
for a, b in zip(ground_truth_v, output_v):
self.assertAllClose(a, b)
@parameterized.named_parameters(('Batch1', 1), ('Batch2', 2))
def testVTraceFromLogits(self, batch_size):
"""Tests V-trace calculated from logits."""
seq_len = 5
num_actions = 3
clip_rho_threshold = None # No clipping.
clip_pg_rho_threshold = None # No clipping.
# Intentionally leaving shapes unspecified to test if V-trace can
# deal with that.
placeholders = {
# T, B, NUM_ACTIONS
'behaviour_policy_logits':
tf.placeholder(dtype=tf.float32, shape=[None, None, None]),
# T, B, NUM_ACTIONS
'target_policy_logits':
tf.placeholder(dtype=tf.float32, shape=[None, None, None]),
'actions':
tf.placeholder(dtype=tf.int32, shape=[None, None]),
'discounts':
tf.placeholder(dtype=tf.float32, shape=[None, None]),
'rewards':
tf.placeholder(dtype=tf.float32, shape=[None, None]),
'values':
tf.placeholder(dtype=tf.float32, shape=[None, None]),
'bootstrap_value':
tf.placeholder(dtype=tf.float32, shape=[None]),
}
from_logits_output = vtrace_ops.vtrace_from_logits(
clip_rho_threshold=clip_rho_threshold,
clip_pg_rho_threshold=clip_pg_rho_threshold,
**placeholders)
target_log_probs = vtrace_ops.log_probs_from_logits_and_actions(
placeholders['target_policy_logits'], placeholders['actions'])
behaviour_log_probs = vtrace_ops.log_probs_from_logits_and_actions(
placeholders['behaviour_policy_logits'], placeholders['actions'])
log_rhos = target_log_probs - behaviour_log_probs
ground_truth = (log_rhos, behaviour_log_probs, target_log_probs)
values = {
'behaviour_policy_logits':
_shaped_arange(seq_len, batch_size, num_actions),
'target_policy_logits':
_shaped_arange(seq_len, batch_size, num_actions),
'actions':
np.random.randint(0, num_actions - 1, size=(seq_len, batch_size)),
'discounts':
np.array( # T, B where B_i: [0.9 / (i+1)] * T
[[0.9 / (b + 1)
for b in range(batch_size)]
for _ in range(seq_len)]),
'rewards':
_shaped_arange(seq_len, batch_size),
'values':
_shaped_arange(seq_len, batch_size) / batch_size,
'bootstrap_value':
_shaped_arange(batch_size) + 1.0, # B
}
feed_dict = {placeholders[k]: v for k, v in values.items()}
with self.test_session() as session:
from_logits_output_v = session.run(
from_logits_output, feed_dict=feed_dict)
(ground_truth_log_rhos, ground_truth_behaviour_action_log_probs,
ground_truth_target_action_log_probs) = session.run(
ground_truth, feed_dict=feed_dict)
# Calculate V-trace using the ground truth logits.
from_iw = vtrace_ops.vtrace_from_importance_weights(
log_rhos=ground_truth_log_rhos,
discounts=values['discounts'],
rewards=values['rewards'],
values=values['values'],
bootstrap_value=values['bootstrap_value'],
clip_rho_threshold=clip_rho_threshold,
clip_pg_rho_threshold=clip_pg_rho_threshold)
with self.test_session() as session:
from_iw_v = session.run(from_iw)
self.assertAllClose(from_iw_v.vs, from_logits_output_v.vs)
self.assertAllClose(from_iw_v.pg_advantages,
from_logits_output_v.pg_advantages)
self.assertAllClose(ground_truth_behaviour_action_log_probs,
from_logits_output_v.behaviour_action_log_probs)
self.assertAllClose(ground_truth_target_action_log_probs,
from_logits_output_v.target_action_log_probs)
self.assertAllClose(ground_truth_log_rhos, from_logits_output_v.log_rhos)
def testHigherRankInputsForIW(self):
"""Checks support for additional dimensions in inputs."""
placeholders = {
'log_rhos': tf.placeholder(dtype=tf.float32, shape=[None, None, 1]),
'discounts': tf.placeholder(dtype=tf.float32, shape=[None, None, 1]),
'rewards': tf.placeholder(dtype=tf.float32, shape=[None, None, 42]),
'values': tf.placeholder(dtype=tf.float32, shape=[None, None, 42]),
'bootstrap_value': tf.placeholder(dtype=tf.float32, shape=[None, 42])
}
output = vtrace_ops.vtrace_from_importance_weights(**placeholders)
self.assertEqual(output.vs.shape.as_list()[-1], 42)
def testInconsistentRankInputsForIW(self):
"""Test one of many possible errors in shape of inputs."""
placeholders = {
'log_rhos': tf.placeholder(dtype=tf.float32, shape=[None, None, 1]),
'discounts': tf.placeholder(dtype=tf.float32, shape=[None, None, 1]),
'rewards': tf.placeholder(dtype=tf.float32, shape=[None, None, 42]),
'values': tf.placeholder(dtype=tf.float32, shape=[None, None, 42]),
# Should be [None, 42].
'bootstrap_value': tf.placeholder(dtype=tf.float32, shape=[None])
}
with self.assertRaisesRegexp(ValueError, 'must have rank 2'):
vtrace_ops.vtrace_from_importance_weights(**placeholders)
if __name__ == '__main__':
tf.test.main()
| trfl-master | trfl/vtrace_ops_test.py |
# Copyright 2018 The trfl Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""TensorFlow ops for state value learning."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
# Dependency imports
import tensorflow.compat.v1 as tf
from trfl import base_ops
from trfl import sequence_ops
TDExtra = collections.namedtuple("td_extra", ["target", "td_error"])
TDLambdaExtra = collections.namedtuple(
"td_lambda_extra", ["temporal_differences", "discounted_returns"])
def td_learning(v_tm1, r_t, pcont_t, v_t, name="TDLearning"):
"""Implements the TD(0)-learning loss as a TensorFlow op.
The TD loss is `0.5` times the squared difference between `v_tm1` and
the target `r_t + pcont_t * v_t`.
See "Learning to Predict by the Methods of Temporal Differences" by Sutton.
(https://link.springer.com/article/10.1023/A:1022633531479).
Args:
v_tm1: Tensor holding values at previous timestep, shape `[B]`.
r_t: Tensor holding rewards, shape `[B]`.
pcont_t: Tensor holding pcontinue values, shape `[B]`.
v_t: Tensor holding values at current timestep, shape `[B]`.
name: name to prefix ops created by this function.
Returns:
A namedtuple with fields:
* `loss`: a tensor containing the batch of losses, shape `[B]`.
* `extra`: a namedtuple with fields:
* `target`: batch of target values for `v_tm1`, shape `[B]`.
* `td_error`: batch of temporal difference errors, shape `[B]`.
"""
# Rank and compatibility checks.
base_ops.wrap_rank_shape_assert([[v_tm1, v_t, r_t, pcont_t]], [1], name)
# TD(0)-learning op.
with tf.name_scope(name, values=[v_tm1, r_t, pcont_t, v_t]):
# Build target.
target = tf.stop_gradient(r_t + pcont_t * v_t)
    # Temporal difference error and loss.
    # The loss is 0.5 times the squared TD error, so its gradient w.r.t. v_tm1
    # is the negative TD error.
td_error = target - v_tm1
loss = 0.5 * tf.square(td_error)
return base_ops.LossOutput(loss, TDExtra(target, td_error))
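# Worked example (illustrative numbers only): with v_tm1 = 1.0, r_t = 1.0,
# pcont_t = 0.9 and v_t = 2.0, the target is 1.0 + 0.9 * 2.0 = 2.8, the
# td_error is 2.8 - 1.0 = 1.8, and the loss is 0.5 * 1.8**2 = 1.62.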
def generalized_lambda_returns(rewards,
pcontinues,
values,
bootstrap_value,
lambda_=1,
name="generalized_lambda_returns"):
"""Computes lambda-returns along a batch of (chunks of) trajectories.
For lambda=1 these will be multistep returns looking ahead from each
state to the end of the chunk, where bootstrap_value is used. If you pass an
entire trajectory and zeros for bootstrap_value, this is just the Monte-Carlo
return / TD(1) target.
For lambda=0 these are one-step TD(0) targets.
  For intermediate values of lambda these are lambda-returns / TD(lambda)
  targets, except that traces are always cut off at the end of the chunk, since
  we can't see returns beyond it. If you pass an entire trajectory with zeros
  for bootstrap_value, then they're plain TD(lambda) targets.
lambda can also be a tensor of values in [0, 1], determining the mix of
bootstrapping vs further accumulation of multistep returns at each timestep.
This can be used to implement Retrace and other algorithms. See
`sequence_ops.multistep_forward_view` for more info on this. Another way to
think about the end-of-chunk cutoff is that lambda is always effectively zero
on the timestep after the end of the chunk, since at the end of the chunk we
rely entirely on bootstrapping and can't accumulate returns looking further
into the future.
The sequences in the tensors should be aligned such that an agent in a state
with value `V` transitions into another state with value `V'`, receiving
reward `r` and pcontinue `p`. Then `V`, `r` and `p` are all at the same index
`i` in the corresponding tensors. `V'` is at index `i+1`, or in the
`bootstrap_value` tensor if `i == T`.
Subtracting `values` from these lambda-returns will yield estimates of the
advantage function which can be used for both the policy gradient loss and
the baseline value function loss in A3C / GAE.
Args:
rewards: 2-D Tensor with shape `[T, B]`.
pcontinues: 2-D Tensor with shape `[T, B]`.
values: 2-D Tensor containing estimates of the state values for timesteps
0 to `T-1`. Shape `[T, B]`.
bootstrap_value: 1-D Tensor containing an estimate of the value of the
final state at time `T`, used for bootstrapping the target n-step
returns. Shape `[B]`.
lambda_: an optional scalar or 2-D Tensor with shape `[T, B]`.
name: Customises the name_scope for this op.
Returns:
2-D Tensor with shape `[T, B]`
"""
values.get_shape().assert_has_rank(2)
rewards.get_shape().assert_has_rank(2)
pcontinues.get_shape().assert_has_rank(2)
bootstrap_value.get_shape().assert_has_rank(1)
scoped_values = [rewards, pcontinues, values, bootstrap_value, lambda_]
with tf.name_scope(name, values=scoped_values):
if lambda_ == 1:
# This is actually equivalent to the branch below, just an optimisation
# to avoid unnecessary work in this case:
return sequence_ops.scan_discounted_sum(
rewards,
pcontinues,
initial_value=bootstrap_value,
reverse=True,
back_prop=False,
name="multistep_returns")
else:
v_tp1 = tf.concat(
axis=0, values=[values[1:, :],
tf.expand_dims(bootstrap_value, 0)])
# `back_prop=False` prevents gradients flowing into values and
# bootstrap_value, which is what you want when using the bootstrapped
# lambda-returns in an update as targets for values.
return sequence_ops.multistep_forward_view(
rewards,
pcontinues,
v_tp1,
lambda_,
back_prop=False,
name="generalized_lambda_returns")
def td_lambda(state_values,
rewards,
pcontinues,
bootstrap_value,
lambda_=1,
name="BaselineLoss"):
"""Constructs a TensorFlow graph computing the L2 loss for sequences.
This loss learns the baseline for advantage actor-critic models. Gradients
for this loss flow through each tensor in `state_values`, but no other
input tensors. The baseline is regressed towards the n-step bootstrapped
returns given by the reward/pcontinue sequence.
This function is designed for batches of sequences of data. Tensors are
assumed to be time major (i.e. the outermost dimension is time, the second
outermost dimension is the batch dimension). We denote the sequence length
in the shapes of the arguments with the variable `T`, the batch size with
the variable `B`, neither of which needs to be known at construction time.
Index `0` of the time dimension is assumed to be the start of the sequence.
`rewards` and `pcontinues` are the sequences of data taken directly from the
environment, possibly modulated by a discount. `state_values` are the
sequences of (typically learnt) estimates of the values of the states
visited along a batch of trajectories.
The sequences in the tensors should be aligned such that an agent in a state
with value `V` that takes an action transitions into another state
with value `V'`, receiving reward `r` and pcontinue `p`. Then `V`, `r`
and `p` are all at the same index `i` in the corresponding tensors. `V'` is
at index `i+1`, or in the `bootstrap_value` tensor if `i == T`.
See "High-dimensional continuous control using generalized advantage
estimation" by Schulman, Moritz, Levine et al.
(https://arxiv.org/abs/1506.02438).
Args:
state_values: 2-D Tensor of state-value estimates with shape `[T, B]`.
rewards: 2-D Tensor with shape `[T, B]`.
pcontinues: 2-D Tensor with shape `[T, B]`.
bootstrap_value: 1-D Tensor with shape `[B]`.
lambda_: an optional scalar or 2-D Tensor with shape `[T, B]`.
name: Customises the name_scope for this op.
Returns:
A namedtuple with fields:
* `loss`: a tensor containing the batch of losses, shape `[B]`.
* `extra`: a namedtuple with fields:
* temporal_differences, Tensor of shape `[T, B]`
* discounted_returns, Tensor of shape `[T, B]`
"""
scoped_values = [state_values, rewards, pcontinues, bootstrap_value]
with tf.name_scope(name, values=scoped_values):
discounted_returns = generalized_lambda_returns(
rewards, pcontinues, state_values, bootstrap_value, lambda_)
temporal_differences = discounted_returns - state_values
loss = 0.5 * tf.reduce_sum(
tf.square(temporal_differences), axis=0, name="l2_loss")
return base_ops.LossOutput(
loss, TDLambdaExtra(
temporal_differences=temporal_differences,
discounted_returns=discounted_returns))
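# Illustrative usage sketch (not part of the original library): using
# `td_lambda` as the baseline (critic) loss for a batch of sequences. The
# argument shapes follow the docstring above; the helper name is hypothetical
# and TF1 graph mode is assumed.
def _td_lambda_usage_sketch(state_values, rewards, pcontinues, bootstrap_value):
  """Returns a scalar baseline loss plus [T, B] advantage estimates."""
  loss, extra = td_lambda(
      state_values, rewards, pcontinues, bootstrap_value, lambda_=0.95)
  # `extra.temporal_differences` can double as advantages for a policy loss.
  return tf.reduce_mean(loss), extra.temporal_differences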
def qv_max(v_tm1, r_t, pcont_t, q_t, name="QVMAX"):
"""Implements the QVMAX learning loss as a TensorFlow op.
The QVMAX loss is `0.5` times the squared difference between `v_tm1` and
the target `r_t + pcont_t * max q_t`, where `q_t` is separately learned
through QV learning (c.f. `action_value_ops.qv_learning`).
See "The QV Family Compared to Other Reinforcement Learning Algorithms" by
Wiering and van Hasselt (2009).
(http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.713.1931)
Args:
v_tm1: Tensor holding values at previous timestep, shape `[B]`.
r_t: Tensor holding rewards, shape `[B]`.
pcont_t: Tensor holding pcontinue values, shape `[B]`.
q_t: Tensor of action values at current timestep, shape `[B, num_actions]`.
name: name to prefix ops created by this function.
Returns:
A namedtuple with fields:
* `loss`: a tensor containing the batch of losses, shape `[B]`.
* `extra`: a namedtuple with fields:
* `target`: batch of target values for `v_tm1`, shape `[B]`.
* `td_error`: batch of temporal difference errors, shape `[B]`.
"""
# Rank and compatibility checks.
base_ops.wrap_rank_shape_assert([[v_tm1, r_t, pcont_t], [q_t]], [1, 2], name)
# The QVMAX op.
with tf.name_scope(name, values=[v_tm1, r_t, pcont_t, q_t]):
# Build target.
target = tf.stop_gradient(r_t + pcont_t * tf.reduce_max(q_t, axis=1))
# Temporal difference error and loss.
# Loss is MSE scaled by 0.5, so the gradient is equal to the TD error.
td_error = target - v_tm1
loss = 0.5 * tf.square(td_error)
return base_ops.LossOutput(loss, TDExtra(target, td_error))
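# Illustrative usage sketch (not part of the original library): QVMAX learns V
# with this loss while Q is learned separately via QV learning (see
# `action_value_ops.qv_learning`). The numbers are arbitrary, the helper name
# is hypothetical, and TF1 graph mode is assumed.
def _qv_max_usage_sketch():
  v_tm1 = tf.constant([1.0, 0.0])              # [B].
  r_t = tf.constant([0.0, 1.0])                # [B].
  pcont_t = tf.constant([0.9, 0.9])            # [B].
  q_t = tf.constant([[1.0, 2.0], [0.5, 0.0]])  # [B, num_actions].
  loss, extra = qv_max(v_tm1, r_t, pcont_t, q_t)
  return loss, extra.td_error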
| trfl-master | trfl/value_ops.py |
# Copyright 2018 The trfl Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""TensorFlow ops for discrete-action Policy Gradient functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
# Dependency imports
from six.moves import zip
import tensorflow.compat.v1 as tf
import tree as nest
from trfl import base_ops
from trfl import value_ops
# pylint: disable=g-complex-comprehension
DiscretePolicyEntropyExtra = collections.namedtuple(
"discrete_policy_entropy_extra", ["entropy"])
SequenceAdvantageActorCriticExtra = collections.namedtuple(
"sequence_advantage_actor_critic_extra",
["entropy", "entropy_loss", "baseline_loss", "policy_gradient_loss",
"advantages", "discounted_returns"])
def discrete_policy_entropy_loss(policy_logits,
normalise=False,
name="discrete_policy_entropy_loss"):
"""Computes the entropy 'loss' for a batch of policy logits.
Given a batch of policy logits, calculates the entropy and corrects the sign
so that minimizing the resulting loss op is equivalent to increasing entropy
in the batch. This loss is optionally normalised to the range `[-1, 0]` by
dividing by the log number of actions. This makes it more invariant to the
size of the action space.
This function accepts a nested array of `policy_logits` in order
to allow for multiple discrete actions. In this case, the loss is given by
`-sum_i(H(p_i))` where `p_i` are members of the `policy_logits` nest and
H is the Shannon entropy.
Args:
policy_logits: A (possibly nested structure of) (N+1)-D Tensor(s) with
shape `[..., A]`, representing the log-probabilities of a set of
Categorical distributions, where `...` represents at least one
dimension (e.g., batch, sequence), and `A` is the number of discrete
actions (which need not be identical across all tensors).
Does not need to be centered.
normalise: If True, divide the loss by the `sum_i(log(A_i))` where `A_i`
is the number of actions for the i'th tensor in the `policy_logits`
nest. Default is False.
name: Optional, name of this op.
Returns:
A namedtuple with fields:
* `loss`: Entropy 'loss', shape `[B]`.
* `extra`: a namedtuple with fields:
* `entropy`: Entropy of the policy, shape `[B]`.
"""
policy_logits = nest.flatten(policy_logits)
with tf.name_scope(name, values=policy_logits):
entropy = tf.add_n([
tf.reduce_sum(
-tf.nn.softmax(scalar_policy_logits)
* tf.nn.log_softmax(scalar_policy_logits), axis=-1)
for scalar_policy_logits in policy_logits], name="entropy")
# We want a value that we can minimize along with other losses, and where
# minimizing means driving the policy towards a uniform distribution over
# the actions. We thus scale it by negative one so that it can be simply
# added to other losses.
scale = tf.constant(-1.0, dtype=tf.float32)
if normalise:
num_actions = [tf.to_float(tf.shape(scalar_policy_logits)[-1])
for scalar_policy_logits in policy_logits]
scale /= tf.reduce_sum(tf.log(tf.stack(num_actions)))
loss = tf.multiply(scale, entropy, name="entropy_loss")
return base_ops.LossOutput(loss, DiscretePolicyEntropyExtra(entropy))
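# Illustrative usage sketch (not part of the original library): an entropy
# bonus for [T, B, A] policy logits. The helper name is hypothetical; for a
# [T, B, A] input the returned loss has shape [T, B], which is then summed
# over time as in `sequence_advantage_actor_critic_loss` below.
def _entropy_loss_usage_sketch(policy_logits):
  """policy_logits: Tensor of shape [T, B, A] with uncentered log-probs."""
  loss, extra = discrete_policy_entropy_loss(policy_logits, normalise=True)
  # Adding the summed loss to the main objective pushes entropy up.
  return tf.reduce_sum(loss, axis=0), extra.entropy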
def sequence_advantage_actor_critic_loss(
policy_logits, baseline_values, actions, rewards,
pcontinues, bootstrap_value, lambda_=1, entropy_cost=None,
baseline_cost=1, normalise_entropy=False,
name="SequenceAdvantageActorCriticLoss"):
"""Calculates the loss for an A2C update along a batch of trajectories.
Technically A2C is the special case where lambda=1; for general lambda
this is the loss for Generalized Advantage Estimation (GAE), modulo chunking
behaviour if passing chunks of episodes (see `generalized_lambda_returns` for
more detail).
Note: This function takes policy _logits_ as input, not the log-policy like
`learning.deepmind.lua.rl.learners.Reinforce` does.
This loss jointly learns the policy and the baseline. Therefore, gradients
for this loss flow through each tensor in `policy_logits` and
`baseline_values`, but no other input tensors. The policy is learnt with the
advantage actor-critic loss, plus an optional entropy term. The baseline is
regressed towards the n-step bootstrapped returns given by the
reward/pcontinue sequence. The `baseline_cost` parameter scales the
gradients w.r.t the baseline relative to the policy gradient. i.e:
`d(loss) / d(baseline) = baseline_cost * (n_step_return - baseline)`.
`rewards` and `pcontinues` are the sequences of data taken directly from the
environment, possibly modulated by a discount. `baseline_values` are the
sequences of (typically learnt) estimates of the values of the states
visited along a batch of trajectories as observed by the agent given the
sequences of one or more actions sampled from the `policy_logits`.
The sequences in the tensors should be aligned such that an agent in a state
with value `V` that takes an action `a` transitions into another state
with value `V'`, receiving reward `r` and pcontinue `p`. Then `V`, `a`, `r`
and `p` are all at the same index `i` in the corresponding tensors. `V'` is
at index `i+1`, or in the `bootstrap_value` tensor if `i == T`.
This function accepts a nested array of `policy_logits` and `actions` in order
to allow for multidimensional discrete action spaces. In this case, the loss
is given by `sum_i(loss(p_i, a_i))` where `p_i` are members of the
`policy_logits` nest, and `a_i` are members of the `actions` nest.
We assume that a single baseline is used across all action dimensions for
each timestep.
Args:
policy_logits: A (possibly nested structure of) 3-D Tensor(s) with shape
`[T, B, num_actions]` and possibly different dimension `num_actions`.
baseline_values: 2-D Tensor containing an estimate of state values `[T, B]`.
actions: A (possibly nested structure of) 2-D Tensor(s) with shape
`[T, B]` and integer type.
rewards: 2-D Tensor with shape `[T, B]`.
pcontinues: 2-D Tensor with shape `[T, B]`.
bootstrap_value: 1-D Tensor with shape `[B]`.
lambda_: an optional scalar or 2-D Tensor with shape `[T, B]` for
Generalised Advantage Estimation as per
https://arxiv.org/abs/1506.02438.
entropy_cost: optional scalar cost that pushes the policy to have high
entropy, larger values cause higher entropies.
baseline_cost: scalar cost that scales the derivatives of the baseline
relative to the policy gradient.
normalise_entropy: if True, the entropy loss is normalised to the range
`[-1, 0]` by dividing by the log number of actions. This makes it more
invariant to the size of the action space. Default is False.
name: Customises the name_scope for this op.
Returns:
A namedtuple with fields:
* `loss`: a tensor containing the total loss, shape `[B]`.
* `extra`: a namedtuple with fields:
* `entropy`: total loss per sequence, shape `[B]`.
* `entropy_loss`: scaled entropy loss per sequence, shape `[B]`.
* `baseline_loss`: scaled baseline loss per sequence, shape `[B]`.
* `policy_gradient_loss`: policy gradient loss per sequence,
shape `[B]`.
        * `advantages`: advantage estimates per timestep, shape `[T, B]`.
* `discounted_returns`: discounted returns per timestep,
shape `[T, B]`.
"""
scoped_values = (nest.flatten(policy_logits) + nest.flatten(actions) +
[baseline_values, rewards, pcontinues, bootstrap_value])
with tf.name_scope(name, values=scoped_values):
# Loss for the baseline, summed over the time dimension.
baseline_loss_td, td_lambda = value_ops.td_lambda(
baseline_values, rewards, pcontinues, bootstrap_value, lambda_)
# The TD error provides an estimate of the advantages of the actions.
advantages = td_lambda.temporal_differences
baseline_loss = tf.multiply(
tf.convert_to_tensor(baseline_cost, dtype=tf.float32),
baseline_loss_td,
name="baseline_loss")
# Loss for the policy. Doesn't push additional gradients through
# the advantages.
policy_gradient_loss = discrete_policy_gradient_loss(
policy_logits, actions, advantages, name="policy_gradient_loss")
total_loss = tf.add(policy_gradient_loss, baseline_loss, name="total_loss")
if entropy_cost is not None:
entropy_loss_op, policy_entropy = discrete_policy_entropy_loss(
policy_logits, normalise=normalise_entropy) # [T,B].
entropy = tf.reduce_sum(
policy_entropy.entropy, axis=0, name="entropy") # [B].
entropy_loss = tf.multiply(
tf.convert_to_tensor(entropy_cost, dtype=tf.float32),
tf.reduce_sum(entropy_loss_op, axis=0),
name="scaled_entropy_loss") # [B].
total_loss = tf.add(total_loss, entropy_loss,
name="total_loss_with_entropy")
else:
entropy = None
entropy_loss = None
extra = SequenceAdvantageActorCriticExtra(
entropy=entropy, entropy_loss=entropy_loss,
baseline_loss=baseline_loss,
policy_gradient_loss=policy_gradient_loss,
advantages=advantages,
discounted_returns=td_lambda.discounted_returns)
return base_ops.LossOutput(total_loss, extra)
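# Illustrative usage sketch (not part of the original library): a typical
# A2C / GAE setup with a single (non-nested) discrete action per step. The
# argument shapes follow the docstring above ([T, B, ...]); the helper name
# and hyperparameter values are hypothetical, and TF1 graph mode is assumed.
def _a2c_loss_usage_sketch(policy_logits, baseline_values, actions, rewards,
                           pcontinues, bootstrap_value):
  loss, extra = sequence_advantage_actor_critic_loss(
      policy_logits, baseline_values, actions, rewards, pcontinues,
      bootstrap_value, lambda_=0.95, entropy_cost=0.01, baseline_cost=0.5,
      normalise_entropy=True)
  # `loss` has shape [B]; reduce over the batch before feeding an optimiser.
  return tf.reduce_mean(loss), extra.advantages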
def discrete_policy_gradient(policy_logits, actions, action_values,
name="discrete_policy_gradient"):
"""Computes a batch of discrete-action policy gradient losses.
See notes by Silver et al here:
http://www0.cs.ucl.ac.uk/staff/D.Silver/web/Teaching_files/pg.pdf
From slide 41, denoting by `policy` the probability distribution with
log-probabilities `policy_logit`:
```
* `action` should have been sampled according to `policy`.
* `action_value` can be any estimate of `Q^{policy}(s, a)`, potentially
minus a baseline that doesn't depend on the action. This admits
many possible algorithms:
* `v_t` (Monte-Carlo return for time t) : REINFORCE
* `Q^w(s, a)` : Q Actor-Critic
* `v_t - V(s)` : Monte-Carlo Advantage Actor-Critic
      * `A^{GAE(gamma, lambda)}` : Generalized Advantage Actor Critic
* + many more.
```
Gradients for this op are only defined with respect to the `policy_logits`,
not `actions` or `action_values`.
This op supports multiple batch dimensions. The first N >= 1 dimensions of
each input/output tensor index into independent values. All tensors must
  have matching sizes for each batch dimension.
Args:
policy_logits: (N+1)-D Tensor of shape
`[batch_size_1, ..., batch_size_N, num_actions]` containing uncentered
log-probabilities.
actions: N-D Tensor of shape `[batch_size_1, ..., batch_size_N]` and integer
type, containing indices for the selected actions.
action_values: N-D Tensor of shape `[batch_size_1, ..., batch_size_N]`
containing an estimate of the value of the selected `actions`.
name: Customises the name_scope for this op.
Returns:
loss: N-D Tensor of shape `[batch_size_1, ..., batch_size_N]` containing the
loss. Differentiable w.r.t `policy_logits` only.
Raises:
ValueError: If the batch dimensions of `policy_logits` and `action_values`
do not match.
"""
with tf.name_scope(name, values=[policy_logits, actions, action_values]):
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=actions, logits=policy_logits)
action_values = tf.stop_gradient(action_values)
# Prevent accidental broadcasting if possible at construction time.
action_values.get_shape().assert_is_compatible_with(
cross_entropy.get_shape())
return tf.multiply(cross_entropy, action_values)
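# Illustrative usage sketch (not part of the original library): a
# REINFORCE-style loss for one batch of sampled actions and returns. The
# numbers are arbitrary, the helper name is hypothetical, and TF1 graph mode
# is assumed.
def _discrete_policy_gradient_usage_sketch():
  policy_logits = tf.constant([[0.1, 0.9], [0.2, 0.8]])  # [B, num_actions].
  actions = tf.constant([1, 0])                          # [B], int indices.
  returns = tf.constant([5.0, -1.0])                     # e.g. Monte-Carlo returns.
  loss = discrete_policy_gradient(policy_logits, actions, returns)
  return tf.reduce_mean(loss)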
def discrete_policy_gradient_loss(policy_logits, actions, action_values,
name="discrete_policy_gradient_loss"):
"""Computes discrete policy gradient losses for a batch of trajectories.
This wraps `discrete_policy_gradient` to accept a possibly nested array of
`policy_logits` and `actions` in order to allow for multiple discrete actions.
It also sums up losses along the time dimension, and is more restrictive about
shapes, assuming a [T, B] layout.
Args:
policy_logits: A (possibly nested structure of) Tensor(s) of shape
`[T, B, num_actions]` containing uncentered log-probabilities.
actions: A (possibly nested structure of) Tensor(s) of shape
`[T, B]` and integer type, containing indices for the selected actions.
action_values: Tensor of shape `[T, B]`
containing an estimate of the value of the selected `actions`, see
`discrete_policy_gradient`.
name: Customises the name_scope for this op.
Returns:
loss: Tensor of shape `[B]` containing the total loss for each sequence
in the batch. Differentiable w.r.t `policy_logits` only.
"""
policy_logits = nest.flatten(policy_logits)
actions = nest.flatten(actions)
# Check happens after flatten so that we can be more flexible on
# nest structures. This is equivalent to asserting that
# `len(policy_logits) == len(actions)`, which is sufficient for what we're
# doing here. In particular, it means that we can allow one argument to be
  # a tensor, while the other is a single-element tensor iterable.
nest.assert_same_structure(policy_logits, actions)
for scalar_policy_logits in policy_logits:
scalar_policy_logits.get_shape().assert_has_rank(3)
for scalar_actions in actions:
scalar_actions.get_shape().assert_has_rank(2)
scoped_values = policy_logits + actions + [action_values]
with tf.name_scope(name, values=scoped_values):
# Loss for the policy gradient. Doesn't push additional gradients through
# the action_values.
policy_gradient_loss_sequence = tf.add_n([
discrete_policy_gradient(
scalar_policy_logits, scalar_actions, action_values)
for scalar_policy_logits, scalar_actions
in zip(policy_logits, actions)])
return tf.reduce_sum(
policy_gradient_loss_sequence, axis=[0],
name="policy_gradient_loss")
| trfl-master | trfl/discrete_policy_gradient_ops.py |
# Copyright 2018 The trfl Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for indexing_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
from absl.testing import parameterized
import numpy as np
import tensorflow.compat.v1 as tf
from trfl import indexing_ops
class BatchIndexingTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters([None, True, False])
def testOrdinaryValues(self, keepdims):
"""Indexing value functions by action for a minibatch of values."""
values = [[1.1, 1.2, 1.3],
[1.4, 1.5, 1.6],
[2.1, 2.2, 2.3],
[2.4, 2.5, 2.6],
[3.1, 3.2, 3.3],
[3.4, 3.5, 3.6],
[4.1, 4.2, 4.3],
[4.4, 4.5, 4.6]]
action_indices = [0, 2, 1, 0, 2, 1, 0, 2]
result = indexing_ops.batched_index(
values, action_indices, keepdims=keepdims)
expected_result = [1.1, 1.6, 2.2, 2.4, 3.3, 3.5, 4.1, 4.6]
if keepdims:
expected_result = np.expand_dims(expected_result, axis=-1)
with self.test_session() as sess:
self.assertAllClose(sess.run(result), expected_result)
def testValueSequence(self):
"""Indexing value functions by action with a minibatch of sequences."""
values = [[[1.1, 1.2, 1.3], [1.4, 1.5, 1.6]],
[[2.1, 2.2, 2.3], [2.4, 2.5, 2.6]],
[[3.1, 3.2, 3.3], [3.4, 3.5, 3.6]],
[[4.1, 4.2, 4.3], [4.4, 4.5, 4.6]]]
action_indices = [[0, 2],
[1, 0],
[2, 1],
[0, 2]]
result = indexing_ops.batched_index(values, action_indices)
expected_result = [[1.1, 1.6],
[2.2, 2.4],
[3.3, 3.5],
[4.1, 4.6]]
with self.test_session() as sess:
self.assertAllClose(sess.run(result), expected_result)
def testInputShapeChecks(self):
"""Input shape checks can catch some, but not all, shape problems."""
# 1. Inputs have incorrect or incompatible ranks:
for args in [dict(values=[[5, 5]], indices=1),
dict(values=[5, 5], indices=[1]),
dict(values=[[[5, 5]]], indices=[1]),
dict(values=[[5, 5]], indices=[[[1]]]),]:
with self.assertRaisesRegexp(ValueError, "do not correspond"):
indexing_ops.batched_index(**args)
# 2. Inputs have correct, compatible ranks but incompatible sizes:
for args in [dict(values=[[5, 5]], indices=[1, 1]),
dict(values=[[5, 5], [5, 5]], indices=[1]),
dict(values=[[[5, 5], [5, 5]]], indices=[[1, 1], [1, 1]]),
dict(values=[[[5, 5], [5, 5]]], indices=[[1], [1]]),]:
with self.assertRaisesRegexp(ValueError, "incompatible shapes"):
indexing_ops.batched_index(**args)
# (Correct ranks and sizes work fine, though):
indexing_ops.batched_index(
values=[[5, 5]], indices=[1])
indexing_ops.batched_index(
values=[[[5, 5], [5, 5]]], indices=[[1, 1]])
# 3. Shape-checking works with fully-specified placeholders, or even
# partially-specified placeholders that still provide evidence of having
# incompatible shapes or incorrect ranks.
for sizes in [dict(q_size=[4, 3], a_size=[4, 1]),
dict(q_size=[4, 2, 3], a_size=[4, 1]),
dict(q_size=[4, 3], a_size=[5, None]),
dict(q_size=[None, 2, 3], a_size=[4, 1]),
dict(q_size=[4, 2, 3], a_size=[None, 1]),
dict(q_size=[4, 2, 3], a_size=[5, None]),
dict(q_size=[None, None], a_size=[None, None]),
dict(q_size=[None, None, None], a_size=[None]),]:
with self.assertRaises(ValueError):
indexing_ops.batched_index(
tf.placeholder(tf.float32, sizes["q_size"]),
tf.placeholder(tf.int32, sizes["a_size"]))
# But it can't work with 100% certainty if full shape information is not
# known ahead of time. These cases generate no errors; some make warnings:
for sizes in [dict(q_size=None, a_size=None),
dict(q_size=None, a_size=[4]),
dict(q_size=[4, 2], a_size=None),
dict(q_size=[None, 2], a_size=[None]),
dict(q_size=[None, 2, None], a_size=[None, 2]),
dict(q_size=[4, None, None], a_size=[4, None]),
dict(q_size=[None, None], a_size=[None]),
dict(q_size=[None, None, None], a_size=[None, None]),]:
indexing_ops.batched_index(
tf.placeholder(tf.float32, sizes["q_size"]),
tf.placeholder(tf.int32, sizes["a_size"]))
# And it can't detect invalid indices at construction time, either.
indexing_ops.batched_index(values=[[5, 5, 5]], indices=[1000000000])
def testFullShapeAvailableAtRuntimeOnly(self):
"""What happens when shape information isn't available statically?
The short answer is: it still works. The long answer is: it still works, but
arguments that shouldn't work due to argument shape mismatch can sometimes
work without raising any errors! This can cause insidious bugs. This test
verifies correct behaviour and also demonstrates kinds of shape mismatch
that can go undetected. Look for `!!!DANGER!!!` below.
Why this is possible: internally, `batched_index` flattens its inputs,
then transforms the action indices you provide into indices into its
flattened Q values tensor. So long as these flattened indices don't go
out-of-bounds, and so long as your arguments are compatible with a few
other bookkeeping operations, the operation will succeed.
The moral: always provide as much shape information as you can! See also
`testInputShapeChecks` for more on what shape checking can accomplish when
only partial shape information is available.
"""
## 1. No shape information is available during construction time.
q_values = tf.placeholder(tf.float32)
actions = tf.placeholder(tf.int32)
values = indexing_ops.batched_index(q_values, actions)
with self.test_session() as sess:
# First, correct and compatible Q values and indices work as intended.
self.assertAllClose(
[51],
sess.run(values, feed_dict={q_values: [[50, 51]], actions: [1]}))
self.assertAllClose(
[[51, 52]],
sess.run(values,
feed_dict={q_values: [[[50, 51], [52, 53]]],
actions: [[1, 0]]}))
# !!!DANGER!!! These "incompatible" shapes are silently tolerated!
# (These examples are probably not exhaustive, either!)
qs_2x2 = [[5, 5], [5, 5]]
qs_2x2x2 = [[[5, 5], [5, 5]],
[[5, 5], [5, 5]]]
sess.run(values, feed_dict={q_values: qs_2x2, actions: [0]})
sess.run(values, feed_dict={q_values: qs_2x2, actions: 0})
sess.run(values, feed_dict={q_values: qs_2x2x2, actions: [[0]]})
sess.run(values, feed_dict={q_values: qs_2x2x2, actions: [0]})
sess.run(values, feed_dict={q_values: qs_2x2x2, actions: 0})
## 2a. Shape information is only known for the batch size (2-D case).
q_values = tf.placeholder(tf.float32, shape=[2, None])
actions = tf.placeholder(tf.int32, shape=[2])
values = indexing_ops.batched_index(q_values, actions)
with self.test_session() as sess:
# Correct and compatible Q values and indices work as intended.
self.assertAllClose(
[51, 52],
sess.run(values,
feed_dict={q_values: [[50, 51], [52, 53]], actions: [1, 0]}))
# There are no really terrible shape errors that go uncaught in this case.
## 2b. Shape information is only known for the batch size (3-D case).
q_values = tf.placeholder(tf.float32, shape=[None, 2, None])
actions = tf.placeholder(tf.int32, shape=[None, 2])
values = indexing_ops.batched_index(q_values, actions)
with self.test_session() as sess:
# Correct and compatible Q values and indices work as intended.
self.assertAllClose(
[[51, 52]],
sess.run(values,
feed_dict={q_values: [[[50, 51], [52, 53]]],
actions: [[1, 0]]}))
# !!!DANGER!!! This "incompatible" shape is silently tolerated!
sess.run(values, feed_dict={q_values: qs_2x2x2, actions: [[0, 0]]})
## 3. Shape information is only known for the sequence length.
q_values = tf.placeholder(tf.float32, shape=[2, None, None])
actions = tf.placeholder(tf.int32, shape=[2, None])
values = indexing_ops.batched_index(q_values, actions)
with self.test_session() as sess:
# Correct and compatible Q values and indices work as intended.
self.assertAllClose(
[[51, 52], [54, 57]],
sess.run(values,
feed_dict={q_values: [[[50, 51], [52, 53]],
[[54, 55], [56, 57]]],
actions: [[1, 0], [0, 1]]}))
# !!!DANGER!!! This "incompatible" shape is silently tolerated!
sess.run(values, feed_dict={q_values: qs_2x2x2, actions: [[0], [0]]})
## 4a. Shape information is only known for the number of actions (2-D case).
q_values = tf.placeholder(tf.float32, shape=[None, 2])
actions = tf.placeholder(tf.int32, shape=[None])
values = indexing_ops.batched_index(q_values, actions)
with self.test_session() as sess:
# Correct and compatible Q values and indices work as intended.
self.assertAllClose(
[51, 52],
sess.run(values,
feed_dict={q_values: [[50, 51], [52, 53]], actions: [1, 0]}))
# !!!DANGER!!! This "incompatible" shape is silently tolerated!
sess.run(values, feed_dict={q_values: qs_2x2, actions: [0]})
## 4b. Shape information is only known for the number of actions (3-D case).
q_values = tf.placeholder(tf.float32, shape=[None, None, 2])
actions = tf.placeholder(tf.int32, shape=[None, None])
values = indexing_ops.batched_index(q_values, actions)
with self.test_session() as sess:
# Correct and compatible Q values and indices work as intended.
self.assertAllClose(
[[51, 52]],
sess.run(values,
feed_dict={q_values: [[[50, 51], [52, 53]]],
actions: [[1, 0]]}))
# !!!DANGER!!! These "incompatible" shapes are silently tolerated!
sess.run(values, feed_dict={q_values: qs_2x2x2, actions: [[0, 0]]})
sess.run(values, feed_dict={q_values: qs_2x2x2, actions: [[0]]})
## 5a. Value shape is not known ahead of time.
q_values = tf.placeholder(tf.float32)
actions = tf.placeholder(tf.int32, shape=[2])
values = indexing_ops.batched_index(q_values, actions)
with self.test_session() as sess:
# Correct and compatible Q values and indices work as intended.
self.assertAllClose(
[51, 52],
sess.run(values,
feed_dict={q_values: [[50, 51], [52, 53]], actions: [1, 0]}))
# !!!DANGER!!! This "incompatible" shape is silently tolerated!
sess.run(values, feed_dict={q_values: qs_2x2x2, actions: [0, 0]})
## 5b. Action shape is not known ahead of time.
q_values = tf.placeholder(tf.float32, shape=[None, None, 2])
actions = tf.placeholder(tf.int32)
values = indexing_ops.batched_index(q_values, actions)
with self.test_session() as sess:
# Correct and compatible Q values and indices work as intended.
self.assertAllClose(
[[51, 52], [54, 57]],
sess.run(values,
feed_dict={q_values: [[[50, 51], [52, 53]],
[[54, 55], [56, 57]]],
actions: [[1, 0], [0, 1]]}))
# !!!DANGER!!! This "incompatible" shape is silently tolerated!
sess.run(values, feed_dict={q_values: qs_2x2x2, actions: [0, 0]})
if __name__ == "__main__":
tf.test.main()
| trfl-master | trfl/indexing_ops_test.py |
# coding=utf8
# Copyright 2018 The trfl Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""TensorFlow ops for the Retrace algorithm.
These Retrace ops implement an action-value learning rule for minibatches of
multi-step sequences. Retrace supports off-policy learning and has
well-analysed theoretical guarantees.
The ops in this file support only discrete action spaces. Actions must be
indices in the range `[0, K)`, where `K` is the number of distinct actions
available to the agent.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
# Dependency imports
from six.moves import zip
import tensorflow.compat.v1 as tf
from trfl import base_ops
from trfl import indexing_ops
from trfl import sequence_ops
RetraceCoreExtra = collections.namedtuple(
'retrace_core_extra', ['retrace_weights', 'target'])
def retrace(lambda_,
qs,
targnet_qs,
actions,
rewards,
pcontinues,
target_policy_probs,
behaviour_policy_probs,
stop_targnet_gradients=True,
name=None):
"""Retrace algorithm loss calculation op.
Given a minibatch of temporally-contiguous sequences of Q values, policy
probabilities, and various other typical RL algorithm inputs, this
Op creates a subgraph that computes a loss according to the
Retrace multi-step off-policy value learning algorithm. This Op supports the
use of target networks, but does not require them.
For more details of Retrace, refer to
[the arXiv paper](http://arxiv.org/abs/1606.02647).
In argument descriptions, `T` counts the number of transitions over which
the Retrace loss is computed, and `B` is the minibatch size. Note that all
tensor arguments list a first-dimension (time dimension) size of T+1;
this is because in order to compute the loss over T timesteps, the
algorithm must be aware of the values of many of its inputs at timesteps
before and after each transition.
All tensor arguments are indexed first by transition, with specific
details of this indexing in the argument descriptions.
Args:
lambda_: Positive scalar value or 0-D `Tensor` controlling the degree to
which future timesteps contribute to the loss computed at each
transition.
qs: 3-D tensor holding per-action Q-values for the states encountered
just before taking the transitions that correspond to each major index.
Since these values are the predicted values we wish to update (in other
words, the values we intend to change as we learn), in a target network
setting, these nearly always come from the "non-target" network, which
we usually call the "learning network".
Shape is `[(T+1), B, num_actions]`.
targnet_qs: Like `qs`, but in the target network setting, these values
should be computed by the target network. We use these values to
compute multi-step error values for timesteps that follow the first
timesteps in each sequence and sequence fragment we consider.
Shape is `[(T+1), B, num_actions]`.
actions: 2-D tensor holding the indices of actions executed during the
transition that corresponds to each major index.
Shape is `[(T+1), B]`.
rewards: 2-D tensor holding rewards received during the transition
that corresponds to each major index.
Shape is `[(T+1), B]`.
pcontinues: 2-D tensor holding pcontinue values received during the
transition that corresponds to each major index.
Shape is `[(T+1), B]`.
target_policy_probs: 3-D tensor holding per-action policy probabilities
for the states encountered just before taking the transitions that
correspond to each major index, according to the target policy (i.e.
the policy we wish to learn). These probabilities usually derive from
the learning net.
Shape is `[(T+1), B, num_actions]`.
behaviour_policy_probs: 2-D tensor holding the *behaviour* policy's
probabilities of having taken actions `action` during the transitions
that correspond to each major index. These probabilities derive from
whatever policy you used to generate the data.
Shape is `[(T+1), B]`.
stop_targnet_gradients: `bool` that enables a sensible default way of
handling gradients through the Retrace op (essentially, gradients
are not permitted to involve the `targnet_qs` inputs). Can be disabled
if you require a different arrangement, but you'll probably want to
block some gradients somewhere.
name: name to prefix ops created by this function.
Returns:
A namedtuple with fields:
* `loss`: Tensor containing the batch of losses, shape `[B]`.
* `extra`: None
"""
all_args = [
lambda_, qs, targnet_qs, actions, rewards, pcontinues,
target_policy_probs, behaviour_policy_probs
]
with tf.name_scope(name, 'Retrace', values=all_args):
# Mainly to simplify testing:
(lambda_, qs, targnet_qs, actions, rewards, pcontinues, target_policy_probs,
behaviour_policy_probs) = (
tf.convert_to_tensor(arg) for arg in all_args)
# Require correct tensor ranks---as long as we have shape information
# available to check. If there isn't any, we print a warning.
def check_rank(tensors, ranks):
for i, (tensor, rank) in enumerate(zip(tensors, ranks)):
if tensor.get_shape():
base_ops.assert_rank_and_shape_compatibility([tensor], rank)
else:
tf.logging.error(
'Tensor "%s", which was offered as Retrace parameter %d, has '
'no rank at construction time, so Retrace can\'t verify that '
'it has the necessary rank of %d', tensor.name, i + 1, rank)
check_rank([
lambda_, qs, targnet_qs, actions, rewards, pcontinues,
target_policy_probs, behaviour_policy_probs
], [0, 3, 3, 2, 2, 2, 3, 2])
# Deduce the shapes of the arguments we'll create for retrace_core.
qs_shape = tf.shape(qs)
timesteps = qs_shape[0] # Batch size is qs_shape[1].
# Deduce the time indices for the arguments we'll create for retrace_core.
timestep_indices_tm1 = tf.range(0, timesteps - 1)
timestep_indices_t = tf.range(1, timesteps)
# Construct arguments for retrace_core and call.
q_tm1 = tf.gather(qs, timestep_indices_tm1)
a_tm1 = tf.gather(actions, timestep_indices_tm1)
r_t = tf.gather(rewards, timestep_indices_tm1)
pcont_t = tf.gather(pcontinues, timestep_indices_tm1)
target_policy_t = tf.gather(target_policy_probs, timestep_indices_t)
behaviour_policy_t = tf.gather(behaviour_policy_probs, timestep_indices_t)
targnet_q_t = tf.gather(targnet_qs, timestep_indices_t)
a_t = tf.gather(actions, timestep_indices_t)
core = retrace_core(lambda_, q_tm1, a_tm1, r_t, pcont_t, target_policy_t,
behaviour_policy_t, targnet_q_t, a_t,
stop_targnet_gradients)
return base_ops.LossOutput(core.loss, None)
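# Illustrative usage sketch (not part of the original library): typical shapes
# for the user-friendly `retrace` op. All arguments carry a leading time
# dimension of T+1, as documented above; the helper name and lambda value are
# hypothetical, and TF1 graph mode is assumed.
def _retrace_usage_sketch(qs, targnet_qs, actions, rewards, pcontinues,
                          target_policy_probs, behaviour_policy_probs):
  loss, _ = retrace(
      lambda_=0.95,
      qs=qs,                                          # [(T+1), B, num_actions].
      targnet_qs=targnet_qs,                          # [(T+1), B, num_actions].
      actions=actions,                                # [(T+1), B].
      rewards=rewards,                                # [(T+1), B].
      pcontinues=pcontinues,                          # [(T+1), B].
      target_policy_probs=target_policy_probs,        # [(T+1), B, num_actions].
      behaviour_policy_probs=behaviour_policy_probs)  # [(T+1), B].
  return tf.reduce_mean(loss)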
def _general_off_policy_corrected_multistep_target(r_t,
pcont_t,
target_policy_t,
c_t,
q_t,
a_t,
back_prop=False,
name=None):
"""Evaluates targets for various off-policy value correction based algorithms.
`target_policy_t` is the policy that this function aims to evaluate. New
action-value estimates (target values `T`) must be expressible in this
recurrent form:
```none
T(x_{t-1}, a_{t-1}) = r_t + γ[ 𝔼_π Q(x_t, .) - c_t Q(x_t, a_t) +
c_t T(x_t, a_t) ]
```
`T(x_t, a_t)` is an estimate of expected discounted future returns based
on the current Q value estimates `Q(x_t, a_t)` and rewards `r_t`. The
evaluated target values can be used as supervised targets for learning the Q
function itself or as returns for various policy gradient algorithms.
`Q==T` if convergence is reached. As the formula is recurrent, it will
evaluate multistep returns for non-zero importance weights `c_t`.
In the usual moving and target network setup `q_t` should be calculated by
the target network while the `target_policy_t` may be evaluated by either of
the networks. If `target_policy_t` is evaluated by the current moving network
the algorithm implemented will have a similar flavour as double DQN.
Depending on the choice of c_t, the algorithm can implement:
```none
Importance Sampling c_t = π(x_t, a_t) / μ(x_t, a_t),
Harutyunyan's et al. Q(lambda) c_t = λ,
Precup's et al. Tree-Backup c_t = π(x_t, a_t),
Munos' et al. Retrace c_t = λ min(1, π(x_t, a_t) / μ(x_t, a_t)).
```
Please refer to page 3 for more details:
https://arxiv.org/pdf/1606.02647v1.pdf
Args:
r_t: 2-D tensor holding rewards received during the transition
that corresponds to each major index.
Shape is `[T, B]`.
pcont_t: 2-D tensor holding pcontinue values received during the
transition that corresponds to each major index.
Shape is `[T, B]`.
target_policy_t: 3-D tensor holding per-action policy probabilities for
the states encountered just AFTER the transitions that correspond to
each major index, according to the target policy (i.e. the policy we
wish to learn). These usually derive from the learning net.
Shape is `[T, B, num_actions]`.
c_t: 2-D tensor holding importance weights; see discussion above.
Shape is `[T, B]`.
q_t: 3-D tensor holding per-action Q-values for the states
encountered just AFTER taking the transitions that correspond to each
major index. Shape is `[T, B, num_actions]`.
a_t: 2-D tensor holding the indices of actions executed during the
transition AFTER the transition that corresponds to each major index.
Shape is `[T, B]`.
back_prop: whether to backpropagate gradients through time.
name: name of the op.
Returns:
Tensor of shape `[T, B, num_actions]` containing Q values.
"""
# Formula (4) in https://arxiv.org/pdf/1606.02647v1.pdf can be expressed
# in a recursive form where T is a new target value:
# T(x_{t-1}, a_{t-1}) = r_t + γ[ 𝔼_π Q(x_t, .) - c_t Q(x_t, a_t) +
# c_t T(x_t, a_t) ]
# This recurrent form allows us to express Retrace by using
# `scan_discounted_sum`.
# Define:
# T_tm1 = T(x_{t-1}, a_{t-1})
# T_t = T(x_t, a_t)
# exp_q_t = 𝔼_π Q(x_t,.)
# qa_t = Q(x_t, a_t)
# Hence:
# T_tm1 = r_t + γ * (exp_q_t - c_t * qa_t) + γ * c_t * T_t
# Define:
# current = r_t + γ * (exp_q_t - c_t * qa_t)
# Thus:
# T_tm1 = scan_discounted_sum(current, γ * c_t, reverse=True)
args = [r_t, pcont_t, target_policy_t, c_t, q_t, a_t]
with tf.name_scope(
name, 'general_returns_based_off_policy_target', values=args):
exp_q_t = tf.reduce_sum(target_policy_t * q_t, axis=2)
qa_t = indexing_ops.batched_index(q_t, a_t)
current = r_t + pcont_t * (exp_q_t - c_t * qa_t)
initial_value = qa_t[-1]
return sequence_ops.scan_discounted_sum(
current,
pcont_t * c_t,
initial_value,
reverse=True,
back_prop=back_prop)
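# Illustrative sketch (not part of the original library): the helper above
# expresses different algorithms purely through the choice of `c_t`. For
# example, Harutyunyan et al.'s Q(lambda) uses a constant c_t = lambda; the
# helper name below is hypothetical and argument shapes follow the docstring.
def _q_lambda_target_sketch(r_t, pcont_t, target_policy_t, q_t, a_t,
                            lambda_=0.9):
  c_t = lambda_ * tf.ones_like(r_t)  # [T, B], constant trace coefficient.
  return _general_off_policy_corrected_multistep_target(
      r_t, pcont_t, target_policy_t, c_t, q_t, a_t)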
def _retrace_weights(pi_probs, mu_probs):
"""Evaluates importance weights for the Retrace algorithm.
Args:
pi_probs: taken action probabilities according to target policy.
Shape is `[T, B]`.
mu_probs: taken action probabilities according to behaviour policy.
Shape is `[T, B]`.
Returns:
Tensor of shape `[T, B]` containing importance weights.
"""
# tf.minimum seems to handle potential NaNs when pi_probs[i]=mu_probs[i]=0
return tf.minimum(1.0, pi_probs / mu_probs)
def retrace_core(lambda_,
q_tm1,
a_tm1,
r_t,
pcont_t,
target_policy_t,
behaviour_policy_t,
targnet_q_t,
a_t,
stop_targnet_gradients=True,
name=None):
"""Retrace algorithm core loss calculation op.
Given a minibatch of temporally-contiguous sequences of Q values, policy
probabilities, and various other typical RL algorithm inputs, this
Op creates a subgraph that computes a loss according to the
Retrace multi-step off-policy value learning algorithm. This Op supports the
use of target networks, but does not require them.
This function is the "core" Retrace op only because its arguments are less
user-friendly and more implementation-convenient. For a more user-friendly
operator, consider using `retrace`. For more details of Retrace, refer to
[the arXiv paper](http://arxiv.org/abs/1606.02647).
Construct the "core" retrace loss subgraph for a batch of sequences.
Note that two pairs of arguments (one holding target network values; the
other, actions) are temporally-offset versions of each other and will share
many values in common (nb: a good setting for using `IndexedSlices`). *This
op does not include any checks that these pairs of arguments are
consistent*---that is, it does not ensure that temporally-offset
arguments really do share the values they are supposed to share.
In argument descriptions, `T` counts the number of transitions over which
the Retrace loss is computed, and `B` is the minibatch size. All tensor
arguments are indexed first by transition, with specific details of this
indexing in the argument descriptions (pay close attention to "subscripts"
in variable names).
Args:
lambda_: Positive scalar value or 0-D `Tensor` controlling the degree to
which future timesteps contribute to the loss computed at each
transition.
q_tm1: 3-D tensor holding per-action Q-values for the states encountered
just before taking the transitions that correspond to each major index.
Since these values are the predicted values we wish to update (in other
words, the values we intend to change as we learn), in a target network
setting, these nearly always come from the "non-target" network, which
we usually call the "learning network".
Shape is `[T, B, num_actions]`.
a_tm1: 2-D tensor holding the indices of actions executed during the
transition that corresponds to each major index.
Shape is `[T, B]`.
r_t: 2-D tensor holding rewards received during the transition
that corresponds to each major index.
Shape is `[T, B]`.
pcont_t: 2-D tensor holding pcontinue values received during the
transition that corresponds to each major index.
Shape is `[T, B]`.
target_policy_t: 3-D tensor holding per-action policy probabilities for
the states encountered just AFTER the transitions that correspond to
each major index, according to the target policy (i.e. the policy we
wish to learn). These usually derive from the learning net.
Shape is `[T, B, num_actions]`.
behaviour_policy_t: 2-D tensor holding the *behaviour* policy's
probabilities of having taken action `a_t` at the states encountered
just AFTER the transitions that correspond to each major index. Derived
from whatever policy you used to generate the data. All values MUST be
      greater than 0. Shape is `[T, B]`.
targnet_q_t: 3-D tensor holding per-action Q-values for the states
encountered just AFTER taking the transitions that correspond to each
major index. Since these values are used to calculate target values for
      the network, in a target network setting, these should
probably come from the target network.
Shape is `[T, B, num_actions]`.
a_t: 2-D tensor holding the indices of actions executed during the
transition AFTER the transition that corresponds to each major index.
Shape is `[T, B]`.
stop_targnet_gradients: `bool` that enables a sensible default way of
handling gradients through the Retrace op (essentially, gradients
are not permitted to involve the `targnet_q_t` input).
      Can be disabled if you require a different arrangement, but
you'll probably want to block some gradients somewhere.
name: name to prefix ops created by this function.
Returns:
A namedtuple with fields:
* `loss`: Tensor containing the batch of losses, shape `[B]`.
* `extra`: A namedtuple with fields:
* `retrace_weights`: Tensor containing batch of retrace weights,
shape `[T, B]`.
* `target`: Tensor containing target action values, shape `[T, B]`.
"""
all_args = [
lambda_, q_tm1, a_tm1, r_t, pcont_t, target_policy_t, behaviour_policy_t,
targnet_q_t, a_t
]
with tf.name_scope(name, 'RetraceCore', all_args):
(lambda_, q_tm1, a_tm1, r_t, pcont_t, target_policy_t, behaviour_policy_t,
targnet_q_t, a_t) = (
tf.convert_to_tensor(arg) for arg in all_args)
# Evaluate importance weights.
c_t = _retrace_weights(
indexing_ops.batched_index(target_policy_t, a_t),
behaviour_policy_t) * lambda_
# Targets are evaluated by using only Q values from the target network.
# This provides fixed regression targets until the next target network
# update.
target = _general_off_policy_corrected_multistep_target(
r_t, pcont_t, target_policy_t, c_t, targnet_q_t, a_t,
not stop_targnet_gradients)
if stop_targnet_gradients:
target = tf.stop_gradient(target)
# Regress Q values of the learning network towards the targets evaluated
# by using the target network.
qa_tm1 = indexing_ops.batched_index(q_tm1, a_tm1)
delta = target - qa_tm1
loss = 0.5 * tf.square(delta)
return base_ops.LossOutput(
loss, RetraceCoreExtra(retrace_weights=c_t, target=target))
| trfl-master | trfl/retrace_ops.py |
# Copyright 2018 The trfl Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""TensorFlow ops for expressing common types of RL policies."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import tensorflow.compat.v1 as tf
import tensorflow_probability as tfp
def epsilon_greedy(action_values, epsilon, legal_actions_mask=None):
"""Computes an epsilon-greedy distribution over actions.
This returns a categorical distribution over a discrete action space. It is
assumed that the trailing dimension of `action_values` is of length A, i.e.
the number of actions. It is also assumed that actions are 0-indexed.
This policy does the following:
- With probability 1 - epsilon, take the action corresponding to the highest
action value, breaking ties uniformly at random.
- With probability epsilon, take an action uniformly at random.
Args:
action_values: A Tensor of action values with any rank >= 1 and dtype float.
Shape can be flat ([A]), batched ([B, A]), a batch of sequences
([T, B, A]), and so on.
epsilon: A scalar Tensor (or Python float) with value between 0 and 1.
    legal_actions_mask: An optional one-hot tensor having the same shape and
      dtype as `action_values`, defining the legal actions:
      legal_actions_mask[..., a] = 1 if a is legal, 0 otherwise.
      If not provided, all actions are considered legal, which is equivalent
      to passing `tf.ones_like(action_values)`.
Returns:
policy: tfp.distributions.Categorical distribution representing the policy.
"""
with tf.name_scope("epsilon_greedy", values=[action_values, epsilon]):
# Convert inputs to Tensors if they aren't already.
action_values = tf.convert_to_tensor(action_values)
epsilon = tf.convert_to_tensor(epsilon, dtype=action_values.dtype)
# We compute the action space dynamically.
num_actions = tf.cast(tf.shape(action_values)[-1], action_values.dtype)
# Dithering action distribution.
if legal_actions_mask is None:
dither_probs = 1 / num_actions * tf.ones_like(action_values)
else:
dither_probs = 1 / tf.reduce_sum(
legal_actions_mask, axis=-1, keepdims=True) * legal_actions_mask
# Greedy action distribution, breaking ties uniformly at random.
max_value = tf.reduce_max(action_values, axis=-1, keepdims=True)
greedy_probs = tf.cast(tf.equal(action_values, max_value),
action_values.dtype)
greedy_probs /= tf.reduce_sum(greedy_probs, axis=-1, keepdims=True)
# Epsilon-greedy action distribution.
probs = epsilon * dither_probs + (1 - epsilon) * greedy_probs
# Make the policy object.
policy = tfp.distributions.Categorical(probs=probs)
return policy
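# Illustrative usage sketch (not part of the original library): sampling from
# the epsilon-greedy policy for a small batch of action values. The numbers
# are arbitrary and the helper name is hypothetical; under TF1 graph mode the
# returned tensors would be evaluated inside a tf.Session.
def _epsilon_greedy_usage_sketch():
  action_values = tf.constant([[1.0, 3.0, 2.0],
                               [0.5, 0.1, 0.4]])  # [B, A].
  policy = epsilon_greedy(action_values, epsilon=0.1)
  # `policy` is a tfp Categorical; `sample()` draws one action per batch row.
  return policy.sample(), policy.probs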
| trfl-master | trfl/policy_ops.py |
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for emergent_communication_at_scale.Lewis."""
from absl.testing import absltest
import jax
from jaxline import platform
from jaxline import train
from emergent_communication_at_scale import lewis_experiment
from emergent_communication_at_scale.configs import lewis_config
from emergent_communication_at_scale.utils import eval_utils
class LewisTest(absltest.TestCase):
def test_lewis(self):
config = lewis_config.get_config('debug')
xp_config = config.experiment_kwargs.config
xp_config.game.name = 'dummy' # Use dummy dataset.
# Defines smaller architectures for quick testing.
config.length = 5
xp_config.speaker.core_config.core_kwargs.hidden_size = 8
xp_config.listener.core_config.core_kwargs.hidden_size = 8
xp_config.listener.head_config.head_kwargs.hidden_sizes = [8]
checkpointer = platform.create_checkpointer(config, 'train')
writer = platform.create_writer(config, 'train')
## Creates a `dummy` checkpoint for evaluation.
temp_dir = self.create_tempdir().full_path
xp_config.checkpointing.checkpoint_dir = temp_dir
# Training step
train.train(
lewis_experiment.LewisExperiment,
config,
checkpointer,
writer)
# Evaluation step
eval_utils.evaluate_final(config=config,
mode='eval_test_average',
rng=jax.random.PRNGKey(42))
if __name__ == '__main__':
absltest.main()
| emergent_communication_at_scale-main | lewis_experiment_test.py |
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implement another experiment to compute ease of learning of agents."""
from typing import List, Optional
from absl import logging
import chex
import jax
from jaxline import utils
from ml_collections import config_dict
import numpy as np
import optax
from emergent_communication_at_scale import lewis_experiment
from emergent_communication_at_scale import types
from emergent_communication_at_scale.utils import checkpointer as ckpt_lib
class EaseOfLearningExperiment(lewis_experiment.LewisExperiment):
"""Ease of learning experiment.
The ease of learning is defined as how fast a new listener acquires
an emergent language (the speaker is fixed).
"""
def __init__(
self,
mode: str,
init_rng: chex.PRNGKey,
config: config_dict.ConfigDict,
) -> None:
"""Initializes experiment."""
if mode != 'train':
raise ValueError(
f'Ease of learning experiment only supports train mode: {mode}.')
# Step 1: Loads ckpt and the related config.
    # Retrieve speaker params and config to perform ease of learning from a
    # given lewis configuration path.
path = config.speaker_path.path
if not path:
raise ValueError(f'{path} does not exist to retrieve checkpoint.')
exp_state, lewis_cfg, _, _ = ckpt_lib.load_checkpoint(path) # pytype: disable=attribute-error
    # Complete the eol configuration with lewis config options.
config = config.unlock()
config['speaker'] = lewis_cfg.speaker
# Add dummy values that are required to start LewisExperiment
config.training['target_update_ema'] = lewis_cfg.training.target_update_ema
config.evaluation['batch_size'] = lewis_cfg.evaluation.batch_size
config = config.lock()
# Step 2: Creates the lewis experiment to perform ease of learning.
super().__init__(mode=mode, init_rng=init_rng, config=config)
# Overrides the speaker params with loaded ckpt.
ckpt_params, ckpt_states = exp_state.params, exp_state.states
speaker_params = ckpt_params['speaker'][config.speaker_path.speaker_index]
speaker_states = ckpt_states['speaker'][config.speaker_path.speaker_index]
self._population_storage.restore(
params=dict(speaker=[speaker_params]),
states=dict(speaker=[speaker_states]))
def speaker_optimizer(self) -> optax.GradientTransformation:
"""Speaker params must be fixed => set the learning rate to zero."""
return optax.sgd(learning_rate=0.0)
def train_loop(
self,
config: config_dict.ConfigDict,
state,
periodic_actions: List[utils.PeriodicAction],
writer: Optional[utils.Writer] = None,
) -> None:
"""Overrides `train_loop` to collect the 'accuracy' output scalar values."""
class CollectAccuracies:
"""A helper that collects 'accuracy' output scalar values."""
def __init__(self) -> None:
self.collector_accuracies = []
def update_time(self, t: float, step: int) -> None:
del t, step # Unused.
def __call__(
self,
t: float,
step: int,
scalar_outputs: types.Config,
) -> None:
del t, step # Unused.
self.collector_accuracies.append(scalar_outputs['global_accuracy'])
collector = CollectAccuracies()
    # Note: `periodic_actions` arrives as a tuple rather than a list.
super().train_loop(
config=config,
state=state,
periodic_actions=list(periodic_actions) + [collector],
writer=writer,
)
# Fetches from device and stack the accuracy numbers.
accuracies = np.array(jax.device_get(collector.collector_accuracies))
logging.info('Ease of learning accuracies per listener %s', accuracies)
| emergent_communication_at_scale-main | eol_experiment.py |
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Constructs typing of different Objects needed in CIDRE."""
import enum
from typing import Any, Dict, List, NamedTuple, Optional, Union
import chex
import haiku as hk
from ml_collections import config_dict
import optax
Config = Union[Dict[str, Any], config_dict.ConfigDict]
RNNState = chex.ArrayTree
AllParams = Dict[str, List[Optional[hk.Params]]]
AllStates = Dict[str, List[Optional[hk.State]]]
AllOptStates = Dict[str, List[Optional[optax.OptState]]]
class TrainingMode(enum.Enum):
TRAINING = 'training'
EVAL = 'eval'
FORCING = 'forcing'
class ImitationMode(enum.Enum):
BEST = 'best'
RANDOM = 'random'
WORST = 'worst'
class ResetMode:
PAIR = 'pair'
SPEAKER = 'speaker'
LISTENER = 'listener'
class Task:
CLASSIFICATION = 'classification'
REGRESSION = 'regression'
MULTICLASSIFICATION = 'multiclassification'
LANDMARK = 'landmark'
ATTRIBUTE = 'attributes'
DISCRIMINATION = 'discrimination'
IMAGES = 'images'
class MeaningSimilarity:
INPUTS = 'inputs'
ATTRIBUTES = 'attributes'
class RewardType:
SUCCESS_RATE = 'success_rate'
LOG_PROB = 'log_prob'
class CoreType:
LSTM = 'lstm'
GRU = 'gru'
IDENTITY = 'identity'
class TorsoType:
DISCRETE = 'discrete'
MLP = 'mlp'
IDENTITY = 'identity'
class ListenerHeadType:
MLP = 'mlp'
CPC = 'cpc'
MULTIMLP = 'multi_mlp'
class SpeakerHeadType:
POLICY = 'policy'
POLICY_QVALUE = 'policy_q_value'
POLICY_QVALUE_DUELING = 'policy_q_value_dueling'
class ListenerHeadOutputs(NamedTuple):
predictions: chex.ArrayTree
targets: Optional[chex.ArrayTree] = None
class SpeakerHeadOutputs(NamedTuple):
policy_logits: chex.Array
q_values: Optional[chex.Array] = None
value: Optional[chex.Array] = None
class DuelingHeadOutputs(NamedTuple):
q_values: chex.Array
value: chex.Array
class Params(NamedTuple):
speaker: hk.Params
listener: hk.Params
target_speaker: Optional[hk.Params]
class States(NamedTuple):
speaker: hk.State
listener: hk.State
target_speaker: Optional[hk.State]
class OptStates(NamedTuple):
speaker: optax.OptState
listener: optax.OptState
class AgentProperties(NamedTuple):
params: hk.Params
opt_states: optax.OptState
states: hk.State
target_params: Optional[hk.Params] = None
target_states: Optional[hk.State] = None
class AllProperties(NamedTuple):
params: AllParams
states: AllStates
opt_states: AllOptStates
class SpeakerOutputs(NamedTuple):
action: chex.Array
action_log_prob: chex.Array
entropy: chex.Array
policy_logits: chex.Array
q_values: Optional[chex.Array] = None
value: Optional[chex.Array] = None
class ListenerOutputs(NamedTuple):
predictions: chex.ArrayTree
targets: Optional[chex.ArrayTree] = None
class AgentLossOutputs(NamedTuple):
loss: chex.Array
probs: chex.Array
stats: Config
class ListenerLossOutputs(NamedTuple):
loss: chex.Array
accuracy: chex.Array
probs: chex.Array
stats: Config
reward: Optional[chex.Array] = None
class SpeakerLossOutputs(NamedTuple):
loss: chex.Array
stats: Config
class ListenerLossType:
CLASSIF = 'classif'
CPC = 'cpc'
class SpeakerLossType:
REINFORCE = 'reinforce'
POLICYGRADIENT = 'policy_gradient'
class GamesInputs(NamedTuple):
speaker_inp: chex.Array
labels: Optional[chex.ArrayTree] = None
misc: Dict[str, Any] = dict() # to store debug information
class Games(NamedTuple):
speaker_inp: chex.Array
labels: Optional[chex.ArrayTree] = None
speaker_outputs: Optional[SpeakerOutputs] = None
target_speaker_outputs: Optional[SpeakerOutputs] = None
listener_outputs: Optional[ListenerOutputs] = None
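# A small usage sketch (illustrative only; the shapes and the 'attributes'
# label key are hypothetical) of how the containers above are typically used:
# a `GamesInputs` batch comes from the dataset, and a `Games` record is then
# progressively completed with speaker and listener outputs via `_replace`.
def _example_build_games() -> 'Games':
  import jax.numpy as jnp  # Local import to keep the sketch self-contained.
  inputs = GamesInputs(
      speaker_inp=jnp.zeros((2, 4)),  # [batch, features]
      labels={'attributes': jnp.zeros((2, 3), dtype=jnp.int32)},
  )
  games = Games(speaker_inp=inputs.speaker_inp, labels=inputs.labels)
  # Later stages would call, e.g., games._replace(speaker_outputs=...).
  return games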
| emergent_communication_at_scale-main | types.py |
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The actual logic of a agent."""
import functools
from typing import Tuple
import chex
import haiku as hk
import jax
from ml_collections import config_dict
from emergent_communication_at_scale import types
from emergent_communication_at_scale.losses import listeners as listener_losses
from emergent_communication_at_scale.losses import speakers as speaker_losses
from emergent_communication_at_scale.networks import listeners
from emergent_communication_at_scale.networks import speakers
class SpeakerListenerGame:
"""Plays a speaker/listener game with multi symbol."""
def __init__(self, config: config_dict.ConfigDict) -> None:
# Prepares constructor.
speaker = functools.partial(speakers.Speaker, **config.speaker)
listener = functools.partial(listeners.Listener, **config.listener)
# hk.transform requires lambda to be built a posteriori in a pmap
# pylint: disable=unnecessary-lambda
# pylint: disable=g-long-lambda
self._speaker = hk.transform_with_state(
lambda games, training_mode, actions_to_follow=None: speaker()(
games=games,
training_mode=training_mode,
actions_to_follow=actions_to_follow,
))
self._listener = hk.transform_with_state(
lambda games, training_mode: listener()(
games=games,
training_mode=training_mode,
))
# pylint: enable=unnecessary-lambda
# pylint: enable=g-long-lambda
if config.loss.get('speaker', False):
self._speaker_loss = speaker_losses.speaker_loss_factory(
**config.loss.speaker)
else:
# We do not have speaker loss in EOL
self._speaker_loss = None
self._listener_loss = listener_losses.listener_loss_factory(
**config.loss.listener)
self._config = config
@property
def speaker(self):
return self._speaker
@property
def listener_loss(self):
return self._listener_loss
def init(
self,
rng: chex.PRNGKey,
init_games: types.GamesInputs,
training_mode: types.TrainingMode,
) -> Tuple[types.Params, types.States]:
"""Returns speaker and listener params."""
games = types.Games(
speaker_inp=init_games.speaker_inp, labels=init_games.labels)
speaker_rng, target_speaker_rng, listener_rng = jax.random.split(rng, 3)
params_speaker, states_speaker = self._speaker.init(
speaker_rng,
games=games,
training_mode=training_mode,
)
speaker_outputs, _ = self._speaker.apply(
params_speaker,
states_speaker,
speaker_rng,
games=games,
training_mode=training_mode,
)
target_params_speaker, target_states_speaker = self._speaker.init(
target_speaker_rng,
games=games,
training_mode=types.TrainingMode.FORCING,
actions_to_follow=speaker_outputs.action,
)
games = games._replace(speaker_outputs=speaker_outputs)
params_listener, state_listener = self._listener.init(
listener_rng,
games=games,
training_mode=training_mode,
)
joint_states = types.States(
speaker=states_speaker,
listener=state_listener,
target_speaker=target_states_speaker)
joint_params = types.Params(
speaker=params_speaker,
listener=params_listener,
target_speaker=target_params_speaker,
)
return joint_params, joint_states
def unroll(
self,
params: types.Params,
states: types.States,
rng: chex.PRNGKey,
games: types.GamesInputs,
training_mode: types.TrainingMode,
) -> types.Games:
"""Unrolls the game for the forward pass."""
# Prepares output.
speaker_rng, listener_rng, rng = jax.random.split(rng, 3)
games = types.Games(speaker_inp=games.speaker_inp, labels=games.labels)
# Step1 : Speaker play.
speaker_outputs, _ = self._speaker.apply(
params.speaker,
states.speaker,
speaker_rng,
games=games,
training_mode=training_mode,
)
target_speaker_outputs, _ = self._speaker.apply(
params.target_speaker,
states.target_speaker,
speaker_rng,
games=games,
training_mode=types.TrainingMode.FORCING,
actions_to_follow=speaker_outputs.action,
)
games = games._replace(
speaker_outputs=speaker_outputs,
target_speaker_outputs=target_speaker_outputs,
)
# Step 2 : Listener play.
listener_outputs, _ = self._listener.apply(
params.listener,
states.listener,
listener_rng,
games=games,
training_mode=training_mode,
)
games = games._replace(listener_outputs=listener_outputs)
return games
def compute_loss(
self,
games: types.Games,
rng: chex.PRNGKey,
) -> types.AgentLossOutputs:
"""Computes Listener and Speaker losses."""
# Computes listener loss and stats.
listener_loss_outputs = self._listener_loss.compute_listener_loss(
games=games,
rng=rng,
)
loss = listener_loss_outputs.loss
stats = listener_loss_outputs.stats
# Computes speaker loss and stats. (if necessary).
if self._speaker_loss is not None:
speaker_loss_outputs = self._speaker_loss.compute_speaker_loss(
games=games,
reward=listener_loss_outputs.reward,
)
loss += speaker_loss_outputs.loss
stats.update(speaker_loss_outputs.stats)
return types.AgentLossOutputs(
loss=loss,
probs=listener_loss_outputs.probs,
stats=stats,
)
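# A minimal sketch (using a toy haiku module, not the actual Speaker/Listener
# networks) of the `hk.transform_with_state` init/apply pattern relied on
# above: `init` returns (params, state) and `apply` returns (outputs, state).
def _example_transform_with_state() -> None:
  import jax.numpy as jnp  # Local import to keep the sketch self-contained.

  def _forward(x):
    # A stateful toy network: a counter kept in haiku state plus a Linear.
    counter = hk.get_state('counter', shape=(), dtype=jnp.int32, init=jnp.zeros)
    hk.set_state('counter', counter + 1)
    return hk.Linear(3)(x)

  net = hk.transform_with_state(_forward)
  rng = jax.random.PRNGKey(0)
  x = jnp.ones((2, 5))
  params, state = net.init(rng, x)
  outputs, new_state = net.apply(params, state, rng, x)
  # new_state now carries the incremented counter alongside the outputs.
  assert outputs.shape == (2, 3)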
| emergent_communication_at_scale-main | agent.py |
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for emergent_communication_at_scale.ease_of_learning."""
from absl.testing import absltest
from jaxline import platform
from jaxline import train
from jaxline import utils
from emergent_communication_at_scale import eol_experiment
from emergent_communication_at_scale import lewis_experiment
from emergent_communication_at_scale.configs import ease_of_learning_config
from emergent_communication_at_scale.configs import lewis_config
class EaseOfLearningTest(absltest.TestCase):
def test_ease_learning(self):
# Performs Experiment training to build a checkpoint for ease of learning.
config = lewis_config.get_config('debug')
config.training_steps = 1
xp_config = config.experiment_kwargs.config
xp_config.game.name = 'dummy' # Use dummy dataset.
# Defines smaller architectures for quick testing.
config.length = 5
xp_config.speaker.core_config.core_kwargs.hidden_size = 8
xp_config.listener.core_config.core_kwargs.hidden_size = 8
xp_config.listener.head_config.head_kwargs.hidden_sizes = [8]
checkpointer = platform.create_checkpointer(config, 'train')
writer = platform.create_writer(config, 'train')
## Creates a `dummy` checkpoint for the ease of learning experiment.
temp_dir = self.create_tempdir().full_path
xp_config.checkpointing.checkpoint_dir = temp_dir
train.train(
lewis_experiment.LewisExperiment,
config,
checkpointer,
writer)
# Ease of learning test.
utils.GLOBAL_CHECKPOINT_DICT = {} # Overrides jaxline global chkpt dict.
config = ease_of_learning_config.get_config('debug')
config.training_steps = 1
xp_config = config.experiment_kwargs.config
xp_config.game.name = 'dummy'
# Defines smaller architectures for quick testing.
config.length = 5
xp_config.listener.core_config.core_kwargs.hidden_size = 8
xp_config.listener.head_config.head_kwargs.hidden_sizes = [8]
xp_config.speaker_path.path = f'{temp_dir}/agents.pkl'
train.train(eol_experiment.EaseOfLearningExperiment, config,
checkpointer, writer) # Uses same checkpointer and writer.
if __name__ == '__main__':
absltest.main()
| emergent_communication_at_scale-main | eol_experiment_test.py |
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Emergent Communication jaxline experiment."""
from typing import List, Tuple
from absl import flags
from absl import logging
import chex
import haiku as hk
import jax
import jax.numpy as jnp
from jaxline import utils
from ml_collections import config_dict
import numpy as np
import optax
from emergent_communication_at_scale import agent
from emergent_communication_at_scale import types
from emergent_communication_at_scale.game import game_factory
from emergent_communication_at_scale.trainers import communication_trainer
from emergent_communication_at_scale.trainers import imitation_trainer
from emergent_communication_at_scale.trainers import reset_trainer
from emergent_communication_at_scale.utils import experiment_with_checkpointing as jaxline_ckpt
from emergent_communication_at_scale.utils import language_measures
from emergent_communication_at_scale.utils import population_storage as ps
from emergent_communication_at_scale.utils import utils as emcom_utils
# This file should only include langame and jaxline dependencies!
FLAGS = flags.FLAGS
# A split helper that operates on a pmap-ed rng_key.
@jax.pmap
def _split_three_keys_pmap(key):
return tuple(jax.random.split(key, num=3))
class LewisExperiment(jaxline_ckpt.ExperimentWithCheckpointing):
"""Cidre experiment.
Note: we inherit from ExperimentWithCheckpointing to abstract the
checkpointing mechanism that is entangled in jaxline.
Beware that ExperimentWithCheckpointing inherits from
experiment.AbstractExperiment in jaxline.
"""
def __init__(
self,
mode: str,
init_rng: chex.PRNGKey,
config: config_dict.ConfigDict,
) -> None:
"""Initializes experiment."""
super().__init__(mode=mode, init_rng=init_rng, config=config)
self._mode = mode
self._init_rng = init_rng
# ConfigDict are not completely resolved when a reference is given inside
# a structure such as a list or a tuple.
self._config = emcom_utils.resolve_dictionary(config)
# By default, we do not use a population
pop_config = self._config.population
self._n_speakers = pop_config.get('n_speakers', 1)
self._n_listeners = pop_config.get('n_listeners', 1)
self._num_agents_per_step = pop_config.get('num_agents_per_step', 1)
# Prepares games.
self._game = agent.SpeakerListenerGame(config=self._config)
self._game_builder = game_factory.get(
config=self._config.game,
train_batch_size=self._config.training.batch_size,
eval_batch_size=self._config.evaluation.batch_size,
)
# Prepares parameters.
self._population_storage = ps.PopulationStorage(
n_speakers=self._n_speakers,
n_listeners=self._n_listeners,
)
# Train vs. Eval.
if self._mode == 'train':
# Constructs the input dataset that will be prefetched in background.
self._train_input = utils.py_prefetch(
lambda: self._game_builder.get_training_games(self._init_rng),
buffer_size=10,
)
### Lewis trainer
# Constructs the trainer that sample and update agents pairs.
self._communication_trainer = communication_trainer.BasicTrainer(
update_fn=self._update_fn,
n_speakers=self._n_speakers,
n_listeners=self._n_listeners,
num_agents_per_step=self._num_agents_per_step)
### Imitation trainer.
if self._config.imitation and self._config.imitation.imitation_step:
# Checks config values.
if not self._config.imitation.self_imitation and self._n_speakers < 2:
raise ValueError('Invalid imitation config: n_speakers must be larger'
' than one.')
if self._config.imitation.self_imitation and self._n_speakers != 1:
raise ValueError('Invalid imitation config: n_speaker must be equal'
' to one for self-imitation.')
# Cases where we perform imitation training.
logging.info('Training option: apply imitation.')
self._imitation_trainer = imitation_trainer.ImitateTrainer(
n_speakers=self._n_speakers,
imitation_update_fn=self._imitation_update_fn,
)
else:
# Cases where we do not perform imitation training.
logging.info('Training option: Do not apply imitation.')
self._imitation_trainer = None
### Resets trainer.
if config.reset and self._config.reset.reset_step:
logging.info('Training option: apply resetting.')
self._reset_trainer = reset_trainer.ResetTrainer(
n_speakers=self._n_speakers,
n_listeners=self._n_listeners,
)
else:
# Cases where we do not perform resetting.
logging.info('Training option: Do not apply resetting.')
self._reset_trainer = None
# Initializes network/optim param/states.
games = next(self._game_builder.get_training_games(init_rng))
self._population_storage.initialize(
rng_key=init_rng,
games=games,
game_init_fn=self._game.init,
opt_speaker_init_fn=self.speaker_optimizer().init,
opt_listener_init_fn=self.listener_optimizer().init,
)
else:
self._eval_batch = jax.jit(self._eval_batch)
self._communication_trainer = None
self._imitation_trainer = None
self._reset_trainer = None
def listener_optimizer(self) -> optax.GradientTransformation:
return self.create_optimizer(self._config.listener_optimizer)
def speaker_optimizer(self) -> optax.GradientTransformation:
return self.create_optimizer(self._config.speaker_optimizer)
def create_optimizer(
self,
config: config_dict.ConfigDict,
) -> optax.GradientTransformation:
name = config.name
kwargs = config.kwargs.get(name, dict())
optimizer = getattr(optax, name)
return optimizer(config.learning_rate, **kwargs)
# _ _
# | |_ _ __ __ _(_)_ __
# | __| '__/ _` | | '_ \
# | |_| | | (_| | | | | |
# \__|_| \__,_|_|_| |_|
#
def step(
self,
global_step: chex.ArrayNumpy,
rng: chex.PRNGKey,
**unused_args,
) -> types.Config:
"""A single training step."""
games = next(self._train_input)
rng_communicate, rng_imitate, rng_reset = _split_three_keys_pmap(rng)
# Performs one step of population training.
# Population trainer sample agents pair before `_update_func` per pair.
scalars, self._population_storage = self._communication_trainer.communicate(
rng=rng_communicate,
games=games,
agent_storage=self._population_storage,
)
global_step = utils.get_first(global_step)
# Imitation learning every imitation_step steps.
if (self._imitation_trainer and global_step > 0 and
global_step % self._config.imitation.imitation_step == 0):
imit_scalar, self._population_storage = self._imitation_trainer.imitate(
rng=rng_imitate,
games=games,
agent_storage=self._population_storage,
**self._config.imitation,
)
scalars.update(imit_scalar)
# Reset step.
if (self._reset_trainer and global_step > 0 and
global_step % self._config.reset.reset_step == 0):
self._population_storage = self._reset_trainer.reset(
rng=rng_reset,
games=games,
agent_storage=self._population_storage,
game_init_fn=self._game.init,
opt_speaker_init_fn=self.speaker_optimizer().init,
opt_listener_init_fn=self.listener_optimizer().init,
reset_type=self._config.reset.reset_type,
)
# Returns the scalar of the last random pair.
return scalars
def _update_fn(
self,
params: types.Params,
states: types.States,
opt_states: types.OptStates,
games: types.GamesInputs,
rng: chex.PRNGKey,
training_mode: types.TrainingMode,
is_sharded_update: bool = True,
) -> Tuple[types.Params, types.States, types.OptStates, types.Config]:
"""Applies an update to parameters and returns new state.
Args:
params: The current (speaker, listener) params to update.
states: The current (speaker, listener) states.
opt_states: The current optimizer state for speaker and listener.
games: The input batch of games to learn on.
rng: The random key.
training_mode: defines the training mode (TRAINING=sampling, EVAL=greedy).
is_sharded_update: If set, the code assumes it's running within the
context of a pmap, and thus would use jax.lax.pxxx functions to average
gradients or measurements across chips/shards.
Returns:
new_params: The updated params.
new_states: The updated state.
new_opt_states: The updated optimizer state.
scalars: A dict of scalar measurements to log.
"""
grad_loss_fn = jax.grad(self._loss_fn, has_aux=True)
grads, agent_loss_stats = grad_loss_fn(
params,
states=states,
games=games,
rng=rng,
training_mode=training_mode,
)
if is_sharded_update:
# grad_loss_fn outputs the grads divided by the number of devices
# (jax.device_count()). We apply the psum to get the mean across devices.
grads = jax.lax.psum(grads, axis_name='i')
# Computes and applies updates via our optimizer.
_, speaker_opt_update = self.speaker_optimizer()
_, listener_opt_update = self.listener_optimizer()
speaker_updates, new_opt_state_speaker = speaker_opt_update(
grads.speaker, opt_states.speaker)
new_params_speaker = optax.apply_updates(params.speaker, speaker_updates)
listener_updates, new_opt_state_listener = listener_opt_update(
grads.listener, opt_states.listener)
new_params_listener = optax.apply_updates(params.listener, listener_updates)
new_target_params = emcom_utils.update_target_params(
rl_params=new_params_speaker,
target_rl_params=params.target_speaker,
target_network_update_ema=self._config.training.target_update_ema,
)
new_params = types.Params(
speaker=new_params_speaker,
listener=new_params_listener,
target_speaker=new_target_params)
new_opt_states = types.OptStates(
speaker=new_opt_state_speaker, listener=new_opt_state_listener)
# Scalars to log (note: we log the mean across all hosts/devices).
scalars = jax.tree_map(lambda x: x / games.speaker_inp.shape[0],
agent_loss_stats)
if is_sharded_update:
scalars = jax.lax.pmean(scalars, axis_name='i')
### Stores the score of the individual speakers inside the state
# Retrieves speaker states.
speaker_state = states.speaker
counter = speaker_state['speaker']['counter']
avg_score = speaker_state['speaker']['avg_score']
# Updates speaker by computing the average score.
mutable_state = hk.data_structures.to_mutable_dict(speaker_state)
mutable_state['speaker']['avg_score'] = (counter * avg_score) / (
counter + 1) + scalars['global_accuracy'] / (
counter + 1)
mutable_state['speaker']['counter'] += 1
speaker_state = hk.data_structures.to_haiku_dict(mutable_state)
# Updates states across devices.
speaker_state = jax.lax.pmean(speaker_state, axis_name='i')
new_states = states._replace(speaker=speaker_state)
return new_params, new_states, new_opt_states, scalars
def _loss_fn(
self,
params: types.Params,
states: types.States,
games: types.GamesInputs,
rng: chex.PRNGKey,
training_mode: types.TrainingMode,
):
rng_unroll, rng_loss = jax.random.split(rng)
games = self._game.unroll(
params,
states,
rng=rng_unroll,
games=games,
training_mode=training_mode,
)
agent_loss_outputs = self._game.compute_loss(games=games, rng=rng_loss)
avg_fn = lambda x: x / games.speaker_inp.shape[0]
scaled_loss = avg_fn(agent_loss_outputs.loss) / jax.device_count()
return scaled_loss, agent_loss_outputs.stats
def _imitation_update_fn(
self,
games: types.GamesInputs,
params_student: hk.Params,
params_oracle: hk.Params,
state_student: hk.State,
state_oracle: hk.State,
opt_state: optax.OptState,
rng: chex.PRNGKey,
):
# Gets labels (output of the oracle).
games = types.Games(speaker_inp=games.speaker_inp, labels=games.labels)
# rng not used as training_mode=EVAL.
oracle_outputs, _ = self._game.speaker.apply(
params_oracle,
state_oracle,
rng,
games=games,
training_mode=types.TrainingMode.EVAL,
)
# Computes gradient.
grad_supervised_loss_fn = jax.grad(self._supervised_loss_fn, has_aux=True)
scaled_grads, loss = grad_supervised_loss_fn(
params_student,
state_student,
labels=jax.lax.stop_gradient(oracle_outputs.action),
games=games,
rng=rng)
grads = jax.lax.psum(scaled_grads, axis_name='i')
# Computes and applies updates via our optimizer.
_, speaker_opt_update = self.speaker_optimizer()
speaker_updates, new_opt_state_speaker = speaker_opt_update(
grads, opt_state)
new_params_speaker = optax.apply_updates(params_student, speaker_updates)
# Scalars to log (note: we log the mean across all hosts/devices).
scalars = loss / games.speaker_inp.shape[0]
scalars = jax.lax.pmean(scalars, axis_name='i')
return new_params_speaker, state_student, new_opt_state_speaker, scalars
def _supervised_loss_fn(
self,
params_student: hk.Params,
state_student: hk.Params,
labels: chex.Array,
games: types.GamesInputs,
rng: chex.PRNGKey,
):
prediction_outputs, _ = self._game.speaker.apply(
params_student,
state_student,
rng,
games=games,
training_mode=types.TrainingMode.TRAINING,
)
logits = jnp.transpose(prediction_outputs.policy_logits, [0, 2, 1])
labels = jax.nn.one_hot(
labels, self._config.speaker.vocab_size, dtype=logits.dtype)
# [B, T]
loss = emcom_utils.softmax_cross_entropy(logits, labels)
# Average on T and sum on B
loss = jnp.sum(jnp.mean(loss, axis=-1), axis=0)
avg_fn = lambda x: x / logits.shape[0]
scaled_loss = avg_fn(loss) / jax.device_count()
return scaled_loss, loss
# _
# _____ ____ _| |
# / _ \ \ / / _` | |
# | __/\ V / (_| | |
# \___| \_/ \__,_|_|
#
def evaluate(
self,
global_step: chex.ArrayNumpy,
rng: chex.PRNGKey,
**unused_kwargs,
) -> types.Config:
"""See base class."""
# Gives a mode equal to either 'test' or 'valid'.
# Gives an ensemble_type in [vote, average].
_, mode, ensemble_type = self._mode.split('_')
# Computes metrics over the evaluation games.
game_scalars, messages = self._eval_over_games(mode, ensemble_type, rng)
# Computes metrics by iterating over concepts.
# It is only computed over speaker message, independently of listener.
message_scalars = self._eval_over_messages(messages)
# Fuses and formats scalars.
scalars = {**game_scalars, **message_scalars}
scalars = jax.device_get(scalars)
logging.info('Eval [Step %d] %s', global_step, scalars)
return scalars
def _eval_over_games(
self,
mode: str,
ensemble_type: str,
rng: chex.PRNGKey,
) -> Tuple[types.Config, List[chex.Array]]:
# Eval at most the self._config.evaluation.max_n_agents first agents.
n_speakers = np.min(
[self._n_speakers, self._config.evaluation.max_n_agents])
n_listeners = np.min(
[self._n_listeners, self._config.evaluation.max_n_agents])
# Initializes values.
num_games, sum_scalars = 0, None
topographic_similarity = []
messages = [[] for _ in range(n_speakers)]
# Prepares subsampling.
subsampling_ratio = self._config.evaluation.subsampling_ratio
assert 0.01 <= subsampling_ratio <= 1
for samples in self._game_builder.get_evaluation_games(mode):
for speaker_id in range(n_speakers):
all_agents_outputs = []
for listener_id in range(n_listeners):
# Retrieves params.
params, states, _ = self._population_storage.load_pair(
speaker_id=speaker_id, listener_id=listener_id)
params = utils.get_first(params) # eval is on single device only
states = utils.get_first(states) # eval is on single device only
# Play game.
# rng is not used at eval time.
agent_outputs, games = self._eval_batch(
params=params, states=states, games=samples, rng=rng)
all_agents_outputs.append(agent_outputs)
# Computes scalar by averaging all listeners.
ensemble_scalars = self._eval_all_listeners(
ensemble_type=ensemble_type,
predictions=all_agents_outputs,
games=games,
)
# Saves ensemble stats and stats for the last listener (one pair).
scalars = {**ensemble_scalars, **agent_outputs.stats}
# Updates counters.
num_games += games.speaker_inp.shape[0]
# Accumulates the sum of scalars for each step.
if sum_scalars is None:
sum_scalars = scalars
else:
sum_scalars = jax.tree_multimap(jnp.add, scalars, sum_scalars)
# Computes message statistics. As it is independent of the listener,
# we arbitrarily take the last listener.
slices = max(3, int(games.speaker_inp.shape[0] * subsampling_ratio))
# Takes only the first slices examples.
slice_games = jax.tree_map(lambda x, y=slices: x[:y], games)
topographic_similarity += [
language_measures.games_topographic_similarity(
games=slice_games,
meaning_sim=self._config.evaluation.topsim_meaning_similarity,
task=self._config.evaluation.topsim_task,
)
]
# Stores message for end-game analysis.
messages[speaker_id].append(games.speaker_outputs.action)
# Averages per number of total games (both wrt batches and populations).
avg_scalars = jax.tree_map(lambda x: x / num_games, sum_scalars)
avg_scalars['topographic_similarity'] = np.mean(topographic_similarity)
# stacks messages into a single batch.
messages = [np.concatenate(m, axis=0) for m in messages]
return avg_scalars, messages
def _eval_all_listeners(
self,
ensemble_type: str,
predictions: List[types.AgentLossOutputs],
games: types.Games,
):
if ensemble_type == 'vote':
probs = [x.probs for x in predictions]
# Stacks leaves of probs, which can be a list of dictionaries for classif.
stacked_pred = jax.tree_multimap(lambda *vals: np.stack(vals, axis=-1),
*probs) # [B, F, listeners]
avg_prediction = jax.tree_map(lambda x: jnp.mean(x, axis=-1),
stacked_pred) # [B, F]
ensemble_pred = jax.tree_map(lambda x: jnp.argmax(x, axis=-1),
avg_prediction) # [B]
scalars = self._game.listener_loss.compute_ensemble_accuracy(
prediction=ensemble_pred, games=games)
elif ensemble_type == 'average':
accuracies = jnp.array([x.stats['global_accuracy'] for x in predictions])
scalars = dict(ensemble_acc=jnp.mean(accuracies))
else:
raise ValueError(f'Wrong ensemble type: {ensemble_type}.')
return scalars
def _eval_batch(
self,
params: types.Params,
states: types.States,
games: types.GamesInputs,
rng: chex.PRNGKey,
) -> Tuple[types.AgentLossOutputs, types.Games]:
finished_game = self._game.unroll(
params,
states,
rng=rng,
games=games,
training_mode=types.TrainingMode.EVAL,
)
agent_loss_outputs = self._game.compute_loss(games=finished_game,
rng=jax.random.PRNGKey(42))
return agent_loss_outputs, finished_game
def _eval_over_messages(self, messages: List[chex.Array]) -> types.Config:
# Computes edit distance between messages from different speakers.
edit_distance = []
message_per_games = np.stack(messages, axis=1) # [n_games, n_speaker, T]
for message in message_per_games:
# These messages are from the same game, and thus encode the same concept.
edit_dist = language_measures.edit_dist(message)
edit_dist = np.mean(edit_dist)
edit_distance.append(edit_dist)
return dict(edit_distance=np.mean(edit_distance))
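# A tiny numeric sketch (illustrative values only) of the running-average
# update used in `_update_fn` to track each speaker's score inside its haiku
# state: given a mean `avg` over `counter` past scores and a new score `s`,
# the new mean is (counter * avg) / (counter + 1) + s / (counter + 1).
def _example_running_average() -> None:
  scores = [0.2, 0.4, 0.9]
  avg, counter = 0.0, 0
  for score in scores:
    avg = (counter * avg) / (counter + 1) + score / (counter + 1)
    counter += 1
  assert abs(avg - sum(scores) / len(scores)) < 1e-9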
| emergent_communication_at_scale-main | lewis_experiment.py |
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Specifies which experiment to launch."""
from absl import app
from absl import flags
import jax
from jaxline import platform
from emergent_communication_at_scale import eol_experiment
from emergent_communication_at_scale import lewis_experiment
from emergent_communication_at_scale.utils import eval_utils
FLAGS = flags.FLAGS
def main(argv):
flags.mark_flag_as_required('config')
config = FLAGS.config
if config.experiment == 'lewis':
# Training
platform.main(lewis_experiment.LewisExperiment, argv)
# Evaluation
eval_utils.evaluate_final(config,
mode='eval_test_average',
rng=jax.random.PRNGKey(42)) # Deterministic eval
elif config.experiment == 'ease_of_learning':
platform.main(eol_experiment.EaseOfLearningExperiment, argv)
else:
raise ValueError(f'{config.experiment} not recognized. '
'Only lewis and ease_of_learning are supported')
if __name__ == '__main__':
app.run(main)
| emergent_communication_at_scale-main | main.py |
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Listener losses."""
import abc
from typing import Any, Dict
import chex
import jax
import jax.numpy as jnp
from emergent_communication_at_scale import types
from emergent_communication_at_scale.utils import utils as emcom_utils
class ListenerLoss(abc.ABC):
"""Abstract class implementing the listener loss."""
def __init__(self, reward_type: types.RewardType) -> None:
self._reward_type = reward_type
@abc.abstractmethod
def compute_ensemble_accuracy(
self,
prediction: chex.Array,
games: types.Games,
) -> Dict[str, Any]:
pass
def compute_listener_loss(
self,
rng: chex.PRNGKey,
games: types.Games,
) -> types.ListenerLossOutputs:
"""Computes the Listener loss."""
# Computes the loss.
output = self._compute_listener_loss(rng=rng, games=games)
# Turns the loss into reward.
if self._reward_type == types.RewardType.LOG_PROB:
reward = -output.loss # [B]
elif self._reward_type == 'success_rate':
reward = output.accuracy # [B]
else:
raise ValueError(f'Invalid reward type {self._reward_type}. '
'Should be one of [log_prob, success_rate].')
# Makes the reward non-differentiable.
reward = jax.lax.stop_gradient(reward) # [B]
# Computes global loss and accuracies.
global_loss = jnp.sum(output.loss, axis=0)
global_accuracy = jnp.sum(output.accuracy, axis=0)
# Adds global metrics to stats.
stats = {
'listener_loss': global_loss,
'global_accuracy': global_accuracy,
**output.stats
}
return types.ListenerLossOutputs(
loss=global_loss,
probs=output.probs,
accuracy=global_accuracy,
reward=reward,
stats=stats,
)
@abc.abstractmethod
def _compute_listener_loss(
self,
rng: chex.PRNGKey,
games: types.Games,
) -> types.ListenerLossOutputs:
pass
def listener_loss_factory(
loss_type: types.ListenerLossType,
kwargs: types.Config,
**common_kwargs,
) -> ListenerLoss:
"""Factory to select the listener's loss."""
loss_specific_kwargs = kwargs.get(loss_type, dict())
all_kwargs = {**common_kwargs, **loss_specific_kwargs}
if loss_type == types.ListenerLossType.CLASSIF:
listener_loss = ClassificationListenerLoss(**all_kwargs)
elif loss_type == types.ListenerLossType.CPC:
listener_loss = CpcListenerLoss(**all_kwargs)
else:
raise ValueError(f'Incorrect listener loss type {loss_type}.')
return listener_loss
class ClassificationListenerLoss(ListenerLoss):
"""Class implementing the classification loss."""
def __init__(
self,
reward_type: types.RewardType,
task: types.Task,
) -> None:
super().__init__(reward_type=reward_type)
self._task = task
def compute_ensemble_accuracy(
self,
prediction: chex.ArrayTree,
games: types.Games,
) -> types.Config:
"""Compute accuracy given a prediction."""
assert self._task in games.labels
labels = games.labels[self._task] # {str: [B, F]}
# Iterates over the attribute to compute an accuracy per attribute.
accuracy_per_attr = jax.tree_map(lambda x, y: x == jnp.argmax(y, axis=-1),
prediction, labels) # {str: [B]}
accuracy = jnp.stack(jax.tree_leaves(accuracy_per_attr)) # [|{}|, B]
accuracy = jnp.mean(accuracy, axis=0) # [B]
return dict(ensemble_acc=jnp.sum(accuracy, axis=0))
def _compute_listener_loss(
self,
rng: chex.PRNGKey,
games: types.Games,
) -> types.ListenerLossOutputs:
"""Computes the Listener loss."""
del rng # Deterministic loss
predictions = games.listener_outputs.predictions # {str: [B, F]}
assert self._task in games.labels
labels = games.labels[self._task] # {str: [B, F]}
# Iterates over the attribute to compute an accuracy per attribute.
accuracy_per_attr = jax.tree_map(
lambda x, y: jnp.argmax(x, axis=-1) == jnp.argmax(y, axis=-1),
predictions, labels) # {str: [B]}
global_accuracy = jnp.stack(
jax.tree_leaves(accuracy_per_attr), axis=0) # [|{}|, B]
global_accuracy = jnp.mean(global_accuracy, axis=0) # [B]
listener_probs = jax.tree_map(jax.nn.softmax, predictions)
listener_loss = jax.tree_map(emcom_utils.softmax_cross_entropy, predictions,
labels) # {str: [B]}
listener_loss = jnp.stack(
jax.tree_leaves(listener_loss), axis=0) # [|{}|, B]
listener_loss = jnp.mean(listener_loss, axis=0) # [B]
# Sums over the batch size.
accuracy_per_attr = jax.tree_map(jnp.sum, accuracy_per_attr)
stats = {f'accuracy_{k}': v for k, v in accuracy_per_attr.items()}
return types.ListenerLossOutputs(
loss=listener_loss,
probs=listener_probs,
accuracy=global_accuracy,
stats=stats,
)
class CpcListenerLoss(ListenerLoss):
"""Class implementing the CPC loss."""
def __init__(
self,
reward_type: types.RewardType,
num_distractors: int,
cross_device: bool,
) -> None:
super().__init__(reward_type=reward_type)
self._num_distractors = num_distractors
self._cross_device = cross_device
def compute_ensemble_accuracy(self, prediction, games):
"""Computes accuracy given a prediction."""
del games
effective_batchsize = prediction.shape[0]
num_distractors = self._num_distractors
if num_distractors >= (effective_batchsize - 1):
num_distractors = -1
if num_distractors == -1:
accuracy = (prediction == jnp.arange(effective_batchsize))
else:
accuracy = (prediction == 0)
# Transforms accuracy from bool to integer.
accuracy = accuracy * 1
return dict(ensemble_acc=jnp.sum(accuracy, axis=0))
def _compute_listener_loss(
self,
rng: chex.PRNGKey,
games: types.Games,
) -> types.ListenerLossOutputs:
"""Computes CPC loss."""
effective_batchsize, feature_dim = games.listener_outputs.targets.shape
# Warning: at evaluation time, batch size is small.
# Use all the batch as distractors at eval time.
if self._num_distractors >= (effective_batchsize - 1):
self._num_distractors = -1
if self._num_distractors == -1:
# Computes CPC on the full batch.
predictions = games.listener_outputs.predictions
targets = games.listener_outputs.targets
batch_indices = jnp.arange(effective_batchsize)
# If we are on multiple devices we have to gather targets from other
# devices and offset batch indices by the device id.
# We do not pmap the init to gain compilation time so we do not gather
# across devices at init.
if jax.device_count() > 1 and self._cross_device:
targets = jax.lax.all_gather(
targets, axis_name='i') # Num_devices, B, F
targets = targets.reshape(-1, feature_dim) # Large_Batch, F
global_batch_indices = batch_indices + jax.lax.axis_index(
'i') * effective_batchsize
else:
global_batch_indices = batch_indices
cosine_sim = -emcom_utils.cosine_loss(predictions[:, None, :],
targets[None, :, :])
listener_probs = jax.nn.softmax(cosine_sim, axis=-1)
listener_loss = -jax.nn.log_softmax(
cosine_sim, axis=-1)[batch_indices, global_batch_indices]
accuracy = (jnp.argmax(cosine_sim, axis=-1) == global_batch_indices)
else:
# Computes CPC on a predefined number of distractors.
batch_distractors = []
for i in range(effective_batchsize):
key_rng, rng = jax.random.split(rng)
potential_distractors_idx = list(range(effective_batchsize))
potential_distractors_idx.remove(i)
distractor_idx = jax.random.choice(
key_rng,
jnp.array(potential_distractors_idx),
shape=[self._num_distractors],
replace=False)
distractors = jnp.take(
games.listener_outputs.targets, distractor_idx, axis=0)
target = games.listener_outputs.targets[i:(i + 1)]
batch_distractors.append(jnp.concatenate([target, distractors], axis=0))
targets = jnp.stack(batch_distractors, axis=0)
cosine_sim = -emcom_utils.cosine_loss(
games.listener_outputs.predictions[:, None, :], targets)
listener_probs = jax.nn.softmax(cosine_sim, axis=-1)
# By construction the target is in position 0.
listener_loss = -jax.nn.log_softmax(cosine_sim, axis=-1)[:, 0]
accuracy = (jnp.argmax(cosine_sim, axis=-1) == 0)
# Transforms accuracy from bool to integer.
accuracy = accuracy * 1
return types.ListenerLossOutputs(
loss=listener_loss,
probs=listener_probs,
accuracy=accuracy,
stats=dict(),
)
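# A minimal numeric sketch (with made-up two-dimensional embeddings) of the
# full-batch CPC objective implemented above: each prediction should be most
# similar to its own target, so the "label" of row i is column i of the
# cosine-similarity matrix.
def _example_full_batch_cpc() -> None:
  predictions = jnp.array([[1.0, 0.0], [0.0, 1.0], [0.7, 0.7]])
  targets = jnp.array([[0.9, 0.1], [0.1, 0.9], [0.6, 0.6]])
  cosine_sim = -emcom_utils.cosine_loss(predictions[:, None, :],
                                        targets[None, :, :])  # [B, B]
  batch_indices = jnp.arange(predictions.shape[0])
  loss = -jax.nn.log_softmax(cosine_sim, axis=-1)[batch_indices, batch_indices]
  accuracy = jnp.argmax(cosine_sim, axis=-1) == batch_indices
  assert loss.shape == (3,)
  assert bool(accuracy.all())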
| emergent_communication_at_scale-main | losses/listeners.py |
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Speaker losses."""
import abc
import chex
import jax
import jax.numpy as jnp
import rlax
from emergent_communication_at_scale import types
class SpeakerLoss(abc.ABC):
"""Abstract class implementing speaker losses."""
@abc.abstractmethod
def compute_speaker_loss(
self,
games: types.Games,
reward: chex.Array,
) -> types.SpeakerLossOutputs:
pass
def speaker_loss_factory(
loss_type: types.SpeakerLossType,
kwargs: types.Config,
**common_kwargs,
) -> SpeakerLoss:
"""Speaker loss factory."""
loss_specific_kwargs = kwargs.get(loss_type, dict())
all_kwargs = {**common_kwargs, **loss_specific_kwargs}
if loss_type == types.SpeakerLossType.REINFORCE:
speaker_loss = ReinforceSpeakerLoss(**all_kwargs)
elif loss_type == types.SpeakerLossType.POLICYGRADIENT:
speaker_loss = PolicyGradientSpeakerLoss(**all_kwargs)
else:
raise ValueError(f'Incorrect speaker loss type {loss_type}.')
return speaker_loss
class ReinforceSpeakerLoss(SpeakerLoss):
"""Class implementing the reinforce loss for the speaker."""
def __init__(
self,
speaker_entropy: float,
speaker_kl_target: float,
use_baseline: bool = False,
) -> None:
self._speaker_entropy = speaker_entropy
self._speaker_kl_target = speaker_kl_target
self._use_baseline = use_baseline
def compute_speaker_loss(
self,
games: types.Games,
reward: chex.Array,
) -> types.SpeakerLossOutputs:
"""Computes the reinforce loss."""
if self._use_baseline:
assert games.speaker_outputs.value is not None
# Transforms to [T, B, F] receives [B, F, T].
value = jnp.transpose(games.speaker_outputs.value, [2, 0, 1])
value = jnp.squeeze(value, axis=-1)
else:
value = 0.0
# Transforms to [T, B] receives [B, T].
action_log_prob = jnp.transpose(games.speaker_outputs.action_log_prob,
[1, 0])
entropy = jnp.transpose(games.speaker_outputs.entropy, [1, 0])
# Policy loss via Reinforce.
sg_value = jax.lax.stop_gradient(value)
policy_loss = -jnp.mean((reward - sg_value) * action_log_prob, axis=0)
policy_loss = jnp.sum(policy_loss, axis=0)
entropy = jnp.sum(jnp.mean(entropy, axis=0), axis=0)
entropy_loss = -entropy * self._speaker_entropy
if self._use_baseline:
value_loss = jnp.mean(jnp.square(reward - value), axis=0)
value_loss = jnp.sum(value_loss, axis=0)
value_stats = jnp.sum(jnp.mean(value, axis=0), axis=0)
else:
value_loss = 0.0
value_stats = 0.0
# Transforms to [T, B, F] receives [B, F, T].
speaker_policy_logits = jnp.transpose(games.speaker_outputs.policy_logits,
[2, 0, 1])
target_speaker_policy_logits = jnp.transpose(
games.target_speaker_outputs.policy_logits, [2, 0, 1])
kl_target_loss = rlax.softmax().kl(
speaker_policy_logits,
target_speaker_policy_logits) * self._speaker_kl_target
kl_target_loss = jnp.sum(jnp.mean(kl_target_loss, axis=0), axis=0)
speaker_loss = policy_loss + entropy_loss + value_loss + kl_target_loss
stats = dict(
value=value_stats,
value_loss=value_loss,
speaker_loss=speaker_loss,
policy_loss=policy_loss,
entropy_loss=entropy_loss,
kl_target_loss=kl_target_loss,
speaker_entropy=entropy,
)
return types.SpeakerLossOutputs(loss=speaker_loss, stats=stats)
class PolicyGradientSpeakerLoss(SpeakerLoss):
"""Class implementing the policy loss for the speaker."""
def __init__(
self,
speaker_entropy: float,
use_baseline: bool = False,
) -> None:
self._speaker_entropy = speaker_entropy
self._use_baseline = use_baseline
def compute_speaker_loss(
self,
games: types.Games,
reward: chex.Array,
) -> types.SpeakerLossOutputs:
"""Computes the policy gradient loss."""
# Policy loss via policy gradient.
# Transforms to [T, B, F] receives [B, F, T].
assert games.speaker_outputs.q_values is not None
q_values = jnp.transpose(games.speaker_outputs.q_values, [2, 0, 1])
if self._use_baseline:
assert games.speaker_outputs.value is not None
value = jnp.transpose(games.speaker_outputs.value, [2, 0, 1])
value = jnp.squeeze(value, axis=-1)
else:
value = 0.0
# Transforms to [T, B] receives [B, T].
action = jnp.transpose(games.speaker_outputs.action, [1, 0])
entropy = jnp.transpose(games.speaker_outputs.entropy, [1, 0])
action_log_prob = jnp.transpose(games.speaker_outputs.action_log_prob,
[1, 0])
q_value_chosen = rlax.batched_index(q_values, action)
sg_q_value_chosen = jax.lax.stop_gradient(q_value_chosen)
sg_value = jax.lax.stop_gradient(value)
policy_loss = -jnp.mean(
(sg_q_value_chosen - sg_value) * action_log_prob, axis=0)
policy_loss = jnp.sum(policy_loss, axis=0)
entropy_loss = -jnp.mean(entropy, axis=0) * self._speaker_entropy
entropy_loss = jnp.sum(entropy_loss, axis=0)
action_value_loss = jnp.mean(jnp.square(reward - q_value_chosen), axis=0)
action_value_loss = jnp.sum(action_value_loss, axis=0)
if self._use_baseline:
value_loss = jnp.mean(jnp.square(reward - value), axis=0)
value_loss = jnp.sum(value_loss, axis=0)
value_stats = jnp.sum(jnp.mean(value, axis=0), axis=0)
else:
value_loss = 0.0
value_stats = 0.0
speaker_loss = policy_loss + entropy_loss + action_value_loss + value_loss
stats = dict(
q_value_chosen=jnp.sum(jnp.mean(q_value_chosen, axis=0), axis=0),
value=value_stats,
speaker_loss=speaker_loss,
action_value_loss=action_value_loss,
value_loss=value_loss,
policy_loss=policy_loss,
entropy_loss=entropy_loss,
)
return types.SpeakerLossOutputs(loss=speaker_loss, stats=stats)
class DummySpeakerLoss(SpeakerLoss):
"""Class implementing the policy loss for the speaker."""
def compute_speaker_loss(
self,
games: types.Games,
reward: chex.Array,
) -> types.SpeakerLossOutputs:
"""Computes the policy gradient loss."""
del games, reward
return types.SpeakerLossOutputs(loss=0., stats={})
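# A tiny numeric sketch (illustrative values only) of the REINFORCE estimator
# used in `ReinforceSpeakerLoss`: the per-example policy loss is
# -(reward - baseline) * log_prob, averaged over time and summed over the
# batch. The baseline is wrapped in stop_gradient so the policy term does not
# backpropagate into the value estimate.
def _example_reinforce_policy_loss() -> None:
  reward = jnp.array([1.0, 0.0])              # [B]
  action_log_prob = jnp.array([[-0.1, -0.3],  # [T, B]
                               [-0.2, -0.4]])
  value = jnp.array([[0.5, 0.5],              # [T, B] baseline
                     [0.5, 0.5]])
  sg_value = jax.lax.stop_gradient(value)
  policy_loss = -jnp.mean((reward - sg_value) * action_log_prob, axis=0)
  policy_loss = jnp.sum(policy_loss, axis=0)
  assert policy_loss.shape == ()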
| emergent_communication_at_scale-main | losses/speakers.py |
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementes different measures for the language (e.g., topographic similarity).
"""
import itertools
import editdistance
import jax
import jax.numpy as jnp
import numpy as np
from scipy import stats
from emergent_communication_at_scale import types
from emergent_communication_at_scale.utils import utils as emcom_utils
def edit_dist(alist):
"""Computes edit distance of strings.
Args:
alist: a sequence of messages, each given as a sequence of integer symbols.
Returns:
a list of normalized Levenshtein distances, one per pair of messages.
"""
distances = []
for i, el1 in enumerate(alist[:-1]):
for _, el2 in enumerate(alist[i+1:]):
# Normalized edit distance (same in our case as length is fixed)
distances.append(editdistance.eval(el1.tolist(), el2.tolist()) / len(el1))
return distances
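# A tiny usage sketch (illustrative messages only): with fixed-length messages,
# `edit_dist` returns the normalized Levenshtein distance for every unordered
# pair, i.e. n * (n - 1) / 2 values for n messages.
def _example_edit_dist() -> None:
  messages = np.array([[0, 1, 2], [0, 1, 3], [3, 2, 1]])
  distances = edit_dist(messages)
  assert len(distances) == 3  # 3 messages -> 3 unordered pairs.
  assert abs(distances[0] - 1 / 3) < 1e-9  # [0, 1, 2] vs [0, 1, 3]: one edit.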
def vmap_cosine_dist(alist):
"""Applies jnp_distancecosine on all possible distinct combinations of alist."""
all_combinations = jax.tree_map(
lambda x: jnp.array(list(itertools.combinations(x, 2))), alist)
distances = jax.tree_map(jax.vmap(jnp_distancecosine), all_combinations)
if isinstance(distances, dict):
# Average the distances across attributes.
distances = np.mean([*distances.values()], 0)
return distances
@jax.jit
def jnp_distancecosine(vectors):
"""Computes cosine similarity between (u,v) = vectors."""
u, v = vectors
dist = emcom_utils.cosine_loss(u, v)
return dist
def topographic_similarity(messages, meanings):
"""Computes topographic similarity."""
if isinstance(meanings, dict):
ashape = [*meanings.values()][0].shape[0]
else:
ashape = meanings.shape[0]
assert messages.shape[0] == ashape
distance_messages = edit_dist(messages)
distance_inputs = list(vmap_cosine_dist(meanings))
corr = stats.spearmanr(distance_messages, distance_inputs).correlation
return corr
def games_topographic_similarity(games,
meaning_sim,
task=types.Task.CLASSIFICATION):
"""Factory to get the right meaning space."""
messages = games.speaker_outputs.action
if meaning_sim == types.MeaningSimilarity.INPUTS:
meanings = games.speaker_inp
elif meaning_sim == types.MeaningSimilarity.ATTRIBUTES:
assert task in games.labels
meanings = games.labels[task]
if task == types.Task.MULTICLASSIFICATION:
meanings = jax.tree_map(lambda x: x.reshape(x.shape[0], -1), meanings)
else:
raise ValueError(f'Wrong topsim_meaning_similarity value: {meaning_sim}')
return topographic_similarity(messages=messages, meanings=meanings)
if __name__ == '__main__':
all_meanings = jnp.array([[1., 0., 0., 0., 1., 0., 0., 0.],
[1., 0., 0., 0., 0., 1., 0., 0.],
[1., 0., 0., 0., 0., 0., 1., 0.],
[1., 0., 0., 0., 0., 0., 0., 1.],
[0., 1., 0., 0., 1., 0., 0., 0.],
[0., 1., 0., 0., 0., 1., 0., 0.],
[0., 1., 0., 0., 0., 0., 1., 0.],
[0., 1., 0., 0., 0., 0., 0., 1.],
[0., 0., 1., 0., 1., 0., 0., 0.],
[0., 0., 1., 0., 0., 1., 0., 0.],
[0., 0., 1., 0., 0., 0., 1., 0.],
[0., 0., 1., 0., 0., 0., 0., 1.],
[0., 0., 0., 1., 1., 0., 0., 0.],
[0., 0., 0., 1., 0., 1., 0., 0.],
[0., 0., 0., 1., 0., 0., 1., 0.],
[0., 0., 0., 1., 0., 0., 0., 1.]])
high_messages = jnp.array([[0, 0, 0],
[0, 0, 1],
[0, 0, 2],
[0, 0, 3],
[0, 1, 0],
[0, 1, 1],
[0, 1, 2],
[0, 1, 3],
[2, 0, 0],
[2, 0, 1],
[2, 0, 2],
[2, 0, 3],
[2, 1, 0],
[2, 1, 1],
[2, 1, 2],
[2, 1, 3]])
low_messages = jnp.array([[1, 2, 3],
[2, 2, 0],
[3, 0, 1],
[1, 3, 1],
[1, 0, 3],
[2, 0, 0],
[3, 1, 1],
[1, 0, 1],
[0, 2, 3],
[0, 2, 0],
[0, 0, 1],
[0, 3, 1],
[1, 2, 0],
[2, 2, 1],
[3, 0, 0],
[1, 3, 0]])
low_topsim = topographic_similarity(messages=low_messages,
meanings=all_meanings)
high_topsim = topographic_similarity(messages=high_messages,
meanings=all_meanings)
print(low_topsim, high_topsim)
| emergent_communication_at_scale-main | utils/language_measures.py |
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class for checkpointing that fits jaxline pipeline."""
import time
from typing import List, Mapping, Optional, Tuple
import chex
import jax
import jax.numpy as jnp
from jaxline import experiment
from jaxline import utils
from ml_collections import config_dict
from emergent_communication_at_scale.utils import checkpointer as ckpt_api
from emergent_communication_at_scale.utils import population_storage as ps
class ExperimentWithCheckpointing(experiment.AbstractExperiment):
"""Helper to save/load ckpt during traning."""
def __init__(self,
mode: str,
init_rng: chex.PRNGKey,
config: config_dict.ConfigDict) -> None:
super().__init__(mode=mode, init_rng=init_rng)
self._checkpointer = ckpt_api.Checkpointer(**config.checkpointing)
self._training_steps = config.training.steps
self._population_storage = None # will be inited in LewisExperiment.
self._config = None # will be inited in LewisExperiment.
@property
def checkpoint_path(self) -> str:
return self._checkpointer.checkpoint_path
def snapshot_state(self) -> Mapping[str, chex.Array]:
"""Takes a frozen copy of the current experiment state for checkpointing."""
jaxline_snapshot = super().snapshot_state()
emcom_snaptshot = self._population_storage.snapshot()
return {
**jaxline_snapshot,
'params': utils.get_first(emcom_snaptshot.params),
'states': utils.get_first(emcom_snaptshot.states),
'opt_states': utils.get_first(emcom_snaptshot.opt_states),
}
def restore_from_snapshot(self,
snapshot_state: Mapping[str, chex.Array]) -> None:
"""Restores experiment state from a snapshot."""
super().restore_from_snapshot(snapshot_state)
self._population_storage.restore(
params=utils.bcast_local_devices(snapshot_state['params']),
states=utils.bcast_local_devices(snapshot_state['states']),
opt_states=utils.bcast_local_devices(snapshot_state['opt_states']))
def save_checkpoint(self, step: int, rng: jnp.ndarray) -> None:
self._checkpointer.maybe_save_checkpoint(
self._population_storage.snapshot(),
config=self._config,
step=step,
rng=rng,
is_final=step >= self._training_steps)
def restore_state(self, restore_path: Optional[str] = None,
) -> Tuple[int, ps.PopulationStorage]:
"""Initializes experiment state from a checkpoint."""
if restore_path is None:
restore_path = self._checkpointer.checkpoint_path
# Load pretrained experiment state.
exp_state, _, step, _ = ckpt_api.load_checkpoint(restore_path) # pytype: disable=attribute-error
self._population_storage.restore(params=exp_state.params,
states=exp_state.states,
opt_states=exp_state.opt_states)
return step, self._population_storage
def train_loop(self,
config: config_dict.ConfigDict,
state,
periodic_actions: List[utils.PeriodicAction],
writer: Optional[utils.Writer] = None) -> None:
"""Overrides the jaxline train_loop to add regular checkpointing."""
is_chief = jax.process_index() == 0
step = state.global_step
rng = state.train_step_rng
checkpoint_config = config.experiment_kwargs.config.checkpointing
if config.train_checkpoint_all_hosts or is_chief:
if checkpoint_config.save_checkpoint_interval > 0:
periodic_actions += (
utils.PeriodicAction(
lambda x, *_: self.save_checkpoint(step=x, rng=rng),
interval_type=(config.checkpoint_interval_type
or config.interval_type),
interval=checkpoint_config.save_checkpoint_interval,
run_async=False),) # run_async True would not be thread-safe.
for pa in periodic_actions:
pa.update_time(time.time(), step)
super().train_loop(config, state, periodic_actions, writer)
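# A minimal sketch (illustrative parameter tree only) of the get_first /
# bcast_local_devices round trip used by `snapshot_state` and
# `restore_from_snapshot`: snapshots keep a single copy of the replicated
# parameters, and restoring broadcasts that copy back onto every local device.
def _example_snapshot_round_trip() -> None:
  params = {'w': jnp.ones((2,))}
  replicated = utils.bcast_local_devices(params)  # Adds a leading device axis.
  single_copy = utils.get_first(replicated)       # Drops the device axis.
  assert bool((single_copy['w'] == params['w']).all())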
| emergent_communication_at_scale-main | utils/experiment_with_checkpointing.py |
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generic checkpointer to load and store data."""
import collections
import os
import pickle
import time
from typing import Optional, Text
from absl import logging
import jax
import jax.numpy as jnp
from jaxline import utils
from ml_collections import config_dict
from emergent_communication_at_scale import types
CkptData = collections.namedtuple("CkptData",
["experiment_state", "config", "step", "rng"])
def load_checkpoint(checkpoint_path: Text) -> Optional[CkptData]:
"""Loads a checkpoint if any is found."""
# Step 1: Load file
try:
with open(checkpoint_path, "rb") as checkpoint_file:
checkpoint_data = pickle.load(checkpoint_file)
logging.info("Loading checkpoint from %s, saved at step %d",
checkpoint_path, checkpoint_data["step"])
except FileNotFoundError:
logging.info("No existing checkpoint found at %s", checkpoint_path)
return None
# Retrieve experiment states (params, states etc.)
experiment_state = checkpoint_data["experiment_state"]
experiment_state = jax.tree_map(utils.bcast_local_devices,
experiment_state)
return CkptData(
experiment_state=experiment_state,
config=checkpoint_data["config"],
step=checkpoint_data["step"],
rng=checkpoint_data["rng"])
class Checkpointer:
"""A checkpoint saving and loading class."""
def __init__(
self,
use_checkpointing: bool,
checkpoint_dir: Text,
save_checkpoint_interval: int,
filename: Text):
if (not use_checkpointing or
checkpoint_dir is None or
save_checkpoint_interval <= 0):
self._checkpoint_enabled = False
return
self._checkpoint_enabled = True
self._checkpoint_dir = checkpoint_dir
os.makedirs(self._checkpoint_dir, exist_ok=True)
self._filename = filename
self._checkpoint_path = os.path.join(self._checkpoint_dir, filename)
self._last_checkpoint_time = 0
self._checkpoint_every = save_checkpoint_interval
@property
def checkpoint_path(self) -> str:
return self._checkpoint_path
def maybe_save_checkpoint(
self,
xp_state: types.AllProperties,
config: config_dict.ConfigDict,
step: int,
rng: jnp.ndarray,
is_final: bool):
"""Saves a checkpoint if enough time has passed since the previous one."""
current_time = time.time()
# Checks whether we should perform checkpointing.
if (not self._checkpoint_enabled or
jax.host_id() != 0 or # Only checkpoint the first worker.
(not is_final and
current_time - self._last_checkpoint_time < self._checkpoint_every)):
return
# Creates data to checkpoint.
checkpoint_data = dict(
experiment_state=jax.tree_map(lambda x: jax.device_get(x[0]), xp_state),
config=config,
step=step,
rng=rng)
    # Creates a rolling checkpoint: write to a temporary file first, then swap
    # it in, so an interrupted save never corrupts the previous checkpoint.
with open(self._checkpoint_path + "_tmp", "wb") as checkpoint_file:
pickle.dump(checkpoint_data, checkpoint_file, protocol=2)
try:
os.rename(self._checkpoint_path, self._checkpoint_path + "_old")
remove_old = True
except FileNotFoundError:
remove_old = False # No previous checkpoint to remove
os.rename(self._checkpoint_path + "_tmp", self._checkpoint_path)
if remove_old:
os.remove(self._checkpoint_path + "_old")
self._last_checkpoint_time = current_time
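# Illustrative usage sketch (not part of the original file): create a
# checkpointer, force a final save, then reload it. The directory, interval and
# filename below are placeholder values.
def _checkpointer_usage_example(
    xp_state: types.AllProperties,
    config: config_dict.ConfigDict,
    step: int,
    rng: jnp.ndarray,
) -> Optional[CkptData]:
  ckpt = Checkpointer(
      use_checkpointing=True,
      checkpoint_dir="/tmp/emcom_checkpoints",
      save_checkpoint_interval=600,  # Seconds between two rolling checkpoints.
      filename="checkpoint.pkl")
  ckpt.maybe_save_checkpoint(
      xp_state, config=config, step=step, rng=rng, is_final=True)
  return load_checkpoint(ckpt.checkpoint_path)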
| emergent_communication_at_scale-main | utils/checkpointer.py |
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for CIDRE experiments."""
import functools
from typing import Callable, Optional, Tuple
import chex
import haiku as hk
import jax
import jax.numpy as jnp
from jaxline import utils
from ml_collections import config_dict
import numpy as np
import rlax
def _maybe_resolve(entry):
if isinstance(entry, config_dict.FieldReference):
return entry.get()
else:
return entry
def resolve_dictionary(
config: config_dict.ConfigDict,) -> config_dict.ConfigDict:
"""Resolve the 'get_value(...) / get_oneway_ref(...)' in the dictionnary."""
return config_dict.ConfigDict(jax.tree_map(_maybe_resolve, config.to_dict()))
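# A minimal, illustrative usage sketch (not part of the original code): build a
# config where one field references another, then resolve it to plain values.
# The key names here are hypothetical.
def _resolve_dictionary_example() -> config_dict.ConfigDict:
  cfg = config_dict.ConfigDict()
  cfg.vocab_size = 20
  cfg.speaker = dict(vocab_size=cfg.get_ref('vocab_size'))
  # After resolution, `resolved.speaker.vocab_size` is the plain value 20
  # rather than a reference tracking `cfg.vocab_size`.
  return resolve_dictionary(cfg)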
def softmax_cross_entropy(
logits: jnp.ndarray,
labels: jnp.ndarray,
) -> jnp.ndarray:
"""Computes softmax cross entropy given logits and one-hot class labels.
Args:
logits: Logit output values.
labels: Ground truth one-hot-encoded labels.
Returns:
    Loss values with the shape of `labels` without its final (class) axis.
"""
loss = -jnp.sum(labels * jax.nn.log_softmax(logits), axis=-1)
return loss
def l2_normalize(
x: chex.Array,
axis: Optional[int] = None,
epsilon: float = 1e-12,
) -> chex.Array:
"""l2 normalize a tensor on an axis with numerical stability."""
square_sum = jnp.sum(jnp.square(x), axis=axis, keepdims=True)
x_inv_norm = jax.lax.rsqrt(jnp.maximum(square_sum, epsilon))
return x * x_inv_norm
def cosine_loss(x: chex.Array, y: chex.Array) -> chex.Array:
"""CPC's regression loss. This is a simple cosine distance."""
normed_x, normed_y = l2_normalize(x, axis=-1), l2_normalize(y, axis=-1)
return jnp.sum((normed_x - normed_y)**2, axis=-1)
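# Tiny illustrative check (not part of the original file): the loss of a vector
# against its own negation is 4, i.e. ||x_hat - (-x_hat)||^2 with unit-norm
# x_hat.
def _cosine_loss_example() -> chex.Array:
  x = jnp.array([[1.0, 2.0, 3.0]])
  return cosine_loss(x, -x)  # -> array([4.])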
def update_target_params(
rl_params: hk.Params,
target_rl_params: hk.Params,
target_network_update_ema: float,
) -> hk.Params:
"""Maybe update target params."""
new_target_rl_params = rlax.incremental_update(
new_tensors=rl_params,
old_tensors=target_rl_params,
tau=1 - target_network_update_ema,
)
return new_target_rl_params
def global_to_local_pmap(data: chex.ArrayTree) -> chex.ArrayTree:
"""From a tensor for all/global devices get the slice for local devices.
Args:
data: arbitrarily nested structure of tensors, all having a leading
dimension of the size of all/global devices.
Returns:
    The slice of each tensor that corresponds to local devices.
"""
chex.assert_tree_shape_prefix(data, (jax.device_count(),))
start, end = _global_to_local_pmap_indexes()
return jax.tree_map(lambda t: t[start:end], data)
def _global_to_local_pmap_indexes() -> Tuple[int, int]:
"""An internal helper for global_to_local_pmap."""
start, end = 0, 0
for process_index in range(jax.process_count()):
process_devices = jax.local_devices(process_index=process_index)
end = start + len(process_devices)
if process_index == jax.process_index():
break
start = end
assert end > start
return start, end
def run_and_broadcast_to_all_devices(fn: Callable[[chex.PRNGKey],
chex.ArrayTree]):
"""Given a one-process sampling fn, wrap it for multi-process setup.
  Sampling in a multi-process, multi-TPU-chip context is not easy. This helper
  takes a vanilla sampler that assumes exactly one process samples on behalf of
  all the others, and wraps it to run in a multi-process, multi-TPU context,
  relying on TPU communication to broadcast the sampling results to all
  processes.
Args:
fn: The callable that does sampling in a non-parallel context (no pmap). It
expects a single (not batched, not pmaped) `rng_key`.
Returns:
A callable that correctly samples using pmap and distributes the results
across all TPU chips/shards/processes.
"""
# The trick is to only use one process if we are in a multi-process setup.
@functools.partial(jax.pmap, axis_name="s")
@jax.util.wraps(fn, namestr="sample({fun})")
def _pmap_run_fn(rng_key, is_current_chip_the_one: bool):
"""Runs `fn` on all chips, but uses only results where `onehot_mask`."""
samples = fn(rng_key)
# Apply the given mask, which should be a boolean scalar.
samples = jax.tree_map(lambda t: t * is_current_chip_the_one, samples)
# Transfer the samples from the main process to all other ones.
samples = jax.lax.psum(samples, axis_name="s")
return samples
# The sampling should only happen on exactly one TPU chip of exactly
# one process. Pass `the One` boolean to chips.
the_one_mask_global = (jnp.arange(0, jax.device_count()) == 0)
assert np.sum(the_one_mask_global).item() == 1
the_one_mask_pmap = global_to_local_pmap(the_one_mask_global)
@jax.util.wraps(fn)
def _result(rng_key):
rng_key_pmap = utils.bcast_local_devices(rng_key)
# The sampling returns pmap-ed data.
samples_pmap = _pmap_run_fn(rng_key_pmap, the_one_mask_pmap)
chex.assert_tree_shape_prefix(samples_pmap, (jax.local_device_count(),))
# All chips/devices will return exactly the same data. Thus pick one.
samples = jax.device_get(utils.get_first(samples_pmap))
return samples
return _result
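# Illustrative usage sketch (not part of the original file): wrap a trivial
# sampler so that one process draws the sample and every process/device ends up
# with the same array.
def _broadcast_sampling_example(rng_key: chex.PRNGKey) -> chex.ArrayTree:
  def sample_fn(key):
    return jax.random.normal(key, shape=(4,))
  broadcast_sample_fn = run_and_broadcast_to_all_devices(sample_fn)
  return broadcast_sample_fn(rng_key)  # Identical array on every process.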
| emergent_communication_at_scale-main | utils/utils.py |
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implement another experiment to compute ease of learning of agents."""
from typing import Optional, Text
from absl import flags
from absl import logging
import chex
import jax
from ml_collections import config_dict
import numpy as np
from emergent_communication_at_scale import lewis_experiment
FLAGS = flags.FLAGS
def evaluate_final(config: Optional[config_dict.ConfigDict],
mode: Optional[Text],
rng: chex.PRNGKey):
"""The main evaluation loop.
This loop loads a checkpoint and evaluates its performance on the
test set, by calling experiment.evaluate.
Args:
config: Optional argument. Defines the config.
    mode: Optional argument. Defines the mode of evaluation. Could be any value
      in eval_{test/valid}_{average/vote}. Defaults to eval_test_average.
    rng: Evaluation seed (it is recommended to always use the same one).
"""
if config is None:
config = FLAGS.config.experiment_kwargs.config
else:
config = config.experiment_kwargs.config
if config.checkpointing.use_checkpointing:
logging.info('\nEvaluating the final checkpoint on the test set.\n')
init_rng, eval_rng = jax.random.split(rng)
exp = lewis_experiment.LewisExperiment(mode=mode,
init_rng=init_rng,
config=config)
step, _ = exp.restore_state(exp.checkpoint_path)
exp.evaluate(global_step=np.array(step), rng=eval_rng)
else:
logging.info('\nCheckpointing not available for evaluation.\n')
return
| emergent_communication_at_scale-main | utils/eval_utils.py |
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Creates a class to store and load the param of a population of agents."""
import functools as fn
from typing import Optional, Tuple
import chex
import haiku as hk
import jax
from jaxline import utils
import optax
from emergent_communication_at_scale import types
class PopulationStorage:
"""Stores the population params and states."""
def __init__(
self,
n_speakers: int,
n_listeners: int,
) -> None:
self._n_speakers = n_speakers
self._n_listeners = n_listeners
self._params: types.AllParams = dict(
speaker=[None] * n_speakers,
listener=[None] * n_listeners,
target_speaker=[None] * n_speakers,
)
self._states: types.AllStates = dict(
speaker=[None] * n_speakers,
listener=[None] * n_listeners,
target_speaker=[None] * n_speakers,
)
self._opt_states: types.AllOptStates = dict(
speaker=[None] * n_speakers,
listener=[None] * n_listeners,
)
@property
def params(self):
return self._params
@property
def states(self):
return self._states
@property
def opt_states(self):
return self._opt_states
# Basic Accessors.
def load_speaker(self, idx) -> types.AgentProperties:
return types.AgentProperties(
params=self._params['speaker'][idx],
target_params=self._params['target_speaker'][idx],
states=self._states['speaker'][idx],
target_states=self._states['target_speaker'][idx],
opt_states=self._opt_states['speaker'][idx],
)
def load_listener(self, idx) -> types.AgentProperties:
return types.AgentProperties(
params=self._params['listener'][idx],
states=self._states['listener'][idx],
opt_states=self._opt_states['listener'][idx],
)
def store_agent(
self,
agent_id: int,
agent_name: str,
params: hk.Params,
states: hk.State,
opt_states: Optional[optax.OptState] = None,
) -> None:
"""Once data for an agent is updated, store it back."""
assert agent_name in ['speaker', 'listener', 'target_speaker']
assert 0 <= agent_id < len(self._params[agent_name])
self._params[agent_name][agent_id] = params
self._states[agent_name][agent_id] = states
if opt_states:
self._opt_states[agent_name][agent_id] = opt_states
# Checkpointing utilities.
def snapshot(self) -> types.AllProperties:
return types.AllProperties(
params=self._params,
states=self._states,
opt_states=self._opt_states,
)
def restore(
self,
params: Optional[types.AllParams] = None,
states: Optional[types.AllStates] = None,
opt_states: Optional[types.AllOptStates] = None,
) -> None:
"""Restores all params/states of the agent/optimizer."""
if params:
assert all([
k in ['speaker', 'listener', 'target_speaker'] for k in params.keys()
])
self._params.update(params)
if states:
assert all([
k in ['speaker', 'listener', 'target_speaker'] for k in states.keys()
])
self._states.update(states)
if opt_states:
assert all([k in ['speaker', 'listener'] for k in opt_states.keys()])
self._opt_states.update(opt_states)
def initialize(
self,
rng_key: chex.PRNGKey,
games: types.GamesInputs,
game_init_fn,
opt_speaker_init_fn,
opt_listener_init_fn,
) -> None:
"""Initializes all the params/states of the agent/optimizer."""
# Initializes params/states.
self._params = dict(speaker=[], listener=[], target_speaker=[])
self._states = dict(speaker=[], listener=[], target_speaker=[])
self._opt_states = dict(speaker=[], listener=[])
# Iterates over speaker/listener options.
for agent_name, num_agents, opt_init_fn in zip(
('speaker', 'listener'), (self._n_speakers, self._n_listeners),
(opt_speaker_init_fn, opt_listener_init_fn)):
# Prepares per agents pmap function.
params_init_pmap = jax.pmap(
fn.partial(
game_init_fn,
training_mode=types.TrainingMode.TRAINING,
))
opt_init_pmap = jax.pmap(opt_init_fn)
for _ in range(num_agents):
# Prepares rng.
rng_key, rng = jax.random.split(rng_key)
rng = utils.bcast_local_devices(rng) # same network init across devices
# Init Params/States.
joint_params, joint_states = params_init_pmap(init_games=games, rng=rng)
agent_params = getattr(joint_params, agent_name)
agent_states = getattr(joint_states, agent_name)
self._params[agent_name].append(agent_params)
self._states[agent_name].append(agent_states)
if agent_name == 'speaker':
self._params['target_speaker'].append(joint_params.target_speaker)
self._states['target_speaker'].append(joint_states.target_speaker)
# Init Opt state.
agent_opt_states = opt_init_pmap(agent_params)
self._opt_states[agent_name].append(agent_opt_states)
def load_pair(
self, speaker_id: int,
listener_id: int) -> Tuple[types.Params, types.States, types.OptStates]:
"""Prepares params and opt_states for a given pair of speaker/listener."""
assert 0 <= speaker_id < len(self._params['speaker'])
assert 0 <= listener_id < len(self._params['listener'])
params = types.Params(
speaker=self._params['speaker'][speaker_id],
listener=self._params['listener'][listener_id],
target_speaker=self._params['target_speaker'][speaker_id],
)
states = types.States(
speaker=self._states['speaker'][speaker_id],
listener=self._states['listener'][listener_id],
target_speaker=self._states['target_speaker'][speaker_id],
)
opt_states = types.OptStates(
speaker=self._opt_states['speaker'][speaker_id],
listener=self._opt_states['listener'][listener_id])
return params, states, opt_states
def store_pair(
self,
speaker_id: int,
listener_id: int,
params: types.Params,
states: types.States,
opt_states: types.OptStates,
) -> None:
"""Once data for a pair speaker/listener is updated, store it back."""
self.store_agent(
agent_id=speaker_id,
agent_name='speaker',
params=params.speaker,
states=states.speaker,
opt_states=opt_states.speaker,
)
self.store_agent(
agent_id=speaker_id,
agent_name='target_speaker',
params=params.target_speaker,
states=states.target_speaker,
)
self.store_agent(
agent_id=listener_id,
agent_name='listener',
params=params.listener,
states=states.listener,
opt_states=opt_states.listener,
)
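# Illustrative sketch (not part of the original file): one update step for a
# single sampled speaker/listener pair. `update_fn` is a placeholder for any
# (pmapped) update function with the signature used by the communication
# trainer.
def _pair_update_example(
    storage: PopulationStorage,
    update_fn,
    games: types.GamesInputs,
    rng: chex.PRNGKey,
) -> PopulationStorage:
  params, states, opt_states = storage.load_pair(speaker_id=0, listener_id=0)
  new_params, new_states, new_opt_states, _ = update_fn(
      params, states, opt_states, games, rng)
  storage.store_pair(
      speaker_id=0,
      listener_id=0,
      params=new_params,
      states=new_states,
      opt_states=new_opt_states)
  return storage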
| emergent_communication_at_scale-main | utils/population_storage.py |
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines Postprocessing per dataset."""
import abc
from typing import Dict, NamedTuple
import numpy as np
import tensorflow.compat.v2 as tf
import tree
from emergent_communication_at_scale import types
####################
# Abstract class #
####################
class DatasetProcessing(abc.ABC):
"""Interface for postprocessing dataset with default options, e.g., noise."""
@abc.abstractmethod
def _process(self, data: Dict[str, np.ndarray]) -> types.GamesInputs:
pass
def __call__(self, data: Dict[str, tf.Tensor]) -> types.GamesInputs:
"""Main postprocessing call."""
return self._process(data)
class DatasetInfo(NamedTuple):
dataset_processing_fn: DatasetProcessing
#############
# Factory #
#############
def get(dataset_name: str, to_onehot_label: bool) -> DatasetInfo:
"""Simple helper to return the correct dataset and its tokenizer."""
all_dataset = dict(
byol_imagenet2012=DatasetInfo(
dataset_processing_fn=ImagenetProcessing(to_onehot_label)),
byol_celeb_a2=DatasetInfo(
dataset_processing_fn=CelebAProcessing(to_onehot_label)),
)
if dataset_name in all_dataset:
return all_dataset[dataset_name]
else:
raise ValueError(f'Invalid dataset name: {dataset_name}.'
f'Supported: {[all_dataset.keys()]}')
###########
# Utils #
###########
def _tree_to_one_hot(x, max_val):
x = tree.map_structure(
lambda a: tf.one_hot(tf.cast(a, tf.int32), depth=max_val, axis=-1),
x)
return x
###########################
# Dataset preprocessing #
###########################
class ImagenetProcessing(DatasetProcessing):
"""Turns Dataset with logits into a Discrimation Lewis Game."""
def __init__(self, to_onehot_label):
self._to_onehot_label = to_onehot_label
def _process(self, data: Dict[str, np.ndarray]) -> types.GamesInputs:
if self._to_onehot_label:
labels = _tree_to_one_hot(data['label'], max_val=1000)
else:
labels = data['label']
return types.GamesInputs(
speaker_inp=data['logit'],
labels={types.Task.CLASSIFICATION: {'class': labels}},
misc=dict()
)
class CelebAProcessing(DatasetProcessing):
"""Turns Dataset with logits into a Discrimation Lewis Game."""
def __init__(self, to_onehot_label):
self._to_onehot_label = to_onehot_label
def _process(self, data: Dict[str, np.ndarray]) -> types.GamesInputs:
attributes = _tree_to_one_hot(data['attributes'], max_val=2)
    # Config option to avoid exhausting memory when performing CPC.
if self._to_onehot_label:
labels = _tree_to_one_hot(data['label'], max_val=10178) # Big
else:
labels = data['label']
return types.GamesInputs(
speaker_inp=data['logit'],
labels={
types.Task.CLASSIFICATION: {'class': labels},
types.Task.ATTRIBUTE: attributes,
types.Task.LANDMARK: data['landmarks'],
},
misc=dict(image_id=data['image_id']),
)
################
# Miscellaneous
################
class NoiseBatchProcessingTf:
"""Adds Gaussian noise to the speaker input (logit or image)."""
def __init__(self, coeff_noise):
self._coeff_noise = coeff_noise
def __call__(self, data: types.GamesInputs):
"""Apply noise on data through tensorflow (fast)."""
x = data.speaker_inp
# Collects input batch statistic.
stddev = tf.math.reduce_std(x, axis=0)
shape = tf.shape(x)
### Process first view.
noise_view1 = tf.random.normal(shape, mean=0.0, stddev=stddev)
data = data._replace(speaker_inp=x + self._coeff_noise * noise_view1)
### Process second view.
# Creates second view.
if types.Task.DISCRIMINATION not in data.labels:
data.labels[types.Task.DISCRIMINATION] = tf.identity(x) # Simulate copy.
# We here assume that the view1/view2 stats are the same.
noise_view2 = tf.random.normal(shape, mean=0.0, stddev=stddev)
data.labels[types.Task.DISCRIMINATION] += self._coeff_noise * noise_view2
return data
| emergent_communication_at_scale-main | game/dataset.py |
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Lewis Game."""
import numpy as np
from emergent_communication_at_scale import types
from emergent_communication_at_scale.game.game_interface import dispatch_per_device
from emergent_communication_at_scale.game.game_interface import Game
def iterator(num_games, max_steps, mode):
"""Iterator for dummy game."""
obs = types.GamesInputs(
speaker_inp=np.eye(num_games),
labels=np.ones((num_games,)),
misc=dict(),
)
if mode == 'train':
obs = dispatch_per_device(obs) # Dispatch only at training.
for _ in range(max_steps):
yield obs
class DummyGame(Game):
"""Dummy game for testing."""
def __init__(self, train_batch_size, eval_batch_size, max_steps):
super().__init__(train_batch_size, eval_batch_size)
self._max_steps = max_steps
def get_training_games(self, rng):
del rng
return iterator(self._train_batch_size, self._max_steps, mode='train')
def get_evaluation_games(self, mode: str = 'test'):
return iterator(self._eval_batch_size, self._max_steps, mode=mode)
def evaluate(self, prediction, target):
pass
| emergent_communication_at_scale-main | game/dummy_game.py |
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Init file to locate correct dataset and its configuration."""
from ml_collections import config_dict
from emergent_communication_at_scale.game import visual_game
from emergent_communication_at_scale.game.dummy_game import DummyGame
from emergent_communication_at_scale.game.game_interface import Game
def get(
config: config_dict.ConfigDict,
train_batch_size: int,
eval_batch_size: int,
) -> Game:
"""Simple helper to return the correct dataset and its tokenizer."""
name = config.name
game_kwargs = config.kwargs[name]
if name == 'visual_game':
game = visual_game.LogitLoader(
train_batch_size=train_batch_size,
eval_batch_size=eval_batch_size,
**game_kwargs,
)
elif name == 'dummy':
game = DummyGame(
train_batch_size=train_batch_size,
eval_batch_size=eval_batch_size,
**game_kwargs,
)
else:
raise ValueError(f'Invalid game name, {name}.')
return game
| emergent_communication_at_scale-main | game/game_factory.py |
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dummy file to have Mock object for testing."""
import abc
from typing import Iterator, Union
import jax
import jax.numpy as jnp
import numpy as np
from emergent_communication_at_scale import types
NumpyValue = Union[np.ndarray, np.generic, bytes]
GameIterator = Iterator[types.GamesInputs]
# This is a very minimal game interface, so that we can sweep over games.
def batch_size_per_device(batch_size: int, num_devices: int):
"""Return the batch size per device."""
per_device_batch_size, ragged = divmod(batch_size, num_devices)
if ragged:
msg = 'batch size must be divisible by num devices, got {} and {}.'
    raise ValueError(msg.format(batch_size, num_devices))
return per_device_batch_size
def dispatch_per_device(games):
"""Helper fo split dataset per device."""
num_games = list(games._asdict().values())[0].shape[0]
num_devices = jax.local_device_count()
batch_size = batch_size_per_device(num_games, num_devices=num_devices)
def dispatch(x):
if hasattr(x, 'shape'):
return jnp.reshape(x, [num_devices, batch_size] + list(x.shape[1:]))
else:
return x
games_per_device = jax.tree_map(dispatch, games)
return games_per_device
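# Illustrative sketch (not part of the original file): with a batch of 8 games
# and e.g. 2 local devices, `dispatch_per_device` reshapes leading dimensions
# from [8, ...] to [2, 4, ...] so `jax.pmap` sees one slice per device. It
# assumes the local device count divides the batch size.
def _dispatch_per_device_example() -> types.GamesInputs:
  games = types.GamesInputs(
      speaker_inp=np.zeros((8, 16)),
      labels=np.ones((8,)),
      misc=dict(),
  )
  return dispatch_per_device(games)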
class Game(abc.ABC):
"""Interface for game with terminal reward."""
def __init__(self,
train_batch_size,
eval_batch_size):
self._train_batch_size = train_batch_size
self._eval_batch_size = eval_batch_size
if self._eval_batch_size <= 2:
raise ValueError(f'Eval batch size must be greater than 2 to compute '
f'topography similarity. Got {self._eval_batch_size}')
@property
def train_batch_size(self) -> int:
return self._train_batch_size
@property
def eval_batch_size(self) -> int:
return self._eval_batch_size
@abc.abstractmethod
def get_training_games(self, rng) -> GameIterator:
pass
@abc.abstractmethod
def get_evaluation_games(self, mode: str = 'test') -> GameIterator:
pass
def evaluate(self, prediction, target):
pass
| emergent_communication_at_scale-main | game/game_interface.py |
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Visual Lewis Game.
Pretrained image logits/representations are loaded through tfds before being
split into distractors and targets.
"""
from typing import Any, Callable, Optional
from absl import logging
import chex
import jax
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
from emergent_communication_at_scale.game import dataset as visual_dataset
from emergent_communication_at_scale.game.game_interface import batch_size_per_device
from emergent_communication_at_scale.game.game_interface import Game
AUTOTUNE = tf.data.experimental.AUTOTUNE
# Randomness option
EVAL_FIX_SEED = 42
tf.random.set_seed(42) # Make tensorflow deterministic.
def _load_dataset(dataset_name: str,
dataset_path: str,
prefix: str,
shuffle_files: bool,
shuffle_seed: int,
reshuffle_iteration: bool) -> tf.data.Dataset:
"""Allow to load dataset from either tfds recording or dm.auto_dataset."""
return tfds.load(dataset_name,
split=prefix,
shuffle_files=shuffle_files,
data_dir=dataset_path,
read_config=tfds.ReadConfig(
shuffle_seed=shuffle_seed,
shuffle_reshuffle_each_iteration=reshuffle_iteration))
def _process_dataset(
ds,
batch_size: Optional[int], # None or zero -> no batch size
num_epoch: Optional[int], # None -> infinite loop
cache: bool,
use_shards: bool, # to avoid multi-device sample duplication
drop_remainder: bool,
shuffle: bool,
shuffling_buffer: Optional[int] = None,
seed: Optional[int] = None,
preprocess_fn: Optional[Callable[[Any], Any]] = None,
batchprocess_fn: Optional[Callable[[Any], Any]] = None,
prefetch: int = AUTOTUNE,
num_parallel_call: int = AUTOTUNE,
) -> tf.data.Dataset:
"""Creates dataset by enforcing valid call orders."""
if use_shards: # Must be done BEFORE repeat
ds = ds.shard(num_shards=jax.process_count(), index=jax.process_index())
if preprocess_fn:
ds = ds.map(preprocess_fn, num_parallel_calls=num_parallel_call)
if cache: # Must be done BEFORE shuffling
ds = ds.cache()
ds = ds.repeat(num_epoch)
if shuffle:
assert seed is not None, '"seed" must be defined when shuffling'
assert shuffling_buffer, '"shuffle_buffer" must be defined when shuffling'
ds = ds.shuffle(shuffling_buffer, seed=seed, reshuffle_each_iteration=True)
if batch_size is not None and batch_size > 0:
ds = ds.batch(batch_size, drop_remainder=drop_remainder)
ds = ds.prefetch(prefetch)
if batchprocess_fn:
ds = ds.map(batchprocess_fn, num_parallel_calls=num_parallel_call)
return ds
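# Illustrative sketch (not part of the original file): a typical training-side
# call, with placeholder values for the batch size, shuffle buffer and seed.
def _process_dataset_example(ds: tf.data.Dataset) -> tf.data.Dataset:
  return _process_dataset(
      ds,
      batch_size=128,
      num_epoch=None,  # Repeat forever for training.
      cache=False,
      use_shards=True,  # Shard across processes before repeating.
      drop_remainder=True,
      shuffle=True,
      shuffling_buffer=1024,
      seed=0,
  )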
class LogitLoader(Game):
"""Simple container class."""
def __init__(
self,
dataset_name: str,
dataset_path: str,
train_batch_size: int,
eval_batch_size: int,
shuffle_training: bool = True,
shuffle_evaluation: bool = False,
num_evaluation_epochs: int = 1,
use_shards=True,
drop_remainder: bool = False,
coeff_noise: float = 0.0,
is_one_hot_label: bool = True,
):
"""Dataset using modified loader from @defauw.
Args:
dataset_name: name of the dataset to load
dataset_path: path to load tfds dataset from cns.
train_batch_size: training minibatch size.
eval_batch_size: evaluation (val & test) batch size.
shuffle_training: whether or not to shuffle the training dataset.
      shuffle_evaluation: whether to shuffle evaluation data (negative samples
        for CPC).
      num_evaluation_epochs: how many times to iterate over the evaluation set.
use_shards: use multiple shards across devices.
drop_remainder: drop last elements of dataset (or pad them).
coeff_noise: ratio of additional gaussian noise, at evaluation (for now).
      is_one_hot_label: specifies if labels need to be one-hot encoded.
    Returns:
      A dataset container object.
"""
super().__init__(train_batch_size, eval_batch_size)
self._dataset_name = dataset_name
self._path = dataset_path.format(dataset_name)
self._use_shards = use_shards
self._shuffle_training = shuffle_training
self._shuffle_evaluation = shuffle_evaluation
self._num_evaluation_epochs = num_evaluation_epochs
self._drop_remainder = drop_remainder
dataset = visual_dataset.get(dataset_name, is_one_hot_label)
self._dataset_processing_fn = dataset.dataset_processing_fn
self._batchprocess_fn = None
if coeff_noise > 0.:
self._batchprocess_fn = visual_dataset.NoiseBatchProcessingTf(coeff_noise)
@property
def dataset_name(self):
return self._dataset_name
def get_training_games(self, rng: chex.PRNGKey):
"""See base class."""
# Computes the batch size we need to dispatch per device.
batch_size = batch_size_per_device(
self.train_batch_size, num_devices=jax.device_count())
ds = _load_dataset(dataset_name=self._dataset_name,
dataset_path=self._path,
prefix='train',
shuffle_files=True,
shuffle_seed=rng[-1].item(),
reshuffle_iteration=True)
ds = _process_dataset(
ds,
batch_size,
cache=False,
num_epoch=None, # Infinite iterator
use_shards=True,
preprocess_fn=self._dataset_processing_fn,
batchprocess_fn=self._batchprocess_fn, # only tf func
drop_remainder=self._drop_remainder,
shuffle=self._shuffle_training,
        shuffling_buffer=32768,  # <1GB of RAM with 2048-dim features.
seed=rng[-1].item(),
)
logging.info('Dataset looks like %s.', ds)
# Batch per devices
ds = ds.batch(jax.local_device_count(), drop_remainder=self._drop_remainder)
return ds.as_numpy_iterator()
def get_evaluation_games(self, mode: str = 'test'):
"""Builds the evaluation input pipeline."""
assert mode in ['test', 'test_official', 'valid']
    # In general, it is safer to run evaluation on a single host (GPU).
assert jax.device_count() == 1
assert jax.local_device_count() == 1
ds = _load_dataset(dataset_name=self._dataset_name,
dataset_path=self._path,
prefix=mode,
shuffle_files=self._shuffle_evaluation,
shuffle_seed=EVAL_FIX_SEED,
reshuffle_iteration=True)
ds = _process_dataset(
ds,
self._eval_batch_size,
cache=False,
use_shards=False, # Single device at evaluation time
num_epoch=self._num_evaluation_epochs, # useful for CPC eval,
preprocess_fn=self._dataset_processing_fn,
batchprocess_fn=self._batchprocess_fn, # only tf func
drop_remainder=self._drop_remainder,
shuffle=self._shuffle_evaluation, # useful for CPC eval
        shuffling_buffer=32768,  # <1GB of RAM with 2048-dim features.
seed=EVAL_FIX_SEED, # we want the same shuffle every time
)
    logging.info('Dataset looks like %s.', ds)
return ds.as_numpy_iterator()
| emergent_communication_at_scale-main | game/visual_game.py |
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper to sample a oracles/students."""
import chex
import jax
from jaxline import utils
import numpy as np
from emergent_communication_at_scale import types
from emergent_communication_at_scale.utils import population_storage as ps
@jax.pmap
def _split_keys_pmap(key):
return tuple(jax.random.split(key))
class ImitateTrainer():
"""Class implementation for imitation over speaker agents."""
def __init__(
self,
n_speakers: int,
imitation_update_fn,
):
# Pmap imitation update function.
self._n_speakers = n_speakers
self._pmap_imitation_learning = jax.pmap(imitation_update_fn, axis_name='i')
def imitate(
self,
rng: chex.PRNGKey,
games: types.GamesInputs,
agent_storage: ps.PopulationStorage,
nbr_students: int,
imitation_step: int,
imitation_type: types.ImitationMode,
self_imitation: bool,
):
"""Implements imitation learning with different modes."""
del imitation_step # Unused.
def get_oracle_student_id(rng: chex.PRNGKey):
if self._n_speakers > 1:
speaker_ids = list(
jax.random.choice(
key=utils.get_first(rng),
a=self._n_speakers,
shape=[nbr_students + 1],
replace=False))
# Gets a speaker as oracle (depending on imitation mode).
# Sets the rest as students.
func = lambda id: utils.get_first( # pylint: disable=g-long-lambda
agent_storage.load_speaker(id).states['speaker']['avg_score'])
scores = list(map(func, speaker_ids))
if imitation_type == types.ImitationMode.BEST:
oracle_id = speaker_ids[np.argmax(scores)]
elif imitation_type == types.ImitationMode.WORST:
oracle_id = speaker_ids[np.argmin(scores)]
elif imitation_type == types.ImitationMode.RANDOM:
oracle_id = speaker_ids[0]
else:
raise ValueError(f'Wrong imitation type: {imitation_type}.')
speaker_ids.remove(oracle_id)
elif (self._n_speakers == 1) and self_imitation:
# Self-imitation case.
speaker_ids = [0]
oracle_id = 0
else:
raise ValueError('There is no imitation.')
return speaker_ids, oracle_id
rng_sampling, rng_training = _split_keys_pmap(rng)
student_ids, oracle_id = get_oracle_student_id(rng=rng_sampling)
# Imitation or self-imitation scenarios.
oracle_properties = agent_storage.load_speaker(oracle_id)
for student_id in student_ids:
student_properties = agent_storage.load_speaker(student_id)
new_params, new_states, new_opt_state, imit_scalar = self._pmap_imitation_learning(
params_oracle=oracle_properties.params,
params_student=student_properties.params,
state_oracle=oracle_properties.states,
state_student=student_properties.states,
opt_state=student_properties.opt_states,
games=games,
rng=rng_training)
# Updates params/states.
agent_storage.store_agent(
agent_id=student_id,
agent_name='speaker',
params=new_params,
states=new_states,
opt_states=new_opt_state)
    # Returns the scalar of the last imitation training without the pmapped dim.
return dict(imitation_loss=utils.get_first(imit_scalar)), agent_storage
| emergent_communication_at_scale-main | trainers/imitation_trainer.py |
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper to sample listeners/speakers for resetting."""
import functools as fn
import chex
import jax
from jaxline import utils
from emergent_communication_at_scale import types
from emergent_communication_at_scale.utils import population_storage as ps
class ResetTrainer():
"""Class implementation for resetting speaker and listener agents."""
def __init__(
self,
n_speakers: int,
n_listeners: int,
):
self._n_speakers = n_speakers
self._n_listeners = n_listeners
def reset(
self,
rng: chex.PRNGKey,
games: types.GamesInputs,
agent_storage: ps.PopulationStorage,
game_init_fn,
opt_speaker_init_fn,
opt_listener_init_fn,
reset_type: types.ResetMode,
):
"""Implements random reset."""
# Gets first then broadcasts to ensure same rng for all devices at init.
rng = utils.get_first(rng)
rng_speaker, rng_listener, rng = jax.random.split(rng, num=3)
rng = utils.bcast_local_devices(rng)
reset_speaker_id = jax.random.randint(
key=rng_speaker,
shape=(1,),
minval=0,
maxval=self._n_speakers,
)
reset_listener_id = jax.random.randint(
key=rng_listener,
shape=(1,),
minval=0,
maxval=self._n_listeners,
)
agent_storage = self._initialize_pairs(
rng_key=rng,
speaker_id=reset_speaker_id.item(),
listener_id=reset_listener_id.item(),
games=games,
agent_storage=agent_storage,
game_init_fn=game_init_fn,
opt_speaker_init_fn=opt_speaker_init_fn,
opt_listener_init_fn=opt_listener_init_fn,
reset_type=reset_type,
)
return agent_storage
def _initialize_pairs(
self,
rng_key: chex.PRNGKey,
speaker_id: int,
listener_id: int,
games: types.GamesInputs,
agent_storage: ps.PopulationStorage,
game_init_fn,
opt_speaker_init_fn,
opt_listener_init_fn,
reset_type: types.ResetMode,
):
"""Initializes pair of agents."""
params_init_pmap = jax.pmap(
fn.partial(
game_init_fn,
training_mode=types.TrainingMode.TRAINING,
))
opt_speaker_init_pmap = jax.pmap(opt_speaker_init_fn)
opt_listener_init_pmap = jax.pmap(opt_listener_init_fn)
# Init Params/States.
joint_params, joint_states = params_init_pmap(init_games=games, rng=rng_key)
# Init Opt state.
speaker_opt_states = opt_speaker_init_pmap(joint_params.speaker)
listener_opt_states = opt_listener_init_pmap(joint_params.listener)
joint_opt_states = types.OptStates(
speaker=speaker_opt_states, listener=listener_opt_states)
if reset_type == types.ResetMode.PAIR:
# Store reinitialized pair.
agent_storage.store_pair(
speaker_id=speaker_id,
listener_id=listener_id,
params=joint_params,
states=joint_states,
opt_states=joint_opt_states,
)
elif reset_type == types.ResetMode.SPEAKER:
agent_storage.store_agent(
agent_id=speaker_id,
agent_name='speaker',
params=joint_params.speaker,
states=joint_states.speaker,
opt_states=speaker_opt_states,
)
agent_storage.store_agent(
agent_id=speaker_id,
agent_name='target_speaker',
params=joint_params.speaker,
states=joint_states.speaker,
)
elif reset_type == types.ResetMode.LISTENER:
agent_storage.store_agent(
agent_id=listener_id,
agent_name='listener',
params=joint_params.listener,
states=joint_states.listener,
opt_states=listener_opt_states,
)
else:
      raise ValueError(f'Wrong reset type: {reset_type}.')
return agent_storage
| emergent_communication_at_scale-main | trainers/reset_trainer.py |
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper to sample a population."""
import abc
import functools
from typing import Tuple
import chex
import jax
from jaxline import utils
from emergent_communication_at_scale import types
from emergent_communication_at_scale.utils import population_storage as ps
from emergent_communication_at_scale.utils import utils as emcom_utils
@jax.pmap
def _split_keys_pmap(key):
return tuple(jax.random.split(key))
class AbstractCommunicateTrainer(abc.ABC):
"""Abstract class implementation for training agents."""
@abc.abstractmethod
def communicate(
self,
rng: chex.PRNGKey,
games: types.GamesInputs,
agent_storage: ps.PopulationStorage,
):
pass
class BasicTrainer(AbstractCommunicateTrainer):
"""Sample trainer that simply loop over sampled agent pairs."""
def __init__(
self,
update_fn,
n_speakers: int,
n_listeners: int,
num_agents_per_step: int,
) -> None:
# Stores key values.
self._n_speakers = n_speakers
self._n_listeners = n_listeners
self._num_agents_per_step = num_agents_per_step
# Prepares pmap functions.
# Special pmap wrapper to correctly handle sampling across devices.
self._pmap_sampling = emcom_utils.run_and_broadcast_to_all_devices(
self._sample_fn)
self._pmap_update_fn = jax.pmap(
functools.partial(
update_fn,
training_mode=types.TrainingMode.TRAINING,
is_sharded_update=True),
axis_name='i', donate_argnums=(0, 1, 2))
def _sample_fn(self, rng: chex.PRNGKey) -> Tuple[chex.Array, chex.Array]:
"""Basic sampling function."""
speaker_rng, listener_rng = jax.random.split(rng)
speaker_ids = jax.random.choice(
key=speaker_rng,
a=self._n_speakers,
replace=True,
shape=[self._num_agents_per_step],
)
listener_ids = jax.random.choice(
key=listener_rng,
a=self._n_listeners,
replace=True,
shape=[self._num_agents_per_step],
)
return speaker_ids, listener_ids
def communicate(
self,
rng: chex.PRNGKey,
games: types.GamesInputs,
agent_storage: ps.PopulationStorage,
) -> Tuple[types.Config, ps.PopulationStorage]:
"""Performs one training step by looping over agent pairs."""
# Step 1: samples the speaker/listener idx.
sampling_rng, rng = _split_keys_pmap(rng)
sampling_rng = utils.get_first(sampling_rng)
speaker_ids, listener_ids = self._pmap_sampling(sampling_rng)
chex.assert_tree_shape_prefix((speaker_ids, listener_ids),
(self._num_agents_per_step,))
# Step 2: executes a pmap update per speaker/listener pairs.
scalars = None
for (speaker_id, listener_id) in zip(speaker_ids, listener_ids):
# Next rng.
update_rng, rng = _split_keys_pmap(rng)
# Load agent params.
params, states, opt_states = agent_storage.load_pair(
speaker_id=speaker_id.item(), # `.item()` gets the scalar value.
listener_id=listener_id.item())
# Performs update function (forward/backward pass).
new_params, new_states, new_opt_states, scalars = self._pmap_update_fn(
params,
states,
opt_states,
games,
update_rng,
)
# Updates params in storage.
agent_storage.store_pair(
speaker_id=speaker_id.item(),
listener_id=listener_id.item(),
params=new_params,
states=new_states,
opt_states=new_opt_states,
)
    # Returns the scalar of the last random pair without the pmapped dimension.
scalars = utils.get_first(scalars)
return scalars, agent_storage
| emergent_communication_at_scale-main | trainers/communication_trainer.py |
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines different types of listeners."""
import haiku as hk
from emergent_communication_at_scale import types
from emergent_communication_at_scale.networks import cores
from emergent_communication_at_scale.networks import heads
from emergent_communication_at_scale.networks import torsos
class Listener(hk.Module):
"""General Recurrent Language Listener."""
def __init__(
self,
torso_config: types.Config,
core_config: types.Config,
head_config: types.Config,
name: str = 'listener',
) -> None:
super().__init__(name=name)
self._torso = torsos.torso_factory(**torso_config, name='torso')
self._core = cores.core_factory(**core_config, name='core')
self._head = heads.head_factory(**head_config, name='head')
# Adding a dummy state to listeners to have symmetric speakers/listeners.
hk.get_state('dummy_state', shape=(), init=hk.initializers.Constant(0.))
def __call__(
self,
games: types.Games,
training_mode: types.TrainingMode,
) -> types.ListenerOutputs:
"""Unroll Listener over token of messages."""
del training_mode
batch_size = games.speaker_outputs.action.shape[0]
# Torso
embedded_message = self._torso(games.speaker_outputs.action)
# Core
initial_state = self._core.initial_state(batch_size)
core_out, _ = hk.static_unroll(
self._core, embedded_message, initial_state, time_major=False)
core_out = core_out[:, -1, :] # Only consider the last repr. of core
# Head
listener_head_outputs = self._head(core_out, games)
return types.ListenerOutputs(
predictions=listener_head_outputs.predictions,
targets=listener_head_outputs.targets,
)
| emergent_communication_at_scale-main | networks/listeners.py |
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Torso networks."""
import chex
import haiku as hk
from emergent_communication_at_scale import types
def torso_factory(
torso_type: types.TorsoType,
torso_kwargs: types.Config,
name: str,
):
"""Builds torso from name and kwargs."""
if torso_type == types.TorsoType.DISCRETE:
torso = DiscreteTorso(name=name, **torso_kwargs)
elif torso_type == types.TorsoType.MLP:
torso = hk.nets.MLP(name=name, **torso_kwargs)
elif torso_type == types.TorsoType.IDENTITY:
torso = Identity(name=name)
else:
raise ValueError(f'Incorrect torso type {torso_type}.')
return torso
class DiscreteTorso(hk.Module):
"""Torso for discrete entries."""
def __init__(
self,
vocab_size: int,
embed_dim: int,
mlp_kwargs: types.Config,
name: str,
) -> None:
super().__init__(name=name)
self._vocab_size = vocab_size
self._embed_dim = embed_dim
self._mlp_kwargs = mlp_kwargs
def __call__(self, x: chex.Array) -> chex.Array:
# chex.assert_rank(x, 1)
h = hk.Embed(
vocab_size=self._vocab_size,
embed_dim=self._embed_dim,
)(x,)
return hk.nets.MLP(**self._mlp_kwargs)(h)
class Identity(hk.Module):
"""Torso for Identity."""
def __call__(self, x: chex.Array) -> chex.Array:
return x
| emergent_communication_at_scale-main | networks/torsos.py |
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Reccurent networks."""
from typing import Optional, Sequence, Tuple
import chex
import haiku as hk
import jax.numpy as jnp
from emergent_communication_at_scale import types
def core_factory(
core_type: types.CoreType,
core_kwargs: types.Config,
name: str,
) -> hk.RNNCore:
"""Builds core from name and kwargs."""
if core_type == types.CoreType.LSTM:
core = hk.LSTM(name=name, **core_kwargs)
elif core_type == types.CoreType.GRU:
core = hk.GRU(name=name, **core_kwargs)
elif core_type == types.CoreType.IDENTITY:
core = CustomedIdentityCore(name=name, **core_kwargs)
else:
raise ValueError(f'Incorrect core type {core_type}.')
return core
class CustomedIdentityCore(hk.RNNCore):
"""A recurrent core that forwards the inputs and a mock state.
This is commonly used when switching between recurrent and feedforward
versions of a model while preserving the same interface.
"""
def __init__(
self,
hidden_size: int,
name: Optional[str] = None,
) -> None:
"""Constructs an CustomedIdentityCore.
Args:
hidden_size: Hidden layer size.
name: Name of the module.
"""
super().__init__(name=name)
self.hidden_size = hidden_size
def __call__(
self,
inputs: Sequence[chex.Array],
state: hk.LSTMState,
) -> Tuple[Sequence[chex.Array], hk.LSTMState]:
return inputs, state
def initial_state(self, batch_size: Optional[int]) -> hk.LSTMState:
return hk.LSTMState(
hidden=jnp.zeros([batch_size, self.hidden_size]),
cell=jnp.zeros([batch_size, self.hidden_size]),
)
class ToCoreState(hk.Module):
"""Module to get a core state from an embedding."""
def __init__(
self,
prototype: types.RNNState,
name: Optional[str] = None,
) -> None:
super().__init__(name=name)
self._prototype = prototype
def __call__(self, embedding: chex.Array) -> types.RNNState:
if isinstance(self._prototype, hk.LSTMState):
return _ToLSTMState(self._prototype.cell.shape[-1])(embedding)
elif isinstance(self._prototype, chex.Array):
return hk.Linear(output_size=self._prototype.shape[-1])(embedding)
elif not self._prototype:
return ()
else:
raise ValueError(f'Invalid prototype type for core state '
f'{type(self._prototype)}.')
class _ToLSTMState(hk.Module):
"""Module linearly mapping a tensor to an hk.LSTMState."""
def __init__(self, output_size: int) -> None:
super().__init__(name='to_lstm_state')
self._linear = hk.Linear(output_size=2 * output_size)
def __call__(self, inputs: jnp.ndarray) -> jnp.ndarray:
h, c = jnp.split(self._linear(inputs), indices_or_sections=2, axis=-1)
return hk.LSTMState(h, c)
| emergent_communication_at_scale-main | networks/cores.py |
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines different types of speakers."""
from typing import Optional
import chex
import haiku as hk
import jax.numpy as jnp
import rlax
from emergent_communication_at_scale import types
from emergent_communication_at_scale.networks import cores
from emergent_communication_at_scale.networks import heads
from emergent_communication_at_scale.networks import torsos
class Speaker(hk.Module):
"""General Reccurrent Language Speaker."""
def __init__(
self,
length: int,
vocab_size: int,
torso_config: types.Config,
embedder_config: types.Config,
core_config: types.Config,
head_config: types.Config,
name: str = 'speaker',
) -> None:
super().__init__(name=name)
self._length = length
self._vocab_size = vocab_size
self._core_config = core_config
self._torso = torsos.torso_factory(**torso_config, name='torso')
self._embedder = torsos.torso_factory(**embedder_config, name='embedder')
self._core = cores.core_factory(**core_config, name='core')
self._head = heads.head_factory(**head_config, name='head')
hk.get_state('avg_score', shape=(), init=hk.initializers.Constant(0.))
hk.get_state('counter', shape=(), init=hk.initializers.Constant(0.))
def __call__(
self,
games: types.Games,
training_mode: types.TrainingMode,
actions_to_follow: Optional[chex.Array] = None,
) -> types.SpeakerOutputs:
batch_size = games.speaker_inp.shape[0]
continuous_embedding = self._torso(games.speaker_inp)
state = cores.ToCoreState(prototype=self._core.initial_state(batch_size))(
continuous_embedding)
    sos_symbol = self._vocab_size  # Last vocabulary entry is the <SOS> token.
prev_token = jnp.array([sos_symbol] * batch_size)
if training_mode == types.TrainingMode.FORCING:
assert actions_to_follow is not None
action_list = []
policy_logits_list = []
action_log_prob_list = []
entropy_list = []
q_values_list = []
value_list = []
distr = rlax.softmax(temperature=1)
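    # Autoregressive unroll: at every step, embed the previously emitted token,
    # advance the recurrent core, then pick the next token by sampling
    # (training), argmax (eval) or teacher forcing, depending on training_mode.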
for i in range(self._length):
step_input = self._embedder(prev_token)
output, state = self._core(step_input, state)
head_outputs = self._head(output)
policy_logits = head_outputs.policy_logits
q_values = head_outputs.q_values
value = head_outputs.value
      if training_mode == types.TrainingMode.TRAINING:
        rng = hk.next_rng_key()
        action = distr.sample(key=rng, logits=policy_logits)
      elif training_mode == types.TrainingMode.EVAL:
        action = jnp.argmax(policy_logits, axis=-1)
      elif training_mode == types.TrainingMode.FORCING:
        action = actions_to_follow[..., i]
else:
raise ValueError(f'Unknown training mode: {training_mode}.')
action_log_prob = distr.logprob(logits=policy_logits, sample=action)
entropy = distr.entropy(policy_logits)
prev_token = action
action_list.append(action)
policy_logits_list.append(policy_logits)
entropy_list.append(entropy)
action_log_prob_list.append(action_log_prob)
q_values_list.append(q_values)
value_list.append(value)
def maybe_stack_fn(x):
if x[0] is None:
return None
else:
return jnp.stack(x, axis=-1)
return types.SpeakerOutputs(
action=maybe_stack_fn(action_list),
action_log_prob=maybe_stack_fn(action_log_prob_list),
entropy=maybe_stack_fn(entropy_list),
policy_logits=maybe_stack_fn(policy_logits_list),
q_values=maybe_stack_fn(q_values_list),
value=maybe_stack_fn(value_list),
)
| emergent_communication_at_scale-main | networks/speakers.py |
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Head networks."""
from typing import Any, Iterable, Optional, Union
import chex
import haiku as hk
import jax
import jax.numpy as jnp
from emergent_communication_at_scale import types
def head_factory(
head_type: Union[types.SpeakerLossType, types.ListenerLossType],
head_kwargs: types.Config,
kwargs: types.Config,
name: str,
) -> Any:
"""Builds head from name and kwargs."""
loss_specific_kwargs = kwargs.get(head_type, dict())
all_kwargs = {**head_kwargs, **loss_specific_kwargs}
if head_type == types.SpeakerHeadType.POLICY:
head = PolicyHead(name=name, **all_kwargs)
elif head_type == types.SpeakerHeadType.POLICY_QVALUE:
head = PolicyQValueHead(name=name, **all_kwargs)
elif head_type == types.SpeakerHeadType.POLICY_QVALUE_DUELING:
head = PolicyQValueDuelingHead(name=name, **all_kwargs)
elif head_type == types.ListenerHeadType.MULTIMLP:
head = MultiMlpHead(name=name, **all_kwargs)
elif head_type == types.ListenerHeadType.CPC:
head = CpcHead(name=name, **all_kwargs)
else:
raise ValueError(f'Incorrect head type {head_type}.')
return head
class MultiMlpHead(hk.Module):
"""MultiMLP head."""
def __init__(
self,
hidden_sizes: Iterable[int],
task: types.Task,
name: Optional[str] = 'multi_mlp_head',
):
super().__init__(name)
self._hidden_sizes = tuple(hidden_sizes)
self._task = task
def __call__(
self,
message_rep: chex.Array,
games: types.Games,
) -> types.ListenerHeadOutputs:
assert self._task in games.labels
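    # Builds one MLP per leaf in the task's label structure and applies each of
    # them to the same message representation.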
mlps = jax.tree_map(
lambda x: hk.nets.MLP(output_sizes=self._hidden_sizes + (x.shape[-1],)),
games.labels[self._task])
predictions = jax.tree_map(lambda x, m=message_rep: x(m), mlps)
return types.ListenerHeadOutputs(predictions=predictions, targets=None)
class PolicyHead(hk.Module):
"""Policy head."""
def __init__(
self,
num_actions: int,
hidden_sizes: Iterable[int],
name: Optional[str] = None,
) -> None:
super().__init__(name)
self._policy_head = hk.nets.MLP(
output_sizes=tuple(hidden_sizes) + (num_actions,))
def __call__(self, inputs) -> types.SpeakerHeadOutputs:
return types.SpeakerHeadOutputs(policy_logits=self._policy_head(inputs))
class DuelingHead(hk.Module):
"""Dueling value head."""
def __init__(
self,
num_actions: int,
hidden_sizes: Iterable[int],
name: Optional[str] = None,
) -> None:
super().__init__(name)
self._value_net = hk.nets.MLP(tuple(hidden_sizes) + (1,))
self._advantage_net = hk.nets.MLP(tuple(hidden_sizes) + (num_actions,))
def __call__(self, inputs) -> types.DuelingHeadOutputs:
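    # Dueling decomposition: Q(s, a) = V(s) + A(s, a) - mean_a A(s, a), so the
    # advantage stream is centred and the two streams stay identifiable.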
state_value = self._value_net(inputs)
advantage = self._advantage_net(inputs)
mean_advantage = jnp.mean(advantage, axis=-1, keepdims=True)
q_values = state_value + advantage - mean_advantage
return types.DuelingHeadOutputs(q_values=q_values, value=state_value)
class PolicyQValueHead(hk.Module):
"""Policy and Qvalue head."""
def __init__(
self,
num_actions: int,
hidden_sizes: Iterable[int],
name: Optional[str] = None,
) -> None:
super().__init__(name)
self._policy_head = hk.nets.MLP(
output_sizes=tuple(hidden_sizes) + (num_actions,))
self._value_head = hk.nets.MLP(
output_sizes=tuple(hidden_sizes) + (num_actions,))
self._q_value_head = DuelingHead(
num_actions=num_actions, hidden_sizes=hidden_sizes)
def __call__(self, inputs) -> types.SpeakerHeadOutputs:
dueling_head_outputs = self._q_value_head(inputs)
return types.SpeakerHeadOutputs(
policy_logits=self._policy_head(inputs),
q_values=dueling_head_outputs.q_values,
value=dueling_head_outputs.value,
)
class PolicyQValueDuelingHead(hk.Module):
"""Policy and Qvalue head."""
def __init__(
self,
num_actions: int,
hidden_sizes: Iterable[int],
name: Optional[str] = None,
) -> None:
super().__init__(name)
self._policy_head = hk.nets.MLP(
output_sizes=tuple(hidden_sizes) + (num_actions,))
self._value_head = hk.nets.MLP(output_sizes=tuple(hidden_sizes) + (1,))
self._q_value_head = DuelingHead(
num_actions=num_actions, hidden_sizes=hidden_sizes)
def __call__(self, inputs) -> types.SpeakerHeadOutputs:
return types.SpeakerHeadOutputs(
policy_logits=self._policy_head(inputs),
q_values=self._q_value_head(inputs).q_values,
value=self._value_head(inputs),
)
class CpcHead(hk.Module):
"""CPC head."""
def __init__(
self,
hidden_sizes: Iterable[int],
name: Optional[str] = 'cpc_head',
) -> None:
super().__init__(name)
self.proj_pred = hk.nets.MLP(output_sizes=hidden_sizes)
self.proj_target = hk.nets.MLP(output_sizes=hidden_sizes)
def __call__(
self,
message_rep: chex.Array,
games: types.Games,
) -> types.ListenerHeadOutputs:
    # Take the second view if it exists; otherwise fall back to the same
    # input view.
if types.Task.DISCRIMINATION in games.labels:
target_inputs = games.labels[types.Task.DISCRIMINATION]
else:
target_inputs = games.speaker_inp
return types.ListenerHeadOutputs(
predictions=self.proj_pred(message_rep),
targets=self.proj_target(target_inputs),
)
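# Illustrative sketch (not part of the original module, and not necessarily the
# loss used in this repository): CPC-style heads such as the one above are
# typically scored by contrasting every prediction against all targets in the
# batch, e.g. via a similarity matrix whose diagonal holds the positive pairs.
def _cpc_similarity_sketch(predictions: chex.Array,
                           targets: chex.Array) -> chex.Array:
  # predictions, targets: [batch, hidden]; returns [batch, batch] similarities.
  return jnp.einsum('bh,ch->bc', predictions, targets)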
| emergent_communication_at_scale-main | networks/heads.py |
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper to create Celeb_a split wi."""
import collections
import os
import re
from absl import app
DATASET_PATH = '.'
IMG = os.path.join(DATASET_PATH, 'img_align_celeba')
CLASS_IDX = os.path.join(DATASET_PATH, 'identity_CelebA.txt')
ATTRIBUTE_VALUES = os.path.join(DATASET_PATH, 'list_attr_celeba.txt')
LANDMARKS = os.path.join(DATASET_PATH, 'list_landmarks_align_celeba.txt')
TRAIN_SPLIT = os.path.join(DATASET_PATH, 'list_eval_partition.txt')
use_split_perso = True
ratio = 5
def main(argv):
del argv
dataset = collections.OrderedDict()
  ### Load image identity labels
with open(CLASS_IDX, 'r') as f:
for line in f:
# Parse line
line = line.strip()
img_id, label = line.split(' ')
dataset[img_id] = dict() # create sample entries
dataset[img_id]['label'] = int(label)
dataset[img_id]['image_id'] = int(img_id.split('.')[0])
dataset[img_id]['filename'] = img_id.encode('utf-8')
attribute_names = []
with open(ATTRIBUTE_VALUES, 'r') as f:
for i, line in enumerate(f):
# Parse line
line = line.strip()
if i == 0:
assert len(dataset) == int(line)
elif i == 1:
attribute_names = line.split(' ')
else:
line = re.sub(' +', ' ', line)
info = line.split(' ')
img_id, attr_values = info[0], info[1:]
attr_values = [val == '1' for val in attr_values]
attributes = {k: v for k, v in zip(attribute_names, attr_values)}
# Store data
dataset[img_id]['attributes'] = attributes
landmark_names = []
with open(LANDMARKS, 'r') as f:
for i, line in enumerate(f):
# Parse line
line = line.strip()
if i == 0:
assert len(dataset) == int(line)
elif i == 1:
landmark_names = line.split(' ')
else:
line = re.sub(' +', ' ', line)
info = line.split(' ')
img_id, landmarks = info[0], info[1:]
landmarks = [int(l) for l in landmarks]
landmarks = {k: v for k, v in zip(landmark_names, landmarks)}
# Store data
dataset[img_id]['landmarks'] = landmarks
  # Split into train/valid/test, using either the ratio-based personal split
  # or the official split file.
image_train, image_valid, image_test = [], [], []
if use_split_perso:
counter_label = collections.Counter()
for data in dataset.values():
label = data['label']
count = counter_label[label]
if count > 0 and count % ratio == 0:
if label % 2 == 0:
image_valid.append(data)
else:
image_test.append(data)
else:
image_train.append(data)
counter_label[label] += 1
else:
with open(TRAIN_SPLIT, 'r') as f:
for line in f:
# Parse line
line = line.strip()
img_id, split_id = line.split(' ')
split_id = int(split_id)
if split_id == 0:
image_train.append(dataset[img_id])
elif split_id == 1:
image_valid.append(dataset[img_id])
elif split_id == 2:
image_test.append(dataset[img_id])
else:
assert False
print('Done!')
return image_train, image_valid, image_test
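# Toy illustration (not part of the original script) of the personal split rule
# used above: every `ratio`-th repetition of an identity is held out, with even
# identities going to valid and odd identities going to test.
def _split_rule_example():
  counter_label = collections.Counter()
  buckets = {'train': [], 'valid': [], 'test': []}
  for label in [2] * 6 + [3] * 6:
    count = counter_label[label]
    if count > 0 and count % ratio == 0:
      buckets['valid' if label % 2 == 0 else 'test'].append(label)
    else:
      buckets['train'].append(label)
    counter_label[label] += 1
  # With ratio=5: ten samples stay in train, one '2' goes to valid and one '3'
  # goes to test.
  return buckets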
if __name__ == '__main__':
app.run(main)
| emergent_communication_at_scale-main | dataset_tools/celeba.py |
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base config."""
from jaxline import base_config
from ml_collections import config_dict
from emergent_communication_at_scale import types
TASK_OVERRIDE = {}
def get_config(sweep='debug'):
"""Return config object for training."""
config = base_config.get_base_config()
get_value = lambda x, c=config: c.get_oneway_ref(x)
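  # `get_oneway_ref` returns one-way references, so the nested fields defined
  # below (e.g. training.steps) track later overrides of these top-level
  # values.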
config.experiment = 'ease_of_learning'
# Define global storage folder (ckpt, logs etc.)
config.checkpoint_dir = '/tmp/cidre_ckpts'
# Overwrite plotting options
config.interval_type = 'steps'
config.log_train_data_interval = 100
config.log_tensors_interval = 300
  # Basic jaxline logging options (these override the interval settings set
  # just above).
config.interval_type = 'secs'
config.log_train_data_interval = 60
config.log_tensors_interval = 60
config.training_steps = int(1e4)
# Put here values that are referenced multiple times
config.vocab_size = 20
config.length = 10
config.task = types.Task.CLASSIFICATION
config.experiment_kwargs = config_dict.ConfigDict(
dict(
config=dict(
debug=False,
training=dict(
batch_size=1024,
length=get_value('length'),
steps=get_value('training_steps'),
),
listener_optimizer=dict(
name='adam',
learning_rate=0.001,
kwargs=dict(adam=dict()),
),
speaker_path=dict(
path=get_value('checkpoint_dir') + '/agents.pkl',
speaker_index=0),
listener=dict(
torso_config=dict(
torso_type=types.TorsoType.DISCRETE,
torso_kwargs=dict(
vocab_size=get_value('vocab_size'),
embed_dim=10,
mlp_kwargs=dict(output_sizes=(),))),
core_config=dict(
core_type=types.CoreType.LSTM,
core_kwargs=dict(hidden_size=512),
),
head_config=dict(
head_type=types.ListenerHeadType.CPC,
head_kwargs=dict(hidden_sizes=[256]),
kwargs=dict(
cpc=dict(),
mlp=dict(),
multi_mlp=dict(task=get_value('task')),
),
),
),
population=dict(), # Unused for EOL
imitation=dict(), # Unused for EOL
reset=dict(), # Unused for EOL
evaluation=dict(), # Unused for EOL
loss=dict(
speaker=dict(), # Unused for EOL
listener=dict(
loss_type=types.ListenerLossType.CPC,
reward_type=types.RewardType.SUCCESS_RATE,
kwargs=dict(
classif=dict(task=get_value('task')),
cpc=dict(num_distractors=-1, cross_device=True),
)),
),
game=dict(
name='visual_game',
kwargs=dict(
dummy=dict(
max_steps=get_value('training_steps')),
visual_game=dict(
dataset_name='byol_imagenet2012',
# Important: Make sure to download the data
# and update here.
dataset_path='emergent_communication_at_scale/emcom_datasets/',
coeff_noise=0.0,
shuffle_training=True,
is_one_hot_label=True,
))),
checkpointing=dict(
use_checkpointing=True,
checkpoint_dir=get_value('checkpoint_dir'),
save_checkpoint_interval=0,
filename='agents_eol.pkl'
),
),))
if sweep == 'debug':
config.experiment_kwargs.config.debug = True
config.interval_type = 'steps'
config.training_steps = int(1)
config.log_train_data_interval = 1
config.log_tensors_interval = 1
exp_config = config.experiment_kwargs.config
exp_config.training.batch_size = 8
elif sweep == 'celeba':
# Game
exp_config = config.experiment_kwargs.config
exp_config.game.kwargs.visual_game.dataset_name = 'byol_celeb_a2'
elif sweep == 'imagenet':
pass
else:
raise ValueError(f'Sweep {sweep} is not recognized.')
# Prevents accidentally setting keys that aren't recognized (e.g. in tests).
config.lock()
return config
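# Hypothetical usage sketch (not part of the original config): jaxline normally
# loads this module through its --config flag, but get_config can also be
# called directly to inspect a sweep, e.g. the debug overrides defined above.
if __name__ == '__main__':
  cfg = get_config('debug')
  # Expected: 'ease_of_learning', 1, 8.
  print(cfg.experiment, cfg.training_steps,
        cfg.experiment_kwargs.config.training.batch_size)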
| emergent_communication_at_scale-main | configs/ease_of_learning_config.py |
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base config."""
from jaxline import base_config
from ml_collections import config_dict
from emergent_communication_at_scale import types
TASK_OVERRIDE = {}
def get_config(sweep='debug'):
"""Return config object for training."""
config = base_config.get_base_config()
get_value = lambda x, c=config: c.get_oneway_ref(x)
config.experiment = 'lewis'
config.training_steps = int(2e5)
# Define global storage folder (ckpt, logs etc.)
config.checkpoint_dir = '/tmp/cidre_ckpts'
# Basic jaxline logging options
config.interval_type = 'secs'
config.log_train_data_interval = 60
config.log_tensors_interval = 60
# Put here values that are referenced multiple times
config.vocab_size = 20
config.length = 10
config.task = types.Task.CLASSIFICATION
config.experiment_kwargs = config_dict.ConfigDict(
dict(
config=dict(
debug=False,
speaker_optimizer=dict(
name='adam',
learning_rate=0.0001,
kwargs=dict(adam=dict()),
),
listener_optimizer=dict(
name='adam',
learning_rate=0.0001,
kwargs=dict(adam=dict()),
),
training=dict(
batch_size=1024,
length=get_value('length'),
target_update_ema=0.99,
steps=get_value('training_steps'),
),
population=dict(
n_speakers=1,
n_listeners=1,
num_agents_per_step=1,
),
speaker=dict(
length=get_value('length'),
vocab_size=get_value('vocab_size'),
torso_config=dict(
torso_type=types.TorsoType.IDENTITY,
torso_kwargs=dict(),
),
embedder_config=dict(
torso_type=types.TorsoType.DISCRETE,
torso_kwargs=dict(
vocab_size=get_value('vocab_size') + 1,
embed_dim=10,
mlp_kwargs=dict(output_sizes=(),)),
),
core_config=dict(
core_type=types.CoreType.LSTM,
core_kwargs=dict(hidden_size=256),
),
head_config=dict(
head_type=types.SpeakerHeadType.POLICY_QVALUE_DUELING,
head_kwargs=dict(
hidden_sizes=(), num_actions=get_value('vocab_size')),
kwargs=dict(),
),
),
listener=dict(
torso_config=dict(
torso_type=types.TorsoType.DISCRETE,
torso_kwargs=dict(
vocab_size=get_value('vocab_size'),
embed_dim=10,
mlp_kwargs=dict(output_sizes=(),))),
core_config=dict(
core_type=types.CoreType.LSTM,
core_kwargs=dict(hidden_size=512),
),
head_config=dict(
head_type=types.ListenerHeadType.CPC,
head_kwargs=dict(hidden_sizes=[256]),
kwargs=dict(
cpc=dict(),
mlp=dict(),
multi_mlp=dict(task=get_value('task')),
),
),
),
imitation=dict(
nbr_students=1,
imitation_step=None,
imitation_type=types.ImitationMode.BEST,
self_imitation=False,
),
reset=dict(reset_step=None, reset_type=types.ResetMode.PAIR),
evaluation=dict(
batch_size=1024,
subsampling_ratio=0.01,
max_n_agents=10,
topsim_meaning_similarity=types.MeaningSimilarity.INPUTS,
topsim_task=types.Task.CLASSIFICATION,
),
loss=dict(
speaker=dict(
loss_type=types.SpeakerLossType.REINFORCE,
use_baseline=True,
speaker_entropy=1e-4,
kwargs=dict(
policy_gradient=dict(),
reinforce=dict(speaker_kl_target=0.5)),
),
listener=dict(
loss_type=types.ListenerLossType.CPC,
reward_type=types.RewardType.SUCCESS_RATE,
kwargs=dict(
classif=dict(task=get_value('task')),
cpc=dict(num_distractors=-1, cross_device=True),
)),
),
game=dict(
name='visual_game',
kwargs=dict(
dummy=dict( # Dataset used for testing.
max_steps=get_value('training_steps')),
visual_game=dict(
dataset_name='byol_imagenet2012',
# Important: Make sure to download the data
# and update here.
dataset_path='emergent_communication_at_scale/emcom_datasets/',
coeff_noise=0.0,
num_evaluation_epochs=5,
shuffle_evaluation=True,
shuffle_training=True,
is_one_hot_label=False,
))),
checkpointing=dict(
use_checkpointing=True,
checkpoint_dir=get_value('checkpoint_dir'),
save_checkpoint_interval=300,
filename='agents.pkl'
),
),))
exp_config = config.experiment_kwargs.config
if sweep == 'debug':
config.experiment_kwargs.config.debug = True
config.training_steps = int(1)
config.interval_type = 'steps'
config.log_train_data_interval = 1
config.log_tensors_interval = 1
exp_config.checkpointing.save_checkpoint_interval = 1
exp_config.training.batch_size = 8
exp_config.evaluation.batch_size = 8
exp_config.evaluation.subsampling_ratio = 0.5
elif sweep == 'celeba':
# Game
exp_config = config.experiment_kwargs.config
exp_config.game.kwargs.visual_game.dataset_name = 'byol_celeb_a2'
# Evaluation
exp_config.evaluation.topsim_meaning_similarity = types.MeaningSimilarity.ATTRIBUTES
exp_config.evaluation.subsampling_ratio = 0.01
exp_config.evaluation.topsim_task = types.Task.ATTRIBUTE # used for topsim
elif sweep == 'imagenet':
pass
elif sweep == 'imagenet_imitation':
# Set population size
exp_config.population.n_speakers = 10
exp_config.population.n_listeners = 10
exp_config.population.num_agents_per_step = 10
# Set imitation parameters
exp_config.imitation.nbr_students = 4
exp_config.imitation.imitation_step = 10
else:
raise ValueError(f'Sweep {sweep} not recognized.')
# Prevents accidentally setting keys that aren't recognized (e.g. in tests).
config.lock()
return config
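# Hypothetical usage sketch (not part of the original config): comparing two of
# the sweeps defined above, e.g. to check the population sizes they configure.
if __name__ == '__main__':
  debug_cfg = get_config('debug')
  imitation_cfg = get_config('imagenet_imitation')
  print(debug_cfg.experiment_kwargs.config.population.n_speakers)      # 1
  print(imitation_cfg.experiment_kwargs.config.population.n_speakers)  # 10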
| emergent_communication_at_scale-main | configs/lewis_config.py |