python_code | repo_name | file_path |
---|---|---|
# Lint as: python3
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
r"""Run an experiment.
Run GPE/GPI on task (1, -1) with a learned phi model and w obtained by regression.
For example, first train a phi model with a 3-dimensional phi:
python3 train_phi_model.py -- --logtostderr --use_random_tasks \
--export_path=/tmp/option_keyboard/phi_model_3d --num_phis=3
Then train a keyboard:
python3 train_keyboard_with_phi.py -- --logtostderr \
--export_path=/tmp/option_keyboard/keyboard_3d \
--phi_model_path=/tmp/option_keyboard/phi_model_3d \
--num_phis=2
Finally, evaluate the keyboard with w obtained by regression.
python3 run_regressed_w_with_phi_fig4c.py -- --logtostderr \
--phi_model_path=/tmp/option_keyboard/phi_model_3d \
--keyboard_path=/tmp/option_keyboard/keyboard_3d/tfhub
"""
from absl import app
from absl import flags
import numpy as np
import tensorflow.compat.v1 as tf
import tensorflow_hub as hub
from option_keyboard import configs
from option_keyboard import environment_wrappers
from option_keyboard import experiment
from option_keyboard import scavenger
from option_keyboard import smart_module
from option_keyboard.gpe_gpi_experiments import regressed_agent
FLAGS = flags.FLAGS
flags.DEFINE_integer("num_episodes", 100, "Number of training episodes.")
flags.DEFINE_integer("report_every", 1,
"Frequency at which metrics are reported.")
flags.DEFINE_string("phi_model_path", None, "Path to phi model.")
flags.DEFINE_string("keyboard_path", None, "Path to keyboard model.")
flags.DEFINE_string("output_path", None, "Path to write out training curves.")
def main(argv):
del argv
# Load the keyboard.
keyboard = smart_module.SmartModuleImport(hub.Module(FLAGS.keyboard_path))
# Create the task environment.
base_env_config = configs.get_fig4_task_config()
base_env = scavenger.Scavenger(**base_env_config)
base_env = environment_wrappers.EnvironmentWithLogging(base_env)
base_env = environment_wrappers.EnvironmentWithLearnedPhi(
base_env, FLAGS.phi_model_path)
# Wrap the task environment with the keyboard.
additional_discount = 0.9
env = environment_wrappers.EnvironmentWithKeyboardDirect(
env=base_env,
keyboard=keyboard,
keyboard_ckpt_path=None,
additional_discount=additional_discount,
call_and_return=False)
# Create the player agent.
agent = regressed_agent.Agent(
batch_size=10,
optimizer_name="AdamOptimizer",
optimizer_kwargs=dict(learning_rate=3e-2,),
init_w=np.random.normal(size=keyboard.num_cumulants) * 0.1,
)
_, ema_returns = experiment.run(
env,
agent,
num_episodes=FLAGS.num_episodes,
report_every=FLAGS.report_every,
num_eval_reps=100)
if FLAGS.output_path:
experiment.write_returns_to_file(FLAGS.output_path, ema_returns)
if __name__ == "__main__":
tf.disable_v2_behavior()
app.run(main)
| deepmind-research-master | option_keyboard/gpe_gpi_experiments/run_regressed_w_with_phi_fig4c.py |
# Lint as: python3
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Train simple phi model."""
import collections
import random
from absl import app
from absl import flags
from absl import logging
import numpy as np
import sonnet as snt
import tensorflow.compat.v1 as tf
import tree
from option_keyboard import scavenger
from option_keyboard import smart_module
FLAGS = flags.FLAGS
flags.DEFINE_integer("num_phis", 2, "Dimensionality of phis.")
flags.DEFINE_integer("num_train_steps", 2000, "Number of training steps.")
flags.DEFINE_integer("num_replay_steps", 500, "Number of replay steps.")
flags.DEFINE_integer("min_replay_size", 1000,
"Minimum replay size before starting training.")
flags.DEFINE_integer("num_train_repeats", 10, "Number of training repeats.")
flags.DEFINE_float("learning_rate", 3e-3, "Learning rate.")
flags.DEFINE_bool("use_random_tasks", False, "Use random tasks.")
flags.DEFINE_string("normalisation", "L2",
"Normalisation method for cumulant weights.")
flags.DEFINE_string("export_path", None, "Export path.")
StepOutput = collections.namedtuple("StepOutput",
["obs", "actions", "rewards", "next_obs"])
def collect_experience(env, num_episodes, verbose=False):
"""Collect experience."""
num_actions = env.action_spec().maximum + 1
observations = []
actions = []
rewards = []
next_observations = []
for _ in range(num_episodes):
timestep = env.reset()
episode_return = 0
while not timestep.last():
action = np.random.randint(num_actions)
observations.append(timestep.observation)
actions.append(action)
timestep = env.step(action)
rewards.append(timestep.observation["aux_tasks_reward"])
episode_return += timestep.reward
next_observations.append(timestep.observation)
if verbose:
logging.info("Total return for episode: %f", episode_return)
observation_spec = tree.map_structure(lambda _: None, observations[0])
def stack_observations(obs_list):
obs_list = [
np.stack(obs) for obs in zip(*[tree.flatten(obs) for obs in obs_list])
]
obs_dict = tree.unflatten_as(observation_spec, obs_list)
obs_dict.pop("aux_tasks_reward")
return obs_dict
observations = stack_observations(observations)
actions = np.array(actions, dtype=np.int32)
rewards = np.stack(rewards)
next_observations = stack_observations(next_observations)
return StepOutput(observations, actions, rewards, next_observations)
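# PhiModel below predicts a phi (cumulant) vector for every action: the MLP
# output is reshaped to [batch, n_actions, n_phis] and tf.gather_nd then
# selects the phi vector for the action actually taken in each transition.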
class PhiModel(snt.AbstractModule):
"""A model for learning phi."""
def __init__(self,
n_actions,
n_phis,
network_kwargs,
final_activation="sigmoid",
name="PhiModel"):
super(PhiModel, self).__init__(name=name)
self._n_actions = n_actions
self._n_phis = n_phis
self._network_kwargs = network_kwargs
self._final_activation = final_activation
def _build(self, observation, actions):
obs = observation["arena"]
n_outputs = self._n_actions * self._n_phis
flat_obs = snt.BatchFlatten()(obs)
net = snt.nets.MLP(**self._network_kwargs)(flat_obs)
net = snt.Linear(output_size=n_outputs)(net)
net = snt.BatchReshape((self._n_actions, self._n_phis))(net)
indices = tf.stack([tf.range(tf.shape(actions)[0]), actions], axis=1)
values = tf.gather_nd(net, indices)
if self._final_activation:
values = getattr(tf.nn, self._final_activation)(values)
return values
def create_ph(tensor):
return tf.placeholder(shape=(None,) + tensor.shape[1:], dtype=tensor.dtype)
def main(argv):
del argv
if FLAGS.use_random_tasks:
tasks = np.random.normal(size=(8, 2))
else:
tasks = [
[1.0, 0.0],
[0.0, 1.0],
[1.0, 1.0],
[-1.0, 1.0],
]
if FLAGS.normalisation == "L1":
tasks /= np.sum(np.abs(tasks), axis=-1, keepdims=True)
elif FLAGS.normalisation == "L2":
tasks /= np.linalg.norm(tasks, axis=-1, keepdims=True)
else:
raise ValueError("Unknown normlisation_method {}".format(
FLAGS.normalisation))
logging.info("Tasks: %s", tasks)
env_config = dict(
arena_size=11,
num_channels=2,
max_num_steps=100,
num_init_objects=10,
object_priors=[1.0, 1.0],
egocentric=True,
default_w=None,
aux_tasks_w=tasks)
env = scavenger.Scavenger(**env_config)
num_actions = env.action_spec().maximum + 1
model_config = dict(
n_actions=num_actions,
n_phis=FLAGS.num_phis,
network_kwargs=dict(
output_sizes=(64, 128),
activate_final=True,
),
)
model = smart_module.SmartModuleExport(lambda: PhiModel(**model_config))
dummy_steps = collect_experience(env, num_episodes=10, verbose=True)
num_rewards = dummy_steps.rewards.shape[-1]
# Placeholders
steps_ph = tree.map_structure(create_ph, dummy_steps)
phis = model(steps_ph.obs, steps_ph.actions)
phis_to_rewards = snt.Linear(
num_rewards, initializers=dict(w=tf.zeros), use_bias=False)
preds = phis_to_rewards(phis)
loss_per_batch = tf.square(preds - steps_ph.rewards)
loss_op = tf.reduce_mean(loss_per_batch)
replay = []
# Optimizer and train op.
with tf.variable_scope("optimizer"):
optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)
train_op = optimizer.minimize(loss_op)
# Add normalisation of weights in phis_to_rewards
if FLAGS.normalisation == "L1":
w_norm = tf.reduce_sum(tf.abs(phis_to_rewards.w), axis=0, keepdims=True)
elif FLAGS.normalisation == "L2":
w_norm = tf.norm(phis_to_rewards.w, axis=0, keepdims=True)
else:
raise ValueError("Unknown normlisation_method {}".format(
FLAGS.normalisation))
normalise_w = tf.assign(phis_to_rewards.w,
phis_to_rewards.w / tf.maximum(w_norm, 1e-6))
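# filter_steps below balances the regression data: it keeps every transition
# with a non-trivial auxiliary reward and subsamples an equal number of
# zero-reward transitions, so that training batches are not dominated by
# transitions with (near-)zero auxiliary reward.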
def filter_steps(steps):
mask = np.sum(np.abs(steps.rewards), axis=-1) > 0.1
nonzero_inds = np.where(mask)[0]
zero_inds = np.where(np.logical_not(mask))[0]
zero_inds = np.random.choice(
zero_inds, size=len(nonzero_inds), replace=False)
selected_inds = np.concatenate([nonzero_inds, zero_inds])
selected_steps = tree.map_structure(lambda x: x[selected_inds], steps)
return selected_steps, selected_inds
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
step = 0
while step < FLAGS.num_train_steps:
step += 1
steps_output = collect_experience(env, num_episodes=10)
selected_step_outputs, selected_inds = filter_steps(steps_output)
if len(replay) > FLAGS.min_replay_size:
# Do training.
for _ in range(FLAGS.num_train_repeats):
train_samples = random.choices(replay, k=128)
train_samples = tree.map_structure(
lambda *x: np.stack(x, axis=0), *train_samples)
train_samples = tree.unflatten_as(steps_ph, train_samples)
feed_dict = dict(
zip(tree.flatten(steps_ph), tree.flatten(train_samples)))
_, train_loss = sess.run([train_op, loss_op], feed_dict=feed_dict)
sess.run(normalise_w)
# Do evaluation.
if step % 50 == 0:
feed_dict = dict(
zip(tree.flatten(steps_ph), tree.flatten(selected_step_outputs)))
eval_loss = sess.run(loss_op, feed_dict=feed_dict)
logging.info("Step %d, train loss %f, eval loss %f, replay %s",
step, train_loss, eval_loss, len(replay))
print(sess.run(phis_to_rewards.get_variables())[0].T)
values = dict(step=step, train_loss=train_loss, eval_loss=eval_loss)
logging.info(values)
# Add to replay.
if step <= FLAGS.num_replay_steps:
def select_fn(ind):
return lambda x: x[ind]
for idx in range(len(selected_inds)):
replay.append(
tree.flatten(
tree.map_structure(select_fn(idx), selected_step_outputs)))
# Export trained model.
if FLAGS.export_path:
model.export(FLAGS.export_path, sess, overwrite=True)
if __name__ == "__main__":
tf.disable_v2_behavior()
app.run(main)
| deepmind-research-master | option_keyboard/gpe_gpi_experiments/train_phi_model.py |
# Lint as: python3
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Run an experiment.
Run a Q-learning agent on task (1, -1).
"""
from absl import app
from absl import flags
import tensorflow.compat.v1 as tf
from option_keyboard import configs
from option_keyboard import dqn_agent
from option_keyboard import environment_wrappers
from option_keyboard import experiment
from option_keyboard import scavenger
FLAGS = flags.FLAGS
flags.DEFINE_integer("num_episodes", 10000, "Number of training episodes.")
flags.DEFINE_integer("report_every", 5,
"Frequency at which metrics are reported.")
flags.DEFINE_string("output_path", None, "Path to write out training curves.")
def main(argv):
del argv
# Create the task environment.
env_config = configs.get_fig4_task_config()
env = scavenger.Scavenger(**env_config)
env = environment_wrappers.EnvironmentWithLogging(env)
# Create the flat agent.
agent = dqn_agent.Agent(
obs_spec=env.observation_spec(),
action_spec=env.action_spec(),
network_kwargs=dict(
output_sizes=(64, 128),
activate_final=True,
),
epsilon=0.1,
additional_discount=0.9,
batch_size=10,
optimizer_name="AdamOptimizer",
optimizer_kwargs=dict(learning_rate=3e-4,))
_, ema_returns = experiment.run(
env,
agent,
num_episodes=FLAGS.num_episodes,
report_every=FLAGS.report_every)
if FLAGS.output_path:
experiment.write_returns_to_file(FLAGS.output_path, ema_returns)
if __name__ == "__main__":
tf.disable_v2_behavior()
app.run(main)
| deepmind-research-master | option_keyboard/gpe_gpi_experiments/run_dqn_fig4b.py |
# Lint as: python3
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Train a keyboard."""
from absl import app
from absl import flags
import numpy as np
import tensorflow.compat.v1 as tf
from option_keyboard import keyboard_utils
FLAGS = flags.FLAGS
flags.DEFINE_integer("num_pretrain_episodes", 20000,
"Number of pretraining episodes.")
flags.DEFINE_integer("num_phis", None, "Size of phi")
flags.DEFINE_string("phi_model_path", None,
"Where to load the phi model checkpoints.")
flags.DEFINE_string("export_path", None,
"Where to save the keyboard checkpoints.")
def main(argv):
del argv
keyboard_utils.create_and_train_keyboard_with_phi(
num_episodes=FLAGS.num_pretrain_episodes,
phi_model_path=FLAGS.phi_model_path,
policy_weights=np.eye(FLAGS.num_phis, dtype=np.float32),
export_path=FLAGS.export_path)
if __name__ == "__main__":
tf.disable_v2_behavior()
app.run(main)
| deepmind-research-master | option_keyboard/gpe_gpi_experiments/train_keyboard_with_phi.py |
# Lint as: python3
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
r"""Run an experiment.
Run GPE/GPI on task (1, -1) with w obtained by regression.
For example, first train a keyboard:
python3 train_keyboard.py -- --logtostderr --policy_weights_name=12 \
--export_path=/tmp/option_keyboard/keyboard
Then evaluate the keyboard with w obtained by regression.
python3 run_regressed_w_fig4c.py -- --logtostderr \
--keyboard_path=/tmp/option_keyboard/keyboard_12/tfhub
"""
from absl import app
from absl import flags
import numpy as np
import tensorflow.compat.v1 as tf
import tensorflow_hub as hub
from option_keyboard import configs
from option_keyboard import environment_wrappers
from option_keyboard import experiment
from option_keyboard import scavenger
from option_keyboard import smart_module
from option_keyboard.gpe_gpi_experiments import regressed_agent
FLAGS = flags.FLAGS
flags.DEFINE_integer("num_episodes", 100, "Number of training episodes.")
flags.DEFINE_integer("report_every", 1,
"Frequency at which metrics are reported.")
flags.DEFINE_string("keyboard_path", None, "Path to keyboard model.")
flags.DEFINE_string("output_path", None, "Path to write out training curves.")
def main(argv):
del argv
# Load the keyboard.
keyboard = smart_module.SmartModuleImport(hub.Module(FLAGS.keyboard_path))
# Create the task environment.
base_env_config = configs.get_fig4_task_config()
base_env = scavenger.Scavenger(**base_env_config)
base_env = environment_wrappers.EnvironmentWithLogging(base_env)
# Wrap the task environment with the keyboard.
additional_discount = 0.9
env = environment_wrappers.EnvironmentWithKeyboardDirect(
env=base_env,
keyboard=keyboard,
keyboard_ckpt_path=None,
additional_discount=additional_discount,
call_and_return=False)
# Create the player agent.
agent = regressed_agent.Agent(
batch_size=10,
optimizer_name="AdamOptimizer",
optimizer_kwargs=dict(learning_rate=3e-2,),
init_w=np.random.normal(size=keyboard.num_cumulants) * 0.1,
)
_, ema_returns = experiment.run(
env,
agent,
num_episodes=FLAGS.num_episodes,
report_every=FLAGS.report_every,
num_eval_reps=100)
if FLAGS.output_path:
experiment.write_returns_to_file(FLAGS.output_path, ema_returns)
if __name__ == "__main__":
tf.disable_v2_behavior()
app.run(main)
| deepmind-research-master | option_keyboard/gpe_gpi_experiments/run_regressed_w_fig4c.py |
# Lint as: python3
# pylint: disable=g-bad-file-header
# pylint: disable=line-too-long
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
r"""Run an experiment.
This script generates the raw data for the polar plots used to visualise how
well a trained keyboard covers the space of w.
For example, train 3 separate keyboards with different base policies:
python3 train_keyboard.py --logtostderr --policy_weights_name=12
python3 train_keyboard.py --logtostderr --policy_weights_name=34
python3 train_keyboard.py --logtostderr --policy_weights_name=5
Then generate the polar plot data as follows:
python3 eval_keyboard_fig5.py --logtostderr \
--keyboard_paths=/tmp/option_keyboard/keyboard_12/tfhub,/tmp/option_keyboard/keyboard_34/tfhub,/tmp/option_keyboard/keyboard_5/tfhub \
--num_episodes=1000
Example output:
[[ 0.11 0.261 -0.933 ]
[ 1.302 3.955 0.54 ]
[ 2.398 4.434 1.2105359 ]
[ 3.459 4.606 2.087 ]
[ 4.09026795 4.60911325 3.06106882]
[ 4.55499485 4.71947818 3.8123229 ]
[ 4.715 4.835 4.395 ]
[ 4.75743564 4.64095528 4.46330207]
[ 4.82518207 4.71232378 4.56190708]
[ 4.831 4.7155 4.5735 ]
[ 4.78074425 4.6754641 4.58312762]
[ 4.70154374 4.5416429 4.47850417]
[ 4.694 4.631 4.427 ]
[ 4.25085125 4.56606664 3.68157677]
[ 3.61726795 4.4838453 2.68154403]
[ 2.714 4.43 1.554 ]
[ 1.69 4.505 0.9635359 ]
[ 0.894 4.043 0.424 ]
[ 0.099 0.349 0.055 ]]
"""
import csv
from absl import app
from absl import flags
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1.io import gfile
import tensorflow_hub as hub
from option_keyboard import configs
from option_keyboard import environment_wrappers
from option_keyboard import experiment
from option_keyboard import scavenger
from option_keyboard import smart_module
from option_keyboard.gpe_gpi_experiments import regressed_agent
FLAGS = flags.FLAGS
flags.DEFINE_integer("num_episodes", 1000, "Number of training episodes.")
flags.DEFINE_list("keyboard_paths", [], "Path to keyboard model.")
flags.DEFINE_string("output_path", None, "Path to write out returns.")
def evaluate_keyboard(keyboard_path, weights_to_sweep):
"""Evaluate a keyboard."""
# Load the keyboard.
keyboard = smart_module.SmartModuleImport(hub.Module(keyboard_path))
# Create the task environment.
all_returns = []
for w_to_sweep in weights_to_sweep.tolist():
base_env_config = configs.get_fig5_task_config(w_to_sweep)
base_env = scavenger.Scavenger(**base_env_config)
base_env = environment_wrappers.EnvironmentWithLogging(base_env)
# Wrap the task environment with the keyboard.
with tf.variable_scope(None, default_name="inner_loop"):
additional_discount = 0.9
env = environment_wrappers.EnvironmentWithKeyboardDirect(
env=base_env,
keyboard=keyboard,
keyboard_ckpt_path=None,
additional_discount=additional_discount,
call_and_return=False)
# Create the player agent.
agent = regressed_agent.Agent(
batch_size=10,
optimizer_name="AdamOptimizer",
# Disable training.
optimizer_kwargs=dict(learning_rate=0.0,),
init_w=w_to_sweep)
returns = []
for _ in range(FLAGS.num_episodes):
returns.append(experiment.run_episode(env, agent))
tf.logging.info(f"Task: {w_to_sweep}, mean returns over "
f"{FLAGS.num_episodes} episodes is {np.mean(returns)}")
all_returns.append(returns)
return all_returns
def main(argv):
del argv
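# Sweep task weight vectors w = (sin(angle), cos(angle)) over 19 angles from
# -90 to 180 degrees, rescaled so that the positive components of each w sum
# to 1; the clip below bounds the magnitudes at the extreme angles where the
# positive part is numerically close to zero.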
angles_to_sweep = np.deg2rad(np.linspace(-90, 180, num=19, endpoint=True))
weights_to_sweep = np.stack(
[np.sin(angles_to_sweep),
np.cos(angles_to_sweep)], axis=-1)
weights_to_sweep /= np.sum(
np.maximum(weights_to_sweep, 0.0), axis=-1, keepdims=True)
weights_to_sweep = np.clip(weights_to_sweep, -1000, 1000)
tf.logging.info(weights_to_sweep)
all_returns = []
for keyboard_path in FLAGS.keyboard_paths:
returns = evaluate_keyboard(keyboard_path, weights_to_sweep)
all_returns.append(returns)
print("Results:")
print(np.mean(all_returns, axis=-1).T)
if FLAGS.output_path:
with gfile.GFile(FLAGS.output_path, "w") as file:
writer = csv.writer(file, delimiter=" ", quoting=csv.QUOTE_MINIMAL)
writer.writerow(["angle", "return", "idx"])
for idx, returns in enumerate(all_returns):
for row in np.array(returns).T.tolist():
assert len(angles_to_sweep) == len(row)
for ang, val in zip(angles_to_sweep, row):
ang = "{:.4g}".format(ang)
val = "{:.4g}".format(val)
writer.writerow([ang, val, idx])
if __name__ == "__main__":
tf.disable_v2_behavior()
app.run(main)
| deepmind-research-master | option_keyboard/gpe_gpi_experiments/eval_keyboard_fig5.py |
# Lint as: python3
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Regressed agent."""
import numpy as np
import tensorflow.compat.v1 as tf
class Agent():
"""A DQN Agent."""
def __init__(
self,
batch_size,
optimizer_name,
optimizer_kwargs,
init_w,
):
"""A simple DQN agent.
Args:
batch_size: Size of update batch.
optimizer_name: Name of an optimizer from tf.train
optimizer_kwargs: Keyword arguments for the optimizer.
init_w: The initial cumulant weight.
"""
self._batch_size = batch_size
self._init_w = np.array(init_w)
self._replay = []
# Regress w by gradient descent; a closed-form solution could also be used.
self._n_cumulants = len(init_w)
self._regressed_w = tf.get_variable(
"regressed_w",
dtype=tf.float32,
initializer=lambda: tf.to_float(init_w))
cumulants_ph = tf.placeholder(
shape=(None, self._n_cumulants), dtype=tf.float32)
rewards_ph = tf.placeholder(shape=(None,), dtype=tf.float32)
predicted_rewards = tf.reduce_sum(
tf.multiply(self._regressed_w, cumulants_ph), axis=-1)
loss = tf.reduce_sum(tf.square(predicted_rewards - rewards_ph))
with tf.variable_scope("optimizer"):
self._optimizer = getattr(tf.train, optimizer_name)(**optimizer_kwargs)
train_op = self._optimizer.minimize(loss)
# Make session and callables.
session = tf.Session()
self._update_fn = session.make_callable(train_op,
[cumulants_ph, rewards_ph])
self._action = session.make_callable(self._regressed_w.read_value(), [])
session.run(tf.global_variables_initializer())
def step(self, timestep, is_training=False):
"""Select actions according to epsilon-greedy policy."""
del timestep
if is_training:
# Can also just use random actions at environment level.
return np.random.uniform(low=-1.0, high=1.0, size=(self._n_cumulants,))
return self._action()
def update(self, step_tm1, action, step_t):
"""Takes in a transition from the environment."""
del step_tm1, action
transition = [
step_t.observation["cumulants"],
step_t.reward,
]
self._replay.append(transition)
if len(self._replay) == self._batch_size:
batch = list(zip(*self._replay))
self._update_fn(*batch)
self._replay = [] # Just a queue.
def get_logs(self):
return dict(regressed=self._action())
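# A minimal sketch (not part of the agent above) of the closed-form
# alternative mentioned in __init__: instead of regressing w by gradient
# descent, solve the least-squares problem directly from a batch of
# (cumulants, rewards) pairs.
def regress_w_least_squares(cumulants, rewards):
  """Returns w minimising ||cumulants @ w - rewards||^2."""
  cumulants = np.asarray(cumulants, dtype=np.float32)
  rewards = np.asarray(rewards, dtype=np.float32)
  w, _, _, _ = np.linalg.lstsq(cumulants, rewards, rcond=None)
  return w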
| deepmind-research-master | option_keyboard/gpe_gpi_experiments/regressed_agent.py |
# Lint as: python3
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
r"""Run an experiment.
Run GPE/GPI on task (1, -1) with the groundtruth w.
For example, first train a keyboard:
python3 train_keyboard.py -- --logtostderr --policy_weights_name=12
Then, evaluate the keyboard with groundtruth w.
python3 run_true_w_fig4.py -- --logtostderr \
--keyboard_path=/tmp/option_keyboard/keyboard_12/tfhub
"""
import csv
from absl import app
from absl import flags
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1.io import gfile
import tensorflow_hub as hub
from option_keyboard import configs
from option_keyboard import environment_wrappers
from option_keyboard import experiment
from option_keyboard import scavenger
from option_keyboard import smart_module
from option_keyboard.gpe_gpi_experiments import regressed_agent
FLAGS = flags.FLAGS
flags.DEFINE_integer("num_episodes", 1000, "Number of training episodes.")
flags.DEFINE_string("keyboard_path", None, "Path to keyboard model.")
flags.DEFINE_string("output_path", None, "Path to write out returns.")
def main(argv):
del argv
# Load the keyboard.
keyboard = smart_module.SmartModuleImport(hub.Module(FLAGS.keyboard_path))
# Create the task environment.
base_env_config = configs.get_fig4_task_config()
base_env = scavenger.Scavenger(**base_env_config)
base_env = environment_wrappers.EnvironmentWithLogging(base_env)
# Wrap the task environment with the keyboard.
additional_discount = 0.9
env = environment_wrappers.EnvironmentWithKeyboardDirect(
env=base_env,
keyboard=keyboard,
keyboard_ckpt_path=None,
additional_discount=additional_discount,
call_and_return=False)
# Create the player agent.
agent = regressed_agent.Agent(
batch_size=10,
optimizer_name="AdamOptimizer",
# Disable training.
optimizer_kwargs=dict(learning_rate=0.0,),
init_w=[1., -1.])
returns = []
for _ in range(FLAGS.num_episodes):
returns.append(experiment.run_episode(env, agent))
tf.logging.info("#" * 80)
tf.logging.info(
f"Avg. return over {FLAGS.num_episodes} episodes is {np.mean(returns)}")
tf.logging.info("#" * 80)
if FLAGS.output_path:
with gfile.GFile(FLAGS.output_path, "w") as file:
writer = csv.writer(file, delimiter=" ", quoting=csv.QUOTE_MINIMAL)
writer.writerow(["return"])
for val in returns:
writer.writerow([val])
if __name__ == "__main__":
tf.disable_v2_behavior()
app.run(main)
| deepmind-research-master | option_keyboard/gpe_gpi_experiments/run_true_w_fig4.py |
# Lint as: python3
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
r"""Run an experiment.
Run GPE/GPI on task (1, -1) with w obtained by regression.
For example, first train a keyboard:
python3 train_keyboard.py -- --logtostderr --policy_weights_name=12 \
--export_path=/tmp/option_keyboard/keyboard
Then evaluate the keyboard with w obtained by regression.
python3 run_regressed_w_fig4b.py -- --logtostderr \
--keyboard_path=/tmp/option_keyboard/keyboard_12/tfhub
"""
from absl import app
from absl import flags
import numpy as np
import tensorflow.compat.v1 as tf
import tensorflow_hub as hub
from option_keyboard import configs
from option_keyboard import environment_wrappers
from option_keyboard import experiment
from option_keyboard import scavenger
from option_keyboard import smart_module
from option_keyboard.gpe_gpi_experiments import regressed_agent
FLAGS = flags.FLAGS
flags.DEFINE_integer("num_episodes", 4000, "Number of training episodes.")
flags.DEFINE_integer("report_every", 5,
"Frequency at which metrics are reported.")
flags.DEFINE_string("keyboard_path", None, "Path to keyboard model.")
flags.DEFINE_string("output_path", None, "Path to write out training curves.")
def main(argv):
del argv
# Load the keyboard.
keyboard = smart_module.SmartModuleImport(hub.Module(FLAGS.keyboard_path))
# Create the task environment.
base_env_config = configs.get_fig4_task_config()
base_env = scavenger.Scavenger(**base_env_config)
base_env = environment_wrappers.EnvironmentWithLogging(base_env)
# Wrap the task environment with the keyboard.
additional_discount = 0.9
env = environment_wrappers.EnvironmentWithKeyboardDirect(
env=base_env,
keyboard=keyboard,
keyboard_ckpt_path=None,
additional_discount=additional_discount,
call_and_return=False)
# Create the player agent.
agent = regressed_agent.Agent(
batch_size=10,
optimizer_name="AdamOptimizer",
optimizer_kwargs=dict(learning_rate=3e-2,),
init_w=np.random.normal(size=keyboard.num_cumulants) * 0.1,
)
_, ema_returns = experiment.run(
env,
agent,
num_episodes=FLAGS.num_episodes,
report_every=FLAGS.report_every,
num_eval_reps=20)
if FLAGS.output_path:
experiment.write_returns_to_file(FLAGS.output_path, ema_returns)
if __name__ == "__main__":
tf.disable_v2_behavior()
app.run(main)
| deepmind-research-master | option_keyboard/gpe_gpi_experiments/run_regressed_w_fig4b.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Loss functions to be used by LayerCollection."""
import abc
from typing import Tuple, Optional, Union, Sequence
import jax
import jax.numpy as jnp
from kfac_ferminet_alpha import distributions
from kfac_ferminet_alpha import layers_and_loss_tags as tags
from kfac_ferminet_alpha import utils
ArrayPair = Tuple[jnp.ndarray, jnp.ndarray]
FloatArray = Union[float, jnp.ndarray]
Index = Tuple[int]
class LossFunction(abc.ABC):
"""Abstract base class for loss functions.
Note that unlike typical loss functions used in neural networks these are
neither summed nor averaged over the batch and hence the output of evaluate()
will not be a scalar. It is then up to the user to correctly manipulate
them as needed.
"""
def __init__(self, weight: FloatArray):
self._weight = weight
@property
def weight(self) -> FloatArray:
return self._weight
@property
@abc.abstractmethod
def targets(self) -> Optional[jnp.ndarray]:
"""The targets being predicted by the model.
Returns:
None or Tensor of appropriate shape for calling self._evaluate() on.
"""
pass
@property
@abc.abstractmethod
def inputs(self) -> Sequence[jnp.ndarray]:
"""The inputs to the loss function (excluding the targets)."""
pass
@abc.abstractmethod
def copy_with_different_inputs(self, inputs: Sequence[jnp.ndarray]):
pass
def evaluate(
self,
targets: Optional[jnp.ndarray] = None,
coefficient_mode: str = "regular",
) -> jnp.ndarray:
"""Evaluate the loss function on the targets."""
if targets is None and self.targets is None:
raise ValueError("Cannot evaluate losses with unspecified targets.")
elif targets is None:
targets = self.targets
if coefficient_mode == "regular":
multiplier = self.weight
elif coefficient_mode == "sqrt":
multiplier = jnp.sqrt(self.weight)
elif coefficient_mode == "off":
multiplier = 1.0
else:
raise ValueError(f"Unrecognized coefficient_mode={coefficient_mode}.")
return self._evaluate(targets) * multiplier
@abc.abstractmethod
def _evaluate(self, targets: jnp.ndarray) -> jnp.ndarray:
"""Evaluates the negative log probability of the targets.
Args:
targets: Tensor that distribution can calculate log_prob() of.
Returns:
negative log probability of each target, summed across all targets.
"""
pass
def grad_of_evaluate(
self,
targets: Optional[jnp.ndarray],
coefficient_mode: str,
) -> Sequence[jnp.ndarray]:
"""Evaluates the gradient of the loss function.
Note that the targets of the loss must not be `None`.
Args:
targets: The potential targets on which to evaluate the gradient.
coefficient_mode: The coefficient mode to use for evaluation.
Returns:
The gradient of the loss evaluation function with respect to the inputs.
"""
def evaluate_sum(inputs: Sequence[jnp.ndarray]) -> jnp.ndarray:
instance = self.copy_with_different_inputs(inputs)
return jnp.sum(instance.evaluate(targets, coefficient_mode))
return jax.grad(evaluate_sum)(self.inputs)
def multiply_ggn(self, vector: jnp.ndarray) -> jnp.ndarray:
"""Right-multiply a vector by the GGN.
Here the 'GGN' is the GGN matrix (whose definition is slightly flexible)
of the loss function with respect to its inputs.
Args:
vector: The vector to multiply. Must be the same shape(s) as the 'inputs'
property.
Returns:
The vector right-multiplied by the GGN. Will be of the same shape(s)
as the 'inputs' property.
"""
return utils.scalar_mul(self.multiply_ggn_unweighted(vector), self.weight)
@abc.abstractmethod
def multiply_ggn_unweighted(self, vector: jnp.ndarray) -> jnp.ndarray:
"""Same as `multiply_ggn`, but without taking into account the weight."""
pass
def multiply_ggn_factor(self, vector: jnp.ndarray) -> jnp.ndarray:
"""Right-multiply a vector by a factor B of the GGN.
Here the 'GGN' is the GGN matrix (whose definition is slightly flexible)
of the loss function with respect to its inputs. Typically this will be
block-diagonal across different cases in the batch, since the loss function
is typically summed across cases.
Note that B can be any matrix satisfying B * B^T = G where G is the GGN,
but will agree with the one used in the other methods of this class.
Args:
vector: The vector to multiply. Must be of the shape given by the
'ggn_factor_inner_shape' property.
Returns:
The vector right-multiplied by B. Will be of the same shape(s) as the
'inputs' property.
"""
return utils.scalar_mul(
self.multiply_ggn_factor_unweighted(vector), jnp.sqrt(self.weight))
@abc.abstractmethod
def multiply_ggn_factor_unweighted(self, vector: jnp.ndarray) -> jnp.ndarray:
"""Same as `multiply_ggn_factor`, but without taking into account the weight."""
pass
def multiply_ggn_factor_transpose(self, vector: jnp.ndarray) -> jnp.ndarray:
"""Right-multiply a vector by the transpose of a factor B of the GGN.
Here the 'GGN' is the GGN matrix (whose definition is slightly flexible)
of the loss function with respect to its inputs. Typically this will be
block-diagonal across different cases in the batch, since the loss function
is typically summed across cases.
Note that B can be any matrix satisfying B * B^T = G where G is the GGN,
but will agree with the one used in the other methods of this class.
Args:
vector: The vector to multiply. Must be the same shape(s) as the 'inputs'
property.
Returns:
The vector right-multiplied by B^T. Will be of the shape given by the
'ggn_factor_inner_shape' property.
"""
return utils.scalar_mul(
self.multiply_ggn_factor_transpose_unweighted(vector),
jnp.sqrt(self.weight))
@abc.abstractmethod
def multiply_ggn_factor_transpose_unweighted(
self,
vector: jnp.ndarray
) -> jnp.ndarray:
"""Same as `multiply_ggn_factor_transpose`, but without taking into account the weight."""
pass
def multiply_ggn_factor_replicated_one_hot(self, index: Index) -> jnp.ndarray:
"""Right-multiply a replicated-one-hot vector by a factor B of the GGN.
Here the 'GGN' is the GGN matrix (whose definition is slightly flexible)
of the loss function with respect to its inputs. Typically this will be
block-diagonal across different cases in the batch, since the loss function
is typically summed across cases.
A 'replicated-one-hot' vector means a tensor which, for each slice along the
batch dimension (assumed to be dimension 0), is 1.0 in the entry
corresponding to the given index and 0 elsewhere.
Note that B can be any matrix satisfying B * B^T = G where G is the GGN,
but will agree with the one used in the other methods of this class.
Args:
index: A tuple representing the index of the entry in each slice that
is 1.0. Note that len(index) must be equal to the number of elements of
the 'ggn_factor_inner_shape' tensor minus one.
Returns:
The vector right-multiplied by B^T. Will be of the same shape(s) as the
'inputs' property.
"""
return utils.scalar_mul(
self.multiply_ggn_factor_replicated_one_hot_unweighted(index),
jnp.sqrt(self.weight))
@abc.abstractmethod
def multiply_ggn_factor_replicated_one_hot_unweighted(
self,
index: Index
) -> jnp.ndarray:
pass
@property
@abc.abstractmethod
def ggn_factor_inner_shape(self) -> Sequence[int]:
"""The shape of the tensor returned by multiply_ggn_factor."""
pass
class NegativeLogProbLoss(LossFunction):
"""Abstract base class for loss functions that are negative log probs."""
@property
def inputs(self):
return self.params
@property
@abc.abstractmethod
def params(self):
"""Parameters to the underlying distribution."""
pass
def multiply_fisher(self, vector: jnp.ndarray) -> jnp.ndarray:
"""Right-multiply a vector by the Fisher.
Args:
vector: The vector to multiply. Must be the same shape(s) as the 'inputs'
property.
Returns:
The vector right-multiplied by the Fisher. Will be of the same shape(s)
as the 'inputs' property.
"""
return utils.scalar_mul(
self.multiply_fisher_unweighted(vector), self.weight)
@abc.abstractmethod
def multiply_fisher_unweighted(self, vector: jnp.ndarray) -> jnp.ndarray:
pass
def multiply_fisher_factor(self, vector: jnp.ndarray) -> jnp.ndarray:
"""Right-multiply a vector by a factor B of the Fisher.
Here the 'Fisher' is the Fisher information matrix (i.e. expected outer-
product of gradients) with respect to the parameters of the underlying
probability distribution (whose log-prob defines the loss). Typically this
will be block-diagonal across different cases in the batch, since the
distribution is usually (but not always) conditionally iid across different
cases.
Note that B can be any matrix satisfying B * B^T = F where F is the Fisher,
but will agree with the one used in the other methods of this class.
Args:
vector: The vector to multiply. Must be of the shape given by the
'fisher_factor_inner_shape' property.
Returns:
The vector right-multiplied by B. Will be of the same shape(s) as the
'inputs' property.
"""
return utils.scalar_mul(
self.multiply_fisher_factor_unweighted(vector), jnp.sqrt(self.weight))
@abc.abstractmethod
def multiply_fisher_factor_unweighted(
self,
vector: jnp.ndarray
) -> jnp.ndarray:
pass
def multiply_fisher_factor_transpose(
self,
vector: jnp.ndarray
) -> jnp.ndarray:
"""Right-multiply a vector by the transpose of a factor B of the Fisher.
Here the 'Fisher' is the Fisher information matrix (i.e. expected outer-
product of gradients) with respect to the parameters of the underlying
probability distribution (whose log-prob defines the loss). Typically this
will be block-diagonal across different cases in the batch, since the
distribution is usually (but not always) conditionally iid across different
cases.
Note that B can be any matrix satisfying B * B^T = F where F is the Fisher,
but will agree with the one used in the other methods of this class.
Args:
vector: The vector to multiply. Must be the same shape(s) as the 'inputs'
property.
Returns:
The vector right-multiplied by B^T. Will be of the shape given by the
'fisher_factor_inner_shape' property.
"""
return utils.scalar_mul(
self.multiply_fisher_factor_transpose_unweighted(vector),
jnp.sqrt(self.weight))
@abc.abstractmethod
def multiply_fisher_factor_transpose_unweighted(
self,
vector: jnp.ndarray
) -> jnp.ndarray:
pass
def multiply_fisher_factor_replicated_one_hot(
self,
index: Index
) -> jnp.ndarray:
"""Right-multiply a replicated-one-hot vector by a factor B of the Fisher.
Here the 'Fisher' is the Fisher information matrix (i.e. expected outer-
product of gradients) with respect to the parameters of the underlying
probability distribution (whose log-prob defines the loss). Typically this
will be block-diagonal across different cases in the batch, since the
distribution is usually (but not always) conditionally iid across different
cases.
A 'replicated-one-hot' vector means a tensor which, for each slice along the
batch dimension (assumed to be dimension 0), is 1.0 in the entry
corresponding to the given index and 0 elsewhere.
Note that B can be any matrix satisfying B * B^T = F where F is the Fisher,
but will agree with the one used in the other methods of this class.
Args:
index: A tuple representing the index of the entry in each slice that
is 1.0. Note that len(index) must be equal to the number of elements of
the 'fisher_factor_inner_shape' tensor minus one.
Returns:
The vector right-multiplied by B. Will be of the same shape(s) as the
'inputs' property.
"""
return utils.scalar_mul(
self.multiply_fisher_factor_replicated_one_hot_unweighted(index),
jnp.sqrt(self.weight))
@abc.abstractmethod
def multiply_fisher_factor_replicated_one_hot_unweighted(
self,
index: Index
) -> jnp.ndarray:
pass
@property
@abc.abstractmethod
def fisher_factor_inner_shape(self) -> Sequence[int]:
"""The shape of the tensor returned by multiply_fisher_factor."""
pass
@abc.abstractmethod
def sample(self, rng_key: jnp.ndarray) -> jnp.ndarray:
"""Sample 'targets' from the underlying distribution."""
pass
def grad_of_evaluate_on_sample(
self,
rng_key: jnp.ndarray,
coefficient_mode: str,
) -> Sequence[jnp.ndarray]:
"""Evaluates the gradient of the log probability on a random sample.
Args:
rng_key: Jax PRNG key for sampling.
coefficient_mode: The coefficient mode to use for evaluation.
Returns:
The gradient of the log probability of targets sampled from the
distribution.
"""
return self.grad_of_evaluate(self.sample(rng_key), coefficient_mode)
class NaturalParamsNegativeLogProbLoss(NegativeLogProbLoss, abc.ABC):
"""Base class for neg log prob losses whose inputs are 'natural' parameters.
We will take the GGN of the loss to be the Fisher associated with the
distribution, which also happens to be equal to the Hessian for this class
of loss functions. See here: https://arxiv.org/abs/1412.1193
'Natural parameters' are defined for exponential-family models. See for
example: https://en.wikipedia.org/wiki/Exponential_family
"""
def multiply_ggn_unweighted(self, vector: jnp.ndarray) -> jnp.ndarray:
return self.multiply_fisher_unweighted(vector)
def multiply_ggn_factor_unweighted(self, vector: jnp.ndarray) -> jnp.ndarray:
return self.multiply_fisher_factor_unweighted(vector)
def multiply_ggn_factor_transpose_unweighted(
self,
vector: jnp.ndarray
) -> jnp.ndarray:
return self.multiply_fisher_factor_transpose_unweighted(vector)
def multiply_ggn_factor_replicated_one_hot_unweighted(
self,
index: Index
) -> jnp.ndarray:
return self.multiply_fisher_factor_replicated_one_hot_unweighted(index)
@property
def ggn_factor_inner_shape(self) -> Sequence[int]:
return self.fisher_factor_inner_shape
class DistributionNegativeLogProbLoss(NegativeLogProbLoss):
"""Base class for neg log prob losses that use the distribution classes."""
@property
@abc.abstractmethod
def dist(self):
"""The underlying distribution instance."""
pass
def _evaluate(self, targets: jnp.ndarray):
return -self.dist.log_prob(targets)
def sample(self, rng_key: jnp.ndarray):
return self.dist.sample(seed=rng_key)
@property
def fisher_factor_inner_shape(self) -> Sequence[int]:
return self.dist.mean().shape
class NormalMeanNegativeLogProbLoss(DistributionNegativeLogProbLoss,
NaturalParamsNegativeLogProbLoss):
"""Neg log prob loss for a normal distribution parameterized by a mean vector.
Note that the covariance is treated as variance times the identity (so with
the default variance of 0.5 it is the identity divided by 2).
Also note that the Fisher of such a normal distribution with respect to the
mean parameter is given by:
F = (1 / variance) * I
See for example https://www.ii.pwr.edu.pl/~tomczak/PDF/[JMT]Fisher_inf.pdf.
"""
def __init__(
self,
mean: jnp.ndarray,
targets: Optional[jnp.ndarray] = None,
variance: float = 0.5,
weight: float = 1.0,
):
super().__init__(weight=weight)
self._mean = mean
self._targets = targets
self._variance = variance
if not isinstance(variance, float):
raise ValueError("The `variance` argument should be python float.")
@property
def targets(self) -> Optional[jnp.ndarray]:
return self._targets
@property
def dist(self):
scale_diag = jnp.full_like(self._mean, jnp.sqrt(self._variance))
return distributions.MultivariateNormalDiag(self._mean, scale_diag)
@property
def params(self):
return self._mean,
def copy_with_different_inputs(self, inputs: Sequence[jnp.ndarray]):
[mean] = inputs
return NormalMeanNegativeLogProbLoss(
mean=mean,
targets=self.targets,
variance=self._variance,
weight=self.weight,
)
def multiply_fisher_unweighted(self, vector: jnp.ndarray) -> jnp.ndarray:
return vector / self._variance
def multiply_fisher_factor_unweighted(
self,
vector: jnp.ndarray,
) -> jnp.ndarray:
return vector / jnp.sqrt(self._variance)
def multiply_fisher_factor_transpose_unweighted(
self,
vector: jnp.ndarray,
) -> jnp.ndarray:
return self.multiply_fisher_factor_unweighted(vector) # it's symmetric
def multiply_fisher_factor_replicated_one_hot_unweighted(
self,
index: Index,
) -> jnp.ndarray:
assert len(index) == 1, f"Length of index was {len(index)}."
index = index[0]
ones_slice = jnp.ones([self._mean.shape[0]])[..., None]
output_slice = ones_slice / jnp.sqrt(self._variance)
return insert_slice_in_zeros(output_slice, 1, self._mean.shape[1], index)
def insert_slice_in_zeros(
slice_to_insert: jnp.ndarray,
dim: int,
dim_size: int,
position: int,
) -> jnp.ndarray:
"""Inserts slice into a larger tensor of zeros.
Forms a new tensor which is the same shape as slice_to_insert, except that
the dimension given by 'dim' is expanded to the size given by 'dim_size'.
'position' determines the position (index) at which to insert the slice within
that dimension.
Assumes slice_to_insert.shape[dim] = 1.
Args:
slice_to_insert: The slice to insert.
dim: The dimension to expand with zeros.
dim_size: The new size of the 'dim' dimension.
position: The position of 'slice_to_insert' in the new tensor.
Returns:
The new tensor.
Raises:
ValueError: If the slice's shape at the given dim is not 1.
"""
slice_shape = slice_to_insert.shape
if slice_shape[dim] != 1:
raise ValueError(f"Expected slice_to_insert.shape to have {dim} dim of 1,"
f" but was {slice_to_insert.shape[dim]}.")
before = [0] * int(len(slice_shape))
after = before[:]
before[dim] = position
after[dim] = dim_size - position - 1
return jnp.pad(slice_to_insert, list(zip(before, after)))
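# For example (illustrative only), inserting a (2, 1) slice of ones at
# position 2 of a dimension expanded to size 4:
#   insert_slice_in_zeros(jnp.ones([2, 1]), dim=1, dim_size=4, position=2)
#   == [[0., 0., 1., 0.],
#       [0., 0., 1., 0.]]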
# _______ _____ _ _ _ _
# |__ __| | __ \ (_) | | | | (_)
# | | __ _ __ _ | |__) |___ __ _ _ ___| |_ _ __ __ _| |_ _ ___ _ __
# | |/ _` |/ _` | | _ // _ \/ _` | / __| __| '__/ _` | __| |/ _ \| '_ \
# | | (_| | (_| | | | \ \ __/ (_| | \__ \ |_| | | (_| | |_| | (_) | | | |
# |_|\__,_|\__, | |_| \_\___|\__, |_|___/\__|_| \__,_|\__|_|\___/|_| |_|
# __/ | __/ |
# |___/ |___/
NormalMeanNegativeLogProbLoss_tag = tags.LossTag(
NormalMeanNegativeLogProbLoss, num_inputs=1)
def register_normal_predictive_distribution(
mean: jnp.ndarray,
targets: Optional[jnp.ndarray] = None,
variance: float = 0.5,
weight: float = 1.0,
):
"""Registers a normal predictive distribution.
This corresponds to a squared error loss of the form
weight/(2*var) * ||target - mean||^2
Args:
mean: A tensor defining the mean vector of the distribution. The first
dimension must be the batch size.
targets: (OPTIONAL) The targets for the loss function. Only required if one
wants to use the "empirical Fisher" instead of the true Fisher (which is
controlled by the 'estimation_mode' to the optimizer).
(Default: None)
variance: float. The variance of the distribution. Note that the default
value of 0.5 corresponds to a standard squared error loss weight *
||target - prediction||^2. If you want your squared error loss to be of
the form 0.5*coeff*||target - prediction||^2 you should use
variance=1.0.
(Default: 0.5)
weight: A scalar coefficient to multiply the log prob loss associated with
this distribution. The Fisher will be multiplied by the corresponding
factor. In general this is NOT equivalent to changing the temperature of
the distribution, but in the case of normal distributions it may be.
(Default: 1.0)
Returns:
The mean and targets as dependable on the tag.
"""
if targets is None:
targets = jnp.zeros_like(mean)
return NormalMeanNegativeLogProbLoss_tag.bind(
mean, targets, variance=variance, weight=weight, return_loss=False)
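# Concretely, with the defaults (variance=0.5, weight=1.0) the implied
# per-case loss is ||target - mean||^2, while variance=1.0 gives
# 0.5 * ||target - mean||^2, matching the weight/(2*var) factor in the
# docstring above.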
def register_squared_error_loss(
prediction: jnp.ndarray,
targets: Optional[jnp.ndarray] = None,
weight: float = 1.0,
):
"""Registers a squared error loss function.
This assumes the squared error loss of the form ||target - prediction||^2,
averaged across the mini-batch. If your loss uses a coefficient of 0.5
you need to set the "weight" argument to reflect this.
Args:
prediction: The prediction made by the network (i.e. its output). The first
dimension must be the batch size.
targets: (OPTIONAL) The targets for the loss function. Only required if one
wants to use the "empirical Fisher" instead of the true Fisher (which is
controlled by the 'estimation_mode' to the optimizer).
(Default: None)
weight: A float coefficient to multiply the loss function by.
(Default: 1.0)
Returns:
The mean and targets as dependable on the tag.
"""
return register_normal_predictive_distribution(
prediction, targets=targets, variance=0.5, weight=weight)
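# A minimal usage sketch (illustrative; `apply_fn` and the batch keys are
# assumptions, not part of this module): call the registration inside the
# loss function so that the optimizer can associate the network output with
# a squared error / normal predictive distribution.
#
#   def loss_fn(params, batch):
#     predictions = apply_fn(params, batch["inputs"])
#     register_squared_error_loss(predictions, targets=batch["targets"])
#     return jnp.mean((predictions - batch["targets"]) ** 2)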
| deepmind-research-master | kfac_ferminet_alpha/loss_functions.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for anything that an end user would use."""
from kfac_ferminet_alpha.loss_functions import register_normal_predictive_distribution
from kfac_ferminet_alpha.loss_functions import register_squared_error_loss
from kfac_ferminet_alpha.optimizer import Optimizer
| deepmind-research-master | kfac_ferminet_alpha/__init__.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module for registering already known functions for tagging patterns."""
import functools
from typing import Sequence, Tuple, TypeVar
import jax
from jax import core as jax_core
from jax import lax
from jax import lib as jax_lib
from jax.interpreters import batching as jax_batching
import jax.numpy as jnp
_T = TypeVar("_T")
class LossTag(jax_core.Primitive):
"""A tagging primitive specifically for losses."""
multiple_results = True
def __init__(self, cls, num_inputs: int, num_targets: int = 1):
super().__init__(cls.__name__ + "_tag")
self._cls = cls
self._num_inputs = num_inputs
self._num_targets = num_targets
jax.xla.translations[self] = self.xla_translation
jax.ad.primitive_jvps[self] = self.jvp
# This line defines how the tag behaves under vmap. It is required for
# any primitive that can be used inside a vmap. The reason we want to
# allow this is twofold: first, so that user code does not break when the
# tags are not used at all, and second, so that a network can be defined
# with code for a single example and then vmap-ed over a batch.
jax_batching.primitive_batchers[self] = self.batching
@property
def num_inputs(self) -> int:
return self._num_inputs
@property
def num_targets(self) -> int:
return self._num_targets
def loss(self, *args, weight: float = 1.0, **kwargs):
return self._cls(*args, weight=weight, **kwargs)
def loss_evaluate(self, *args, weight: float = 1.0, **kwargs):
return self.loss(*args, weight=weight, **kwargs).evaluate()
def get_outputs(self, *args, weight: float, return_loss: bool, **kwargs):
if len(args) < self.num_inputs:
raise ValueError("Inputs to the tag are not enough.")
if len(args) < self.num_inputs + self.num_targets:
if len(args) != self.num_inputs:
raise ValueError("Inputs to the tag are not quite enough.")
if return_loss:
raise ValueError("Can not have return_loss=True when there are no "
"targets.")
return args
if len(args) > self.num_inputs + self.num_targets:
raise ValueError("Inputs to the tag are too many.")
if return_loss:
return self.loss(*args, weight=weight, **kwargs).evaluate()
else:
return args
def impl(self, *args, weight: float, return_loss: bool, **kwargs):
return self.get_outputs(*args, weight=weight, return_loss=return_loss)
def abstract_eval(self, *args, weight: float, return_loss: bool, **kwargs):
return self.get_outputs(*args, weight=weight, return_loss=return_loss)
def xla_translation(
self,
c,
*args,
weight: float = 1.0,
return_loss: bool = False,
**kwargs,
):
outputs = self.get_outputs(
*args, weight=weight, return_loss=return_loss, **kwargs)
if isinstance(outputs, tuple):
return jax_lib.xla_client.ops.Tuple(c, outputs)
return outputs
def jvp(
self,
arg_values,
arg_tangents,
weight: float,
return_loss: bool,
**kwargs,
):
if len(arg_values) != len(arg_tangents):
raise ValueError("Values and tangents are not the same length.")
primal_output = self.bind(
*arg_values, weight=weight, return_loss=return_loss, **kwargs)
if len(arg_values) == self.num_inputs:
tangents_out = self.get_outputs(
*arg_tangents, weight=weight, return_loss=return_loss, **kwargs)
elif return_loss:
tangents_out = jax.jvp(
functools.partial(self.loss_evaluate, weight=weight, **kwargs),
arg_tangents, arg_tangents)[1]
else:
tangents_out = arg_tangents
return primal_output, tangents_out
def batching(self, batched_args, batched_dims, **kwargs):
return self.bind(*batched_args, **kwargs), batched_dims[0]
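# Hedged usage sketch (illustrative only): `_DemoSquaredErrorLoss` and
# `_demo_loss_tag` below are assumptions for demonstration, not the library's
# registered losses -- those live in `loss_functions.py`. They show the
# minimal `weight`/`evaluate` interface that `LossTag` expects from a loss
# class, and how a tag wrapping that class would be created.
class _DemoSquaredErrorLoss:
  """A toy loss exposing the interface LossTag expects (weight + evaluate)."""
  def __init__(self, prediction, target, weight: float = 1.0):
    self._prediction = prediction
    self._target = target
    self._weight = weight
  def evaluate(self):
    # Weighted sum of squared errors over the batch.
    return self._weight * jnp.sum((self._prediction - self._target) ** 2)
_demo_loss_tag = LossTag(_DemoSquaredErrorLoss, num_inputs=1, num_targets=1)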
class LayerTag(jax_core.Primitive):
"""A tagging primitive that is used to mark/tag computation."""
def __init__(self, name: str, num_inputs: int, num_outputs: int):
super().__init__(name)
if num_outputs > 1:
raise NotImplementedError(
f"Only single outputs are supported, got: num_outputs={num_outputs}")
self._num_outputs = num_outputs
self._num_inputs = num_inputs
jax.xla.translations[self] = self.xla_translation
jax.ad.deflinear(self, self.transpose)
jax.ad.primitive_transposes[self] = self.transpose
    # This line defines how the tag behaves under vmap. It is required for
    # any primitive that can be used inside a vmap. The reason we want to
    # allow this is twofold: one, to avoid breaking user code when the tags
    # are not used at all, and two, to be able to define a network with code
    # for a single example which is then vmap-ed for a batch.
jax_batching.primitive_batchers[self] = self.batching
@property
def num_outputs(self) -> int:
return self._num_outputs
@property
def num_inputs(self) -> int:
return self._num_inputs
def split_all_inputs(
self,
all_inputs: Sequence[_T],
) -> Tuple[Sequence[_T], Sequence[_T], Sequence[_T]]:
outputs = tuple(all_inputs[:self.num_outputs])
inputs = tuple(all_inputs[self.num_outputs:self.num_outputs +
self.num_inputs])
params = tuple(all_inputs[self.num_outputs + self.num_inputs:])
return outputs, inputs, params
def get_outputs(self, *operands: _T, **kwargs) -> _T:
assert self.num_outputs == 1
return operands[0]
def xla_translation(self, c, *operands: _T, **kwargs) -> _T:
return self.get_outputs(*operands, **kwargs)
@staticmethod
def transpose(cotangent, *operands, **kwargs):
return (cotangent,) + (None,) * (len(operands) - 1)
def impl(self, *operands, **kwargs):
return self.get_outputs(*operands, **kwargs)
def abstract_eval(self, *abstract_operands, **kwargs):
return self.get_outputs(*abstract_operands, **kwargs)
def batching(self, batched_operands, batched_dims, **kwargs):
return self.bind(*batched_operands, **kwargs), batched_dims[0]
# _____ _
# / ____| (_)
# | | __ ___ _ __ ___ _ __ _ ___
# | | |_ |/ _ \ '_ \ / _ \ '__| |/ __|
# | |__| | __/ | | | __/ | | | (__
# \_____|\___|_| |_|\___|_| |_|\___|
#
#
generic_tag = LayerTag(name="generic_tag", num_inputs=0, num_outputs=1)
def register_generic(parameter: _T) -> _T:
return generic_tag.bind(parameter)
# _____
# | __ \
# | | | | ___ _ __ ___ ___
# | | | |/ _ \ '_ \/ __|/ _ \
# | |__| | __/ | | \__ \ __/
# |_____/ \___|_| |_|___/\___|
#
dense_tag = LayerTag(name="dense_tag", num_inputs=1, num_outputs=1)
def register_dense(y, x, w, b=None):
if b is None:
return dense_tag.bind(y, x, w)
return dense_tag.bind(y, x, w, b)
def dense_func(x, params):
"""Example of a dense layer function."""
w = params[0]
y = jnp.matmul(x, w)
if len(params) == 1:
# No bias
return y
# Add bias
return y + params[1]
def dense_tagging(jaxpr, inverse_map, values_map):
"""Correctly registers a dense layer pattern."""
del inverse_map
in_values = [values_map[v] for v in jaxpr.invars]
out_values = [values_map[v] for v in jaxpr.outvars]
return register_dense(out_values[0], *in_values)
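# Hedged usage sketch (illustrative only; `_demo_dense_forward` is not part of
# the library): tagging a dense layer by hand inside a forward pass so that
# the curvature machinery can later match it to a block. The tag simply
# returns `y`; it only marks the (output, input, params) pattern in the graph.
def _demo_dense_forward(params, x):
  w, b = params
  y = jnp.matmul(x, w) + b
  return register_dense(y, x, w, b)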
# ___ _____ _____ _ _ _
# |__ \| __ \ / ____| | | | | (_)
# ) | | | | | | ___ _ ____ _____ | |_ _| |_ _ ___ _ __
# / /| | | | | | / _ \| '_ \ \ / / _ \| | | | | __| |/ _ \| "_ \
# / /_| |__| | | |___| (_) | | | \ V / (_) | | |_| | |_| | (_) | | | |
# |____|_____/ \_____\___/|_| |_|\_/ \___/|_|\__,_|\__|_|\___/|_| |_|
#
conv2d_tag = LayerTag(name="conv2d_tag", num_inputs=1, num_outputs=1)
def register_conv2d(y, x, w, b=None, **kwargs):
if b is None:
return conv2d_tag.bind(y, x, w, **kwargs)
return conv2d_tag.bind(y, x, w, b, **kwargs)
def conv2d_func(x, params):
"""Example of a conv2d layer function."""
w = params[0]
y = lax.conv_general_dilated(
x,
w,
window_strides=(2, 2),
padding="SAME",
dimension_numbers=("NHWC", "HWIO", "NHWC"))
if len(params) == 1:
# No bias
return y
# Add bias
return y + params[1][None, None, None]
def conv2d_tagging(jaxpr, inverse_map, values_map):
"""Correctly registers a conv2d layer pattern."""
in_values = [values_map[v] for v in jaxpr.invars]
out_values = [values_map[v] for v in jaxpr.outvars]
keys = [k for k in inverse_map.keys() if isinstance(k, str)]
keys = [k for k in keys if k.startswith("conv_general_dilated")]
if len(keys) != 1:
raise ValueError("Did not find any conv_general_dilated!")
kwargs = inverse_map[keys[0]].params
return register_conv2d(out_values[0], *in_values, **kwargs)
# _____ _ _ _____ _ _ __ _
# / ____| | | | | / ____| | (_)/ _| |
# | (___ ___ __ _| | ___ __ _ _ __ __| | | (___ | |__ _| |_| |_
# \___ \ / __/ _` | |/ _ \ / _` | '_ \ / _` | \___ \| '_ \| | _| __|
# ____) | (_| (_| | | __/ | (_| | | | | (_| | ____) | | | | | | | |_
# |_____/ \___\__,_|_|\___| \__,_|_| |_|\__,_| |_____/|_| |_|_|_| \__|
#
scale_and_shift_tag = LayerTag(
name="scale_and_shift_tag", num_inputs=1, num_outputs=1)
def register_scale_and_shift(y, args, has_scale: bool, has_shift: bool):
assert has_scale or has_shift
x, args = args[0], args[1:]
return scale_and_shift_tag.bind(
y, x, *args, has_scale=has_scale, has_shift=has_shift)
def scale_and_shift_func(x, params, has_scale: bool, has_shift: bool):
"""Example of a scale and shift function."""
if has_scale and has_shift:
scale, shift = params
return x * scale + shift
elif has_scale:
return x * params[0]
elif has_shift:
return x + params[0]
else:
raise ValueError()
def scale_and_shift_tagging(
jaxpr,
inverse_map,
values_map,
has_scale: bool,
has_shift: bool,
):
"""Correctly registers a scale and shift layer pattern."""
del inverse_map
in_values = [values_map[v] for v in jaxpr.invars]
out_values = [values_map[v] for v in jaxpr.outvars]
return register_scale_and_shift(out_values[0], in_values, has_scale,
has_shift)
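# Hedged usage sketch (illustrative only; `_demo_affine_forward` is not part
# of the library): tagging an elementwise scale-and-shift (e.g. the affine
# part of a normalisation layer) with both a scale and a shift parameter.
def _demo_affine_forward(x, scale, shift):
  y = x * scale + shift
  # The second argument packs the layer input followed by its parameters.
  return register_scale_and_shift(
      y, [x, scale, shift], has_scale=True, has_shift=True)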
def batch_norm_func(
inputs: Tuple[jnp.ndarray, jnp.ndarray],
params: Tuple[jnp.ndarray, jnp.ndarray],
) -> jnp.ndarray:
"""Example of batch norm as is defined in Haiku."""
x, y = inputs
scale, shift = params
inv = scale * y
return x * inv + shift
def batch_norm_tagging_func(
jaxpr,
inverse_map,
values_map,
has_scale: bool,
has_shift: bool,
):
"""Correctly registers a batch norm layer pattern as is defined in Haiku."""
del inverse_map
in_values = [values_map[v] for v in jaxpr.invars]
out_values = [values_map[v] for v in jaxpr.outvars]
  # The first two inputs are both multiplied by the scale, so we merge them.
in_values = [in_values[0] * in_values[1]] + in_values[2:]
return register_scale_and_shift(out_values[0], in_values, has_scale,
has_shift)
| deepmind-research-master | kfac_ferminet_alpha/layers_and_loss_tags.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for all distribution implementations needed for the loss functions."""
import math
import jax
import jax.numpy as jnp
class MultivariateNormalDiag:
"""Multivariate normal distribution on `R^k`."""
def __init__(
self,
loc: jnp.ndarray,
scale_diag: jnp.ndarray):
"""Initializes a MultivariateNormalDiag distribution.
Args:
loc: Mean vector of the distribution. Can also be a batch of vectors.
scale_diag: Vector of standard deviations.
"""
super().__init__()
self._loc = loc
self._scale_diag = scale_diag
@property
def loc(self) -> jnp.ndarray:
"""Mean of the distribution."""
return self._loc
@property
def scale_diag(self) -> jnp.ndarray:
"""Scale of the distribution."""
return self._scale_diag
def _num_dims(self) -> int:
"""Dimensionality of the events."""
return self._scale_diag.shape[-1]
def _standardize(self, value: jnp.ndarray) -> jnp.ndarray:
return (value - self._loc) / self._scale_diag
def log_prob(self, value: jnp.ndarray) -> jnp.ndarray:
"""See `Distribution.log_prob`."""
log_unnormalized = -0.5 * jnp.square(self._standardize(value))
log_normalization = 0.5 * math.log(2 * math.pi) + jnp.log(self._scale_diag)
return jnp.sum(log_unnormalized - log_normalization, axis=-1)
def mean(self) -> jnp.ndarray:
"""Calculates the mean."""
return self.loc
def sample(self, seed: jnp.ndarray) -> jnp.ndarray:
"""Samples an event.
Args:
      seed: A JAX PRNG key.
Returns:
A sample.
"""
eps = jax.random.normal(seed, self.loc.shape)
return self.loc + eps * self.scale_diag
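# Hedged usage sketch (illustrative only; `_demo_log_prob_of_sample` is not
# part of the library): draw one event from a diagonal Gaussian and evaluate
# its log-density under the same distribution.
def _demo_log_prob_of_sample():
  dist = MultivariateNormalDiag(loc=jnp.zeros([3]), scale_diag=jnp.ones([3]))
  sample = dist.sample(jax.random.PRNGKey(0))
  return dist.log_prob(sample)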
| deepmind-research-master | kfac_ferminet_alpha/distributions.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup for pip package."""
from setuptools import setup
REQUIRED_PACKAGES = (
"absl-py",
"dataclasses",
"jax",
"networkx",
"numpy",
"ordered-set",
"typing",
)
LONG_DESCRIPTION = "\n".join([
"Kronecker-Factored Approximate Curvature (K-FAC) optimizer implemented in "
"JAX.",
"",
"Accompanying code for 'Better, Faster Fermionic Neural Networks'",
"James S. Spencer, David Pfau, Aleksandar Botev, and W. M. C. Foulkes.",
"https://arxiv.org/abs/2011.07125.",
])
setup(
name="kfac_ferminet_alpha",
version="0.0.1",
description="A K-FAC optimizer implemented in JAX",
long_description=LONG_DESCRIPTION,
url="https://github.com/deepmind/deepmind-research/kfac_ferminet_alpha",
author="DeepMind",
package_dir={"kfac_ferminet_alpha": "."},
packages=["kfac_ferminet_alpha"],
install_requires=REQUIRED_PACKAGES,
platforms=["any"],
license="Apache License, Version 2.0",
)
| deepmind-research-master | kfac_ferminet_alpha/setup.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of running KFAC."""
from absl import app
from absl import flags
import jax
import jax.numpy as jnp
import numpy as np
import kfac_ferminet_alpha as kfac_ferminet_alpha
from kfac_ferminet_alpha import utils
TRAINING_STEPS = flags.DEFINE_integer(
name="training_steps",
default=100,
help="Number of training steps to perform")
BATCH_SIZE = flags.DEFINE_integer(
name="batch_size", default=128, help="Batch size")
LEARNING_RATE = flags.DEFINE_float(
name="learning_rate", default=1e-3, help="Learning rate")
L2_REG = flags.DEFINE_float(
name="l2_reg", default=1e-3, help="L2 regularization coefficient")
MOMENTUM = flags.DEFINE_float(
name="momentum", default=0.8, help="Momentum coefficient")
DAMPING = flags.DEFINE_float(
name="damping", default=1e-2, help="Damping coefficient")
MULTI_DEVICE = flags.DEFINE_bool(
name="multi_device",
default=False,
help="Whether the computation should be replicated across multiple devices")
SEED = flags.DEFINE_integer(name="seed", default=12412321, help="JAX RNG seed")
def glorot_uniform(shape, key):
dim_in = np.prod(shape[:-1])
dim_out = shape[-1]
c = jnp.sqrt(6 / (dim_in + dim_out))
return jax.random.uniform(key, shape=shape, minval=-c, maxval=c)
def fully_connected_layer(params, x):
w, b = params
return jnp.matmul(x, w) + b[None]
def model_init(rng_key, batch, encoder_sizes=(1000, 500, 250, 30)):
"""Initialize the standard autoencoder."""
x_size = batch.shape[-1]
decoder_sizes = encoder_sizes[len(encoder_sizes) - 2::-1]
sizes = (x_size,) + encoder_sizes + decoder_sizes + (x_size,)
keys = jax.random.split(rng_key, len(sizes) - 1)
params = []
for rng_key, dim_in, dim_out in zip(keys, sizes, sizes[1:]):
# Glorot uniform initialization
w = glorot_uniform((dim_in, dim_out), rng_key)
b = jnp.zeros([dim_out])
params.append((w, b))
return params, None
def model_loss(params, inputs, l2_reg):
"""Evaluate the standard autoencoder."""
h = inputs.reshape([inputs.shape[0], -1])
for i, layer_params in enumerate(params):
h = fully_connected_layer(layer_params, h)
    # The code layer (i == 3) and the output layer (i == 7) are linear.
if i % 4 != 3:
h = jnp.tanh(h)
l2_value = 0.5 * sum(jnp.square(p).sum() for p in jax.tree_leaves(params))
error = jax.nn.sigmoid(h) - inputs.reshape([inputs.shape[0], -1])
mean_squared_error = jnp.mean(jnp.sum(error * error, axis=1), axis=0)
regularized_loss = mean_squared_error + l2_reg * l2_value
return regularized_loss, dict(mean_squared_error=mean_squared_error)
def random_data(multi_device, batch_shape, rng):
if multi_device:
shape = (multi_device,) + tuple(batch_shape)
else:
shape = tuple(batch_shape)
while True:
rng, key = jax.random.split(rng)
yield jax.random.normal(key, shape)
def main(argv):
del argv # Unused.
learning_rate = jnp.asarray([LEARNING_RATE.value])
momentum = jnp.asarray([MOMENTUM.value])
damping = jnp.asarray([DAMPING.value])
# RNG keys
global_step = jnp.zeros([])
rng = jax.random.PRNGKey(SEED.value)
params_key, opt_key, step_key, data_key = jax.random.split(rng, 4)
dataset = random_data(MULTI_DEVICE.value, (BATCH_SIZE.value, 20), data_key)
example_batch = next(dataset)
if MULTI_DEVICE.value:
global_step = utils.replicate_all_local_devices(global_step)
learning_rate = utils.replicate_all_local_devices(learning_rate)
momentum = utils.replicate_all_local_devices(momentum)
damping = utils.replicate_all_local_devices(damping)
params_key, opt_key = utils.replicate_all_local_devices(
(params_key, opt_key))
step_key = utils.make_different_rng_key_on_all_devices(step_key)
split_key = jax.pmap(lambda x: tuple(jax.random.split(x)))
jit_init_parameters_func = jax.pmap(model_init)
else:
split_key = jax.random.split
jit_init_parameters_func = jax.jit(model_init)
# Initialize or load parameters
params, func_state = jit_init_parameters_func(params_key, example_batch)
# Make optimizer
optim = kfac_ferminet_alpha.Optimizer(
value_and_grad_func=jax.value_and_grad(
lambda p, x: model_loss(p, x, L2_REG.value), has_aux=True),
l2_reg=L2_REG.value,
value_func_has_aux=True,
value_func_has_state=False,
value_func_has_rng=False,
learning_rate_schedule=None,
momentum_schedule=None,
damping_schedule=None,
norm_constraint=1.0,
num_burnin_steps=10,
)
# Initialize optimizer
opt_state = optim.init(params, opt_key, example_batch, func_state)
for t in range(TRAINING_STEPS.value):
step_key, key_t = split_key(step_key)
params, opt_state, stats = optim.step(
params,
opt_state,
key_t,
dataset,
learning_rate=learning_rate,
momentum=momentum,
damping=damping)
global_step = global_step + 1
# Log any of the statistics
print(f"iteration: {t}")
print(f"mini-batch loss = {stats['loss']}")
if "aux" in stats:
for k, v in stats["aux"].items():
print(f"{k} = {v}")
print("----")
if __name__ == "__main__":
app.run(main)
| deepmind-research-master | kfac_ferminet_alpha/example.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities related to multi-device operations."""
import collections
from typing import Any, Mapping, Optional, Sequence, Tuple, TypeVar, Union
import dataclasses
import jax
from jax import core
from jax import lax
import jax.numpy as jnp
from jax.scipy import linalg
import jax.tree_util as tree_util
T = TypeVar("T")
def wrap_if_pmap(p_func):
def p_func_if_pmap(obj, axis_name):
try:
core.axis_frame(axis_name)
return p_func(obj, axis_name)
except NameError:
return obj
return p_func_if_pmap
pmean_if_pmap = wrap_if_pmap(lax.pmean)
psum_if_pmap = wrap_if_pmap(lax.psum)
compute_mean = jax.pmap(lambda x: lax.pmean(x, "i"), axis_name="i")
compute_sum = jax.pmap(lambda x: lax.psum(x, "i"), axis_name="i")
def get_first(obj: T) -> T:
return jax.tree_map(lambda x: x[0], obj)
def get_mean(obj: T) -> T:
return get_first(compute_mean(obj))
def get_sum(obj: T) -> T:
return get_first(compute_sum(obj))
broadcast_all_local_devices = jax.pmap(lambda x: x)
def replicate_all_local_devices(obj: T) -> T:
n = jax.local_device_count()
obj_stacked = jax.tree_map(lambda x: jnp.stack([x] * n, axis=0), obj)
return broadcast_all_local_devices(obj_stacked)
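# Hedged usage sketch (illustrative only; `_demo_replicate_roundtrip` is not
# part of the library): replicate a pytree across all local devices and read
# back the copy held by the first device.
def _demo_replicate_roundtrip():
  params = {"w": jnp.ones([2, 2]), "b": jnp.zeros([2])}
  replicated = replicate_all_local_devices(params)  # adds a leading device axis
  return get_first(replicated)  # same structure and values as `params`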
def make_different_rng_key_on_all_devices(rng: jnp.ndarray) -> jnp.ndarray:
rng = jax.random.fold_in(rng, jax.host_id())
rng = jax.random.split(rng, jax.local_device_count())
return broadcast_all_local_devices(rng)
p_split = jax.pmap(lambda key: tuple(jax.random.split(key)))
def scalar_mul(obj: T, scalar: Union[float, jnp.ndarray]) -> T:
return jax.tree_map(lambda x: x * scalar, obj)
def scalar_div(obj: T, scalar: Union[float, jnp.ndarray]) -> T:
return jax.tree_map(lambda x: x / scalar, obj)
def make_func_args(params, func_state, rng, batch, has_state: bool,
has_rng: bool):
"""Correctly puts all arguments to the function together."""
func_args = (params,)
if has_state:
if func_state is None:
raise ValueError("The `func_state` is None, but the argument `has_state` "
"is True.")
func_args += (func_state,)
if has_rng:
if rng is None:
raise ValueError("The `rng` is None, but the argument `has_rng` is True.")
func_args += (rng,)
func_args += (batch,)
return func_args
def extract_func_outputs(
raw_outputs: Any,
has_aux: bool,
has_state: bool,
) -> Tuple[jnp.ndarray, Any, Any]:
"""Given the function output returns separately the loss, func_state, aux."""
if not has_aux and not has_state:
return raw_outputs, None, None
loss, other = raw_outputs
if has_aux and has_state:
func_state, aux = other
elif has_aux:
func_state, aux = None, other
else:
func_state, aux = other, None
return loss, func_state, aux
def inner_product(obj1: T, obj2: T) -> jnp.ndarray:
if jax.tree_structure(obj1) != jax.tree_structure(obj2):
raise ValueError("The two structures are not identical.")
elements_product = jax.tree_multimap(lambda x, y: jnp.sum(x * y), obj1, obj2)
return sum(jax.tree_flatten(elements_product)[0])
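# Hedged usage sketch (illustrative only; `_demo_inner_product` is not part of
# the library): the inner product of two pytrees with identical structure is
# the dot product of their flattened leaves, here 1*3 + 2*4 + 5*6 = 41.
def _demo_inner_product():
  u = {"a": jnp.array([1.0, 2.0]), "b": jnp.array([5.0])}
  v = {"a": jnp.array([3.0, 4.0]), "b": jnp.array([6.0])}
  return inner_product(u, v)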
def psd_inv_cholesky(matrix: jnp.ndarray, damping: jnp.ndarray) -> jnp.ndarray:
assert matrix.ndim == 2
identity = jnp.eye(matrix.shape[0])
matrix = matrix + damping * identity
return linalg.solve(matrix, identity, sym_pos=True)
def solve_maybe_small(a: jnp.ndarray, b: jnp.ndarray) -> jnp.ndarray:
"""Computes a^-1 b more efficiently for small matrices."""
assert a.shape[-1] == a.shape[-2] == b.shape[-1]
d = a.shape[-1]
if d == 0:
return a
elif d == 1:
return b / a[..., 0]
elif d == 2:
det = a[..., 0, 0] * a[..., 1, 1] - a[..., 0, 1] * a[..., 1, 0]
b_0 = a[..., 1, 1] * b[..., 0] - a[..., 0, 1] * b[..., 1]
b_1 = a[..., 0, 0] * b[..., 1] - a[..., 1, 0] * b[..., 0]
return jnp.stack([b_0, b_1], axis=-1) / det
elif d == 3:
raise NotImplementedError()
return jnp.linalg.solve(a, b)
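# Hedged usage sketch (illustrative only; `_demo_solve_2x2` is not part of the
# library): for a 2x2 system the closed-form adjugate solution is used instead
# of a general linear solve; the expected answer here is [3.0, 2.0].
def _demo_solve_2x2():
  a = jnp.array([[2.0, 0.0], [0.0, 4.0]])
  b = jnp.array([6.0, 8.0])
  return solve_maybe_small(a, b)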
def pi_adjusted_inverse(
factor_0: jnp.ndarray,
factor_1: jnp.ndarray,
damping: jnp.ndarray,
pmap_axis_name: str,
) -> Tuple[jnp.ndarray, jnp.ndarray]:
"""Performs inversion with pi-adjusted damping."""
# Compute the norms of each factor
norm_0 = jnp.trace(factor_0)
norm_1 = jnp.trace(factor_1)
  # We need to sync the norms here because reductions can be non-deterministic
  # (they are on GPUs by default, for better performance). Hence, although
  # factor_0 and factor_1 are synced, the trace operations above can still
  # produce different answers on different devices.
norm_0, norm_1 = pmean_if_pmap((norm_0, norm_1), axis_name=pmap_axis_name)
# Compute the overall scale
scale = norm_0 * norm_1
def regular_inverse(
operand: Sequence[jnp.ndarray]) -> Tuple[jnp.ndarray, jnp.ndarray]:
factor0, factor1, norm0, norm1, s, d = operand
# Special cases with one or two scalar factors
if factor0.size == 1 and factor1.size == 1:
value = jnp.ones_like(factor0) / jnp.sqrt(s)
return value, value
if factor0.size == 1:
factor1_normed = factor1 / norm1
damping1 = d / norm1
factor1_inv = psd_inv_cholesky(factor1_normed, damping1)
return jnp.full((1, 1), s), factor1_inv
if factor1.size == 1:
factor0_normed = factor0 / norm0
damping0 = d / norm0
factor0_inv = psd_inv_cholesky(factor0_normed, damping0)
return factor0_inv, jnp.full((1, 1), s)
# Invert first factor
factor0_normed = factor0 / norm0
damping0 = jnp.sqrt(d * factor1.shape[0] / (s * factor0.shape[0]))
factor0_inv = psd_inv_cholesky(factor0_normed, damping0) / jnp.sqrt(s)
# Invert second factor
factor1_normed = factor1 / norm1
damping1 = jnp.sqrt(d * factor0.shape[0] / (s * factor1.shape[0]))
factor1_inv = psd_inv_cholesky(factor1_normed, damping1) / jnp.sqrt(s)
return factor0_inv, factor1_inv
def zero_inverse(
operand: Sequence[jnp.ndarray]) -> Tuple[jnp.ndarray, jnp.ndarray]:
return (jnp.eye(factor_0.shape[0]) / jnp.sqrt(operand[-1]),
jnp.eye(factor_1.shape[0]) / jnp.sqrt(operand[-1]))
# In the special case where for some reason one of the factors is zero, then
# the correct inverse of `(0 kron A + lambda I)` is
# `(I/sqrt(lambda) kron (I/sqrt(lambda)`. However, because one of the norms is
# zero, then `pi` and `1/pi` would be 0 and infinity leading to NaN values.
# Hence, we need to make this check explicitly.
return lax.cond(
jnp.greater(scale, 0.0),
regular_inverse,
zero_inverse,
operand=(factor_0, factor_1, norm_0, norm_1, scale, damping))
def convert_value_and_grad_to_value_func(
value_and_grad_func,
has_aux: bool = False,
):
"""Converts a value_and_grad function to value_func only."""
def value_func(*args, **kwargs):
out, _ = value_and_grad_func(*args, **kwargs)
if has_aux:
return out[0]
else:
return out
return value_func
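# Hedged usage sketch (illustrative only; `_demo_value_func` is not part of
# the library): recovering a loss-only function from a `jax.value_and_grad`
# wrapped one; the call below should evaluate to 5.0.
def _demo_value_func():
  value_and_grad = jax.value_and_grad(lambda p: jnp.sum(p ** 2))
  value_func = convert_value_and_grad_to_value_func(value_and_grad)
  return value_func(jnp.array([1.0, 2.0]))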
def check_structure_shapes_and_dtype(obj1: T, obj2: T) -> None:
"""Verifies that the two objects have the same pytree structure."""
assert jax.tree_structure(obj1) == jax.tree_structure(obj2)
for v1, v2 in zip(jax.tree_flatten(obj1)[0], jax.tree_flatten(obj2)[0]):
assert v1.shape == v2.shape
assert v1.dtype == v2.dtype
def check_first_dim_is_batch_size(batch_size: int, *args: jnp.ndarray) -> None:
for i, arg in enumerate(args):
if arg.shape[0] != batch_size:
raise ValueError(f"Expecting first dimension of arg[{i}] with shape "
f"{arg.shape} to be equal to the batch size "
f"{batch_size}.")
def py_tree_registered_dataclass(cls, *args, **kwargs):
"""Creates a new dataclass type and registers it as a pytree node."""
dcls = dataclasses.dataclass(cls, *args, **kwargs)
tree_util.register_pytree_node(
dcls,
lambda instance: ( # pylint: disable=g-long-lambda
[getattr(instance, f.name)
for f in dataclasses.fields(instance)], None),
lambda _, instance_args: dcls(*instance_args))
return dcls
class WeightedMovingAverage:
"""A wrapped class for a variable for which we keep exponential moving average."""
def __init__(self, weight: jnp.ndarray, array: jnp.ndarray):
self._weight = weight
self._array = array
@staticmethod
def zero(shape: Sequence[int]) -> "WeightedMovingAverage":
return WeightedMovingAverage(weight=jnp.zeros([]), array=jnp.zeros(shape))
@property
def weight(self) -> jnp.ndarray:
return self._weight
@property
def value(self) -> jnp.ndarray:
return self._array / self._weight
@property
def raw_value(self) -> jnp.ndarray:
return self._array
def update(self, value: jnp.ndarray, old_weight_multiplier: float,
new_weight: float) -> None:
self._weight = old_weight_multiplier * self._weight + new_weight
self._array = old_weight_multiplier * self._array + new_weight * value
def sync(self, pmap_axis_name: str) -> None:
self._array = pmean_if_pmap(self._array, pmap_axis_name)
def __str__(self) -> str:
return (f"ExponentialMovingAverage(weight={self._weight}, "
f"array={self._array})")
def __repr__(self) -> str:
return self.__str__()
tree_util.register_pytree_node(
WeightedMovingAverage,
lambda instance: ((instance.weight, instance.raw_value), None),
lambda _, instance_args: WeightedMovingAverage(*instance_args),
)
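# Hedged usage sketch (illustrative only; `_demo_weighted_moving_average` is
# not part of the library): accumulate a weighted moving average of a
# statistic and read back its normalised value.
def _demo_weighted_moving_average():
  ema = WeightedMovingAverage.zero(shape=[2])
  ema.update(jnp.array([1.0, 2.0]), old_weight_multiplier=0.9, new_weight=1.0)
  ema.update(jnp.array([3.0, 4.0]), old_weight_multiplier=0.9, new_weight=1.0)
  return ema.value  # raw array divided by the accumulated weight (here 1.9)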
class Stateful:
"""A class for stateful objects."""
def __init__(self, stateful_fields_names: Optional[Sequence[str]] = ()):
self.__stateful_fields_names = stateful_fields_names
def _add_stateful_fields_names(self, value: Sequence[str]) -> None:
self.__stateful_fields_names += tuple(value)
def get_state(self) -> Mapping[str, Any]:
"""Returns the state of the object."""
state = dict()
for name in self.__stateful_fields_names:
state[name] = Stateful._get_state_from_instance(getattr(self, name))
return state
def set_state(self, value):
"""Sets the state of the object with the provided value and returns the object."""
assert isinstance(value, dict)
for name in self.__stateful_fields_names:
setattr(self, name,
Stateful._set_state_to_instance(getattr(self, name), value[name]))
return self
def clear_state(self) -> None:
"""Clears the state of the object."""
for name in self.__stateful_fields_names:
setattr(self, name,
Stateful._clear_state_from_instance(getattr(self, name)))
def pop_state(self) -> Mapping[str, Any]:
"""Returns the current state of the object, while simultaneously clearing it."""
state = self.get_state()
self.clear_state()
return state
@staticmethod
def _get_state_from_instance(obj):
"""Recursively gets the state of the object and returns it."""
if isinstance(obj, Stateful):
return obj.get_state()
if isinstance(obj, list):
return [Stateful._get_state_from_instance(i) for i in obj]
if isinstance(obj, tuple):
return tuple(Stateful._get_state_from_instance(i) for i in obj)
if isinstance(obj, collections.OrderedDict):
return collections.OrderedDict(
(k, Stateful._get_state_from_instance(v)) for k, v in obj.items())
if isinstance(obj, dict):
return dict(
(k, Stateful._get_state_from_instance(v)) for k, v in obj.items())
return obj
@staticmethod
def _set_state_to_instance(obj, value):
"""Recursively sets the state of the object and returns it."""
if isinstance(obj, Stateful):
obj.set_state(value)
return obj
if isinstance(value, list):
if obj is None:
obj = [None] * len(value)
return [
Stateful._set_state_to_instance(obj_i, value_i)
for obj_i, value_i in zip(obj, value)
]
if isinstance(value, tuple):
if obj is None:
obj = [None] * len(value)
return tuple(
Stateful._set_state_to_instance(obj_i, value_i)
for obj_i, value_i in zip(obj, value))
if isinstance(value, collections.OrderedDict):
if obj is None:
obj = dict((k, None) for k in value)
return collections.OrderedDict(
(k, Stateful._set_state_to_instance(obj[k], value[k])) for k in obj)
if isinstance(value, dict):
      if obj is None:
        obj = dict((k, None) for k in value)
return dict(
(k, Stateful._set_state_to_instance(obj[k], value[k])) for k in obj)
return value
@staticmethod
def _clear_state_from_instance(obj):
"""Recursively clears the state of the object and returns it."""
if isinstance(obj, Stateful):
obj.clear_state()
return obj
if isinstance(obj, list):
return [Stateful._clear_state_from_instance(obj_i) for obj_i in obj]
if isinstance(obj, tuple):
return tuple(Stateful._clear_state_from_instance(obj_i) for obj_i in obj)
if isinstance(obj, collections.OrderedDict):
return collections.OrderedDict(
(k, Stateful._clear_state_from_instance(obj[k])) for k in obj)
if isinstance(obj, dict):
return dict((k, Stateful._clear_state_from_instance(obj[k])) for k in obj)
return None
@staticmethod
def infer_class_state(class_type):
"""Infers a stateful class state attributes from class annotations."""
if not issubclass(class_type, Stateful):
raise ValueError(
f"In order to annotate a class as stateful it must inherit "
f"{Stateful!r}")
class_type = dataclasses.dataclass(
class_type, init=False, repr=False, eq=False) # pytype: disable=wrong-keyword-args
fields_names = tuple(field.name for field in dataclasses.fields(class_type))
original_init = getattr(class_type, "__init__", None)
if original_init is None:
def injected_init(self, *args, **kwargs):
super(self.__class__, self).__init__(*args, **kwargs) # pylint: disable=bad-super-call
Stateful._add_stateful_fields_names(self, fields_names)
for field_name in fields_names:
if getattr(self, field_name, None) is None:
setattr(self, field_name, None)
setattr(class_type, "__init__", injected_init)
else:
def injected_init(self, *args, **kwargs):
original_init(self, *args, **kwargs)
Stateful._add_stateful_fields_names(self, fields_names)
for field_name in fields_names:
if getattr(self, field_name, None) is None:
setattr(self, field_name, None)
setattr(class_type, "__init__", injected_init)
return class_type
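# Hedged usage sketch (illustrative only; `_DemoCounter` is not part of the
# library): a class whose annotated field is tracked automatically by
# `Stateful.infer_class_state`, so it can be exported and restored as a
# mapping via `get_state` / `set_state`.
@Stateful.infer_class_state
class _DemoCounter(Stateful):
  steps: Any
  def increment(self) -> None:
    self.steps = jnp.zeros([]) if self.steps is None else self.steps + 1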
def compute_sq_norm_relative_abs_diff(obj, pmap_axis_name):
sq_norm = inner_product(obj, obj)
synced_sq_norm = psum_if_pmap(sq_norm, pmap_axis_name)
synced_sq_norm = (synced_sq_norm - sq_norm) / (jax.device_count() - 1.0)
sq_norm_abs_diff = jnp.abs(sq_norm - synced_sq_norm)
return sq_norm_abs_diff / sq_norm
def product(iterable_object):
x = 1
for element in iterable_object:
x *= element
return x
| deepmind-research-master | kfac_ferminet_alpha/utils.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module for the main curvature optimizer class."""
from typing import Any, Callable, Iterator, Mapping, Optional, Sequence, Tuple, Union
import jax
import jax.lax as lax
import jax.numpy as jnp
import jax.random as jnr
from kfac_ferminet_alpha import estimator
from kfac_ferminet_alpha import tag_graph_matcher as tgm
from kfac_ferminet_alpha import utils
ScheduleType = Callable[[jnp.ndarray], Optional[jnp.ndarray]]
Parameters = Any
Batch = Any
FuncState = Any
State = Mapping[str, Any]
@utils.Stateful.infer_class_state
class Optimizer(utils.Stateful):
"""The default optimizer class."""
velocities: Parameters
estimator: estimator.CurvatureEstimator
step_counter: jnp.ndarray
def __init__(
self,
value_and_grad_func,
l2_reg: Union[float, jnp.ndarray],
value_func_has_aux: bool = False,
value_func_has_state: bool = False,
value_func_has_rng: bool = False,
learning_rate_schedule: Optional[ScheduleType] = None,
momentum_schedule: Optional[ScheduleType] = None,
damping_schedule: Optional[ScheduleType] = None,
min_damping: Union[float, jnp.ndarray] = 1e-8,
max_damping: Union[float, jnp.ndarray] = jnp.inf,
norm_constraint: Optional[Union[float, jnp.ndarray]] = None,
num_burnin_steps: int = 10,
estimation_mode: str = "fisher_gradients",
curvature_ema: Union[float, jnp.ndarray] = 0.95,
inverse_update_period: int = 5,
register_only_generic: bool = False,
layer_tag_to_block_cls: Optional[estimator.TagMapping] = None,
patterns_to_skip: Sequence[str] = (),
donate_parameters: bool = False,
donate_optimizer_state: bool = False,
donate_batch_inputs: bool = False,
donate_func_state: bool = False,
batch_process_func: Optional[Callable[[Any], Any]] = None,
multi_device: bool = False,
use_jax_cond: bool = True,
debug: bool = False,
pmap_axis_name="kfac_axis",
):
"""Initializes the K-FAC optimizer with the given settings.
Args:
      value_and_grad_func: Python callable. The function should return the
        value of the loss to be optimized and its gradients. If the argument
        `value_func_has_aux` is `False` then the interface should be:
          loss, loss_grads = value_and_grad_func(params, batch)
        If `value_func_has_aux` is `True` then the interface should be:
          (loss, aux), loss_grads = value_and_grad_func(params, batch)
l2_reg: Scalar. Set this value to tell the optimizer what L2
regularization coefficient you are using (if any). Note the coefficient
appears in the regularizer as coeff / 2 * sum(param**2). Note that the
user is still responsible for adding regularization to the loss.
value_func_has_aux: Boolean. Specifies whether the provided callable
`value_and_grad_func` returns the loss value only, or also some
auxiliary data. (Default: False)
      value_func_has_state: Boolean. Specifies whether the provided callable
        `value_and_grad_func` has a persistent state that is passed in, and
        whether it also outputs an updated version of it. (Default: False)
value_func_has_rng: Boolean. Specifies whether the provided callable
`value_and_grad_func` additionally takes as input an rng key.
(Default: False)
learning_rate_schedule: Callable. A schedule for the learning rate. This
should take as input the current step number and return a single
`jnp.ndarray` that represents the learning rate. (Default: None)
momentum_schedule: Callable. A schedule for the momentum. This should take
as input the current step number and return a single `jnp.ndarray`
that represents the momentum. (Default: None)
damping_schedule: Callable. A schedule for the damping. This should take
as input the current step number and return a single `jnp.ndarray`
        that represents the damping. (Default: None)
min_damping: Scalar. Minimum value the damping parameter can take. Note
that the default value of 1e-8 is quite arbitrary, and you may have to
adjust this up or down for your particular problem. If you are using a
non-zero value of l2_reg you *may* be able to set this to
zero. (Default: 1e-8)
max_damping: Scalar. Maximum value the damping parameter can take.
(Default: Infinity)
norm_constraint: Scalar. If specified, the update is scaled down so that
its approximate squared Fisher norm `v^T F v` is at most the specified
        value. (Note that here `F` is the approximate curvature matrix, not
        the exact one.) (Default: None)
      num_burnin_steps: Int. At the start of optimization, i.e. on the first
        step, before performing the actual step the optimizer will perform
        this many updates to the curvature approximation without updating the
        actual parameters. (Default: 10)
      estimation_mode: String. The type of estimator to use for the curvature
        matrix. Can be one of: 'fisher_empirical', 'fisher_exact',
        'fisher_gradients', 'fisher_curvature_prop', 'ggn_exact' or
        'ggn_curvature_prop'. See the doc-string for CurvatureEstimator (in
        estimator.py) for a more detailed description of these options.
        (Default: 'fisher_gradients')
curvature_ema: The decay factor used when calculating the covariance
estimate moving averages. (Default: 0.95)
      inverse_update_period: Int. The number of steps between updates of the
        inverse curvature approximation. (Default: 5)
      register_only_generic: Boolean. Whether, when running the auto-tagger,
        to register only generic parameters, or to allow it to use the graph
        matcher to automatically pick up any kind of layer tags.
        (Default: False)
      layer_tag_to_block_cls: Dictionary. A mapping from layer tags to block
        classes used to override the default choice of block approximation
        for that specific tag. See the doc-string for CurvatureEstimator (in
        estimator.py) for a more detailed description of this.
patterns_to_skip: Tuple. A list of any patterns that should be skipped by
the graph matcher when auto-tagging.
donate_parameters: Boolean. Whether to use jax's `donate_argnums` to
donate the parameter values of each call to `step`. Note that this
implies that you will not be able to access the old parameter values'
buffers after calling into `step`.
donate_optimizer_state: Boolean. Whether to use jax's `donate_argnums` to
donate the optimizer state of each call to `step`. Note that this
implies that you will not be able to access the old optimizer state
values' buffers after calling into `step`.
donate_batch_inputs: Boolean. Whether to use jax's `donate_argnums` to
donate the batch values of each call to `step`. Note that this implies
that you will not be able to access the old batch values' buffers after
calling into `step`.
donate_func_state: Boolean. Whether to use jax's `donate_argnums` to
donate the persistent function state of each call to `step`. Note that
this implies that you will not be able to access the old function state
values' buffers after calling into `step`.
      batch_process_func: Callable. A function to be called on each batch
        before feeding it to KFAC on device. This could be useful for specific
        device input optimizations.
multi_device: Boolean. Whether to use `pmap` and run the optimizer on
multiple devices. (Default: False)
use_jax_cond: Not used for the moment.
      debug: Boolean. If True, none of the step or init functions will be
        jitted. Note that this also overrides `multi_device` and prevents
        using `pmap`. (Default: False)
      pmap_axis_name: String. The name of the `pmap` axis to use when
        `multi_device` is set to True. (Default: "kfac_axis")
"""
super().__init__()
self.value_and_grad_func = value_and_grad_func
self.value_func_has_aux = value_func_has_aux
self.value_func_has_state = value_func_has_state
self.value_func_has_rng = value_func_has_rng
self.value_func = utils.convert_value_and_grad_to_value_func(
value_and_grad_func, has_aux=value_func_has_aux)
self.l2_reg = l2_reg
self.learning_rate_schedule = learning_rate_schedule
if momentum_schedule is not None:
def schedule_with_first_step_zero(global_step: jnp.ndarray):
value = momentum_schedule(global_step)
check = jnp.equal(global_step, 0)
return check * jnp.zeros_like(value) + (1 - check) * value
self.momentum_schedule = schedule_with_first_step_zero
else:
self.momentum_schedule = None
self.damping_schedule = damping_schedule
self.min_damping = min_damping
self.max_damping = max_damping
self.norm_constraint = norm_constraint
self.num_burnin_steps = num_burnin_steps
self.estimation_mode = estimation_mode
self.curvature_ema = curvature_ema
self.inverse_update_period = inverse_update_period
self.register_only_generic = register_only_generic
self.layer_tag_to_block_cls = layer_tag_to_block_cls
self.patterns_to_skip = patterns_to_skip
self.donate_parameters = donate_parameters
self.donate_optimizer_state = donate_optimizer_state
self.donate_batch_inputs = donate_batch_inputs
self.donate_func_state = donate_func_state
self.batch_process_func = batch_process_func or (lambda x: x)
self.multi_device = multi_device
self.use_jax_cond = use_jax_cond
self.debug = debug
self.pmap_axis_name = pmap_axis_name if multi_device else None
self._rng_split = utils.p_split if multi_device else jnr.split
# Attributes filled in during self.init()
self.finalized = False
self.tagged_func = None
self.flat_params_shapes = None
self.params_treedef = None
# Special attributes related to jitting/pmap
self._jit_init = None
self._jit_burnin = None
self._jit_step = None
def finalize(
self,
params: Parameters,
rng: jnp.ndarray,
batch: Batch,
func_state: Optional[FuncState] = None,
) -> None:
"""Finalizes the optimizer by tracing the model function with the params and batch."""
if self.finalized:
raise ValueError("Optimizer has already been finalized.")
if self.multi_device:
# We assume that the parameters and batch are replicated, while tracing
# must happen with parameters for a single device call
params, rng, batch = jax.tree_map(lambda x: x[0], (params, rng, batch))
if func_state is not None:
func_state = jax.tree_map(lambda x: x[0], func_state)
batch = self.batch_process_func(batch)
# These are all tracing operations and we can run them with abstract values
func_args = utils.make_func_args(params, func_state, rng, batch,
self.value_func_has_state,
self.value_func_has_rng)
# Run all tracing with abstract values so no computation is done
flat_params, self.params_treedef = jax.tree_flatten(params)
self.flat_params_shapes = tuple(p.shape for p in flat_params)
self.tagged_func = tgm.auto_register_tags(
func=self.value_func,
func_args=func_args,
params_index=0,
register_only_generic=self.register_only_generic,
patterns_to_skip=self.patterns_to_skip)
self.estimator = estimator.CurvatureEstimator(
self.tagged_func,
func_args,
self.l2_reg,
self.estimation_mode,
layer_tag_to_block_cls=self.layer_tag_to_block_cls)
# Arguments: params, opt_state, rng, batch, func_state
donate_argnums = []
if self.donate_parameters:
donate_argnums.append(0)
if self.donate_optimizer_state:
donate_argnums.append(1)
if self.donate_batch_inputs:
donate_argnums.append(3)
if self.donate_func_state and self.value_func_has_state:
donate_argnums.append(4)
donate_argnums = tuple(donate_argnums)
if self.debug:
self._jit_init = self._init
self._jit_burnin = self._burnin
self._jit_step = self._step
elif self.multi_device:
self._jit_init = jax.pmap(
self._init, axis_name=self.pmap_axis_name, donate_argnums=[0])
# batch size is static argnum and is at index 5
self._jit_burnin = jax.pmap(
self._burnin,
axis_name=self.pmap_axis_name,
static_broadcasted_argnums=[5])
self._jit_step = jax.pmap(
self._step,
axis_name=self.pmap_axis_name,
donate_argnums=donate_argnums,
static_broadcasted_argnums=[5])
else:
self._jit_init = jax.jit(self._init, donate_argnums=[0])
# batch size is static argnum and is at index 5
self._jit_burnin = jax.jit(self._burnin, static_argnums=[5])
self._jit_step = jax.jit(
self._step, donate_argnums=donate_argnums, static_argnums=[5])
self.finalized = True
def _init(self, rng: jnp.ndarray) -> State:
"""This is the non-jitted version of initializing the state."""
flat_velocities = [jnp.zeros(shape) for shape in self.flat_params_shapes]
return dict(
velocities=jax.tree_unflatten(self.params_treedef, flat_velocities),
estimator=self.estimator.init(rng, None),
step_counter=jnp.asarray(0))
def verify_args_and_get_step_counter(
self,
params: Parameters,
state: State,
rng: jnp.ndarray,
data_iterator: Iterator[Batch],
func_state: Optional[FuncState] = None,
learning_rate: Optional[jnp.ndarray] = None,
momentum: Optional[jnp.ndarray] = None,
damping: Optional[jnp.ndarray] = None,
global_step_int: Optional[int] = None,
) -> int:
"""Verifies that the arguments passed to `Optimizer.step` are correct."""
if not self.finalized:
rng, rng_finalize = self._rng_split(rng)
self.finalize(params, rng_finalize, next(data_iterator), func_state)
# Verify correct arguments invocation
if self.learning_rate_schedule is not None and learning_rate is not None:
raise ValueError("When you have passed a `learning_rate_schedule` you "
"should not pass a value to the step function.")
if self.momentum_schedule is not None and momentum is not None:
raise ValueError("When you have passed a `momentum_schedule` you should "
"not pass a value to the step function.")
if self.damping_schedule is not None and damping is not None:
raise ValueError("When you have passed a `damping_schedule` you should "
"not pass a value to the step function.")
    # Determine the global step (a burnin is run on the first iteration).
if global_step_int is None:
if self.multi_device:
return int(utils.get_first(state["step_counter"]))
else:
return int(state["step_counter"])
return global_step_int
def _burnin(
self,
params: Parameters,
state: State,
rng: jnp.ndarray,
batch: Batch,
func_state: Optional[FuncState],
batch_size: Optional[int],
) -> Tuple[State, Optional[FuncState]]:
"""This is the non-jitted version of a single burnin step."""
self.set_state(state)
batch = self.batch_process_func(batch)
rng, func_rng = jnr.split(rng) if self.value_func_has_rng else (rng, None)
func_args = utils.make_func_args(params, func_state, func_rng, batch,
self.value_func_has_state,
self.value_func_has_rng)
# Compute batch size
if batch_size is None:
batch_size = jax.tree_flatten(batch)[0][0].shape[0]
# Update curvature estimate
ema_old, ema_new = 1.0, 1.0 / self.num_burnin_steps
self.estimator.update_curvature_matrix_estimate(ema_old, ema_new,
batch_size, rng, func_args,
self.pmap_axis_name)
if func_state is not None:
out, _ = self.value_and_grad_func(*func_args)
_, func_state, _ = utils.extract_func_outputs(out,
self.value_func_has_aux,
self.value_func_has_state)
return self.pop_state(), func_state
def _step(
self,
params: Parameters,
state: State,
rng: jnp.ndarray,
batch: Batch,
func_state: Optional[FuncState],
batch_size: Optional[int],
learning_rate: Optional[jnp.ndarray],
momentum: Optional[jnp.ndarray],
damping: Optional[jnp.ndarray],
) -> Union[Tuple[Parameters, State, FuncState, Mapping[str, jnp.ndarray]],
Tuple[Parameters, State, Mapping[str, jnp.ndarray]]]:
"""This is the non-jitted version of a single step."""
# Unpack and set the state
self.set_state(state)
if damping is not None:
assert self.estimator.damping is None
self.estimator.damping = damping
else:
assert self.estimator.damping is not None
# Preprocess the batch and construct correctly the function arguments
batch = self.batch_process_func(batch)
rng, func_rng = jnr.split(rng) if self.value_func_has_rng else (rng, None)
func_args = utils.make_func_args(params, func_state, func_rng, batch,
self.value_func_has_state,
self.value_func_has_rng)
# Compute the batch size
if batch_size is None:
batch_size = jax.tree_flatten(batch)[0][0].shape[0]
# Compute schedules if applicable
if self.learning_rate_schedule is not None:
assert learning_rate is None
learning_rate = self.learning_rate_schedule(self.step_counter)
else:
assert learning_rate is not None
if self.momentum_schedule is not None:
assert momentum is None
momentum = self.momentum_schedule(self.step_counter)
else:
assert momentum is not None
if self.damping_schedule is not None:
assert damping is None
damping = self.damping_schedule(self.step_counter)
else:
assert damping is not None
# Compute current loss and gradients
out, grads = self.value_and_grad_func(*func_args)
loss, new_func_state, aux = utils.extract_func_outputs(
out, self.value_func_has_aux, self.value_func_has_state)
# Sync loss and grads
loss, grads = utils.pmean_if_pmap((loss, grads), self.pmap_axis_name)
# Update curvature estimate
self.estimator.update_curvature_matrix_estimate(
self.curvature_ema,
1.0,
batch_size,
rng,
func_args,
self.pmap_axis_name,
)
# Optionally update the inverse estimate
self.estimator.set_state(
lax.cond(
self.step_counter % self.inverse_update_period == 0,
lambda s: self.estimator.update_curvature_estimate_inverse( # pylint: disable=g-long-lambda
self.pmap_axis_name, s),
lambda s: s,
self.estimator.pop_state()))
# Compute proposed directions
vectors = self.propose_directions(
grads,
self.velocities,
learning_rate,
momentum,
)
# The learning rate is defined as the negative of the coefficient by which
# we multiply the gradients, while the momentum is the coefficient by
# which we multiply the velocities.
neg_learning_rate = -learning_rate
# Compute the coefficients of the update vectors
assert neg_learning_rate is not None and momentum is not None
coefficients = (neg_learning_rate, momentum)
# Update velocities and compute new delta
self.velocities, delta = self.velocities_and_delta(
self.velocities,
vectors,
coefficients,
)
# Update parameters: params = params + delta
params = jax.tree_multimap(jnp.add, params, delta)
# Optionally compute the reduction ratio and update the damping
self.estimator.damping = None
rho = jnp.nan
# Statistics with useful information
stats = dict()
stats["step"] = self.step_counter
stats["loss"] = loss
stats["learning_rate"] = -coefficients[0]
stats["momentum"] = coefficients[1]
stats["damping"] = damping
stats["rho"] = rho
if self.value_func_has_aux:
stats["aux"] = aux
self.step_counter = self.step_counter + 1
if self.value_func_has_state:
return params, self.pop_state(), new_func_state, stats
else:
assert new_func_state is None
return params, self.pop_state(), stats
def init(
self,
params: Parameters,
rng: jnp.ndarray,
batch: Batch,
func_state: Optional[FuncState] = None,
) -> State:
"""Initializes the optimizer and returns the appropriate optimizer state."""
if not self.finalized:
self.finalize(params, rng, batch, func_state)
return self._jit_init(rng)
def step(
self,
params: Parameters,
state: Mapping[str, Any],
rng: jnp.ndarray,
data_iterator: Iterator[Any],
func_state: Any = None,
learning_rate: Optional[jnp.ndarray] = None,
momentum: Optional[jnp.ndarray] = None,
damping: Optional[jnp.ndarray] = None,
batch_size: Optional[int] = None,
global_step_int: Optional[int] = None,
) -> Union[Tuple[Parameters, State, FuncState, Mapping[str, jnp.ndarray]],
Tuple[Parameters, State, Mapping[str, jnp.ndarray]]]:
"""Performs a single update step using the optimizer.
Args:
params: The parameters of the model.
state: The state of the optimizer.
rng: A Jax PRNG key.
data_iterator: An iterator that returns a batch of data.
func_state: Any function state that gets passed in and returned.
      learning_rate: The learning rate to use. Must be provided when the
        optimizer was constructed with `learning_rate_schedule=None`, and
        must be None otherwise.
      momentum: The momentum to use. Must be provided when the optimizer was
        constructed with `momentum_schedule=None`, and must be None otherwise.
      damping: The damping to use. Must be provided when the optimizer was
        constructed with `damping_schedule=None`, and must be None otherwise.
batch_size: The batch size to use for KFAC. The default behaviour when it
is None is to use the leading dimension of the first data array.
global_step_int: The global step as a python int. Note that this must
        match the step internal to the optimizer that is part of its state.
Returns:
(params, state, stats)
where:
params: The updated model parameters.
state: The updated optimizer state.
stats: A dictionary of key statistics provided to be logged.
"""
step_counter_int = self.verify_args_and_get_step_counter(
params=params,
state=state,
rng=rng,
data_iterator=data_iterator,
func_state=func_state,
learning_rate=learning_rate,
momentum=momentum,
damping=damping,
global_step_int=global_step_int)
if step_counter_int == 0:
for _ in range(self.num_burnin_steps):
rng, rng_burn = self._rng_split(rng)
batch = next(data_iterator)
state, func_state = self._jit_burnin(params, state, rng_burn, batch,
func_state, batch_size)
# On the first step we always treat the momentum as 0.0
if self.momentum_schedule is None:
momentum = jnp.zeros([])
if self.multi_device:
momentum = utils.replicate_all_local_devices(momentum)
batch = next(data_iterator)
return self._jit_step(params, state, rng, batch, func_state, batch_size,
learning_rate, momentum, damping)
def propose_directions(
self,
grads: Parameters,
velocities: Parameters,
learning_rate: Optional[jnp.ndarray],
momentum: Optional[jnp.ndarray],
) -> Tuple[Parameters, Parameters]:
"""Computes the vector proposals for the next step."""
del momentum # not used in this, but could be used in subclasses
preconditioned_grads = self.estimator.multiply_matpower(grads, -1)
if self.norm_constraint is not None:
assert learning_rate is not None
sq_norm_grads = utils.inner_product(preconditioned_grads, grads)
sq_norm_scaled_grads = sq_norm_grads * learning_rate**2
      # We need to sync the norms here because reductions can be
      # non-deterministic (they are on GPUs by default, for better
      # performance). Hence, although grads and preconditioned_grads are
      # synced, the inner_product operation can still produce different
      # answers on different devices.
sq_norm_scaled_grads = utils.pmean_if_pmap(sq_norm_scaled_grads,
self.pmap_axis_name)
max_coefficient = jnp.sqrt(self.norm_constraint / sq_norm_scaled_grads)
coefficient = jnp.minimum(max_coefficient, 1)
preconditioned_grads = utils.scalar_mul(preconditioned_grads, coefficient)
return preconditioned_grads, velocities
def velocities_and_delta(
self,
velocities: Parameters,
vectors: Sequence[Parameters],
coefficients: Sequence[jnp.ndarray],
) -> Sequence[Parameters]:
"""Computes the new velocities and delta (update to parameters)."""
del velocities
assert len(vectors) == len(coefficients)
delta = utils.scalar_mul(vectors[0], coefficients[0])
for vi, wi in zip(vectors[1:], coefficients[1:]):
delta = jax.tree_multimap(jnp.add, delta, utils.scalar_mul(vi, wi))
return delta, delta
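# Hedged usage sketch (illustrative only; `_example_inverse_time_decay` is not
# part of the library): a simple `learning_rate_schedule` compatible with the
# interface documented above -- it maps the global step (a jnp.ndarray) to a
# scalar learning rate. The constants are assumed values for demonstration.
def _example_inverse_time_decay(global_step: jnp.ndarray) -> jnp.ndarray:
  initial_learning_rate = 1e-3
  decay = 1e-2
  return initial_learning_rate / (1.0 + decay * global_step)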
| deepmind-research-master | kfac_ferminet_alpha/optimizer.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines the high-level Fisher estimator class."""
import collections
from typing import Any, Callable, Mapping, Optional, Sequence, Union, TypeVar
import jax
import jax.numpy as jnp
import jax.random as jnr
import numpy as np
from kfac_ferminet_alpha import curvature_blocks
from kfac_ferminet_alpha import tracer
from kfac_ferminet_alpha import utils
_CurvatureBlock = curvature_blocks.CurvatureBlock
TagMapping = Mapping[str, curvature_blocks.CurvatureBlockCtor]
BlockVector = Sequence[jnp.ndarray]
_StructureT = TypeVar("_StructureT")
_OptionalStateT = TypeVar("_OptionalStateT", bound=Optional[Mapping[str, Any]])
@utils.Stateful.infer_class_state
class CurvatureEstimator(utils.Stateful):
"""Curvature estimator class supporting various curvature approximations."""
blocks: "collections.OrderedDict[str, _CurvatureBlock]"
damping: Optional[jnp.ndarray]
def __init__(self,
tagged_func: Callable[[Any], jnp.ndarray],
func_args: Sequence[Any],
l2_reg: Union[float, jnp.ndarray],
estimation_mode: str = "fisher_gradients",
params_index: int = 0,
layer_tag_to_block_cls: Optional[TagMapping] = None):
"""Create a FisherEstimator object.
Args:
      tagged_func: The function which evaluates the model, in which layer and
        loss tags have already been registered.
func_args: Arguments to trace the function for layer and loss tags.
l2_reg: Scalar. The L2 regularization coefficient, which represents
the following regularization function: `coefficient/2 ||theta||^2`.
      estimation_mode: The type of curvature estimator to use. One of:
        * 'fisher_gradients' - the basic estimation approach from the original
          K-FAC paper. (Default)
        * 'fisher_curvature_prop' - method which estimates the Fisher using
          self-products of random 1/-1 vectors times "half-factors" of the
          Fisher, as described here: https://arxiv.org/abs/1206.6464
        * 'fisher_exact' - the obvious generalization of Curvature Propagation
          to compute the exact Fisher (modulo any additional diagonal or
          Kronecker approximations) by looping over one-hot vectors for each
          coordinate of the output instead of using 1/-1 vectors. It is more
          expensive to compute than the other three options by a factor equal
          to the output dimension, roughly speaking.
        * 'fisher_empirical' - computes the 'empirical' Fisher information
          matrix (which uses the data's distribution for the targets, as
          opposed to the true Fisher which uses the model's distribution) and
          requires that each registered loss have specified targets.
        * 'ggn_curvature_prop' - analogous to fisher_curvature_prop, but
          estimates the Generalized Gauss-Newton matrix (GGN).
        * 'ggn_exact' - analogous to fisher_exact, but estimates the
          Generalized Gauss-Newton matrix (GGN).
params_index: The index of the arguments accepted by `func` which
correspond to parameters.
layer_tag_to_block_cls: An optional dict mapping tags to specific classes
        of block approximations used to override the default ones.
"""
if estimation_mode not in ("fisher_gradients", "fisher_empirical",
"fisher_exact", "fisher_curvature_prop",
"ggn_exact", "ggn_curvature_prop"):
raise ValueError(f"Unrecognised estimation_mode={estimation_mode}.")
super().__init__()
self.tagged_func = tagged_func
self.l2_reg = l2_reg
self.estimation_mode = estimation_mode
self.params_index = params_index
self.vjp = tracer.trace_estimator_vjp(self.tagged_func)
    # Figure out the mapping from layer tags to block classes.
self.layer_tag_to_block_cls = curvature_blocks.copy_default_tag_to_block()
if layer_tag_to_block_cls is None:
layer_tag_to_block_cls = dict()
layer_tag_to_block_cls = dict(**layer_tag_to_block_cls)
self.layer_tag_to_block_cls.update(layer_tag_to_block_cls)
# Create the blocks
self._in_tree = jax.tree_structure(func_args)
self._jaxpr = jax.make_jaxpr(self.tagged_func)(*func_args).jaxpr
self._layer_tags, self._loss_tags = tracer.extract_tags(self._jaxpr)
self.blocks = collections.OrderedDict()
counters = dict()
for eqn in self._layer_tags:
cls = self.layer_tag_to_block_cls[eqn.primitive.name]
c = counters.get(cls.__name__, 0)
self.blocks[cls.__name__ + "_" + str(c)] = cls(eqn)
counters[cls.__name__] = c + 1
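  # A rough usage sketch (in this package these calls are normally driven by
  # the optimizer; `tagged_model`, `params`, `data`, `rng`, `grads` and
  # `batch_size` are placeholders supplied by the caller, and "i" is assumed to
  # be the pmap axis name of the surrounding computation):
  #
  #   estimator = CurvatureEstimator(tagged_model, (params, data), l2_reg=0.0)
  #   state = estimator.init(rng, init_damping=jnp.asarray(1e-3))
  #   estimator.set_state(state)
  #   estimator.update_curvature_matrix_estimate(
  #       ema_old=0.95, ema_new=0.05, batch_size=batch_size, rng=rng,
  #       func_args=(params, data), pmap_axis_name="i")
  #   preconditioned_grads = estimator.multiply_inverse(grads)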
@property
def diagonal_weight(self) -> jnp.ndarray:
return self.l2_reg + self.damping
def vectors_to_blocks(
self,
parameter_structured_vector: Any,
) -> Sequence[BlockVector]:
"""Splits the parameters to values for the corresponding blocks."""
in_vars = jax.tree_unflatten(self._in_tree, self._jaxpr.invars)
params_vars = in_vars[self.params_index]
params_vars_flat = jax.tree_flatten(params_vars)[0]
params_values_flat = jax.tree_flatten(parameter_structured_vector)[0]
assert len(params_vars_flat) == len(params_values_flat)
params_dict = dict(zip(params_vars_flat, params_values_flat))
per_block_vectors = []
for eqn in self._layer_tags:
if eqn.primitive.name == "generic_tag":
block_vars = eqn.invars
else:
block_vars = eqn.primitive.split_all_inputs(eqn.invars)[2]
per_block_vectors.append(tuple(params_dict.pop(v) for v in block_vars))
if params_dict:
raise ValueError(f"From the parameters the following structure is not "
f"assigned to any block: {params_dict}. Most likely "
f"this part of the parameters is not part of the graph "
f"reaching the losses.")
return tuple(per_block_vectors)
def blocks_to_vectors(self, per_block_vectors: Sequence[BlockVector]) -> Any:
"""Reverses the function self.vectors_to_blocks."""
in_vars = jax.tree_unflatten(self._in_tree, self._jaxpr.invars)
params_vars = in_vars[self.params_index]
assigned_dict = dict()
for eqn, block_values in zip(self._layer_tags, per_block_vectors):
if eqn.primitive.name == "generic_tag":
block_params = eqn.invars
else:
block_params = eqn.primitive.split_all_inputs(eqn.invars)[2]
assigned_dict.update(zip(block_params, block_values))
params_vars_flat, params_tree = jax.tree_flatten(params_vars)
params_values_flat = [assigned_dict[v] for v in params_vars_flat]
assert len(params_vars_flat) == len(params_values_flat)
return jax.tree_unflatten(params_tree, params_values_flat)
def init(
self,
rng: jnp.ndarray,
init_damping: Optional[jnp.ndarray],
) -> Mapping[str, Any]:
"""Returns an initialized variables for the curvature approximations and the inverses.."""
return dict(
blocks=collections.OrderedDict(
(name, block.init(block_rng)) #
for (name, block), block_rng #
in zip(self.blocks.items(), jnr.split(rng, len(self.blocks)))),
damping=init_damping)
@property
def mat_type(self) -> str:
return self.estimation_mode.split("_")[0]
def vec_block_apply(
self,
func: Callable[[_CurvatureBlock, BlockVector], BlockVector],
parameter_structured_vector: Any,
) -> Any:
"""Executes func for each approximation block on vectors."""
per_block_vectors = self.vectors_to_blocks(parameter_structured_vector)
assert len(per_block_vectors) == len(self.blocks)
results = jax.tree_multimap(func, tuple(self.blocks.values()),
per_block_vectors)
parameter_structured_result = self.blocks_to_vectors(results)
utils.check_structure_shapes_and_dtype(parameter_structured_vector,
parameter_structured_result)
return parameter_structured_result
def multiply_inverse(self, parameter_structured_vector: Any) -> Any:
"""Multiplies the vectors by the corresponding (damped) inverses of the blocks.
Args:
parameter_structured_vector: Structure equivalent to the parameters of the
model.
Returns:
      A structure identical to `parameter_structured_vector` containing the
      product.
"""
return self.multiply_matpower(parameter_structured_vector, -1)
def multiply(self, parameter_structured_vector: Any) -> Any:
"""Multiplies the vectors by the corresponding (damped) blocks.
Args:
parameter_structured_vector: A vector in the same structure as the
parameters of the model.
Returns:
      A structure identical to `parameter_structured_vector` containing the
      product.
"""
return self.multiply_matpower(parameter_structured_vector, 1)
def multiply_matpower(
self,
parameter_structured_vector: _StructureT,
exp: int,
) -> _StructureT:
"""Multiplies the vectors by the corresponding matrix powers of the blocks.
Args:
parameter_structured_vector: A vector in the same structure as the
parameters of the model.
      exp: An integer representing the power to raise the blocks to before
        multiplying them by the vector.
    Returns:
      A structure identical to `parameter_structured_vector` containing the
      product.
"""
def func(block: _CurvatureBlock, vec: BlockVector) -> BlockVector:
return block.multiply_matpower(vec, exp, self.diagonal_weight)
return self.vec_block_apply(func, parameter_structured_vector)
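  # For illustration (`grads` is a placeholder structured like the model
  # parameters): `multiply_inverse(grads)` is `multiply_matpower(grads, -1)`,
  # i.e. for each block C it applies (exactly for diagonal/full blocks,
  # approximately for Kronecker-factored ones)
  #   (C + (l2_reg + damping) * I)^-1
  # to the corresponding slice of `grads`, which is the usual K-FAC
  # preconditioning step.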
def update_curvature_matrix_estimate(
self,
ema_old: Union[float, jnp.ndarray],
ema_new: Union[float, jnp.ndarray],
batch_size: int,
rng: jnp.ndarray,
func_args: Sequence[Any],
pmap_axis_name: str,
) -> None:
"""Updates the curvature estimate."""
# Compute the losses and the VJP function from the function inputs
losses, losses_vjp = self.vjp(func_args)
# Helper function that updates the blocks given a vjp vector
def _update_blocks(vjp_vec_, ema_old_, ema_new_):
blocks_info_ = losses_vjp(vjp_vec_)
for block_, block_info_ in zip(self.blocks.values(), blocks_info_):
block_.update_curvature_matrix_estimate(
info=block_info_,
batch_size=batch_size,
ema_old=ema_old_,
ema_new=ema_new_,
pmap_axis_name=pmap_axis_name)
if self.estimation_mode == "fisher_gradients":
keys = jnr.split(rng, len(losses)) if len(losses) > 1 else [rng]
vjp_vec = tuple(
loss.grad_of_evaluate_on_sample(key, coefficient_mode="sqrt")
for loss, key in zip(losses, keys))
_update_blocks(vjp_vec, ema_old, ema_new)
elif self.estimation_mode in ("fisher_curvature_prop",
"ggn_curvature_prop"):
keys = jnr.split(rng, len(losses)) if len(losses) > 1 else [rng]
vjp_vec = []
for loss, key in zip(losses, keys):
if self.estimation_mode == "fisher_curvature_prop":
random_b = jnr.bernoulli(key, shape=loss.fisher_factor_inner_shape())
vjp_vec.append(loss.multiply_fisher_factor(random_b * 2.0 - 1.0))
else:
random_b = jnr.bernoulli(key, shape=loss.ggn_factor_inner_shape())
vjp_vec.append(loss.multiply_ggn_factor(random_b * 2.0 - 1.0))
_update_blocks(tuple(vjp_vec), ema_old, ema_new)
elif self.estimation_mode in ("fisher_exact", "ggn_exact"):
# We use the following trick to simulate summation. The equation is:
# estimate = ema_old * estimate + ema_new * (sum_i estimate_index_i)
# weight = ema_old * weight + ema_new
# Instead we update the estimate n times with the following updates:
# for k = 1
# estimate_k = ema_old * estimate + (ema_new/n) * (n*estimate_index_k)
# weight_k = ema_old * weight + (ema_new/n)
# for k > 1:
# estimate_k = 1.0 * estimate_k-1 + (ema_new/n) * (n*estimate_index_k)
# weight_k = 1.0 * weight_k-1 + (ema_new/n)
# Which is mathematically equivalent to the original version.
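      # For example, with n = 2 the two updates expand to:
      #   estimate_1 = ema_old * estimate + (ema_new/2) * (2*estimate_index_1)
      #   estimate_2 = 1.0 * estimate_1 + (ema_new/2) * (2*estimate_index_2)
      #              = ema_old * estimate
      #                + ema_new * (estimate_index_1 + estimate_index_2)
      # which matches the single summation update written above.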
zero_tangents = jax.tree_map(jnp.zeros_like,
list(loss.inputs for loss in losses))
if self.estimation_mode == "fisher_exact":
num_indices = [
            (l, int(np.prod(l.fisher_factor_inner_shape()[1:])))
            for l in losses
]
else:
num_indices = [
(l, int(np.prod(l.ggn_factor_inner_shape()))) for l in losses
]
total_num_indices = sum(n for _, n in num_indices)
for i, (loss, loss_num_indices) in enumerate(num_indices):
for index in range(loss_num_indices):
vjp_vec = zero_tangents.copy()
if self.estimation_mode == "fisher_exact":
vjp_vec[i] = loss.multiply_fisher_factor_replicated_one_hot([index])
else:
vjp_vec[i] = loss.multiply_ggn_factor_replicated_one_hot([index])
if isinstance(vjp_vec[i], jnp.ndarray):
# In the special case of only one parameter, it still needs to be a
# tuple for the tangents.
vjp_vec[i] = (vjp_vec[i],)
vjp_vec[i] = jax.tree_map(lambda x: x * total_num_indices, vjp_vec[i])
_update_blocks(tuple(vjp_vec), ema_old, ema_new / total_num_indices)
ema_old = 1.0
elif self.estimation_mode == "fisher_empirical":
raise NotImplementedError()
else:
raise ValueError(f"Unrecognised estimation_mode={self.estimation_mode}")
def update_curvature_estimate_inverse(
self,
pmap_axis_name: str,
state: _OptionalStateT,
) -> _OptionalStateT:
if state is not None:
old_state = self.get_state()
self.set_state(state)
for block in self.blocks.values():
block.update_curvature_inverse_estimate(self.diagonal_weight,
pmap_axis_name)
if state is None:
return None
else:
state = self.pop_state()
self.set_state(old_state)
return state
| deepmind-research-master | kfac_ferminet_alpha/estimator.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for all of the different curvature blocks."""
import abc
from typing import Any, Callable, Dict, Mapping, MutableMapping, Optional, Sequence, Union
import jax
from jax import core
import jax.numpy as jnp
from kfac_ferminet_alpha import tag_graph_matcher as tgm
from kfac_ferminet_alpha import utils
_Arrays = Sequence[jnp.ndarray]
_BlockInfo = Mapping[str, Any]
class CurvatureBlock(utils.Stateful, abc.ABC):
"""Top level class."""
def __init__(self, layer_tag_eq: tgm.jax_core.JaxprEqn):
super(CurvatureBlock, self).__init__()
self._layer_tag_eq = layer_tag_eq
@property
def layer_tag_primitive(self) -> tgm.tags.LayerTag:
assert isinstance(self._layer_tag_eq.primitive, tgm.tags.LayerTag)
return self._layer_tag_eq.primitive
@property
def outputs_shapes(self) -> Sequence[Sequence[int]]:
output_vars = self.layer_tag_primitive.split_all_inputs(
self._layer_tag_eq.invars)[0]
return jax.tree_map(lambda x: x.aval.shape, output_vars)
@property
def inputs_shapes(self) -> Sequence[Sequence[int]]:
input_vars = self.layer_tag_primitive.split_all_inputs(
self._layer_tag_eq.invars)[1]
return jax.tree_map(lambda x: x.aval.shape, input_vars)
@property
def params_shapes(self) -> Sequence[Sequence[int]]:
params_vars = self.layer_tag_primitive.split_all_inputs(
self._layer_tag_eq.invars)[2]
return jax.tree_map(lambda x: x.aval.shape, params_vars)
@abc.abstractmethod
def init(self, rng: jnp.ndarray) -> MutableMapping[str, Any]:
"""This initializes/creates all of the arrays for the state of the block.
Usually this would include the arrays used for storing the curvature
approximation, as well as the arrays for storing any approximate
inverses/powers of the curvature block.
Args:
rng: The Jax PRNG key to use if any of the state is supposed to be
initialized randomly.
Returns:
A mutable mapping of the state.
"""
@abc.abstractmethod
def update_curvature_matrix_estimate(
self,
info: _BlockInfo,
batch_size: int,
ema_old: Union[float, jnp.ndarray],
ema_new: Union[float, jnp.ndarray],
pmap_axis_name: str
) -> None:
pass
@abc.abstractmethod
def update_curvature_inverse_estimate(
self,
diagonal_weight: Union[float, jnp.ndarray],
pmap_axis_name: str
) -> None:
pass
@abc.abstractmethod
def multiply_matpower(
self,
vec: _Arrays,
exp: Union[float, int],
diagonal_weight: Union[float, jnp.ndarray]
) -> _Arrays:
pass
CurvatureBlockCtor = Callable[[core.JaxprEqn], CurvatureBlock]
@utils.Stateful.infer_class_state
class NaiveDiagonal(CurvatureBlock):
"""The naively estimated diagonal block."""
diagonal_factor: utils.WeightedMovingAverage
def init(self, rng: jnp.ndarray) -> Dict[str, Any]:
del rng
return dict(
diagonal_factor=utils.WeightedMovingAverage.zero(
self.outputs_shapes[0])
)
def update_curvature_matrix_estimate(
self,
info: _BlockInfo,
batch_size: int,
ema_old: Union[float, jnp.ndarray],
ema_new: Union[float, jnp.ndarray],
pmap_axis_name: str
) -> None:
dw, = info["outputs_tangent"]
diagonal_update = dw * dw / batch_size
self.diagonal_factor.update(diagonal_update, ema_old, ema_new)
self.diagonal_factor.sync(pmap_axis_name)
def update_curvature_inverse_estimate(
self,
diagonal_weight: Union[float, jnp.ndarray],
pmap_axis_name: str
) -> None:
pass
def multiply_matpower(
self,
vec: _Arrays,
exp: Union[float, int],
diagonal_weight: Union[float, jnp.ndarray]
) -> _Arrays:
w, = vec
if exp == 1:
return w * (self.diagonal_factor.value + diagonal_weight),
elif exp == -1:
return w / (self.diagonal_factor.value + diagonal_weight),
else:
raise NotImplementedError()
@utils.Stateful.infer_class_state
class TwoKroneckerFactored(CurvatureBlock, abc.ABC):
"""A factor that is the Kronecker product of two matrices."""
inputs_factor: utils.WeightedMovingAverage
inputs_factor_inverse: jnp.ndarray
outputs_factor: utils.WeightedMovingAverage
outputs_factor_inverse: jnp.ndarray
extra_scale: Optional[Union[int, float, jnp.ndarray]]
@property
def has_bias(self) -> bool:
return len(self._layer_tag_eq.invars) == 4
@abc.abstractmethod
def input_size(self) -> int:
pass
@abc.abstractmethod
def output_size(self) -> int:
pass
def compute_extra_scale(self) -> Optional[Union[int, float, jnp.ndarray]]:
return 1
def init(self, rng: jnp.ndarray) -> Dict[str, Any]:
# The extra scale is technically a constant, but in general it could be
# useful for anyone examining the state to know it explicitly,
# hence we actually keep it as part of the state.
d_in = self.input_size()
d_out = self.output_size()
return dict(
inputs_factor=utils.WeightedMovingAverage.zero([d_in, d_in]),
inputs_factor_inverse=jnp.zeros([d_in, d_in]),
outputs_factor=utils.WeightedMovingAverage.zero([d_out, d_out]),
outputs_factor_inverse=jnp.zeros([d_out, d_out]),
extra_scale=self.compute_extra_scale()
)
def update_curvature_inverse_estimate(
self,
diagonal_weight: Union[float, jnp.ndarray],
pmap_axis_name: str
) -> None:
self.inputs_factor.sync(pmap_axis_name)
self.outputs_factor.sync(pmap_axis_name)
# This computes the approximate inverse factor using the pi-adjusted
# inversion from the original KFAC paper.
# Note that the damping is divided by extra_scale since:
# (s * A kron B + lambda I)^-1 = s^-1 (A kron B + s^-1 * lambda I)^-1
# And the extra division by the scale is included in `multiply_matpower`.
(self.inputs_factor_inverse,
self.outputs_factor_inverse) = utils.pi_adjusted_inverse(
factor_0=self.inputs_factor.value,
factor_1=self.outputs_factor.value,
damping=diagonal_weight / self.extra_scale,
pmap_axis_name=pmap_axis_name)
def multiply_matpower(
self,
vec: _Arrays,
exp: Union[float, int],
diagonal_weight: Union[float, jnp.ndarray]
) -> _Arrays:
if self.has_bias:
w, b = vec
vec = jnp.concatenate([w.reshape([-1, w.shape[-1]]), b[None]], axis=0)
else:
w, = vec
vec = w.reshape([-1, w.shape[-1]])
if exp == 1:
inputs_factor, outputs_factor = (self.inputs_factor.value,
self.outputs_factor.value)
scale = self.extra_scale
elif exp == -1:
inputs_factor, outputs_factor = (self.inputs_factor_inverse,
self.outputs_factor_inverse)
scale = 1.0 / self.extra_scale
diagonal_weight = 0
else:
raise NotImplementedError()
result = jnp.matmul(inputs_factor, vec)
result = jnp.matmul(result, outputs_factor)
result = result * scale + diagonal_weight * vec
if self.has_bias:
w_new, b_new = result[:-1], result[-1]
return w_new.reshape(w.shape), b_new
else:
return result.reshape(w.shape),
class DenseTwoKroneckerFactored(TwoKroneckerFactored):
"""Factor for a standard dense layer."""
def input_size(self) -> int:
if self.has_bias:
return self.params_shapes[0][0] + 1
else:
return self.params_shapes[0][0]
def output_size(self) -> int:
return self.params_shapes[0][1]
def update_curvature_matrix_estimate(
self,
info: _BlockInfo,
batch_size: int,
ema_old: Union[float, jnp.ndarray],
ema_new: Union[float, jnp.ndarray],
pmap_axis_name: str
) -> None:
del pmap_axis_name
(x,), (dy,) = info["inputs"], info["outputs_tangent"]
utils.check_first_dim_is_batch_size(batch_size, x, dy)
if self.has_bias:
x_one = jnp.ones_like(x[:, :1])
x = jnp.concatenate([x, x_one], axis=1)
input_stats = jnp.matmul(x.T, x) / batch_size
output_stats = jnp.matmul(dy.T, dy) / batch_size
self.inputs_factor.update(input_stats, ema_old, ema_new)
self.outputs_factor.update(output_stats, ema_old, ema_new)
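# For reference, the update above maintains the standard K-FAC factors for a
# dense layer: `inputs_factor` holds a moving average of E[a a^T] (where `a`
# are the input activations, augmented with a constant 1 when the layer has a
# bias) and `outputs_factor` holds a moving average of E[g g^T] (where `g` are
# the backpropagated output tangents), so that the layer's curvature block is
# approximated by the Kronecker product E[a a^T] (x) E[g g^T].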
@utils.Stateful.infer_class_state
class ScaleAndShiftDiagonal(CurvatureBlock):
"""A scale and shift block with a diagonal approximation to the curvature."""
scale_factor: Optional[utils.WeightedMovingAverage]
shift_factor: Optional[utils.WeightedMovingAverage]
@property
def has_scale(self) -> bool:
return self._layer_tag_eq.params["has_scale"]
@property
def has_shift(self) -> bool:
return self._layer_tag_eq.params["has_shift"]
def init(self, rng: jnp.ndarray) -> Dict[str, Any]:
del rng
if self.has_scale and self.has_shift:
return dict(
scale_factor=utils.WeightedMovingAverage.zero(
self.params_shapes[0]
),
shift_factor=utils.WeightedMovingAverage.zero(
self.params_shapes[1]
)
)
elif self.has_scale:
return dict(
scale_factor=utils.WeightedMovingAverage.zero(
self.params_shapes[0]
),
shift_factor=None
)
elif self.has_shift:
return dict(
scale_factor=None,
shift_factor=utils.WeightedMovingAverage.zero(
self.params_shapes[0]
),
)
else:
raise ValueError("Neither `has_scale` nor `has_shift`.")
def update_curvature_matrix_estimate(
self,
info: _BlockInfo,
batch_size: int,
ema_old: Union[float, jnp.ndarray],
ema_new: Union[float, jnp.ndarray],
pmap_axis_name: str
) -> None:
(x,), (dy,) = info["inputs"], info["outputs_tangent"]
utils.check_first_dim_is_batch_size(batch_size, x, dy)
if self.has_scale:
assert self.scale_factor is not None
scale_shape = info["params"][0].shape
full_scale_shape = (1,) * (len(x.shape) - len(scale_shape)) + scale_shape
axis = [i for i, s in enumerate(full_scale_shape) if s == 1 and i != 0]
d_scale = jnp.sum(x * dy, axis=axis)
scale_diag_update = jnp.sum(d_scale * d_scale, axis=0) / batch_size
self.scale_factor.update(scale_diag_update, ema_old, ema_new)
self.scale_factor.sync(pmap_axis_name)
if self.has_shift:
assert self.shift_factor is not None
shift_shape = info["params"][1].shape
full_shift_shape = (1,) * (len(x.shape) - len(shift_shape)) + shift_shape
axis = [i for i, s in enumerate(full_shift_shape) if s == 1 and i != 0]
d_shift = jnp.sum(dy, axis=axis)
shift_diag_update = jnp.sum(d_shift * d_shift, axis=0) / batch_size
self.shift_factor.update(shift_diag_update, ema_old, ema_new)
self.shift_factor.sync(pmap_axis_name)
def update_curvature_inverse_estimate(
self,
diagonal_weight: Union[float, jnp.ndarray],
pmap_axis_name: str
) -> None:
pass
def multiply_matpower(
self,
vec: _Arrays,
exp: Union[float, int],
diagonal_weight: Union[float, jnp.ndarray]
) -> _Arrays:
if self.has_scale and self.has_shift:
factors = (self.scale_factor.value, self.shift_factor.value)
elif self.has_scale:
factors = (self.scale_factor.value,)
elif self.has_shift:
factors = (self.shift_factor.value,)
else:
raise ValueError("Neither `has_scale` nor `has_shift`.")
factors = jax.tree_map(lambda x: x + diagonal_weight, factors)
if exp == 1:
return jax.tree_multimap(jnp.multiply, vec, factors)
elif exp == -1:
return jax.tree_multimap(jnp.divide, vec, factors)
else:
raise NotImplementedError()
@utils.Stateful.infer_class_state
class ScaleAndShiftFull(CurvatureBlock):
"""A scale and shift block with full approximation to the curvature."""
factor: utils.WeightedMovingAverage
inverse_factor: jnp.ndarray
@property
def _has_scale(self) -> bool:
return self._layer_tag_eq.params["has_scale"]
@property
def _has_shift(self) -> bool:
return self._layer_tag_eq.params["has_shift"]
def init(self, rng: jnp.ndarray) -> Dict[str, Any]:
del rng
dims = sum(utils.product(shape) for shape in self.params_shapes)
return dict(
factor=utils.WeightedMovingAverage.zero([dims, dims]),
inverse_factor=jnp.zeros([dims, dims])
)
def update_curvature_matrix_estimate(
self,
info: _BlockInfo,
batch_size: int,
ema_old: Union[float, jnp.ndarray],
ema_new: Union[float, jnp.ndarray],
pmap_axis_name: str
) -> None:
del pmap_axis_name
(x,), (dy,) = info["inputs"], info["outputs_tangent"]
utils.check_first_dim_is_batch_size(batch_size, x, dy)
grads = list()
if self._has_scale:
# Scale gradients
scale_shape = info["params"][0].shape
full_scale_shape = (1,) * (len(x.shape) - len(scale_shape)) + scale_shape
axis = [i for i, s in enumerate(full_scale_shape) if s == 1 and i != 0]
d_scale = jnp.sum(x * dy, axis=axis)
d_scale = d_scale.reshape([batch_size, -1])
grads.append(d_scale)
if self._has_shift:
# Shift gradients
shift_shape = info["params"][1].shape
full_shift_shape = (1,) * (len(x.shape) - len(shift_shape)) + shift_shape
axis = [i for i, s in enumerate(full_shift_shape) if s == 1 and i != 0]
d_shift = jnp.sum(dy, axis=axis)
d_shift = d_shift.reshape([batch_size, -1])
grads.append(d_shift)
grads = jnp.concatenate(grads, axis=1)
factor_update = jnp.matmul(grads.T, grads) / batch_size
self.factor.update(factor_update, ema_old, ema_new)
def update_curvature_inverse_estimate(
self,
diagonal_weight: Union[float, jnp.ndarray],
pmap_axis_name: str
) -> None:
self.factor.sync(pmap_axis_name)
self.inverse_factor = utils.psd_inv_cholesky(self.factor.value,
diagonal_weight)
def multiply_matpower(
self,
vec: _Arrays,
exp: Union[float, int],
diagonal_weight: Union[float, jnp.ndarray]
) -> _Arrays:
# Remember the vector is a tuple of all parameters
if self._has_scale and self._has_shift:
flat_vec = jnp.concatenate([v.flatten() for v in vec])
else:
flat_vec = vec[0].flatten()
if exp == 1:
flat_result = (
jnp.matmul(self.factor.value, flat_vec) + diagonal_weight * flat_vec)
elif exp == -1:
flat_result = jnp.matmul(self.inverse_factor, flat_vec)
else:
raise NotImplementedError()
if self._has_scale and self._has_shift:
scale_dims = int(vec[0].size)
scale_result = flat_result[:scale_dims].reshape(vec[0].shape)
shift_result = flat_result[scale_dims:].reshape(vec[1].shape)
return scale_result, shift_result
else:
      return flat_result.reshape(vec[0].shape),
_default_tag_to_block: MutableMapping[str, CurvatureBlockCtor] = dict(
dense_tag=DenseTwoKroneckerFactored,
generic_tag=NaiveDiagonal,
scale_and_shift_tag=ScaleAndShiftDiagonal,
)
def copy_default_tag_to_block() -> MutableMapping[str, CurvatureBlockCtor]:
return dict(_default_tag_to_block)
def get_default_tag_to_block(tag_name: str) -> CurvatureBlockCtor:
return _default_tag_to_block[tag_name]
def set_default_tag_to_block(
tag_name: str,
block_class: CurvatureBlockCtor,
) -> None:
_default_tag_to_block[tag_name] = block_class
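# A hedged usage sketch (`MyDenseBlock` is a hypothetical subclass, not part of
# this module): the default mapping can be overridden before building an
# estimator so that, for example, all dense layers use a custom curvature
# block:
#
#   class MyDenseBlock(TwoKroneckerFactored):
#     ...
#
#   set_default_tag_to_block("dense_tag", MyDenseBlock)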
| deepmind-research-master | kfac_ferminet_alpha/curvature_blocks.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for the Jax tracer functionality for tags."""
import functools
from typing import Any, Callable, Sequence, Tuple
import jax
from jax import core
from jax import util as jax_util
import jax.numpy as jnp
from kfac_ferminet_alpha import layers_and_loss_tags as tags
from kfac_ferminet_alpha import tag_graph_matcher as tgm
from kfac_ferminet_alpha import utils
_Function = Callable[[Any], Any]
_Loss = tags.LossTag
def extract_tags(
jaxpr: core.Jaxpr
) -> Tuple[Sequence[core.JaxprEqn], Sequence[core.JaxprEqn]]:
"""Extracts all of the tag equations."""
# Loop through equations and evaluate primitives using `bind`
layer_tags = []
loss_tags = []
for eqn in jaxpr.eqns:
if isinstance(eqn.primitive, tags.LossTag):
loss_tags.append(eqn)
elif isinstance(eqn.primitive, tags.LayerTag):
layer_tags.append(eqn)
return tuple(layer_tags), tuple(loss_tags)
def construct_compute_losses_inputs(
jaxpr: core.Jaxpr,
consts: Tuple[Any],
num_losses: int,
primals: Any,
params_index: int) -> Callable[[Any], Sequence[Sequence[jnp.ndarray]]]:
"""Constructs a function that computes all of the inputs to all losses."""
primals_ = list(primals)
def forward_compute_losses(
params_primals: Any,
) -> Sequence[Sequence[jnp.ndarray]]:
primals_[params_index] = params_primals
flat_args = jax.tree_flatten(primals_)[0]
# Mapping from variable -> value
env = dict()
read = functools.partial(tgm.read_env, env)
write = functools.partial(tgm.write_env, env)
# Bind args and consts to environment
write(jax.core.unitvar, jax.core.unit)
jax_util.safe_map(write, jaxpr.invars, flat_args)
jax_util.safe_map(write, jaxpr.constvars, consts)
# Loop through equations and evaluate primitives using `bind`
losses_so_far = 0
loss_tags = []
for eqn in jaxpr.eqns:
tgm.evaluate_eqn(eqn, jax_util.safe_map(read, eqn.invars), write)
if isinstance(eqn.primitive, tags.LossTag):
loss_tags.append(eqn)
losses_so_far += 1
if num_losses is not None and losses_so_far == num_losses:
break
return tuple(tuple(read(v) for v in tag.invars) for tag in loss_tags)
# return tuple(jax_util.safe_map(read, tag.invars) for tag in loss_tags)
return forward_compute_losses
# We know when `.primitive` will be either a `LossTag` or a `LayerTag`, however
# pytype cannot infer its subclass, so we need to unbox it.
def _unbox_loss_tag(jaxpr_eqn: core.JaxprEqn) -> tags.LossTag:
assert isinstance(jaxpr_eqn.primitive, tags.LossTag)
return jaxpr_eqn.primitive
def _unbox_layer_tag(jaxpr_eqn: core.JaxprEqn) -> tags.LayerTag:
assert isinstance(jaxpr_eqn.primitive, tags.LayerTag)
return jaxpr_eqn.primitive
def trace_losses_matrix_vector_vjp(tagged_func: _Function,
params_index: int = 0):
"""Returns the Jacobian-transposed vector product (backward mode) function in equivalent form to jax.vjp."""
def vjp(*primals):
typed_jaxpr = jax.make_jaxpr(tagged_func)(*primals)
jaxpr, consts = typed_jaxpr.jaxpr, typed_jaxpr.literals
_, loss_jaxpr_eqns = extract_tags(jaxpr)
n = len(loss_jaxpr_eqns)
losses_func = construct_compute_losses_inputs(
jaxpr, consts, n, primals, params_index)
losses_inputs, full_vjp_func = jax.vjp(losses_func, primals[params_index])
losses = []
for jaxpr_eqn, inputs in zip(loss_jaxpr_eqns, losses_inputs):
loss_tag = _unbox_loss_tag(jaxpr_eqn)
losses.append(loss_tag.loss(*inputs, weight=jaxpr_eqn.params["weight"]))
losses = tuple(losses)
def vjp_func(tangents):
flat_tangents = jax.tree_flatten(tangents)[0]
loss_invars = []
loss_targets = []
for jaxpr_eqn, inputs in zip(loss_jaxpr_eqns, losses_inputs):
num_inputs = _unbox_loss_tag(jaxpr_eqn).num_inputs
loss_invars.append(tuple(jaxpr_eqn.invars[:num_inputs]))
loss_targets.append(inputs[num_inputs:])
treedef = jax.tree_structure(loss_invars)
tangents = jax.tree_unflatten(treedef, flat_tangents)
      # Since the losses can also take targets as inputs, and we don't want
      # this function to compute the vjp w.r.t. those (i.e. the user should
      # not be providing tangent vectors for the targets, only for the
      # inputs), we have to manually fill in these "extra" tangents with
      # zeros.
targets_tangents = jax.tree_map(jnp.zeros_like, loss_targets)
tangents = tuple(ti + tti for ti, tti in zip(tangents, targets_tangents))
input_tangents = full_vjp_func(tangents)[0]
return input_tangents,
return losses, vjp_func
return vjp
def trace_losses_matrix_vector_jvp(
tagged_func: _Function,
params_index: int = 0):
"""Returns the Jacobian vector product (forward mode) function in equivalent form to jax.jvp."""
def jvp(primals, params_tangents):
typed_jaxpr = jax.make_jaxpr(tagged_func)(*primals)
jaxpr, consts = typed_jaxpr.jaxpr, typed_jaxpr.literals
_, loss_tags = extract_tags(jaxpr)
n = len(loss_tags)
losses_func = construct_compute_losses_inputs(jaxpr, consts, n,
primals, params_index)
primals = (primals[params_index],)
tangents = (params_tangents,)
(primals_out, tangents_out) = jax.jvp(losses_func, primals, tangents)
tangents_out = tuple(tuple(t[:tag.primitive.num_inputs])
for t, tag in zip(tangents_out, loss_tags))
losses = tuple(tag.primitive.loss(*inputs, weight=tag.params["weight"])
for tag, inputs in zip(loss_tags, primals_out))
return losses, tangents_out
return jvp
def trace_losses_matrix_vector_hvp(tagged_func, params_index=0):
"""Returns the Hessian vector product function of **the tagged losses**, rather than the output value of `tagged_func`."""
# The function uses backward-over-forward mode.
def hvp(primals, params_tangents):
typed_jaxpr = jax.make_jaxpr(tagged_func)(*primals)
jaxpr, consts = typed_jaxpr.jaxpr, typed_jaxpr.literals
_, loss_tags = extract_tags(jaxpr)
n = len(loss_tags)
losses_func = construct_compute_losses_inputs(
jaxpr, consts, n, primals, params_index)
def losses_sum(param_primals):
loss_inputs = losses_func(param_primals)
losses = [
_unbox_loss_tag(jaxpr_eqn).loss(
*inputs, weight=jaxpr_eqn.params["weight"])
for jaxpr_eqn, inputs in zip(loss_tags, loss_inputs)
]
# This computes the sum of losses evaluated. Makes it easier as we can
# now use jax.grad rather than jax.vjp for taking derivatives.
return sum(jnp.sum(loss.evaluate(None)) for loss in losses)
def grads_times_tangents(params_primals):
grads = jax.grad(losses_sum)(params_primals)
return utils.inner_product(grads, params_tangents)
return jax.grad(grads_times_tangents)(primals[params_index])
return hvp
def trace_estimator_vjp(tagged_func: _Function) -> _Function:
"""Creates the function needed for an estimator of curvature matrices.
  Args:
    tagged_func: A function that has been annotated with tags for both layers
      and losses.
  Returns:
    A function with the same signature as `tagged_func`, which when provided
    with inputs returns two things:
    1. The instances of all loss objects that are tagged.
    2. A second function, which when provided with tangent vectors for each
      of the loss instances' inputs, returns for every tagged layer a
      dictionary containing the following elements:
        inputs - The primal values of the inputs to the layer.
        outputs - The primal values of the outputs of the layer.
        params - The primal values of the parameters of the layer.
        inputs_tangent - The tangent values of the inputs to the layer, given
          the provided tangents of the losses.
        outputs_tangent - The tangent values of the outputs of the layer,
          given the provided tangents of the losses.
        params_tangent - The tangent values of the parameters of the layer,
          given the provided tangents of the losses.
  """
def full_vjp_func(func_args):
# Trace the tagged function
typed_jaxpr = jax.make_jaxpr(tagged_func)(*func_args)
jaxpr, consts = typed_jaxpr.jaxpr, typed_jaxpr.literals
layer_tags, loss_tags = extract_tags(jaxpr)
layer_vars_flat = jax.tree_flatten([tag.invars for tag in layer_tags])[0]
layer_input_vars = tuple(set(layer_vars_flat))
def forward():
own_func_args = func_args
# Mapping from variable -> value
env = dict()
read = functools.partial(tgm.read_env, env)
write = functools.partial(tgm.write_env, env)
# Bind args and consts to environment
write(jax.core.unitvar, jax.core.unit)
jax_util.safe_map(write, jaxpr.invars, jax.tree_flatten(own_func_args)[0])
jax_util.safe_map(write, jaxpr.constvars, consts)
# Loop through equations and evaluate primitives using `bind`
num_losses_passed = 0
for eqn in jaxpr.eqns:
tgm.evaluate_eqn(eqn, jax_util.safe_map(read, eqn.invars), write)
if isinstance(eqn.primitive, tags.LossTag):
num_losses_passed += 1
if num_losses_passed == len(loss_tags):
break
if num_losses_passed != len(loss_tags):
raise ValueError("This should be unreachable.")
return jax_util.safe_map(read, layer_input_vars)
def forward_aux(aux):
own_func_args = func_args
# Mapping from variable -> value
env = dict()
read = functools.partial(tgm.read_env, env)
def write(var, val):
if not isinstance(var, (jax.core.Literal, jax.core.UnitVar)):
val = val + aux[var] if var in aux else val
env[var] = val
# Bind args and consts to environment
write(jax.core.unitvar, jax.core.unit)
jax_util.safe_map(write, jaxpr.invars, jax.tree_flatten(own_func_args)[0])
jax_util.safe_map(write, jaxpr.constvars, consts)
# Loop through equations and evaluate primitives using `bind`
num_losses_passed = 0
losses_inputs_values = []
losses_kwargs_values = []
for eqn in jaxpr.eqns:
input_values = jax_util.safe_map(read, eqn.invars)
tgm.evaluate_eqn(eqn, input_values, write)
if isinstance(eqn.primitive, tags.LossTag):
loss = eqn.primitive.loss(*input_values, weight=eqn.params["weight"])
losses_inputs_values.append(loss.inputs)
losses_kwargs_values.append(dict(
targets=loss.targets,
weight=eqn.params["weight"]
))
num_losses_passed += 1
if num_losses_passed == len(loss_tags):
break
if num_losses_passed != len(loss_tags):
raise ValueError("This should be unreachable.")
# Read the inputs to the loss functions, but also return the target values
return tuple(losses_inputs_values), tuple(losses_kwargs_values)
layer_input_values = forward()
primals_dict = dict(zip(layer_input_vars, layer_input_values))
primals_dict.update(zip(jaxpr.invars, jax.tree_flatten(func_args)[0]))
aux_values = jax.tree_map(jnp.zeros_like, layer_input_values)
aux_dict = dict(zip(layer_input_vars, aux_values))
losses_args, aux_vjp, losses_kwargs = jax.vjp(forward_aux, aux_dict,
has_aux=True)
losses = tuple(tag.primitive.loss(*inputs, **kwargs)
for tag, inputs, kwargs in
zip(loss_tags, losses_args, losses_kwargs))
def vjp_func(tangents):
all_tangents = aux_vjp(tangents)
tangents_dict, inputs_tangents = all_tangents[0], all_tangents[1:]
inputs_tangents = jax.tree_flatten(inputs_tangents)[0]
tangents_dict.update(zip(jaxpr.invars, inputs_tangents))
read_primals = functools.partial(tgm.read_env, primals_dict)
read_tangents = functools.partial(tgm.read_env, tangents_dict)
layers_info = []
for jaxpr_eqn in layer_tags:
layer_tag = _unbox_layer_tag(jaxpr_eqn)
info = dict()
primals = jax_util.safe_map(read_primals, tuple(jaxpr_eqn.invars))
(
info["outputs"],
info["inputs"],
info["params"],
) = layer_tag.split_all_inputs(primals)
tangents = jax_util.safe_map(read_tangents, tuple(jaxpr_eqn.invars))
(
info["outputs_tangent"],
info["inputs_tangent"],
info["params_tangent"],
) = layer_tag.split_all_inputs(tangents)
layers_info.append(info)
return tuple(layers_info)
return losses, vjp_func
return full_vjp_func
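# A hedged usage sketch mirroring how the estimator consumes this function
# (`tagged_model`, `params`, `data` and `keys` are placeholders):
#
#   losses, vjp_func = trace_estimator_vjp(tagged_model)((params, data))
#   loss_tangents = tuple(
#       loss.grad_of_evaluate_on_sample(key, coefficient_mode="sqrt")
#       for loss, key in zip(losses, keys))
#   layers_info = vjp_func(loss_tangents)
#
# `layers_info` is a tuple with one dictionary per tagged layer, containing the
# primal and tangent values described in the docstring above.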
| deepmind-research-master | kfac_ferminet_alpha/tracer.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module for tagging and graph manipulation."""
import collections
import functools
import itertools
from typing import Any, NamedTuple, Sequence
from absl import logging
import jax
from jax import core as jax_core
from jax import lax
from jax import util as jax_util
from jax.interpreters import partial_eval as pe
import jax.numpy as jnp
import networkx as nx
from networkx.algorithms import isomorphism
import numpy as np
import ordered_set
from kfac_ferminet_alpha import layers_and_loss_tags as tags
USE_NETWORKX = False
def match_nodes(g1, g2, mapping, node1, node2):
"""Matching nodes when doing graph search."""
if not kfac_node_match(g1.nodes[node1], g2.nodes[node2]):
return False
# Check predecessors
p1 = set(n for n in g1.predecessors(node1) if n in mapping.keys())
p2 = set(n for n in g2.predecessors(node2) if n in mapping.values())
if len(p1) != len(p2):
return False
for p1_i in p1:
if mapping[p1_i] not in p2:
return False
# Check successors
s1 = set(n for n in g1.successors(node1) if n in mapping.keys())
s2 = set(n for n in g2.successors(node2) if n in mapping.values())
if len(s1) != len(s2):
return False
for s1_i in s1:
if mapping[s1_i] not in s2:
return False
return True
def generate_candidates(g1, g2, mapping, node1, node2):
"""Generates the initial candidates for graph search."""
# Check predecessors
p1 = set(n for n in g1.predecessors(node1) if n not in mapping.keys())
p2 = set(n for n in g2.predecessors(node2) if n not in mapping.values())
candidates = ordered_set.OrderedSet(itertools.product(p1, p2))
s1 = set(n for n in g1.successors(node1) if n not in mapping.keys())
s2 = set(n for n in g2.successors(node2) if n not in mapping.values())
candidates.update(list(itertools.product(s1, s2)))
return candidates
def find_mappings(pattern, graph, mapping, terminals):
"""Finds all mappings from graph search of the pattern."""
if len(mapping) == len(pattern):
for k, v in terminals.items():
v.add(mapping[k])
return [frozenset(mapping.items())]
mappings = set()
nodes_list = list(mapping.keys())
for node1 in reversed(nodes_list):
for s1 in pattern.successors(node1):
if s1 not in mapping.keys():
for s2 in graph.successors(mapping[node1]):
if s2 not in mapping.values():
if s1 not in terminals or s2 not in terminals[s1]:
if match_nodes(pattern, graph, mapping, s1, s2):
mapping[s1] = s2
mappings.update(
find_mappings(pattern, graph, mapping, terminals))
mapping.pop(s1)
for p1 in pattern.predecessors(node1):
if p1 not in mapping.keys():
for p2 in graph.predecessors(mapping[node1]):
if p2 not in mapping.values():
if p1 not in terminals or p2 not in terminals[p1]:
if match_nodes(pattern, graph, mapping, p1, p2):
mapping[p1] = p2
mappings.update(
find_mappings(pattern, graph, mapping, terminals))
mapping.pop(p1)
return mappings
def match_pattern(pattern, graph):
"""Given a pattern returns all matches inside the graph."""
if USE_NETWORKX:
matcher = isomorphism.GraphMatcher(
graph, pattern, node_match=kfac_node_match)
mappings = list(
dict((k, v)
for v, k in mapping.items())
for mapping in matcher.subgraph_isomorphisms_iter())
else:
mapping = collections.OrderedDict()
params1 = [n for n in pattern.nodes if pattern.nodes[n]["op"] == "param"]
params2 = [n for n in graph.nodes if graph.nodes[n]["op"] == "param"]
terminals = {
n: set() for n in pattern.nodes if not list(pattern.successors(n))
}
mappings = set()
for node1, node2 in itertools.product(params1, params2):
mapping[node1] = node2
mappings.update(find_mappings(pattern, graph, mapping, terminals))
mapping.pop(node1)
for v in terminals.values():
v.clear()
mappings = list(dict(mapping) for mapping in mappings)
var_mappings = []
for mapping in mappings:
var_mappings.append(dict())
for k, v in mapping.items():
cond = pattern.nodes[k]["op"] in ("param", "array")
source = pattern.nodes[k]["var"] if cond else k
target = graph.nodes[v]["var"] if cond else graph.nodes[v]["eqn"]
var_mappings[-1][source] = target
return var_mappings
def read_env(env, var):
# Literals are values baked into the Jaxpr
if isinstance(var, jax.core.Literal):
return var.val
return env[var]
def write_env(env, var, val):
env[var] = val
def abstract_single_value(value):
if isinstance(value, jnp.ndarray):
value = jax.ShapedArray(np.shape(value), np.result_type(value))
return pe.PartialVal.unknown(value)
else:
return value
def abstract_args(args):
return jax.tree_map(abstract_single_value, args)
def evaluate_eqn(eqn, in_values, write_func):
"""Evaluate a single Jax equation and writes the outputs."""
in_values = list(in_values)
# This is logic specifically to handle `xla_call`
call_jaxpr, params = jax.core.extract_call_jaxpr(eqn.primitive, eqn.params)
if call_jaxpr:
subfuns = [
jax.core.lu.wrap_init(
functools.partial(jax.core.eval_jaxpr, call_jaxpr, ()))
]
else:
subfuns = []
ans = eqn.primitive.bind(*(subfuns + in_values), **params)
if eqn.primitive.multiple_results:
jax_util.safe_map(write_func, eqn.outvars, ans)
else:
write_func(eqn.outvars[0], ans)
return ans
def clean_jaxpr_eqns(jaxpr, preserve_tags=True):
"""Performs dead code elimination on the jaxpr, preserving loss and layer tags."""
eqns = []
dependants = set(jaxpr.outvars)
for eqn in reversed(jaxpr.eqns):
check = False
for v in eqn.outvars:
if v in dependants:
dependants.remove(v)
check = True
if isinstance(eqn.primitive, (tags.LossTag, tags.LayerTag)):
check = check or preserve_tags
if check:
eqns.append(eqn)
new_dependants = set(
v for v in eqn.invars if not isinstance(v, jax_core.Literal))
dependants = dependants.union(new_dependants)
# Dependants should only be invars
dependants = dependants - set(jaxpr.invars + jaxpr.constvars)
if dependants:
raise ValueError("Something went wrong with the dead code elimination.")
return reversed(eqns)
def broadcast_merger(f):
"""Transforms `f` into a function where all consecutive broadcasts are merged."""
def merged_func(*func_args):
typed_jaxpr, out_avals = jax.make_jaxpr(f, return_shape=True)(*func_args)
out_tree = jax.tree_structure(out_avals)
jaxpr, consts = typed_jaxpr.jaxpr, typed_jaxpr.literals
# Mapping from variable -> value
env = dict()
read = functools.partial(read_env, env)
write = functools.partial(write_env, env)
# Bind args and consts to environment
flat_args = jax.tree_flatten(func_args)[0]
write(jax.core.unitvar, jax.core.unit)
jax_util.safe_map(write, jaxpr.invars, flat_args)
jax_util.safe_map(write, jaxpr.constvars, consts)
# Loop through equations and evaluate primitives using `bind`
broadcasts_outputs = dict()
for eqn in clean_jaxpr_eqns(jaxpr):
# We ignore broadcasting of constants
if (eqn.primitive.name == "broadcast_in_dim" and
not all(isinstance(v, jax_core.Literal) for v in eqn.invars)):
if eqn.invars[0] in broadcasts_outputs:
x, dims = broadcasts_outputs[eqn.invars[0]]
kept_dims = eqn.params["broadcast_dimensions"]
kept_dims = [kept_dims[d] for d in dims]
y = lax.broadcast_in_dim(x, eqn.params["shape"], kept_dims)
jax_util.safe_map(write, eqn.outvars, [y])
broadcasts_outputs[eqn.outvars[0]] = (x, kept_dims)
else:
inputs = jax_util.safe_map(read, eqn.invars)
evaluate_eqn(eqn, inputs, write)
broadcasts_outputs[eqn.outvars[0]] = (
inputs[0], eqn.params["broadcast_dimensions"])
else:
evaluate_eqn(eqn, jax_util.safe_map(read, eqn.invars), write)
return jax.tree_unflatten(out_tree, jax_util.safe_map(read, jaxpr.outvars))
return merged_func
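# Note: "merging" here means that a broadcast_in_dim applied to the output of
# another broadcast_in_dim is re-expressed as a single broadcast_in_dim of the
# original operand, which keeps the traced graph simpler for the pattern
# matching performed by `auto_register_tags` below.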
class JaxGraph(NamedTuple):
jaxpr: Any
consts: Any
params: Any
params_tree: Any
in_tree: Any
out_tree: Any
digraph: nx.DiGraph
tagging_func: Any
SPECIAL_OP_COMPARE_RULES = dict()
def default_compare(node1, node2):
if node1["op"] != node2["op"]:
return False
params1, params2 = node1["eqn"].params, node2["eqn"].params
if set(params1.keys()) != set(params2.keys()):
return False
for k in params1.keys():
if params1[k] != params2[k]:
return False
return True
def reshape_compare(node1, node2):
"""Compares two reshape nodes."""
assert node1["op"] == node2["op"] == "reshape"
params1, params2 = node1["eqn"].params, node2["eqn"].params
if params1["dimensions"] != params2["dimensions"]:
return False
return True
def broadcast_in_dim_compare(node1, node2):
"""Compares two reshape nodes."""
assert node1["op"] == node2["op"] == "broadcast_in_dim"
return True
def conv_compare(node1, node2):
"""Compares two conv_general_dialted nodes."""
assert node1["op"] == node2["op"] == "conv_general_dilated"
params1, params2 = node1["eqn"].params, node2["eqn"].params
for k in ("window_strides", "padding", "lhs_dilation", "rhs_dilation",
"lhs_shape", "rhs_shape"):
if len(params1[k]) != len(params2[k]):
return False
if (len(params1["dimension_numbers"].lhs_spec) != #
len(params2["dimension_numbers"].lhs_spec)):
return False
if (len(params1["dimension_numbers"].rhs_spec) != #
len(params2["dimension_numbers"].rhs_spec)):
return False
if (len(params1["dimension_numbers"].out_spec) != #
len(params2["dimension_numbers"].out_spec)):
return False
if ((params1["feature_group_count"] > 1) != #
(params2["feature_group_count"] > 1)):
return False
if ((params1["batch_group_count"] > 1) != #
(params2["batch_group_count"] > 1)):
return False
return True
SPECIAL_OP_COMPARE_RULES["reshape"] = reshape_compare
SPECIAL_OP_COMPARE_RULES["broadcast_in_dim"] = broadcast_in_dim_compare
SPECIAL_OP_COMPARE_RULES["conv_general_dilated"] = conv_compare
def kfac_node_match(node1, node2):
"""Checks if two nodes are equivalent."""
# Parameters match with each other and nothing else
if node1["op"] == "param" and node2["op"] == "param":
return True
# return node1["rank"] == node2["rank"]
if node1["op"] == "param" or node2["op"] == "param":
return False
# Arrays always match each other and nothing else
if node1["op"] == "array" and node2["op"] == "array":
return True
if node1["op"] == "array" or node2["op"] == "array":
return False
# Operators match first on name
if node1["op"] != node2["op"]:
return False
compare = SPECIAL_OP_COMPARE_RULES.get(node1["op"], default_compare)
return compare(node1, node2)
def var_to_str(var):
"""Returns a string representation of the variable of a Jax expression."""
if isinstance(var, jax.core.Literal):
return str(var)
elif isinstance(var, jax.core.UnitVar):
return "*"
elif not isinstance(var, jax.core.Var):
raise ValueError(f"Idk what to do with this {type(var)}?")
c = int(var.count)
if c == -1:
return "_"
str_rep = ""
while c > 25:
str_rep += chr(c % 26 + ord("a"))
c = c // 26
str_rep += chr(c + ord("a"))
return str_rep[::-1]
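# Worked examples of the encoding above: counts 0..25 map to "a".."z", larger
# counts produce a reversed base-26 string (e.g. 26 -> "ba", 27 -> "bb"), and a
# count of -1 is rendered as "_".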
def extract_param_vars_flat(jaxpr, in_tree, params_index):
if params_index is None:
params_index = []
elif isinstance(params_index, int):
params_index = [params_index]
in_vars = jax.tree_unflatten(in_tree, jaxpr.invars)
return jax.tree_flatten([in_vars[i] for i in params_index])
def fill_jaxpr_to_graph(graph, jaxpr, in_vars=None, out_vars=None):
"""Fills the graph with the jaxpr."""
in_vars = in_vars or [var_to_str(v) for v in jaxpr.invars + jaxpr.constvars]
in_map = dict(zip(jaxpr.invars + jaxpr.constvars, in_vars))
out_vars = out_vars or [var_to_str(v) for v in jaxpr.outvars]
out_map = dict(zip(jaxpr.outvars, out_vars))
for eqn in jaxpr.eqns:
in_vars = []
for v in eqn.invars:
if isinstance(v, (jax.core.Literal, jax.core.UnitVar)):
in_vars.append(var_to_str(v))
else:
in_vars.append(in_map.get(v, var_to_str(v)))
out_vars = [out_map.get(v, var_to_str(v)) for v in eqn.outvars]
in_str = ",".join(in_vars)
out_str = ",".join(out_vars)
if isinstance(eqn.primitive, tags.LossTag):
func_name = "__loss_tag"
elif isinstance(eqn.primitive, tags.LayerTag):
func_name = "__layer_tag"
else:
func_name = eqn.primitive.name
node_c = f"{func_name}({in_str})->{out_str}"
graph.add_node(node_c, op=eqn.primitive.name, eqn=eqn)
# Create incoming edges
for v, name in zip(eqn.invars, in_vars):
if (not isinstance(v, jax.core.Literal) and
not isinstance(v, jax.core.UnitVar)):
graph.add_edge(name, node_c)
# Create output nodes and edges
for v, name in zip(eqn.outvars, out_vars):
graph.add_node(name, op="array", var=v)
graph.add_edge(node_c, name)
def create_digraph(jaxpr, params):
"""Creates a directed graph from the given jaxpr and parameters."""
graph = nx.DiGraph()
# Create input nodes
for v in jaxpr.invars + jaxpr.constvars:
if v in params:
graph.add_node(var_to_str(v), op="param", var=v)
else:
graph.add_node(var_to_str(v), op="array", var=v)
fill_jaxpr_to_graph(graph, jaxpr)
return graph
def function_to_jax_graph(func, args, params_index, tagging_func=None):
"""Creates a `JaxGraph` instance from the provided function."""
in_tree = jax.tree_structure(args)
typed_jaxpr = jax.make_jaxpr(func)(*args)
jaxpr, consts = typed_jaxpr.jaxpr, typed_jaxpr.literals
params, params_tree = extract_param_vars_flat(jaxpr, in_tree, params_index)
digraph = create_digraph(jaxpr, params)
if tagging_func is not None:
tagging_func = functools.partial(tagging_func, jaxpr)
return JaxGraph(
jaxpr=jaxpr,
consts=consts,
params=params,
params_tree=params_tree,
in_tree=in_tree,
out_tree=None,
digraph=digraph,
tagging_func=tagging_func)
def print_nice_jaxpr(jaxpr):
for eqn in jaxpr.eqns:
print(tuple(eqn.invars), "->", eqn.primitive.name, tuple(eqn.outvars))
def auto_register_tags(func,
func_args,
params_index: int = 0,
register_only_generic: bool = False,
compute_only_loss_tags: bool = True,
patterns_to_skip: Sequence[str] = ()):
"""Transform the function to one that is populated with tags."""
func = broadcast_merger(func)
graph = function_to_jax_graph(func, func_args, params_index=params_index)
matches = dict()
# Extract the tagged losses variables and all their ancestors
loss_output_vars = []
num_losses = 0
loss_ancestors = set()
for node in graph.digraph.nodes:
if node.startswith("__loss_tag"):
num_losses += 1
ancestors = nx.ancestors(graph.digraph, node)
ancestors.add(node)
for output_node in node.split("->")[-1].split(","):
ancestors.add(output_node)
loss_output_vars.append(graph.digraph.nodes[output_node]["var"])
loss_ancestors = loss_ancestors.union(ancestors)
loss_output_vars = tuple(loss_output_vars)
# Extract the sub-graph that leads to losses
sub_graph = nx.induced_subgraph(graph.digraph, loss_ancestors)
# First collect all parameters that are already part of a layer tag
tagged_params = dict()
pattern_counters = dict()
for tag_node in (
node for node in sub_graph.nodes if node.startswith("__layer_tag")):
inputs = graph.digraph.nodes[tag_node]["eqn"].invars
tag_instance = graph.digraph.nodes[tag_node]["eqn"].primitive
if tag_instance.name == "generic_tag":
tag_params = tag_instance.split_all_inputs(inputs)[0]
else:
tag_params = tag_instance.split_all_inputs(inputs)[2]
pattern_number = pattern_counters.get(tag_instance.name, 0)
for param in tag_params:
if param not in graph.params:
raise ValueError(f"You have registered a layer tag with parameter "
f"that is not part of the parameters at index "
f"{params_index}.")
if param in tagged_params:
raise ValueError(f"You have registered twice the parameter {param}.")
tagged_params[param] = f"Manual[{tag_instance.name}_{pattern_number}]"
if tag_instance.name not in pattern_counters:
pattern_counters[tag_instance.name] = 1
else:
pattern_counters[tag_instance.name] += 1
if not register_only_generic:
for pattern_name, patterns in get_graph_patterns():
if pattern_name in patterns_to_skip:
logging.info("Skipping graph pattern %s", pattern_name)
continue
logging.info("Matching graph pattern %s", pattern_name)
for pattern in patterns:
for match_map in match_pattern(pattern.digraph, sub_graph):
if len(pattern.jaxpr.outvars) > 1:
raise NotImplementedError()
output = pattern.jaxpr.outvars[0]
if matches.get(match_map[output]) is not None:
raise ValueError(f"Found more than one match for equation "
f"{match_map[output]}. Examine the jaxpr:\n "
f"{graph.jaxpr}")
# Mark the parameters as already tagged
match_params = set()
match_params_already_tagged = False
for param in match_map.values():
if param in graph.params:
match_params.add(param)
if param in tagged_params.keys():
match_params_already_tagged = True
# Register the match only if no parameters are already registered
if not match_params_already_tagged:
matches[match_map[output]] = (match_map, pattern.tagging_func)
pattern_number = pattern_counters.get(pattern_name, 0)
for param in match_params:
tagged_params[param] = f"Auto[{pattern_name}_{pattern_number}]"
if pattern_name not in pattern_counters:
pattern_counters[pattern_name] = 1
else:
pattern_counters[pattern_name] += 1
# Mark remaining parameters as orphans
orphan_params = sorted(
set(graph.params) - set(tagged_params.keys()), key=lambda v: v.count)
params_regs = [tagged_params.get(p, "Orphan") for p in graph.params]
params_regs = jax.tree_unflatten(graph.params_tree, params_regs)
logging.info("=" * 50)
logging.info("Graph parameter registrations:")
logging.info(params_regs)
logging.info("=" * 50)
# Construct a function with all of the extra tag registrations
@functools.wraps(func)
def wrapped_auto_registered(*args):
flat_args, _ = jax.tree_flatten(args)
# Mapping from variable -> value
env = {}
read = functools.partial(read_env, env)
write = functools.partial(write_env, env)
def tag(var):
if matches.get(var) is not None:
inv_map, tagging_func = matches[var]
var_map = {k: v for k, v in inv_map.items() if not isinstance(k, str)}
val_map = jax.tree_map(read, var_map)
val = tagging_func(inv_map, val_map)
env[var] = val
# Bind args and consts to environment
write(jax.core.unitvar, jax.core.unit)
jax_util.safe_map(write, graph.jaxpr.invars, flat_args)
jax_util.safe_map(write, graph.jaxpr.constvars, graph.consts)
# Register any orphan parameters as generic
for param_var in orphan_params:
write(param_var, tags.register_generic(read(param_var)))
# Set the correct output variables
if compute_only_loss_tags:
output_vars = loss_output_vars
out_tree = jax.tree_structure(loss_output_vars)
else:
output_vars = graph.jaxpr.outvars
out_tree = graph.out_tree
# Loop through equations and evaluate primitives using `bind`
losses_evaluated = 0
for eqn in graph.jaxpr.eqns:
evaluate_eqn(eqn, jax_util.safe_map(read, eqn.invars), write)
jax_util.safe_map(tag, eqn.outvars)
# If we want to output only tagged losses
if isinstance(eqn.primitive, tags.LossTag):
losses_evaluated += 1
if compute_only_loss_tags and num_losses == losses_evaluated:
break
outputs = jax_util.safe_map(read, output_vars)
return jax.tree_unflatten(out_tree, outputs)
return wrapped_auto_registered
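# A hedged usage sketch (`model_loss`, `params` and `data` are placeholders):
# given a function whose losses are registered via the loss tags, this returns
# a wrapped function with layer tags inserted automatically, which can then be
# used as the tagged function when building a curvature estimator:
#
#   tagged_loss = auto_register_tags(model_loss, (params, data))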
# Registered graphs
NAME_TO_JAX_GRAPH = dict()
DEFERRED_REGISTRATIONS = []
def register_function(name, func, tagging_func, example_args, params_index,
precedence):
"""Registers a function as a pattern in the graph matcher registry.
  The graph matcher needs to trace the full function at least once, which means
  you need to provide it with dummy arguments. The shapes of the arguments do
  not matter, as the graph matcher ignores their values, however their ranks
  do. In particular, if any broadcasting happens, you should register the
  pattern with every possible broadcast variant. As general advice, avoid using
  dimensions of size 1 unless you specifically want the pattern to match that,
  as some operations, like squeeze for example, can behave differently then.
Args:
name: The name of the pattern that is being registered to.
func: The function that performs the computation.
tagging_func: Function that correctly creates the tag.
example_args: Example arguments that can be inputted into `func`.
params_index: Specifies at which index of the `example_args` are considered
a parameter.
    precedence: The precedence that the graph matcher assigns to the provided
      pattern. The graph matcher goes from lowest to highest precedence,
      randomly breaking ties, when matching. Note that only the pattern that
      matches a parameter with the lowest precedence gets registered and no
      other will. This is specifically useful when there are patterns for a
      layer both with and without bias, in which case the registration with
      bias should always have the lower precedence.
"""
  # This is required because we cannot use Jax before InitGoogle() runs.
def register():
jnp_args = jax.tree_map(jnp.asarray, example_args)
graph = function_to_jax_graph(
func, jnp_args, params_index=params_index, tagging_func=tagging_func)
if NAME_TO_JAX_GRAPH.get(name) is None:
NAME_TO_JAX_GRAPH[name] = (precedence, [])
assert precedence == NAME_TO_JAX_GRAPH[name][0]
NAME_TO_JAX_GRAPH[name][1].append(graph)
DEFERRED_REGISTRATIONS.append(register)
def get_graph_patterns():
"""Returns all graph patterns sorted by their precedence."""
while DEFERRED_REGISTRATIONS:
DEFERRED_REGISTRATIONS.pop()()
return [(name, pattern) for name, (_, pattern) in sorted(
NAME_TO_JAX_GRAPH.items(), key=lambda pair: pair[1][0])]
# Dense with bias
register_function(
"dense_with_bias",
tags.dense_func,
tags.dense_tagging,
[np.zeros([11, 13]), [np.zeros([13, 7]), np.zeros([7])]],
params_index=1,
precedence=0)
# Dense without bias
register_function(
"dense_no_bias",
tags.dense_func,
tags.dense_tagging, [np.zeros([11, 13]), [np.zeros([13, 7])]],
params_index=1,
precedence=1)
# Conv2d with bias
register_function(
"conv2d_with_bias",
tags.conv2d_func,
tags.conv2d_tagging,
[np.zeros([2, 8, 8, 5]), [np.zeros([3, 3, 5, 4]),
np.zeros([4])]],
params_index=1,
precedence=0)
# Conv2d without bias
register_function(
"conv2d_no_bias",
tags.conv2d_func,
tags.conv2d_tagging, [np.zeros([2, 8, 8, 5]), [np.zeros([3, 3, 5, 4])]],
params_index=1,
precedence=1)
# Standard scale and shift with both scale and shift
register_function(
"scale_and_shift",
functools.partial(
tags.scale_and_shift_func, has_scale=True, has_shift=True),
functools.partial(
tags.scale_and_shift_tagging, has_scale=True, has_shift=True),
[np.zeros([2, 13]), [np.zeros([13]), np.zeros([13])]],
params_index=1,
precedence=0)
# Same but no broadcasting
register_function(
"scale_and_shift",
functools.partial(
tags.scale_and_shift_func, has_scale=True, has_shift=True),
functools.partial(
tags.scale_and_shift_tagging, has_scale=True, has_shift=True),
[np.zeros([13]), [np.zeros([13]), np.zeros([13])]],
params_index=1,
precedence=0)
# Scale and shift as implemented in batch norm layers in Haiku
register_function(
"scale_and_shift",
tags.batch_norm_func,
functools.partial(
tags.batch_norm_tagging_func, has_scale=True, has_shift=True),
[[np.zeros([2, 13]), np.zeros([13])], [np.zeros([13]),
np.zeros([13])]],
params_index=1,
precedence=0)
# Same but no broadcasting
register_function(
"scale_and_shift",
tags.batch_norm_func,
functools.partial(
tags.batch_norm_tagging_func, has_scale=True, has_shift=True),
[[np.zeros([13]), np.zeros([13])], [np.zeros([13]),
np.zeros([13])]],
params_index=1,
precedence=0)
# Only scale
register_function(
"scale_only",
functools.partial(
tags.scale_and_shift_func, has_scale=True, has_shift=False),
functools.partial(
tags.scale_and_shift_tagging, has_scale=True, has_shift=False),
[np.zeros([2, 13]), [np.zeros([13])]],
params_index=1,
precedence=1)
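# Illustrative sketch (not part of the original library): how the patterns
# registered above can be inspected. `get_graph_patterns` first materializes
# the deferred registrations and then returns (name, graphs) pairs sorted by
# precedence.
def _example_list_registered_patterns():
  """Prints the name and number of traced graphs for every pattern."""
  for name, graphs in get_graph_patterns():
    print(name, len(graphs))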
| deepmind-research-master | kfac_ferminet_alpha/tag_graph_matcher.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import absltest
import jax
import jax.numpy as jnp
import jax.random as jnr
import jax.test_util as jtu
from kfac_ferminet_alpha import layers_and_loss_tags
from kfac_ferminet_alpha import loss_functions
from kfac_ferminet_alpha import tag_graph_matcher
from kfac_ferminet_alpha.tests import common
def tagged_autoencoder(all_params, x_in):
h_in = x_in
layers_values = []
for i, params in enumerate(all_params):
h_out = common.fully_connected_layer(params, h_in)
h_out = layers_and_loss_tags.register_dense(h_out, h_in, params[0],
params[1],)
layers_values.append((h_out, h_in))
# Last layer does not have a nonlinearity
if i % 4 != 3:
h_in = jnp.tanh(h_out)
else:
h_in = h_out
h1, _ = loss_functions.register_normal_predictive_distribution(
h_in, targets=x_in, weight=1.0)
h2, t2 = loss_functions.register_normal_predictive_distribution(
h_in, targets=x_in, weight=0.1)
return [[h1, t2], [h2, t2]]
class TestGraphMatcher(jtu.JaxTestCase):
"""Class for running all of the tests for integrating the systems."""
def _test_jaxpr(self, init_func, model_func, tagged_model, data_shape):
data_shape = tuple(data_shape)
rng_key = jnr.PRNGKey(12345)
init_key, data_key = jnr.split(rng_key)
params = init_func(init_key, data_shape)
data = jnr.normal(data_key, (11,) + data_shape)
func = tag_graph_matcher.auto_register_tags(model_func, (params, data))
jaxpr = jax.make_jaxpr(func)(params, data).jaxpr
tagged_jaxpr = jax.make_jaxpr(tagged_model)(params, data).jaxpr
self.assertEqual(len(jaxpr.invars), len(tagged_jaxpr.invars))
self.assertEqual(len(jaxpr.constvars), len(tagged_jaxpr.constvars))
self.assertEqual(len(jaxpr.outvars), len(tagged_jaxpr.outvars))
for eq, tagged_eq in zip(jaxpr.eqns, tagged_jaxpr.eqns):
eq_in_vars = [v for v in eq.invars if not isinstance(v, jax.core.UnitVar)]
tagged_in_vars = [
v for v in tagged_eq.invars if not isinstance(v, jax.core.UnitVar)
]
self.assertEqual(len(eq_in_vars), len(tagged_in_vars))
self.assertEqual(len(eq.outvars), len(tagged_eq.outvars))
self.assertEqual(eq.primitive, tagged_eq.primitive)
for variable, t_variable in zip(eq_in_vars + eq.outvars,
tagged_in_vars + tagged_eq.outvars):
if isinstance(variable, jax.core.Literal):
self.assertEqual(variable.aval, t_variable.aval)
else:
          self.assertEqual(variable.count, t_variable.count)
def test_autoencoder(self):
self._test_jaxpr(common.init_autoencoder, common.autoencoder,
tagged_autoencoder, [784])
if __name__ == "__main__":
absltest.main()
| deepmind-research-master | kfac_ferminet_alpha/tests/graph_matcher_test.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import absltest
import jax
import jax.numpy as jnp
import jax.random as jnr
import jax.test_util as jtu
from kfac_ferminet_alpha import loss_functions
from kfac_ferminet_alpha import tag_graph_matcher as tgm
from kfac_ferminet_alpha import tracer
from kfac_ferminet_alpha import utils
from kfac_ferminet_alpha.tests import common
def autoencoder_aux(all_aux, all_params, x_in):
h_in = x_in
layers_values = []
for i, (params, aux) in enumerate(zip(all_params, all_aux)):
h_out = common.fully_connected_layer(params, h_in + aux[1]) + aux[0]
layers_values.append((h_out, h_in))
# Last layer does not have a nonlinearity
if i % 4 != 3:
h_in = jnp.tanh(h_out)
else:
h_in = h_out
h1, _ = loss_functions.register_normal_predictive_distribution(h_in, x_in)
h2, _ = loss_functions.register_normal_predictive_distribution(
h_in, targets=x_in, weight=0.1)
l1 = (h1 - x_in)**2 + jnp.log(jnp.pi) / 2
l2 = (h2 - x_in)**2 + jnp.log(jnp.pi) / 2
return [l1, l2 * 0.1], layers_values
class TestTracer(jtu.JaxTestCase):
"""Class for running all of the tests for integrating the systems."""
@staticmethod
def generate_data(init_func, func, data_shape, rng_key):
n = 3
rng_key, key = jnr.split(rng_key)
params = init_func(key, data_shape)
rng_key, key = jnr.split(rng_key)
p_tangents = init_func(key, data_shape)
rng_key, key = jnr.split(rng_key)
data = jnr.normal(key, [n] + data_shape)
loss_vals, layer_vals = func(params, data)
h = layer_vals[-1][0]
keys = jnr.split(key, len(loss_vals))
h_tangents = tuple(jnr.normal(key, shape=h.shape) for key in keys)
return params, data, p_tangents, h_tangents
def assertStructureAllClose(self, x, y, **kwargs):
x_v, x_tree = jax.tree_flatten(x)
y_v, y_tree = jax.tree_flatten(y)
self.assertEqual(x_tree, y_tree)
for xi, yi in zip(x_v, y_v):
self.assertEqual(xi.shape, yi.shape)
self.assertAllClose(xi, yi, check_dtypes=True, **kwargs)
  def test_tracer_jvp(self):
init_func = common.init_autoencoder
func = common.autoencoder
data_shape = [784]
rng_key = jnr.PRNGKey(12345)
params, data, p_tangents, _ = self.generate_data(init_func, func,
data_shape, rng_key)
def no_data_func(args):
outputs = func(args, data)
return outputs[0], outputs[1][-1][0]
# True computation
(primals_out, tangents_out) = jax.jvp(no_data_func, [params], [p_tangents])
loss_vals, _ = primals_out
_, h_tangents = tangents_out
loss_tangents = ((h_tangents,),) * len(loss_vals)
# Tracer computation
tracer_jvp = tracer.trace_losses_matrix_vector_jvp(func)
tracer_losses, tracer_loss_tangents = tracer_jvp((params, data), p_tangents)
tracer_losses = [loss.evaluate(None) for loss in tracer_losses]
self.assertStructureAllClose(loss_vals, tracer_losses)
self.assertStructureAllClose(loss_tangents, tracer_loss_tangents)
def test_tracer_vjp(self):
init_func = common.init_autoencoder
func = common.autoencoder
data_shape = [784]
rng_key = jnr.PRNGKey(12345)
params, data, _, h_tangents = self.generate_data(init_func, func,
data_shape, rng_key)
def no_data_func(args):
outputs = func(args, data)
return outputs[0], outputs[1][-1][0]
# True computation
(loss_vals, _), vjp_func = jax.vjp(no_data_func, params)
loss_tangents = jax.tree_map(jnp.zeros_like, loss_vals)
summed_h_tangents = sum(jax.tree_flatten(h_tangents)[0])
p_tangents = vjp_func((loss_tangents, summed_h_tangents))
# Tracer computation
trace_vjp = tracer.trace_losses_matrix_vector_vjp(func)
tracer_losses, tracer_vjp_func = trace_vjp(params, data)
tracer_losses = [loss.evaluate(None) for loss in tracer_losses]
tracer_p_tangents = tracer_vjp_func(h_tangents)
self.assertStructureAllClose(loss_vals, tracer_losses)
self.assertStructureAllClose(p_tangents, tracer_p_tangents, atol=3e-6)
def test_tracer_hvp(self):
init_func = common.init_autoencoder
func = common.autoencoder
data_shape = [784]
rng_key = jnr.PRNGKey(12345)
params, data, p_tangents, _ = self.generate_data(init_func, func,
data_shape, rng_key)
def no_data_func(args):
outputs = func(args, data)
return sum(jax.tree_map(jnp.sum, outputs[0]))
# True computation
grad_func = jax.grad(no_data_func)
def grad_time_tangents(args):
return utils.inner_product(grad_func(args), p_tangents)
hvp = jax.grad(grad_time_tangents)
hvp_vectors = hvp(params)
# Tracer computation
tracer_hvp = tracer.trace_losses_matrix_vector_hvp(func)
tracer_hvp_vectors = tracer_hvp((params, data), p_tangents)
self.assertStructureAllClose(hvp_vectors, tracer_hvp_vectors, atol=1e-4)
def test_trace_estimator(self):
init_func = common.init_autoencoder
func = common.autoencoder
aux_func = autoencoder_aux
data_shape = [784]
rng_key = jnr.PRNGKey(12345)
params, data, _, h_tangents = self.generate_data(init_func, func,
data_shape, rng_key)
def aux_last_layer(aux, args):
outs = aux_func(aux, args, data)
return outs[1][-1][0]
# True computation
loss_vals, layer_vals = func(params, data)
aux_vals = jax.tree_map(jnp.zeros_like, layer_vals)
_, vjp = jax.vjp(aux_last_layer, aux_vals, params)
summed_h_tangents = sum(jax.tree_flatten(h_tangents)[0])
aux_tangents, p_tangents = vjp(summed_h_tangents)
layers_info = []
for aux_p, p_p in zip(layer_vals, params):
info = dict()
info["outputs"] = (aux_p[0],)
info["inputs"] = (aux_p[1],)
info["params"] = (p_p[0], p_p[1])
layers_info.append(info)
for i, (aux_t, p_t) in enumerate(zip(aux_tangents, p_tangents)):
info = dict()
info["outputs_tangent"] = (aux_t[0],)
info["inputs_tangent"] = (aux_t[1],)
info["params_tangent"] = (p_t[0], p_t[1])
layers_info[i].update(info)
layers_info = tuple(layers_info)
func = tgm.auto_register_tags(func, (params, data))
tracer_vjp = tracer.trace_estimator_vjp(func)
tracer_losses, tracer_vjp_func = tracer_vjp((params, data))
tracer_losses = [loss.evaluate(None) for loss in tracer_losses]
tracer_outputs = tracer_vjp_func((h_tangents[:1], h_tangents[1:]))
self.assertStructureAllClose(loss_vals, tracer_losses)
self.assertStructureAllClose(tracer_outputs, layers_info, atol=3e-6)
if __name__ == "__main__":
absltest.main()
| deepmind-research-master | kfac_ferminet_alpha/tests/tracer_test.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common functions used across more than one test."""
import jax
import jax.numpy as jnp
import jax.random as jnr
from kfac_ferminet_alpha import loss_functions
def fully_connected_layer(params, x):
w, b = params
return jnp.matmul(x, w) + b[None]
def init_autoencoder(key, data_shape):
"""Initialize the standard autoencoder."""
assert len(data_shape) == 1
x_size = data_shape[0]
sizes = [x_size, 1000, 500, 250, 30, 250, 500, 1000, x_size]
keys = jnr.split(key, len(sizes) - 1)
params = []
for key, dim_in, dim_out in zip(keys, sizes, sizes[1:]):
# Glorot uniform initialization
c = jnp.sqrt(6 / (dim_in + dim_out))
w = jax.random.uniform(key, shape=(dim_in, dim_out), minval=-c, maxval=c)
b = jnp.zeros([dim_out])
params.append((w, b))
return params
def autoencoder(all_params, x_in):
"""Evaluate the standard autoencoder.
  Note that the objective of this autoencoder is not standard, but rather a
  weighted sum of two squared-error losses. The reason for this is to test the
  handling of multiple losses.
Args:
all_params: All parameter values.
x_in: Inputs to the network.
Returns:
The value of the two losses and intermediate layer values.
"""
h_in = x_in
layers_values = []
for i, params in enumerate(all_params):
h_out = fully_connected_layer(params, h_in)
layers_values.append((h_out, h_in))
# Last layer does not have a nonlinearity
if i % 4 != 3:
# h_in = nn.leaky_relu(h_out)
h_in = jnp.tanh(h_out)
else:
h_in = h_out
h1, _ = loss_functions.register_normal_predictive_distribution(h_in, x_in)
h2, _ = loss_functions.register_normal_predictive_distribution(
h_in, targets=x_in, weight=0.1)
l1 = (h1 - x_in)**2 + jnp.log(jnp.pi) / 2
l1 = jnp.sum(l1, axis=-1)
l2 = (h2 - x_in)**2 + jnp.log(jnp.pi) / 2
l2 = jnp.sum(l2, axis=-1)
return [l1, l2 * 0.1], layers_values
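# Illustrative usage sketch (not part of the original tests): build parameters
# for the standard autoencoder and run a single forward pass. The loss
# registration calls simply pass their inputs through when evaluated with
# concrete arrays, which the accompanying tests also rely on.
def _example_autoencoder_forward():
  init_key, data_key = jnr.split(jnr.PRNGKey(0))
  params = init_autoencoder(init_key, [784])
  x = jnr.normal(data_key, (3, 784))
  (l1, l2), layer_values = autoencoder(params, x)
  # l1 and l2 have shape [3] (one value per example); layer_values holds the
  # (output, input) pair of every layer.
  return l1.shape, l2.shape, len(layer_values)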
| deepmind-research-master | kfac_ferminet_alpha/tests/common.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example code for running model on CLEVRER."""
import json
from absl import app
from absl import flags
import numpy as np
import tensorflow.compat.v1 as tf
from object_attention_for_reasoning import model as modellib
BATCH_SIZE = 1
NUM_FRAMES = 25
NUM_OBJECTS = 8
_BASE_DIR = flags.DEFINE_string(
"base_dir", "./clevrer_monet_latents",
"Directory containing checkpoints and MONet latents.")
_SCENE_IDX = flags.DEFINE_integer(
    "scene_idx", 1000, "Scene index of CLEVRER video.")
def load_monet_latents(base_dir, scene_index):
filename = f"{base_dir}/train/{scene_index}.npz"
with open(filename, "rb") as f:
return np.load(f)
def _split_string(s):
"""Splits string to words and standardize alphabet."""
return s.lower().replace("?", "").split()
def _pad(array, length):
"""Pad an array to desired length."""
return np.pad(array, [(0, length - array.shape[0])], mode="constant")
def encode_sentence(token_map, sentence, pad_length):
"""Encode CLEVRER question/choice sentences as sequence of token ids."""
ret = np.array(
[token_map["question_vocab"][w] for w in _split_string(sentence)],
np.int32)
return _pad(ret, pad_length)
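# Illustrative sketch (hypothetical toy vocabulary, not the real CLEVRER one):
# encode_sentence lower-cases the question, strips "?", looks each word up in
# token_map["question_vocab"] and pads the result to `pad_length`.
def _example_encode_sentence():
  toy_token_map = {"question_vocab": {"what": 1, "color": 2, "is": 3, "it": 4}}
  # Expected result: array([1, 2, 3, 4, 0, 0, 0, 0], dtype=int32)
  return encode_sentence(toy_token_map, "What color is it?", pad_length=8)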
def encode_choices(token_map, choices):
"""Encode CLEVRER choices."""
arrays = [encode_sentence(token_map, choice["choice"],
modellib.MAX_CHOICE_LENGTH)
for choice in choices]
return _pad(np.stack(arrays, axis=0), modellib.NUM_CHOICES)
def main(unused_argv):
base_dir = _BASE_DIR.value
with open(f"{base_dir}/vocab.json", "rb") as f:
token_map = json.load(f)
reverse_answer_lookup = {v: k for k, v in token_map["answer_vocab"].items()}
with open(f"{base_dir}/train.json", "rb") as f:
questions_data = json.load(f)
tf.reset_default_graph()
model = modellib.ClevrerTransformerModel(**modellib.PRETRAINED_MODEL_CONFIG)
inputs_descriptive = {
"monet_latents": tf.placeholder(
tf.float32,
[BATCH_SIZE, NUM_FRAMES, NUM_OBJECTS, modellib.EMBED_DIM]),
"question": tf.placeholder(
tf.int32, [BATCH_SIZE, modellib.MAX_QUESTION_LENGTH]),
}
inputs_mc = {
"monet_latents": tf.placeholder(
tf.float32,
[BATCH_SIZE, NUM_FRAMES, NUM_OBJECTS, modellib.EMBED_DIM]),
"question": tf.placeholder(tf.int32,
[BATCH_SIZE, modellib.MAX_QUESTION_LENGTH]),
"choices": tf.placeholder(
tf.int32, [BATCH_SIZE, modellib.NUM_CHOICES,
modellib.MAX_CHOICE_LENGTH]),
}
output_descriptive = model.apply_model_descriptive(inputs_descriptive)
output_mc = model.apply_model_mc(inputs_mc)
# Restore from checkpoint
saver = tf.train.Saver()
checkpoint_dir = f"{base_dir}/checkpoints/"
sess = tf.train.SingularMonitoredSession(checkpoint_dir=checkpoint_dir)
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
saver.restore(sess, ckpt.model_checkpoint_path)
def eval_descriptive(monet_latents, question_json):
# CLEVRER provides videos with 128 frames. In our model, we subsample 25
# frames (as was done in Yi et al (2020)).
# For training, we randomize the choice of 25 frames, and for evaluation, we
# sample the 25 frames as evenly as possible.
# We do that by doing strided sampling of the frames.
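    # For example (illustrative numbers): with CLEVRER's 128 frames and
    # NUM_FRAMES = 25, divmod(128, 25) = (5, 3), so the slicing below drops the
    # last 3 frames and then keeps every 5th frame, giving exactly 25 frames.
    # (This assumes rem > 0, which holds for 128-frame videos.)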
stride, rem = divmod(monet_latents.shape[0], NUM_FRAMES)
monet_latents = monet_latents[None, :-rem:stride]
assert monet_latents.shape[1] == NUM_FRAMES
question = encode_sentence(token_map, question_json["question"],
modellib.MAX_QUESTION_LENGTH)
batched_question = np.expand_dims(question, axis=0)
logits = sess.run(output_descriptive, feed_dict={
inputs_descriptive["monet_latents"]: monet_latents,
inputs_descriptive["question"]: batched_question,
})
descriptive_answer = np.argmax(logits)
return reverse_answer_lookup[descriptive_answer]
def eval_mc(monet_latents, question_json):
stride, rem = divmod(monet_latents.shape[0], NUM_FRAMES)
monet_latents = monet_latents[None, :-rem:stride]
assert monet_latents.shape[1] == NUM_FRAMES
question = encode_sentence(
token_map, question_json["question"], modellib.MAX_QUESTION_LENGTH)
choices = encode_choices(
token_map, question_json["choices"])
mc_answer = sess.run(output_mc, feed_dict={
inputs_mc["monet_latents"]: monet_latents,
inputs_mc["question"]: np.expand_dims(question, axis=0),
inputs_mc["choices"]: np.expand_dims(choices, axis=0),
})
return mc_answer >= 0
sample_scene_idx = _SCENE_IDX.value
question_json = questions_data[sample_scene_idx]["questions"][0]
print("Descriptive Question: ", question_json["question"])
print("Model Answer: ",
eval_descriptive(load_monet_latents(base_dir, sample_scene_idx),
question_json))
print("True Answer: ", question_json["answer"])
question_json = questions_data[sample_scene_idx]["questions"][-1]
print("Multiple-Choice Question: ", question_json["question"])
for i, choice_json in enumerate(question_json["choices"]):
print(f"{i+1}) {choice_json['choice']}")
print("Model Answer: ",
eval_mc(load_monet_latents(base_dir, sample_scene_idx), question_json))
print("True Answer: ",
[choice_json["answer"] for choice_json in question_json["choices"]])
if __name__ == "__main__":
app.run(main)
| deepmind-research-master | object_attention_for_reasoning/run_model.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model code. Provided settings are identical to what was used in the paper."""
import sonnet as snt
import tensorflow.compat.v1 as tf
from object_attention_for_reasoning import transformer
QUESTION_VOCAB_SIZE = 82
ANSWER_VOCAB_SIZE = 22
MAX_QUESTION_LENGTH = 20
MAX_CHOICE_LENGTH = 12
NUM_CHOICES = 4
EMBED_DIM = 16
PRETRAINED_MODEL_CONFIG = dict(
use_relative_positions=True,
shuffle_objects=True,
transformer_layers=28,
head_size=128,
num_heads=10,
embed_dim=EMBED_DIM,
)
def append_ids(tensor, id_vector, axis):
id_vector = tf.constant(id_vector, tf.float32)
for a in range(len(tensor.shape)):
if a != axis:
id_vector = tf.expand_dims(id_vector, axis=a)
tiling_vector = [s if i != axis else 1 for i, s in enumerate(tensor.shape)]
id_tensor = tf.tile(id_vector, tiling_vector)
return tf.concat([tensor, id_tensor], axis=axis)
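# Illustrative sketch (not part of the original model): append_ids concatenates
# a constant id vector to every position along `axis`, e.g. tagging embeddings
# as "language" ([1, 0]) or "vision" ([0, 1]).
def _example_append_ids():
  x = tf.zeros([2, 5, 16])
  y = append_ids(x, [0, 1], axis=2)
  return y  # Shape [2, 5, 18]: the last two channels are 0 and 1 everywhere.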
class ClevrerTransformerModel(object):
"""Model from Ding et al. 2020 (https://arxiv.org/abs/2012.08508)."""
def __init__(self, use_relative_positions, shuffle_objects,
transformer_layers, num_heads, head_size, embed_dim):
"""Instantiate Sonnet modules."""
self._embed_dim = embed_dim
self._embed = snt.Embed(QUESTION_VOCAB_SIZE, embed_dim - 2)
self._shuffle_objects = shuffle_objects
self._memory_transformer = transformer.TransformerTower(
value_size=embed_dim + 2,
num_heads=num_heads,
num_layers=transformer_layers,
use_relative_positions=use_relative_positions,
causal=False)
self._final_layer_mc = snt.Sequential(
[snt.Linear(head_size), tf.nn.relu, snt.Linear(1)])
self._final_layer_descriptive = snt.Sequential(
[snt.Linear(head_size), tf.nn.relu,
snt.Linear(ANSWER_VOCAB_SIZE)])
self._dummy = tf.get_variable("dummy", [embed_dim + 2], tf.float32,
initializer=tf.zeros_initializer)
self._infill_linear = snt.Linear(embed_dim + 2)
self._mask_embedding = tf.get_variable(
"mask", [embed_dim + 2], tf.float32, initializer=tf.zeros_initializer)
def _apply_transformers(self, lang_embedding, vision_embedding):
"""Applies transformer to language and vision input.
Args:
lang_embedding: tensor,
vision_embedding: tensor, "validation", or "test".
Returns:
tuple, output at dummy token, all output embeddings, infill loss
"""
def _unroll(tensor):
"""Unroll the time dimension into the object dimension."""
return tf.reshape(
tensor, [tensor.shape[0], -1, tensor.shape[3]])
words = append_ids(lang_embedding, [1, 0], axis=2)
dummy_word = tf.tile(self._dummy[None, None, :], [tf.shape(words)[0], 1, 1])
vision_embedding = append_ids(vision_embedding, [0, 1], axis=3)
vision_over_time = _unroll(vision_embedding)
transformer_input = tf.concat([dummy_word, words, vision_over_time], axis=1)
output, _ = self._memory_transformer(transformer_input,
is_training=False)
return output[:, 0, :]
def apply_model_descriptive(self, inputs):
"""Applies model to CLEVRER descriptive questions.
Args:
inputs: dict of form: {
"question": tf.int32 tensor of shape [batch, MAX_QUESTION_LENGTH],
"monet_latents": tf.float32 tensor of shape [batch, frames, 8, 16],
}
Returns:
Tensor of shape [batch, ANSWER_VOCAB_SIZE], representing logits for each
possible answer word.
"""
question = inputs["question"]
# Shape: [batch, question_len, embed_dim-2]
question_embedding = self._embed(question)
# Shape: [batch, question_len, embed_dim]
question_embedding = append_ids(question_embedding, [0, 1], 2)
choices_embedding = self._embed(
tf.zeros([question.shape[0], MAX_CHOICE_LENGTH], tf.int64))
choices_embedding = append_ids(choices_embedding, [0, 1], 2)
# Shape: [batch, choices, question_len + choice_len, embed_dim]
lang_embedding = tf.concat([question_embedding, choices_embedding], axis=1)
# Shape: [batch, frames, num_objects, embed_dim]
vision_embedding = inputs["monet_latents"]
if self._shuffle_objects:
vision_embedding = tf.transpose(vision_embedding, [2, 1, 0, 3])
vision_embedding = tf.random.shuffle(vision_embedding)
vision_embedding = tf.transpose(vision_embedding, [2, 1, 0, 3])
output = self._apply_transformers(lang_embedding, vision_embedding)
output = self._final_layer_descriptive(output)
return output
def apply_model_mc(self, inputs):
"""Applies model to CLEVRER multiple-choice questions.
Args:
inputs: dict of form: {
"question": tf.int32 tensor of shape [batch, MAX_QUESTION_LENGTH],
"choices": tf.int32 tensor of shape [batch, 4, MAX_CHOICE_LENGTH],
"monet_latents": tf.float32 tensor of shape [batch, frames, 8, 16],
}
Returns:
Tensor of shape [batch, 4], representing logits for each choice
"""
question = inputs["question"]
choices = inputs["choices"]
# Shape: [batch, question_len, embed_dim-2]
question_embedding = self._embed(question)
# Shape: [batch, question_len, embed_dim]
question_embedding = append_ids(question_embedding, [1, 0], 2)
# Shape: [batch, choices, choice_len, embed_dim-2]
choices_embedding = snt.BatchApply(self._embed)(choices)
# Shape: [batch, choices, choice_len, embed_dim]
choices_embedding = append_ids(choices_embedding, [0, 1], 3)
# Shape: [batch, choices, question_len + choice_len, embed_dim]
lang_embedding = tf.concat([
tf.tile(question_embedding[:, None],
[1, choices_embedding.shape[1], 1, 1]),
choices_embedding], axis=2)
# Shape: [batch, frames, num_objects, embed_dim]
vision_embedding = inputs["monet_latents"]
if self._shuffle_objects:
vision_embedding = tf.transpose(vision_embedding, [2, 1, 0, 3])
vision_embedding = tf.random.shuffle(vision_embedding)
vision_embedding = tf.transpose(vision_embedding, [2, 1, 0, 3])
output_per_choice = []
for c in range(NUM_CHOICES):
output = self._apply_transformers(
lang_embedding[:, c, :, :], vision_embedding)
output_per_choice.append(output)
output = tf.stack(output_per_choice, axis=1)
output = tf.squeeze(snt.BatchApply(self._final_layer_mc)(output), axis=2)
return output
| deepmind-research-master | object_attention_for_reasoning/model.py |
# Fork of Sonnet transformer model with small modifications
#
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Implementation of Transformer networks.
Size glossary:
* Batch size (B).
* Sequence length (N).
* Memory size (M). The size of the optional memory, passed in via `state`.
* Number of heads (H): the number of attention heads.
* Value size (V): the size of each value embedding per head.
* Key size (K): the size of each key embedding per head. Equally, the size
of each query embedding per head. Typically K <= V.
* Embedding size (HV). The size of the activation or embedding relating to
each input between layers. Equal to value_size * num_heads.
* All attention size (F). The size of all attention activations over every
head.
* QKV size (F / H): The size of the query, key and value per head. Equal to
2K + V or equivalently F / H.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
from sonnet.python.modules import base
from sonnet.python.modules import basic
from sonnet.python.modules import layer_norm as snt_ln
from sonnet.python.modules import util
from sonnet.python.modules.nets import mlp as snt_mlp
import tensorflow.compat.v1 as tf
AttentionState = collections.namedtuple('AttentionState',
('queries', 'keys', 'values', 'logits',
'weights', 'embeddings', 'read_words'))
CompressedMemoryState = collections.namedtuple(
'CompressedMemoryState', ('episodic_memory', 'compressed_memory', 'index'))
def rel_shift(position_logits):
"""Shifting of logits for relative attention.
Args:
position_logits: A tensor of shape [B, H, N, N + M].
Returns:
The shifted logits. Example, for input (H=1, B=1):
[5, 4, 3, 2, 1]
[5, 4, 3, 2, 1]
[5, 4, 3, 2, 1]
[5, 4, 3, 2, 1]
[5, 4, 3, 2, 1]
the function outputs:
[1, 0, 5, 4, 3]
[2, 1, 0, 5, 4]
[3, 2, 1, 0, 5]
[4, 3, 2, 1, 0]
[5, 4, 3, 2, 1]
Raises:
ValueError if position_logits is not 4D.
Note: this is not an exact shift as the upper triangle is non-zero. This
works as intended in the causally-masked case. If this is used with un-masked
attention, we'd want these to also be zero.
"""
if position_logits.get_shape().ndims != 4:
raise ValueError('Expected 4D position logits.')
input_shape = position_logits.shape
batch_size = input_shape[0]
num_heads = input_shape[1]
t1 = input_shape[2]
t2 = input_shape[3]
# We prepend zeros on the final timescale dimension.
to_pad = tf.zeros([batch_size, num_heads, t1, 1])
position_logits = tf.concat([to_pad, position_logits], -1)
# Reshape trick to shift input.
position_logits = tf.reshape(position_logits,
[batch_size, num_heads, t2 + 1, t1])
# Remove extra time dimension and re-shape.
position_logits = position_logits[:, :, 1:]
position_logits = tf.reshape(position_logits, input_shape)
return position_logits
def _layer_norm(inputs):
if inputs.get_shape().ndims > 2:
return basic.BatchApply(snt_ln.LayerNorm())(inputs)
else:
return snt_ln.LayerNorm()(inputs)
def _concat_and_slice(prev_memory, new_memory):
original_memory_size = prev_memory.get_shape().as_list()[1]
concat_memory = tf.concat([prev_memory, new_memory], 1)
memory = concat_memory[:, -original_memory_size:]
return memory, concat_memory
def simple_attention(queries, keys, values):
logits = tf.matmul(queries, keys, transpose_b=True)
weights = tf.nn.softmax(logits)
return tf.matmul(weights, values)
class ResidualDropoutWrapper(base.AbstractModule):
"""Wrapper class that applies residual connections, dropout and layer norm.
By default applies a relu to the module output before the other operations.
"""
def __init__(self,
layer,
dropout_rate,
layer_norm='input',
name='residual_dropout_wrapper'):
self._module = layer
self._dropout_rate = dropout_rate
self._layer_norm = layer_norm
super(ResidualDropoutWrapper, self).__init__(name=name)
def _build(self, inputs, *args, **kwargs):
if self._layer_norm in ('both', 'input'):
normed_inputs = _layer_norm(inputs)
else:
normed_inputs = inputs
module_output = self._module(normed_inputs, *args, **kwargs)
module_state = None
# If module outputs multiple items, assumes (output, state) tuple.
if isinstance(module_output, tuple):
module_output, module_state = module_output
if kwargs['is_training']: # kwargs must contain is_training.
module_output = tf.nn.dropout(module_output, rate=self._dropout_rate)
output = inputs + module_output
if self._layer_norm in ('both', 'output'):
output = _layer_norm(output)
if module_state is None:
return output
else:
return output, module_state
def future_mask(chunk_size, dtype):
"""Creates attention mask to ensure an element i cannot attend to j > i."""
square = tf.ones([chunk_size, chunk_size], dtype=dtype)
# Create upper diagonal matrix and remove diagonal entries (allow self-attn).
mask = tf.matrix_band_part(square, 0, -1) - tf.matrix_band_part(square, 0, 0)
# Multiply by -1e6 and expand to broadcast with [B, H, N, N] logits.
mask = -1e6 * tf.reshape(mask, [1, 1, chunk_size, chunk_size])
return mask
def _memory_size(state):
if isinstance(state, CompressedMemoryState):
return (state.episodic_memory.get_shape().as_list()[1] +
state.compressed_memory.get_shape().as_list()[1])
else:
return state.get_shape().as_list()[1]
def create_mask(inputs, state, equal_window):
"""Creates mask for future sequence positions.
Args:
inputs: inputs tensor of shape [B, N, D]
state: optional tensor of shape [B, M, D], CompressedMemoryState or a list
where the ith entry corresponds to the ith layer's state.
equal_window: if True, then each activation has an equally-sized attention
window of length 'M'. This only makes sense if a state is given.
Returns:
Float tensor of shape [1, 1, N, N + M], to be summed with logits.
"""
chunk_size = inputs.get_shape().as_list()[1]
dtype = inputs.dtype
mask = future_mask(chunk_size, dtype)
if state is not None:
if isinstance(state, (tuple, list)):
largest_memory_layer = np.argmax([_memory_size(s) for s in state])
state = state[largest_memory_layer]
mem_size = _memory_size(state)
mask = tf.concat(
[tf.zeros([1, 1, chunk_size, mem_size], dtype=dtype), mask], 3)
if equal_window:
attn_mask = tf.ones([chunk_size, chunk_size], dtype=dtype)
mask_dia = tf.cast(tf.matrix_band_part(attn_mask, 0, 0), dtype=dtype)
mask_l = tf.cast(tf.matrix_band_part(attn_mask, -1, 0), dtype=dtype)
start_mask = tf.reshape(mask_l - mask_dia,
[1, 1, chunk_size, chunk_size]) * -1e6
mask = tf.concat(
[mask[:, :, :, :chunk_size] + start_mask, mask[:, :, :, chunk_size:]],
3)
return mask
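# Illustrative sketch (not part of the original module): a causal mask for a
# length-4 sequence with no memory. Entries above the diagonal are -1e6 so
# that, once added to the logits, future positions are effectively ignored.
def _example_create_mask():
  inputs = tf.zeros([2, 4, 8])  # [batch, sequence, features]
  return create_mask(inputs, state=None, equal_window=False)  # [1, 1, 4, 4]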
def default_mlp(hidden_sizes, activate_final=False, init_std=2., **kwargs):
"""Standard batch-applied MLP for transformer modules."""
init = {'w': tf.variance_scaling_initializer(init_std, distribution='normal')}
mlp = snt_mlp.MLP(
hidden_sizes,
activate_final=activate_final,
use_dropout=True,
initializers=init,
**kwargs)
return basic.BatchApply(mlp)
def get_position_encodings(sequence_length,
hidden_size,
clamp_value,
max_timescale=10000.,
min_timescale=2.0):
"""Creates sinusoidal encodings of shape [1, N + M, D]."""
# NOTE: when not using relative position encodings, min_timescale must be 2.0
# and hidden_size must be an even number. Otherwise, the dimensions do not
# match.
pos_seq = tf.range(sequence_length - 1, -1, -1.0)
if clamp_value > 0:
pos_seq = tf.minimum(pos_seq, clamp_value)
freqs = tf.range(0, hidden_size, min_timescale)
inv_freq = 1 / (max_timescale**(freqs / hidden_size))
sinusoid_inp = tf.einsum('i,j->ij', pos_seq, inv_freq)
pos_emb = tf.concat([tf.sin(sinusoid_inp), tf.cos(sinusoid_inp)], -1)
pos_emb = tf.expand_dims(pos_emb, 0)
output_dim = pos_emb.get_shape().as_list()[-1]
if output_dim != hidden_size:
raise ValueError(
'position embedding dimension ({}) does not match that of the input ({}).'
.format(output_dim, hidden_size))
return pos_emb
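# Illustrative sketch (not part of the original module): sinusoidal encodings
# for a length-10 sequence with 16-dimensional embeddings and no clamping.
def _example_position_encodings():
  return get_position_encodings(
      sequence_length=10, hidden_size=16, clamp_value=0)  # Shape [1, 10, 16].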
class MultiheadAttention(base.AbstractModule):
"""Implements multi-head attention with optional state context."""
def __init__(self,
value_size,
key_size,
num_heads,
mask=None,
scaling=True,
positional_encodings=None,
use_relative_positions=False,
init_std=2.,
name='multihead_attention'):
"""Creates a MultiheadAttention module.
Args:
value_size: V parameter. See size glossary in class docstring.
key_size: K parameter. See size glossary in class docstring.
num_heads: The number of independent queries per timestep.
mask: Optional mask to attention logits. This can prevent attending to
future positions or unused memory slots.
scaling: Whether to scale the attention logits.
positional_encodings: Either None (none given), or an iterable of
`(key_positional_encodings, query_positional_encodings)` tuples, where
the first encodings in the list indicate the oldest entries in memory
and the final encodings indicate the newest entries in memory and the
sequence.
use_relative_positions: If True then relative positions are incorporated,
vs absolute, into the attention logits. This is done exactly as
described in the TransformerXL, Dai et al. 2019.
init_std: scaling of standard deviation for weight matrices init.
name: Name of module.
"""
super(MultiheadAttention, self).__init__(name=name)
self._value_size = value_size
self._key_size = key_size
self._sizes = {
'value': self._value_size,
'key': self._key_size,
'query': self._key_size,
'relative_keys': self._key_size,
'relative_keys_0': self._key_size,
}
self._num_heads = num_heads
self._mask = mask
self._scaling = scaling
self._positional_encodings = positional_encodings
self._use_relative_positions = use_relative_positions
self._init = {'w': tf.variance_scaling_initializer(init_std)}
@util.reuse_variables
def multihead_linear(self, inputs, name):
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
hidden_size = self._sizes[name]
input_size = inputs.shape[-1].value
w = tf.get_variable(
'linear/w',
shape=[input_size, self._num_heads * hidden_size],
initializer=self._init['w'])
w = tf.reshape(w, [input_size, self._num_heads, hidden_size])
out = tf.einsum('bij,jhk->bhik', inputs, w)
return out
def _build(self,
inputs,
query_inputs=None,
state=None,
is_training=False,
dropout_keep_prob=0.5,
key_value_inputs=None):
"""Calculates multi-layer self attention.
Args:
inputs: Tensor of shape [batch_size, num_steps, output_dim_size]. Inputs
used as the query, key, and value to the attention layer.
query_inputs: optional Tensor of shape [batch_size, num_steps,
output_dim_size]. Query inputs to the attention layer. Set when
query_inputs is different from the inputs argument.
state: optional CompressedMemoryState or a Tensor of shape [batch_size,
memory_size, dim_size] concatenated to the inputs. Set when attend to
the memory from previous steps.
is_training: if currently training.
      dropout_keep_prob: keep probability for the dropout applied to the
        attention weights.
key_value_inputs: optional Tensor of shape [batch_size, num_steps,
output_dim_size]. It is used as the key and value of the multihead
attention. Set when the key and value are different from the inputs
argument.
Returns:
output: the result Tensor of shape
[batch_size, num_steps, output_dim_size].
attention_state: named tuple of AttentionState.
"""
if key_value_inputs is not None and state is not None:
raise ValueError('Only one of the key_value_input and state is needed.')
embedding_size = self._value_size * self._num_heads
q_inputs = inputs if query_inputs is None else query_inputs
# Denoted by L. If query_inputs is None, L = N.
_, query_size = q_inputs.get_shape().as_list()[:2]
if key_value_inputs is not None:
k_inputs = key_value_inputs
v_inputs = k_inputs
elif state is not None:
if isinstance(state, CompressedMemoryState):
state_memory_list = [state.compressed_memory, state.episodic_memory]
else:
state_memory_list = [state]
k_inputs = tf.concat(state_memory_list + [inputs], 1)
v_inputs = k_inputs
else:
k_inputs = inputs
v_inputs = inputs
# Batch size denoted by B
batch_size = tf.shape(inputs)[0]
# Chunk_size denoted by N
chunk_size = inputs.get_shape().as_list()[1]
# Denoted by N + M
att_size = k_inputs.get_shape().as_list()[1]
if self._positional_encodings and not self._use_relative_positions:
if len(self._positional_encodings) != 1:
raise ValueError(
'Absolute positional encodings only supported for 1 memory. '
'Found %i.' % len(self._positional_encodings))
key_positions, query_positions = self._positional_encodings[0]
k_inputs += key_positions
q_inputs += query_positions
# [B, H, L, K]
q = self.multihead_linear(q_inputs, 'query')
# [B, H, N + M, K]
k = self.multihead_linear(k_inputs, 'key')
# [B, H, N + M, V]
v = self.multihead_linear(v_inputs, 'value')
# Scaling the dot-product
if self._scaling:
q *= self._key_size**-0.5
# [B, H, L, N + M]
if self._use_relative_positions:
r_w_bias = tf.get_variable(
'r_w_bias', [1, self._num_heads, 1, self._key_size],
dtype=inputs.dtype)
content_logits = tf.matmul(q + r_w_bias, k, transpose_b=True)
all_relative_logits = []
# Loop over multiple positional encodings, for the case of multiple
# memory types.
for i, positional_encodings in enumerate(self._positional_encodings):
key_positions, query_positions = positional_encodings
if key_positions.get_shape().as_list()[-1] != att_size:
key_positions = key_positions[:, -att_size:] # Crop to layer mem size
is_final = i == len(self._positional_encodings) - 1
suffix = '' if is_final else '_%d' % i
relative_keys = self.multihead_linear(
key_positions, name='relative_keys' + suffix)
# [B, H, N, D]
r_r_bias = tf.get_variable(
'r_r_bias' + suffix, [1, self._num_heads, 1, self._key_size],
dtype=inputs.dtype)
relative_keys = tf.tile(relative_keys, [batch_size, 1, 1, 1])
relative_logits = tf.matmul(
q + r_r_bias, relative_keys, transpose_b=True)
relative_logits = rel_shift(relative_logits)
if not is_final: # Include relative positions for input sequence.
relative_logits = relative_logits[:, :, :, :-chunk_size]
all_relative_logits.append(relative_logits)
all_relative_logits = tf.concat(all_relative_logits, 3)
logits = content_logits + all_relative_logits
else:
# [B, H, N, N + M]
logits = tf.matmul(q, k, transpose_b=True)
content_logits = logits
if self._mask is not None:
if self._mask.get_shape().as_list()[-1] != att_size:
mask = self._mask[:, :, :, -att_size:]
else:
mask = self._mask
logits += mask
weights = tf.nn.softmax(logits)
if is_training:
weights = tf.nn.dropout(weights, dropout_keep_prob)
# [B, L, H, V], where V is value_size
output_transpose = tf.einsum('bhij,bhjk->bihk', weights, v)
# [B, L, H, V] -> [B, L, HV]
attended_inputs = basic.BatchReshape([query_size, embedding_size])(
output_transpose)
# Apply final mlp to mix information between heads.
output = basic.BatchApply(basic.Linear(embedding_size))(attended_inputs)
attention_state = AttentionState(
queries=q,
keys=k,
values=v,
weights=weights,
logits=content_logits,
embeddings=inputs,
read_words=output)
return output, attention_state
class TransformerTower(base.AbstractModule):
"""Transformer tower.
Deep residual network using blocks of attention and MLPs, specified in
Vaswani et al. 2017.
"""
def __init__(self,
value_size,
num_heads,
num_layers,
causal=True,
key_size=None,
shared_attention=False,
output_size=None,
mlp_hidden_sizes=tuple([1024]),
dropout_rate=0.1,
use_relative_positions=True,
clamp_time_range=0,
same_attention_length=False,
layer_norm='input',
name='transformer_tower'):
"""Initializes TransformerTower.
Args:
value_size: dimensionality of values per-head.
num_heads: number of attention heads.
num_layers: number of transformer blocks, where each block contains a
multi-head attention layer and an MLP.
causal: if True, applies a causal mask.
key_size: optional dimensionality of key size. If unspecified then it is
set to `value_size`.
shared_attention: if True, attention params are shared across all layers.
output_size: if set, the desired output dimensionality. By default the
output size is `value_size` x `num_heads`.
mlp_hidden_sizes: tuple containing dimensionality of mlp layer(s). If
multiple values are specified, the mlp contains multiple layers for each
transformer block.
dropout_rate: dropout rate applied to hidden activations, attention, and
positional encodings.
use_relative_positions: if False, applies absolute positional encodings.
If true, uses relative positional encodings from Dai et al. 2019.
clamp_time_range: clamps max temporal positional encoding if specified.
same_attention_length: if True, attention is masked to ensure each
position in the sequence contains the same length of attention.
layer_norm: Where to apply layer-norm in Transformer block. Can be one of
'input' (Vaswani et al. 2017), 'output', or 'both'.
name: name of variable scope.
"""
super(TransformerTower, self).__init__(name=name)
self._causal = causal
self._mask = None
if key_size is None:
key_size = value_size
self._key_size = key_size
self._value_size = value_size
self._shared_attention = shared_attention
self._num_heads = num_heads
self._num_layers = num_layers
self._output_size = output_size
self._embedding_size = self._value_size * self._num_heads
self._mlp_hidden_sizes = list(mlp_hidden_sizes) + [self._embedding_size]
self._multihead_attention = None
self._object_embeddings = None
self._dropout_rate = dropout_rate
self._positional_encodings = None
self._use_relative_positions = use_relative_positions
self._clamp_time_range = clamp_time_range
self._same_attention_length = same_attention_length
self._layer_norm = layer_norm
self._attention_modules = []
self._object_mlps = []
def get_sublayers(self, is_training):
if self._multihead_attention is None or not self._shared_attention:
attention_module = MultiheadAttention(
value_size=self._value_size,
key_size=self._key_size,
num_heads=self._num_heads,
mask=self._mask,
positional_encodings=self._positional_encodings,
use_relative_positions=self._use_relative_positions,
init_std=2. / np.sqrt(self._num_layers),
)
self._multihead_attention = ResidualDropoutWrapper(
attention_module, self._dropout_rate, layer_norm=self._layer_norm)
mlp = default_mlp(
self._mlp_hidden_sizes, init_std=2. / np.sqrt(self._num_layers))
object_mlp = ResidualDropoutWrapper(
mlp, self._dropout_rate, layer_norm=self._layer_norm)
self._attention_modules.append(attention_module)
self._object_mlps.append(mlp)
return self._multihead_attention, object_mlp
def _build(self,
inputs,
state=None,
condition=None,
is_training=True,
final_layer_key_value_inputs=None):
"""Calculates multi-layer self attention and mlp transformation.
Args:
inputs: Tensor of shape [batch_size, num_steps, dim_size].
state: optional list of length num_layers of tensors of shape
[batch_size, memory_size, dim_size].
condition: optional tensor to condition on. The shape is shape
[batch_size, dim_size].
is_training: If true, dropout is applied.
final_layer_key_value_inputs: optional Tensor to be used as the key and
value for the final multi-head attention layer of shape
[batch_size, num_steps, dim_size]. Useful when the tower is a Seq2Seq
decoder and it can attend to encoder outputs.
Returns:
output: tensor of shape [batch_size, num_steps, output_dim_size].
state: list of length `num_layers` containing AttentionState tuples.
"""
# inputs: [B, N, F]
if final_layer_key_value_inputs is not None and state is not None and len(
state) == (self._num_layers - 1):
      raise ValueError('When final_layer_key_value_inputs is set, exclude '
                       'the state of the last layer.')
if condition is not None:
condition_tile = tf.tile(
tf.expand_dims(condition, 1), [1, tf.shape(inputs)[1], 1])
inputs = tf.concat([inputs, condition_tile], -1)
# Map inputs to be of `embedding_size` dimension.
if inputs.get_shape().as_list()[-1] != self._embedding_size:
inputs = default_mlp([self._embedding_size], activate_final=True)(
inputs,
is_training=is_training,
dropout_keep_prob=1 - self._dropout_rate)
if state is None:
memory_sizes = [0]
elif isinstance(state[0], CompressedMemoryState):
cm_mem_size = max(_memory_size(s.compressed_memory) for s in state)
em_mem_size = max(_memory_size(s.episodic_memory) for s in state)
memory_sizes = [cm_mem_size, em_mem_size]
else:
memory_sizes = [max([_memory_size(s) for s in state])]
chunk_size = inputs.get_shape().as_list()[1]
self._positional_encodings = []
# Creates positional encodings for different memory types.
for i, memory_size in enumerate(memory_sizes):
seq_len = chunk_size + memory_size
key_positions = get_position_encodings(
sequence_length=seq_len,
hidden_size=inputs.get_shape().as_list()[2],
clamp_value=self._clamp_time_range,
)
if is_training:
key_positions = tf.nn.dropout(key_positions, rate=self._dropout_rate)
key_positions = tf.cast(key_positions, dtype=inputs.dtype)
query_positions = key_positions[:, -chunk_size:, :]
self._positional_encodings.append((key_positions, query_positions))
if self._causal:
self._mask = create_mask(inputs, state, self._same_attention_length)
layer_i_inputs = inputs
attention_states = []
key_value_inputs = None
for i in range(self._num_layers):
with tf.variable_scope('layer_%d' % i, reuse=tf.AUTO_REUSE):
multihead_attention, object_mlp = self.get_sublayers(is_training)
# Multihead attention with residuals.
state_i = None if state is None else state[i]
if i == (self._num_layers -
1) and final_layer_key_value_inputs is not None:
          # When final_layer_key_value_inputs is set, the final layer of
          # attention uses it as the key & value, so no state is needed.
key_value_inputs = final_layer_key_value_inputs
state_i = None
attention_outputs, attention_state = multihead_attention(
layer_i_inputs,
state=state_i,
is_training=is_training,
dropout_keep_prob=1. - self._dropout_rate,
key_value_inputs=key_value_inputs)
attention_states.append(attention_state)
# Feed-forward with residuals.
output = object_mlp(
attention_outputs,
is_training=is_training,
dropout_keep_prob=1 - self._dropout_rate)
layer_i_inputs = output
if self._output_size is not None:
output = basic.BatchApply(
basic.Linear(self._output_size, use_bias=False))(
output)
return output, attention_states
def attention_module(self, i):
"""Returns the i-th layer attention module."""
return self._attention_modules[i]
| deepmind-research-master | object_attention_for_reasoning/transformer.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenizer implementation mapping strings to their UTF-8 bytes."""
from typing import Union
import numpy as np
class BytesTokenizer:
"""Tokenizes string to utf-8 bytes."""
def __init__(self):
self._num_reserved_tokens = 6 # PAD, BOS, EOS, MASK, CLS, SEP
def to_string(self, inputs: np.ndarray) -> str:
inputs_no_special = (
inputs[inputs >= self._num_reserved_tokens] - self._num_reserved_tokens)
decoded_bytes = inputs_no_special.astype(np.uint8).tobytes()
return decoded_bytes.decode('utf-8', errors='replace')
def to_int(self, inputs: Union[str, bytes]) -> np.ndarray:
if isinstance(inputs, str):
inputs = inputs.encode('utf-8')
encoded = np.frombuffer(inputs, np.uint8).astype(np.int32)
encoded = encoded + self._num_reserved_tokens
return encoded.astype(np.int32)
@property
def vocab_size(self) -> int:
return 256 + self._num_reserved_tokens
@property
def pad_token(self) -> int:
return 0
@property
def bos_token(self) -> int:
return 1
@property
def eos_token(self) -> int:
return 2
@property
def mask_token(self) -> int:
return 3
@property
def cls_token(self) -> int:
return 4
@property
def sep_token(self) -> int:
return 5
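# Illustrative usage sketch (not part of the original module): strings round-
# trip through the tokenizer, since byte values are simply offset by the
# number of reserved special tokens.
def _example_round_trip() -> np.ndarray:
  tokenizer = BytesTokenizer()
  ids = tokenizer.to_int('Perceiver')
  assert tokenizer.to_string(ids) == 'Perceiver'
  return ids  # UTF-8 byte values shifted by 6, as an int32 array.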
| deepmind-research-master | perceiver/bytes_tokenizer.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Perceiver architecture and components."""
import abc
import math
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
from perceiver import io_processors
from perceiver import position_encoding
# -----------------------------------------------------------
# ---------------------- Primitives -----------------------
# -----------------------------------------------------------
def attend(q, k, v, dropout_prob=0.0, attention_mask=None):
"""Computes multi-head attention using a query, key and value.
Args:
q: Query with shape [batch, q_indices, num_heads, head_dim].
k: Key with shape [batch, kv_indices, num_heads, head_dim].
v: Value with shape [batch, kv_indices, num_heads, head_dim].
dropout_prob: dropout probability on the attention weights.
attention_mask: Array of shape [batch, q_indices, kv_indices] indicating
which attentions are valid
Returns:
Output of the attention with shape [batch, q_indices, hiddens]
"""
batch, q_indices, num_heads, q_head_dim = q.shape
_, _, _, v_head_dim = v.shape
hiddens = num_heads * v_head_dim
attention = jnp.einsum('bthd,bThd->bhtT', q, k)
scale = 1. / math.sqrt(q_head_dim)
attention *= scale
if attention_mask is not None:
# Use large_k instead of np.NINF because np.NINF breaks for causal-masked
# left-padded sampling.
large_k = jnp.array(1e4 if attention.dtype == jnp.float16 else 1e30,
dtype=attention.dtype)
attention = jnp.where(attention_mask[:, None, :, :], attention,
-large_k)
normalized = jax.nn.softmax(attention)
if dropout_prob > 0:
normalized = hk.dropout(hk.next_rng_key(), dropout_prob, normalized)
summed = jnp.einsum('bhtT,bThd->bthd', normalized, v)
summed = jnp.reshape(summed, [batch, q_indices, hiddens])
if attention_mask is not None:
    # If all attended tokens are masked (i.e. some rows of the logits are
    # completely masked out), the softmax gives a uniform row and we would
    # obtain non-zero outputs where they should be zero. We force those
    # outputs to zero here.
wipe_attn = jnp.all(
attention_mask == 0, axis=2, keepdims=True) # shape (B, T, 1)
summed = jnp.where(wipe_attn, jnp.zeros_like(summed), summed)
return summed
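# Illustrative sketch (not part of the original module): with the default
# dropout_prob of 0.0 no hk.dropout call is made, so `attend` can be exercised
# directly, outside of an hk.transform.
def _example_attend():
  rng = jax.random.PRNGKey(0)
  q = jax.random.normal(rng, (1, 4, 2, 8))  # [batch, q_indices, heads, dim]
  k = jax.random.normal(rng, (1, 6, 2, 8))  # [batch, kv_indices, heads, dim]
  v = jax.random.normal(rng, (1, 6, 2, 8))
  return attend(q, k, v)  # Shape (1, 4, 16) = [batch, q_indices, heads * dim].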
def conv_1d(
output_channels,
init_scale=1.0,
with_bias=True,
name=None):
"""A 1D convolution."""
return hk.Linear(
output_size=output_channels,
with_bias=with_bias,
w_init=hk.initializers.VarianceScaling(init_scale),
name=name)
def layer_norm(x, name=None):
return hk.LayerNorm(axis=-1, create_scale=True, create_offset=True,
name=name)(x)
def make_cross_attention_mask(query_mask, kv_mask):
batch_size, query_len = query_mask.shape
_, key_len = kv_mask.shape
mask = jax.vmap(jnp.outer)(query_mask, kv_mask)
assert mask.shape == (batch_size, query_len, key_len)
return mask
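# Illustrative sketch (not part of the original module): combining a per-token
# query padding mask with a key/value padding mask into the [batch, q, kv]
# attention mask consumed by `attend`.
def _example_cross_attention_mask():
  query_mask = jnp.array([[1, 1, 0]])  # [batch=1, query_len=3]
  kv_mask = jnp.array([[1, 0]])        # [batch=1, key_len=2]
  return make_cross_attention_mask(query_mask, kv_mask)  # Shape (1, 3, 2).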
# -----------------------------------------------------------
# ----------------------- Modules -------------------------
# -----------------------------------------------------------
class Attention(hk.Module):
"""Multi-headed {cross, self}-attention."""
def __init__(self,
num_heads=8,
init_scale=1.0,
with_final_bias=True,
final_init_scale_multiplier=1.,
dropout_prob=0.0,
qk_channels=None,
v_channels=None,
output_channels=None,
name=None):
super(Attention, self).__init__(name=name)
self._num_heads = num_heads
self._init_scale = init_scale
self._with_final_bias = with_final_bias
self._final_init_scale = final_init_scale_multiplier * init_scale
self._dropout_prob = dropout_prob
# If none of these are passed, the Q input determines the output shape:
self._qk_channels = qk_channels
self._v_channels = v_channels
self._output_channels = output_channels
def __call__(self, inputs_q, inputs_kv, attention_mask=None):
# Q and K must have the same number of channels.
# Default to preserving Q's input's shape.
if self._qk_channels is None:
self._qk_channels = inputs_q.shape[-1]
# V's num_channels determines the shape of the output of QKV-attention.
# Default to the same number of channels used in the key-query operation.
if self._v_channels is None:
self._v_channels = self._qk_channels
# Project the output of QKV attention to a desired number of channels.
# Default to the same number as the output of the QKV attention operation.
if self._output_channels is None:
self._output_channels = self._v_channels
if self._qk_channels % self._num_heads != 0:
raise ValueError(f'qk_channels ({self._qk_channels}) must be divisible by'
f' num_heads ({self._num_heads}).')
if self._v_channels % self._num_heads != 0:
raise ValueError(f'v_channels ({self._v_channels}) must be divisible by'
f' num_heads ({self._num_heads}).')
qk_channels_per_head = self._qk_channels // self._num_heads
v_channels_per_head = self._v_channels // self._num_heads
# Project QKV to a common feature dimension.
q = conv_1d(self._qk_channels, init_scale=self._init_scale)(inputs_q)
k = conv_1d(self._qk_channels, init_scale=self._init_scale)(inputs_kv)
v = conv_1d(self._v_channels, init_scale=self._init_scale)(inputs_kv)
# Reshape channels for multi-head attention.
batch, q_time, _ = q.shape
_, kv_time, _ = k.shape
q = jnp.reshape(q, [batch, q_time, self._num_heads, qk_channels_per_head])
k = jnp.reshape(k, [batch, kv_time, self._num_heads, qk_channels_per_head])
v = jnp.reshape(v, [batch, kv_time, self._num_heads, v_channels_per_head])
result = attend(q, k, v, dropout_prob=self._dropout_prob,
attention_mask=attention_mask)
return conv_1d(
self._output_channels,
with_bias=self._with_final_bias,
init_scale=self._final_init_scale)(result)
class MLP(hk.Module):
"""A Transformer-style dense module to follow attention."""
def __init__(self,
widening_factor=4,
dropout_prob=0.0,
init_scale=1.,
name=None):
super(MLP, self).__init__(name=name)
self._widening_factor = widening_factor
self._dropout_prob = dropout_prob
self._init_scale = init_scale
def __call__(self, x, *, is_training):
dropout_prob = self._dropout_prob if is_training else 0.0
output_channels = x.shape[-1]
x = conv_1d(
output_channels=self._widening_factor * output_channels,
init_scale=self._init_scale)(x)
x = jax.nn.gelu(x)
x = conv_1d(
output_channels=output_channels,
init_scale=self._init_scale)(x)
return hk.dropout(hk.next_rng_key(), dropout_prob, x)
class SelfAttention(hk.Module):
"""A self-attention module, including a dense block."""
def __init__(self,
widening_factor=4,
dropout_prob=0.0,
dropout_attn_prob=0.0,
num_heads=8,
att_init_scale=1.0,
dense_init_scale=1.0,
qk_channels=None,
v_channels=None,
name=None):
super(SelfAttention, self).__init__(name=name)
self._widening_factor = widening_factor
self._dropout_prob = dropout_prob
self._dropout_attn_prob = dropout_attn_prob
self._num_heads = num_heads
self._att_init_scale = att_init_scale
self._dense_init_scale = dense_init_scale
self._qk_channels = qk_channels
self._v_channels = v_channels
def __call__(self,
inputs,
*,
attention_mask=None,
is_training):
dropout_prob = self._dropout_prob if is_training else 0.0
dropout_attn_prob = self._dropout_attn_prob if is_training else 0.0
x = inputs
qkv_inputs = layer_norm(inputs)
attention = Attention(
num_heads=self._num_heads,
init_scale=self._att_init_scale,
qk_channels=self._qk_channels,
v_channels=self._v_channels,
dropout_prob=dropout_attn_prob)(qkv_inputs, qkv_inputs,
attention_mask=attention_mask)
attention = hk.dropout(hk.next_rng_key(), dropout_prob, attention)
x += attention
x += MLP(
widening_factor=self._widening_factor,
dropout_prob=dropout_prob,
init_scale=self._dense_init_scale)(
layer_norm(x), is_training=is_training)
return x
class CrossAttention(hk.Module):
"""A cross-attention module, including a dense block."""
def __init__(self,
widening_factor=1,
dropout_prob=0.0,
dropout_attn_prob=0.0,
num_heads=8,
att_init_scale=1.0,
dense_init_scale=1.0,
shape_for_attn='kv',
use_query_residual=True,
qk_channels=None,
v_channels=None,
name=None):
super(CrossAttention, self).__init__(name=name)
self._widening_factor = widening_factor
self._dropout_prob = dropout_prob
self._dropout_attn_prob = dropout_attn_prob
self._num_heads = num_heads
self._att_init_scale = att_init_scale
self._dense_init_scale = dense_init_scale
self._shape_for_attn = shape_for_attn
self._use_query_residual = use_query_residual
self._qk_channels = qk_channels
self._v_channels = v_channels
def __call__(self,
inputs_q,
inputs_kv,
*,
attention_mask=None,
is_training):
dropout_prob = self._dropout_prob if is_training else 0.0
dropout_attn_prob = self._dropout_attn_prob if is_training else 0.0
output_channels = inputs_q.shape[-1]
if self._shape_for_attn == 'q':
qk_channels = inputs_q.shape[-1]
elif self._shape_for_attn == 'kv':
qk_channels = inputs_kv.shape[-1]
else:
raise ValueError(f'Unknown value {self._shape_for_attn} for '
                       'shape_for_attn.')
v_channels = None
if self._qk_channels is not None:
qk_channels = self._qk_channels
if self._v_channels is not None:
v_channels = self._v_channels
attention = Attention(
num_heads=self._num_heads,
init_scale=self._att_init_scale,
dropout_prob=dropout_attn_prob,
qk_channels=qk_channels,
v_channels=v_channels,
output_channels=output_channels)(layer_norm(inputs_q),
layer_norm(inputs_kv),
attention_mask=attention_mask)
attention = hk.dropout(hk.next_rng_key(), dropout_prob, attention)
# Optionally include a residual to the query.
# Consider omitting the residual if the semantics of query and output
# are different, e.g. if queries are positions and outputs are pixels.
if self._use_query_residual:
x = inputs_q + attention
else:
x = attention
x += MLP(
widening_factor=self._widening_factor,
dropout_prob=dropout_prob,
init_scale=self._dense_init_scale)(
layer_norm(x), is_training=is_training)
return x
# -----------------------------------------------------------
# ----------------------- Perceiver -----------------------
# -----------------------------------------------------------
class Perceiver(hk.Module):
"""The Perceiver: a scalable, fully attentional architecture."""
def __init__(
self,
encoder,
decoder,
input_preprocessor=None,
output_postprocessor=None,
name='perceiver'):
super().__init__(name=name)
# Feature and task parameters:
self._input_preprocessor = input_preprocessor
self._output_postprocessor = output_postprocessor
self._decoder = decoder
self._encoder = encoder
def __call__(self, inputs, *, is_training, subsampled_output_points=None,
pos=None, input_mask=None, query_mask=None):
if self._input_preprocessor:
network_input_is_1d = self._encoder._input_is_1d
inputs, modality_sizes, inputs_without_pos = self._input_preprocessor(
inputs, pos=pos, is_training=is_training,
network_input_is_1d=network_input_is_1d)
else:
modality_sizes = None
inputs_without_pos = None
# Get the queries for encoder and decoder cross-attends.
encoder_query = self._encoder.latents(inputs)
decoder_query = self._decoder.decoder_query(
inputs, modality_sizes, inputs_without_pos,
subsampled_points=subsampled_output_points)
# Run the network forward:
z = self._encoder(inputs, encoder_query,
is_training=is_training, input_mask=input_mask)
_, output_modality_sizes = self._decoder.output_shape(
inputs)
output_modality_sizes = output_modality_sizes or modality_sizes
outputs = self._decoder(
decoder_query, z, is_training=is_training, query_mask=query_mask)
if self._output_postprocessor:
outputs = self._output_postprocessor(outputs, is_training=is_training,
modality_sizes=output_modality_sizes)
return outputs
class PerceiverEncoder(hk.Module):
"""The Perceiver Encoder: a scalable, fully attentional encoder."""
def __init__(
self,
# The encoder has a total of
# num_self_attends_per_block * num_blocks
# self-attend layers. We share weights between blocks.
num_self_attends_per_block=6,
num_blocks=8,
z_index_dim=512,
num_z_channels=1024,
qk_channels=None,
v_channels=None,
num_cross_attend_heads=1,
num_self_attend_heads=8,
cross_attend_widening_factor=1,
self_attend_widening_factor=1,
dropout_prob=0.0,
z_pos_enc_init_scale=0.02,
cross_attention_shape_for_attn='kv',
use_query_residual=True,
name='perceiver_encoder'):
super().__init__(name=name)
# Check that we can use multihead-attention with these shapes.
if num_z_channels % num_self_attend_heads != 0:
raise ValueError(f'num_z_channels ({num_z_channels}) must be divisible by'
f' num_self_attend_heads ({num_self_attend_heads}).')
if num_z_channels % num_cross_attend_heads != 0:
raise ValueError(f'num_z_channels ({num_z_channels}) must be divisible by'
f' num_cross_attend_heads ({num_cross_attend_heads}).')
self._input_is_1d = True
self._num_blocks = num_blocks
# Construct the latent array initial state.
self.z_pos_enc = position_encoding.TrainablePositionEncoding(
index_dim=z_index_dim,
num_channels=num_z_channels,
init_scale=z_pos_enc_init_scale)
# Construct the cross attend:
self.cross_attend = CrossAttention(
dropout_prob=dropout_prob,
num_heads=num_cross_attend_heads,
widening_factor=cross_attend_widening_factor,
shape_for_attn=cross_attention_shape_for_attn,
qk_channels=qk_channels,
v_channels=v_channels,
use_query_residual=use_query_residual)
# Construct the block of self-attend layers.
# We get deeper architectures by applying this block more than once.
self.self_attends = []
for _ in range(num_self_attends_per_block):
self_attend = SelfAttention(
num_heads=num_self_attend_heads,
dropout_prob=dropout_prob,
qk_channels=qk_channels,
v_channels=v_channels,
widening_factor=self_attend_widening_factor)
self.self_attends.append(self_attend)
def latents(self, inputs):
# Initialize the latent array for the initial cross-attend.
return self.z_pos_enc(batch_size=inputs.shape[0])
def __call__(self, inputs, z, *, is_training, input_mask=None):
attention_mask = None
if input_mask is not None:
attention_mask = make_cross_attention_mask(
query_mask=jnp.ones(z.shape[:2], dtype=jnp.int32),
kv_mask=input_mask)
z = self.cross_attend(z, inputs, is_training=is_training,
attention_mask=attention_mask)
for _ in range(self._num_blocks):
for self_attend in self.self_attends:
z = self_attend(z, is_training=is_training)
return z
class AbstractPerceiverDecoder(hk.Module, metaclass=abc.ABCMeta):
"""Abstract Perceiver decoder."""
@abc.abstractmethod
def decoder_query(self, inputs, modality_sizes=None, inputs_without_pos=None,
subsampled_points=None):
raise NotImplementedError
@abc.abstractmethod
def output_shape(self, inputs):
raise NotImplementedError
@abc.abstractmethod
def __call__(self, query, z, *, is_training, query_mask=None):
raise NotImplementedError
class ProjectionDecoder(AbstractPerceiverDecoder):
"""Baseline projection decoder (no cross-attention)."""
def __init__(
self,
num_classes,
final_avg_before_project=False,
name='projection_decoder'):
super().__init__(name=name)
self._final_avg_before_project = final_avg_before_project
self._num_classes = num_classes
self.final_layer = hk.Linear(
num_classes, w_init=jnp.zeros, name='logits')
def decoder_query(self, inputs, modality_sizes=None, inputs_without_pos=None,
subsampled_points=None):
return None
def output_shape(self, inputs):
return ((inputs.shape[0], self._num_classes), None)
def __call__(self, query, z, *, is_training, query_mask=None):
# b x n_z x c -> b x c
z = jnp.mean(z, axis=1, dtype=z.dtype)
# b x c -> b x n_logits
logits = self.final_layer(z)
return logits
class BasicDecoder(AbstractPerceiverDecoder):
"""Cross-attention-based decoder."""
def __init__(self,
output_num_channels,
position_encoding_type='trainable',
# Ignored if position_encoding_type == 'none':
output_index_dims=None,
subsampled_index_dims=None,
num_z_channels=1024,
qk_channels=None,
v_channels=None,
use_query_residual=False,
output_w_init=None,
concat_preprocessed_input=False,
num_heads=1,
name='basic_decoder',
final_project=True,
**position_encoding_kwargs):
super().__init__(name=name)
self._position_encoding_type = position_encoding_type
# If `none`, the decoder will not construct any position encodings.
    # You should construct your own when querying the decoder.
self.output_pos_enc = None
if self._position_encoding_type != 'none':
self.output_pos_enc = position_encoding.build_position_encoding(
position_encoding_type,
index_dims=output_index_dims,
**position_encoding_kwargs)
self._output_index_dim = output_index_dims
if subsampled_index_dims is None:
subsampled_index_dims = output_index_dims
self._subsampled_index_dims = subsampled_index_dims
self._output_num_channels = output_num_channels
self._output_w_init = output_w_init
self._use_query_residual = use_query_residual
self._qk_channels = qk_channels
self._v_channels = v_channels
self._final_project = final_project
self._num_heads = num_heads
self._concat_preprocessed_input = concat_preprocessed_input
def output_shape(self, inputs):
    return ((inputs.shape[0], self._subsampled_index_dims,
             self._output_num_channels),
            None)
def decoder_query(self, inputs, modality_sizes=None,
inputs_without_pos=None, subsampled_points=None):
assert self._position_encoding_type != 'none' # Queries come from elsewhere
if subsampled_points is not None:
# unravel_index returns a tuple (x_idx, y_idx, ...)
# stack to get the [n, d] tensor of coordinates
pos = jnp.stack(
jnp.unravel_index(subsampled_points, self._output_index_dim),
axis=1)
# Map these coordinates to [-1, 1]
pos = -1 + 2 * pos / jnp.array(self._output_index_dim)[None, :]
pos = jnp.broadcast_to(pos[None],
[inputs.shape[0], pos.shape[0], pos.shape[1]])
pos_emb = self.output_pos_enc(
batch_size=inputs.shape[0],
pos=pos)
pos_emb = jnp.reshape(pos_emb, [pos_emb.shape[0], -1, pos_emb.shape[-1]])
else:
pos_emb = self.output_pos_enc(batch_size=inputs.shape[0])
if self._concat_preprocessed_input:
if inputs_without_pos is None:
raise ValueError('Value is required for inputs_without_pos if'
' concat_preprocessed_input is True')
pos_emb = jnp.concatenate([inputs_without_pos, pos_emb], axis=-1)
return pos_emb
def __call__(self, query, z, *, is_training,
query_mask=None):
# Cross-attention decoding.
# key, value: B x N x K; query: B x M x K
# Attention maps -> B x N x M
# Output -> B x M x K
# Construct cross attention and linear layer lazily, in case we don't need
# them.
attention_mask = None
if query_mask is not None:
attention_mask = make_cross_attention_mask(
query_mask=query_mask,
kv_mask=jnp.ones(z.shape[:2], dtype=jnp.int32))
decoding_cross_attn = CrossAttention(
dropout_prob=0.0,
num_heads=self._num_heads,
widening_factor=1,
shape_for_attn='kv',
qk_channels=self._qk_channels,
v_channels=self._v_channels,
use_query_residual=self._use_query_residual)
final_layer = hk.Linear(
self._output_num_channels, w_init=self._output_w_init, name='output')
output = decoding_cross_attn(query, z, is_training=is_training,
attention_mask=attention_mask)
if self._final_project:
output = final_layer(output)
return output
class ClassificationDecoder(AbstractPerceiverDecoder):
"""Cross-attention based classification decoder.
Light-weight wrapper of `BasicDecoder` for logit output.
"""
def __init__(self,
num_classes,
name='classification_decoder',
**decoder_kwargs):
super().__init__(name=name)
self._num_classes = num_classes
self.decoder = BasicDecoder(
output_index_dims=(1,), # Predict a single logit array.
output_num_channels=num_classes,
**decoder_kwargs)
def decoder_query(self, inputs, modality_sizes=None,
inputs_without_pos=None, subsampled_points=None):
return self.decoder.decoder_query(inputs, modality_sizes,
inputs_without_pos,
subsampled_points=subsampled_points)
def output_shape(self, inputs):
return (inputs.shape[0], self._num_classes), None
def __call__(self, query, z, *, is_training, query_mask=None):
# B x 1 x num_classes -> B x num_classes
logits = self.decoder(query, z, is_training=is_training)
return logits[:, 0, :]
class MultimodalDecoder(AbstractPerceiverDecoder):
"""Multimodal decoding by composing uni-modal decoders.
The modalities argument of the constructor is a dictionary mapping modality
name to the decoder of that modality. That decoder will be used to construct
queries for that modality. However, there is a shared cross attention across
all modalities, using the concatenated per-modality query vectors.
"""
def __init__(self, modalities, num_outputs, output_num_channels,
min_padding_size=2,
subsampled_index_dims=None,
name='multimodal_decoder', **decoder_kwargs):
super().__init__(name=name)
self._modalities = modalities
self._subsampled_index_dims = subsampled_index_dims
self._min_padding_size = min_padding_size
self._output_num_channels = output_num_channels
self._num_outputs = num_outputs
self._decoder = BasicDecoder(
output_index_dims=(num_outputs,),
output_num_channels=output_num_channels,
position_encoding_type='none',
**decoder_kwargs)
def decoder_query(self, inputs, modality_sizes, inputs_without_pos=None,
subsampled_points=None):
# Partition the flat inputs among the different modalities
inputs = io_processors.restructure(modality_sizes, inputs)
# Obtain modality-specific decoders' queries
subsampled_points = subsampled_points or dict()
decoder_queries = dict()
for modality, decoder in self._modalities.items():
# Get input_without_pos for this modality if it exists.
input_without_pos = None
if inputs_without_pos is not None:
input_without_pos = inputs_without_pos.get(modality, None)
decoder_queries[modality] = decoder.decoder_query(
inputs=inputs[modality],
modality_sizes=None,
inputs_without_pos=input_without_pos,
subsampled_points=subsampled_points.get(modality, None)
)
# Pad all queries with trainable position encodings to make them
# have the same channels
num_channels = (max(query.shape[2] for query in decoder_queries.values())
+ self._min_padding_size)
def embed(modality, x):
x = jnp.reshape(x, [x.shape[0], np.prod(x.shape[1:-1]), x.shape[-1]])
pos = position_encoding.TrainablePositionEncoding(
1, num_channels=num_channels - x.shape[2],
init_scale=0.02, name=f'{modality}_padding')(x.shape[0])
pos = jnp.broadcast_to(
pos, [x.shape[0], x.shape[1], num_channels - x.shape[2]])
return jnp.concatenate([x, pos], axis=2)
# Apply a predictable ordering to the modalities
return jnp.concatenate([
embed(modality, decoder_queries[modality])
for modality in sorted(self._modalities.keys())
], axis=1)
def output_shape(self, inputs):
if self._subsampled_index_dims is not None:
subsampled_index_dims = sum(self._subsampled_index_dims.values())
else:
subsampled_index_dims = self._num_outputs
return ((inputs.shape[0], subsampled_index_dims, self._output_num_channels),
self._subsampled_index_dims)
def __call__(self, query, z, *, is_training, query_mask=None):
    # Output is B x num_outputs x output_num_channels (returned unchanged).
return self._decoder(query, z, is_training=is_training)
class BasicVideoAutoencodingDecoder(AbstractPerceiverDecoder):
"""Cross-attention based video-autoencoding decoder.
Light-weight wrapper of `BasicDecoder` with video reshaping logic.
"""
def __init__(self,
output_shape,
position_encoding_type,
name='basic_video_autoencoding_decoder',
**decoder_kwargs):
super().__init__(name=name)
if len(output_shape) != 4: # B, T, H, W
raise ValueError(f'Expected rank 4 output_shape, got {output_shape}.')
# Build the decoder components:
self._output_shape = output_shape
self._output_num_channels = decoder_kwargs['output_num_channels']
self.decoder = BasicDecoder(
output_index_dims=self._output_shape[1:4], # T*H*W
position_encoding_type=position_encoding_type,
**decoder_kwargs)
def decoder_query(self, inputs, modality_sizes=None,
inputs_without_pos=None, subsampled_points=None):
return self.decoder.decoder_query(inputs,
modality_sizes=modality_sizes,
inputs_without_pos=inputs_without_pos,
subsampled_points=subsampled_points)
def output_shape(self, inputs):
return ([inputs.shape[0]] + self._output_shape[1:] +
[self._output_num_channels], None)
def __call__(self, query, z, *, is_training, query_mask=None):
output = self.decoder(query, z, is_training=is_training)
output = jnp.reshape(output, self._output_shape + [output.shape[-1]])
return output
class FlowDecoder(AbstractPerceiverDecoder):
"""Cross-attention based flow decoder."""
def __init__(self,
output_image_shape,
output_num_channels=2,
rescale_factor=100.0,
name='flow_decoder',
**decoder_kwargs):
super().__init__(name=name)
self._output_image_shape = output_image_shape
self._output_num_channels = output_num_channels
self._rescale_factor = rescale_factor
self.decoder = BasicDecoder(
output_num_channels=output_num_channels,
**decoder_kwargs)
def output_shape(self, inputs):
# The channel dimensions of output here don't necessarily correspond to
# (u, v) of flow: they may contain dims needed for the post-processor.
return ((inputs.shape[0],) + tuple(self._output_image_shape) + (
self._output_num_channels,), None)
def decoder_query(
self, inputs, modality_sizes=None, inputs_without_pos=None,
subsampled_points=None):
if subsampled_points is not None:
raise ValueError("FlowDecoder doesn't support subsampling yet.")
# assumes merged in time
return inputs
def __call__(self, query, z, *, is_training, query_mask=None):
# Output flow and rescale.
preds = self.decoder(query, z, is_training=is_training)
preds /= self._rescale_factor
return preds.reshape([preds.shape[0]] + list(self._output_image_shape) +
[preds.shape[-1]])
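# Illustrative usage sketch (not part of the original module): wires a minimal
# image classifier from the modules above plus io_processors.ImagePreprocessor.
# Like the individual modules, it must be called inside hk.transform. The
# hyperparameters below are small placeholder values, not a published
# configuration.
def _example_image_classifier(images, *, is_training=False):
  """Returns [B, num_classes] logits for a [B, H, W, 3] image batch."""
  preprocessor = io_processors.ImagePreprocessor(
      prep_type='pixels',
      spatial_downsample=1,
      position_encoding_type='fourier',
      fourier_position_encoding_kwargs=dict(
          num_bands=32, concat_pos=True, sine_only=False))
  encoder = PerceiverEncoder(
      num_self_attends_per_block=4,
      num_blocks=2,
      z_index_dim=128,
      num_z_channels=256,
      num_self_attend_heads=8)
  decoder = ClassificationDecoder(
      num_classes=10,
      use_query_residual=True,
      trainable_position_encoding_kwargs=dict(
          num_channels=256, init_scale=0.02))
  model = Perceiver(
      encoder=encoder, decoder=decoder, input_preprocessor=preprocessor)
  return model(images, is_training=is_training)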
| deepmind-research-master | perceiver/perceiver.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Position encodings and utilities."""
import abc
import functools
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
def generate_fourier_features(
pos, num_bands, max_resolution=(224, 224),
concat_pos=True, sine_only=False):
"""Generate a Fourier frequency position encoding with linear spacing.
Args:
pos: The position of n points in d dimensional space.
A jnp array of shape [n, d].
num_bands: The number of bands (K) to use.
max_resolution: The maximum resolution (i.e. the number of pixels per dim).
A tuple representing resolution for each dimension
concat_pos: Concatenate the input position encoding to the Fourier features?
sine_only: Whether to use a single phase (sin) or two (sin/cos) for each
frequency band.
Returns:
    embedding: A jnp array of shape [n, n_channels]. If concat_pos is True
and sine_only is False, output dimensions are ordered as:
[dim_1, dim_2, ..., dim_d,
sin(pi*f_1*dim_1), ..., sin(pi*f_K*dim_1), ...,
sin(pi*f_1*dim_d), ..., sin(pi*f_K*dim_d),
cos(pi*f_1*dim_1), ..., cos(pi*f_K*dim_1), ...,
cos(pi*f_1*dim_d), ..., cos(pi*f_K*dim_d)],
where dim_i is pos[:, i] and f_k is the kth frequency band.
"""
min_freq = 1.0
# Nyquist frequency at the target resolution:
freq_bands = jnp.stack([
jnp.linspace(min_freq, res / 2, num=num_bands, endpoint=True)
for res in max_resolution], axis=0)
# Get frequency bands for each spatial dimension.
# Output is size [n, d * num_bands]
per_pos_features = pos[:, :, None] * freq_bands[None, :, :]
per_pos_features = jnp.reshape(per_pos_features,
[-1, np.prod(per_pos_features.shape[1:])])
if sine_only:
# Output is size [n, d * num_bands]
per_pos_features = jnp.sin(jnp.pi * (per_pos_features))
else:
# Output is size [n, 2 * d * num_bands]
per_pos_features = jnp.concatenate(
[jnp.sin(jnp.pi * per_pos_features),
jnp.cos(jnp.pi * per_pos_features)], axis=-1)
# Concatenate the raw input positions.
if concat_pos:
# Adds d bands to the encoding.
per_pos_features = jnp.concatenate([pos, per_pos_features], axis=-1)
return per_pos_features
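# Illustrative sketch (not part of the original API): shows the channel count
# produced by generate_fourier_features for a 2D grid. With concat_pos=True and
# sine_only=False the output has d + 2 * d * num_bands channels per position
# (d = 2 here). The grid size and num_bands are placeholder values.
def _example_fourier_features():
  pos = jnp.stack(
      jnp.meshgrid(jnp.linspace(-1., 1., 4), jnp.linspace(-1., 1., 4),
                   indexing='ij'),
      axis=-1).reshape([-1, 2])  # [16, 2] positions in [-1, 1].
  feats = generate_fourier_features(
      pos, num_bands=8, max_resolution=(4, 4), concat_pos=True)
  assert feats.shape == (16, 2 + 2 * 2 * 8)
  return feats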
def build_linear_positions(index_dims, output_range=(-1.0, 1.0)):
"""Generate an array of position indices for an N-D input array.
Args:
index_dims: The shape of the index dimensions of the input array.
output_range: The min and max values taken by each input index dimension.
Returns:
A jnp array of shape [index_dims[0], index_dims[1], .., index_dims[-1], N].
"""
def _linspace(n_xels_per_dim):
return jnp.linspace(
output_range[0], output_range[1],
num=n_xels_per_dim,
endpoint=True, dtype=jnp.float32)
dim_ranges = [
_linspace(n_xels_per_dim) for n_xels_per_dim in index_dims]
array_index_grid = jnp.meshgrid(*dim_ranges, indexing='ij')
return jnp.stack(array_index_grid, axis=-1)
class AbstractPositionEncoding(hk.Module, metaclass=abc.ABCMeta):
"""Abstract Perceiver decoder."""
@abc.abstractmethod
def __call__(self, batch_size, pos):
raise NotImplementedError
class TrainablePositionEncoding(AbstractPositionEncoding):
"""Trainable position encoding."""
def __init__(self, index_dim, num_channels=128, init_scale=0.02, name=None):
super(TrainablePositionEncoding, self).__init__(name=name)
self._index_dim = index_dim
self._num_channels = num_channels
self._init_scale = init_scale
def __call__(self, batch_size, pos=None):
del pos # Unused.
pos_embs = hk.get_parameter(
'pos_embs', [self._index_dim, self._num_channels],
init=hk.initializers.TruncatedNormal(stddev=self._init_scale))
if batch_size is not None:
pos_embs = jnp.broadcast_to(
pos_embs[None, :, :], (batch_size,) + pos_embs.shape)
return pos_embs
def _check_or_build_spatial_positions(pos, index_dims, batch_size):
"""Checks or builds spatial position features (x, y, ...).
Args:
pos: None, or an array of position features. If None, position features
are built. Otherwise, their size is checked.
index_dims: An iterable giving the spatial/index size of the data to be
featurized.
batch_size: The batch size of the data to be featurized.
Returns:
    An array of position features of shape
    [batch_size, prod(index_dims), n_dims], where n_dims = len(index_dims).
"""
if pos is None:
pos = build_linear_positions(index_dims)
pos = jnp.broadcast_to(pos[None], (batch_size,) + pos.shape)
pos = jnp.reshape(pos, [batch_size, np.prod(index_dims), -1])
else:
# Just a warning label: you probably don't want your spatial features to
# have a different spatial layout than your pos coordinate system.
# But feel free to override if you think it'll work!
assert pos.shape[-1] == len(index_dims)
return pos
class FourierPositionEncoding(AbstractPositionEncoding):
"""Fourier (Sinusoidal) position encoding."""
def __init__(self, index_dims, num_bands, concat_pos=True,
max_resolution=None, sine_only=False, name=None):
super(FourierPositionEncoding, self).__init__(name=name)
self._num_bands = num_bands
self._concat_pos = concat_pos
self._sine_only = sine_only
self._index_dims = index_dims
# Use the index dims as the maximum resolution if it's not provided.
self._max_resolution = max_resolution or index_dims
def __call__(self, batch_size, pos=None):
pos = _check_or_build_spatial_positions(pos, self._index_dims, batch_size)
build_ff_fn = functools.partial(
generate_fourier_features,
num_bands=self._num_bands,
max_resolution=self._max_resolution,
concat_pos=self._concat_pos,
sine_only=self._sine_only)
return jax.vmap(build_ff_fn, 0, 0)(pos)
class PositionEncodingProjector(AbstractPositionEncoding):
"""Projects a position encoding to a target size."""
def __init__(self, output_size, base_position_encoding, name=None):
super(PositionEncodingProjector, self).__init__(name=name)
self._output_size = output_size
self._base_position_encoding = base_position_encoding
def __call__(self, batch_size, pos=None):
base_pos = self._base_position_encoding(batch_size, pos)
projected_pos = hk.Linear(output_size=self._output_size)(base_pos)
return projected_pos
def build_position_encoding(
position_encoding_type,
index_dims,
project_pos_dim=-1,
trainable_position_encoding_kwargs=None,
fourier_position_encoding_kwargs=None,
name=None):
"""Builds the position encoding."""
if position_encoding_type == 'trainable':
assert trainable_position_encoding_kwargs is not None
output_pos_enc = TrainablePositionEncoding(
# Construct 1D features:
index_dim=np.prod(index_dims),
name=name,
**trainable_position_encoding_kwargs)
elif position_encoding_type == 'fourier':
assert fourier_position_encoding_kwargs is not None
output_pos_enc = FourierPositionEncoding(
index_dims=index_dims,
name=name,
**fourier_position_encoding_kwargs)
else:
raise ValueError(f'Unknown position encoding: {position_encoding_type}.')
if project_pos_dim > 0:
# Project the position encoding to a target dimension:
output_pos_enc = PositionEncodingProjector(
output_size=project_pos_dim,
base_position_encoding=output_pos_enc)
return output_pos_enc
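# Illustrative sketch (not part of the original API): builds a Fourier position
# encoding for a 32x32 grid and projects it to 64 channels. Because the
# projector uses hk.Linear, this must run inside hk.transform. All sizes are
# placeholder values.
def _example_build_position_encoding():
  pos_enc = build_position_encoding(
      position_encoding_type='fourier',
      index_dims=(32, 32),
      project_pos_dim=64,
      fourier_position_encoding_kwargs=dict(
          num_bands=16, concat_pos=True, sine_only=False))
  out = pos_enc(batch_size=2)  # [2, 32 * 32, 64]
  return out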
| deepmind-research-master | perceiver/position_encoding.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for io_processors."""
import numpy as np
import tensorflow as tf
from perceiver import io_processors
def _create_test_image(shape):
image = np.arange(np.prod(np.array(shape)))
return np.reshape(image, shape)
def test_space_to_depth_image():
image_shape = (2, 3 * 5, 3 * 7, 11)
image = _create_test_image(image_shape)
output = io_processors.space_to_depth(image, spatial_block_size=3)
assert output.shape == (2, 5, 7, 3 * 3 * 11)
def test_space_to_depth_video():
image_shape = (2, 5 * 7, 3 * 11, 3 * 13, 17)
image = _create_test_image(image_shape)
output = io_processors.space_to_depth(image, spatial_block_size=3,
temporal_block_size=5)
assert output.shape == (2, 7, 11, 13, 5 * 3 * 3 * 17)
def test_reverse_space_to_depth_image():
image_shape = (2, 5, 7, 3 * 3 * 11)
image = _create_test_image(image_shape)
output = io_processors.reverse_space_to_depth(image, spatial_block_size=3)
assert output.shape == (2, 3 * 5, 3 * 7, 11)
def test_reverse_space_to_depth_video():
image_shape = (2, 7, 11, 13, 5 * 3 * 3 * 17)
image = _create_test_image(image_shape)
output = io_processors.reverse_space_to_depth(
image, spatial_block_size=3, temporal_block_size=5)
assert output.shape == (2, 5 * 7, 3 * 11, 3 * 13, 17)
def test_extract_patches():
image_shape = (2, 5, 7, 3)
image = _create_test_image(image_shape)
sizes = [1, 2, 3, 1]
strides = [1, 1, 2, 1]
rates = [1, 2, 1, 1]
for padding in ["VALID", "SAME"]:
jax_patches = io_processors.extract_patches(
image, sizes=sizes, strides=strides, rates=rates, padding=padding)
tf_patches = tf.image.extract_patches(
image, sizes=sizes, strides=strides, rates=rates, padding=padding)
assert np.array_equal(
np.array(jax_patches),
tf_patches.numpy())
| deepmind-research-master | perceiver/io_processors_test.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IO pre- and post-processors for Perceiver."""
import functools
import math
from typing import Any, Callable, Mapping, Optional, Sequence, Tuple
import einops
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
from perceiver import position_encoding
ModalitySizeT = Mapping[str, int]
PreprocessorOutputT = Tuple[jnp.ndarray, Optional[ModalitySizeT], jnp.ndarray]
PreprocessorT = Callable[..., PreprocessorOutputT]
PostprocessorT = Callable[..., Any]
def reverse_space_to_depth(
frames: jnp.ndarray,
temporal_block_size: int = 1,
spatial_block_size: int = 1) -> jnp.ndarray:
"""Reverse space to depth transform."""
if len(frames.shape) == 4:
return einops.rearrange(
frames, 'b h w (dh dw c) -> b (h dh) (w dw) c',
dh=spatial_block_size, dw=spatial_block_size)
elif len(frames.shape) == 5:
return einops.rearrange(
frames, 'b t h w (dt dh dw c) -> b (t dt) (h dh) (w dw) c',
dt=temporal_block_size, dh=spatial_block_size, dw=spatial_block_size)
else:
raise ValueError(
'Frames should be of rank 4 (batch, height, width, channels)'
' or rank 5 (batch, time, height, width, channels)')
def space_to_depth(
frames: jnp.ndarray,
temporal_block_size: int = 1,
spatial_block_size: int = 1) -> jnp.ndarray:
"""Space to depth transform."""
if len(frames.shape) == 4:
return einops.rearrange(
frames, 'b (h dh) (w dw) c -> b h w (dh dw c)',
dh=spatial_block_size, dw=spatial_block_size)
elif len(frames.shape) == 5:
return einops.rearrange(
frames, 'b (t dt) (h dh) (w dw) c -> b t h w (dt dh dw c)',
dt=temporal_block_size, dh=spatial_block_size, dw=spatial_block_size)
else:
raise ValueError(
'Frames should be of rank 4 (batch, height, width, channels)'
' or rank 5 (batch, time, height, width, channels)')
def extract_patches(images: jnp.ndarray,
sizes: Sequence[int],
strides: Sequence[int],
rates: Sequence[int],
padding: str = 'VALID') -> jnp.ndarray:
"""Extract patches from images.
  This function is a wrapper around jax.lax.conv_general_dilated_patches that
  conforms to the same interface as tf.image.extract_patches.
  The function extracts patches of shape `sizes` from the input images in the
  same manner as a convolution with a kernel of shape `sizes`, stride equal to
  `strides`, and the given padding scheme.
The patches are stacked in the channel dimension.
Args:
images: input batch of images of shape [B, H, W, C].
sizes: size of extracted patches. Must be [1, size_rows, size_cols, 1].
strides: strides, must be [1, stride_rows, stride_cols, 1].
rates: sampling rate (as in dilated convolutions),
must be [1, rate_rows, rate_cols, 1].
padding: padding algorithm to use.
Returns:
Tensor of shape [B, patch_rows, patch_cols, size_rows * size_cols * C]
"""
if len(sizes) != 4 or sizes[0] != 1 or sizes[3] != 1:
raise ValueError(
f'Shape of sizes must be [1, size_rows, size_cols, 1], got {sizes}.')
if len(strides) != 4 or strides[0] != 1 or strides[3] != 1:
raise ValueError(
        f'Shape of strides must be [1, stride_rows, stride_cols, 1], '
f'got {strides}.')
if len(rates) != 4 or rates[0] != 1 or rates[3] != 1:
raise ValueError(
        f'Shape of rates must be [1, rate_rows, rate_cols, 1], got {rates}.')
if images.ndim != 4:
raise ValueError(
f'Rank of images must be 4 (got tensor of shape {jnp.shape(images)})')
# Rearrange axes of images to NCHW for conv_general_dilated_patches
images = einops.rearrange(images, 'n h w c -> n c h w')
channels = images.shape[1]
patches = jax.lax.conv_general_dilated_patches(
images, sizes[1:-1], strides[1:-1], padding, rhs_dilation=rates[1:-1])
# conv_general_dilated_patches returns patches in channel-major order.
# Rearrange to match interface of tf.image.extract_patches.
patches = einops.rearrange(patches, 'n (c ph pw) h w -> n h w (ph pw c)',
c=channels, ph=sizes[1], pw=sizes[2])
return patches
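# Illustrative sketch (not part of the original API): extracts 3x3 patches with
# stride 1 and VALID padding from a small image batch; each patch's
# 3 * 3 * C values are stacked in the channel dimension, matching
# tf.image.extract_patches. Shapes are placeholder values.
def _example_extract_patches():
  images = jnp.arange(1 * 5 * 5 * 2, dtype=jnp.float32).reshape([1, 5, 5, 2])
  patches = extract_patches(
      images, sizes=[1, 3, 3, 1], strides=[1, 1, 1, 1], rates=[1, 1, 1, 1],
      padding='VALID')
  assert patches.shape == (1, 3, 3, 3 * 3 * 2)
  return patches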
def patches_for_flow(inputs: jnp.ndarray) -> jnp.ndarray:
"""Extract 3x3x2 image patches for flow inputs."""
def pad_and_extract_patches(inputs):
padded_inputs = jnp.pad(inputs, [[0, 0], [1, 1], [1, 1], [0, 0]],
mode='constant')
return extract_patches(
padded_inputs,
sizes=[1, 3, 3, 1],
strides=[1, 1, 1, 1],
padding='VALID',
rates=[1, 1, 1, 1])
return jax.vmap(pad_and_extract_patches, in_axes=1, out_axes=1)(inputs)
# ------------------------------------------------------------
# ------------------- Up/down-sampling ---------------------
# ------------------------------------------------------------
class Conv2DDownsample(hk.Module):
"""Downsamples 4x by applying a 2D convolution and doing max pooling."""
def __init__(
self,
num_layers: int = 1,
num_channels: int = 64,
use_batchnorm: bool = True,
bn_config: Optional[Mapping[str, float]] = None,
name: Optional[str] = None,
):
"""Constructs a Conv2DDownsample model.
Args:
num_layers: The number of conv->max_pool layers.
num_channels: The number of conv output channels.
use_batchnorm: Whether to use batchnorm.
bn_config: A dictionary of two elements, ``decay_rate`` and ``eps`` to be
passed on to the :class:`~haiku.BatchNorm` layers. By default the
``decay_rate`` is ``0.9`` and ``eps`` is ``1e-5``.
name: Name of the module.
"""
super().__init__(name=name)
self._num_layers = num_layers
self._use_batchnorm = use_batchnorm
bn_config = dict(bn_config or {})
bn_config.setdefault('decay_rate', 0.9)
bn_config.setdefault('eps', 1e-5)
bn_config.setdefault('create_scale', True)
bn_config.setdefault('create_offset', True)
self.layers = []
for _ in range(self._num_layers):
conv = hk.Conv2D(
output_channels=num_channels,
kernel_shape=7,
stride=2,
with_bias=False,
padding='SAME',
name='conv')
if use_batchnorm:
batchnorm = hk.BatchNorm(name='batchnorm', **bn_config)
else:
batchnorm = None
self.layers.append(dict(conv=conv, batchnorm=batchnorm))
def __call__(self, inputs: jnp.ndarray, *,
is_training: bool,
test_local_stats: bool = False) -> jnp.ndarray:
out = inputs
for layer in self.layers:
out = layer['conv'](out)
if layer['batchnorm'] is not None:
out = layer['batchnorm'](out, is_training, test_local_stats)
out = jax.nn.relu(out)
out = hk.max_pool(out,
window_shape=(1, 3, 3, 1),
strides=(1, 2, 2, 1),
padding='SAME')
return out
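# Illustrative sketch (not part of the original API): each Conv2DDownsample
# layer applies a stride-2 convolution followed by a stride-2 max-pool, so one
# layer reduces spatial dims by 4x. Must run inside hk.transform_with_state
# because of the BatchNorm layers. Shapes are placeholder values.
def _example_conv2d_downsample():
  images = jnp.zeros([2, 224, 224, 3])
  out = Conv2DDownsample(num_layers=1, num_channels=64)(
      images, is_training=True)
  assert out.shape == (2, 56, 56, 64)
  return out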
class Conv2DUpsample(hk.Module):
"""Upsamples 4x using 2 2D transposed convolutions."""
def __init__(
self,
n_outputs: int,
name: Optional[str] = None,
):
"""Constructs a Conv2DUpsample model.
Args:
n_outputs: The number of output channels of the module.
name: Name of the module.
"""
super().__init__(name=name)
self.transp_conv1 = hk.Conv2DTranspose(
output_channels=n_outputs*2,
kernel_shape=4,
stride=2,
with_bias=True,
padding='SAME',
name='transp_conv_1')
self.transp_conv2 = hk.Conv2DTranspose(
output_channels=n_outputs,
kernel_shape=4,
stride=2,
with_bias=True,
padding='SAME',
name='transp_conv_2')
def __call__(self, inputs: jnp.ndarray, *,
is_training: bool,
test_local_stats: bool = False) -> jnp.ndarray:
out = inputs
out = self.transp_conv1(out)
out = jax.nn.relu(out)
out = self.transp_conv2(out)
return out
class Conv3DUpsample(hk.Module):
"""Simple convolutional auto-encoder."""
def __init__(self,
n_outputs: int,
n_time_upsamples: int = 2,
n_space_upsamples: int = 4,
name: Optional[str] = None):
super().__init__(name=name)
self._n_outputs = n_outputs
self._n_time_upsamples = n_time_upsamples
self._n_space_upsamples = n_space_upsamples
def __call__(self, x: jnp.ndarray, *, is_training: bool) -> jnp.ndarray:
n_upsamples = max(self._n_time_upsamples, self._n_space_upsamples)
time_stride = 2
space_stride = 2
for i in range(n_upsamples):
if i >= self._n_time_upsamples:
time_stride = 1
if i >= self._n_space_upsamples:
space_stride = 1
channels = self._n_outputs * pow(2, n_upsamples - 1 - i)
x = hk.Conv3DTranspose(output_channels=channels,
stride=[time_stride, space_stride, space_stride],
kernel_shape=[4, 4, 4],
name=f'conv3d_transpose_{i}')(x)
if i != n_upsamples - 1:
x = jax.nn.relu(x)
return x
class ImagePreprocessor(hk.Module):
"""Image preprocessing for Perceiver Encoder."""
def __init__(
self,
prep_type='conv',
spatial_downsample: int = 4,
temporal_downsample: int = 1,
position_encoding_type: str = 'fourier',
n_extra_pos_mlp: int = 0,
num_channels: int = 64,
conv_after_patching: bool = False,
conv2d_use_batchnorm: bool = True,
concat_or_add_pos: str = 'concat',
name: Optional[str] = None,
**position_encoding_kwargs):
super().__init__(name=name)
if prep_type not in ('conv', 'patches', 'pixels', 'conv1x1'):
raise ValueError('Invalid prep_type!')
if concat_or_add_pos not in ['concat', 'add']:
raise ValueError(
f'Invalid value {concat_or_add_pos} for concat_or_add_pos.')
self._prep_type = prep_type
self._spatial_downsample = spatial_downsample
self._temporal_downsample = temporal_downsample
self._concat_or_add_pos = concat_or_add_pos
self._conv_after_patching = conv_after_patching
self._num_channels = num_channels
if self._prep_type == 'conv':
# Downsampling with conv is currently restricted
convnet_num_layers = math.log(spatial_downsample, 4)
convnet_num_layers_is_int = (
convnet_num_layers == np.round(convnet_num_layers))
if not convnet_num_layers_is_int or temporal_downsample != 1:
raise ValueError('Only powers of 4 expected for spatial '
'and 1 expected for temporal '
'downsampling with conv.')
self.convnet = Conv2DDownsample(
num_layers=int(convnet_num_layers),
num_channels=num_channels,
use_batchnorm=conv2d_use_batchnorm)
elif self._prep_type == 'conv1x1':
assert temporal_downsample == 1, 'conv1x1 does not downsample in time.'
self.convnet_1x1 = hk.Conv2D(
num_channels, kernel_shape=[1, 1],
# spatial_downsample is unconstrained for 1x1 convolutions.
stride=[spatial_downsample, spatial_downsample])
# Partially construct the positional encoding function.
# We fully construct it when we know the input size.
self._positional_encoding_ctor = functools.partial(
position_encoding.build_position_encoding,
position_encoding_type=position_encoding_type,
**position_encoding_kwargs)
# Stack MLPs to get a deeper positional embedding.
self._n_extra_pos_mlp = n_extra_pos_mlp
def _build_network_inputs(
self, inputs: jnp.ndarray, pos: jnp.ndarray,
network_input_is_1d: bool = True) -> Tuple[jnp.ndarray, jnp.ndarray]:
"""Construct the final input, including position encoding."""
batch_size = inputs.shape[0]
index_dims = inputs.shape[1:-1]
# Reshape input features to a 1D index dimension if necessary.
if len(inputs.shape) > 3 and network_input_is_1d:
inputs = jnp.reshape(
inputs, [batch_size, np.prod(index_dims), -1])
# Construct the position encoding.
pos_enc = self._positional_encoding_ctor(
index_dims=index_dims)(batch_size=batch_size, pos=pos)
for i in range(0, self._n_extra_pos_mlp):
pos_enc += hk.Linear(pos_enc.shape[-1])(pos_enc)
if i < (self._n_extra_pos_mlp-1):
pos_enc = jax.nn.relu(pos_enc)
if not network_input_is_1d:
# Reshape pos to match the input feature shape
# if the network takes non-1D inputs
sh = inputs.shape
pos_enc = jnp.reshape(pos_enc, list(sh)[:-1]+[-1])
if self._concat_or_add_pos == 'concat':
inputs_with_pos = jnp.concatenate([inputs, pos_enc], axis=-1)
elif self._concat_or_add_pos == 'add':
inputs_with_pos = inputs + pos_enc
return inputs_with_pos, inputs
def __call__(
self, inputs: jnp.ndarray, *,
is_training: bool,
pos: Optional[jnp.ndarray] = None,
network_input_is_1d: bool = True) -> PreprocessorOutputT:
if self._prep_type == 'conv':
# Convnet image featurization.
# Downsamples spatially by a factor of 4
conv = self.convnet
if len(inputs.shape) == 5:
conv = hk.BatchApply(conv)
inputs = conv(inputs, is_training=is_training)
elif self._prep_type == 'conv1x1':
# maps inputs to 64d
conv = self.convnet_1x1
if len(inputs.shape) == 5:
conv = hk.BatchApply(conv)
inputs = conv(inputs)
elif self._prep_type == 'patches':
# Space2depth featurization.
# Video: B x T x H x W x C
inputs = space_to_depth(
inputs,
temporal_block_size=self._temporal_downsample,
spatial_block_size=self._spatial_downsample)
if inputs.ndim == 5 and inputs.shape[1] == 1:
# for flow
inputs = jnp.squeeze(inputs, axis=1)
if self._conv_after_patching:
inputs = hk.Linear(self._num_channels, name='patches_linear')(inputs)
elif self._prep_type == 'pixels':
# if requested, downsamples in the crudest way
if inputs.ndim == 4:
inputs = inputs[:,
::self._spatial_downsample, ::self._spatial_downsample]
elif inputs.ndim == 5:
inputs = inputs[:, ::self._temporal_downsample,
::self._spatial_downsample, ::self._spatial_downsample]
else:
raise ValueError('Unsupported data format for pixels.')
inputs, inputs_without_pos = self._build_network_inputs(
inputs, pos, network_input_is_1d)
modality_sizes = None # Size for each modality, only needed for multimodal
return inputs, modality_sizes, inputs_without_pos
class ImagePostprocessor(hk.Module):
"""Image postprocessing for Perceiver."""
def __init__(
self,
postproc_type: str = 'pixels',
spatial_upsample: int = 1,
temporal_upsample: int = 1,
n_outputs: int = -1, # only relevant for 'conv1x1', 'conv', and 'raft'
input_reshape_size: Optional[Sequence[int]] = None,
name: Optional[str] = None):
super().__init__(name=name)
if postproc_type not in ('conv', 'patches', 'pixels', 'raft', 'conv1x1'):
raise ValueError('Invalid postproc_type!')
# Architecture parameters:
self._postproc_type = postproc_type
self._temporal_upsample = temporal_upsample
self._spatial_upsample = spatial_upsample
self._input_reshape_size = input_reshape_size
if self._postproc_type == 'pixels':
# No postprocessing.
if self._temporal_upsample != 1 or self._spatial_upsample != 1:
raise ValueError('Pixels postprocessing should not currently upsample.')
elif self._postproc_type == 'conv1x1':
assert self._temporal_upsample == 1, 'conv1x1 does not upsample in time.'
if n_outputs == -1:
raise ValueError('Expected value for n_outputs')
self.conv1x1 = hk.Conv2D(
n_outputs, kernel_shape=[1, 1],
        # spatial_upsample is unconstrained for 1x1 convolutions.
stride=[self._spatial_upsample, self._spatial_upsample])
elif self._postproc_type == 'conv':
if n_outputs == -1:
raise ValueError('Expected value for n_outputs')
if self._temporal_upsample != 1:
def int_log2(x):
return int(np.round(np.log(x) / np.log(2)))
self.convnet = Conv3DUpsample(
n_outputs, int_log2(temporal_upsample), int_log2(spatial_upsample))
else:
self.convnet = Conv2DUpsample(n_outputs)
def __call__(
self, inputs: jnp.ndarray, *,
is_training: bool,
pos: Optional[jnp.ndarray] = None,
modality_sizes: Optional[ModalitySizeT] = None) -> jnp.ndarray:
if self._input_reshape_size is not None:
inputs = jnp.reshape(
inputs,
[inputs.shape[0]] + list(self._input_reshape_size)
+ [inputs.shape[-1]])
if self._postproc_type == 'conv' or self._postproc_type == 'raft':
# Convnet image featurization.
conv = self.convnet
if len(inputs.shape) == 5 and self._temporal_upsample == 1:
conv = hk.BatchApply(conv)
inputs = conv(inputs, is_training=is_training)
elif self._postproc_type == 'conv1x1':
inputs = self.conv1x1(inputs)
elif self._postproc_type == 'patches':
inputs = reverse_space_to_depth(
inputs, self._temporal_upsample, self._spatial_upsample)
return inputs
class OneHotPreprocessor(hk.Module):
"""One-hot preprocessor for Perceiver Encoder."""
def __init__(self, name: Optional[str] = None):
super().__init__(name=name)
def __call__(self, inputs: jnp.ndarray, *,
is_training: bool,
pos: Optional[jnp.ndarray] = None,
network_input_is_1d: bool = True) -> PreprocessorOutputT:
# Add a dummy index dimension.
inputs = inputs[:, None, :]
# No position encodings, so the 1st (input) and 3rd (inputs_without_pos)
# outputs are identical.
return inputs, None, inputs
class AudioPreprocessor(hk.Module):
"""Audio preprocessing for Perceiver Encoder."""
def __init__(
self,
prep_type: str = 'patches',
samples_per_patch: int = 96,
position_encoding_type: str = 'fourier',
n_extra_pos_mlp: int = 0,
concat_or_add_pos: str = 'concat',
name: Optional[str] = None,
**position_encoding_kwargs):
super().__init__(name=name)
if prep_type not in ('patches',):
raise ValueError('Invalid prep_type!')
if concat_or_add_pos not in ['concat', 'add']:
raise ValueError(
f'Invalid value {concat_or_add_pos} for concat_or_add_pos.')
self._samples_per_patch = samples_per_patch
self._concat_or_add_pos = concat_or_add_pos
# Partially construct the positional encoding function.
# We fully construct it when we know the input size.
self._positional_encoding_ctor = functools.partial(
position_encoding.build_position_encoding,
position_encoding_type=position_encoding_type,
**position_encoding_kwargs)
# for deeper positional embeddings
self._n_extra_pos_mlp = n_extra_pos_mlp
def _build_network_inputs(
self, inputs: jnp.ndarray,
pos: jnp.ndarray) -> Tuple[jnp.ndarray, jnp.ndarray]:
"""Construct the final input, including position encoding."""
batch_size = inputs.shape[0]
index_dims = inputs.shape[1:-1]
# Construct the position encoding.
pos_enc = self._positional_encoding_ctor(
index_dims=index_dims)(batch_size=batch_size, pos=pos)
for i in range(0, self._n_extra_pos_mlp):
pos_enc += hk.Linear(pos_enc.shape[-1])(pos_enc)
if i < (self._n_extra_pos_mlp-1):
pos_enc = jax.nn.relu(pos_enc)
if self._concat_or_add_pos == 'concat':
inputs_with_pos = jnp.concatenate([inputs, pos_enc], axis=-1)
elif self._concat_or_add_pos == 'add':
inputs_with_pos = inputs + pos_enc
return inputs_with_pos, inputs
def __call__(self, inputs: jnp.ndarray, *,
is_training: bool,
pos: Optional[jnp.ndarray] = None,
network_input_is_1d: bool = True) -> PreprocessorOutputT:
inputs = jnp.reshape(inputs, [inputs.shape[0], -1,
self._samples_per_patch])
inputs, inputs_without_pos = self._build_network_inputs(inputs, pos)
modality_sizes = None # Size for each modality, only needed for multimodal
return inputs, modality_sizes, inputs_without_pos
class AudioPostprocessor(hk.Module):
"""Audio postprocessing for Perceiver."""
def __init__(
self,
      postproc_type: str = 'patches',  # Only 'patches' is currently supported.
samples_per_patch: int = 96,
name: Optional[str] = None):
super().__init__(name=name)
if postproc_type not in ('patches',):
raise ValueError('Invalid postproc_type!')
self._samples_per_patch = samples_per_patch
# Architecture parameters:
self._postproc_type = postproc_type
def __call__(self, inputs: jnp.ndarray, *,
is_training: bool,
pos: Optional[jnp.ndarray] = None,
modality_sizes: Optional[ModalitySizeT] = None) -> jnp.ndarray:
out = hk.Linear(self._samples_per_patch)(inputs)
return jnp.reshape(out, [inputs.shape[0], -1])
class IdentityPostprocessor(hk.Module):
"""Passes through the inputs unchanged."""
def __init__(self, name: Optional[str] = None):
super().__init__(name=name)
def __call__(self, inputs: jnp.ndarray, *,
is_training: bool,
pos: Optional[jnp.ndarray] = None,
modality_sizes: Optional[ModalitySizeT] = None) -> jnp.ndarray:
return inputs
def restructure(modality_sizes: ModalitySizeT,
inputs: jnp.ndarray) -> Mapping[str, jnp.ndarray]:
"""Partitions a [B, N, C] tensor into tensors for each modality.
Args:
modality_sizes: dict specifying the size of the modality
inputs: input tensor
Returns:
dict mapping name of modality to its associated tensor.
"""
outputs = {}
index = 0
# Apply a predictable ordering to the modalities
for modality in sorted(modality_sizes.keys()):
size = modality_sizes[modality]
inp = inputs[:, index:index + size]
index += size
outputs[modality] = inp
return outputs
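# Illustrative sketch (not part of the original API): splits a [B, N, C] array
# whose first 3 index positions belong to 'audio' and last 2 to 'image' back
# into per-modality arrays. Modality names and sizes are placeholder values.
def _example_restructure():
  inputs = jnp.zeros([4, 5, 16])
  out = restructure(modality_sizes={'audio': 3, 'image': 2}, inputs=inputs)
  assert out['audio'].shape == (4, 3, 16)
  assert out['image'].shape == (4, 2, 16)
  return out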
class MultimodalPreprocessor(hk.Module):
"""Multimodal preprocessing for Perceiver Encoder.
Inputs for each modality is preprocessed then padded with trainable position
embeddings to have the same number of channels.
"""
def __init__(
self,
modalities: Mapping[str, PreprocessorT],
mask_probs: Optional[Mapping[str, float]] = None,
min_padding_size: int = 2,
name: Optional[str] = None):
"""Constructor.
Args:
modalities: dict mapping modality name to preprocessor
mask_probs: dict mapping modality name to masking probability of that
modality
min_padding_size: the minimum padding size for all modalities.
The final output will have num_channels equal to the maximum channels
across all modalities plus min_padding_size.
name: name of module
"""
super().__init__(name=name)
self._modalities = modalities
self._min_padding_size = min_padding_size
self._mask_probs = mask_probs
def __call__(self, inputs: jnp.ndarray, *,
is_training: bool,
pos: Optional[jnp.ndarray] = None,
network_input_is_1d: bool = True) -> PreprocessorOutputT:
outputs = {}
inputs_without_pos = {}
for modality, preprocessor in self._modalities.items():
outputs[modality], _, inputs_without_pos[modality] = preprocessor(
inputs[modality], is_training=is_training, pos=pos,
network_input_is_1d=network_input_is_1d)
common_channel_size = (max(o.shape[2] for o in outputs.values())
+ self._min_padding_size)
padded = {}
modality_sizes = {}
for modality, output in outputs.items():
pos_enc = position_encoding.TrainablePositionEncoding(
1, num_channels=common_channel_size-output.shape[2],
init_scale=0.02, name=f'{modality}_padding')
padding = jnp.broadcast_to(
pos_enc(batch_size=output.shape[0]),
[output.shape[0], output.shape[1],
common_channel_size-output.shape[2]])
output_padded = jnp.concatenate([output, padding], axis=2)
if self._mask_probs is not None:
# Randomly mask out each token corresponding to this modality
mask_token = position_encoding.TrainablePositionEncoding(
1, num_channels=output_padded.shape[2],
init_scale=0.02, name=f'{modality}_mask_token')(output.shape[0])
mask_prob = self._mask_probs[modality]
rng = hk.next_rng_key()
mask = jax.random.bernoulli(rng, mask_prob,
shape=[output.shape[0], output.shape[1]])
mask = jnp.expand_dims(mask, axis=2)
output_padded = (1 - mask) * output_padded + mask * mask_token
padded[modality] = output_padded
modality_sizes[modality] = output_padded.shape[1]
# Apply a predictable ordering to the modalities
padded_ls = [padded[k] for k in sorted(padded.keys())]
return (jnp.concatenate(padded_ls, axis=1),
modality_sizes,
inputs_without_pos)
class MultimodalPostprocessor(hk.Module):
"""Multimodal postprocessing for Perceiver."""
def __init__(
self,
modalities: Mapping[str, PostprocessorT],
input_is_dict: bool = False,
name: Optional[str] = None):
"""Constructor.
Args:
modalities: dict mapping modality name to post processor for that modality
input_is_dict: If True, input is assumed to be dictionary structured,
and outputs keep the same dictionary shape. If False, input is a tensor
which is sliced up during postprocessing by `modality_sizes`.
name: name of the module
"""
super().__init__(name=name)
self._modalities = modalities
self._input_is_dict = input_is_dict
def __call__(
self, inputs: jnp.ndarray, *,
is_training: bool,
pos: Optional[jnp.ndarray] = None,
modality_sizes: Optional[ModalitySizeT] = None) -> Mapping[str,
jnp.ndarray]:
if not self._input_is_dict:
# Slice up modalities by their sizes.
assert modality_sizes is not None
inputs = restructure(modality_sizes=modality_sizes, inputs=inputs)
outputs = {modality: postprocessor(
inputs[modality], is_training=is_training, pos=pos, modality_sizes=None)
for modality, postprocessor in self._modalities.items()}
return outputs
class ClassificationPostprocessor(hk.Module):
"""Classification postprocessing for Perceiver."""
def __init__(
self,
num_classes: int,
name: Optional[str] = None):
super().__init__(name=name)
self._num_classes = num_classes
def __call__(self, inputs: jnp.ndarray, *,
is_training: bool,
pos: Optional[jnp.ndarray] = None,
modality_sizes: Optional[ModalitySizeT] = None) -> jnp.ndarray:
logits = hk.Linear(self._num_classes)(inputs)
return logits[:, 0, :]
class ProjectionPostprocessor(hk.Module):
"""Projection postprocessing for Perceiver."""
def __init__(
self,
num_outputs: int,
name: Optional[str] = None):
super().__init__(name=name)
self._num_outputs = num_outputs
def __call__(self, inputs: jnp.ndarray, *,
is_training: bool,
pos: Optional[jnp.ndarray] = None,
modality_sizes: Optional[ModalitySizeT] = None) -> jnp.ndarray:
logits = hk.Linear(self._num_outputs)(inputs)
return logits
class EmbeddingDecoder(hk.Module):
"""Haiku module to decode embeddings."""
def __init__(self, embedding_matrix: jnp.ndarray, name='embedding_decoder'):
"""Constructs the module.
Args:
embedding_matrix: Array of shape [vocab_size, d_model].
name: Name of the module.
"""
super().__init__(name=name)
self._embedding_matrix = embedding_matrix
self._vocab_size, self._d_model = embedding_matrix.shape
def __call__(self, embeddings: jnp.ndarray) -> jnp.ndarray:
batch_size, seq_len, _ = embeddings.shape
output = jnp.matmul(
embeddings.reshape([-1, self._d_model]), # Flatten batch dim
jnp.transpose(self._embedding_matrix))
bias = hk.get_parameter('bias', shape=[self._vocab_size], init=jnp.zeros)
output = output + bias
return output.reshape([batch_size, seq_len, self._vocab_size])
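# Illustrative sketch (not part of the original API): decodes latent embeddings
# back to vocabulary logits by re-using an embedding matrix (weight tying).
# Must run inside hk.transform; the vocabulary and model sizes below are
# placeholder values and the embedding matrix is created here only for the
# example.
def _example_embedding_decoder():
  vocab_size, d_model = 100, 64
  embedding_matrix = hk.get_parameter(
      'embeddings', [vocab_size, d_model],
      init=hk.initializers.TruncatedNormal(stddev=0.02))
  embeddings = jnp.zeros([2, 10, d_model])
  logits = EmbeddingDecoder(embedding_matrix)(embeddings)
  assert logits.shape == (2, 10, vocab_size)
  return logits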
| deepmind-research-master | perceiver/io_processors.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ImageNet dataset with pre-processing and augmentation.
Deng, et al CVPR 2009 - ImageNet: A large-scale hierarchical image database.
https://image-net.org/
"""
import enum
from typing import Any, Generator, Mapping, Optional, Sequence, Text, Tuple
import jax
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
import tensorflow_probability as tfp
from perceiver.train import autoaugment
Batch = Mapping[Text, np.ndarray]
MEAN_RGB = (0.485 * 255, 0.456 * 255, 0.406 * 255)
STDDEV_RGB = (0.229 * 255, 0.224 * 255, 0.225 * 255)
AUTOTUNE = tf.data.experimental.AUTOTUNE
INPUT_DIM = 224 # The number of pixels in the image resize.
class Split(enum.Enum):
"""ImageNet dataset split."""
TRAIN = 1
TRAIN_AND_VALID = 2
VALID = 3
TEST = 4
@classmethod
def from_string(cls, name: Text) -> 'Split':
return {'TRAIN': Split.TRAIN, 'TRAIN_AND_VALID': Split.TRAIN_AND_VALID,
'VALID': Split.VALID, 'VALIDATION': Split.VALID,
'TEST': Split.TEST}[name.upper()]
@property
def num_examples(self):
return {Split.TRAIN_AND_VALID: 1281167, Split.TRAIN: 1271167,
Split.VALID: 10000, Split.TEST: 50000}[self]
def load(
split: Split,
*,
is_training: bool,
# batch_dims should be:
# [device_count, per_device_batch_size] or [total_batch_size]
batch_dims: Sequence[int],
augmentation_settings: Mapping[str, Any],
# The shape to which images are resized.
im_dim: int = INPUT_DIM,
threadpool_size: int = 48,
max_intra_op_parallelism: int = 1,
) -> Generator[Batch, None, None]:
"""Loads the given split of the dataset."""
start, end = _shard(split, jax.host_id(), jax.host_count())
im_size = (im_dim, im_dim)
total_batch_size = np.prod(batch_dims)
tfds_split = tfds.core.ReadInstruction(_to_tfds_split(split),
from_=start, to=end, unit='abs')
ds = tfds.load('imagenet2012:5.*.*', split=tfds_split,
decoders={'image': tfds.decode.SkipDecoding()})
options = tf.data.Options()
options.experimental_threading.private_threadpool_size = threadpool_size
options.experimental_threading.max_intra_op_parallelism = (
max_intra_op_parallelism)
options.experimental_optimization.map_parallelization = True
if is_training:
options.experimental_deterministic = False
ds = ds.with_options(options)
if is_training:
if jax.host_count() > 1:
# Only cache if we are reading a subset of the dataset.
ds = ds.cache()
ds = ds.repeat()
ds = ds.shuffle(buffer_size=10 * total_batch_size, seed=0)
else:
if split.num_examples % total_batch_size != 0:
raise ValueError(f'Test/valid must be divisible by {total_batch_size}')
def crop_augment_preprocess(example):
image, _ = _preprocess_image(
example['image'], is_training, im_size, augmentation_settings)
label = tf.cast(example['label'], tf.int32)
out = {'images': image, 'labels': label}
if is_training:
if augmentation_settings['cutmix']:
out['mask'] = cutmix_padding(*im_size)
out['cutmix_ratio'] = tf.reduce_mean(out['mask'])
if augmentation_settings['mixup_alpha'] is not None:
beta = tfp.distributions.Beta(
augmentation_settings['mixup_alpha'],
augmentation_settings['mixup_alpha'])
out['mixup_ratio'] = beta.sample()
return out
ds = ds.map(crop_augment_preprocess, num_parallel_calls=AUTOTUNE)
# Mixup/cutmix by temporarily batching (using the per-device batch size):
use_cutmix = augmentation_settings['cutmix']
use_mixup = augmentation_settings['mixup_alpha'] is not None
if is_training and (use_cutmix or use_mixup):
inner_batch_size = batch_dims[-1]
# Apply mixup, cutmix, or mixup + cutmix on batched data.
# We use data from 2 batches to produce 1 mixed batch.
ds = ds.batch(inner_batch_size * 2)
if not use_cutmix and use_mixup:
ds = ds.map(my_mixup, num_parallel_calls=AUTOTUNE)
elif use_cutmix and not use_mixup:
ds = ds.map(my_cutmix, num_parallel_calls=AUTOTUNE)
elif use_cutmix and use_mixup:
ds = ds.map(my_mixup_cutmix, num_parallel_calls=AUTOTUNE)
# Unbatch for further processing.
ds = ds.unbatch()
for batch_size in reversed(batch_dims):
ds = ds.batch(batch_size)
ds = ds.prefetch(AUTOTUNE)
yield from tfds.as_numpy(ds)
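# Added usage sketch (illustrative; the per-device batch size and augmentation
# settings below are assumptions, not the values used in the paper).
def _example_load_usage():
  """Illustrative only: fetch a single training batch from the pipeline."""
  augmentation_settings = dict(
      randaugment=None,  # Or e.g. dict(num_layers=4, magnitude=5) to enable.
      cutmix=False,
      mixup_alpha=None,
  )
  batches = load(
      Split.TRAIN_AND_VALID,
      is_training=True,
      batch_dims=[jax.local_device_count(), 8],
      augmentation_settings=augmentation_settings)
  # Each batch maps 'images' to an array of shape
  # [local_devices, per_device_batch, im_dim, im_dim, 3] and 'labels' to ints.
  return next(batches)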
# cutmix_padding, my_cutmix, my_mixup, and my_mixup_cutmix taken from:
# https://github.com/deepmind/deepmind-research/blob/master/nfnets/dataset.py
def cutmix_padding(h, w):
"""Returns image mask for CutMix.
Taken from (https://github.com/google/edward2/blob/master/experimental
/marginalization_mixup/data_utils.py#L367)
Args:
h: image height.
w: image width.
"""
r_x = tf.random.uniform([], 0, w, tf.int32)
r_y = tf.random.uniform([], 0, h, tf.int32)
# Beta dist in paper, but they used Beta(1,1) which is just uniform.
image1_proportion = tf.random.uniform([])
patch_length_ratio = tf.math.sqrt(1 - image1_proportion)
r_w = tf.cast(patch_length_ratio * tf.cast(w, tf.float32), tf.int32)
r_h = tf.cast(patch_length_ratio * tf.cast(h, tf.float32), tf.int32)
bbx1 = tf.clip_by_value(tf.cast(r_x - r_w // 2, tf.int32), 0, w)
bby1 = tf.clip_by_value(tf.cast(r_y - r_h // 2, tf.int32), 0, h)
bbx2 = tf.clip_by_value(tf.cast(r_x + r_w // 2, tf.int32), 0, w)
bby2 = tf.clip_by_value(tf.cast(r_y + r_h // 2, tf.int32), 0, h)
# Create the binary mask.
pad_left = bbx1
pad_top = bby1
pad_right = tf.maximum(w - bbx2, 0)
pad_bottom = tf.maximum(h - bby2, 0)
r_h = bby2 - bby1
r_w = bbx2 - bbx1
mask = tf.pad(
tf.ones((r_h, r_w)),
paddings=[[pad_top, pad_bottom], [pad_left, pad_right]],
mode='CONSTANT',
constant_values=0)
mask.set_shape((h, w))
return mask[..., None] # Add channel dim.
def my_cutmix(batch):
"""Apply CutMix: https://arxiv.org/abs/1905.04899."""
batch = dict(**batch)
bs = tf.shape(batch['images'])[0] // 2
mask = batch['mask'][:bs]
images = (mask * batch['images'][:bs] + (1.0 - mask) * batch['images'][bs:])
mix_labels = batch['labels'][bs:]
labels = batch['labels'][:bs]
ratio = batch['cutmix_ratio'][:bs]
return {'images': images, 'labels': labels,
'mix_labels': mix_labels, 'ratio': ratio}
def my_mixup(batch):
"""Apply mixup: https://arxiv.org/abs/1710.09412."""
batch = dict(**batch)
bs = tf.shape(batch['images'])[0] // 2
ratio = batch['mixup_ratio'][:bs, None, None, None]
images = (ratio * batch['images'][:bs] + (1.0 - ratio) * batch['images'][bs:])
mix_labels = batch['labels'][bs:]
labels = batch['labels'][:bs]
  ratio = ratio[..., 0, 0, 0]  # Squeeze back to a scalar ratio per example.
return {'images': images, 'labels': labels,
'mix_labels': mix_labels, 'ratio': ratio}
def my_mixup_cutmix(batch):
"""Apply mixup to half the batch, and cutmix to the other."""
batch = dict(**batch)
bs = tf.shape(batch['images'])[0] // 4
mixup_ratio = batch['mixup_ratio'][:bs, None, None, None]
mixup_images = (mixup_ratio * batch['images'][:bs]
+ (1.0 - mixup_ratio) * batch['images'][bs:2*bs])
mixup_labels = batch['labels'][:bs]
mixup_mix_labels = batch['labels'][bs:2*bs]
cutmix_mask = batch['mask'][2*bs:3*bs]
cutmix_images = (cutmix_mask * batch['images'][2*bs:3*bs]
+ (1.0 - cutmix_mask) * batch['images'][-bs:])
cutmix_labels = batch['labels'][2*bs:3*bs]
cutmix_mix_labels = batch['labels'][-bs:]
cutmix_ratio = batch['cutmix_ratio'][2*bs : 3*bs]
return {'images': tf.concat([mixup_images, cutmix_images], axis=0),
'labels': tf.concat([mixup_labels, cutmix_labels], axis=0),
'mix_labels': tf.concat([mixup_mix_labels, cutmix_mix_labels], 0),
'ratio': tf.concat([mixup_ratio[..., 0, 0, 0], cutmix_ratio], axis=0)}
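# Added note (illustrative): downstream, the training loss combines the two
# label sets using 'ratio', as sketched below (num_classes is an assumption).
def _example_combine_mixed_labels(batch, num_classes=1000):
  """Illustrative only: how 'labels', 'mix_labels' and 'ratio' are consumed."""
  ratio = batch['ratio'][:, None]
  labels = tf.one_hot(batch['labels'], num_classes)
  mix_labels = tf.one_hot(batch['mix_labels'], num_classes)
  return ratio * labels + (1.0 - ratio) * mix_labels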
def _to_tfds_split(split: Split) -> tfds.Split:
  """Returns the TFDS split corresponding to the given dataset split."""
# NOTE: Imagenet did not release labels for the test split used in the
# competition, so it has been typical at DeepMind to consider the VALID
# split the TEST split and to reserve 10k images from TRAIN for VALID.
if split in (
Split.TRAIN, Split.TRAIN_AND_VALID, Split.VALID):
return tfds.Split.TRAIN
else:
assert split == Split.TEST
return tfds.Split.VALIDATION
def _shard(
split: Split, shard_index: int, num_shards: int) -> Tuple[int, int]:
"""Returns [start, end) for the given shard index."""
assert shard_index < num_shards
arange = np.arange(split.num_examples)
shard_range = np.array_split(arange, num_shards)[shard_index]
start, end = shard_range[0], (shard_range[-1] + 1)
if split == Split.TRAIN:
# Note that our TRAIN=TFDS_TRAIN[10000:] and VALID=TFDS_TRAIN[:10000].
offset = Split.VALID.num_examples
start += offset
end += offset
return start, end
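# Added illustrative example (not used by the pipeline): with a single host,
# the TRAIN shard covers TFDS train images [10000, 1281167), i.e. everything
# after the images reserved for VALID.
def _example_single_host_train_shard() -> Tuple[int, int]:
  """Illustrative only: returns (10000, 1281167)."""
  return _shard(Split.TRAIN, shard_index=0, num_shards=1)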
def _preprocess_image(
image_bytes: tf.Tensor,
is_training: bool,
image_size: Sequence[int],
augmentation_settings: Mapping[str, Any],
) -> Tuple[tf.Tensor, tf.Tensor]:
"""Returns processed and resized images."""
# Get the image crop.
if is_training:
image, im_shape = _decode_and_random_crop(image_bytes)
image = tf.image.random_flip_left_right(image)
else:
image, im_shape = _decode_and_center_crop(image_bytes)
assert image.dtype == tf.uint8
# Optionally apply RandAugment: https://arxiv.org/abs/1909.13719
if is_training:
if augmentation_settings['randaugment'] is not None:
# Input and output images are dtype uint8.
image = autoaugment.distort_image_with_randaugment(
image,
num_layers=augmentation_settings['randaugment']['num_layers'],
magnitude=augmentation_settings['randaugment']['magnitude'])
# Resize and normalize the image crop.
# NOTE: Bicubic resize (1) casts uint8 to float32 and (2) resizes without
# clamping overshoots. This means values returned will be outside the range
# [0.0, 255.0] (e.g. we have observed outputs in the range [-51.1, 336.6]).
image = tf.image.resize(
image, image_size, tf.image.ResizeMethod.BICUBIC)
image = _normalize_image(image)
return image, im_shape
def _normalize_image(image: tf.Tensor) -> tf.Tensor:
"""Normalize the image to zero mean and unit variance."""
image -= tf.constant(MEAN_RGB, shape=[1, 1, 3], dtype=image.dtype)
image /= tf.constant(STDDEV_RGB, shape=[1, 1, 3], dtype=image.dtype)
return image
def _distorted_bounding_box_crop(
image_bytes: tf.Tensor,
*,
jpeg_shape: tf.Tensor,
bbox: tf.Tensor,
min_object_covered: float,
aspect_ratio_range: Tuple[float, float],
area_range: Tuple[float, float],
max_attempts: int,
) -> Tuple[tf.Tensor, tf.Tensor]:
"""Generates cropped_image using one of the bboxes randomly distorted."""
bbox_begin, bbox_size, _ = tf.image.sample_distorted_bounding_box(
jpeg_shape,
bounding_boxes=bbox,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range,
max_attempts=max_attempts,
use_image_if_no_bounding_boxes=True)
# Crop the image to the specified bounding box.
offset_y, offset_x, _ = tf.unstack(bbox_begin)
target_height, target_width, _ = tf.unstack(bbox_size)
crop_window = [offset_y, offset_x, target_height, target_width]
if image_bytes.dtype == tf.dtypes.string:
image = tf.image.decode_and_crop_jpeg(image_bytes,
tf.stack(crop_window),
channels=3)
else:
image = tf.image.crop_to_bounding_box(image_bytes, *crop_window)
im_shape = tf.stack([target_height, target_width])
return image, im_shape
def _decode_whole_image(image_bytes: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]:
image = tf.io.decode_jpeg(image_bytes, channels=3)
im_shape = tf.io.extract_jpeg_shape(image_bytes, output_type=tf.int32)
return image, im_shape
def _decode_and_random_crop(
image_bytes: tf.Tensor
) -> Tuple[tf.Tensor, tf.Tensor]:
"""Make a random crop of INPUT_DIM."""
if image_bytes.dtype == tf.dtypes.string:
jpeg_shape = tf.image.extract_jpeg_shape(image_bytes)
else:
jpeg_shape = tf.shape(image_bytes)
bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
image, im_shape = _distorted_bounding_box_crop(
image_bytes,
jpeg_shape=jpeg_shape,
bbox=bbox,
min_object_covered=0.1,
aspect_ratio_range=(3 / 4, 4 / 3),
area_range=(0.08, 1.0),
max_attempts=10)
if tf.reduce_all(tf.equal(jpeg_shape, tf.shape(image))):
    # If the random crop failed, fall back to center crop.
image, im_shape = _decode_and_center_crop(image_bytes, jpeg_shape)
return image, im_shape
def _center_crop(image, crop_dim):
"""Center crops an image to a target dimension."""
image_height = image.shape[0]
image_width = image.shape[1]
offset_height = ((image_height - crop_dim) + 1) // 2
offset_width = ((image_width - crop_dim) + 1) // 2
return tf.image.crop_to_bounding_box(
image, offset_height, offset_width, crop_dim, crop_dim)
def _decode_and_center_crop(
image_bytes: tf.Tensor,
jpeg_shape: Optional[tf.Tensor] = None,
) -> Tuple[tf.Tensor, tf.Tensor]:
"""Crops to center of image with padding then scales."""
if jpeg_shape is None:
if image_bytes.dtype == tf.dtypes.string:
jpeg_shape = tf.image.extract_jpeg_shape(image_bytes)
else:
jpeg_shape = tf.shape(image_bytes)
image_height = jpeg_shape[0]
image_width = jpeg_shape[1]
padded_center_crop_size = tf.cast(
((INPUT_DIM / (INPUT_DIM + 32)) *
tf.cast(tf.minimum(image_height, image_width), tf.float32)), tf.int32)
offset_height = ((image_height - padded_center_crop_size) + 1) // 2
offset_width = ((image_width - padded_center_crop_size) + 1) // 2
crop_window = [offset_height, offset_width,
padded_center_crop_size, padded_center_crop_size]
if image_bytes.dtype == tf.dtypes.string:
image = tf.image.decode_and_crop_jpeg(image_bytes,
tf.stack(crop_window),
channels=3)
else:
image = tf.image.crop_to_bounding_box(image_bytes, *crop_window)
im_shape = tf.stack([padded_center_crop_size, padded_center_crop_size])
return image, im_shape
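# Added note (illustrative): the padded center crop above keeps
# INPUT_DIM / (INPUT_DIM + 32) = 224 / 256 = 0.875 of the shorter image side,
# the ratio form of the classic "resize to 256, center-crop 224" recipe.
def _example_center_crop_fraction() -> float:
  """Illustrative only: fraction of the short side kept by the center crop."""
  return INPUT_DIM / (INPUT_DIM + 32)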
| deepmind-research-master | perceiver/train/dataset.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A reference training pipeline for Perceiver/Perceiver IO on ImageNet.
We use the Jaxline (https://github.com/deepmind/jaxline) training framework.
Two sets of hyperparameters are provided: the hyperparameters we used for the
Perceiver IO paper, and scaled-down hyperparameters for local testing.
This script should run out-of-the-box with the local hyperparameters.
The scaled-up hyperparameters require a distributed learning setup to run,
and this script will need to be adapted to your specific setup.
"""
import functools
from typing import Generator, Mapping, Text, Tuple
from absl import app
from absl import flags
from absl import logging
import haiku as hk
import jax
import jax.numpy as jnp
from jaxline import base_config
from jaxline import experiment
from jaxline import platform
from jaxline import utils as jl_utils
from ml_collections import config_dict
import numpy as np
import optax
from perceiver import io_processors
from perceiver import perceiver
from perceiver.train import dataset
from perceiver.train import utils
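# Added usage sketch (illustrative): jaxline experiments are launched by
# pointing the required --config flag at this file; the module path and file
# path below are assumptions about your checkout layout.
#   python -m perceiver.train.experiment \
#     --config=perceiver/train/experiment.py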
FLAGS = flags.FLAGS
OptState = Tuple[optax.TraceState, optax.ScaleByScheduleState, optax.ScaleState]
Scalars = Mapping[Text, jnp.ndarray]
N_TRAIN_EXAMPLES = dataset.Split.TRAIN_AND_VALID.num_examples
N_CLASSES = 1000
# Only local/debug parameters are supported out of the box.
# To use the scaled-up hyperparameters, please adapt this script to your
# training setup and set this flag to False
IS_LOCAL = True
def get_training_steps(batch_size, n_epochs):
return (N_TRAIN_EXAMPLES * n_epochs) // batch_size
def get_config():
"""Return config object for training."""
use_debug_settings = IS_LOCAL
config = base_config.get_base_config()
# Experiment config.
local_batch_size = 2
# Modify this to adapt to your custom distributed learning setup
num_devices = 1
config.train_batch_size = local_batch_size * num_devices
config.n_epochs = 110
def _default_or_debug(default_value, debug_value):
return debug_value if use_debug_settings else default_value
n_train_examples = N_TRAIN_EXAMPLES
num_classes = N_CLASSES
config.experiment_kwargs = config_dict.ConfigDict(
dict(
config=dict(
optimizer=dict(
base_lr=5e-4,
max_norm=10.0, # < 0 to turn off.
schedule_type='constant_cosine',
weight_decay=1e-1,
decay_pos_embs=True,
scale_by_batch=True,
cosine_decay_kwargs=dict(
init_value=0.0,
warmup_epochs=0,
end_value=0.0,
),
step_decay_kwargs=dict(
decay_boundaries=[0.5, 0.8, 0.95],
decay_rate=0.1,
),
constant_cosine_decay_kwargs=dict(
constant_fraction=0.5,
end_value=0.0,
),
optimizer='lamb',
# Optimizer-specific kwargs:
adam_kwargs=dict(
b1=0.9,
b2=0.999,
eps=1e-8,
),
lamb_kwargs=dict(
b1=0.9,
b2=0.999,
eps=1e-6,
),
),
# Don't specify output_channels - it's not used for
# classifiers.
model=dict(
perceiver_kwargs=dict(
input_preprocessor=dict(
prep_type='pixels',
# Channels for conv/conv1x1 preprocessing:
num_channels=64,
# -------------------------
# Position encoding arguments:
# -------------------------
position_encoding_type='fourier',
concat_or_add_pos='concat',
spatial_downsample=1,
# If >0, project position to this size:
project_pos_dim=-1,
trainable_position_encoding_kwargs=dict(
num_channels=258, # Match default # for Fourier.
init_scale=0.02,
),
fourier_position_encoding_kwargs=dict(
num_bands=64,
max_resolution=(224, 224),
sine_only=False,
concat_pos=True,
),
),
encoder=dict(
num_self_attends_per_block=_default_or_debug(6, 2),
# Weights won't be shared if num_blocks is set to 1.
num_blocks=_default_or_debug(8, 2),
z_index_dim=512,
num_z_channels=1024,
num_cross_attend_heads=1,
num_self_attend_heads=8,
cross_attend_widening_factor=1,
self_attend_widening_factor=1,
dropout_prob=0.0,
# Position encoding for the latent array.
z_pos_enc_init_scale=0.02,
cross_attention_shape_for_attn='kv',
use_query_residual=True,
),
decoder=dict(
num_z_channels=1024,
use_query_residual=True,
# Position encoding for the output logits.
position_encoding_type='trainable',
trainable_position_encoding_kwargs=dict(
num_channels=1024,
init_scale=0.02,
),
),
),
),
training=dict(
images_per_epoch=n_train_examples,
label_smoothing=0.1,
n_epochs=config.get_oneway_ref('n_epochs'),
batch_size=config.get_oneway_ref('train_batch_size')
),
data=dict(
num_classes=num_classes,
# Run on smaller images to debug.
im_dim=_default_or_debug(224, 32),
augmentation=dict(
# Typical randaug params:
# num_layers in [1, 3]
# magnitude in [5, 30]
# Set randaugment to None to disable.
randaugment=dict(
num_layers=4,
magnitude=5),
cutmix=True,
# Mixup alpha should be in [0, 1].
# Set to None to disable.
mixup_alpha=0.2,
),
),
evaluation=dict(
subset='test',
batch_size=2,
),
)
)
)
# Training loop config.
config.training_steps = get_training_steps(
config.get_oneway_ref('train_batch_size'),
config.get_oneway_ref('n_epochs'))
config.log_train_data_interval = 60
config.log_tensors_interval = 60
config.save_checkpoint_interval = 300
config.eval_specific_checkpoint_dir = ''
config.best_model_eval_metric = 'eval_top_1_acc'
  config.checkpoint_dir = '/tmp/perceiver_imagenet_checkpoints'
config.train_checkpoint_all_hosts = False
# Prevents accidentally setting keys that aren't recognized (e.g. in tests).
config.lock()
return config
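# Added usage sketch (illustrative): the config can be built and inspected
# programmatically, e.g. to check the derived number of training steps before
# launching jaxline.
def _example_inspect_config():
  """Illustrative only: with the local settings this is (1281167 * 110) // 2."""
  config = get_config()
  return config.training_steps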
class Experiment(experiment.AbstractExperiment):
"""ImageNet experiment."""
# A map from object properties that will be checkpointed to their name
# in a checkpoint. Currently we assume that these are all sharded
# device arrays.
CHECKPOINT_ATTRS = {
'_params': 'params',
'_state': 'state',
'_opt_state': 'opt_state',
}
def __init__(self, mode, init_rng, config):
"""Initializes experiment."""
super(Experiment, self).__init__(mode=mode, init_rng=init_rng)
self.mode = mode
self.init_rng = init_rng
self.config = config
# Checkpointed experiment state.
self._params = None
self._state = None
self._opt_state = None
# Input pipelines.
self._train_input = None
self._eval_input = None
self.forward = hk.transform_with_state(self._forward_fn)
# NOTE: We "donate" the `params, state, opt_state` arguments which allows
# JAX (on some backends) to reuse the device memory associated with these
# inputs to store the outputs of our function (which also start with
# `params, state, opt_state`).
self._update_func = jax.pmap(self._update_func, axis_name='i',
donate_argnums=(0, 1, 2))
self._eval_batch = jax.jit(self._eval_batch)
def _forward_fn(
self,
inputs: dataset.Batch,
is_training: bool,
) -> jnp.ndarray:
images = inputs['images']
perceiver_kwargs = self.config.model.perceiver_kwargs
input_preprocessor = io_processors.ImagePreprocessor(
**perceiver_kwargs['input_preprocessor'])
encoder = perceiver.PerceiverEncoder(**perceiver_kwargs['encoder'])
decoder = perceiver.ClassificationDecoder(
self.config.data.num_classes,
**perceiver_kwargs['decoder'])
model = perceiver.Perceiver(
encoder=encoder,
decoder=decoder,
input_preprocessor=input_preprocessor)
return model(images, is_training=is_training)
# _ _
# | |_ _ __ __ _(_)_ __
# | __| '__/ _` | | '_ \
# | |_| | | (_| | | | | |
# \__|_| \__,_|_|_| |_|
#
def step(self, global_step: int, rng: jnp.ndarray,
*unused_args, **unused_kwargs):
"""See base class."""
if self._train_input is None:
self._initialize_train()
inputs = next(self._train_input)
self._params, self._state, self._opt_state, scalars = (
self._update_func(
self._params, self._state, self._opt_state, inputs, rng, global_step
))
scalars = jl_utils.get_first(scalars)
return scalars
def _initialize_train(self):
self._train_input = jl_utils.py_prefetch(self._build_train_input)
total_batch_size = self.config.training.batch_size
steps_per_epoch = (
self.config.training.images_per_epoch / self.config.training.batch_size)
total_steps = self.config.training.n_epochs * steps_per_epoch
    # Build the learning rate schedule for the full training run.
self._lr_schedule = utils.get_learning_rate_schedule(
total_batch_size, steps_per_epoch, total_steps, self.config.optimizer)
self._optimizer = utils.make_optimizer(
self.config.optimizer,
self._lr_schedule)
# Check we haven't already restored params
if self._params is None:
logging.info('Initializing parameters.')
inputs = next(self._train_input)
init_net = jax.pmap(lambda *a: self.forward.init(*a, is_training=True))
init_opt = jax.pmap(self._optimizer.init)
# Init uses the same RNG key on all hosts+devices to ensure everyone
# computes the same initial state.
init_rng = jl_utils.bcast_local_devices(self.init_rng)
self._params, self._state = init_net(init_rng, inputs)
self._opt_state = init_opt(self._params)
def _load_data(self, split, is_training, batch_dims):
"""Wrapper for dataset loading."""
return dataset.load(
split=split,
is_training=is_training,
batch_dims=batch_dims,
im_dim=self.config.data.im_dim,
augmentation_settings=self.config.data.augmentation,
)
def _build_train_input(self) -> Generator[dataset.Batch, None, None]:
"""See base class."""
num_devices = jax.device_count()
global_batch_size = self.config.training.batch_size
per_device_batch_size, ragged = divmod(global_batch_size, num_devices)
if ragged:
raise ValueError(
f'Global batch size {global_batch_size} must be divisible by '
f'num devices {num_devices}')
split = dataset.Split.TRAIN_AND_VALID
return self._load_data(
split=split,
is_training=True,
batch_dims=[jax.local_device_count(), per_device_batch_size])
def _one_hot(self, value):
"""One-hot encoding potentially over a sequence of labels."""
y = jax.nn.one_hot(value, self.config.data.num_classes)
return y
def _loss_fn(
self,
params: hk.Params,
state: hk.State,
inputs: dataset.Batch,
rng: jnp.ndarray,
) -> Tuple[jnp.ndarray, Tuple[Scalars, hk.State]]:
logits, state = self.forward.apply(
params, state, rng, inputs, is_training=True)
label = self._one_hot(inputs['labels'])
# Handle cutmix/mixup label mixing:
if 'mix_labels' in inputs:
logging.info('Using mixup or cutmix!')
mix_label = self._one_hot(inputs['mix_labels'])
mix_ratio = inputs['ratio'][:, None]
label = mix_ratio * label + (1. - mix_ratio) * mix_label
# Apply label-smoothing to one-hot labels.
label_smoothing = self.config.training.label_smoothing
if not (label_smoothing >= 0. and label_smoothing < 1.):
      raise ValueError(
          f'label_smoothing is {label_smoothing} and should be in [0, 1)')
if label_smoothing > 0:
smooth_positives = 1. - label_smoothing
smooth_negatives = label_smoothing / self.config.data.num_classes
label = smooth_positives * label + smooth_negatives
loss_w_batch = utils.softmax_cross_entropy(logits, label)
loss = jnp.mean(loss_w_batch, dtype=loss_w_batch.dtype)
scaled_loss = loss / jax.device_count()
metrics = utils.topk_correct(logits, inputs['labels'], prefix='')
metrics = jax.tree_map(jnp.mean, metrics)
top_1_acc = metrics['top_1_acc']
top_5_acc = metrics['top_5_acc']
loss_scalars = dict(
loss=loss,
top_1_acc=top_1_acc,
top_5_acc=top_5_acc,
)
return scaled_loss, (loss_scalars, state)
def _update_func(
self,
params: hk.Params,
state: hk.State,
opt_state: OptState,
inputs: dataset.Batch,
rng: jnp.ndarray,
global_step: int,
) -> Tuple[hk.Params, hk.State, OptState, Scalars]:
"""Applies an update to parameters and returns new state."""
# This function computes the gradient of the first output of loss_fn and
# passes through the other arguments unchanged.
grad_loss_fn = jax.grad(self._loss_fn, has_aux=True)
scaled_grads, (loss_scalars, state) = grad_loss_fn(
params, state, inputs, rng)
grads = jax.lax.psum(scaled_grads, axis_name='i')
# Grab the learning rate to log before performing the step.
learning_rate = self._lr_schedule(global_step)
# Compute and apply updates via our optimizer.
updates, opt_state = self._optimizer.update(grads, opt_state, params)
params = optax.apply_updates(params, updates)
n_params = 0
for k in params.keys():
for l in params[k]:
n_params = n_params + np.prod(params[k][l].shape)
# Scalars to log (note: we log the mean across all hosts/devices).
scalars = {'learning_rate': learning_rate,
'n_params (M)': float(n_params/1e6),
'global_gradient_norm': optax.global_norm(grads)}
loss_scalars = {f'train_{k}': v for k, v in loss_scalars.items()}
scalars.update(loss_scalars)
scalars = jax.lax.pmean(scalars, axis_name='i')
return params, state, opt_state, scalars
# _
# _____ ____ _| |
# / _ \ \ / / _` | |
# | __/\ V / (_| | |
# \___| \_/ \__,_|_|
#
def evaluate(self, global_step, rng, **unused_args):
"""See base class."""
global_step = np.array(jl_utils.get_first(global_step))
scalars = jax.device_get(self._eval_epoch(jl_utils.get_first(rng)))
logging.info('[Step %d] Eval scalars: %s', global_step, scalars)
return scalars
def _eval_batch(
self,
params: hk.Params,
state: hk.State,
inputs: dataset.Batch,
rng: jnp.ndarray,
) -> Scalars:
"""Evaluates a batch."""
logits, _ = self.forward.apply(
params, state, rng, inputs, is_training=False)
labels = self._one_hot(inputs['labels'])
loss = utils.softmax_cross_entropy(logits, labels)
metrics = utils.topk_correct(logits, inputs['labels'], prefix='')
metrics = jax.tree_map(jnp.mean, metrics)
top_1_acc = metrics['top_1_acc']
top_5_acc = metrics['top_5_acc']
bs = logits.shape[0]
top_1_acc = jnp.expand_dims(top_1_acc, axis=0) * bs
top_5_acc = jnp.expand_dims(top_5_acc, axis=0) * bs
# NOTE: Returned values will be summed and finally divided by num_samples.
return {
'eval_loss': loss,
'eval_top_1_acc': top_1_acc, 'eval_top_5_acc': top_5_acc}
def _build_eval_input(self) -> Generator[dataset.Batch, None, None]:
split = dataset.Split.from_string(self.config.evaluation.subset)
return self._load_data(
split=split,
is_training=False,
batch_dims=[self.config.evaluation.batch_size])
def _eval_epoch(self, rng):
"""Evaluates an epoch."""
num_samples = 0.
summed_scalars = None
params = jl_utils.get_first(self._params)
state = jl_utils.get_first(self._state)
for inputs in self._build_eval_input():
num_samples += inputs['labels'].shape[0]
scalars = self._eval_batch(params, state, inputs, rng)
# Accumulate the sum of scalars for each step.
scalars = jax.tree_map(lambda x: jnp.sum(x, axis=0), scalars)
if summed_scalars is None:
summed_scalars = scalars
else:
summed_scalars = jax.tree_multimap(jnp.add, summed_scalars, scalars)
mean_scalars = jax.tree_map(lambda x: x / num_samples, summed_scalars)
return mean_scalars
if __name__ == '__main__':
flags.mark_flag_as_required('config')
app.run(functools.partial(platform.main, Experiment))
| deepmind-research-master | perceiver/train/experiment.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities."""
from typing import Callable, List, Mapping, NamedTuple, Optional, Tuple, Union
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
import optax
Batch = Mapping[str, np.ndarray]
OptState = Tuple[optax.TraceState, optax.ScaleByScheduleState, optax.ScaleState]
Scalars = Mapping[str, jnp.ndarray]
ParamsOrState = Union[hk.Params, hk.State]
NORM_NAMES = ['layer_norm', 'batchnorm']
# any_in and topk_correct taken from
# https://github.com/deepmind/deepmind-research/blob/master/nfnets/utils.py
@jax.vmap
def any_in(prediction, target):
  """For each row, checks if any element of `prediction` is in `target`."""
return jnp.isin(prediction, target)
def topk_correct(logits, labels, mask=None, prefix='', topk=(1, 5)):
  """Calculate top-k accuracy for multiple k values."""
metrics = {}
argsorted_logits = jnp.argsort(logits)
for k in topk:
pred_labels = argsorted_logits[..., -k:]
# Get the number of examples where the label is in the top-k predictions
correct = any_in(pred_labels, labels).any(axis=-1).astype(jnp.float32)
if mask is not None:
correct *= mask
metrics[f'{prefix}top_{k}_acc'] = correct
return metrics
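# Added usage sketch (illustrative; the logits and labels below are made up).
def _example_topk_correct():
  """Illustrative only: per-example correctness indicators for a tiny batch."""
  logits = jnp.array([[0.1, 0.7, 0.2], [0.5, 0.3, 0.2]])
  labels = jnp.array([1, 2])
  # Returns {'top_1_acc': [1., 0.], 'top_2_acc': [1., 0.]}.
  return topk_correct(logits, labels, topk=(1, 2))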
def softmax_cross_entropy(logits, labels):
"""Computes softmax cross entropy given logits and one-hot class labels.
Args:
logits: Logit output values.
labels: Ground truth one-hot-encoded labels.
Returns:
    Per-example loss values (the class dimension of `labels` is reduced).
"""
return jnp.asarray(optax.softmax_cross_entropy(logits, labels))
def _get_batch_scaled_lr(total_batch_size, lr, scale_by_batch=True):
# This is the linear scaling rule in Section 5.1 of
# https://arxiv.org/pdf/1706.02677.pdf.
if scale_by_batch:
lr = (lr * total_batch_size) / 256
return lr
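# Added worked example (illustrative): with the linear scaling rule, a base
# learning rate of 5e-4 at total batch size 1024 becomes 5e-4 * 1024 / 256.
def _example_batch_scaled_lr() -> float:
  """Illustrative only: returns 2e-3."""
  return _get_batch_scaled_lr(total_batch_size=1024, lr=5e-4)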
def get_learning_rate_schedule(
total_batch_size, steps_per_epoch, total_steps, optimizer_config):
"""Build the learning rate schedule function."""
base_lr = _get_batch_scaled_lr(total_batch_size, optimizer_config.base_lr,
optimizer_config.scale_by_batch)
schedule_type = optimizer_config.schedule_type
if schedule_type == 'steps':
boundaries = optimizer_config.step_decay_kwargs.decay_boundaries
boundaries.sort()
decay_rate = optimizer_config.step_decay_kwargs.decay_rate
boundaries_and_scales = {
int(boundary * total_steps): decay_rate for boundary in boundaries}
schedule_fn = optax.piecewise_constant_schedule(
init_value=base_lr, boundaries_and_scales=boundaries_and_scales)
elif schedule_type == 'cosine':
warmup_steps = (optimizer_config.cosine_decay_kwargs.warmup_epochs
* steps_per_epoch)
# Batch scale the other lr values as well:
init_value = _get_batch_scaled_lr(
total_batch_size,
optimizer_config.cosine_decay_kwargs.init_value,
optimizer_config.scale_by_batch)
end_value = _get_batch_scaled_lr(
total_batch_size,
optimizer_config.cosine_decay_kwargs.end_value,
optimizer_config.scale_by_batch)
schedule_fn = optax.warmup_cosine_decay_schedule(
init_value=init_value,
peak_value=base_lr,
warmup_steps=warmup_steps,
decay_steps=total_steps,
end_value=end_value)
elif schedule_type == 'constant_cosine':
# Convert end_value to alpha, used by cosine_decay_schedule.
alpha = optimizer_config.constant_cosine_decay_kwargs.end_value / base_lr
# Number of steps spent in constant phase.
constant_steps = int(
optimizer_config.constant_cosine_decay_kwargs.constant_fraction
* total_steps)
decay_steps = total_steps - constant_steps
constant_phase = optax.constant_schedule(value=base_lr)
decay_phase = optax.cosine_decay_schedule(
init_value=base_lr,
decay_steps=decay_steps,
alpha=alpha)
schedule_fn = optax.join_schedules(
schedules=[constant_phase, decay_phase],
boundaries=[constant_steps])
else:
raise ValueError(f'Unknown learning rate schedule: {schedule_type}')
return schedule_fn
def _weight_decay_exclude(
exclude_names: Optional[List[str]] = None
) -> Callable[[str, str, jnp.ndarray], bool]:
  """Logic for deciding which parameters to exclude from weight decay.
  Args:
    exclude_names: an optional list of parameter names to exclude from
      weight_decay. ['b'] by default.
Returns:
A predicate that returns True for params that need to be excluded from
weight_decay.
"""
# By default weight_decay the weights but not the biases.
if not exclude_names:
exclude_names = ['b']
  def exclude(module_name: str, name: str, value: jnp.ndarray):
del value
# Do not weight decay the parameters of normalization blocks.
if any([norm_name in module_name for norm_name in NORM_NAMES]):
return True
else:
return name in exclude_names
return exclude
class AddWeightDecayState(NamedTuple):
"""Stateless transformation."""
def add_weight_decay(
weight_decay: float,
exclude_names: Optional[List[str]] = None) -> optax.GradientTransformation:
"""Add parameter scaled by `weight_decay` to the `updates`.
Same as optax.add_decayed_weights but can exclude parameters by name.
Args:
weight_decay: weight_decay coefficient.
exclude_names: an optional list of names to exclude for weight_decay. ['b']
by default.
Returns:
An (init_fn, update_fn) tuple.
"""
def init_fn(_):
return AddWeightDecayState()
def update_fn(updates, state, params):
exclude = _weight_decay_exclude(exclude_names=exclude_names)
u_ex, u_in = hk.data_structures.partition(exclude, updates)
_, p_in = hk.data_structures.partition(exclude, params)
u_in = jax.tree_multimap(lambda g, p: g + weight_decay * p, u_in, p_in)
updates = hk.data_structures.merge(u_ex, u_in)
return updates, state
return optax.GradientTransformation(init_fn, update_fn)
def make_optimizer(optimizer_config, lr_schedule):
"""Construct the optax optimizer with given LR schedule."""
if (optimizer_config.get('decay_pos_embs') is None or
optimizer_config.decay_pos_embs):
# Decay learned position embeddings by default.
weight_decay_exclude_names = ['b']
else:
weight_decay_exclude_names = ['pos_embs', 'b']
optax_chain = []
if optimizer_config.max_norm > 0:
optax_chain.append(
optax.clip_by_global_norm(optimizer_config.max_norm))
if optimizer_config.optimizer == 'adam':
# See: https://arxiv.org/abs/1412.6980
optax_chain.extend([
optax.scale_by_adam(**optimizer_config.adam_kwargs),
add_weight_decay(
optimizer_config.weight_decay,
exclude_names=weight_decay_exclude_names)
])
elif optimizer_config.optimizer == 'lamb':
# See: https://arxiv.org/abs/1904.00962
optax_chain.extend([
optax.scale_by_adam(**optimizer_config.lamb_kwargs),
add_weight_decay(
optimizer_config.weight_decay,
exclude_names=weight_decay_exclude_names),
optax.scale_by_trust_ratio()
])
else:
raise ValueError(f'Undefined optimizer {optimizer_config.optimizer}')
# Scale by the (negative) learning rate.
optax_chain.extend([
optax.scale_by_schedule(lr_schedule),
optax.scale(-1),
])
return optax.chain(*optax_chain)
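# Added usage sketch (illustrative; the optimizer settings below are
# assumptions mirroring the Adam kwargs in experiment.py, not a prescribed
# configuration).
def _example_make_optimizer() -> optax.GradientTransformation:
  """Illustrative only: build the optimizer with a constant learning rate."""
  from ml_collections import config_dict  # Local import for this sketch only.
  opt_config = config_dict.ConfigDict(dict(
      decay_pos_embs=True,
      max_norm=10.0,
      optimizer='adam',
      adam_kwargs=dict(b1=0.9, b2=0.999, eps=1e-8),
      weight_decay=1e-1,
  ))
  return make_optimizer(opt_config, lr_schedule=optax.constant_schedule(3e-4))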
| deepmind-research-master | perceiver/train/utils.py |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""AutoAugment and RandAugment policies for enhanced image preprocessing.
AutoAugment Reference: https://arxiv.org/abs/1805.09501
RandAugment Reference: https://arxiv.org/abs/1909.13719
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import math
from ml_collections import config_dict
import tensorflow.compat.v1 as tf
from tensorflow_addons import image as contrib_image
# pylint: disable=deprecated-method
# This signifies the max integer that the controller RNN could predict for the
# augmentation scheme.
_MAX_LEVEL = 10.
def policy_v0():
"""Autoaugment policy that was used in AutoAugment Paper."""
# Each tuple is an augmentation operation of the form
# (operation, probability, magnitude). Each element in policy is a
# sub-policy that will be applied sequentially on the image.
policy = [
[('Equalize', 0.8, 1), ('ShearY', 0.8, 4)],
[('Color', 0.4, 9), ('Equalize', 0.6, 3)],
[('Color', 0.4, 1), ('Rotate', 0.6, 8)],
[('Solarize', 0.8, 3), ('Equalize', 0.4, 7)],
[('Solarize', 0.4, 2), ('Solarize', 0.6, 2)],
[('Color', 0.2, 0), ('Equalize', 0.8, 8)],
[('Equalize', 0.4, 8), ('SolarizeAdd', 0.8, 3)],
[('ShearX', 0.2, 9), ('Rotate', 0.6, 8)],
[('Color', 0.6, 1), ('Equalize', 1.0, 2)],
[('Invert', 0.4, 9), ('Rotate', 0.6, 0)],
[('Equalize', 1.0, 9), ('ShearY', 0.6, 3)],
[('Color', 0.4, 7), ('Equalize', 0.6, 0)],
[('Posterize', 0.4, 6), ('AutoContrast', 0.4, 7)],
[('Solarize', 0.6, 8), ('Color', 0.6, 9)],
[('Solarize', 0.2, 4), ('Rotate', 0.8, 9)],
[('Rotate', 1.0, 7), ('TranslateY', 0.8, 9)],
[('ShearX', 0.0, 0), ('Solarize', 0.8, 4)],
[('ShearY', 0.8, 0), ('Color', 0.6, 4)],
[('Color', 1.0, 0), ('Rotate', 0.6, 2)],
[('Equalize', 0.8, 4), ('Equalize', 0.0, 8)],
[('Equalize', 1.0, 4), ('AutoContrast', 0.6, 2)],
[('ShearY', 0.4, 7), ('SolarizeAdd', 0.6, 7)],
[('Posterize', 0.8, 2), ('Solarize', 0.6, 10)],
[('Solarize', 0.6, 8), ('Equalize', 0.6, 1)],
[('Color', 0.8, 6), ('Rotate', 0.4, 5)],
]
return policy
def policy_vtest():
"""Autoaugment test policy for debugging."""
# Each tuple is an augmentation operation of the form
# (operation, probability, magnitude). Each element in policy is a
# sub-policy that will be applied sequentially on the image.
policy = [
[('TranslateX', 1.0, 4), ('Equalize', 1.0, 10)],
]
return policy
def blend(image1, image2, factor):
"""Blend image1 and image2 using 'factor'.
Factor can be above 0.0. A value of 0.0 means only image1 is used.
A value of 1.0 means only image2 is used. A value between 0.0 and
1.0 means we linearly interpolate the pixel values between the two
images. A value greater than 1.0 "extrapolates" the difference
between the two pixel values, and we clip the results to values
between 0 and 255.
Args:
image1: An image Tensor of type uint8.
image2: An image Tensor of type uint8.
factor: A floating point value above 0.0.
Returns:
A blended image Tensor of type uint8.
"""
if factor == 0.0:
return tf.convert_to_tensor(image1)
if factor == 1.0:
return tf.convert_to_tensor(image2)
image1 = tf.to_float(image1)
image2 = tf.to_float(image2)
difference = image2 - image1
scaled = factor * difference
# Do addition in float.
temp = tf.to_float(image1) + scaled
# Interpolate
if factor > 0.0 and factor < 1.0:
# Interpolation means we always stay within 0 and 255.
return tf.cast(temp, tf.uint8)
# Extrapolate:
#
# We need to clip and then cast.
return tf.cast(tf.clip_by_value(temp, 0.0, 255.0), tf.uint8)
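# Added worked example (illustrative): blending two constant images with
# factor 0.5 yields their midpoint; factors above 1.0 extrapolate and are
# clipped back to [0, 255] before the uint8 cast.
def _example_blend():
  """Illustrative only: every pixel of the result below is 50."""
  image1 = tf.zeros([2, 2, 3], dtype=tf.uint8)
  image2 = tf.fill([2, 2, 3], tf.constant(100, dtype=tf.uint8))
  return blend(image1, image2, 0.5)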
def cutout(image, pad_size, replace=0):
"""Apply cutout (https://arxiv.org/abs/1708.04552) to image.
  This operation applies a (2*pad_size x 2*pad_size) mask of zeros to
  a random location within `image`. The pixel values filled in will be of the
  value `replace`. The location where the mask will be applied is chosen
  uniformly at random over the whole image.
Args:
image: An image Tensor of type uint8.
pad_size: Specifies how big the zero mask that will be generated is that
is applied to the image. The mask will be of size
(2*pad_size x 2*pad_size).
replace: What pixel value to fill in the image in the area that has
the cutout mask applied to it.
Returns:
An image Tensor that is of type uint8.
"""
image_height = tf.shape(image)[0]
image_width = tf.shape(image)[1]
# Sample the center location in the image where the zero mask will be applied.
cutout_center_height = tf.random_uniform(
shape=[], minval=0, maxval=image_height,
dtype=tf.int32)
cutout_center_width = tf.random_uniform(
shape=[], minval=0, maxval=image_width,
dtype=tf.int32)
lower_pad = tf.maximum(0, cutout_center_height - pad_size)
upper_pad = tf.maximum(0, image_height - cutout_center_height - pad_size)
left_pad = tf.maximum(0, cutout_center_width - pad_size)
right_pad = tf.maximum(0, image_width - cutout_center_width - pad_size)
cutout_shape = [image_height - (lower_pad + upper_pad),
image_width - (left_pad + right_pad)]
padding_dims = [[lower_pad, upper_pad], [left_pad, right_pad]]
mask = tf.pad(
tf.zeros(cutout_shape, dtype=image.dtype),
padding_dims, constant_values=1)
mask = tf.expand_dims(mask, -1)
mask = tf.tile(mask, [1, 1, 3])
image = tf.where(
tf.equal(mask, 0),
tf.ones_like(image, dtype=image.dtype) * replace,
image)
return image
def solarize(image, threshold=128):
# For each pixel in the image, select the pixel
# if the value is less than the threshold.
# Otherwise, subtract the pixel from 255.
return tf.where(image < threshold, image, 255 - image)
def solarize_add(image, addition=0, threshold=128):
# For each pixel in the image less than threshold
# we add 'addition' amount to it and then clip the
# pixel value to be between 0 and 255. The value
# of 'addition' is between -128 and 128.
added_image = tf.cast(image, tf.int64) + addition
added_image = tf.cast(tf.clip_by_value(added_image, 0, 255), tf.uint8)
return tf.where(image < threshold, added_image, image)
def color(image, factor):
"""Equivalent of PIL Color."""
degenerate = tf.image.grayscale_to_rgb(tf.image.rgb_to_grayscale(image))
return blend(degenerate, image, factor)
def contrast(image, factor):
"""Equivalent of PIL Contrast."""
degenerate = tf.image.rgb_to_grayscale(image)
# Cast before calling tf.histogram.
degenerate = tf.cast(degenerate, tf.int32)
# Compute the grayscale histogram, then compute the mean pixel value,
# and create a constant image size of that value. Use that as the
# blending degenerate target of the original image.
hist = tf.histogram_fixed_width(degenerate, [0, 255], nbins=256)
mean = tf.reduce_sum(tf.cast(hist, tf.float32)) / 256.0
degenerate = tf.ones_like(degenerate, dtype=tf.float32) * mean
degenerate = tf.clip_by_value(degenerate, 0.0, 255.0)
degenerate = tf.image.grayscale_to_rgb(tf.cast(degenerate, tf.uint8))
return blend(degenerate, image, factor)
def brightness(image, factor):
"""Equivalent of PIL Brightness."""
degenerate = tf.zeros_like(image)
return blend(degenerate, image, factor)
def posterize(image, bits):
"""Equivalent of PIL Posterize."""
shift = 8 - bits
return tf.bitwise.left_shift(tf.bitwise.right_shift(image, shift), shift)
def rotate(image, degrees, replace):
"""Rotates the image by degrees either clockwise or counterclockwise.
Args:
image: An image Tensor of type uint8.
degrees: Float, a scalar angle in degrees to rotate all images by. If
degrees is positive the image will be rotated clockwise otherwise it will
be rotated counterclockwise.
replace: A one or three value 1D tensor to fill empty pixels caused by
the rotate operation.
Returns:
The rotated version of image.
"""
# Convert from degrees to radians.
degrees_to_radians = math.pi / 180.0
radians = degrees * degrees_to_radians
# In practice, we should randomize the rotation degrees by flipping
# it negatively half the time, but that's done on 'degrees' outside
# of the function.
image = contrib_image.rotate(wrap(image), radians)
return unwrap(image, replace)
def translate_x(image, pixels, replace):
"""Equivalent of PIL Translate in X dimension."""
image = contrib_image.translate(wrap(image), [-pixels, 0])
return unwrap(image, replace)
def translate_y(image, pixels, replace):
"""Equivalent of PIL Translate in Y dimension."""
image = contrib_image.translate(wrap(image), [0, -pixels])
return unwrap(image, replace)
def shear_x(image, level, replace):
"""Equivalent of PIL Shearing in X dimension."""
# Shear parallel to x axis is a projective transform
# with a matrix form of:
# [1 level
# 0 1].
image = contrib_image.transform(
wrap(image), [1., level, 0., 0., 1., 0., 0., 0.])
return unwrap(image, replace)
def shear_y(image, level, replace):
"""Equivalent of PIL Shearing in Y dimension."""
# Shear parallel to y axis is a projective transform
# with a matrix form of:
# [1 0
# level 1].
image = contrib_image.transform(
wrap(image), [1., 0., 0., level, 1., 0., 0., 0.])
return unwrap(image, replace)
def autocontrast(image):
"""Implements Autocontrast function from PIL using TF ops.
Args:
image: A 3D uint8 tensor.
Returns:
The image after it has had autocontrast applied to it and will be of type
uint8.
"""
def scale_channel(image):
"""Scale the 2D image using the autocontrast rule."""
    # A possibly cheaper version could use cumsum/unique_with_counts over the
    # histogram values, rather than iterating over the entire image, to
    # compute the mins and maxes.
lo = tf.to_float(tf.reduce_min(image))
hi = tf.to_float(tf.reduce_max(image))
# Scale the image, making the lowest value 0 and the highest value 255.
def scale_values(im):
scale = 255.0 / (hi - lo)
offset = -lo * scale
im = tf.to_float(im) * scale + offset
im = tf.clip_by_value(im, 0.0, 255.0)
return tf.cast(im, tf.uint8)
result = tf.cond(hi > lo, lambda: scale_values(image), lambda: image)
return result
# Assumes RGB for now. Scales each channel independently
# and then stacks the result.
s1 = scale_channel(image[:, :, 0])
s2 = scale_channel(image[:, :, 1])
s3 = scale_channel(image[:, :, 2])
image = tf.stack([s1, s2, s3], 2)
return image
def sharpness(image, factor):
"""Implements Sharpness function from PIL using TF ops."""
orig_image = image
image = tf.cast(image, tf.float32)
# Make image 4D for conv operation.
image = tf.expand_dims(image, 0)
# SMOOTH PIL Kernel.
kernel = tf.constant(
[[1, 1, 1], [1, 5, 1], [1, 1, 1]], dtype=tf.float32,
shape=[3, 3, 1, 1]) / 13.
# Tile across channel dimension.
kernel = tf.tile(kernel, [1, 1, 3, 1])
strides = [1, 1, 1, 1]
with tf.device('/cpu:0'):
# Some augmentation that uses depth-wise conv will cause crashing when
# training on GPU. See (b/156242594) for details.
degenerate = tf.nn.depthwise_conv2d(
image, kernel, strides, padding='VALID', rate=[1, 1])
degenerate = tf.clip_by_value(degenerate, 0.0, 255.0)
degenerate = tf.squeeze(tf.cast(degenerate, tf.uint8), [0])
# For the borders of the resulting image, fill in the values of the
# original image.
mask = tf.ones_like(degenerate)
padded_mask = tf.pad(mask, [[1, 1], [1, 1], [0, 0]])
padded_degenerate = tf.pad(degenerate, [[1, 1], [1, 1], [0, 0]])
result = tf.where(tf.equal(padded_mask, 1), padded_degenerate, orig_image)
# Blend the final result.
return blend(result, orig_image, factor)
def equalize(image):
"""Implements Equalize function from PIL using TF ops."""
def scale_channel(im, c):
"""Scale the data in the channel to implement equalize."""
im = tf.cast(im[:, :, c], tf.int32)
# Compute the histogram of the image channel.
histo = tf.histogram_fixed_width(im, [0, 255], nbins=256)
# For the purposes of computing the step, filter out the nonzeros.
nonzero = tf.where(tf.not_equal(histo, 0))
nonzero_histo = tf.reshape(tf.gather(histo, nonzero), [-1])
step = (tf.reduce_sum(nonzero_histo) - nonzero_histo[-1]) // 255
def build_lut(histo, step):
# Compute the cumulative sum, shifting by step // 2
# and then normalization by step.
lut = (tf.cumsum(histo) + (step // 2)) // step
# Shift lut, prepending with 0.
lut = tf.concat([[0], lut[:-1]], 0)
# Clip the counts to be in range. This is done
# in the C code for image.point.
return tf.clip_by_value(lut, 0, 255)
# If step is zero, return the original image. Otherwise, build
# lut from the full histogram and step and then index from it.
result = tf.cond(tf.equal(step, 0),
lambda: im,
lambda: tf.gather(build_lut(histo, step), im))
return tf.cast(result, tf.uint8)
# Assumes RGB for now. Scales each channel independently
# and then stacks the result.
s1 = scale_channel(image, 0)
s2 = scale_channel(image, 1)
s3 = scale_channel(image, 2)
image = tf.stack([s1, s2, s3], 2)
return image
def invert(image):
"""Inverts the image pixels."""
image = tf.convert_to_tensor(image)
return 255 - image
def wrap(image):
"""Returns 'image' with an extra channel set to all 1s."""
shape = tf.shape(image)
extended_channel = tf.ones([shape[0], shape[1], 1], image.dtype)
extended = tf.concat([image, extended_channel], 2)
return extended
def unwrap(image, replace):
"""Unwraps an image produced by wrap.
Where there is a 0 in the last channel for every spatial position,
the rest of the three channels in that spatial dimension are grayed
(set to 128). Operations like translate and shear on a wrapped
Tensor will leave 0s in empty locations. Some transformations look
at the intensity of values to do preprocessing, and we want these
empty pixels to assume the 'average' value, rather than pure black.
Args:
image: A 3D Image Tensor with 4 channels.
replace: A one or three value 1D tensor to fill empty pixels.
Returns:
image: A 3D image Tensor with 3 channels.
"""
image_shape = tf.shape(image)
# Flatten the spatial dimensions.
flattened_image = tf.reshape(image, [-1, image_shape[2]])
# Find all pixels where the last channel is zero.
alpha_channel = flattened_image[:, 3]
replace = tf.concat([replace, tf.ones([1], image.dtype)], 0)
# Where they are zero, fill them in with 'replace'.
flattened_image = tf.where(
tf.equal(alpha_channel, 0),
tf.ones_like(flattened_image, dtype=image.dtype) * replace,
flattened_image)
image = tf.reshape(flattened_image, image_shape)
image = tf.slice(image, [0, 0, 0], [image_shape[0], image_shape[1], 3])
return image
NAME_TO_FUNC = {
'AutoContrast': autocontrast,
'Equalize': equalize,
'Invert': invert,
'Rotate': rotate,
'Posterize': posterize,
'Solarize': solarize,
'SolarizeAdd': solarize_add,
'Color': color,
'Contrast': contrast,
'Brightness': brightness,
'Sharpness': sharpness,
'ShearX': shear_x,
'ShearY': shear_y,
'TranslateX': translate_x,
'TranslateY': translate_y,
'Cutout': cutout,
}
def _randomly_negate_tensor(tensor):
"""With 50% prob turn the tensor negative."""
should_flip = tf.cast(tf.floor(tf.random_uniform([]) + 0.5), tf.bool)
final_tensor = tf.cond(should_flip, lambda: tensor, lambda: -tensor)
return final_tensor
def _rotate_level_to_arg(level):
level = (level/_MAX_LEVEL) * 30.
level = _randomly_negate_tensor(level)
return (level,)
def _shrink_level_to_arg(level):
"""Converts level to ratio by which we shrink the image content."""
if level == 0:
return (1.0,) # if level is zero, do not shrink the image
# Maximum shrinking ratio is 2.9.
level = 2. / (_MAX_LEVEL / level) + 0.9
return (level,)
def _enhance_level_to_arg(level):
return ((level/_MAX_LEVEL) * 1.8 + 0.1,)
def _shear_level_to_arg(level):
level = (level/_MAX_LEVEL) * 0.3
# Flip level to negative with 50% chance.
level = _randomly_negate_tensor(level)
return (level,)
def _translate_level_to_arg(level, translate_const):
level = (level/_MAX_LEVEL) * float(translate_const)
# Flip level to negative with 50% chance.
level = _randomly_negate_tensor(level)
return (level,)
def level_to_arg(hparams):
return {
'AutoContrast': lambda level: (),
'Equalize': lambda level: (),
'Invert': lambda level: (),
'Rotate': _rotate_level_to_arg,
'Posterize': lambda level: (int((level/_MAX_LEVEL) * 4),),
'Solarize': lambda level: (int((level/_MAX_LEVEL) * 256),),
'SolarizeAdd': lambda level: (int((level/_MAX_LEVEL) * 110),),
'Color': _enhance_level_to_arg,
'Contrast': _enhance_level_to_arg,
'Brightness': _enhance_level_to_arg,
'Sharpness': _enhance_level_to_arg,
'ShearX': _shear_level_to_arg,
'ShearY': _shear_level_to_arg,
'Cutout': lambda level: (int((level/_MAX_LEVEL) * hparams.cutout_const),),
# pylint:disable=g-long-lambda
'TranslateX': lambda level: _translate_level_to_arg(
level, hparams.translate_const),
'TranslateY': lambda level: _translate_level_to_arg(
level, hparams.translate_const),
# pylint:enable=g-long-lambda
}
def _parse_policy_info(name, prob, level, replace_value, augmentation_hparams):
"""Return the function that corresponds to `name` and update `level` param."""
func = NAME_TO_FUNC[name]
args = level_to_arg(augmentation_hparams)[name](level)
# Check to see if prob is passed into function. This is used for operations
# where we alter bboxes independently.
# pytype:disable=wrong-arg-types
if 'prob' in inspect.getargspec(func)[0]:
args = tuple([prob] + list(args))
# pytype:enable=wrong-arg-types
# Add in replace arg if it is required for the function that is being called.
# pytype:disable=wrong-arg-types
if 'replace' in inspect.getargspec(func)[0]:
# Make sure replace is the final argument
assert 'replace' == inspect.getargspec(func)[0][-1]
args = tuple(list(args) + [replace_value])
# pytype:enable=wrong-arg-types
return (func, prob, args)
def _apply_func_with_prob(func, image, args, prob):
"""Apply `func` to image w/ `args` as input with probability `prob`."""
assert isinstance(args, tuple)
# If prob is a function argument, then this randomness is being handled
# inside the function, so make sure it is always called.
# pytype:disable=wrong-arg-types
if 'prob' in inspect.getargspec(func)[0]:
prob = 1.0
# pytype:enable=wrong-arg-types
# Apply the function with probability `prob`.
should_apply_op = tf.cast(
tf.floor(tf.random_uniform([], dtype=tf.float32) + prob), tf.bool)
augmented_image = tf.cond(
should_apply_op,
lambda: func(image, *args),
lambda: image)
return augmented_image
def select_and_apply_random_policy(policies, image):
"""Select a random policy from `policies` and apply it to `image`."""
policy_to_select = tf.random_uniform([], maxval=len(policies), dtype=tf.int32)
# Note that using tf.case instead of tf.conds would result in significantly
# larger graphs and would even break export for some larger policies.
for (i, policy) in enumerate(policies):
image = tf.cond(
tf.equal(i, policy_to_select),
lambda selected_policy=policy: selected_policy(image),
lambda: image)
return image
def build_and_apply_nas_policy(policies, image,
augmentation_hparams):
"""Build a policy from the given policies passed in and apply to image.
Args:
policies: list of lists of tuples in the form `(func, prob, level)`, `func`
is a string name of the augmentation function, `prob` is the probability
of applying the `func` operation, `level` is the input argument for
`func`.
image: tf.Tensor that the resulting policy will be applied to.
augmentation_hparams: Hparams associated with the NAS learned policy.
Returns:
A version of image that now has data augmentation applied to it based on
the `policies` pass into the function.
"""
replace_value = [128, 128, 128]
# func is the string name of the augmentation function, prob is the
# probability of applying the operation and level is the parameter associated
# with the tf op.
# tf_policies are functions that take in an image and return an augmented
# image.
tf_policies = []
for policy in policies:
tf_policy = []
# Link string name to the correct python function and make sure the correct
# argument is passed into that function.
for policy_info in policy:
policy_info = list(policy_info) + [replace_value, augmentation_hparams]
tf_policy.append(_parse_policy_info(*policy_info))
    # Now build the tf policy that will apply the augmentation procedure
# on image.
def make_final_policy(tf_policy_):
def final_policy(image_):
for func, prob, args in tf_policy_:
image_ = _apply_func_with_prob(
func, image_, args, prob)
return image_
return final_policy
tf_policies.append(make_final_policy(tf_policy))
augmented_image = select_and_apply_random_policy(
tf_policies, image)
return augmented_image
def distort_image_with_autoaugment(image, augmentation_name):
"""Applies the AutoAugment policy to `image`.
AutoAugment is from the paper: https://arxiv.org/abs/1805.09501.
Args:
image: `Tensor` of shape [height, width, 3] representing an image.
    augmentation_name: The name of the AutoAugment policy to use. The available
      options are `v0` and `test`. `v0` is the policy used for all of the
      results in the paper.
  Returns:
    The augmented version of `image`.
"""
available_policies = {'v0': policy_v0,
'test': policy_vtest}
if augmentation_name not in available_policies:
raise ValueError('Invalid augmentation_name: {}'.format(augmentation_name))
policy = available_policies[augmentation_name]()
# Hparams that will be used for AutoAugment.
augmentation_hparams = config_dict.ConfigDict(dict(
cutout_const=100, translate_const=250))
return build_and_apply_nas_policy(policy, image, augmentation_hparams)
def distort_image_with_randaugment(image, num_layers, magnitude):
"""Applies the RandAugment policy to `image`.
  RandAugment is from the paper https://arxiv.org/abs/1909.13719.
Args:
image: `Tensor` of shape [height, width, 3] representing an image.
num_layers: Integer, the number of augmentation transformations to apply
sequentially to an image. Represented as (N) in the paper. Usually best
values will be in the range [1, 3].
magnitude: Integer, shared magnitude across all augmentation operations.
Represented as (M) in the paper. Usually best values are in the range
[5, 30].
Returns:
The augmented version of `image`.
"""
replace_value = [128] * 3
tf.logging.info('Using RandAug.')
augmentation_hparams = config_dict.ConfigDict(dict(
cutout_const=40, translate_const=100))
available_ops = [
'AutoContrast', 'Equalize', 'Invert', 'Rotate', 'Posterize',
'Solarize', 'Color', 'Contrast', 'Brightness', 'Sharpness',
'ShearX', 'ShearY', 'TranslateX', 'TranslateY', 'Cutout', 'SolarizeAdd']
for layer_num in range(num_layers):
op_to_select = tf.random_uniform(
[], maxval=len(available_ops), dtype=tf.int32)
random_magnitude = float(magnitude)
with tf.name_scope('randaug_layer_{}'.format(layer_num)):
for (i, op_name) in enumerate(available_ops):
prob = tf.random_uniform([], minval=0.2, maxval=0.8, dtype=tf.float32)
func, _, args = _parse_policy_info(op_name, prob, random_magnitude,
replace_value, augmentation_hparams)
image = tf.cond(
tf.equal(i, op_to_select),
# pylint:disable=g-long-lambda
lambda selected_func=func, selected_args=args: selected_func(
image, *selected_args),
# pylint:enable=g-long-lambda
lambda: image)
return image
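# Added usage sketch (illustrative): this mirrors how train/dataset.py applies
# RandAugment during training; the input image below is just random noise.
def _example_randaugment_usage():
  """Illustrative only: apply two RandAugment layers at magnitude 5."""
  image = tf.cast(
      tf.random_uniform([224, 224, 3], maxval=256, dtype=tf.int32), tf.uint8)
  return distort_image_with_randaugment(image, num_layers=2, magnitude=5)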
| deepmind-research-master | perceiver/train/autoaugment.py |
# Lint as: python3
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Atari RL Unplugged datasets.
Examples in the dataset represent SARSA transitions stored during a
DQN training run as described in https://arxiv.org/pdf/1907.04543.
For every training run we have recorded all 50 million transitions corresponding
to 200 million environment steps (4x factor because of frame skipping). There
are 5 separate datasets for each of the 45 games.
Every transition in the dataset is a tuple containing the following features:
* o_t: Observation at time t. Observations have been processed using the
canonical Atari frame processing, including 4x frame stacking. The shape
of a single observation is [84, 84, 4].
* a_t: Action taken at time t.
* r_t: Reward after a_t.
* d_t: Discount after a_t.
* o_tp1: Observation at time t+1.
* a_tp1: Action at time t+1.
* extras:
* episode_id: Episode identifier.
* episode_return: Total episode return computed using per-step [-1, 1]
clipping.
"""
import functools
import os
from typing import Dict
from acme import wrappers
import dm_env
from dm_env import specs
from dopamine.discrete_domains import atari_lib
import reverb
import tensorflow as tf
# 9 tuning games.
TUNING_SUITE = [
'BeamRider',
'DemonAttack',
'DoubleDunk',
'IceHockey',
'MsPacman',
'Pooyan',
'RoadRunner',
'Robotank',
'Zaxxon',
]
# 36 testing games.
TESTING_SUITE = [
'Alien',
'Amidar',
'Assault',
'Asterix',
'Atlantis',
'BankHeist',
'BattleZone',
'Boxing',
'Breakout',
'Carnival',
'Centipede',
'ChopperCommand',
'CrazyClimber',
'Enduro',
'FishingDerby',
'Freeway',
'Frostbite',
'Gopher',
'Gravitar',
'Hero',
'Jamesbond',
'Kangaroo',
'Krull',
'KungFuMaster',
'NameThisGame',
'Phoenix',
'Pong',
'Qbert',
'Riverraid',
'Seaquest',
'SpaceInvaders',
'StarGunner',
'TimePilot',
'UpNDown',
'VideoPinball',
'WizardOfWor',
'YarsRevenge',
]
# Total of 45 games.
ALL = TUNING_SUITE + TESTING_SUITE
def _decode_frames(pngs: tf.Tensor):
"""Decode PNGs.
Args:
pngs: String Tensor of size (4,) containing PNG encoded images.
Returns:
4 84x84 grayscale images packed in a (84, 84, 4) uint8 Tensor.
"""
# Statically unroll png decoding
frames = [tf.image.decode_png(pngs[i], channels=1) for i in range(4)]
frames = tf.concat(frames, axis=2)
frames.set_shape((84, 84, 4))
return frames
def _make_reverb_sample(o_t: tf.Tensor,
a_t: tf.Tensor,
r_t: tf.Tensor,
d_t: tf.Tensor,
o_tp1: tf.Tensor,
a_tp1: tf.Tensor,
extras: Dict[str, tf.Tensor]) -> reverb.ReplaySample:
"""Create Reverb sample with offline data.
Args:
o_t: Observation at time t.
a_t: Action at time t.
r_t: Reward at time t.
d_t: Discount at time t.
o_tp1: Observation at time t+1.
a_tp1: Action at time t+1.
extras: Dictionary with extra features.
Returns:
Replay sample with fake info: key=0, probability=1, table_size=0.
"""
info = reverb.SampleInfo(key=tf.constant(0, tf.uint64),
probability=tf.constant(1.0, tf.float64),
table_size=tf.constant(0, tf.int64),
priority=tf.constant(1.0, tf.float64))
data = (o_t, a_t, r_t, d_t, o_tp1, a_tp1, extras)
return reverb.ReplaySample(info=info, data=data)
def _tf_example_to_reverb_sample(tf_example: tf.train.Example
) -> reverb.ReplaySample:
"""Create a Reverb replay sample from a TF example."""
# Parse tf.Example.
feature_description = {
'o_t': tf.io.FixedLenFeature([4], tf.string),
'o_tp1': tf.io.FixedLenFeature([4], tf.string),
'a_t': tf.io.FixedLenFeature([], tf.int64),
'a_tp1': tf.io.FixedLenFeature([], tf.int64),
'r_t': tf.io.FixedLenFeature([], tf.float32),
'd_t': tf.io.FixedLenFeature([], tf.float32),
'episode_id': tf.io.FixedLenFeature([], tf.int64),
'episode_return': tf.io.FixedLenFeature([], tf.float32),
}
data = tf.io.parse_single_example(tf_example, feature_description)
# Process data.
o_t = _decode_frames(data['o_t'])
o_tp1 = _decode_frames(data['o_tp1'])
a_t = tf.cast(data['a_t'], tf.int32)
a_tp1 = tf.cast(data['a_tp1'], tf.int32)
episode_id = tf.bitcast(data['episode_id'], tf.uint64)
# Build Reverb replay sample.
extras = {
'episode_id': episode_id,
'return': data['episode_return']
}
return _make_reverb_sample(o_t, a_t, data['r_t'], data['d_t'], o_tp1, a_tp1,
extras)
def dataset(path: str,
game: str,
run: int,
num_shards: int = 100,
shuffle_buffer_size: int = 100000) -> tf.data.Dataset:
"""TF dataset of Atari SARSA tuples."""
path = os.path.join(path, f'{game}/run_{run}')
filenames = [f'{path}-{i:05d}-of-{num_shards:05d}' for i in range(num_shards)]
file_ds = tf.data.Dataset.from_tensor_slices(filenames)
file_ds = file_ds.repeat().shuffle(num_shards)
example_ds = file_ds.interleave(
functools.partial(tf.data.TFRecordDataset, compression_type='GZIP'),
cycle_length=tf.data.experimental.AUTOTUNE,
block_length=5)
example_ds = example_ds.shuffle(shuffle_buffer_size)
return example_ds.map(_tf_example_to_reverb_sample,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
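# A minimal consumption sketch (not part of the original module): batch and
# prefetch the SARSA samples produced by `dataset` above. The batch size is an
# illustrative assumption, not a value prescribed by RL Unplugged.
def batched_dataset(path: str,
                    game: str,
                    run: int,
                    batch_size: int = 256) -> tf.data.Dataset:
  """Returns a batched, prefetched version of the Atari SARSA dataset."""
  ds = dataset(path, game, run)
  ds = ds.batch(batch_size, drop_remainder=True)
  return ds.prefetch(tf.data.experimental.AUTOTUNE)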
class AtariDopamineWrapper(dm_env.Environment):
"""Wrapper for Atari Dopamine environmnet."""
def __init__(self, env, max_episode_steps=108000):
self._env = env
self._max_episode_steps = max_episode_steps
self._episode_steps = 0
    self._reset_next_step = True
def reset(self):
self._episode_steps = 0
self._reset_next_step = False
observation = self._env.reset()
return dm_env.restart(observation.squeeze(-1))
def step(self, action):
if self._reset_next_step:
return self.reset()
observation, reward, terminal, _ = self._env.step(action.item())
observation = observation.squeeze(-1)
discount = 1 - float(terminal)
self._episode_steps += 1
if terminal:
      self._reset_next_step = True
return dm_env.termination(reward, observation)
elif self._episode_steps == self._max_episode_steps:
      self._reset_next_step = True
return dm_env.truncation(reward, observation, discount)
else:
return dm_env.transition(reward, observation, discount)
def observation_spec(self):
space = self._env.observation_space
return specs.Array(space.shape[:-1], space.dtype)
def action_spec(self):
return specs.DiscreteArray(self._env.action_space.n)
def environment(game: str) -> dm_env.Environment:
"""Atari environment."""
env = atari_lib.create_atari_environment(game_name=game,
sticky_actions=True)
env = AtariDopamineWrapper(env)
env = wrappers.FrameStackingWrapper(env, num_frames=4)
return wrappers.SinglePrecisionWrapper(env)
| deepmind-research-master | rl_unplugged/atari.py |
# Lint as: python3
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Real World RL for RL Unplugged datasets.
Examples in the dataset represent SARS transitions stored when running a
partially online trained agent as described in https://arxiv.org/abs/1904.12901.
We release 8 datasets in total -- with no combined challenge and easy combined
challenge on the cartpole, walker, quadruped, and humanoid tasks. For details
on how the dataset was generated, please refer to the paper.
Every transition in the dataset is a tuple containing the following features:
* o_t: Observation at time t.
* a_t: Action taken at time t.
* r_t: Reward at time t.
* d_t: Discount at time t.
* o_tp1: Observation at time t+1.
* a_tp1: Action taken at time t+1. This is set equal to the last action
for the last timestep.
Note that this serves as an example. For optimal data loading speed, consider
separating out data preprocessing from the data loading loop during training,
e.g. saving the preprocessed data.
"""
import collections
import functools
import os
from typing import Any, Dict, Optional, Sequence
from acme import wrappers
import dm_env
import realworldrl_suite.environments as rwrl_envs
import reverb
import tensorflow as tf
import tree
DELIMITER = ':'
# Control suite tasks have 1000 timesteps per episode. One additional timestep
# accounts for the very first observation where no action has been taken yet.
DEFAULT_NUM_TIMESTEPS = 1001
def _decombine_key(k: str, delimiter: str = DELIMITER) -> Sequence[str]:
return k.split(delimiter)
def tf_example_to_feature_description(example,
num_timesteps=DEFAULT_NUM_TIMESTEPS):
"""Takes a string tensor encoding an tf example and returns its features."""
if not tf.executing_eagerly():
raise AssertionError(
'tf_example_to_reverb_sample() only works under eager mode.')
example = tf.train.Example.FromString(example.numpy())
ret = {}
for k, v in example.features.feature.items():
l = len(v.float_list.value)
if l % num_timesteps:
raise ValueError('Unexpected feature length %d. It should be divisible '
'by num_timesteps: %d' % (l, num_timesteps))
size = l // num_timesteps
ret[k] = tf.io.FixedLenFeature([num_timesteps, size], tf.float32)
return ret
def tree_deflatten_with_delimiter(
flat_dict: Dict[str, Any], delimiter: str = DELIMITER) -> Dict[str, Any]:
"""De-flattens a dict to its originally nested structure.
  Does the opposite of {combine_nested_keys(k): v
for k, v in tree.flatten_with_path(nested_dicts)}
Example: {'a:b': 1} -> {'a': {'b': 1}}
Args:
    flat_dict: a dict whose keys are `path`s joined by `delimiter`.
delimiter: the delimiter that separates the keys of the nested dict.
Returns:
An un-flattened dict.
"""
root = collections.defaultdict(dict)
for delimited_key, v in flat_dict.items():
keys = _decombine_key(delimited_key, delimiter=delimiter)
node = root
for k in keys[:-1]:
node = node[k]
node[keys[-1]] = v
return dict(root)
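# A small illustrative check (not part of the original module): delimited keys
# are expanded back into a nested dict, mirroring the docstring example above.
def _tree_deflatten_example():
  flat = {'observation:position': 1, 'observation:velocity': 2, 'reward': 3}
  nested = tree_deflatten_with_delimiter(flat)
  assert nested == {'observation': {'position': 1, 'velocity': 2}, 'reward': 3}
  return nested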
def get_slice_of_nested(nested: Dict[str, Any], start: int,
end: int) -> Dict[str, Any]:
return tree.map_structure(lambda item: item[start:end], nested)
def repeat_last_and_append_to_nested(nested: Dict[str, Any]) -> Dict[str, Any]:
return tree.map_structure(
lambda item: tf.concat((item, item[-1:]), axis=0), nested)
def tf_example_to_reverb_sample(example,
feature_description,
num_timesteps=DEFAULT_NUM_TIMESTEPS):
"""Converts the episode encoded as a tf example into SARSA reverb samples."""
example = tf.io.parse_single_example(example, feature_description)
kv = tree_deflatten_with_delimiter(example)
output = (
get_slice_of_nested(kv['observation'], 0, num_timesteps - 1),
get_slice_of_nested(kv['action'], 1, num_timesteps),
kv['reward'][1:num_timesteps],
# The two fields below aren't needed for learning,
# but are kept here to be compatible with acme learner format.
kv['discount'][1:num_timesteps],
get_slice_of_nested(kv['observation'], 1, num_timesteps),
repeat_last_and_append_to_nested(
get_slice_of_nested(kv['action'], 2, num_timesteps)))
ret = tf.data.Dataset.from_tensor_slices(output)
ret = ret.map(lambda *x: reverb.ReplaySample(info=b'None', data=x)) # pytype: disable=wrong-arg-types
return ret
def dataset(path: str,
combined_challenge: str,
domain: str,
task: str,
difficulty: str,
num_shards: int = 100,
shuffle_buffer_size: int = 100000) -> tf.data.Dataset:
"""TF dataset of RWRL SARSA tuples."""
path = os.path.join(
path,
f'combined_challenge_{combined_challenge}/{domain}/{task}/'
f'offline_rl_challenge_{difficulty}'
)
filenames = [
f'{path}/episodes.tfrecord-{i:05d}-of-{num_shards:05d}'
for i in range(num_shards)
]
file_ds = tf.data.Dataset.from_tensor_slices(filenames)
file_ds = file_ds.repeat().shuffle(num_shards)
tf_example_ds = file_ds.interleave(
tf.data.TFRecordDataset,
cycle_length=tf.data.experimental.AUTOTUNE,
block_length=5)
# Take one item to get the output types and shapes.
example_item = None
for example_item in tf.data.TFRecordDataset(filenames[:1]).take(1):
break
if example_item is None:
raise ValueError('Empty dataset')
feature_description = tf_example_to_feature_description(example_item)
reverb_ds = tf_example_ds.interleave(
functools.partial(
tf_example_to_reverb_sample, feature_description=feature_description),
num_parallel_calls=tf.data.experimental.AUTOTUNE,
deterministic=False)
reverb_ds = reverb_ds.prefetch(100)
reverb_ds = reverb_ds.shuffle(shuffle_buffer_size)
return reverb_ds
def environment(
combined_challenge: str,
domain: str,
task: str,
log_output: Optional[str] = None,
environment_kwargs: Optional[Dict[str, Any]] = None) -> dm_env.Environment:
"""RWRL environment."""
env = rwrl_envs.load(
domain_name=domain,
task_name=task,
log_output=log_output,
environment_kwargs=environment_kwargs,
combined_challenge=combined_challenge)
return wrappers.SinglePrecisionWrapper(env)
| deepmind-research-master | rl_unplugged/rwrl.py |
# Lint as: python3
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Atari dataset example.
Instructions:
> mkdir -p /tmp/dataset/Asterix
> gsutil cp gs://rl_unplugged/atari/Asterix/run_1-00000-of-00100 \
/tmp/dataset/Asterix/run_1-00000-of-00001
> python atari_example.py --path=/tmp/dataset --game=Asterix
"""
from absl import app
from absl import flags
from acme import specs
import tree
from rl_unplugged import atari
flags.DEFINE_string('path', '/tmp/dataset', 'Path to dataset.')
flags.DEFINE_string('game', 'Asterix', 'Game.')
FLAGS = flags.FLAGS
def main(_):
ds = atari.dataset(FLAGS.path, FLAGS.game, 1,
num_shards=1,
shuffle_buffer_size=1)
for sample in ds.take(1):
print('Data spec')
print(tree.map_structure(lambda x: (x.dtype, x.shape), sample.data))
env = atari.environment(FLAGS.game)
print('Environment spec')
print(specs.make_environment_spec(env))
print('Environment observation')
timestep = env.reset()
print(tree.map_structure(lambda x: (x.dtype, x.shape), timestep.observation))
if __name__ == '__main__':
app.run(main)
| deepmind-research-master | rl_unplugged/atari_example.py |
# Lint as: python3
# pylint: disable=line-too-long
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""RWRL dataset example.
Instructions:
> export TMP_PATH=/tmp/dataset/rwrl
> export DATA_PATH=combined_challenge_easy/quadruped/walk/offline_rl_challenge_easy
> mkdir -p $TMP_PATH/$DATA_PATH
> gsutil cp gs://rl_unplugged/rwrl/$DATA_PATH/episodes.tfrecord-00001-of-00015 \
$TMP_PATH/$DATA_PATH/episodes.tfrecord-00000-of-00001
> python rwrl_example.py --path=$TMP_PATH
"""
# pylint: enable=line-too-long
from absl import app
from absl import flags
import tree
from rl_unplugged import rwrl
flags.DEFINE_string('path', '/tmp/dataset', 'Path to dataset.')
def main(_):
ds = rwrl.dataset(
flags.FLAGS.path,
combined_challenge='easy',
domain='quadruped',
task='walk',
difficulty='easy',
num_shards=1,
shuffle_buffer_size=1)
for replay_sample in ds.take(1):
print(tree.map_structure(lambda x: (x.dtype, x.shape), replay_sample.data))
break
if __name__ == '__main__':
app.run(main)
| deepmind-research-master | rl_unplugged/rwrl_example.py |
# Lint as: python3
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Networks used for training agents.
"""
from acme.tf import networks as acme_networks
from acme.tf import utils as tf2_utils
import numpy as np
import sonnet as snt
import tensorflow as tf
def instance_norm_and_elu(x):
mean = tf.reduce_mean(x, axis=[1, 2], keepdims=True)
x_ = x - mean
var = tf.reduce_mean(x_**2, axis=[1, 2], keepdims=True)
x_norm = x_ / (var + 1e-6)
return tf.nn.elu(x_norm)
class ControlNetwork(snt.Module):
"""Image, proprio and optionally action encoder used for actors and critics.
"""
def __init__(self,
proprio_encoder_size: int,
proprio_keys=None,
activation=tf.nn.elu):
"""Creates a ControlNetwork.
Args:
proprio_encoder_size: Size of the linear layer for the proprio encoder.
proprio_keys: Optional list of names of proprioceptive observations.
Defaults to all observations. Note that if this is specified, any
observation not contained in proprio_keys will be ignored by the agent.
activation: Linear layer activation function.
"""
super().__init__(name='control_network')
self._activation = activation
self._proprio_keys = proprio_keys
self._proprio_encoder = acme_networks.LayerNormMLP([proprio_encoder_size])
def __call__(self, inputs, action: tf.Tensor = None, task=None):
"""Evaluates the ControlNetwork.
Args:
inputs: A dictionary of agent observation tensors.
action: Agent actions.
task: Optional encoding of the task.
Raises:
      ValueError: if no proprio input is provided.
ValueError: if some proprio input looks suspiciously like pixel inputs.
Returns:
Processed network output.
"""
if not isinstance(inputs, dict):
inputs = {'inputs': inputs}
proprio_input = []
# By default, treat all observations as proprioceptive.
if self._proprio_keys is None:
self._proprio_keys = list(sorted(inputs.keys()))
for key in self._proprio_keys:
proprio_input.append(snt.Flatten()(inputs[key]))
if np.prod(inputs[key].shape[1:]) > 32*32*3:
raise ValueError(
'This input does not resemble a proprioceptive '
'state: {} with shape {}'.format(
key, inputs[key].shape))
# Append optional action input (i.e. for critic networks).
if action is not None:
proprio_input.append(action)
proprio_input = tf2_utils.batch_concat(proprio_input)
proprio_state = self._proprio_encoder(proprio_input)
return proprio_state
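# A minimal usage sketch (not part of the original module): encode a batch of
# proprioceptive observations together with an action (critic-style). The
# observation names and sizes below are illustrative assumptions.
def example_control_network_call():
  network = ControlNetwork(proprio_encoder_size=300)
  observations = {
      'position': tf.zeros([8, 3]),
      'velocity': tf.zeros([8, 2]),
  }
  actions = tf.zeros([8, 1])
  return network(observations, action=actions)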
| deepmind-research-master | rl_unplugged/networks.py |
# Lint as: python3
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Control RL Unplugged datasets.
Examples in the dataset represent sequences stored when running a partially
trained agent (trained in online way) as described in
https://arxiv.org/abs/2006.13888.
Every dataset has a SARSA version, and datasets for environments for solving
which we believe one may need a recurrent agent also include a version of the
dataset with overlapping sequences of length 40.
Datasets for the dm_control_suite environments only include proprio
observations, while datasets for dm_locomotion include both pixel and proprio
observations.
"""
import collections
import functools
import os
from typing import Dict, Optional, Tuple, Set
from acme import wrappers
from acme.adders import reverb as adders
from dm_control import composer
from dm_control import suite
from dm_control.composer.variation import colors
from dm_control.composer.variation import distributions
from dm_control.locomotion import arenas
from dm_control.locomotion import props
from dm_control.locomotion import tasks
from dm_control.locomotion import walkers
from dm_env import specs
import numpy as np
import reverb
import tensorflow as tf
import tree
def _build_rodent_escape_env():
"""Build environment where a rodent escapes from a bowl."""
walker = walkers.Rat(
observable_options={'egocentric_camera': dict(enabled=True)},
)
arena = arenas.bowl.Bowl(
size=(20., 20.),
aesthetic='outdoor_natural')
locomotion_task = tasks.escape.Escape(
walker=walker,
arena=arena,
physics_timestep=0.001,
control_timestep=.02)
raw_env = composer.Environment(
time_limit=20,
task=locomotion_task,
strip_singleton_obs_buffer_dim=True)
return raw_env
def _build_rodent_maze_env():
"""Build environment where a rodent runs to targets."""
walker = walkers.Rat(
observable_options={'egocentric_camera': dict(enabled=True)},
)
wall_textures = arenas.labmaze_textures.WallTextures(
style='style_01')
arena = arenas.mazes.RandomMazeWithTargets(
x_cells=11,
y_cells=11,
xy_scale=.5,
z_height=.3,
max_rooms=4,
room_min_size=4,
room_max_size=5,
spawns_per_room=1,
targets_per_room=3,
wall_textures=wall_textures,
aesthetic='outdoor_natural')
rodent_task = tasks.random_goal_maze.ManyGoalsMaze(
walker=walker,
maze_arena=arena,
target_builder=functools.partial(
props.target_sphere.TargetSphere,
radius=0.05,
height_above_ground=.125,
rgb1=(0, 0, 0.4),
rgb2=(0, 0, 0.7)),
target_reward_scale=50.,
contact_termination=False,
control_timestep=.02,
physics_timestep=0.001)
raw_env = composer.Environment(
time_limit=30,
task=rodent_task,
strip_singleton_obs_buffer_dim=True)
return raw_env
def _build_rodent_corridor_gaps():
"""Build environment where a rodent runs over gaps."""
walker = walkers.Rat(
observable_options={'egocentric_camera': dict(enabled=True)},
)
platform_length = distributions.Uniform(low=0.4, high=0.8)
gap_length = distributions.Uniform(low=0.05, high=0.2)
arena = arenas.corridors.GapsCorridor(
corridor_width=2,
platform_length=platform_length,
gap_length=gap_length,
corridor_length=40,
aesthetic='outdoor_natural')
rodent_task = tasks.corridors.RunThroughCorridor(
walker=walker,
arena=arena,
walker_spawn_position=(5, 0, 0),
walker_spawn_rotation=0,
target_velocity=1.0,
contact_termination=False,
terminate_at_height=-0.3,
physics_timestep=0.001,
control_timestep=.02)
raw_env = composer.Environment(
time_limit=30,
task=rodent_task,
strip_singleton_obs_buffer_dim=True)
return raw_env
def _build_rodent_two_touch_env():
"""Build environment where a rodent touches targets."""
walker = walkers.Rat(
observable_options={'egocentric_camera': dict(enabled=True)},
)
arena_floor = arenas.floors.Floor(
size=(10., 10.), aesthetic='outdoor_natural')
task_reach = tasks.reach.TwoTouch(
walker=walker,
arena=arena_floor,
target_builders=[
functools.partial(
props.target_sphere.TargetSphereTwoTouch,
radius=0.025),
],
randomize_spawn_rotation=True,
target_type_rewards=[25.],
shuffle_target_builders=False,
target_area=(1.5, 1.5),
physics_timestep=0.001,
control_timestep=.02)
raw_env = composer.Environment(
time_limit=30,
task=task_reach,
strip_singleton_obs_buffer_dim=True)
return raw_env
def _build_humanoid_walls_env():
"""Build humanoid walker walls environment."""
walker = walkers.CMUHumanoidPositionControlled(
name='walker',
observable_options={'egocentric_camera': dict(enabled=True)},
)
wall_width = distributions.Uniform(low=1, high=7)
wall_height = distributions.Uniform(low=2.5, high=4.0)
swap_wall_side = distributions.Bernoulli(prob=0.5)
wall_r = distributions.Uniform(low=0.5, high=0.6)
wall_g = distributions.Uniform(low=0.21, high=0.41)
wall_rgba = colors.RgbVariation(r=wall_r, g=wall_g, b=0, alpha=1)
arena = arenas.WallsCorridor(
wall_gap=5.0,
wall_width=wall_width,
wall_height=wall_height,
swap_wall_side=swap_wall_side,
wall_rgba=wall_rgba,
corridor_width=10,
corridor_length=100)
humanoid_task = tasks.RunThroughCorridor(
walker=walker,
arena=arena,
walker_spawn_rotation=1.57, # pi / 2
physics_timestep=0.005,
control_timestep=0.03)
raw_env = composer.Environment(
time_limit=30,
task=humanoid_task,
strip_singleton_obs_buffer_dim=True)
return raw_env
def _build_humanoid_corridor_env():
"""Build humanoid walker walls environment."""
walker = walkers.CMUHumanoidPositionControlled(
name='walker',
observable_options={'egocentric_camera': dict(enabled=True)},
)
arena = arenas.EmptyCorridor(
corridor_width=10,
corridor_length=100)
humanoid_task = tasks.RunThroughCorridor(
walker=walker,
arena=arena,
walker_spawn_rotation=1.57, # pi / 2
physics_timestep=0.005,
control_timestep=0.03)
raw_env = composer.Environment(
time_limit=30,
task=humanoid_task,
strip_singleton_obs_buffer_dim=True)
return raw_env
def _build_humanoid_corridor_gaps():
"""Build humanoid walker walls environment."""
walker = walkers.CMUHumanoidPositionControlled(
name='walker',
observable_options={'egocentric_camera': dict(enabled=True)},
)
platform_length = distributions.Uniform(low=0.3, high=2.5)
gap_length = distributions.Uniform(low=0.75, high=1.25)
arena = arenas.GapsCorridor(
corridor_width=10,
platform_length=platform_length,
gap_length=gap_length,
corridor_length=100)
humanoid_task = tasks.RunThroughCorridor(
walker=walker,
arena=arena,
walker_spawn_position=(2, 0, 0),
walker_spawn_rotation=1.57, # pi / 2
physics_timestep=0.005,
control_timestep=0.03)
raw_env = composer.Environment(
time_limit=30,
task=humanoid_task,
strip_singleton_obs_buffer_dim=True)
return raw_env
class MujocoActionNormalizer(wrappers.EnvironmentWrapper):
"""Rescale actions to [-1, 1] range for mujoco physics engine.
For control environments whose actions have bounded range in [-1, 1], this
  adaptor rescales actions to the desired range. This allows the actor network
  to output unscaled actions for better gradient dynamics.
"""
def __init__(self, environment, rescale='clip'):
super().__init__(environment)
self._rescale = rescale
def step(self, action):
"""Rescale actions to [-1, 1] range before stepping wrapped environment."""
if self._rescale == 'tanh':
scaled_actions = tree.map_structure(np.tanh, action)
elif self._rescale == 'clip':
scaled_actions = tree.map_structure(lambda a: np.clip(a, -1., 1.), action)
else:
raise ValueError('Unrecognized scaling option: %s' % self._rescale)
return self._environment.step(scaled_actions)
class NormilizeActionSpecWrapper(wrappers.EnvironmentWrapper):
"""Turn each dimension of the actions into the range of [-1, 1]."""
def __init__(self, environment):
super().__init__(environment)
action_spec = environment.action_spec()
self._scale = action_spec.maximum - action_spec.minimum
self._offset = action_spec.minimum
minimum = action_spec.minimum * 0 - 1.
maximum = action_spec.minimum * 0 + 1.
self._action_spec = specs.BoundedArray(
action_spec.shape,
action_spec.dtype,
minimum,
maximum,
name=action_spec.name)
def _from_normal_actions(self, actions):
actions = 0.5 * (actions + 1.0) # a_t is now in the range [0, 1]
# scale range to [minimum, maximum]
return actions * self._scale + self._offset
def step(self, action):
action = self._from_normal_actions(action)
return self._environment.step(action)
def action_spec(self):
return self._action_spec
class FilterObservationsWrapper(wrappers.EnvironmentWrapper):
"""Filter out all the observations not specified to this wrapper."""
def __init__(self, environment, observations_to_keep):
super().__init__(environment)
self._observations_to_keep = observations_to_keep
spec = self._environment.observation_spec()
filtered = [(k, spec[k]) for k in observations_to_keep]
self._observation_spec = collections.OrderedDict(filtered)
def _filter_observation(self, timestep):
observation = timestep.observation
filtered = [(k, observation[k]) for k in self._observations_to_keep]
return timestep._replace(observation=collections.OrderedDict(filtered))
def step(self, action):
return self._filter_observation(self._environment.step(action))
def reset(self):
return self._filter_observation(self._environment.reset())
def observation_spec(self):
return self._observation_spec
class ControlSuite:
"""Create bits needed to run agents on an Control Suite dataset."""
def __init__(self, task_name='humanoid_run'):
"""Initializes datasets/environments for the Deepmind Control suite.
Args:
      task_name: Task name. Must be one of:
finger_turn_hard, manipulator_insert_peg, humanoid_run,
cartpole_swingup, cheetah_run, fish_swim, manipulator_insert_ball,
walker_stand, walker_walk
"""
self.task_name = task_name
self._uint8_features = set([])
self._environment = None
if task_name == 'swim':
self._domain_name = 'fish'
self._task_name = 'swim'
self._shapes = {
'observation/target': (3,),
'observation/velocity': (13,),
'observation/upright': (1,),
'observation/joint_angles': (7,),
'action': (5,),
'discount': (),
'reward': (),
'episodic_reward': (),
'step_type': ()
}
elif task_name == 'humanoid_run':
self._domain_name = 'humanoid'
self._task_name = 'run'
self._shapes = {
'observation/velocity': (27,),
'observation/com_velocity': (3,),
'observation/torso_vertical': (3,),
'observation/extremities': (12,),
'observation/head_height': (1,),
'observation/joint_angles': (21,),
'action': (21,),
'discount': (),
'reward': (),
'episodic_reward': (),
'step_type': ()
}
elif task_name == 'manipulator_insert_ball':
self._domain_name = 'manipulator'
self._task_name = 'insert_ball'
self._shapes = {
'observation/arm_pos': (16,),
'observation/arm_vel': (8,),
'observation/touch': (5,),
'observation/hand_pos': (4,),
'observation/object_pos': (4,),
'observation/object_vel': (3,),
'observation/target_pos': (4,),
'action': (5,),
'discount': (),
'reward': (),
'episodic_reward': (),
'step_type': ()}
elif task_name == 'manipulator_insert_peg':
self._domain_name = 'manipulator'
self._task_name = 'insert_peg'
self._shapes = {
'observation/arm_pos': (16,),
'observation/arm_vel': (8,),
'observation/touch': (5,),
'observation/hand_pos': (4,),
'observation/object_pos': (4,),
'observation/object_vel': (3,),
'observation/target_pos': (4,),
'episodic_reward': (),
'action': (5,),
'discount': (),
'reward': (),
'step_type': ()}
elif task_name == 'cartpole_swingup':
self._domain_name = 'cartpole'
self._task_name = 'swingup'
self._shapes = {
'observation/position': (3,),
'observation/velocity': (2,),
'action': (1,),
'discount': (),
'reward': (),
'episodic_reward': (),
'step_type': ()}
elif task_name == 'walker_walk':
self._domain_name = 'walker'
self._task_name = 'walk'
self._shapes = {
'observation/orientations': (14,),
'observation/velocity': (9,),
'observation/height': (1,),
'action': (6,),
'discount': (),
'reward': (),
'episodic_reward': (),
'step_type': ()}
elif task_name == 'walker_stand':
self._domain_name = 'walker'
self._task_name = 'stand'
self._shapes = {
'observation/orientations': (14,),
'observation/velocity': (9,),
'observation/height': (1,),
'action': (6,),
'discount': (),
'reward': (),
'episodic_reward': (),
'step_type': ()}
elif task_name == 'cheetah_run':
self._domain_name = 'cheetah'
self._task_name = 'run'
self._shapes = {
'observation/position': (8,),
'observation/velocity': (9,),
'action': (6,),
'discount': (),
'reward': (),
'episodic_reward': (),
'step_type': ()}
elif task_name == 'finger_turn_hard':
self._domain_name = 'finger'
self._task_name = 'turn_hard'
self._shapes = {
'observation/position': (4,),
'observation/velocity': (3,),
'observation/touch': (2,),
'observation/target_position': (2,),
'observation/dist_to_target': (1,),
'action': (2,),
'discount': (),
'reward': (),
'episodic_reward': (),
'step_type': ()}
else:
raise ValueError('Task \'{}\' not found.'.format(task_name))
self._data_path = 'dm_control_suite/{}/train'.format(task_name)
@property
def shapes(self):
return self._shapes
@property
def data_path(self):
return self._data_path
@property
def uint8_features(self):
return self._uint8_features
@property
def environment(self):
"""Build and return the environment."""
if self._environment is not None:
return self._environment
self._environment = suite.load(
domain_name=self._domain_name,
task_name=self._task_name)
self._environment = wrappers.SinglePrecisionWrapper(self._environment)
self._environment = NormilizeActionSpecWrapper(self._environment)
return self._environment
class CmuThirdParty:
"""Create bits needed to run agents on an locomotion humanoid dataset."""
def __init__(self, task_name='humanoid_walls'):
# 'humanoid_corridor|humanoid_gaps|humanoid_walls'
self._task_name = task_name
self._pixel_keys = self.get_pixel_keys()
self._uint8_features = set(['observation/walker/egocentric_camera'])
self.additional_paths = {}
self._proprio_keys = [
'walker/joints_vel',
'walker/sensors_velocimeter',
'walker/sensors_gyro',
'walker/joints_pos',
'walker/world_zaxis',
'walker/body_height',
'walker/sensors_accelerometer',
'walker/end_effectors_pos'
]
self._shapes = {
'observation/walker/joints_vel': (56,),
'observation/walker/sensors_velocimeter': (3,),
'observation/walker/sensors_gyro': (3,),
'observation/walker/joints_pos': (56,),
'observation/walker/world_zaxis': (3,),
'observation/walker/body_height': (1,),
'observation/walker/sensors_accelerometer': (3,),
'observation/walker/end_effectors_pos': (12,),
'observation/walker/egocentric_camera': (
64,
64,
3,
),
'action': (56,),
'discount': (),
'reward': (),
'episodic_reward': (),
'step_type': ()
}
if task_name == 'humanoid_corridor':
self._data_path = 'dm_locomotion/humanoid_corridor/seq2/train'
elif task_name == 'humanoid_gaps':
self._data_path = 'dm_locomotion/humanoid_gaps/seq2/train'
elif task_name == 'humanoid_walls':
self._data_path = 'dm_locomotion/humanoid_walls/seq40/train'
else:
raise ValueError('Task \'{}\' not found.'.format(task_name))
@staticmethod
def get_pixel_keys():
return ('walker/egocentric_camera',)
@property
def uint8_features(self):
return self._uint8_features
@property
def shapes(self):
return self._shapes
@property
def data_path(self):
return self._data_path
@property
def environment(self):
"""Build and return the environment."""
if self._task_name == 'humanoid_corridor':
self._environment = _build_humanoid_corridor_env()
elif self._task_name == 'humanoid_gaps':
self._environment = _build_humanoid_corridor_gaps()
elif self._task_name == 'humanoid_walls':
self._environment = _build_humanoid_walls_env()
self._environment = NormilizeActionSpecWrapper(self._environment)
self._environment = MujocoActionNormalizer(
environment=self._environment, rescale='clip')
self._environment = wrappers.SinglePrecisionWrapper(self._environment)
all_observations = list(self._proprio_keys) + list(self._pixel_keys)
self._environment = FilterObservationsWrapper(self._environment,
all_observations)
return self._environment
class Rodent:
"""Create bits needed to run agents on an Rodent dataset."""
def __init__(self, task_name='rodent_gaps'):
# 'rodent_escape|rodent_two_touch|rodent_gaps|rodent_mazes'
self._task_name = task_name
self._pixel_keys = self.get_pixel_keys()
self._uint8_features = set(['observation/walker/egocentric_camera'])
self._proprio_keys = [
'walker/joints_pos', 'walker/joints_vel', 'walker/tendons_pos',
'walker/tendons_vel', 'walker/appendages_pos', 'walker/world_zaxis',
'walker/sensors_accelerometer', 'walker/sensors_velocimeter',
'walker/sensors_gyro', 'walker/sensors_touch',
]
self._shapes = {
'observation/walker/joints_pos': (30,),
'observation/walker/joints_vel': (30,),
'observation/walker/tendons_pos': (8,),
'observation/walker/tendons_vel': (8,),
'observation/walker/appendages_pos': (15,),
'observation/walker/world_zaxis': (3,),
'observation/walker/sensors_accelerometer': (3,),
'observation/walker/sensors_velocimeter': (3,),
'observation/walker/sensors_gyro': (3,),
'observation/walker/sensors_touch': (4,),
'observation/walker/egocentric_camera': (64, 64, 3),
'action': (38,),
'discount': (),
'reward': (),
'step_type': ()
}
if task_name == 'rodent_gaps':
self._data_path = 'dm_locomotion/rodent_gaps/seq2/train'
elif task_name == 'rodent_escape':
self._data_path = 'dm_locomotion/rodent_bowl_escape/seq2/train'
elif task_name == 'rodent_two_touch':
self._data_path = 'dm_locomotion/rodent_two_touch/seq40/train'
elif task_name == 'rodent_mazes':
self._data_path = 'dm_locomotion/rodent_mazes/seq40/train'
else:
raise ValueError('Task \'{}\' not found.'.format(task_name))
@staticmethod
def get_pixel_keys():
return ('walker/egocentric_camera',)
@property
def shapes(self):
return self._shapes
@property
def uint8_features(self):
return self._uint8_features
@property
def data_path(self):
return self._data_path
@property
def environment(self):
"""Return environment."""
if self._task_name == 'rodent_escape':
self._environment = _build_rodent_escape_env()
elif self._task_name == 'rodent_gaps':
self._environment = _build_rodent_corridor_gaps()
elif self._task_name == 'rodent_two_touch':
self._environment = _build_rodent_two_touch_env()
elif self._task_name == 'rodent_mazes':
self._environment = _build_rodent_maze_env()
self._environment = NormilizeActionSpecWrapper(self._environment)
self._environment = MujocoActionNormalizer(
environment=self._environment, rescale='clip')
self._environment = wrappers.SinglePrecisionWrapper(self._environment)
all_observations = list(self._proprio_keys) + list(self._pixel_keys)
self._environment = FilterObservationsWrapper(self._environment,
all_observations)
return self._environment
def _parse_seq_tf_example(example, uint8_features, shapes):
"""Parse tf.Example containing one or two episode steps."""
def to_feature(key, shape):
if key in uint8_features:
return tf.io.FixedLenSequenceFeature(
shape=[], dtype=tf.string, allow_missing=True)
else:
return tf.io.FixedLenSequenceFeature(
shape=shape, dtype=tf.float32, allow_missing=True)
feature_map = {}
for k, v in shapes.items():
feature_map[k] = to_feature(k, v)
parsed = tf.io.parse_single_example(example, features=feature_map)
observation = {}
restructured = {}
for k in parsed.keys():
if 'observation' not in k:
restructured[k] = parsed[k]
continue
if k in uint8_features:
observation[k.replace('observation/', '')] = tf.reshape(
tf.io.decode_raw(parsed[k], out_type=tf.uint8), (-1,) + shapes[k])
else:
observation[k.replace('observation/', '')] = parsed[k]
restructured['observation'] = observation
restructured['length'] = tf.shape(restructured['action'])[0]
return restructured
def _build_sequence_example(sequences):
"""Convert raw sequences into a Reverb sequence sample."""
data = adders.Step(
observation=sequences['observation'],
action=sequences['action'],
reward=sequences['reward'],
discount=sequences['discount'],
start_of_episode=(),
extras=())
info = reverb.SampleInfo(key=tf.constant(0, tf.uint64),
probability=tf.constant(1.0, tf.float64),
table_size=tf.constant(0, tf.int64),
priority=tf.constant(1.0, tf.float64))
return reverb.ReplaySample(info=info, data=data)
def _build_sarsa_example(sequences):
"""Convert raw sequences into a Reverb n-step SARSA sample."""
o_tm1 = tree.map_structure(lambda t: t[0], sequences['observation'])
o_t = tree.map_structure(lambda t: t[1], sequences['observation'])
a_tm1 = tree.map_structure(lambda t: t[0], sequences['action'])
a_t = tree.map_structure(lambda t: t[1], sequences['action'])
r_t = tree.map_structure(lambda t: t[0], sequences['reward'])
p_t = tree.map_structure(lambda t: t[0], sequences['discount'])
info = reverb.SampleInfo(key=tf.constant(0, tf.uint64),
probability=tf.constant(1.0, tf.float64),
table_size=tf.constant(0, tf.int64),
priority=tf.constant(1.0, tf.float64))
return reverb.ReplaySample(info=info, data=(o_tm1, a_tm1, r_t, p_t, o_t, a_t))
def _padded_batch(example_ds, batch_size, shapes, drop_remainder=False):
"""Batch data while handling unequal lengths."""
padded_shapes = {}
padded_shapes['observation'] = {}
for k, v in shapes.items():
if 'observation' in k:
padded_shapes['observation'][
k.replace('observation/', '')] = (-1,) + v
else:
padded_shapes[k] = (-1,) + v
padded_shapes['length'] = ()
return example_ds.padded_batch(batch_size,
padded_shapes=padded_shapes,
drop_remainder=drop_remainder)
def dataset(root_path: str,
data_path: str,
shapes: Dict[str, Tuple[int]],
num_threads: int,
batch_size: int,
uint8_features: Optional[Set[str]] = None,
num_shards: int = 100,
shuffle_buffer_size: int = 100000,
sarsa: bool = True) -> tf.data.Dataset:
"""Create tf dataset for training."""
uint8_features = uint8_features if uint8_features else {}
path = os.path.join(root_path, data_path)
filenames = [f'{path}-{i:05d}-of-{num_shards:05d}' for i in range(num_shards)]
file_ds = tf.data.Dataset.from_tensor_slices(filenames)
file_ds = file_ds.repeat().shuffle(num_shards)
example_ds = file_ds.interleave(
functools.partial(tf.data.TFRecordDataset, compression_type='GZIP'),
cycle_length=tf.data.experimental.AUTOTUNE,
block_length=5)
example_ds = example_ds.shuffle(shuffle_buffer_size)
def map_func(example):
example = _parse_seq_tf_example(example, uint8_features, shapes)
return example
example_ds = example_ds.map(map_func, num_parallel_calls=num_threads)
example_ds = example_ds.repeat().shuffle(batch_size * 10)
if sarsa:
example_ds = example_ds.map(
_build_sarsa_example,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
    example_ds = example_ds.batch(batch_size)
else:
example_ds = _padded_batch(
example_ds, batch_size, shapes, drop_remainder=True)
example_ds = example_ds.map(
_build_sequence_example,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
example_ds = example_ds.prefetch(tf.data.experimental.AUTOTUNE)
return example_ds
| deepmind-research-master | rl_unplugged/dm_control_suite.py |
# Lint as: python3
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""DM control suite and locomotion dataset examples.
Example:
Instructions:
> export TMP_PATH=/tmp/dataset
> export TASK_NAME=humanoid_run
> mkdir -p $TMP_PATH/$TASK_NAME
> gsutil cp gs://rl_unplugged/dm_control_suite/$TASK_NAME/train-00000-of-00100 \
$TMP_PATH/dm_control_suite/$TASK_NAME/train-00000-of-00001
> python dm_control_suite_example.py --path=$TMP_PATH \
--task_class=control_suite --task_name=$TASK_NAME
"""
from absl import app
from absl import flags
import tree
from rl_unplugged import dm_control_suite
flags.DEFINE_string('path', '/tmp/dataset', 'Path to dataset.')
flags.DEFINE_string('task_name', 'humanoid_run', 'Game.')
flags.DEFINE_enum('task_class', 'control_suite',
['humanoid', 'rodent', 'control_suite'],
'Task classes.')
FLAGS = flags.FLAGS
def main(_):
if FLAGS.task_class == 'control_suite':
task = dm_control_suite.ControlSuite(task_name=FLAGS.task_name)
elif FLAGS.task_class == 'humanoid':
task = dm_control_suite.CmuThirdParty(task_name=FLAGS.task_name)
elif FLAGS.task_class == 'rodent':
task = dm_control_suite.Rodent(task_name=FLAGS.task_name)
ds = dm_control_suite.dataset(root_path=FLAGS.path,
data_path=task.data_path,
shapes=task.shapes,
num_threads=1,
batch_size=2,
uint8_features=task.uint8_features,
num_shards=1,
shuffle_buffer_size=10)
for sample in ds.take(1):
print('Data spec')
print(tree.map_structure(lambda x: (x.dtype, x.shape), sample.data))
environment = task.environment
timestep = environment.reset()
print(tree.map_structure(lambda x: (x.dtype, x.shape), timestep.observation))
if __name__ == '__main__':
app.run(main)
| deepmind-research-master | rl_unplugged/dm_control_suite_example.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# WikiGraphs is licensed under the terms of the Creative Commons
# Attribution-ShareAlike 4.0 International (CC BY-SA 4.0) license.
#
# WikiText-103 data (unchanged) is licensed by Salesforce.com, Inc. under the
# terms of the Creative Commons Attribution-ShareAlike 4.0 International
# (CC BY-SA 4.0) license. You can find details about CC BY-SA 4.0 at:
#
# https://creativecommons.org/licenses/by-sa/4.0/legalcode
#
# Freebase data is licensed by Google LLC under the terms of the Creative
# Commons CC BY 4.0 license. You may obtain a copy of the License at:
#
# https://creativecommons.org/licenses/by/4.0/legalcode
#
# ==============================================================================
"""Setup for pip package."""
from setuptools import find_packages
from setuptools import setup
setup(
name='wikigraphs',
version='0.1.0',
description='A Wikipedia - knowledge graph paired dataset.',
url='https://github.com/deepmind/deepmind-research/tree/master/wikigraphs',
author='DeepMind',
author_email='[email protected]',
packages=find_packages(),
license='Apache 2.0',
)
| deepmind-research-master | wikigraphs/setup.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# WikiGraphs is licensed under the terms of the Creative Commons
# Attribution-ShareAlike 4.0 International (CC BY-SA 4.0) license.
#
# WikiText-103 data (unchanged) is licensed by Salesforce.com, Inc. under the
# terms of the Creative Commons Attribution-ShareAlike 4.0 International
# (CC BY-SA 4.0) license. You can find details about CC BY-SA 4.0 at:
#
# https://creativecommons.org/licenses/by-sa/4.0/legalcode
#
# Freebase data is licensed by Google LLC under the terms of the Creative
# Commons CC BY 4.0 license. You may obtain a copy of the License at:
#
# https://creativecommons.org/licenses/by/4.0/legalcode
#
# ==============================================================================
"""Data Parallel Updater for Graph2text data."""
import functools
import os
import pickle
from absl import logging
import haiku as hk
import jax
from jax.tree_util import tree_multimap
import numpy as np
import optax
def call_fn_with_state_keys(jit_fn, state, other_inputs, keys):
"""Executes `jit_fn`, filtering out all keys except some subset."""
state = state.copy()
extra_state = {}
for k in list(state.keys()):
if k not in keys:
extra_state[k] = state.pop(k)
return jit_fn(state, *other_inputs), extra_state
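# A small illustrative check (not part of the original module): keys outside
# the allow-list are split off before the call and returned separately so they
# can be merged back into the state afterwards.
def _call_fn_with_state_keys_example():
  state = {'params': 1, 'opt_state': 2, 'step': 3}
  out, extra = call_fn_with_state_keys(
      lambda s: sorted(s.keys()), state, [], keys={'params', 'opt_state'})
  assert out == ['opt_state', 'params'] and extra == {'step': 3}
  return out, extra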
class Updater:
"""Graph2text model updater with multi-GPU support."""
def __init__(self, loss_fn, optimizer, devices=None, has_graph=False):
self._net_init_fn, self._apply_fn = hk.transform_with_state(
functools.partial(loss_fn, is_training=True))
_, self._eval_apply_fn = hk.transform_with_state(
functools.partial(loss_fn, is_training=False))
if optimizer is None:
optimizer = optax.identity()
self._optimizer = optimizer
self._num_devices = jax.local_device_count()
if devices is None:
devices = []
for host_id in range(jax.process_count()):
for device_id in jax.local_devices(host_id):
devices.append(device_id)
else:
self._num_devices = min(self._num_devices, len(devices))
def _pmap(f, static_broadcasted_argnums=()):
return jax.pmap(f, axis_name='i', devices=devices,
static_broadcasted_argnums=static_broadcasted_argnums)
def handle_graph_size(fn):
def _fn(*args):
batch = args[-1].copy()
max_graph_size = batch['max_graph_size']
del batch['max_graph_size']
args = args[:-1] + (batch, max_graph_size)
return fn(*args)
return _fn
# Try to jit.
if has_graph:
# If the model contains full graphs, we need to set the max_graph_size
# as a statically broadcasted argument.
self._init_fn = handle_graph_size(_pmap(self._init, 4))
self._update_fn = handle_graph_size(_pmap(self._update, 2))
self._eval_fn = handle_graph_size(_pmap(self._eval, 2))
else:
self._init_fn = _pmap(self._init)
self._update_fn = _pmap(self._update)
self._eval_fn = _pmap(self._eval)
def _init(self, master_rng, params, network_state, data, max_graph_size=None):
"""Initializes state of the updater."""
out_rng, init_rng = jax.random.split(master_rng)
if max_graph_size is not None:
new_params, new_network_state = self._net_init_fn(
init_rng, data, max_graph_size)
else:
new_params, new_network_state = self._net_init_fn(init_rng, data)
if params is None:
params = new_params
if network_state is None:
network_state = new_network_state
opt_state = self._optimizer.init(params)
return dict(
replicated_step=0,
rng=out_rng,
state=network_state,
opt_state=opt_state,
params=params,
)
def init(self, master_rng, data, params=None, network_state=None,
replicated_params=False):
"""Initializes state of the updater."""
data = self._preprocess(data)
rngs = np.array([master_rng] * self._num_devices)
if not replicated_params and params is not None:
params = jax.tree_map(
lambda x: np.array([x] * self._num_devices), params)
state = self._init_fn(rngs, params, network_state, data)
state['step'] = np.array(0, dtype=np.int64)
# Wait for initialization to finish before starting training to keep
# memory usage low.
flat_params = jax.tree_leaves(state['params'])
if flat_params:
jax.tree_leaves(state['params'])[0].block_until_ready()
return state
def _update(self, state, data, max_graph_size=None):
"""Updates parameters."""
replicated_step = state['replicated_step']
rng = state['rng']
opt_state = state['opt_state']
params = state['params']
net_state = state['state']
rng, new_rng = jax.random.split(rng)
rng = jax.random.fold_in(rng, jax.lax.axis_index('i'))
def _loss(params, state, batch, rng):
if max_graph_size is not None:
(loss, metrics), state = self._apply_fn(params, state, rng, batch,
max_graph_size)
else:
(loss, metrics), state = self._apply_fn(params, state, rng, batch)
return loss, (metrics, state)
(loss, (metrics, new_net_state)), g = jax.value_and_grad(
_loss, has_aux=True)(params, net_state, data, rng)
g = jax.lax.pmean(g, axis_name='i')
loss = jax.lax.pmean(loss, axis_name='i')
metrics = jax.lax.pmean(metrics, axis_name='i')
updates, new_opt_state = self._optimizer.update(g, opt_state, params)
new_params = optax.apply_updates(params, updates)
new_state = dict(
replicated_step=replicated_step + 1,
rng=new_rng,
state=new_net_state,
opt_state=new_opt_state,
params=new_params,
)
metrics['loss'] = loss
metrics['step'] = replicated_step
return new_state, metrics
def update(self, state, data):
"""Updates the state using some data and returns metrics."""
data = self._preprocess(data)
(state, out), extra_state = call_fn_with_state_keys(
self._update_fn, state, [data], keys=set([
'state', 'params', 'rng', 'replicated_step', 'opt_state']))
state.update(extra_state)
state['step'] += 1
return state, tree_multimap(lambda x: x[0], out)
def _eval(self, state, data, max_graph_size=None):
"""Evaluates the current state on the given data."""
if max_graph_size is not None:
(loss, metrics), new_state = self._eval_apply_fn(
state['params'], state['state'], state['rng'], data, max_graph_size)
else:
(loss, metrics), new_state = self._eval_apply_fn(
state['params'], state['state'], state['rng'], data)
state['state'] = new_state
loss = jax.lax.pmean(loss, axis_name='i')
metrics = jax.lax.pmean(metrics, axis_name='i')
metrics['loss'] = loss
metrics['step'] = state['replicated_step']
return state, metrics
def eval_return_state(self, state, data):
"""Returns metrics without updating the model."""
data = self._preprocess(data)
(state, out), extra_state = call_fn_with_state_keys(
self._eval_fn, state, [data], keys=set([
'state', 'params', 'rng', 'replicated_step']))
state.update(extra_state)
return state, tree_multimap(lambda x: x[0], out)
def eval(self, state, data):
"""Returns metrics without updating the model."""
_, out = self.eval_return_state(state, data)
return out
def _preprocess(self, data):
"""Reshapes input so that it can be distributed across multiple cores."""
multi_inputs = data.copy()
def add_core_dimension(x):
if np.isscalar(x):
return x
if x.shape[0] % self._num_devices != 0:
raise ValueError(f'The batch size must be a multiple of the number of'
f' devices. Got batch size = {x.shape[0]} and number'
f' of devices = {self._num_devices}.')
prefix = (self._num_devices, x.shape[0] // self._num_devices)
return np.reshape(x, prefix + x.shape[1:])
multi_inputs = tree_multimap(add_core_dimension, multi_inputs)
return multi_inputs
def params(self, state):
"""Returns model parameters."""
return tree_multimap(lambda x: x[0], state['params'])
def opt_state(self, state):
"""Returns the state of the optimiser."""
return tree_multimap(lambda x: x[0], state['opt_state'])
def network_state(self, state):
"""Returns the model's state."""
return tree_multimap(lambda x: x[0], state['state'])
def to_checkpoint_state(self, state):
"""Transforms the updater state into a checkpointable state."""
checkpoint_state = state.copy()
    # Add a leading axis to 'step' so the unstacking below can take [0].
checkpoint_state['step'] = checkpoint_state['step'][np.newaxis]
# Unstack the replicated contents.
checkpoint_state = tree_multimap(lambda x: x[0], checkpoint_state)
return checkpoint_state
def from_checkpoint_state(self, checkpoint_state):
"""Initializes the updater state from the checkpointed state."""
# Expand the checkpoint so we have a copy for each device.
state = tree_multimap(lambda x: np.stack(jax.local_device_count() * [x]),
checkpoint_state)
state['step'] = state['step'][0] # Undo stacking for step.
return state
class CheckpointingUpdater:
"""A checkpointing wrapper around an Updater."""
def __init__(self,
inner: Updater,
checkpoint_dir: str):
self._inner = inner
self._checkpoint_dir = checkpoint_dir
def _checkpoint_paths(self):
return [p for p in os.listdir(self._checkpoint_dir) if 'checkpoint' in p]
def init(self, rng, data, params=None, network_state=None):
"""Initialize experiment state."""
if not os.path.exists(self._checkpoint_dir) or not self._checkpoint_paths():
os.makedirs(self._checkpoint_dir, exist_ok=True)
return self._inner.init(rng, data, params, network_state)
return self.load_checkpoint()
def init_from_checkpoint(self, rng, data, checkpoint_state):
params = self._inner.params(checkpoint_state)
network_state = None
return self._inner.init(rng, data, params, network_state)
def eval_return_state(self, state, data):
return self._inner.eval_return_state(state, data)
def save_checkpoint(self, state):
path = os.path.join(self._checkpoint_dir, 'checkpoint.pkl')
logging.info('Serializing experiment state to %s', path)
checkpoint_state = self._inner.to_checkpoint_state(jax.device_get(state))
with open(path, 'wb') as f:
pickle.dump(checkpoint_state, f)
def load_checkpoint(self):
checkpoint = os.path.join(self._checkpoint_dir,
self._checkpoint_paths()[-1])
logging.info('Loading checkpoint from %s', checkpoint)
with open(checkpoint, 'rb') as f:
state = pickle.load(f)
return self._inner.from_checkpoint_state(state)
def update(self, state, data):
"""Update experiment state."""
state, out = self._inner.update(state, data)
return state, out
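# A minimal training-loop sketch (not part of the original module). `loss_fn`
# is assumed to be a Haiku-compatible loss taking (batch, is_training), and
# `batch_iterator` an iterator of already preprocessed batches; both names are
# placeholders rather than definitions from this repository.
def example_train_loop(loss_fn, batch_iterator, num_steps, checkpoint_dir):
  updater = CheckpointingUpdater(
      inner=Updater(loss_fn, optax.adam(1e-4)),
      checkpoint_dir=checkpoint_dir)
  rng = jax.random.PRNGKey(0)
  state = updater.init(rng, next(batch_iterator))
  metrics = None
  for _ in range(num_steps):
    state, metrics = updater.update(state, next(batch_iterator))
  updater.save_checkpoint(state)
  return state, metrics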
| deepmind-research-master | wikigraphs/updaters.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# WikiGraphs is licensed under the terms of the Creative Commons
# Attribution-ShareAlike 4.0 International (CC BY-SA 4.0) license.
#
# WikiText-103 data (unchanged) is licensed by Salesforce.com, Inc. under the
# terms of the Creative Commons Attribution-ShareAlike 4.0 International
# (CC BY-SA 4.0) license. You can find details about CC BY-SA 4.0 at:
#
# https://creativecommons.org/licenses/by-sa/4.0/legalcode
#
# Freebase data is licensed by Google LLC under the terms of the Creative
# Commons CC BY 4.0 license. You may obtain a copy of the License at:
#
# https://creativecommons.org/licenses/by/4.0/legalcode
#
# ==============================================================================
"""Utility functions for the training script."""
import collections
import math
import random
from absl import flags
from absl import logging
import jax.numpy as jnp
import jraph
import numpy as np
import sklearn
from wikigraphs.data import paired_dataset as pd
from wikigraphs.data import tokenizers
from wikigraphs.data import wikitext as wt
from wikigraphs.model import graph_net as gn
from wikigraphs.model import sampler as transformer_sampler
from wikigraphs.model import transformer
FLAGS = flags.FLAGS
VOCAB_FILES_MAP = {
'wikitext': '/tmp/data/wikitext-vocab.csv',
'freebase2wikitext': '/tmp/data/text-vocab.csv',
}
GRAPH_VOCAB_FILE = '/tmp/data/graph-vocab.csv'
def init_tokenizer(dataset_name):
"""Initialie the tokenizer."""
logging.info('Loading tokenizer...')
tokenizer = tokenizers.WordTokenizer(VOCAB_FILES_MAP[dataset_name])
logging.info('Vocab size: %d', tokenizer.vocab_size)
return tokenizer
def init_graph_tokenizer():
"""Initialie the tokenizer."""
logging.info('Loading graph tokenizer...')
tokenizer = tokenizers.GraphTokenizer(GRAPH_VOCAB_FILE)
logging.info('Vocab size: %d', tokenizer.vocab_size)
return tokenizer
def get_dataset_class(dataset_name, model_type, job_mode='train'):
"""Get the dataset class used for all jobs."""
if dataset_name == 'freebase2wikitext':
if model_type == 'bow2text':
return pd.Bow2TextDataset
elif FLAGS.model_type == 'graph2text':
return pd.Graph2TextDataset
elif FLAGS.model_type == 'text':
if job_mode in ['train', 'eval']:
return pd.TextOnlyDataset
else:
# for sampling: taking the unique graphs for a fair comparison
return pd.Bow2TextDataset
else:
# Add other graph2text data here.
raise NotImplementedError()
else:
def dataset(graph_tokenizer, *args, **kwargs):
del graph_tokenizer
return wt.Dataset(*args, **kwargs)
return dataset
def preprocess(batch, model_type, num_devices=1):
"""Preprocess the batch before sending to the model."""
if model_type == 'text':
if 'graphs' in batch:
del batch['graphs']
elif model_type == 'bow2text':
# Do nothing, bow2text data is already in a good form.
pass
else: # graph2text
if num_devices == 1:
graphs = gn.pad_graphs(jraph.batch(batch['graphs']))
else:
# We need to first batch graphs into num_devices batches.
graphs = gn.batch_graphs_by_device(batch['graphs'], num_devices)
# Then we pad them to the maximum graph size in the batch and concat.
# This way graphs can be distributed to each device through pmap.
graphs = gn.pad_graphs_by_device(graphs)
max_graph_size = gn.pad_size(graphs.n_node.max())
batch.update({
'graphs': graphs,
'max_graph_size': max_graph_size})
return batch
def text_model_fn(vocab_size):
return transformer.TransformerXL(
vocab_size=vocab_size,
emb_dim=FLAGS.emb_dim,
num_layers=FLAGS.num_layers,
num_heads=FLAGS.num_heads,
dropout_prob=FLAGS.dropout,
dropout_attn_prob=FLAGS.dropout_attn,
self_att_init_scale=FLAGS.self_att_init_scale,
dense_init_scale=FLAGS.dense_init_scale,
dense_dim=FLAGS.dense_dim,
tail_shrink_factor=FLAGS.tail_shrink_factor,
relative_pos_clamp_len=FLAGS.clamp_len or None)
def graph2text_model_fn(vocab_size):
"""Get graph2text transformer model."""
return transformer.Graph2TextTransformer(
vocab_size=vocab_size,
emb_dim=FLAGS.emb_dim,
num_layers=FLAGS.num_layers,
num_heads=FLAGS.num_heads,
dropout_prob=FLAGS.dropout,
dropout_attn_prob=FLAGS.dropout_attn,
self_att_init_scale=FLAGS.self_att_init_scale,
dense_init_scale=FLAGS.dense_init_scale,
dense_dim=FLAGS.dense_dim,
tail_shrink_factor=FLAGS.tail_shrink_factor,
relative_pos_clamp_len=FLAGS.clamp_len or None,
gnn_embed_dim=FLAGS.gnn_embed_dim,
gnn_num_layers=FLAGS.gnn_num_layers,
gnn_layer_norm=FLAGS.gnn_layer_norm)
def bow2text_model_fn(vocab_size):
"""Get the bow2text model."""
return transformer.Bow2TextTransformer(
vocab_size=vocab_size,
emb_dim=FLAGS.emb_dim,
num_layers=FLAGS.num_layers,
num_heads=FLAGS.num_heads,
dropout_prob=FLAGS.dropout,
dropout_attn_prob=FLAGS.dropout_attn,
self_att_init_scale=FLAGS.self_att_init_scale,
dense_init_scale=FLAGS.dense_init_scale,
dense_dim=FLAGS.dense_dim,
tail_shrink_factor=FLAGS.tail_shrink_factor,
relative_pos_clamp_len=FLAGS.clamp_len or None,
bow_embedding_dim=FLAGS.bow_embedding_dim,
bow_n_tokens=FLAGS.bow_n_tokens)
def build_loss_fn(vocab_size, cache_steps):
"""Build the appropriate loss function according to the configs."""
if FLAGS.model_type == 'text':
def loss_fn(data, is_training=True):
return text_model_fn(vocab_size=vocab_size).loss(
data['obs'], data['target'], data['mask'],
is_training=is_training,
should_reset=data['should_reset'],
cache_steps=cache_steps)
elif FLAGS.model_type == 'graph2text':
def loss_fn(data, max_graph_size, is_training=True):
return graph2text_model_fn(vocab_size=vocab_size).loss(
data['graphs'], max_graph_size, True,
data['obs'], data['target'], data['mask'],
is_training=is_training,
should_reset=data['should_reset'],
cache_steps=cache_steps)
elif FLAGS.model_type == 'bow2text':
def loss_fn(data, is_training=True):
return bow2text_model_fn(vocab_size=vocab_size).loss(
data['graphs'], data['obs'], data['target'], data['mask'],
is_training=is_training,
should_reset=data['should_reset'],
cache_steps=cache_steps)
else:
raise ValueError(f'Unknown model type "{FLAGS.model_type}".')
return loss_fn
def build_sampler(tokenizer, device=None):
"""Build the appropriate sampler according to the configs."""
if FLAGS.model_type == 'text':
model_fn = lambda prompts: text_model_fn(tokenizer.vocab_size)( # pylint: disable=g-long-lambda
prompts, is_training=False, cache_steps=FLAGS.sample_memory_size)
sampler_class = transformer_sampler.TransformerXLSampler
elif FLAGS.model_type == 'graph2text':
def model_fn(graphs, max_graph_size, prompts):
return graph2text_model_fn(tokenizer.vocab_size)(
graphs, max_graph_size, True, prompts, is_training=False,
cache_steps=FLAGS.sample_memory_size)
sampler_class = transformer_sampler.Graph2TextTransformerSampler
elif FLAGS.model_type == 'bow2text':
def model_fn(graphs, prompts):
return bow2text_model_fn(tokenizer.vocab_size)(
graphs, prompts, is_training=False,
cache_steps=FLAGS.sample_memory_size)
sampler_class = transformer_sampler.Bow2TextTransformerSampler
sampler = sampler_class(model_fn, FLAGS.sampling_temperature, device)
return sampler
def schedule(i, lr_schedule, init_lr, min_lr_ratio, max_steps):
if lr_schedule == 'cosine':
cosine_decay = 0.5 * (1 + jnp.cos(jnp.pi * i / max_steps))
decayed = (1 - min_lr_ratio) * cosine_decay + min_lr_ratio
return init_lr * decayed
else:
return jnp.where(
i > 350000, init_lr / 3**3,
jnp.where(i > 250000, init_lr / 3**2,
jnp.where(i > 150000, init_lr / 3, init_lr)))
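# Usage sketch for `schedule` above with made-up hyperparameters: the cosine
# variant decays the learning rate from init_lr towards init_lr * min_lr_ratio
# over max_steps.
def _schedule_example():
  lrs = [schedule(i, lr_schedule='cosine', init_lr=2.5e-4, min_lr_ratio=0.0,
                  max_steps=100) for i in (0, 50, 100)]
  return lrs  # Approximately [2.5e-4, 1.25e-4, 0.0].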
def evaluate(eval_set, initial_state, updater, eval_batch_size=1,
preprocess_fn=None, max_eval_samples=-1,
print_progress_every=None):
"""Evaluate a model on given dataset."""
total_losses = []
total_counts = []
token_accuracy = []
seq_accuracy = []
state = initial_state
step = state['step']
for i, batch in enumerate(eval_set):
state, eval_out = updater.eval_return_state(state, preprocess_fn(batch))
total_losses.append(eval_out['total_loss'])
total_counts.append(eval_out['total_count'])
token_accuracy.append(
eval_out['token_accuracy'] * eval_out['total_count'])
seq_accuracy.append(eval_out['seq_accuracy'])
if print_progress_every and (i + 1) % print_progress_every == 0:
total_loss = float(jnp.array(total_losses).sum())
total_count = float(jnp.array(total_counts).sum())
avg_loss = total_loss / total_count
bpc = avg_loss * np.log2(np.e)
perplexity = np.exp(avg_loss)
logging.info(
'Evaluated %d batches, total tokens %d, average loss %g,'
' bpc %g, perplexity %g.',
i + 1, total_count, avg_loss, bpc, perplexity)
if 0 < max_eval_samples <= (i + 1) * eval_batch_size:
break
total_loss = jnp.array(total_losses).sum()
total_count = jnp.array(total_counts).sum()
avg_loss = total_loss / total_count
eval_out = dict(total_loss=float(total_loss),
total_count=float(total_count),
loss=float(avg_loss),
token_accuracy=float(
jnp.array(token_accuracy).sum() / total_count),
seq_accuracy=float(
jnp.array(seq_accuracy).sum() / len(seq_accuracy)),
step=float(step),
bits_per_token=float(avg_loss) * np.log2(np.e),
perplexity=np.exp(float(avg_loss)))
return eval_out, state
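# The loss-to-metric conversions used inside `evaluate` above, in isolation.
# `avg_loss` is a made-up average per-token negative log-likelihood in nats.
def _loss_to_metrics_example(avg_loss=3.0):
  bits_per_token = avg_loss * np.log2(np.e)  # nats -> bits per token
  perplexity = np.exp(avg_loss)              # exp of nats per token
  return bits_per_token, perplexity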
def extract_title(text, tokenizer):
r"""Extract the title in the text.
Wikitext articles are in the format of `\n = TITLE = \n \n...`. We extract
the title as the tokens from the start up to the first occurrence of `\n \n`.
Args:
text: tokenized input text using `tokenizer`.
tokenizer: text tokenizer.
Returns:
title_end_idx: a numpy.array of shape (batch_size,), it indicates the index
in `text` that marks the end of the title.
"""
batch_size, text_length = text.shape
title_end_idx = np.ones(batch_size, dtype=np.int32)
newline_token = tokenizer.encode('\n')[0]
for b in range(batch_size):
prev_token = 1 # start tokens
for i in range(1, text_length): # skip start token
# when we first see '\n \n', that is the title
if prev_token == newline_token and text[b, i] == newline_token:
title_end_idx[b] = i
break
else:
prev_token = text[b, i]
return title_end_idx
def construct_prompts(text, batch_size, sample_length, tokenizer, prompt_title):
"""Construct prompts for text generation.
Args:
text: tokenized input text using `tokenizer`.
batch_size: the size of the batch.
sample_length: the length of the sample to be generated.
tokenizer: text tokenizer.
prompt_title: whether to return a prompt with the title of the `text`.
Returns:
prompts: a numpy.array of shape [batch_size, sample_length], in which -1
indicates tokens that need to be generated using the sampler.
"""
prompts = -np.ones((batch_size, sample_length), dtype=np.int32)
prompts[:, 0] = tokenizer.bos_token()
if prompt_title and text is not None:
title_end_idx = extract_title(text, tokenizer)
for i in range(batch_size):
prompts[i, 1:title_end_idx[i]+1] = text[i, 1:title_end_idx[i]+1]
return prompts
def generate_samples(params, tokenizer, sampler, model_type, prompts, graphs):
"""Generate a batch of samples using a sampler."""
if model_type == 'text':
samples = sampler.sample(params, prompts)
elif model_type == 'graph2text':
samples = sampler.sample(params, prompts, graphs, pad=True)
elif model_type == 'bow2text':
samples = sampler.sample(params, prompts, graphs)
else:
raise ValueError(f'Unknown model_type {model_type}')
return [tokenizer.decode(s) for s in samples], samples
def take_unique_graphs(data_iter, model_type):
"""Filter data such that it only returns batches with unique graphs."""
prev_graphs = None
for batch in data_iter:
graphs = batch.get('graphs', None)
# If there's no graph in batch, don't do any filtering
if graphs is None:
yield batch
else:
if prev_graphs is None:
prev_graphs = graphs
yield batch
else:
if model_type == 'graph2text':
not_same_graph = (prev_graphs.nodes.shape != graphs.nodes.shape or
not (prev_graphs.nodes == graphs.nodes).all())
else:
not_same_graph = (prev_graphs.shape != graphs.shape or
not (prev_graphs == graphs).all())
if not_same_graph:
prev_graphs = graphs
yield batch
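# Illustrative use of `take_unique_graphs` with bag-of-words style graphs
# (plain arrays); the batches below are made up. Only the first batch of each
# run of identical graphs is yielded.
def _take_unique_graphs_example():
  g1 = np.array([[1.0, 0.0]])
  g2 = np.array([[0.0, 1.0]])
  batches = [{'graphs': g1}, {'graphs': g1}, {'graphs': g2}]
  unique = list(take_unique_graphs(iter(batches), model_type='bow2text'))
  return len(unique)  # 2: the repeated g1 batch is filtered out.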
def compute_map_sklearn(pred, gt):
"""Computes mAP using scikit-learn."""
assert len(gt.shape) == len(pred.shape) == 2, (
'gt should be a one-hot encoding with the same shape as pred')
ap = [
sklearn.metrics.average_precision_score(
gt[c, :], pred[c, :], average=None)
for c in range(gt.shape[0])
]
return sum(ap) / len(ap)
def compute_recall_at_k(pred, k=1):
"""Computes recall@1 score."""
num_articles = pred.shape[1]
return sklearn.metrics.top_k_accuracy_score(
np.arange(num_articles), pred, k=k)
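# Quick sketch of the two retrieval metrics above on a tiny made-up score
# matrix in which row i holds the scores the model assigns to article i.
def _retrieval_metrics_example():
  scores = np.array([[0.9, 0.1, 0.0],
                     [0.2, 0.7, 0.1],
                     [0.3, 0.3, 0.4]])
  labels = np.eye(3)
  recall_at_1 = compute_recall_at_k(scores, k=1)  # 1.0: argmax on-diagonal.
  mean_ap = compute_map_sklearn(scores, labels)
  return recall_at_1, mean_ap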
def compute_text_graph_relevance(
eval_set, initial_state, updater, eval_batch_size=1, preprocess_fn=None,
print_progress_every=None):
"""Compute the text and graph relevance a model on given dataset."""
assert eval_batch_size == 1
num_articles = eval_set.num_articles
tokens_count = np.zeros((num_articles, num_articles))
log_probs = np.zeros((num_articles, num_articles)) # [graphs, texts]
state = initial_state
for i, batch in enumerate(eval_set):
state, eval_out = updater.eval_return_state(state, preprocess_fn(batch))
graph_id = batch['graph_id'][0]
seq_id = batch['seq_id'][0]
tokens_count[graph_id, seq_id] += eval_out['total_count']
log_probs[graph_id, seq_id] += eval_out['log_probs']
if print_progress_every is not None and (i + 1) % print_progress_every == 0:
logging.info('Evaluated %d samples', i + 1)
log_probs_per_token = log_probs / tokens_count
labels = np.eye(num_articles)
eval_out = dict(
log_probs=log_probs,
tokens_count=tokens_count,
log_probs_per_token=log_probs_per_token,
text2graph_recall_at_1=compute_recall_at_k(log_probs_per_token.T, k=1),
text2graph_recall_at_5=compute_recall_at_k(log_probs_per_token.T, k=5),
text2graph_map=compute_map_sklearn(log_probs_per_token.T, labels),
graph2text_recall_at_1=compute_recall_at_k(log_probs_per_token, k=1),
graph2text_recall_at_5=compute_recall_at_k(log_probs_per_token, k=5),
graph2text_map=compute_map_sklearn(log_probs_per_token, labels))
return eval_out, state
def _get_ngrams(segment, max_order):
"""Extracts all n-grams upto a given maximum order from an input segment.
Args:
segment: text segment from which n-grams will be extracted.
max_order: maximum length in tokens of the n-grams returned by this
method.
Returns:
The Counter containing all n-grams up to max_order in segment
with a count of how many times each n-gram occurred.
"""
ngram_counts = collections.Counter()
for order in range(1, max_order + 1):
for i in range(0, len(segment) - order + 1):
ngram = tuple(segment[i:i+order])
ngram_counts[ngram] += 1
return ngram_counts
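# Example of the n-gram counting above on a made-up token list: for
# ['a', 'b', 'a'] with max_order=2 we get ('a',): 2, ('b',): 1,
# ('a', 'b'): 1 and ('b', 'a'): 1.
def _get_ngrams_example():
  return _get_ngrams(['a', 'b', 'a'], max_order=2)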
def compute_bleu(reference_corpus, translation_corpus, max_order=4,
smooth=False):
"""Computes BLEU score of translated segments against one or more references.
Originally from tensor2tensor/tensor2tensor/utils/bleu_hook.py
Args:
reference_corpus: list of lists of references for each translation. Each
reference should be tokenized into a list of tokens.
translation_corpus: list of translations to score. Each translation
should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
BLEU score and n-gram precisions.
"""
matches_by_order = [0] * max_order
possible_matches_by_order = [0] * max_order
reference_length = 0
translation_length = 0
for (references, translation) in zip(reference_corpus,
translation_corpus):
reference_length += min(len(r) for r in references)
translation_length += len(translation)
merged_ref_ngram_counts = collections.Counter()
for reference in references:
merged_ref_ngram_counts |= _get_ngrams(reference, max_order)
translation_ngram_counts = _get_ngrams(translation, max_order)
overlap = translation_ngram_counts & merged_ref_ngram_counts
for ngram in overlap:
matches_by_order[len(ngram)-1] += overlap[ngram]
for order in range(1, max_order+1):
possible_matches = len(translation) - order + 1
if possible_matches > 0:
possible_matches_by_order[order-1] += possible_matches
if random.random() < 0.01:
print('==========')
for k, v in overlap.items():
if len(k) >= 3:
print('%s : %d' % (str(k), v))
# print(matches_by_order)
# print(possible_matches_by_order)
precisions = [0] * max_order
for i in range(0, max_order):
if smooth:
precisions[i] = ((matches_by_order[i] + 1.) /
(possible_matches_by_order[i] + 1.))
else:
if possible_matches_by_order[i] > 0:
precisions[i] = (float(matches_by_order[i]) /
possible_matches_by_order[i])
else:
precisions[i] = 0.0
if min(precisions) > 0:
p_log_sum = sum((1. / max_order) * math.log(p) for p in precisions)
geo_mean = math.exp(p_log_sum)
else:
geo_mean = 0
ratio = float(translation_length) / reference_length
if ratio > 1.0:
bp = 1.
else:
bp = math.exp(1 - 1. / ratio)
bleu = geo_mean * bp
return bleu, precisions
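# Minimal usage sketch of `compute_bleu` on made-up tokenized corpora: each
# translation is scored against its own list of references.
def _compute_bleu_example():
  references = [[['the', 'cat', 'sat', 'on', 'the', 'mat']]]
  translations = [['the', 'cat', 'sat', 'on', 'the', 'mat']]
  bleu, precisions = compute_bleu(references, translations, max_order=4)
  return bleu, precisions  # A perfect match gives bleu == 1.0.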
| deepmind-research-master | wikigraphs/utils.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# WikiGraphs is licensed under the terms of the Creative Commons
# Attribution-ShareAlike 4.0 International (CC BY-SA 4.0) license.
#
# WikiText-103 data (unchanged) is licensed by Salesforce.com, Inc. under the
# terms of the Creative Commons Attribution-ShareAlike 4.0 International
# (CC BY-SA 4.0) license. You can find details about CC BY-SA 4.0 at:
#
# https://creativecommons.org/licenses/by-sa/4.0/legalcode
#
# Freebase data is licensed by Google LLC under the terms of the Creative
# Commons CC BY 4.0 license. You may obtain a copy of the License at:
#
# https://creativecommons.org/licenses/by/4.0/legalcode
#
# ==============================================================================
"""Train a transformer for language modeling on Wikitext-103."""
import concurrent
import functools
import os
import pickle
import time
from absl import app
from absl import flags
from absl import logging
import jax
import jraph
import numpy as np
import optax
from updaters import CheckpointingUpdater
from updaters import Updater
import utils
# Train
flags.DEFINE_integer('train_batch_size', 4, '(Per-Device) batch size for'
' training.')
flags.DEFINE_integer('train_timesteps', 150, 'Sequence length to learn on')
flags.DEFINE_integer('train_memory_size', 150, 'Memory size for transformer XL')
flags.DEFINE_bool('debug', False, 'Whether to turn on debugging mode')
flags.DEFINE_string('job_mode', 'train',
'One of `train`, `eval`, `sample`, `retrieve`.')
flags.DEFINE_integer('random_seed', 42, 'Random seed id.')
flags.DEFINE_integer('num_gpus', 8, 'Number of GPUs for training.')
# Eval
flags.DEFINE_integer('eval_batch_size', 1, 'Evaluation batch size')
flags.DEFINE_string('eval_subset', 'valid', 'Which subset to evaluate on,'
' one of `valid`, `test`.')
flags.DEFINE_integer('eval_every', 10, 'Evaluation frequency.')
flags.DEFINE_integer('eval_timesteps', 64, 'Sequence length to learn on')
flags.DEFINE_integer('eval_memory_size', 640, 'Memory size for transformer XL')
flags.DEFINE_integer('max_eval_samples', -1, 'Max number of eval samples. Set'
' as -1 to use the entire eval set.')
# Model
flags.DEFINE_integer('emb_dim', 410, 'model width')
flags.DEFINE_integer('num_heads', 10, 'Number of attention heads')
flags.DEFINE_integer('num_layers', 16, 'Number of transformer layers')
flags.DEFINE_integer('dense_dim', 2100, 'Size of dense hidden layer.')
flags.DEFINE_integer('tail_shrink_factor', 4,
'Low-frequency vocabulary shrinkage factor in adaptive'
' softmax.')
flags.DEFINE_string('emb_type', 'adaptive_softmax', 'Type of the word embedding'
' layer.')
flags.DEFINE_integer('clamp_len', 400, 'Clamp length for transformer XL.')
flags.DEFINE_float('dropout', 0.1, 'Dropout rate for the transformer layers.')
flags.DEFINE_float('dropout_attn', 0.0, 'Dropout rate for the attention'
' weights.')
flags.DEFINE_float('self_att_init_scale', 0.02,
'Self attention module initialization scale.')
flags.DEFINE_float('dense_init_scale', 0.02,
'Dense module initialization scale.')
# Graph neural net configs
flags.DEFINE_string('gnn_embed_type', 'adaptive', 'Token embedding type for the'
' graph.')
flags.DEFINE_integer('gnn_embed_dim', 128, 'Graph node embedding size.')
flags.DEFINE_integer('gnn_num_layers', 1, 'Number of layers in the GNN.')
flags.DEFINE_bool('gnn_layer_norm', True, 'Whether to use layer norm in GNN.')
# Bag-of-words to text configs
flags.DEFINE_integer('bow_embedding_dim', 256, 'Size of the bow embeddings.')
flags.DEFINE_integer('bow_n_tokens', 1, 'Number of tokens to use for the'
' bow2text model.')
# Sampling
flags.DEFINE_float('sampling_temperature', 0.8, 'Temperature used for'
' sampling. Sampling becomes more deterministic with a'
' lower temperature. Setting temperature to 1.0 samples'
' from the model distribution.')
flags.DEFINE_bool('prompt_title', False, 'Whether to prompt title when sample')
flags.DEFINE_integer('sample_length', 512, 'Length of samples.')
flags.DEFINE_integer('sample_memory_size', 640, 'Memory size for sampling.')
flags.DEFINE_integer('num_samples', 1000, 'Maximum number of samples to'
' generate.')
# Optimization
flags.DEFINE_float('init_lr', 0.00025, 'Initial learning rate.')
flags.DEFINE_float('min_lr_ratio', 0.0, 'Minimum learning rate as a ratio of'
' `init_lr`.')
flags.DEFINE_string('lr_schedule', 'cosine', 'One of `default`, `cosine`.')
flags.DEFINE_float('grad_clip', 0.25, 'Maximum gradient norm allowed for'
' clipping, set to a very large number to disable clipping.')
flags.DEFINE_integer('max_steps', 200_000, 'Number of training steps.')
flags.DEFINE_string('checkpoint_dir', '/tmp/graph2text',
'Directory to store checkpoints.')
# Data
flags.DEFINE_string('dataset', 'freebase2wikitext', 'Which dataset to train on,'
' one of "wikitext", "freebase2wikitext".')
flags.DEFINE_string('model_type', 'graph2text', 'One of "text", "graph2text",'
' "bow2text".')
flags.DEFINE_string('graph_data_version', 'max256', 'One of "max256", "max512",'
' "max1024".')
flags.DEFINE_integer('log_every', 50, 'Log every this many steps.')
flags.DEFINE_integer('ckpt_every', 1000, 'Checkpoint every this many steps.')
FLAGS = flags.FLAGS
def _preprocess(batch, num_devices=1):
return utils.preprocess(batch, FLAGS.model_type, num_devices)
def _train(updater, train_dataset, num_devices):
"""Train the transformer model."""
# Initialize parameters.
logging.info('Initializing parameters...')
rng = jax.random.PRNGKey(FLAGS.random_seed)
state = updater.init(
rng, _preprocess(train_dataset.return_faux_batch(), num_devices))
logging.info('Starting train loop...')
prev_time = time.time()
while True:
data = next(train_dataset)
state, metrics = updater.update(state, _preprocess(data, num_devices))
# We use JAX runahead to mask data preprocessing and JAX dispatch overheads.
# Using values from state/metrics too often will block the runahead and can
# cause these overheads to become more prominent.
step = np.array(metrics['step'])
if step % FLAGS.log_every == 0:
steps_per_sec = FLAGS.log_every / (time.time() - prev_time)
prev_time = time.time()
metrics.update({'steps_per_sec': steps_per_sec})
logging.info({k: float(v) for k, v in metrics.items()})
if step % FLAGS.ckpt_every == 0:
updater.save_checkpoint(state)
if step > FLAGS.max_steps:
break
def _eval(updater, eval_dataset):
"""Evaluate the transformer model."""
checkpoint_state = updater.load_checkpoint()
rng = jax.random.PRNGKey(FLAGS.random_seed)
state = updater.init_from_checkpoint(
rng, _preprocess(eval_dataset.return_faux_batch()), checkpoint_state)
eval_out, state = utils.evaluate(
eval_dataset, state, updater, FLAGS.eval_batch_size, _preprocess,
FLAGS.max_eval_samples, print_progress_every=20)
logging.info('Eval output: %s', eval_out)
def _retrieve(updater, eval_dataset):
"""Graph and text retrieval using the transformer model."""
checkpoint_state = updater.load_checkpoint()
rng = jax.random.PRNGKey(FLAGS.random_seed)
state = updater.init_from_checkpoint(
rng, _preprocess(eval_dataset.return_faux_batch()), checkpoint_state)
retrieval_out, _ = utils.compute_text_graph_relevance(
eval_dataset, state, updater, preprocess_fn=_preprocess,
print_progress_every=20)
logging.info('Retrieval output: %s', retrieval_out)
def _sample(eval_dataset, tokenizer, devices, batch_size=1):
"""Evaluate the graph2text transformer."""
checkpoint_dir = os.path.join(FLAGS.checkpoint_dir, 'checkpoint.pkl')
logging.info('Loading checkpoint from %s', checkpoint_dir)
with open(checkpoint_dir, 'rb') as f:
state = pickle.load(f)
if FLAGS.model_type == 'graph2text':
# process list of graphs into a batch
eval_dataset = map(lambda x: dict( # pylint: disable=g-long-lambda
obs=x['obs'],
target=x['target'],
should_reset=x['should_reset'],
mask=x['mask'],
graphs=jraph.batch(x['graphs']),
), eval_dataset)
eval_dataset = utils.take_unique_graphs(eval_dataset, FLAGS.model_type)
samplers = []
for device in devices:
sampler = utils.build_sampler(tokenizer, device=device)
samplers.append(sampler)
step = state['step']
params = state['params']
sample_logger = []
with concurrent.futures.ThreadPoolExecutor(
max_workers=len(samplers)) as executor:
futures = dict()
for sampler in samplers:
batch = next(eval_dataset)
prompts = utils.construct_prompts(
batch['obs'], batch_size, FLAGS.sample_length, tokenizer,
prompt_title=FLAGS.prompt_title)
if FLAGS.model_type in ['graph2text', 'bow2text']:
future = executor.submit(
utils.generate_samples, params, tokenizer, sampler,
model_type=FLAGS.model_type, prompts=prompts,
graphs=batch['graphs'])
futures[future] = (sampler, batch['graphs'], batch['obs'])
else:
future = executor.submit(
utils.generate_samples, params, tokenizer, sampler,
model_type=FLAGS.model_type, prompts=prompts, graphs=None)
futures[future] = (sampler, batch['obs'])
n_samples = 0
while n_samples < FLAGS.num_samples:
for future, future_items in list(futures.items()):
if not future.done():
continue
samples, tokens = future.result()
if FLAGS.model_type == 'graph2text':
sampler, graphs, text = future_items
graphs = jraph.unbatch(graphs)
elif FLAGS.model_type == 'bow2text':
sampler, graphs, text = future_items
else:
sampler, text = future_items
if FLAGS.model_type in ['graph2text', 'bow2text']:
for s, g, tk, txt in zip(samples, graphs, tokens, text):
# Only log a small fraction of the generated samples, if we are
# generating non-stop. Otherwise log every sample.
logging.info('[step %d]', step)
logging.info('graph=\n%r', g)
logging.info('sample=\n%s', s)
if FLAGS.model_type == 'graph2text':
sample_logger.append({
'step': step,
'sample': s,
'sample_tokens': tk,
'ground_truth_text': txt,
})
elif FLAGS.model_type == 'bow2text':
sample_logger.append({
'step': step,
'bow': g,
'sample': s,
'sample_tokens': tk,
'ground_truth_text': txt,
})
else:
for s, tk, txt in zip(samples, tokens, text):
# Only log a small fraction of the generated samples, if we are
# generating non-stop. Otherwise log every sample.
logging.info('[step %d]', step)
logging.info('sample=\n%s', s)
sample_logger.append({
'step': step,
'sample': s,
'sample_tokens': tk,
'ground_truth_text': txt,
})
n_samples += len(samples)
logging.info('Finished generating %d samples', n_samples)
del futures[future]
if n_samples < FLAGS.num_samples:
batch = next(eval_dataset)
prompts = utils.construct_prompts(
batch['obs'], batch_size, FLAGS.sample_length, tokenizer,
prompt_title=FLAGS.prompt_title)
if FLAGS.model_type in ['graph2text', 'bow2text']:
future = executor.submit(
utils.generate_samples, params, tokenizer, sampler,
model_type=FLAGS.model_type, prompts=prompts,
graphs=batch['graphs'])
futures[future] = (sampler, batch['graphs'], batch['obs'])
else:
future = executor.submit(
utils.generate_samples, params, tokenizer, sampler,
model_type=FLAGS.model_type, prompts=prompts, graphs=None)
futures[future] = (sampler, batch['obs'])
logging.info('Finished')
path = os.path.join(FLAGS.checkpoint_dir, 'samples.pkl')
with open(path, 'wb') as f:
pickle.dump(dict(samples=sample_logger), f)
logging.info('Samples saved to %s', path)
def main(_):
# Create the dataset.
tokenizer = utils.init_tokenizer(FLAGS.dataset)
graph_tokenizer = utils.init_graph_tokenizer()
dataset_class = utils.get_dataset_class(FLAGS.dataset, FLAGS.model_type)
has_graph = FLAGS.model_type == 'graph2text'
local_devices = jax.local_devices()
num_gpus = min(FLAGS.num_gpus, len(local_devices))
if FLAGS.job_mode == 'train':
train_dataset = dataset_class(
tokenizer=tokenizer,
graph_tokenizer=graph_tokenizer,
batch_size=FLAGS.train_batch_size,
subset='train',
timesteps=FLAGS.train_timesteps,
version=FLAGS.graph_data_version,
shuffle_data=True,
repeat=True,
debug=FLAGS.debug)
train_iter = iter(train_dataset)
loss_fn = utils.build_loss_fn(vocab_size=tokenizer.vocab_size,
cache_steps=FLAGS.train_memory_size)
optimizer = optax.chain(
optax.clip_by_global_norm(FLAGS.grad_clip),
optax.scale_by_adam(),
optax.scale_by_schedule(functools.partial(
utils.schedule,
lr_schedule=FLAGS.lr_schedule,
init_lr=FLAGS.init_lr,
min_lr_ratio=FLAGS.min_lr_ratio,
max_steps=FLAGS.max_steps)),
optax.scale(-1))
optimizer = optax.apply_if_finite(optimizer, max_consecutive_errors=5)
updater = Updater(loss_fn, optimizer,
devices=local_devices[:num_gpus],
has_graph=has_graph)
updater = CheckpointingUpdater(updater, FLAGS.checkpoint_dir)
_train(updater, train_iter, num_gpus)
elif FLAGS.job_mode == 'eval':
eval_dataset = dataset_class(
tokenizer=tokenizer,
graph_tokenizer=graph_tokenizer,
batch_size=FLAGS.eval_batch_size,
subset=FLAGS.eval_subset,
timesteps=FLAGS.eval_timesteps,
version=FLAGS.graph_data_version,
shuffle_data=False,
repeat=False,
debug=FLAGS.debug)
eval_iter = iter(eval_dataset)
loss_fn = utils.build_loss_fn(vocab_size=tokenizer.vocab_size,
cache_steps=FLAGS.eval_memory_size)
# only use one device for evaluation
devices = local_devices[:1]
updater = Updater(loss_fn, optimizer=None, devices=devices,
has_graph=has_graph)
updater = CheckpointingUpdater(updater, FLAGS.checkpoint_dir)
_eval(updater, eval_iter)
elif FLAGS.job_mode == 'sample':
eval_dataset = dataset_class(
tokenizer=tokenizer,
graph_tokenizer=graph_tokenizer,
batch_size=1,
subset=FLAGS.eval_subset,
timesteps=FLAGS.sample_length,
version=FLAGS.graph_data_version,
shuffle_data=False,
repeat=True,
debug=FLAGS.debug)
eval_iter = iter(eval_dataset)
_sample(eval_iter, tokenizer, local_devices[:num_gpus])
elif FLAGS.job_mode == 'retrieve':
eval_dataset = dataset_class(
tokenizer=tokenizer,
graph_tokenizer=graph_tokenizer,
batch_size=1,
subset=FLAGS.eval_subset,
timesteps=FLAGS.eval_timesteps,
version=FLAGS.graph_data_version,
shuffle_data=False,
repeat=False,
graph_retrieval_dataset=True,
debug=FLAGS.debug)
eval_iter = iter(eval_dataset)
loss_fn = utils.build_loss_fn(vocab_size=tokenizer.vocab_size,
cache_steps=FLAGS.eval_memory_size)
# only use one device for evaluation
devices = local_devices[:1]
updater = Updater(loss_fn, optimizer=None, devices=devices,
has_graph=has_graph)
updater = CheckpointingUpdater(updater, FLAGS.checkpoint_dir)
_retrieve(updater, eval_iter)
if __name__ == '__main__':
app.run(main)
| deepmind-research-master | wikigraphs/main.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# WikiGraphs is licensed under the terms of the Creative Commons
# Attribution-ShareAlike 4.0 International (CC BY-SA 4.0) license.
#
# WikiText-103 data (unchanged) is licensed by Salesforce.com, Inc. under the
# terms of the Creative Commons Attribution-ShareAlike 4.0 International
# (CC BY-SA 4.0) license. You can find details about CC BY-SA 4.0 at:
#
# https://creativecommons.org/licenses/by-sa/4.0/legalcode
#
# Freebase data is licensed by Google LLC under the terms of the Creative
# Commons CC BY 4.0 license. You may obtain a copy of the License at:
#
# https://creativecommons.org/licenses/by/4.0/legalcode
#
# ==============================================================================
"""Script for building vocabulary files for datasets."""
import collections
import csv
import enum
import io
import os
from typing import List, Tuple
from absl import app
from absl import flags
from absl import logging
from wikigraphs.data import io_tools
from wikigraphs.data import paired_dataset
from wikigraphs.data import tokenizers
from wikigraphs.data import wikitext
class DatasetType(enum.Enum):
text = 1
graph = 2
wikitext = 3
FLAGS = flags.FLAGS
flags.DEFINE_string('data_dir', '', 'Path to the directory that contains the'
' unzipped wikitext-103 data.')
flags.DEFINE_string('vocab_file_path', '', 'Path to the output vocab file.')
flags.DEFINE_enum_class('data_type', DatasetType.wikitext, DatasetType,
'One of {`wikitext`, `graph`, `text`}.')
flags.DEFINE_integer('threshold', 1, 'Frequency threshold for a word to be'
' included in the vocabulary.')
flags.DEFINE_string('version', 'max256', 'Which version of paired data to use.')
def get_vocab(dataset: wikitext.RawDataset) -> List[Tuple[str, int]]:
"""Build vocabulary, return (word, count) tuples sorted by count."""
vocab = collections.defaultdict(int)
for pair in dataset:
for t in pair.text.split(' '):
if t:
vocab[t] += 1
return sorted(vocab.items(), key=lambda t: -t[1])
def write_vocab(vocab: List[Tuple[str, int]], output_path: str):
"""Write a vocab list to a file."""
output_dir = os.path.dirname(output_path)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
with open(output_path, mode='wb') as f_:
with io.TextIOWrapper(f_, encoding='utf-8') as f:
w = csv.writer(f)
w.writerows(vocab)
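# Sketch of the two helpers above on made-up data: `get_vocab` only relies on
# the `.text` attribute of the yielded items, so a tiny stand-in works here.
def _vocab_example(output_path='/tmp/vocab-example.csv'):
  class FakeArticle:
    text = 'a b b c c c'
  vocab = get_vocab([FakeArticle()])  # [('c', 3), ('b', 2), ('a', 1)]
  write_vocab(vocab, output_path)
  return vocab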
def build_wikitext_vocab():
logging.info('Loading the dataset.')
dataset = wikitext.RawDataset(subset='train', data_dir=FLAGS.data_dir)
logging.info('Building the vocab.')
vocab = get_vocab(dataset)
logging.info('Finished, vocab size %d, total number of tokens %d',
len(vocab), sum([c for _, c in vocab]))
logging.info('Writing the vocab to %s', FLAGS.vocab_file_path)
write_vocab(vocab, FLAGS.vocab_file_path)
def build_graph_vocab():
"""Build vocabulary for graph data."""
logging.info('Loading the dataset.')
dataset = paired_dataset.ParsedDataset(
subset='train', data_dir=FLAGS.data_dir, version=FLAGS.version)
logging.info('Building graph vocab.')
vocab = collections.defaultdict(int)
for pair in dataset:
graph = pair.graph
for n in graph.nodes():
for t in tokenizers.GraphTokenizer.split_node(n):
if t:
vocab[t] += 1
for _, _, e in graph.edges():
for t in tokenizers.GraphTokenizer.split_edge(e):
if t:
vocab[t] += 1
vocab = sorted(vocab.items(), key=lambda t: -t[1])
vocab = [k for k, v in vocab if v >= FLAGS.threshold]
logging.info('Finished, vocab size %d.', len(vocab))
logging.info('Writing the vocab to %s.', FLAGS.vocab_file_path)
io_tools.write_txt_file(FLAGS.vocab_file_path, '\n'.join(vocab),
# Some unicode characters require utf-16 to encode.
encoding='utf-16')
def build_text_vocab():
"""Build vocabulary for the text part of the graph-to-text data."""
logging.info('Loading the dataset.')
dataset = paired_dataset.ParsedDataset(
subset='train', data_dir=FLAGS.data_dir, version=FLAGS.version)
logging.info('Building text vocab.')
vocab = collections.defaultdict(int)
for pair in dataset:
for t in pair.text.split(' '):
if t:
vocab[t] += 1
vocab = sorted(vocab.items(), key=lambda t: -t[1])
logging.info('Finished, vocab size %d, total number of tokens %d.',
len(vocab), sum([v for _, v in vocab]))
vocab = [(k, v) for k, v in vocab if v >= FLAGS.threshold]
logging.info('After filtering, vocab size %d.', len(vocab))
logging.info('Writing the vocab to %s.', FLAGS.vocab_file_path)
write_vocab(vocab, FLAGS.vocab_file_path)
def main(_):
if FLAGS.data_type == DatasetType.wikitext:
build_wikitext_vocab()
elif FLAGS.data_type == DatasetType.text:
build_text_vocab()
elif FLAGS.data_type == DatasetType.graph:
build_graph_vocab()
else:
raise ValueError(f'Unknown data type {FLAGS.data_type}.')
if __name__ == '__main__':
app.run(main)
| deepmind-research-master | wikigraphs/scripts/build_vocab.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# WikiGraphs is licensed under the terms of the Creative Commons
# Attribution-ShareAlike 4.0 International (CC BY-SA 4.0) license.
#
# WikiText-103 data (unchanged) is licensed by Salesforce.com, Inc. under the
# terms of the Creative Commons Attribution-ShareAlike 4.0 International
# (CC BY-SA 4.0) license. You can find details about CC BY-SA 4.0 at:
#
# https://creativecommons.org/licenses/by-sa/4.0/legalcode
#
# Freebase data is licensed by Google LLC under the terms of the Creative
# Commons CC BY 4.0 license. You may obtain a copy of the License at:
#
# https://creativecommons.org/licenses/by/4.0/legalcode
#
# ==============================================================================
r"""Tool to visualize graphs.
You need to have the command line tool `dot` installed locally, for example by
`sudo apt-get install graphviz`.
Example usage:
python visualize_graph.py \
--logtostderr --graph_ids=0:48 --truncate_limit=500 --layout=fdp
"""
import html
import os
import textwrap
from absl import app
from absl import flags
from absl import logging
from wikigraphs.data import io_tools
from wikigraphs.data import paired_dataset as pd
FLAGS = flags.FLAGS
flags.DEFINE_string('subset', 'valid', 'Which subset to choose graphs from.')
flags.DEFINE_string('graph_ids', '', 'A comma-separated string of graph IDs'
' (0-based), for example `1,2,3`. Or alternatively a'
' range, e.g. `0:10` which is equivalent to'
' `0,1,2,3,...,9`.')
flags.DEFINE_string('version', 'max256', 'Which version of data to load.')
flags.DEFINE_string('data_dir', '', 'Path to a directory that contains the raw'
' paired data, if provided.')
flags.DEFINE_string('output_dir', '/tmp/graph_vis', 'Output directory to save'
' the visualized graphs.')
flags.DEFINE_integer('truncate_limit', -1, 'Maximum length for graph nodes in'
' visualization.')
flags.DEFINE_string('layout', 'fdp', 'Which one of the dot layout to use.')
def truncate(s: str) -> str:
if FLAGS.truncate_limit > 0 and len(s) > FLAGS.truncate_limit:
s = s[:FLAGS.truncate_limit] + '...'
return s
def format_label(s: str, width: int = 40) -> str:
"""Format a node / edge label."""
s = io_tools.normalize_freebase_string(s)
s = truncate(s)
lines = s.split('\\n')
output_lines = []
for line in lines:
line = html.escape(line)
if width > 0:
output_lines += textwrap.wrap(line, width)
else:
output_lines.append(line)
return '<' + '<br/>'.join(output_lines) + '>'
def graph_to_dot(graph_text_pair: io_tools.GraphTextPair) -> str:
"""Convert a graph to a dot file."""
dot = ['digraph {', 'node [shape=rect];']
graph = pd.Graph.from_edges(graph_text_pair.edges)
center_node_id = graph.node2id(graph_text_pair.center_node)
for i, n in enumerate(graph.nodes()):
color = '#f5dc98' if i == center_node_id else (
'#b0ffad' if not(n[0] == '"' and n[-1] == '"') else '#ffffff')
label = format_label(n)
dot.append(f'{i} [ label = {label}, fillcolor="{color}", style="filled"];')
for i, j, e in graph.edges():
dot.append(f'{i} -> {j} [ label = {format_label(e, width=0)} ];')
dot.append('}')
return '\n'.join(dot)
def visualize_graph(graph_text_pair: io_tools.GraphTextPair,
graph_id: int,
output_dir: str):
"""Visualize a graph and save the visualization to the specified directory."""
dot = graph_to_dot(graph_text_pair)
output_file = os.path.join(output_dir, f'{graph_id}.dot')
logging.info('Writing output to %s', output_file)
with open(output_file, 'w') as f:
f.write(dot)
pdf_output = os.path.join(output_dir, f'{graph_id}.pdf')
os.system(f'dot -K{FLAGS.layout} -Tpdf -o {pdf_output} {output_file}')
def main(_):
logging.info('Loading the %s set of data.', FLAGS.subset)
pairs = list(pd.RawDataset(subset=FLAGS.subset,
data_dir=FLAGS.data_dir or None,
shuffle_data=False,
version=FLAGS.version))
logging.info('Loaded %d graph-text pairs.', len(pairs))
if ':' in FLAGS.graph_ids:
start, end = [int(i) for i in FLAGS.graph_ids.split(':')]
graph_ids = list(range(start, end))
else:
graph_ids = [int(i) for i in FLAGS.graph_ids.split(',')]
logging.info('Visualizing graphs with ID %r', graph_ids)
if not os.path.exists(FLAGS.output_dir):
os.makedirs(FLAGS.output_dir)
for gid in graph_ids:
visualize_graph(pairs[gid], gid, FLAGS.output_dir)
if __name__ == '__main__':
app.run(main)
| deepmind-research-master | wikigraphs/scripts/visualize_graph.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# WikiGraphs is licensed under the terms of the Creative Commons
# Attribution-ShareAlike 4.0 International (CC BY-SA 4.0) license.
#
# WikiText-103 data (unchanged) is licensed by Salesforce.com, Inc. under the
# terms of the Creative Commons Attribution-ShareAlike 4.0 International
# (CC BY-SA 4.0) license. You can find details about CC BY-SA 4.0 at:
#
# https://creativecommons.org/licenses/by-sa/4.0/legalcode
#
# Freebase data is licensed by Google LLC under the terms of the Creative
# Commons CC BY 4.0 license. You may obtain a copy of the License at:
#
# https://creativecommons.org/licenses/by/4.0/legalcode
#
# ==============================================================================
"""Preprocess freebase data and pair with wikitext."""
import os
from absl import app
from absl import flags
from absl import logging
from wikigraphs.data import io_tools
from wikigraphs.data import wikitext
FLAGS = flags.FLAGS
flags.DEFINE_string('freebase_dir', '', 'Directory that contains Freebase'
' graphs.')
flags.DEFINE_string('output_dir', '', 'Path to output directory to store the'
' paired dataset.')
def pair_graphs_with_wikitext(subset: str, graph_dir: str, output_dir: str):
"""Pair graphs with wikitext articles, and write to output directory."""
logging.info('Pairing graphs from the %s set from %s with wikitext.',
subset, graph_dir)
graphs = list(io_tools.graphs_from_file(
os.path.join(graph_dir, f'{subset}.gz')))
title2graph = {
io_tools.normalize_freebase_string(g.title).replace(' ', ''): g
for g in graphs}
n_graphs = len(graphs)
# Use raw version of the wikitext data as the tokenized version has <unk> in
# titles which is bad for matching. We will handle the <unk>s through the
# tokenizer to make sure our data are equivalent to that of the tokenized
# version of wikitext-103.
wikitext_articles = list(wikitext.RawDataset(subset=subset, version='raw'))
n_wiki = len(wikitext_articles)
logging.info('Loaded %d graphs and %d wikitext articles in total.',
n_graphs, n_wiki)
# Keep track of the article titles in the dataset. Unfortunately wikitext-103
# has about 1% of duplicated articles, we want to take care of that.
retrieved_titles = set()
pairs = []
n_duplicates = 0
for a in wikitext_articles:
title = wikitext.normalize_title(a.title).replace(' ', '')
g = title2graph.get(title, None)
if g is not None:
if title not in retrieved_titles:
retrieved_titles.add(title)
pairs.append(io_tools.GraphTextPair(
center_node=g.center,
title=g.title,
edges=g.edges,
text=a.text))
else:
n_duplicates += 1
n_pairs = len(pairs)
logging.info('Matched %d/%d = %.1f%% of wikitext articles,'
' and %d/%d = %.1f%% of graphs.',
n_pairs, n_wiki, float(n_pairs) / n_wiki * 100,
n_pairs, n_graphs, float(n_pairs) / n_graphs * 100)
logging.info('Detected %d/%d = %.1f%% of duplicated wikitext articles.',
n_duplicates, n_wiki, float(n_duplicates) / n_wiki * 100)
io_tools.write_pairs_to_gzip_txt_file(
os.path.join(output_dir, f'{subset}.gz'), pairs)
def main(_):
for subset in ['train', 'valid', 'test']:
pair_graphs_with_wikitext(subset, FLAGS.freebase_dir, FLAGS.output_dir)
if __name__ == '__main__':
app.run(main)
| deepmind-research-master | wikigraphs/scripts/freebase_preprocess.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# WikiGraphs is licensed under the terms of the Creative Commons
# Attribution-ShareAlike 4.0 International (CC BY-SA 4.0) license.
#
# WikiText-103 data (unchanged) is licensed by Salesforce.com, Inc. under the
# terms of the Creative Commons Attribution-ShareAlike 4.0 International
# (CC BY-SA 4.0) license. You can find details about CC BY-SA 4.0 at:
#
# https://creativecommons.org/licenses/by-sa/4.0/legalcode
#
# Freebase data is licensed by Google LLC under the terms of the Creative
# Commons CC BY 4.0 license. You may obtain a copy of the License at:
#
# https://creativecommons.org/licenses/by/4.0/legalcode
#
# ==============================================================================
"""Compute the bleu score on generated text and the ground truth."""
import math
import os
import pickle
from absl import app
from absl import flags
from absl import logging
import numpy as np
import utils
flags.DEFINE_string('checkpoint_dir', '/tmp/transformerXL',
'Checkpoint directory to load saved samples.')
flags.DEFINE_string('dataset', 'freebase2wikitext', 'Which dataset to the model'
' is trained on, one of "wikitext", "freebase2wikitext".')
FLAGS = flags.FLAGS
def group_samples(samples, tokenizer):
"""Groups generated and ground truth texts."""
groups = {}
for i, row in enumerate(samples):
gt = tokenizer.decode(row['ground_truth_text'])
sample = tokenizer.decode(row['sample_tokens'])
if gt not in groups:
groups[gt] = (gt.split(), [sample.split()])
else:
groups[gt][-1].append(sample.split())
if (i + 1) % 100 == 0:
logging.info('Processed %d samples', i + 1)
return groups
def eval_samples(raw_samples, tokenizer):
"""Evaluates generated samples."""
gt_refs = []
samples = []
groups = group_samples(raw_samples, tokenizer)
groups = list(groups.values())
avg_group_size = np.mean([len(g[-1]) for g in groups])
logging.info('Average samples per example: %.2f', avg_group_size)
avg_group_size = int(math.ceil(avg_group_size))
for i, (gt, s) in enumerate(groups):
gt_refs.append(gt)
idx = i % len(groups)
samples.append(groups[idx][-1])
gt_bleu, gt_n_grams = utils.compute_bleu(samples, gt_refs)
logging.info('Processed %d samples in total.', sum([len(s) for s in samples]))
flat_samples = []
for s in samples:
flat_samples.extend(s)
logging.info('Average sample len: %.2f',
np.mean([len(s) for s in flat_samples]))
logging.info('Average ground-truth len: %.2f',
np.mean([len(gt) for gt in gt_refs]))
logging.info('Ground-truth BLEU: %6.2f, n-gram precision: (%s)',
gt_bleu * 100,
', '.join(['%6.2f%%' % (s * 100) for s in gt_n_grams]))
def main(_):
tokenizer = utils.init_tokenizer(FLAGS.dataset)
checkpoint_dir = os.path.join(FLAGS.checkpoint_dir, 'samples.pkl')
logging.info('Loading samples from %s', checkpoint_dir)
with open(checkpoint_dir, 'rb') as f:
samples = pickle.load(f)['samples']
eval_samples(samples, tokenizer)
if __name__ == '__main__':
app.run(main)
| deepmind-research-master | wikigraphs/scripts/compute_blue_score.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# WikiGraphs is licensed under the terms of the Creative Commons
# Attribution-ShareAlike 4.0 International (CC BY-SA 4.0) license.
#
# WikiText-103 data (unchanged) is licensed by Salesforce.com, Inc. under the
# terms of the Creative Commons Attribution-ShareAlike 4.0 International
# (CC BY-SA 4.0) license. You can find details about CC BY-SA 4.0 at:
#
# https://creativecommons.org/licenses/by-sa/4.0/legalcode
#
# Freebase data is licensed by Google LLC under the terms of the Creative
# Commons CC BY 4.0 license. You may obtain a copy of the License at:
#
# https://creativecommons.org/licenses/by/4.0/legalcode
#
# ==============================================================================
"""Transformer blocks."""
import math
from typing import Callable, Optional
import haiku as hk
from haiku import initializers as init
import jax
import jax.numpy as jnp
from wikigraphs.model.embedding import RelativePositionEmbedding
def conv1d(x, num_units, init_scale=0.02, with_bias=True):
return hk.Conv1D(
output_channels=num_units, kernel_shape=1, with_bias=with_bias,
w_init=init.RandomNormal(stddev=init_scale))(x)
def layer_norm(x):
return hk.LayerNorm(axis=-1, create_scale=True, create_offset=True)(x)
class FeedForwardBlock(hk.Module):
"""Feed forward block."""
def __init__(self,
dense_dim: int = 2100,
dropout_prob: float = 0.1,
init_scale: float = 1.,
name: Optional[str] = None):
"""Initializes a FeedForwardBlock.
Args:
dense_dim: feature size of the feedforward block.
dropout_prob: dropout probability.
init_scale: the initialization scale of the VarianceScaling used for the
feedforward layer.
name: Optional name for this Haiku module.
"""
super(FeedForwardBlock, self).__init__(name=name)
self._dense_dim = dense_dim
self._dropout_prob = dropout_prob
self._init_scale = init_scale
def __call__(self, x: jnp.ndarray) -> jnp.ndarray:
hiddens = x.shape[-1]
x = conv1d(x, num_units=self._dense_dim, init_scale=self._init_scale)
x = jax.nn.relu(x)
x = hk.dropout(hk.next_rng_key(), self._dropout_prob, x)
x = conv1d(x, num_units=hiddens, init_scale=self._init_scale)
return hk.dropout(hk.next_rng_key(), self._dropout_prob, x)
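# Minimal usage sketch for the block above: Haiku modules must be called
# inside hk.transform; the shapes and sizes below are arbitrary.
def _feed_forward_example():
  def forward(x):
    return FeedForwardBlock(dense_dim=64, dropout_prob=0.1)(x)
  model = hk.transform(forward)
  rng = jax.random.PRNGKey(0)
  x = jnp.zeros((2, 5, 16))  # [batch, timesteps, hiddens]
  params = model.init(rng, x)
  return model.apply(params, rng, x)  # Same shape as the input: (2, 5, 16).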
def get_reset_attention_mask(should_reset: jnp.ndarray) -> jnp.ndarray:
"""Maps a reset token vector into an attention mask that consists of blocks.
A sequence of should reset tokens such as:
[0, 1, 0, 1, 0, 0]
transforms into an attention mask such as:
[[1, 0, 0, 0, 0, 0],
[0, 1, 1, 0, 0, 0],
[0, 1, 1, 0, 0, 0],
[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 1, 1]]
Args:
should_reset: Reset tokens with shape [batch, timesteps].
Returns:
attention_mask: Attention mask with shape [batch, timesteps, timesteps].
"""
should_reset = jnp.cumsum(should_reset, axis=-1)
attention_mask = should_reset[:, :, None] == should_reset[:, None, :]
return attention_mask.astype(jnp.float32)
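# The docstring example above, written out: a reset at position t starts a new
# block, so tokens only attend to positions within their own segment.
def _reset_mask_example():
  should_reset = jnp.array([[0, 1, 0, 1, 0, 0]], dtype=jnp.float32)
  return get_reset_attention_mask(should_reset)  # [1, 6, 6] block mask.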
def attend(q: jnp.ndarray,
k: jnp.ndarray,
v: jnp.ndarray,
mask: Optional[jnp.ndarray] = None,
attend_fn:
Optional[Callable[[jnp.ndarray, jnp.ndarray], jnp.ndarray]] = None,
dropout_prob: float = 0.0,
extra_k: Optional[jnp.ndarray] = None,
extra_v: Optional[jnp.ndarray] = None,
extra_mask: Optional[jnp.ndarray] = None) -> jnp.ndarray:
"""Computes multi-head attention using the given query, key and value.
Args:
q: Query with shape [batch, q_timesteps, num_heads, head_dim].
k: Key with shape [batch, timesteps, num_heads, head_dim].
v: Value with shape [batch, timesteps, num_heads, head_dim].
mask: Attention mask to apply [batch, 1, q_timesteps, timesteps].
attend_fn: An optionally defined attend function. The default attend_fn is
jnp.einsum('bthd,bThd->bhtT', q, k).
dropout_prob: dropout probability on the attention weights.
extra_k: Extra keys to attend to, if provided. Note the extra keys and
values do not apply the specified attention_fn, but instead use the
default dot-product attention. [batch, timesteps_extra, num_heads,
head_dim].
extra_v: Extra values to attend to, if provided. [batch, timesteps_extra,
num_heads, head_dim].
extra_mask: Extra attention mask to apply on the extra inputs [batch, 1,
q_timesteps, timesteps_extra].
Returns:
Output of the attention with shape [batch, timesteps, hiddens]
"""
infinity_proxy = 1e9
batch, q_time, num_heads, head_dim = q.shape
hiddens = num_heads * head_dim
_, kv_time, _, _ = k.shape
expected_kv_shape = (batch, kv_time, num_heads, head_dim)
if k.shape != expected_kv_shape:
raise ValueError(
f'Expected key shape {expected_kv_shape} but got shape {k.shape}')
if v.shape != expected_kv_shape:
raise ValueError(
f'Expected value shape {expected_kv_shape} but got shape {v.shape}')
if attend_fn is not None:
attention = attend_fn(q, k)
else:
attention = jnp.einsum('bthd,bThd->bhtT', q, k)
if mask is not None:
attention = attention * mask - infinity_proxy * (1 - mask)
if extra_k is not None and extra_v is not None:
extra_time = extra_k.shape[1]
expected_extra_shape = (batch, extra_time, num_heads, head_dim)
if extra_k.shape != expected_extra_shape:
raise ValueError(
f'Expected extra key shape {expected_extra_shape} but got'
f' {extra_k.shape}')
if extra_v.shape != expected_extra_shape:
raise ValueError(
f'Expected extra value shape {expected_extra_shape} but got'
f' {extra_v.shape}')
# [B, H, t, T']
extra_attention = jnp.einsum('bthd,bThd->bhtT', q, extra_k)
if extra_mask is not None:
extra_attention = extra_attention * extra_mask - infinity_proxy * (
1 - extra_mask)
# [B, H, t, T+T']
attention = jnp.concatenate([attention, extra_attention], axis=-1)
# [B, T+T', H, D]
v = jnp.concatenate([v, extra_v], axis=1)
scale = 1. / math.sqrt(head_dim)
attention *= scale
normalized = jax.nn.softmax(attention)
if dropout_prob > 0:
normalized = hk.dropout(hk.next_rng_key(), dropout_prob, normalized)
summed = jnp.einsum('bhtT,bThd->bthd', normalized, v)
return jnp.reshape(summed, [batch, q_time, hiddens])
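# Shape-level sketch of `attend` with the default dot-product attention and no
# mask, dropout or extra inputs; all array values are placeholders.
def _attend_example():
  batch, q_time, kv_time, num_heads, head_dim = 1, 3, 3, 2, 4
  q = jnp.ones((batch, q_time, num_heads, head_dim))
  k = jnp.ones((batch, kv_time, num_heads, head_dim))
  v = jnp.ones((batch, kv_time, num_heads, head_dim))
  return attend(q, k, v)  # [batch, q_time, num_heads * head_dim] == (1, 3, 8).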
class Attention(hk.Module):
"""Attention with memory (https://arxiv.org/abs/1901.02860).
This implementation leverages Haiku `state`: past inputs are stored as Haiku
states, and at each step these cached states are updated with a rolling
window.
"""
def __init__(self,
r_w_bias: Optional[jnp.ndarray] = None,
r_r_bias: Optional[jnp.ndarray] = None,
num_heads: int = 8,
init_scale: float = 1.0,
with_final_bias: bool = False,
final_init_scale_multiplier: float = 1.,
relative_pos_clamp_len: Optional[int] = None,
dropout_prob: float = 0.0,
name: Optional[str] = None):
"""Initializes a Attention module.
Args:
r_w_bias: global content bias.
r_r_bias: global positional bias.
num_heads: number of attention heads.
init_scale: the initialization scale of the VarianceScaling used for the
linear layer.
with_final_bias: whether to let final layer have biases.
final_init_scale_multiplier: how much to scale the initialization scale of
the output layer.
relative_pos_clamp_len: clamp length of the relative position embeddings.
dropout_prob: dropout probability.
name: Optional name for this Haiku module.
"""
super(Attention, self).__init__(name=name)
self._r_w_bias = r_w_bias
self._r_r_bias = r_r_bias
self._num_heads = num_heads
self._init_scale = init_scale
self._with_final_bias = with_final_bias
self._final_init_scale = final_init_scale_multiplier * init_scale
self._relative_pos_clamp_len = relative_pos_clamp_len
self._dropout_prob = dropout_prob
def _update_cache(self,
key: jnp.ndarray,
value: jnp.ndarray,
cache_steps: Optional[int] = None,
axis: int = 1) -> jnp.ndarray:
"""Update the cache stored in hk.state."""
cache_shape = list(value.shape)
value_steps = cache_shape[axis]
if cache_steps is not None:
cache_shape[axis] += cache_steps
cache = hk.get_state(
key, shape=cache_shape, dtype=value.dtype, init=jnp.zeros)
# Overwrite at index 0, then rotate timesteps left so the newly written
# values end up at the end of the cache (most recent last).
value = jax.lax.dynamic_update_slice(
cache, value, jnp.zeros(len(cache_shape), dtype=jnp.int32))
value = jnp.roll(value, -value_steps, axis)
hk.set_state(key, value)
return value
def _update_memory(self,
mem: jnp.ndarray,
mask: jnp.ndarray,
input_length: int,
cache_steps: int,
should_reset: jnp.ndarray) -> jnp.ndarray:
"""Logic for using and updating cached activations."""
batch_size = mem.shape[0]
if cache_steps > 0:
# Tells us how much of the cache should be used.
cache_progress_idx = hk.get_state(
'cache_progress_idx', [batch_size], dtype=jnp.int32, init=jnp.zeros)
hk.set_state('cache_progress_idx', cache_progress_idx + input_length)
mem = self._update_cache('mem', mem, cache_steps=cache_steps)
if mask is None:
mask = jnp.ones((batch_size, 1, input_length, input_length))
cache_mask = (jnp.arange(cache_steps - 1, -1, -1)[None, None, None, :]
< cache_progress_idx[:, None, None, None])
cache_mask = jnp.broadcast_to(
cache_mask, (batch_size, 1, input_length, cache_steps))
mask = jnp.concatenate([cache_mask, mask], axis=-1)
if should_reset is not None:
if cache_steps > 0:
should_reset = self._update_cache('should_reset', should_reset,
cache_steps=cache_steps)
reset_mask = get_reset_attention_mask(should_reset)[:, None, :, :]
mask *= reset_mask[:, :, cache_steps:, :]
return mem, mask
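  # Illustrative example of the cache mask above (added commentary, not part
  # of the original code): with cache_steps=4 and cache_progress_idx=2, the
  # mask over cached positions (oldest to newest) is [0, 0, 1, 1], i.e. only
  # the two most recently written cache slots can be attended to.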
def __call__(self,
x: jnp.ndarray,
mask: Optional[jnp.ndarray] = None,
should_reset: Optional[jnp.ndarray] = None,
cache_steps: int = 0,
extra: Optional[jnp.ndarray] = None,
extra_mask: Optional[jnp.ndarray] = None) -> jnp.ndarray:
"""Compute the multi-head attention.
Args:
x: input [batch, x_timesteps, in_dim].
mask: attention mask [batch, 1, x_timesteps, y_timesteps].
should_reset: reset marker [batch, timesteps].
cache_steps: number of timesteps in the cache.
extra: if provided should be extra key-value input
[batch, extra_timesteps, in_dim'].
extra_mask: if provided should be the mask for extra key-value input,
[batch, extra_timesteps].
Returns:
output: attention output [batch, x_timesteps, in_dim].
"""
hiddens_in = x.shape[-1]
steps = x.shape[1]
qkv_hiddens = hiddens_in
y, mask = self._update_memory(x, mask, steps, cache_steps, should_reset)
q = conv1d(x, qkv_hiddens, init_scale=self._init_scale, with_bias=False)
k = conv1d(y, qkv_hiddens, init_scale=self._init_scale, with_bias=False)
v = conv1d(y, qkv_hiddens, init_scale=self._init_scale, with_bias=False)
batch, q_time, _ = q.shape
_, kv_time, _ = k.shape
head_dim = qkv_hiddens // self._num_heads
assert qkv_hiddens % self._num_heads == 0, 'Head dim should be an integer.'
q = jnp.reshape(q, [batch, q_time, self._num_heads, head_dim])
k = jnp.reshape(k, [batch, kv_time, self._num_heads, head_dim])
v = jnp.reshape(v, [batch, kv_time, self._num_heads, head_dim])
attend_fn = RelativePositionEmbedding(
dim=qkv_hiddens, dropout_rate=self._dropout_prob,
r_w_bias=self._r_w_bias, r_r_bias=self._r_r_bias,
init_scale=self._init_scale, clamp_len=self._relative_pos_clamp_len)
if extra is not None:
extra_k = conv1d(extra, qkv_hiddens, init_scale=self._init_scale,
with_bias=False)
extra_v = conv1d(extra, qkv_hiddens, init_scale=self._init_scale,
with_bias=False)
extra_time = extra.shape[1]
extra_k = jnp.reshape(
extra_k, [batch, extra_time, self._num_heads, head_dim])
extra_v = jnp.reshape(
extra_v, [batch, extra_time, self._num_heads, head_dim])
if extra_mask is not None:
extra_mask = extra_mask[:, None, None, :]
attn_vec = attend(q, k, v, mask=mask, attend_fn=attend_fn,
dropout_prob=self._dropout_prob,
extra_k=extra_k, extra_v=extra_v, extra_mask=extra_mask)
else:
attn_vec = attend(q, k, v, mask=mask, attend_fn=attend_fn,
dropout_prob=self._dropout_prob)
attn_out = conv1d(attn_vec, hiddens_in, with_bias=self._with_final_bias,
init_scale=self._final_init_scale)
return hk.dropout(hk.next_rng_key(), self._dropout_prob, attn_out)
class SelfAttentionBlock(hk.Module):
"""Self attention block."""
def __init__(self,
r_w_bias: Optional[jnp.ndarray] = None,
r_r_bias: Optional[jnp.ndarray] = None,
causal: bool = False,
num_heads: int = 8,
dropout_prob: float = 0.1,
dropout_attn_prob: float = 0.0,
init_scale: float = 1.0,
relative_pos_clamp_len: Optional[int] = None,
name: Optional[str] = None):
"""Initializes a SelfAttentionBlock.
Args:
r_w_bias: global content bias.
r_r_bias: global positional bias.
causal: whether to apply a causal mask to the input.
num_heads: number of attention heads.
dropout_prob: dropout probability.
dropout_attn_prob: dropout probability of the attention module.
init_scale: the initialization scale of the VarianceScaling used for the
linear layer.
relative_pos_clamp_len: clamp length of the relative position embeddings.
name: Optional name for this Haiku module.
"""
super(SelfAttentionBlock, self).__init__(name=name)
self._r_w_bias = r_w_bias
self._r_r_bias = r_r_bias
self._causal = causal
self._num_heads = num_heads
self._dropout_prob = dropout_prob
self._dropout_attn_prob = dropout_attn_prob
self._init_scale = init_scale
self._relative_pos_clamp_len = relative_pos_clamp_len
def __call__(self,
x: jnp.ndarray,
mask: Optional[jnp.ndarray] = None,
should_reset: Optional[jnp.ndarray] = None,
cache_steps: int = 0,
extra: Optional[jnp.ndarray] = None,
extra_mask: Optional[jnp.ndarray] = None) -> jnp.ndarray:
"""Computes the outputs of the self attention block.
Args:
x: query input [batch, x_timesteps, in_dim].
mask: attention mask [batch, 1, 1, x_timesteps].
should_reset: reset marker [batch, timesteps].
cache_steps: number of timesteps in the cache.
extra: if provided should be extra key-value input
[batch, extra_timesteps, in_dim'].
extra_mask: if provided should be the mask for extra key-value input,
[batch, extra_timesteps].
Returns:
output: block output [batch, x_timesteps, in_dim].
"""
if self._causal:
timesteps = x.shape[1]
batch_size = x.shape[0]
t = jnp.arange(timesteps, dtype=jnp.int32)
causal_mask = (t[:, None] >= t[None, :])[None, None, :, :]
causal_mask = causal_mask.astype(x.dtype)
if mask is None:
mask = jnp.broadcast_to(
causal_mask, (batch_size, 1, timesteps, timesteps))
else:
mask *= causal_mask
x = Attention(
self._r_w_bias,
self._r_r_bias,
num_heads=self._num_heads,
init_scale=self._init_scale,
relative_pos_clamp_len=self._relative_pos_clamp_len,
dropout_prob=self._dropout_attn_prob)(
x, mask=mask, should_reset=should_reset,
cache_steps=cache_steps, extra=extra, extra_mask=extra_mask)
else:
x = Attention(
self._r_w_bias,
self._r_r_bias,
num_heads=self._num_heads,
init_scale=self._init_scale,
dropout_prob=self._dropout_attn_prob)(
x, mask=mask, extra=extra, extra_mask=extra_mask)
return hk.dropout(hk.next_rng_key(), self._dropout_prob, x)
class GPT2Block(hk.Module):
"""GPT-2 style transformer block with memory."""
def __init__(self,
r_w_bias: Optional[jnp.ndarray] = None,
r_r_bias: Optional[jnp.ndarray] = None,
causal: bool = True,
dense_dim: int = 2100,
dropout_prob: float = 0.1,
dropout_attn_prob: float = 0.0,
num_heads: int = 8,
self_att_init_scale: float = 0.02,
dense_init_scale: float = 0.02,
relative_pos_clamp_len: Optional[int] = None,
name: Optional[str] = None):
"""Initializes a GPT2Block.
Args:
r_w_bias: global content bias.
r_r_bias: global positional bias.
causal: whether to apply a causal mask to the input.
dense_dim: feature size of the feedforward block.
dropout_prob: dropout probability.
dropout_attn_prob: dropout probability of the attention module.
num_heads: number of attention heads.
self_att_init_scale: the initialization scale of the VarianceScaling
used for the linear layer in the attention module.
dense_init_scale: the initialization scale of the VarianceScaling
used for the linear layer in the feedforward module.
relative_pos_clamp_len: clamp length of the relative position embeddings.
name: Optional name for this Haiku module.
"""
super(GPT2Block, self).__init__(name=name)
self._r_w_bias = r_w_bias
self._r_r_bias = r_r_bias
self._causal = causal
self._dense_dim = dense_dim
self._dropout_prob = dropout_prob
self._dropout_attn_prob = dropout_attn_prob
self._num_heads = num_heads
self._self_att_init_scale = self_att_init_scale
self._dense_init_scale = dense_init_scale
self._relative_pos_clamp_len = relative_pos_clamp_len
def __call__(self,
x: jnp.ndarray,
mask: Optional[jnp.ndarray] = None,
is_training: bool = True,
should_reset: Optional[jnp.ndarray] = None,
cache_steps: int = 0,
extra: Optional[jnp.ndarray] = None,
extra_mask: Optional[jnp.ndarray] = None) -> jnp.ndarray:
"""Computes the outputs of the GPT-2 block.
Args:
x: query input [batch, x_timesteps, in_dim].
mask: attention mask [batch, 1, 1, x_timesteps].
is_training: whether the current stage is training or not.
should_reset: reset marker [batch, timesteps].
cache_steps: number of timesteps in the cache.
extra: if provided should be extra key-value input
[batch, extra_timesteps, in_dim'].
extra_mask: if provided should be the mask for extra key-value input,
[batch, extra_timesteps].
Returns:
output: block output [batch, x_timesteps, in_dim].
"""
dropout_prob = self._dropout_prob if is_training else 0.0
dropout_attn_prob = self._dropout_attn_prob if is_training else 0.0
x = layer_norm(x + SelfAttentionBlock(
self._r_w_bias,
self._r_r_bias,
causal=self._causal,
num_heads=self._num_heads,
dropout_prob=dropout_prob,
dropout_attn_prob=dropout_attn_prob,
init_scale=self._self_att_init_scale,
relative_pos_clamp_len=self._relative_pos_clamp_len)(
x, mask=mask, should_reset=should_reset,
cache_steps=cache_steps, extra=extra, extra_mask=extra_mask))
x = layer_norm(x + FeedForwardBlock(
dense_dim=self._dense_dim,
dropout_prob=dropout_prob,
init_scale=self._dense_init_scale)(x))
return x
| deepmind-research-master | wikigraphs/wikigraphs/model/transformer_block.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# WikiGraphs is licensed under the terms of the Creative Commons
# Attribution-ShareAlike 4.0 International (CC BY-SA 4.0) license.
#
# WikiText-103 data (unchanged) is licensed by Salesforce.com, Inc. under the
# terms of the Creative Commons Attribution-ShareAlike 4.0 International
# (CC BY-SA 4.0) license. You can find details about CC BY-SA 4.0 at:
#
# https://creativecommons.org/licenses/by-sa/4.0/legalcode
#
# Freebase data is licensed by Google LLC under the terms of the Creative
# Commons CC BY 4.0 license. You may obtain a copy of the License at:
#
# https://creativecommons.org/licenses/by/4.0/legalcode
#
# ==============================================================================
"""Tests for wikigraphs.model.transformer."""
from absl import logging
from absl.testing import absltest
import haiku as hk
import jax
import jax.numpy as jnp
import jraph
import numpy as np
import optax
from wikigraphs.model import embedding
from wikigraphs.model import transformer as models
def tree_size(nest):
return sum(x.size for x in jax.tree_util.tree_leaves(nest))
class TransformerXlTest(absltest.TestCase):
def test_transformer_param_count(self):
seqs = np.array([[1, 2, 3, 0, 0],
[3, 3, 5, 1, 2]], dtype=np.int32)
x = seqs[:, :-1]
y = seqs[:, 1:]
vocab_size = 267_735
def forward(inputs, labels):
input_mask = (labels != 0).astype(jnp.float32)
model = models.TransformerXL(
vocab_size=vocab_size,
emb_dim=210,
num_layers=2,
num_heads=10,
dropout_prob=0.0,
dropout_attn_prob=0.0,
self_att_init_scale=0.02,
dense_init_scale=0.02,
dense_dim=2100,
cutoffs=(20000, 40000, 200000), # WikiText-103
relative_pos_clamp_len=None,
)
return model.loss(inputs, labels, mask=input_mask, cache_steps=2)
init_fn, apply_fn = hk.transform_with_state(forward)
key = hk.PRNGSequence(8)
params, state = init_fn(next(key), x, y)
out, _ = apply_fn(params, state, next(key), x, y)
loss, metrics = out
logging.info('loss: %g', loss)
logging.info('metrics: %r', metrics)
param_count = tree_size(params)
self.assertEqual(param_count, 58_704_438)
def test_transformer_with_extra_runs(self):
extra = np.array([[1, 1, 0, 0],
[2, 2, 2, 2],
[3, 3, 3, 0]], dtype=np.int32)
seqs = np.array([[1, 2, 3, 0, 0],
[2, 4, 5, 6, 0],
[3, 3, 5, 1, 2]], dtype=np.int32)
x = seqs[:, :-1]
y = seqs[:, 1:]
vocab_size = seqs.max() + 1
extra_vocab_size = extra.max() + 1
def forward(inputs, labels, extra):
input_mask = (labels != 0).astype(jnp.float32)
extra_mask = (extra != 0).astype(jnp.float32)
extra = hk.Embed(vocab_size=extra_vocab_size, embed_dim=16)(extra)
model = models.TransformerXL(
vocab_size=vocab_size,
emb_dim=16,
num_layers=2,
num_heads=4,
cutoffs=[],
)
return model.loss(inputs, labels, mask=input_mask,
extra=extra, extra_mask=extra_mask)
init_fn, apply_fn = hk.transform_with_state(forward)
key = hk.PRNGSequence(8)
params, state = init_fn(next(key), x, y, extra)
out, _ = apply_fn(params, state, next(key), x, y, extra)
loss, metrics = out
logging.info('loss: %g', loss)
logging.info('metrics: %r', metrics)
def test_graph_embedding_model_runs(self):
graph = jraph.GraphsTuple(
nodes=np.array([[0, 1, 1],
[1, 2, 0],
[0, 3, 0],
[0, 4, 4]], dtype=np.float32),
edges=np.array([[1, 1],
[2, 2],
[3, 3]], dtype=np.float32),
senders=np.array([0, 1, 2], dtype=np.int32),
receivers=np.array([1, 2, 3], dtype=np.int32),
n_node=np.array([4], dtype=np.int32),
n_edge=np.array([3], dtype=np.int32),
globals=None)
embed_dim = 3
def forward(graph):
return embedding.GraphEmbeddingModel(embed_dim=3, num_layers=2)(graph)
init_fn, apply_fn = hk.without_apply_rng(hk.transform(forward))
key = hk.PRNGSequence(8)
params = init_fn(next(key), graph)
out = apply_fn(params, graph)
self.assertEqual(out.nodes.shape, (graph.nodes.shape[0], embed_dim))
self.assertEqual(out.edges.shape, (graph.edges.shape[0], embed_dim))
np.testing.assert_array_equal(out.senders, graph.senders)
np.testing.assert_array_equal(out.receivers, graph.receivers)
np.testing.assert_array_equal(out.n_node, graph.n_node)
def test_unpack_and_pad(self):
x = np.array([1, 1, 2, 2, 2, 3, 4, 4], dtype=np.float32)
s = np.array([2, 3, 1, 2], dtype=np.int32)
tensors, mask = models.unpack_and_pad(x, s, pad_size=s.max(), pad_value=0)
np.testing.assert_array_equal(
tensors,
[[1, 1, 0],
[2, 2, 2],
[3, 0, 0],
[4, 4, 0]])
np.testing.assert_array_equal(
mask,
[[1, 1, 0],
[1, 1, 1],
[1, 0, 0],
[1, 1, 0]])
# [n, 1] tensor
x = np.array([1, 1, 2, 2, 2, 3, 4, 4], dtype=np.float32)[:, None]
s = np.array([2, 3, 1, 2], dtype=np.int32)
tensors, mask = models.unpack_and_pad(x, s, pad_size=s.max(), pad_value=0)
np.testing.assert_array_equal(
tensors,
np.array([[1, 1, 0],
[2, 2, 2],
[3, 0, 0],
[4, 4, 0]])[:, :, None])
np.testing.assert_array_equal(
mask,
[[1, 1, 0],
[1, 1, 1],
[1, 0, 0],
[1, 1, 0]])
def test_graph_conditioned_transformer_runs(self):
graphs = jraph.GraphsTuple(
nodes=np.ones((4, 3), dtype=np.float32),
edges=np.ones((3, 1), dtype=np.float32),
senders=np.array([0, 2, 3], dtype=np.int32),
receivers=np.array([1, 3, 2], dtype=np.int32),
n_node=np.array([2, 2], dtype=np.int32),
n_edge=np.array([1, 2], dtype=np.int32),
globals=None,
)
seqs = np.array([[1, 1, 0],
[2, 2, 2]], dtype=np.int32)
vocab_size = seqs.max() + 1
embed_dim = 8
x = seqs[:, :-1]
y = seqs[:, 1:]
def forward(graphs, inputs, labels):
graphs = models.GraphEmbeddingModel(embed_dim=embed_dim,
num_layers=2)(graphs)
extra, extra_mask = models.unpack_and_pad(graphs.nodes,
graphs.n_node,
graphs.n_node.max())
input_mask = (labels != 0).astype(jnp.float32)
transformer = models.TransformerXL(vocab_size=vocab_size,
emb_dim=embed_dim,
num_layers=2,
num_heads=4,
cutoffs=[])
return transformer.loss(inputs, labels, mask=input_mask, extra=extra,
extra_mask=extra_mask)
init_fn, apply_fn = hk.transform_with_state(forward)
key = hk.PRNGSequence(8)
params, state = init_fn(next(key), graphs, x, y)
out, _ = apply_fn(params, state, next(key), graphs, x, y)
loss, metrics = out
logging.info('loss: %g', loss)
logging.info('metrics: %r', metrics)
def test_graph_conditioned_transformer_learns(self):
graphs = jraph.GraphsTuple(
nodes=np.ones((4, 3), dtype=np.float32),
edges=np.ones((3, 1), dtype=np.float32),
senders=np.array([0, 2, 3], dtype=np.int32),
receivers=np.array([1, 3, 2], dtype=np.int32),
n_node=np.array([2, 2], dtype=np.int32),
n_edge=np.array([1, 2], dtype=np.int32),
globals=None,
)
seqs = np.array([[1, 2, 2, 0],
[1, 3, 3, 3]], dtype=np.int32)
vocab_size = seqs.max() + 1
embed_dim = 8
max_graph_size = graphs.n_node.max()
logging.info('Training seqs: %r', seqs)
x = seqs[:, :-1]
y = seqs[:, 1:]
def model_fn(vocab_size, embed_dim):
return models.Graph2TextTransformer(
vocab_size=vocab_size,
emb_dim=embed_dim,
num_layers=2,
num_heads=4,
cutoffs=[],
gnn_embed_dim=embed_dim,
gnn_num_layers=2)
def forward(graphs, inputs, labels, max_graph_size):
input_mask = (labels != 0).astype(jnp.float32)
return model_fn(vocab_size, embed_dim).loss(
graphs, max_graph_size, False, inputs, labels, mask=input_mask)
init_fn, apply_fn = hk.transform_with_state(forward)
rng = hk.PRNGSequence(8)
params, state = init_fn(next(rng), graphs, x, y, max_graph_size)
def apply(*args, **kwargs):
out, state = apply_fn(*args, **kwargs)
return out[0], (out[1], state)
apply = jax.jit(apply, static_argnums=6)
optimizer = optax.chain(
optax.scale_by_adam(),
optax.scale(-1e-3))
opt_state = optimizer.init(params)
for i in range(500):
(loss, model_state), grad = jax.value_and_grad(apply, has_aux=True)(
params, state, next(rng), graphs, x, y, max_graph_size)
metrics, state = model_state
updates, opt_state = optimizer.update(grad, opt_state, params)
params = optax.apply_updates(params, updates)
if (i + 1) % 100 == 0:
logging.info(
'Step %d, %r', i + 1, {k: float(v) for k, v in metrics.items()})
logging.info('Loss: %.8f', loss)
self.assertLess(loss, 1.0)
def test_bow_transformer_runs(self):
bow = np.array([[0, 0, 1, 0, 2, 0, 0, 1],
[0, 1, 0, 0, 1, 0, 1, 0],
[1, 0, 0, 0, 1, 0, 0, 1]], dtype=np.int32)
seqs = np.array([[1, 2, 3, 0, 0],
[2, 4, 5, 6, 0],
[3, 3, 5, 1, 2]], dtype=np.int32)
x = seqs[:, :-1]
y = seqs[:, 1:]
vocab_size = seqs.max() + 1
def forward(bow, inputs, labels):
model = models.Bow2TextTransformer(
vocab_size=vocab_size,
emb_dim=16,
num_layers=2,
num_heads=4,
cutoffs=[])
return model.loss(bow, inputs, labels)
init_fn, apply_fn = hk.transform_with_state(forward)
key = hk.PRNGSequence(8)
params, state = init_fn(next(key), bow, x, y)
out, _ = apply_fn(params, state, next(key), bow, x, y)
loss, metrics = out
logging.info('loss: %g', loss)
logging.info('metrics: %r', metrics)
def test_bow_transformer_learns(self):
bow = np.array([[0, 0, 1, 0, 2, 0, 0, 1],
[0, 1, 0, 0, 1, 0, 1, 0],
[1, 0, 0, 0, 1, 0, 0, 1]], dtype=np.int32)
seqs = np.array([[1, 2, 2, 3, 0, 0],
[1, 2, 4, 5, 6, 0],
[1, 3, 3, 5, 4, 2]], dtype=np.int32)
x = seqs[:, :-1]
y = seqs[:, 1:]
vocab_size = seqs.max() + 1
def model_fn():
return models.Bow2TextTransformer(
vocab_size=vocab_size,
emb_dim=16,
num_layers=2,
num_heads=4,
cutoffs=[])
def loss_fn(bow, inputs, labels):
mask = (labels != 0).astype(jnp.float32)
return model_fn().loss(bow, inputs, labels, mask=mask)
init_fn, apply_fn = hk.transform_with_state(loss_fn)
key = hk.PRNGSequence(8)
params, state = init_fn(next(key), bow, x, y)
def apply(*args, **kwargs):
out, state = apply_fn(*args, **kwargs)
return out[0], (out[1], state)
value_and_grad = jax.jit(jax.value_and_grad(apply, has_aux=True))
optimizer = optax.chain(
optax.scale_by_adam(),
optax.scale(-1e-3))
opt_state = optimizer.init(params)
for i in range(800):
(loss, model_state), grad = value_and_grad(
params, state, next(key), bow, x, y)
metrics, state = model_state
updates, opt_state = optimizer.update(grad, opt_state, params)
params = optax.apply_updates(params, updates)
if (i + 1) % 100 == 0:
logging.info('Step %d, %r', i + 1,
{k: float(v) for k, v in metrics.items()})
logging.info('Loss: %.8f', loss)
self.assertLess(loss, 0.1)
if __name__ == '__main__':
absltest.main()
| deepmind-research-master | wikigraphs/wikigraphs/model/transformer_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# WikiGraphs is licensed under the terms of the Creative Commons
# Attribution-ShareAlike 4.0 International (CC BY-SA 4.0) license.
#
# WikiText-103 data (unchanged) is licensed by Salesforce.com, Inc. under the
# terms of the Creative Commons Attribution-ShareAlike 4.0 International
# (CC BY-SA 4.0) license. You can find details about CC BY-SA 4.0 at:
#
# https://creativecommons.org/licenses/by-sa/4.0/legalcode
#
# Freebase data is licensed by Google LLC under the terms of the Creative
# Commons CC BY 4.0 license. You may obtain a copy of the License at:
#
# https://creativecommons.org/licenses/by/4.0/legalcode
#
# ==============================================================================
"""Transformer embedding modules."""
from typing import List, Optional
import haiku as hk
from haiku import initializers as init
import jax
import jax.numpy as jnp
import jraph
from wikigraphs.model import graph_net as gn
def get_pos_start(timesteps: int, batch_size: int) -> jnp.ndarray:
"""Find the right slice of positional embeddings for incremental sampling."""
pos_start = hk.get_state(
'cache_progress_idx', [batch_size], dtype=jnp.int32, init=jnp.zeros)
hk.set_state('cache_progress_idx', pos_start + timesteps)
return pos_start
class SinusoidalPositionEmbedding(hk.Module):
"""Position encoding, using mixture of sinusoidal signals."""
def __init__(self,
dim: int,
cache_steps: int = 0,
reverse_order: bool = False,
clamp_len: Optional[int] = None,
name: Optional[str] = None):
"""Initialize a SinusoidalPositionEmbedding.
Args:
dim: Embedding dimension.
cache_steps: The length of the memory.
reverse_order: If set to True, position index is reversed.
      clamp_len: positions beyond clamp_len will be reset to clamp_len;
        defaults to no clamping.
name: Optional name for this Haiku module.
"""
super(SinusoidalPositionEmbedding, self).__init__(name=name)
self._dim = dim
self._cache_steps = cache_steps
self._reverse_order = reverse_order
self._clamp_len = clamp_len
self._inv_freq = 1.0 / (
10000 ** (jnp.arange(0, dim, 2).astype(jnp.float32) / dim))
def __call__(self, timesteps: int, batch_size: int) -> jnp.ndarray:
"""Computes the sinusoidal position embedding.
Args:
timesteps: The length of the sequence.
batch_size: The size of the batch.
Returns:
Sinusoidal position embedding.
"""
full_length = timesteps + self._cache_steps
if self._reverse_order:
positions = jnp.arange(full_length - 1, -1, -1)
positions = jnp.repeat(positions[None, :], batch_size, axis=0)
else:
if self._cache_steps > 0:
positions = (get_pos_start(timesteps, batch_size)[:, None]
+ jnp.arange(timesteps)[None, :])
else:
positions = jnp.arange(0, full_length)
positions = jnp.repeat(positions[None, :], batch_size, axis=0)
if self._clamp_len is not None:
positions = jnp.minimum(positions, self._clamp_len)
scaled_time = positions[:, :, None] * self._inv_freq[None, None, :]
return jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=2)
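# Layout note for the embedding above (added commentary, not part of the
# original code): rather than interleaving sine and cosine channels, this
# module concatenates them, i.e. emb[..., :dim // 2] = sin(pos * inv_freq)
# and emb[..., dim // 2:] = cos(pos * inv_freq).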
def relative_shift(x: jnp.ndarray) -> jnp.ndarray:
"""Shift the relative logits."""
x_shape = list(x.shape)
x = jnp.pad(x, [[0, 0], [0, 0], [0, 0], [1, 0]])
x = jnp.reshape(
x, [x_shape[0], x_shape[1], x_shape[3] + 1, x_shape[2]])[:, :, 1:, :]
x = jnp.reshape(x, x_shape)
return x
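# Worked example for relative_shift above (added commentary, not part of the
# original code): with query length 2 and key length 3,
#   [[x00, x01, x02],        [[x01, x02,   0],
#    [x10, x11, x12]]   ->    [x10, x11, x12]]
# i.e. row t is shifted left by (query_len - 1 - t) positions; the entries
# that wrap around are padding/garbage and fall in the region removed by the
# causal attention mask.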
class RelativePositionEmbedding(hk.Module):
"""Position encoding, using relative positions than absolute positions."""
def __init__(self,
dim: int,
dropout_rate: float,
r_w_bias: jnp.ndarray,
r_r_bias: jnp.ndarray,
init_scale: float = 0.02,
clamp_len: Optional[int] = None,
name: Optional[str] = None):
"""Initialize a RelativePositionEmbedding.
Args:
dim: Embedding dimension.
dropout_rate: dropout rate.
r_w_bias: global content bias.
r_r_bias: global positional bias.
init_scale: the initialization scale of the RandomNormal used for the
linear layer.
      clamp_len: positions beyond clamp_len will be reset to clamp_len;
        defaults to no clamping.
name: Optional name for this Haiku module.
"""
super(RelativePositionEmbedding, self).__init__(name=name)
self._dim = dim
self._dropout_rate = dropout_rate
self._r_w_bias = r_w_bias
self._r_r_bias = r_r_bias
self._init_scale = init_scale
self._sinusoidal_pos_emb = SinusoidalPositionEmbedding(
dim=dim,
reverse_order=True,
clamp_len=clamp_len,
name=name)
def __call__(self, q: jnp.ndarray, k: jnp.ndarray) -> jnp.ndarray:
"""Computes the relative position embedding.
Args:
q: The query.
k: The key.
Returns:
Relative position embedding.
"""
# Use key instead of query to obtain the length.
batch_size, key_length, num_heads, head_dim = list(k.shape)
# Content based addressing and global content bias
content_score = jnp.einsum('bthd,bThd->bhtT', q + self._r_w_bias, k)
# Relative position encoding
positional_encodings = self._sinusoidal_pos_emb(key_length, batch_size)
positional_encodings = hk.dropout(hk.next_rng_key(), self._dropout_rate,
positional_encodings)
rel_pos_emb = hk.Conv1D(
output_channels=self._dim, kernel_shape=1, with_bias=False,
w_init=init.RandomNormal(stddev=self._init_scale))(positional_encodings)
rel_pos_emb = jnp.reshape(rel_pos_emb, [
batch_size, key_length, num_heads, head_dim])
# Content dependent positional bias and global positional bias
rel_pos_score = jnp.einsum('bthd,bThd->bhtT', q + self._r_r_bias,
rel_pos_emb)
rel_pos_score = relative_shift(rel_pos_score)
assert content_score.shape == rel_pos_score.shape
return content_score + rel_pos_score
def hierarchical_logprobs(
logits: jnp.ndarray,
class_logits: jnp.ndarray,
cutoffs: List[int]) -> jnp.ndarray:
"""Hierarchical log-probs for adaptive softmax."""
sizes = [y - x for x, y in zip(cutoffs[:-1], cutoffs[1:])]
num_tails = len(sizes) - 1
split_logits = jnp.split(logits, cutoffs[1:-1], axis=-1)
all_head_logits = jnp.concatenate([split_logits[0], class_logits], -1)
# Mask out item 0, the NULL token
all_head_logits += jnp.concatenate(
[jnp.ones([1], dtype=logits.dtype) * -10,
jnp.zeros([sizes[0] + num_tails - 1], dtype=logits.dtype)], 0)
all_head_logprobs = jax.nn.log_softmax(all_head_logits)
head_logprobs, class_logprobs = jnp.split(all_head_logprobs,
[sizes[0]], axis=-1)
tail_logprobs = []
for i, tail_size in enumerate(sizes[1:]): # pylint: disable=unused-variable
tail_logprobs += [jax.nn.log_softmax(split_logits[i + 1])
+ class_logprobs[..., [i]]]
return jnp.concatenate([head_logprobs] + tail_logprobs, -1)
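# Illustrative example of the two-level softmax above (added commentary, not
# part of the original code): with cutoffs [0, 3, 5, 8], sizes = [3, 2, 3],
# so the head softmax covers the 3 most frequent tokens plus 2 cluster
# logits, and for a token v in tail cluster i,
#   log p(v) = log p(cluster i | head) + log p(v | tail cluster i).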
class AdaptiveSoftmaxEmbedding(hk.Module):
"""Adaptive inputs and softmax (https://arxiv.org/abs/1809.10853)."""
def __init__(self,
dim: int,
vocab_size: int,
cutoffs: List[int],
tail_shrink_factor: int = 4,
hierarchical: bool = True,
init_std: float = 0.02,
init_proj_std: float = 0.01,
dtype: jnp.dtype = jnp.float32,
name: Optional[str] = None):
"""Initialize a AdaptiveSoftmaxEmbedding.
Args:
dim: dimensionality of the hidden space.
vocab_size: the size of the vocabulary.
cutoffs: the cutoff indices of the vocabulary used for the adaptive
softmax embedding.
tail_shrink_factor: how many times to shrink the hidden dimensionality
for low-frequency vocabulary after each cutoff.
hierarchical: whether to use hierarchical softmax.
init_std: standard deviation of the Normal distribution used to initialize
the embedding weights.
init_proj_std: standard deviation of the Normal distribution used to
initialize the projection weights.
dtype: Optional data type default to jnp.float32.
name: Optional name for this Haiku module.
"""
super(AdaptiveSoftmaxEmbedding, self).__init__(name=name)
self._hidden_size = dim
self._vocab_size = vocab_size
self._cutoffs = [0] + list(cutoffs) + [self._vocab_size]
self._tail_shrink_factor = tail_shrink_factor
self._hierarchical = hierarchical
self._dtype = dtype
self._embeddings = []
self._projections = []
self._bias = hk.get_parameter(
'bias', [self._vocab_size], dtype=self._dtype, init=jnp.zeros)
l_cutoffs = self._cutoffs[:-1]
r_cutoffs = self._cutoffs[1:]
for i, (l_cutoff, r_cutoff) in enumerate(zip(l_cutoffs, r_cutoffs)):
hidden_size = self._hidden_size // (self._tail_shrink_factor ** i)
embedding = hk.get_parameter(
f'embeddings_{l_cutoff}_{r_cutoff}',
[r_cutoff - l_cutoff, hidden_size],
dtype=self._dtype,
init=hk.initializers.RandomNormal(stddev=init_std))
self._embeddings += [embedding]
if self._tail_shrink_factor != 1:
projection = hk.get_parameter(
f'projection_{l_cutoff}_{r_cutoff}',
[hidden_size, self._hidden_size],
dtype=self._dtype,
init=hk.initializers.RandomNormal(stddev=init_proj_std))
self._projections += [projection]
if self._tail_shrink_factor != 1:
self._output_projection = hk.get_parameter(
'output_head_projection',
[self._hidden_size, self._hidden_size],
dtype=self._dtype,
init=hk.initializers.RandomNormal(stddev=init_proj_std))
if self._hierarchical:
self._class_weights = hk.get_parameter(
'tail_class_weights',
[self._hidden_size, len(cutoffs)],
init=hk.initializers.RandomNormal(stddev=init_std))
self._class_bias = hk.get_parameter(
'tail_class_bias',
[len(cutoffs)],
dtype=self._dtype,
init=jnp.zeros)
@hk.transparent
def build_embeddings(self):
"""Builds input embeddings."""
if self._projections:
embedding_mat = [
jnp.dot(emb, proj) for emb, proj in zip(self._embeddings,
self._projections)]
else:
embedding_mat = self._embeddings
input_embeddings = jnp.concatenate(embedding_mat, 0)
return input_embeddings
@hk.transparent
def build_output_embeddings(self):
"""Builds separate output embeddings."""
if self._projections:
projections = [self._output_projection] + self._projections[1:]
embedding_mat = [jnp.dot(emb, proj)
for emb, proj in zip(self._embeddings, projections)]
else:
embedding_mat = self._embeddings
output_embeddings = jnp.concatenate(embedding_mat, 0)
return jnp.transpose(output_embeddings)
def embed_input(self, input_tokens: jnp.ndarray) -> jnp.ndarray:
"""Embeds the input."""
assert jnp.issubdtype(input_tokens.dtype, jnp.integer)
input_embeddings = self.build_embeddings()
embedded_inputs = input_embeddings[input_tokens]
return embedded_inputs * self._hidden_size ** 0.5
def embed_output(self, inputs: jnp.ndarray) -> jnp.ndarray:
"""Outputs logits."""
output_embs = self.build_output_embeddings()
logits = jnp.einsum('btd,dv->btv', inputs, output_embs) + self._bias
if self._hierarchical:
class_logits = jnp.dot(inputs, self._class_weights) + self._class_bias
logprobs = hierarchical_logprobs(logits, class_logits, self._cutoffs)
return logprobs
else:
return logits
class GraphEmbeddingModel(hk.Module):
"""A single graph network for embedding graph data."""
def __init__(self,
embed_dim: int,
num_layers: int,
msg_hidden_size_factor: int = 2,
use_layer_norm: bool = False,
name: Optional[str] = None):
"""Constructor.
Args:
embed_dim: node embedding size.
num_layers: number of message passing layers to use.
msg_hidden_size_factor: size of the message network hiddens as a factor
of embed_dim.
use_layer_norm: whether to apply layer norm on node updates.
name: optional name for this module.
"""
super().__init__(name=name)
self._embed_dim = embed_dim
self._num_layers = num_layers
self._msg_hidden_size_factor = msg_hidden_size_factor
self._use_layer_norm = use_layer_norm
def __call__(self, graphs: jraph.GraphsTuple) -> jraph.GraphsTuple:
"""Compute embeddings for each node in the graphs.
Args:
graphs: a set of graphs batched into a single graph. The nodes and edges
are represented as feature tensors.
Returns:
graphs: new graph with node embeddings updated (shape [n_nodes,
embed_dim]).
"""
nodes = hk.Linear(self._embed_dim)(graphs.nodes)
edges = hk.Linear(self._embed_dim)(graphs.edges)
nodes = hk.LayerNorm(axis=-1, create_scale=True, create_offset=True)(
jax.nn.gelu(nodes))
edges = hk.LayerNorm(axis=-1, create_scale=True, create_offset=True)(
jax.nn.gelu(edges))
graphs = graphs._replace(nodes=nodes, edges=edges)
graphs = gn.SimpleGraphNet(
num_layers=self._num_layers,
msg_hidden_size_factor=self._msg_hidden_size_factor,
layer_norm=self._use_layer_norm)(graphs)
return graphs
| deepmind-research-master | wikigraphs/wikigraphs/model/embedding.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# WikiGraphs is licensed under the terms of the Creative Commons
# Attribution-ShareAlike 4.0 International (CC BY-SA 4.0) license.
#
# WikiText-103 data (unchanged) is licensed by Salesforce.com, Inc. under the
# terms of the Creative Commons Attribution-ShareAlike 4.0 International
# (CC BY-SA 4.0) license. You can find details about CC BY-SA 4.0 at:
#
# https://creativecommons.org/licenses/by-sa/4.0/legalcode
#
# Freebase data is licensed by Google LLC under the terms of the Creative
# Commons CC BY 4.0 license. You may obtain a copy of the License at:
#
# https://creativecommons.org/licenses/by/4.0/legalcode
#
# ==============================================================================
"""Graph net utils."""
from typing import Union, List, Optional
import haiku as hk
import jax
import jax.numpy as jnp
import jraph
import numpy as np
ArrayType = Union[np.ndarray, jnp.ndarray]
def pad_size(in_size: int):
out_size = 1
while out_size < in_size:
out_size *= 2
return out_size
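# For example (added commentary, not part of the original code):
# pad_size(5) == 8 and pad_size(8) == 8.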
def pad_graphs(
graphs: jraph.GraphsTuple,
pad_n_nodes: Optional[int] = None,
pad_n_edges: Optional[int] = None) -> jraph.GraphsTuple:
"""Pad graphs to have a canonical number of nodes and edges.
  Here we pad the number of nodes and the number of edges to powers of 2 by
  adding a placeholder graph to the end of the batch, so the batch gets at most
  2x as large as before, and the number of graphs increases by 1.
  Note that this method always adds at least one new node to the placeholder
  graph, to make sure any padding edges added are valid.
Args:
graphs: a batch of graphs.
pad_n_nodes: (optional) number of nodes to pad to.
pad_n_edges: (optional) number of edges to pad to.
Returns:
padded: the input batch padded to canonical sizes.
"""
n_nodes, node_dim = graphs.nodes.shape
n_edges, edge_dim = graphs.edges.shape
# Add at least one extra node to the placeholder graph.
if pad_n_nodes is None:
pad_n_nodes = pad_size(n_nodes + 1)
if pad_n_edges is None:
pad_n_edges = pad_size(n_edges)
nodes = np.concatenate([
graphs.nodes,
np.zeros((pad_n_nodes - n_nodes, node_dim), dtype=graphs.nodes.dtype)
], axis=0)
edges = np.concatenate([
graphs.edges,
np.zeros((pad_n_edges - n_edges, edge_dim), dtype=graphs.edges.dtype)
], axis=0)
# Add padding edges
senders = np.concatenate([
graphs.senders,
np.full(pad_n_edges - n_edges, n_nodes, dtype=graphs.senders.dtype)
], axis=0)
receivers = np.concatenate([
graphs.receivers,
np.full(pad_n_edges - n_edges, n_nodes, dtype=graphs.receivers.dtype)
], axis=0)
n_node = np.concatenate([
graphs.n_node, np.full(1, pad_n_nodes - n_nodes)], axis=0)
n_edge = np.concatenate([
graphs.n_edge, np.full(1, pad_n_edges - n_edges)], axis=0)
return jraph.GraphsTuple(
nodes=nodes, edges=edges, senders=senders, receivers=receivers,
n_node=n_node, n_edge=n_edge, globals=None)
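# Illustrative example for pad_graphs above (added commentary, not part of the
# original code): a batch with 3 nodes and 2 edges in total is padded to
# pad_size(3 + 1) = 4 nodes and pad_size(2) = 2 edges, so the returned batch
# gains one placeholder graph with n_node = 1 and n_edge = 0.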
def batch_graphs_by_device(
graphs: List[jraph.GraphsTuple],
num_devices: int
) -> List[jraph.GraphsTuple]:
"""Batch a list of graphs into num_devices batched graphs.
The input graphs are grouped into num_devices groups. Within each group the
graphs are merged. This is needed for parallelizing the graphs using pmap.
Args:
graphs: a list of graphs to be merged.
num_devices: the number of local devices.
Returns:
graph: a size num_devices list of merged graphs.
"""
bs = len(graphs)
assert bs % num_devices == 0, (
'Batch size {} is not divisible by {} devices.'.format(bs, num_devices))
bs_per_device = bs // num_devices
graphs_on_devices = []
for i in range(num_devices):
graphs_on_device_i = graphs[i*bs_per_device:(i+1)*bs_per_device]
graphs_on_device_i = jraph.batch(graphs_on_device_i)
graphs_on_devices.append(graphs_on_device_i)
return graphs_on_devices
def pad_graphs_by_device(graphs: List[jraph.GraphsTuple]) -> jraph.GraphsTuple:
"""Pad and concatenate the list of graphs.
Each graph in the list is padded according to the maximum n_nodes and n_edges
in the list, such that all graphs have the same length. Then they are
  concatenated. This is needed for pmap.
Args:
graphs: a list of graphs.
Returns:
graph: a single padded and merged graph.
"""
# Add at least one extra node to the placeholder graph.
pad_n_nodes = pad_size(max([g.nodes.shape[0] for g in graphs]) + 1)
pad_n_edges = pad_size(max([g.edges.shape[0] for g in graphs]))
padded_graphs = [pad_graphs(g, pad_n_nodes, pad_n_edges) for g in graphs]
nodes = []
edges = []
senders = []
receivers = []
n_node = []
n_edge = []
for g in padded_graphs:
assert g.nodes.shape[0] == pad_n_nodes
assert g.edges.shape[0] == pad_n_edges
assert g.senders.size == pad_n_edges
assert g.receivers.size == pad_n_edges
assert g.n_node.size == padded_graphs[0].n_node.size
assert g.n_edge.size == padded_graphs[0].n_edge.size
nodes.append(g.nodes)
edges.append(g.edges)
senders.append(g.senders)
receivers.append(g.receivers)
n_node.append(g.n_node)
n_edge.append(g.n_edge)
return jraph.GraphsTuple(
nodes=np.concatenate(nodes, axis=0),
edges=np.concatenate(edges, axis=0),
senders=np.concatenate(senders, axis=0),
receivers=np.concatenate(receivers, axis=0),
n_node=np.concatenate(n_node, axis=0),
n_edge=np.concatenate(n_edge, axis=0),
globals=None)
class MLPMessagePassingLayer(hk.Module):
"""Message passing layer implemented as MLPs."""
def __init__(self,
node_hidden_sizes: List[int],
msg_hidden_sizes: List[int],
residual: bool = True,
layer_norm: bool = False,
name: Optional[str] = None):
"""Constructor.
Args:
node_hidden_sizes: hidden sizes for the node update model.
msg_hidden_sizes: hidden sizes for the edge message model.
      residual: set to True to use residual connections; this also means the
        input dimension is appended to `node_hidden_sizes` as the output size.
layer_norm: whether to apply layer norm on the node representations.
name: name for this module.
"""
super().__init__(name=name)
self._node_hidden_sizes = node_hidden_sizes
self._msg_hidden_sizes = msg_hidden_sizes
self._residual = residual
self._layer_norm = layer_norm
def _compute_messages(self, graph: jraph.GraphsTuple) -> ArrayType:
"""Compute the messages on each edge."""
x = jnp.concatenate([graph.nodes[graph.senders],
graph.nodes[graph.receivers],
graph.edges], axis=-1)
return hk.nets.MLP(self._msg_hidden_sizes, activate_final=True)(x)
def _update_nodes(self, graph: jraph.GraphsTuple,
messages: ArrayType) -> ArrayType:
"""Compute updated node representations."""
x = jax.ops.segment_sum(messages, graph.receivers,
num_segments=graph.nodes.shape[0])
x = jnp.concatenate([graph.nodes, x], axis=-1)
layer_sizes = self._node_hidden_sizes[:]
if self._residual:
layer_sizes += [graph.nodes.shape[-1]]
x = hk.nets.MLP(layer_sizes, activate_final=False)(x)
if self._layer_norm:
x = hk.LayerNorm(axis=-1, create_scale=True, create_offset=True)(x)
if self._residual:
return graph.nodes + x
else:
return x
def __call__(self, graph: jraph.GraphsTuple) -> jraph.GraphsTuple:
"""Apply this layer on the input graph."""
messages = self._compute_messages(graph)
updated_nodes = self._update_nodes(graph, messages)
return graph._replace(nodes=updated_nodes)
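# In equations (added commentary, not part of the original code), the layer
# above computes, for each edge (s -> r) with feature x_e and node states h:
#   m_e = MLP_msg([h_s, h_r, x_e])
#   h_r' = h_r + MLP_node([h_r, sum of incoming m_e])
# where the optional LayerNorm is applied to the node update before the
# residual connection.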
class SimpleGraphNet(hk.Module):
"""A simple graph net module, a stack of message passing layers."""
def __init__(self,
num_layers: int,
msg_hidden_size_factor: int = 2,
layer_norm: bool = False,
name: Optional[str] = None):
"""Constructor.
Args:
num_layers: number of message passing layers in the network.
msg_hidden_size_factor: size of message module hidden sizes as a factor of
the input node feature dimensionality.
layer_norm: whether to apply layer norm on node updates.
name: name of this module.
"""
super().__init__(name=name)
self._num_layers = num_layers
self._msg_hidden_size_factor = msg_hidden_size_factor
self._layer_norm = layer_norm
def __call__(self, graph: jraph.GraphsTuple) -> jraph.GraphsTuple:
"""Run the simple graph net on the input data.
Args:
graph: input graph.
Returns:
graph: output graph.
"""
input_node_dim = graph.nodes.shape[-1]
msg_hidden_size = input_node_dim * self._msg_hidden_size_factor
for _ in range(self._num_layers):
graph = MLPMessagePassingLayer(
node_hidden_sizes=[],
msg_hidden_sizes=[msg_hidden_size],
layer_norm=self._layer_norm)(graph)
return graph
def add_reverse_edges(graph: jraph.GraphsTuple) -> jraph.GraphsTuple:
"""Add edges in the reverse direction, copy edge features."""
senders = np.concatenate([graph.senders, graph.receivers], axis=0)
receivers = np.concatenate([graph.receivers, graph.senders], axis=0)
edges = np.concatenate([graph.edges, graph.edges], axis=0)
return graph._replace(senders=senders, receivers=receivers, edges=edges)
| deepmind-research-master | wikigraphs/wikigraphs/model/graph_net.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# WikiGraphs is licensed under the terms of the Creative Commons
# Attribution-ShareAlike 4.0 International (CC BY-SA 4.0) license.
#
# WikiText-103 data (unchanged) is licensed by Salesforce.com, Inc. under the
# terms of the Creative Commons Attribution-ShareAlike 4.0 International
# (CC BY-SA 4.0) license. You can find details about CC BY-SA 4.0 at:
#
# https://creativecommons.org/licenses/by-sa/4.0/legalcode
#
# Freebase data is licensed by Google LLC under the terms of the Creative
# Commons CC BY 4.0 license. You may obtain a copy of the License at:
#
# https://creativecommons.org/licenses/by/4.0/legalcode
#
# ==============================================================================
"""WikiGraphs model modules."""
from . import embedding
from . import graph_net
from . import sampler
from . import transformer
from . import transformer_block
| deepmind-research-master | wikigraphs/wikigraphs/model/__init__.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# WikiGraphs is licensed under the terms of the Creative Commons
# Attribution-ShareAlike 4.0 International (CC BY-SA 4.0) license.
#
# WikiText-103 data (unchanged) is licensed by Salesforce.com, Inc. under the
# terms of the Creative Commons Attribution-ShareAlike 4.0 International
# (CC BY-SA 4.0) license. You can find details about CC BY-SA 4.0 at:
#
# https://creativecommons.org/licenses/by-sa/4.0/legalcode
#
# Freebase data is licensed by Google LLC under the terms of the Creative
# Commons CC BY 4.0 license. You may obtain a copy of the License at:
#
# https://creativecommons.org/licenses/by/4.0/legalcode
#
# ==============================================================================
"""Tests for wikigraphs.model.sampler."""
from absl.testing import absltest
import jraph
import numpy as np
from wikigraphs.model import sampler
from wikigraphs.model import transformer as models
class SamplerTest(absltest.TestCase):
def test_uncond_sampler_runs(self):
prompt = np.array([[0, 1, 2, -1, -1],
[0, 1, 2, -1, -1]], dtype=np.int32)
vocab_size = prompt.max() + 1
bos_token = 0
memory_size = 2
params = None
def model_fn(x):
return models.TransformerXL(
vocab_size=vocab_size,
emb_dim=8,
num_layers=2,
num_heads=4,
cutoffs=[])(x, is_training=False, cache_steps=memory_size)
uncond_sampler = sampler.TransformerXLSampler(model_fn)
sample = uncond_sampler.sample(params, prompt)
self.assertTrue((sample[:, 0] == bos_token).all())
self.assertTrue((sample != -1).all())
self.assertEqual(sample.shape, prompt.shape)
sample2 = uncond_sampler.sample(params, prompt)
self.assertTrue((sample2[:, 0] == bos_token).all())
self.assertTrue((sample2 != -1).all())
self.assertEqual(sample2.shape, prompt.shape)
self.assertTrue((sample != sample2).any())
def test_bow2text_sampler_runs(self):
bow = np.array([[0, 0, 1, 0, 2, 0, 0, 1],
[0, 1, 0, 0, 1, 0, 1, 0]], dtype=np.int32)
prompt = np.array([[0, 1, 2, -1, -1, -1],
[0, 1, 2, -1, -1, -1]], dtype=np.int32)
vocab_size = prompt.max() + 1
bos_token = 0
memory_size = 2
params = None
def model_fn(bow, x):
return models.Bow2TextTransformer(
vocab_size=vocab_size,
emb_dim=16,
num_layers=2,
num_heads=4,
cutoffs=[])(bow, x, is_training=False, cache_steps=memory_size)
bow_sampler = sampler.Bow2TextTransformerSampler(model_fn)
sample = bow_sampler.sample(params, prompt, bow)
self.assertTrue((sample[:, 0] == bos_token).all())
self.assertTrue((sample != -1).all())
self.assertEqual(sample.shape, prompt.shape)
sample2 = bow_sampler.sample(params, prompt, bow)
self.assertTrue((sample2[:, 0] == bos_token).all())
self.assertTrue((sample2 != -1).all())
self.assertEqual(sample2.shape, prompt.shape)
self.assertTrue((sample != sample2).any())
def test_graph2text_sampler_runs(self):
graphs = jraph.GraphsTuple(
nodes=np.ones((4, 3), dtype=np.float32),
edges=np.ones((3, 1), dtype=np.float32),
senders=np.array([0, 2, 3], dtype=np.int32),
receivers=np.array([1, 3, 2], dtype=np.int32),
n_node=np.array([2, 2], dtype=np.int32),
n_edge=np.array([1, 2], dtype=np.int32),
globals=None,
)
prompt = np.array([[0, 1, 2, -1, -1, -1],
[0, 1, 2, -1, -1, -1]], dtype=np.int32)
vocab_size = prompt.max() + 1
bos_token = 0
memory_size = 2
params = None
def model_fn(graphs, max_graph_size, x):
return models.Graph2TextTransformer(
vocab_size=vocab_size,
emb_dim=8,
num_layers=2,
num_heads=4,
cutoffs=[],
gnn_embed_dim=8,
gnn_num_layers=2)(
graphs, max_graph_size, True, x,
is_training=False, cache_steps=memory_size)
graph_sampler = sampler.Graph2TextTransformerSampler(model_fn)
sample = graph_sampler.sample(params, prompt, graphs)
self.assertTrue((sample[:, 0] == bos_token).all())
self.assertTrue((sample != -1).all())
self.assertEqual(sample.shape, prompt.shape)
sample2 = graph_sampler.sample(params, prompt, graphs)
self.assertTrue((sample2[:, 0] == bos_token).all())
self.assertTrue((sample2 != -1).all())
self.assertEqual(sample2.shape, prompt.shape)
self.assertTrue((sample != sample2).any())
if __name__ == '__main__':
absltest.main()
| deepmind-research-master | wikigraphs/wikigraphs/model/sampler_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# WikiGraphs is licensed under the terms of the Creative Commons
# Attribution-ShareAlike 4.0 International (CC BY-SA 4.0) license.
#
# WikiText-103 data (unchanged) is licensed by Salesforce.com, Inc. under the
# terms of the Creative Commons Attribution-ShareAlike 4.0 International
# (CC BY-SA 4.0) license. You can find details about CC BY-SA 4.0 at:
#
# https://creativecommons.org/licenses/by-sa/4.0/legalcode
#
# Freebase data is licensed by Google LLC under the terms of the Creative
# Commons CC BY 4.0 license. You may obtain a copy of the License at:
#
# https://creativecommons.org/licenses/by/4.0/legalcode
#
# ==============================================================================
"""Jax implementation of the Transformer-XL model."""
from typing import Dict, List, Optional, Tuple
import haiku as hk
from haiku import initializers as init
import jax
import jax.numpy as jnp
import jraph
import numpy as np
from wikigraphs.model import transformer_block
from wikigraphs.model.embedding import AdaptiveSoftmaxEmbedding
from wikigraphs.model.embedding import GraphEmbeddingModel
# For WikiText-103
DEFAULT_CUTOFFS = (20000 + 1, 40000 + 1, 200000 + 1)
def sequence_prediction_metrics(
logits: jnp.ndarray,
labels: jnp.ndarray,
mask: Optional[jnp.ndarray] = None
) -> Dict[str, float]:
"""Compute the metrics for sequence prediction.
Args:
logits: [B, T, V] array of logits.
labels: [B, T] array of labels.
mask: [B, T] array of binary masks, if provided.
Returns:
metrics: a dictionary of metrics.
"""
vocab_size = logits.shape[-1]
logps = jax.nn.log_softmax(logits)
labels_one_hot = hk.one_hot(labels, vocab_size)
class_logps = jnp.sum(logps * labels_one_hot, axis=-1)
prediction_correct = jnp.argmax(logits, axis=-1) == labels
if mask is not None:
masked_logps = mask * class_logps
total_count = jnp.sum(mask)
tokens_correct = jnp.sum(prediction_correct * mask)
seq_correct = jnp.all(
jnp.logical_or(prediction_correct, jnp.logical_not(mask)), axis=-1)
else:
masked_logps = class_logps
total_count = np.prod(class_logps.shape)
tokens_correct = jnp.sum(prediction_correct)
seq_correct = jnp.all(prediction_correct, axis=-1)
token_accuracy = tokens_correct.astype(jnp.float32) / total_count
seq_accuracy = jnp.mean(seq_correct)
log_probs = jnp.mean(jnp.sum(masked_logps, axis=-1))
total_loss = -jnp.sum(masked_logps)
loss = total_loss / total_count
return dict(
loss=loss,
total_loss=total_loss,
total_count=total_count,
token_accuracy=token_accuracy,
seq_accuracy=seq_accuracy,
log_probs=log_probs,
)
class TransformerXL(hk.Module):
"""TransformerXL language model with memory using GPT2 blocks.
TransformerXL: https://arxiv.org/abs/1901.02860
GPT-2: http://www.persagen.com/files/misc/radford2019language.pdf
"""
def __init__(self,
vocab_size: int = 256,
emb_dim: int = 256,
num_layers: int = 10,
num_heads: int = 8,
dropout_prob: float = 0.1,
dropout_attn_prob: float = 0.0,
self_att_init_scale: float = 0.02,
dense_init_scale: float = 0.02,
dense_dim: int = 2100,
cutoffs: List[int] = DEFAULT_CUTOFFS,
tail_shrink_factor: int = 1,
relative_pos_clamp_len: Optional[int] = None,
name: Optional[str] = None):
"""Initialize a TransformerXL.
Args:
vocab_size: the size of the vocabulary.
emb_dim: the dimensionality of the embeddings.
num_layers: number of transformer blocks.
num_heads: number of attention heads.
dropout_prob: dropout probability.
dropout_attn_prob: dropout probability of the attention module.
self_att_init_scale: the initialization scale of the VarianceScaling
used for the linear layer in the attention module.
dense_init_scale: the initialization scale of the VarianceScaling
used for the linear layer in the feedforward module.
dense_dim: feature size of the feedforward block.
cutoffs: the cutoff indices of the vocabulary used for the adaptive
softmax embedding.
tail_shrink_factor: how many times to shrink the hidden dimensionality
for low-frequency vocabulary after each cutoff in the adaptive softmax
embedding.
relative_pos_clamp_len: clamp length of the relative position embeddings.
name: Optional name for this Haiku module.
"""
super().__init__(name=name)
self._vocab_size = vocab_size
self._emb_dim = emb_dim
self._num_layers = num_layers
self._num_heads = num_heads
self._dropout_prob = dropout_prob
self._dropout_attn_prob = dropout_attn_prob
self._self_att_init_scale = self_att_init_scale
self._dense_init_scale = dense_init_scale
self._dense_dim = dense_dim
self._relative_pos_clamp_len = relative_pos_clamp_len
self._io_emb = AdaptiveSoftmaxEmbedding(
emb_dim, vocab_size, cutoffs=cutoffs,
tail_shrink_factor=tail_shrink_factor)
def __call__(self,
x: jnp.ndarray,
mask: Optional[jnp.ndarray] = None,
is_training: bool = True,
should_reset: Optional[jnp.ndarray] = None,
cache_steps: int = 0,
extra: Optional[jnp.ndarray] = None,
extra_mask: Optional[jnp.ndarray] = None) -> jnp.ndarray:
"""Computes the outputs of the TransformerXL.
Args:
x: [batch, timesteps]. Inputs at time step t.
      mask: [batch, timesteps]. It indicates which tokens are to be predicted;
        in other words it corresponds to non-pad tokens in x_{t+1}.
is_training: whether the current stage is training or not.
should_reset: reset marker [batch, timesteps].
cache_steps: number of timesteps in the cache.
extra: if provided should be extra key-value input
[batch, extra_timesteps, in_dim].
extra_mask: if provided should be the mask for extra key-value input,
[batch, extra_timesteps].
Returns:
output: transformer output [batch, timesteps].
"""
if cache_steps == 0:
cache_steps = x.shape[1]
if should_reset is None:
should_reset = jnp.where(x == 1, 1, 0)
h = self._io_emb.embed_input(x)
if mask is not None:
attention_mask = mask[:, None, None, :]
else:
attention_mask = None
head_dim = self._emb_dim // self._num_heads
assert self._emb_dim % self._num_heads == 0, 'Head dim should be an int.'
# Biases for relative position embedding shared across all layers
r_w_bias = hk.get_parameter(
'r_w_bias', [1, 1, self._num_heads, head_dim],
init=init.RandomNormal(stddev=self._self_att_init_scale))
r_r_bias = hk.get_parameter(
'r_r_bias', [1, 1, self._num_heads, head_dim],
init=init.RandomNormal(stddev=self._self_att_init_scale))
for i in range(self._num_layers):
if mask is not None:
h *= mask[:, :, None]
h = transformer_block.GPT2Block(
r_w_bias=r_w_bias,
r_r_bias=r_r_bias,
causal=True,
dense_dim=self._dense_dim,
dropout_prob=self._dropout_prob,
dropout_attn_prob=self._dropout_attn_prob,
num_heads=self._num_heads,
self_att_init_scale=self._self_att_init_scale,
dense_init_scale=self._dense_init_scale,
relative_pos_clamp_len=self._relative_pos_clamp_len,
name='transformer_block_{}'.format(i),
)(
h, mask=attention_mask, is_training=is_training,
should_reset=should_reset, cache_steps=cache_steps,
extra=extra, extra_mask=extra_mask)
if mask is not None:
h *= mask[:, :, None]
return self._io_emb.embed_output(h)
def loss(self,
inputs: jnp.ndarray,
labels: jnp.ndarray,
mask: Optional[jnp.ndarray] = None,
is_training: bool = True,
should_reset: Optional[jnp.ndarray] = None,
cache_steps: int = 0,
extra: Optional[jnp.ndarray] = None,
extra_mask: Optional[jnp.ndarray] = None
) -> Tuple[float, Dict[str, float]]:
"""Computes the loss of the TransformerXL.
Args:
inputs: [batch, timesteps].
labels: [batch, timesteps].
      mask: [batch, timesteps]. It indicates which tokens are to be predicted;
        in other words it corresponds to non-pad tokens in the `labels`.
is_training: whether the current stage is training or not.
should_reset: reset marker [batch, timesteps].
cache_steps: number of timesteps in the cache.
extra: if provided should be extra key-value input
[batch, extra_timesteps, in_dim].
extra_mask: if provided should be the mask for extra key-value input,
[batch, extra_timesteps].
Returns:
output: loss and a dict containing metrics.
"""
# [B, T, V]
logits = self(inputs, mask=mask, is_training=is_training,
should_reset=should_reset, cache_steps=cache_steps,
extra=extra, extra_mask=extra_mask)
metrics = sequence_prediction_metrics(logits, labels, mask)
return metrics['loss'], metrics
def repeat_rows(a: jnp.ndarray, repeats: int, out_length: int) -> jnp.ndarray:
"""Repeat rows of input tensor a.
Output is
[a[0],
a[0],
...
a[0], # A total of repeats[0] copies of a[0].
a[1],
a[1],
...,
a[1], # A total of repeats[1] copies of a[1].
...
a[n-1]], # A total of repeats[n-1] copies of a[n-1].
Args:
a: [n_rows, ...] input tensor.
repeats: [n_rows] int tensor, the number of repeats for each row.
    out_length: number of rows in the output; it should be the same as
      sum(repeats), and is provided statically so the function can be jitted.
Returns:
out: [out_length, ...] output tensor.
"""
a = jnp.asarray(a)
n = a.shape[0]
assert n == repeats.size
chunk_start = jnp.cumsum(repeats)
idx = jnp.sum(jnp.arange(out_length)[:, None] >= chunk_start[None, :],
axis=-1)
return a[idx]
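# The following is an illustrative sketch added for clarity (not part of the
# original module API); the values are hypothetical and only demonstrate how
# `repeat_rows` expands rows according to `repeats`.
def _repeat_rows_example() -> jnp.ndarray:
  """Tiny sanity-check example for `repeat_rows`."""
  a = jnp.array([[1.0], [2.0], [3.0]])
  repeats = jnp.array([1, 2, 0])
  # Row 0 appears once, row 1 twice, row 2 is dropped, so the result is
  # [[1.0], [2.0], [2.0]] with out_length == repeats.sum() == 3.
  return repeat_rows(a, repeats, out_length=3)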
def unpack_and_pad(
packed: jnp.ndarray,
split_sizes: jnp.ndarray,
pad_size: int,
pad_value: int = 0) -> Tuple[jnp.ndarray, jnp.ndarray]:
"""Unpack and pad tensors to a standard size.
Args:
packed: a [total_size, ...] tensor, which contains n individual tensors
concatenated along the 0-th axis.
split_sizes: size [n] int tensor, size of each individual tensor.
pad_size: size for each split to pad to.
pad_value: the value to use for padding.
Returns:
tensors: [n, pad_size, ...] tensor, tensors[i] is the i-th individual tensor
padded to pad_size length.
mask: [n, pad_size] mask tensor indicating which value is padded.
"""
in_shape = list(packed.shape)
total_size = in_shape[0]
n_splits = split_sizes.shape[0]
idx = jnp.arange(pad_size)
masks = split_sizes[:, None] > idx[None, :]
out_shape = in_shape[:]
out_shape[0] = n_splits * pad_size
out = jnp.full(out_shape, pad_value, dtype=packed.dtype)
# Index for the rows of `packed`:
# Define split_start[k] = sum_{i=0}^{k-1} split_sizes[i], which is the
# starting index of split k. So if split_start[k] <= i < split_start[k+1]
# then index belongs to split k. We therefore have:
# idx[i] = k * pad_size + i - split_start[k]
cumsum = jnp.concatenate([jnp.array([0], dtype=split_sizes.dtype),
jnp.cumsum(split_sizes)[:-1]])
idx = jnp.arange(total_size)
idx += repeat_rows(jnp.arange(n_splits), split_sizes, total_size) * pad_size
idx -= repeat_rows(cumsum, split_sizes, total_size)
out = out.at[idx].set(packed)
out = out.reshape([n_splits, pad_size] + out_shape[1:])
return out, masks
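# Illustrative sketch (not part of the original module): `unpack_and_pad`
# splits a packed tensor back into individual tensors and pads each one to a
# common length. The values below are hypothetical.
def _unpack_and_pad_example() -> Tuple[jnp.ndarray, jnp.ndarray]:
  """Tiny sanity-check example for `unpack_and_pad`."""
  packed = jnp.array([[1.0], [2.0], [3.0]])  # two tensors of sizes 1 and 2.
  split_sizes = jnp.array([1, 2])
  out, mask = unpack_and_pad(packed, split_sizes, pad_size=2)
  # out == [[[1.0], [0.0]], [[2.0], [3.0]]]
  # mask == [[True, False], [True, True]]
  return out, mask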
class Graph2TextTransformer(hk.Module):
"""A graph2text TransformerXL model.
  It embeds the graph with a simple graph neural network model, and passes the
  graph embeddings to the TransformerXL model, where they are presented as
  extra inputs to attend to in addition to the text embedding inputs.
"""
def __init__(self,
*transformer_args,
gnn_embed_dim: int = 128,
gnn_num_layers: int = 5,
gnn_layer_norm: bool = False,
name: Optional[str] = None,
**transformer_kwargs):
"""Constructor.
Args:
*transformer_args: args for the transformer module.
gnn_embed_dim: node embedding size.
gnn_num_layers: number of message passing layers to use.
gnn_layer_norm: whether to use layer norm in the GNN.
name: optional name for this module.
**transformer_kwargs: kwargs for the transformer module.
"""
super().__init__(name=name)
self._transformer = TransformerXL(*transformer_args, **transformer_kwargs)
self._gnn = GraphEmbeddingModel(
embed_dim=gnn_embed_dim,
num_layers=gnn_num_layers,
use_layer_norm=gnn_layer_norm)
def _encode_graphs(self,
graphs: jraph.GraphsTuple,
pad_n_nodes: Optional[int] = None,
padded: bool = False) -> Tuple[jnp.ndarray, jnp.ndarray]:
"""Encode graphs so that it can be used in the transformer.
Args:
graphs: a graph structured using jraph.GraphsTuple.
      pad_n_nodes: number of nodes to pad each graph's node set to.
      padded: whether the graph batch includes an extra padding graph, which
        will be removed from the output.
Returns:
tensors: unpacked and padded graph nodes.
mask: mask tensor indicating which value is padded.
"""
graphs = self._gnn(graphs)
if pad_n_nodes is None:
pad_n_nodes = graphs.n_node.max()
out, mask = unpack_and_pad(graphs.nodes, graphs.n_node, pad_n_nodes)
if padded:
# Remove the padding graph from the batch
return out[:-1], mask[:-1]
else:
return out, mask
def __call__(self,
graphs: jraph.GraphsTuple,
pad_n_nodes: int,
batch_padded: bool,
*args, **kwargs):
"""Computes the outputs of the graph2text TransformerXL.
Args:
      graphs: a graph structured using jraph.GraphsTuple.
      pad_n_nodes: number of nodes to pad each graph's node set to.
batch_padded: whether the graph batch is padded or not.
*args: args to the TransformerXL model.
**kwargs: kwargs to the TransformerXL model.
Returns:
      output: transformer output logits [batch, timesteps, vocab_size].
"""
extra, extra_mask = self._encode_graphs(graphs, pad_n_nodes, batch_padded)
return self._transformer(
*args, extra=extra, extra_mask=extra_mask, **kwargs)
def loss(self,
graphs: jraph.GraphsTuple,
pad_n_nodes: int,
batch_padded: bool,
inputs: jnp.ndarray,
labels: jnp.ndarray,
mask: jnp.ndarray,
**kwargs):
"""Computes the loss of the graph2text TransformerXL.
Args:
      graphs: a graph structured using jraph.GraphsTuple.
      pad_n_nodes: number of nodes to pad each graph's node set to.
batch_padded: whether the graph batch is padded or not.
inputs: [batch, timesteps].
labels: [batch, timesteps].
mask: [batch, timesteps].
**kwargs: kwargs to the TransformerXL model.
Returns:
output: loss and a dict containing metrics.
"""
extra, extra_mask = self._encode_graphs(graphs, pad_n_nodes, batch_padded)
return self._transformer.loss(
inputs, labels, mask, extra=extra, extra_mask=extra_mask, **kwargs)
class Bow2TextTransformer(hk.Module):
"""A bag-of-words to text TransformerXL model.
This model embeds bag-of-words into vectors and the text transformer can then
condition on these vectors to generate text.
More specifically, the bow embedded vectors will be treated as extra tokens
that the transformer can attend to, in addition to the text data it is already
modelling.
To make the model more expressive, we allow each bag-of-words to be embedded
  into potentially more than one vector, and the transformer will
  correspondingly treat them as more than one extra token.
"""
def __init__(self,
*transformer_args,
bow_embedding_dim: int = 256,
bow_n_tokens: int = 1,
name: Optional[str] = None,
**transformer_kwargs):
"""Constructor.
Args:
*transformer_args: the TransformerXL constructor arguments.
bow_embedding_dim: dimensionality for the bag-of-words embeddings.
bow_n_tokens: number of extra tokens to create for the bag-of-words
representations.
name: optional name for this module.
**transformer_kwargs: kwargs for the transformer module.
"""
super().__init__(name=name)
self._transformer = TransformerXL(*transformer_args, **transformer_kwargs)
self._bow_embedding_dim = bow_embedding_dim
self._bow_n_tokens = bow_n_tokens
def _encode_bow(self, bow: jnp.ndarray) -> jnp.ndarray:
"""Encode the bag-of-words into tensors that can be used by the transormer.
Args:
bow: a [batch_size, bow_vocab_size] tensor, each row is a bow vector.
Returns:
embeddings: [batch_size, bow_n_tokens, bow_embedding_dim] tensor.
"""
batch_size = bow.shape[0]
bow = bow.astype(jnp.float32)
# [B, D * n]
embeddings = hk.Linear(self._bow_embedding_dim * self._bow_n_tokens)(bow)
embeddings = transformer_block.layer_norm(jax.nn.gelu(embeddings))
return jnp.reshape(
embeddings, [batch_size, self._bow_n_tokens, self._bow_embedding_dim])
def __call__(self, bow: jnp.ndarray, *args, **kwargs):
"""Compute the output of this bag-of-words-to-text transformer model.
Args:
bow: a [batch_size, bow_vocab_size] tensor, each row is a bow vector.
*args: args to the TransformerXL model.
**kwargs: kwargs to the TransformerXL model.
Returns:
      output: transformer output logits [batch, timesteps, vocab_size].
"""
return self._transformer(*args, extra=self._encode_bow(bow), **kwargs)
def loss(self, bow: jnp.ndarray, *args, **kwargs):
"""Computes the loss of the graph2text TransformerXL.
Args:
bow: a [batch_size, bow_vocab_size] tensor, each row is a bow vector.
*args: args to the TransformerXL model.
**kwargs: kwargs to the TransformerXL model.
Returns:
output: loss and a dict containing metrics.
"""
return self._transformer.loss(*args, extra=self._encode_bow(bow), **kwargs)
| deepmind-research-master | wikigraphs/wikigraphs/model/transformer.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# WikiGraphs is licensed under the terms of the Creative Commons
# Attribution-ShareAlike 4.0 International (CC BY-SA 4.0) license.
#
# WikiText-103 data (unchanged) is licensed by Salesforce.com, Inc. under the
# terms of the Creative Commons Attribution-ShareAlike 4.0 International
# (CC BY-SA 4.0) license. You can find details about CC BY-SA 4.0 at:
#
# https://creativecommons.org/licenses/by-sa/4.0/legalcode
#
# Freebase data is licensed by Google LLC under the terms of the Creative
# Commons CC BY 4.0 license. You may obtain a copy of the License at:
#
# https://creativecommons.org/licenses/by/4.0/legalcode
#
# ==============================================================================
"""Tests for wikigraphs.model.graph_net."""
from absl import logging
from absl.testing import absltest
import haiku as hk
import jax
import jax.numpy as jnp
import jraph
import numpy as np
import optax
from wikigraphs.model import graph_net as gn
class GraphNetTest(absltest.TestCase):
def test_node_classification(self):
# If node has more than 2 neighbors --> class 1, otherwise class 0.
# Graph structure:
# 1 4
# | \ / |
# | 0 - 3 |
# | / \ |
# 2 5
edges = np.array([
[0, 1],
[1, 2],
[2, 0],
[0, 3],
[3, 4],
[4, 5],
[5, 3],
], dtype=np.int32)
n_node = edges.max() + 1
n_edge = edges.shape[0]
g = jraph.GraphsTuple(
senders=edges[:, 0],
receivers=edges[:, 1],
edges=np.ones((edges.shape[0], 1), dtype=np.float32),
nodes=np.ones((n_node, 1), dtype=np.float32),
n_node=np.array([n_node], dtype=np.int32),
n_edge=np.array([n_edge], dtype=np.int32),
globals=None)
g = gn.add_reverse_edges(g)
targets = np.array([1, 0, 0, 1, 0, 0], dtype=np.int32)
n_classes = 2
def forward(graph, targets):
model = gn.SimpleGraphNet(num_layers=5, layer_norm=False)
graph = model(graph)
nodes = graph.nodes
logits = hk.Linear(n_classes)(nodes)
pred = logits.argmax(axis=-1)
accuracy = (pred == targets).mean()
targets = jax.nn.one_hot(targets, n_classes, dtype=jnp.float32)
return -jnp.mean(jnp.sum(
jax.nn.log_softmax(logits, axis=-1) * targets, axis=-1)), accuracy
init_fn, apply_fn = hk.without_apply_rng(hk.transform(forward))
rng = hk.PRNGSequence(0)
params = init_fn(next(rng), g, targets)
optimizer = optax.chain(
optax.scale_by_adam(),
optax.scale(-1e-3))
opt_state = optimizer.init(params)
apply_fn = jax.jit(apply_fn)
for i in range(500):
(loss, acc), grad = jax.value_and_grad(apply_fn,
has_aux=True)(params, g, targets)
updates, opt_state = optimizer.update(grad, opt_state, params)
params = optax.apply_updates(params, updates)
if (i + 1) % 100 == 0:
logging.info('Step %d, loss %.8f, accuracy %.4f', i + 1, loss, acc)
self.assertLess(loss, 0.01)
self.assertEqual(acc, 1.0)
def test_pad_size(self):
self.assertEqual(gn.pad_size(1), 1)
self.assertEqual(gn.pad_size(5), 8)
self.assertEqual(gn.pad_size(7), 8)
self.assertEqual(gn.pad_size(101), 128)
def test_pad_graphs(self):
# No new edges to add
graphs = jraph.GraphsTuple(
nodes=np.arange(6)[:, None],
edges=np.arange(4)[:, None],
senders=np.array([0, 2, 3, 4]),
receivers=np.array([1, 3, 4, 5]),
n_node=np.array([2, 4]),
n_edge=np.array([1, 3]),
globals=None)
padded = gn.pad_graphs(graphs)
np.testing.assert_array_equal(
padded.nodes,
np.array([0, 1, 2, 3, 4, 5, 0, 0])[:, None])
np.testing.assert_array_equal(padded.edges, graphs.edges)
np.testing.assert_array_equal(padded.senders, graphs.senders)
np.testing.assert_array_equal(padded.receivers, graphs.receivers)
np.testing.assert_array_equal(padded.n_node, [2, 4, 2])
np.testing.assert_array_equal(padded.n_edge, [1, 3, 0])
# Add just a single default node
graphs = jraph.GraphsTuple(
nodes=np.arange(7)[:, None],
edges=np.arange(5)[:, None],
senders=np.array([0, 2, 3, 5, 6]),
receivers=np.array([1, 3, 4, 6, 5]),
n_node=np.array([2, 3, 2]),
n_edge=np.array([1, 2, 2]),
globals=None)
padded = gn.pad_graphs(graphs)
np.testing.assert_array_equal(
padded.nodes,
np.array([0, 1, 2, 3, 4, 5, 6, 0])[:, None])
np.testing.assert_array_equal(
padded.edges,
np.array([0, 1, 2, 3, 4, 0, 0, 0])[:, None])
np.testing.assert_array_equal(
padded.senders,
[0, 2, 3, 5, 6, 7, 7, 7])
np.testing.assert_array_equal(
padded.receivers,
[1, 3, 4, 6, 5, 7, 7, 7])
np.testing.assert_array_equal(
padded.n_node, [2, 3, 2, 1])
np.testing.assert_array_equal(
padded.n_edge, [1, 2, 2, 3])
# Num. nodes is a power of 2 but we still pad at least one extra node
graphs = jraph.GraphsTuple(
nodes=np.arange(8)[:, None],
edges=np.arange(5)[:, None],
senders=np.array([0, 2, 3, 5, 6]),
receivers=np.array([1, 3, 4, 6, 7]),
n_node=np.array([2, 3, 3]),
n_edge=np.array([1, 2, 2]),
globals=None)
padded = gn.pad_graphs(graphs)
np.testing.assert_array_equal(
padded.nodes,
np.array([0, 1, 2, 3, 4, 5, 6, 7, 0, 0, 0, 0, 0, 0, 0, 0])[:, None])
np.testing.assert_array_equal(
padded.edges,
np.array([0, 1, 2, 3, 4, 0, 0, 0])[:, None])
np.testing.assert_array_equal(
padded.senders,
[0, 2, 3, 5, 6, 8, 8, 8])
np.testing.assert_array_equal(
padded.receivers,
[1, 3, 4, 6, 7, 8, 8, 8])
np.testing.assert_array_equal(
padded.n_node, [2, 3, 3, 8])
np.testing.assert_array_equal(
padded.n_edge, [1, 2, 2, 3])
def test_batch_graphs_by_device(self):
# batch 4 graphs for 2 devices
num_devices = 2
graphs = [
jraph.GraphsTuple(
nodes=np.arange(2)[:, None],
edges=np.arange(2)[:, None],
senders=np.array([0, 1]),
receivers=np.array([1, 0]),
n_node=np.array([2]),
n_edge=np.array([2]),
globals=None),
jraph.GraphsTuple(
nodes=np.arange(3)[:, None],
edges=np.arange(1)[:, None],
senders=np.array([2]),
receivers=np.array([0]),
n_node=np.array([3]),
n_edge=np.array([1]),
globals=None),
jraph.GraphsTuple(
nodes=np.arange(4)[:, None],
edges=np.arange(2)[:, None],
senders=np.array([1, 0]),
receivers=np.array([2, 3]),
n_node=np.array([4]),
n_edge=np.array([2]),
globals=None),
jraph.GraphsTuple(
nodes=np.arange(5)[:, None],
edges=np.arange(3)[:, None],
senders=np.array([2, 1, 3]),
receivers=np.array([1, 4, 0]),
n_node=np.array([5]),
n_edge=np.array([3]),
globals=None),
]
batched = gn.batch_graphs_by_device(graphs, num_devices)
self.assertLen(batched, num_devices)
np.testing.assert_array_equal(
batched[0].nodes,
np.array([0, 1, 0, 1, 2])[:, None])
np.testing.assert_array_equal(
batched[0].edges,
np.array([0, 1, 0])[:, None])
np.testing.assert_array_equal(
batched[0].senders,
np.array([0, 1, 4]))
np.testing.assert_array_equal(
batched[0].receivers,
np.array([1, 0, 2]))
np.testing.assert_array_equal(
batched[0].n_node,
np.array([2, 3]))
np.testing.assert_array_equal(
batched[0].n_edge,
np.array([2, 1]))
np.testing.assert_array_equal(
batched[1].nodes,
np.array([0, 1, 2, 3, 0, 1, 2, 3, 4])[:, None])
np.testing.assert_array_equal(
batched[1].edges,
np.array([0, 1, 0, 1, 2])[:, None])
np.testing.assert_array_equal(
batched[1].senders,
np.array([1, 0, 6, 5, 7]))
np.testing.assert_array_equal(
batched[1].receivers,
np.array([2, 3, 5, 8, 4]))
np.testing.assert_array_equal(
batched[1].n_node,
np.array([4, 5]))
np.testing.assert_array_equal(
batched[1].n_edge,
np.array([2, 3]))
def test_pad_graphs_by_device(self):
graphs = [
jraph.GraphsTuple(
nodes=np.arange(5)[:, None], # pad to 8
edges=np.arange(3)[:, None], # pad to 4
senders=np.array([0, 1, 4]), # pad to 4
receivers=np.array([1, 0, 2]), # pad to 4
n_node=np.array([2, 3]), # pad to 3
n_edge=np.array([2, 1]), # pad to 3
globals=None),
jraph.GraphsTuple(
nodes=np.arange(4)[:, None], # pad to 8
edges=np.arange(1)[:, None], # pad to 4
senders=np.array([1]), # pad to 4
receivers=np.array([0]), # pad to 4
n_node=np.array([2, 2]), # pad to 3
n_edge=np.array([1, 0]), # pad to 3
globals=None),
]
padded = gn.pad_graphs_by_device(graphs)
np.testing.assert_array_equal(
padded.nodes,
np.array([0, 1, 2, 3, 4, 0, 0, 0,
0, 1, 2, 3, 0, 0, 0, 0])[:, None])
np.testing.assert_array_equal(
padded.edges,
np.array([0, 1, 2, 0, 0, 0, 0, 0])[:, None])
np.testing.assert_array_equal(
padded.senders,
np.array([0, 1, 4, 5, 1, 4, 4, 4]))
np.testing.assert_array_equal(
padded.receivers,
np.array([1, 0, 2, 5, 0, 4, 4, 4]))
np.testing.assert_array_equal(
padded.n_node,
np.array([2, 3, 3, 2, 2, 4]))
np.testing.assert_array_equal(
padded.n_edge,
np.array([2, 1, 1, 1, 0, 3]))
if __name__ == '__main__':
absltest.main()
| deepmind-research-master | wikigraphs/wikigraphs/model/graph_net_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# WikiGraphs is licensed under the terms of the Creative Commons
# Attribution-ShareAlike 4.0 International (CC BY-SA 4.0) license.
#
# WikiText-103 data (unchanged) is licensed by Salesforce.com, Inc. under the
# terms of the Creative Commons Attribution-ShareAlike 4.0 International
# (CC BY-SA 4.0) license. You can find details about CC BY-SA 4.0 at:
#
# https://creativecommons.org/licenses/by-sa/4.0/legalcode
#
# Freebase data is licensed by Google LLC under the terms of the Creative
# Commons CC BY 4.0 license. You may obtain a copy of the License at:
#
# https://creativecommons.org/licenses/by/4.0/legalcode
#
# ==============================================================================
"""Samplers for the graph2text transformers."""
import abc
from typing import Any, Optional, Mapping
import haiku as hk
import jax
import jax.numpy as jnp
import jraph
import numpy as np
from wikigraphs.model import graph_net as gn
class BaseSampler:
"""Base class for transformer samplers."""
def __init__(self,
model_fn,
temperature: float = 1.0,
device: Optional[Any] = None,
rng: Optional[np.ndarray] = None):
"""Constructor.
Args:
model_fn: a transformer language model defined in model.transformer.
temperature: sampling temperature.
device: the sampler will run on this device if provided.
rng: random number generator.
"""
self._temperature = temperature
self._device = device or jax.local_devices()[0]
init_fn, apply_fn = hk.transform_with_state(model_fn)
if rng is None:
rng = jax.random.PRNGKey(np.random.randint(2**32))
rng = jax.random.fold_in(rng, jax.host_id())
self._rng = rng
self._init_state = None
self._jit_model(init_fn, apply_fn)
def _jit_model(self, init_fn, apply_fn):
"""Jit the `init_fn` and `apply_fn`."""
pass
@abc.abstractmethod
def _sample(self,
params: Mapping[str, Any],
state: Mapping[str, Any],
rng: jnp.ndarray,
x: jnp.ndarray,
**kwargs) -> np.ndarray:
"""Generate samples.
Args:
params: parameters of the transformer.
state: state of the transformer.
rng: random number generator.
x: a prompt of shape [batch_size, sample_len], in which an entry of -1
        indicates a token will be generated at that position; other entries
        act as the prompt.
**kwargs: additional inputs.
Returns:
output: [batch_size, sample_len] tensor, the generated sequence.
"""
@abc.abstractmethod
def sample(self,
params: Mapping[str, Any],
x: jnp.ndarray,
**kwargs) -> jnp.ndarray:
"""Generate samples based on the given parameters and prompts.
Args:
params: parameters of the transformer.
x: a prompt of shape [batch_size, sample_len], in which an entry of -1
        indicates a token will be generated at that position; other entries
        act as the prompt.
**kwargs: additional inputs.
Returns:
output: the generated sequence.
"""
class TransformerXLSampler(BaseSampler):
"""Sampling from the TransformerXL model."""
def _jit_model(self, init_fn, apply_fn):
"""Jit `init_fn` and `apply_fn`, the latter is used in `self._sample`."""
self._init_fn = jax.jit(init_fn, device=self._device)
self._apply_fn = apply_fn
self._sample_fn = jax.jit(self._sample, device=self._device)
def _sample(self,
params: Mapping[str, Any],
state: Mapping[str, Any],
rng: jnp.ndarray,
x: jnp.ndarray) -> np.ndarray:
"""Generate unconditional samples.
Args:
params: parameters of the transformer.
state: state of the transformer.
rng: random number generator.
x: a prompt of shape [batch_size, sample_len], in which an entry of -1
        indicates a token will be generated at that position; other entries
        act as the prompt.
Returns:
output: [batch_size, sample_len] tensor, the generated sequence.
"""
batch_size, sample_len = x.shape
def one_step(params, state, rng, i, x):
step_sample = jax.lax.dynamic_slice(x, [0, i], [batch_size, 1])
rng, rng_ = jax.random.split(rng)
# step_sample shape is [batch_size, 1].
logits, state = self._apply_fn(params, state, rng_, step_sample)
rng, rng_ = jax.random.split(rng)
step_sample = jax.random.categorical(rng_, logits / self._temperature)
update = jnp.where(x[:, i + 1] < 0, step_sample[:, 0], x[:, i + 1])[:,
None]
x = jax.lax.dynamic_update_slice(x, update, [0, i + 1])
return state, rng, x
def loop_body(i, data):
state, rng, x = data
return one_step(params, state, rng, i, x)
_, _, x = jax.lax.fori_loop(0, sample_len - 1, loop_body,
(state, rng, x))
return x
def sample(self,
params: Mapping[str, Any],
x: jnp.ndarray) -> jnp.ndarray:
"""Generate samples based on the given graphs and parameters.
Args:
params: parameters of the transformer.
x: a prompt of shape [batch_size, sample_len], in which an entry of -1
        indicates a token will be generated at that position; other entries
        act as the prompt.
Returns:
output: the generated sequence.
"""
if self._init_state is None:
self._rng, rng = jax.random.split(self._rng)
self._init_params, self._init_state = self._init_fn(rng, x[:, :1])
if params is None:
params = self._init_params
self._rng, rng = jax.random.split(self._rng)
sample = self._sample_fn(params, self._init_state, rng, x)
return sample
class Bow2TextTransformerSampler(BaseSampler):
"""Sampling from the TransformerXL model."""
def _jit_model(self, init_fn, apply_fn):
"""Jit `init_fn` and `apply_fn`, the latter is used in `self._sample`."""
self._init_fn = jax.jit(init_fn, device=self._device)
self._apply_fn = apply_fn
self._sample_fn = jax.jit(self._sample, device=self._device)
def _sample(self,
params: Mapping[str, Any],
state: Mapping[str, Any],
rng: jnp.ndarray,
bow: jnp.ndarray,
x: jnp.ndarray) -> np.ndarray:
"""Generate samples conditioned on the bag-of-words of the graph.
Args:
params: parameters of the transformer.
state: state of the transformer.
rng: random number generator.
bow: a [batch_size, bow_vocab_size] tensor, each row is a bow vector.
x: a prompt of shape [batch_size, sample_len], in which an entry of -1
        indicates a token will be generated at that position; other entries
        act as the prompt.
Returns:
output: [batch_size, sample_len] tensor, the generated sequence.
"""
batch_size, sample_len = x.shape
def one_step(params, state, rng, i, x):
step_sample = jax.lax.dynamic_slice(x, [0, i], [batch_size, 1])
rng, rng_ = jax.random.split(rng)
# step_sample shape is [batch_size, 1].
logits, state = self._apply_fn(params, state, rng_, bow, step_sample)
rng, rng_ = jax.random.split(rng)
step_sample = jax.random.categorical(rng_, logits / self._temperature)
update = jnp.where(x[:, i + 1] < 0, step_sample[:, 0], x[:, i + 1])[:,
None]
x = jax.lax.dynamic_update_slice(x, update, [0, i + 1])
return state, rng, x
def loop_body(i, data):
state, rng, x = data
return one_step(params, state, rng, i, x)
_, _, x = jax.lax.fori_loop(0, sample_len - 1, loop_body,
(state, rng, x))
return x
def sample(self,
params: Mapping[str, Any],
x: jnp.ndarray,
bow: jnp.ndarray) -> jnp.ndarray:
"""Generate samples based on the given graphs and parameters.
Args:
params: parameters of the transformer.
x: a prompt of shape [batch_size, sample_len], in which an entry of -1
        indicates a token will be generated at that position; other entries
        act as the prompt.
bow: a [batch_size, bow_vocab_size] tensor, each row is a bow vector.
Returns:
output: the generated sequence.
"""
if self._init_state is None:
self._rng, rng = jax.random.split(self._rng)
self._init_params, self._init_state = self._init_fn(rng, bow, x[:, :1])
if params is None:
params = self._init_params
self._rng, rng = jax.random.split(self._rng)
sample = self._sample_fn(params, self._init_state, rng, bow, x)
return sample
class Graph2TextTransformerSampler(BaseSampler):
"""Sampling from the Graph2Text TransformerXL model."""
def _jit_model(self, init_fn, apply_fn):
"""Jit `init_fn` and `apply_fn`, the latter is used in `self._sample`."""
# `pad_n_nodes` is set as a static argument.
self._init_fn = jax.jit(init_fn, device=self._device, static_argnums=2)
self._apply_fn = apply_fn
self._sample_fn = jax.jit(self._sample, device=self._device,
static_argnums=4)
def _sample(self,
params: Mapping[str, Any],
state: Mapping[str, Any],
rng: jnp.ndarray,
graphs: jraph.GraphsTuple,
pad_n_nodes: int,
x: jnp.ndarray) -> np.ndarray:
"""Generate samples conditioned on the bag-of-words reprensation of graph.
Args:
params: parameters of the transformer.
state: state of the transformer.
rng: random number generator.
      graphs: a graph structured using jraph.GraphsTuple.
      pad_n_nodes: number of nodes to pad each graph's node set to.
x: a prompt of shape [batch_size, sample_len], in which an entry of -1
        indicates a token will be generated at that position; other entries
        act as the prompt.
Returns:
output: [batch_size, sample_len] tensor, the generated sequence.
"""
batch_size, sample_len = x.shape
def one_step(params, state, rng, i, x):
step_sample = jax.lax.dynamic_slice(x, [0, i], [batch_size, 1])
rng, rng_ = jax.random.split(rng)
# step_sample shape is [batch_size, 1].
logits, state = self._apply_fn(
params, state, rng_, graphs, pad_n_nodes, step_sample)
rng, rng_ = jax.random.split(rng)
step_sample = jax.random.categorical(rng_, logits / self._temperature)
update = jnp.where(x[:, i + 1] < 0, step_sample[:, 0], x[:, i + 1])[:,
None]
x = jax.lax.dynamic_update_slice(x, update, [0, i + 1])
return state, rng, x
def loop_body(i, data):
state, rng, x = data
return one_step(params, state, rng, i, x)
_, _, x = jax.lax.fori_loop(0, sample_len - 1, loop_body,
(state, rng, x))
return x
def sample(self,
params: Mapping[str, Any],
x: jnp.ndarray,
graphs: jraph.GraphsTuple,
pad: bool = True) -> jnp.ndarray:
"""Generate samples based on the given graphs and parameters.
Args:
params: parameters of the transformer.
x: a prompt of shape [batch_size, sample_len], in which an entry of -1
        indicates a token will be generated at that position; other entries
        act as the prompt.
      graphs: a graph structured using jraph.GraphsTuple.
pad: whether to pad the graph nodes and edges or not.
Returns:
output: the generated sequence.
"""
if pad:
graphs = gn.pad_graphs(graphs)
max_graph_size = gn.pad_size(graphs.n_node.max())
else:
max_graph_size = graphs.n_node.max()
if self._init_state is None:
self._rng, rng = jax.random.split(self._rng)
self._init_params, self._init_state = self._init_fn(
rng, graphs, max_graph_size, x[:, :1])
if params is None:
params = self._init_params
self._rng, rng = jax.random.split(self._rng)
sample = self._sample_fn(
params, self._init_state, rng, graphs, max_graph_size, x)
return sample
| deepmind-research-master | wikigraphs/wikigraphs/model/sampler.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# WikiGraphs is licensed under the terms of the Creative Commons
# Attribution-ShareAlike 4.0 International (CC BY-SA 4.0) license.
#
# WikiText-103 data (unchanged) is licensed by Salesforce.com, Inc. under the
# terms of the Creative Commons Attribution-ShareAlike 4.0 International
# (CC BY-SA 4.0) license. You can find details about CC BY-SA 4.0 at:
#
# https://creativecommons.org/licenses/by-sa/4.0/legalcode
#
# Freebase data is licensed by Google LLC under the terms of the Creative
# Commons CC BY 4.0 license. You may obtain a copy of the License at:
#
# https://creativecommons.org/licenses/by/4.0/legalcode
#
# ==============================================================================
"""Tests for wikigraphs.data.wikitext."""
from absl.testing import absltest
from wikigraphs.data import tokenizers
from wikigraphs.data import wikitext
WIKITEXT_ROOT = '/tmp/data/wikitext-103'
WIKITEXT_VOCAB_FILE = '/tmp/data/wikitext-vocab.csv'
class WikitextTest(absltest.TestCase):
def test_wikitext_size(self):
valid_set = wikitext.RawDataset(
subset='valid', shuffle_data=False, data_dir=WIKITEXT_ROOT)
n_tokens = 0
n_articles = 0
for article in valid_set:
n_tokens += len([t for t in article.text.split(' ') if t])
n_articles += 1
# Dataset size must match published values.
self.assertEqual(n_tokens, 217646)
self.assertEqual(n_articles, 60)
def test_wikitext_dataset_size(self):
tokenizer = tokenizers.WordTokenizer(vocab_file=WIKITEXT_VOCAB_FILE)
batch_size = 4
timesteps = 256
valid_set = wikitext.WikitextDataset(
tokenizer=tokenizer, batch_size=batch_size, timesteps=timesteps,
subset='valid', shuffle_data=False, repeat=False,
data_dir=WIKITEXT_ROOT)
n_tokens = 0
n_bos = 0
for batch in valid_set:
n_tokens += (batch['obs'] != tokenizer.pad_token()).sum()
n_bos += (batch['obs'] == tokenizer.bos_token()).sum()
self.assertEqual(
batch['obs'].shape, (batch_size, timesteps))
self.assertEqual(
batch['target'].shape, (batch_size, timesteps))
self.assertEqual(
batch['should_reset'].shape, (batch_size, timesteps))
self.assertEqual(
batch['mask'].shape, (batch_size, timesteps))
n_tokens -= n_bos
self.assertEqual(n_tokens, 217646)
self.assertEqual(n_bos, 60)
if __name__ == '__main__':
absltest.main()
| deepmind-research-master | wikigraphs/wikigraphs/data/wikitext_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# WikiGraphs is licensed under the terms of the Creative Commons
# Attribution-ShareAlike 4.0 International (CC BY-SA 4.0) license.
#
# WikiText-103 data (unchanged) is licensed by Salesforce.com, Inc. under the
# terms of the Creative Commons Attribution-ShareAlike 4.0 International
# (CC BY-SA 4.0) license. You can find details about CC BY-SA 4.0 at:
#
# https://creativecommons.org/licenses/by-sa/4.0/legalcode
#
# Freebase data is licensed by Google LLC under the terms of the Creative
# Commons CC BY 4.0 license. You may obtain a copy of the License at:
#
# https://creativecommons.org/licenses/by/4.0/legalcode
#
# ==============================================================================
"""Tests for wikigraphs.data.tokenizers."""
from absl.testing import absltest
from wikigraphs.data import tokenizers
WIKITEXT_VOCAB_FILE = '/tmp/data/wikitext-vocab.csv'
GRAPH_VOCAB_FILE = '/tmp/data/graph-vocab.csv'
class TokenizerTest(absltest.TestCase):
def test_tokenizer(self):
tokenizer = tokenizers.WordTokenizer(vocab_file=WIKITEXT_VOCAB_FILE)
# Vocab size must match published number.
self.assertEqual(tokenizer.vocab_size, 267735 + 2)
s = 'Hello world ! \n How are you ?'
encoded = tokenizer.encode(s, prepend_bos=True)
self.assertEqual(encoded.shape, (9,))
decoded = tokenizer.decode(encoded)
self.assertEqual(s, decoded)
def test_graph_tokenizer_tokenize_nodes_edges(self):
self.assertEqual(
tokenizers.GraphTokenizer.split_node(
'"Hello, how are you?"'),
['hello', ',', 'how', 'are', 'you', '?'])
self.assertEqual(
tokenizers.GraphTokenizer.split_node(
'"This building was built in 1998."'),
['this', 'building', 'was', 'built', 'in', '<number>', '.'])
self.assertEqual(
tokenizers.GraphTokenizer.split_node('ns/m.030ssw'),
['<entity>'])
self.assertEqual(
tokenizers.GraphTokenizer.split_edge('ns/common.topic.description'),
['common', 'topic', 'description'])
self.assertEqual(
tokenizers.GraphTokenizer.split_edge('ns/type.object.name'),
['type', 'object', 'name'])
def test_graph_tokenizer_vocab(self):
tokenizer = tokenizers.GraphTokenizer(vocab_file=GRAPH_VOCAB_FILE)
self.assertEqual(tokenizer.vocab_size, 31087 + 3)
if __name__ == '__main__':
absltest.main()
| deepmind-research-master | wikigraphs/wikigraphs/data/tokenizers_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# WikiGraphs is licensed under the terms of the Creative Commons
# Attribution-ShareAlike 4.0 International (CC BY-SA 4.0) license.
#
# WikiText-103 data (unchanged) is licensed by Salesforce.com, Inc. under the
# terms of the Creative Commons Attribution-ShareAlike 4.0 International
# (CC BY-SA 4.0) license. You can find details about CC BY-SA 4.0 at:
#
# https://creativecommons.org/licenses/by-sa/4.0/legalcode
#
# Freebase data is licensed by Google LLC under the terms of the Creative
# Commons CC BY 4.0 license. You may obtain a copy of the License at:
#
# https://creativecommons.org/licenses/by/4.0/legalcode
#
# ==============================================================================
"""Some tools for processing data."""
from typing import Any, Iterator
from absl import logging
import numpy as np
def pad_to(x: np.array, size: int, axis: int = -1, pad_value: float = 0.):
"""Pad an array to the specified size along a specified axis."""
if x.shape[axis] > size:
raise ValueError(f'Data item has size {x.shape[axis]} larger than {size}'
f' in axis {axis} already.')
elif x.shape[axis] == size:
return x
else:
pad_amount = [(0, 0)] * x.ndim
pad_amount[axis] = (0, size - x.shape[axis])
return np.pad(x, pad_amount, mode='constant', constant_values=pad_value)
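# Illustrative sketch (hypothetical values, not part of the original module):
def _pad_to_example() -> np.ndarray:
  """Tiny example for `pad_to`: pads [0, 1, 2] to length 5 with zeros."""
  return pad_to(np.arange(3), 5)  # -> array([0, 1, 2, 0, 0])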
def dynamic_batch(
iterable: Iterator[Any],
batch_size: int,
timesteps: int,
return_incomplete_batch: bool = False,
pad: bool = False,
pad_value: float = 0.) -> Iterator[Any]:
"""Batches up values in iterable to [batch_size, timesteps].
  This function takes items from the iterable and packs them into batches.
  Sequence #i in the batch is a continuation of sequence #i in the
  previous batch, i.e. it starts from where the previous sequence left off.
When an item is finished, a new item is taken from the iterable to append to
the sequence and fill the batch.
This function is designed for language modeling, where the input and the
target sequences are offset by one. We take that into account by making sure
neighboring batches have one token overlap.
Example:
If the iterable contains [[0, 1, 2], [10, 11, 12, 13, 14], [20, 21, 22]] and
batch size is 2, timesteps is 3, then the first batch would be:
[[0, 1, 2],
[10, 11, 12]]
then the second batch:
[[2, 20, 21], # seq 0 finished, continuing from seq 2
[12, 13, 14]]
Note the overlap of 1 token between these two batches, and the continuation
of sequences across batches.
Args:
iterable: the iterable that yields sequences of integer token IDs.
batch_size: number of examples in a batch.
timesteps: length of each sequence in a batch.
return_incomplete_batch: if True return the incomplete batches, which
typically appears at the end of the dataset.
pad: set to True to pad the incomplete batches.
pad_value: the value to use for padding.
Yields:
batches: where batches['obs'] are the observations of size
[batch_size, timesteps], and batches['should_reset'] is a 0/1 mask of
the same size that marks sequence boundaries, e.g. the entries in this
mask are all 0 except at locations where a new sequence is starting.
"""
if return_incomplete_batch and not pad:
raise ValueError(
f'If return_incomplete_batch, then pad must be True, currently {pad}.')
iterator = iter(iterable)
elems = []
for _ in range(batch_size):
item = next(iterator)
elems.append(item)
start_batch = [True] * batch_size
iter_finished = False
loaded_finished = False
while not (iter_finished and loaded_finished):
batch = []
for i in range(batch_size):
# should_reset value is 1 when a new sequence begins.
# [old[-3], old[-2], old[-1], new[0], new[1], new[2]]
# [0, 0, 0, 1, 0, 0]
should_reset = np.zeros(timesteps, np.float32)
if start_batch[i]:
should_reset[0] = 1
# Pack new examples in the sequence until they go beyond the required
# timesteps.
while len(elems[i]) < timesteps:
should_reset[len(elems[i])] = 1
try:
item = next(iterator)
except StopIteration:
iter_finished = True
break
elems[i] = np.concatenate([elems[i], item])
batch.append(dict(obs=elems[i][:timesteps], should_reset=should_reset))
# Shift and make sure we have a 1 token overlap.
elems[i] = elems[i][timesteps - 1:]
# Since the last token is shifted to be the first token of the next batch,
# We need to make sure reset is handled properly as well.
start_batch[i] = (should_reset[-1] == 1)
# If any loaded data is not yet consumed in the output we should keep
# generating.
loaded_finished = all(e.size == 0 for e in elems)
if not return_incomplete_batch:
elem_len = len(batch[0]['obs'])
if (elem_len != timesteps or
not all(len(x['obs']) == elem_len for x in batch[1:])):
logging.info('Dropping the (last?) incomplete batch.')
break
if pad:
for x in batch:
x['obs'] = pad_to(x['obs'], timesteps, axis=0, pad_value=pad_value)
yield dict(
obs=np.stack([x['obs'] for x in batch], axis=0),
should_reset=np.stack([x['should_reset'] for x in batch], axis=0))
def batch_graph_text_pairs(
iterable: Iterator[Any],
batch_size: int,
timesteps: int,
pad_value: float = 0.,
seq_and_graph_id: bool = False) -> Iterator[Any]:
"""Batch graph and text pairs.
  This method pairs text with graphs: each text sequence is split into chunks
  (with an overlap of 1) of size `timesteps`, and the graph associated with the
  text is paired with each chunk as well. The last incomplete
chunk of each text sequence is padded with the `pad_value`.
Args:
iterable: Iterable that returns (graph, sequence) pairs, graph can be
anything, and sequence is a list of tokenized token IDs.
batch_size: Number of examples in a batch.
timesteps: Window size for the sequences.
pad_value: Value to use for padding.
seq_and_graph_id: whether the `iterable` contains `seq_id` and `graph_id`.
Yields:
batch: a batch of text sequence paired with graphs.
"""
iterator = iter(iterable)
seqs = [None] * batch_size
graphs = [None] * batch_size
graph_ids = [None] * batch_size
seq_ids = [None] * batch_size
iter_finished = False
loaded_finished = False
while not (iter_finished and loaded_finished):
batch = []
for idx in range(batch_size):
should_reset = np.zeros(timesteps, np.float32)
# pylint: disable=g-explicit-length-test
if seqs[idx] is None or len(seqs[idx]) == 0:
should_reset[0] = 1
# One sequence exhausted, get the next example.
try:
if seq_and_graph_id:
(graph, seq), (graph_id, seq_id) = next(iterator)
graph_ids[idx] = graph_id
seq_ids[idx] = seq_id
else:
graph, seq = next(iterator)
seqs[idx] = seq
graphs[idx] = graph
except StopIteration:
iter_finished = True
seqs[idx] = np.array([pad_value], dtype=np.int32)
graphs[idx] = None
example = dict(obs=seqs[idx][:timesteps], graph=graphs[idx],
should_reset=should_reset)
if seq_and_graph_id:
example['seq_id'] = seq_ids[idx]
example['graph_id'] = graph_ids[idx]
batch.append(example)
# Make sure that there is an overlap, as we generate targets by shifting
# the tensor by 1 timestep. So the next element should be shifted by
      # `timesteps - 1` timesteps.
seqs[idx] = seqs[idx][timesteps - 1:]
# Make sure all loaded data are consumed in the output
loaded_finished = all(s.size == 0 for s in seqs)
# Also check for the last batch to avoid returning a fully empty batch
if iter_finished and all([np.all(b['obs'] == pad_value) for b in batch]):
break
# pad sequences to specified length
for e in batch:
e['obs'] = pad_to(e['obs'], timesteps, axis=0, pad_value=pad_value)
stacked_batch = dict(
obs=np.stack([e['obs'] for e in batch], axis=0),
graphs=[e['graph'] for e in batch],
should_reset=np.stack([e['should_reset'] for e in batch], axis=0))
if seq_and_graph_id:
stacked_batch['seq_id'] = np.stack(
[e['seq_id'] for e in batch], axis=0)
stacked_batch['graph_id'] = np.stack(
[e['graph_id'] for e in batch], axis=0)
yield stacked_batch
| deepmind-research-master | wikigraphs/wikigraphs/data/tools.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# WikiGraphs is licensed under the terms of the Creative Commons
# Attribution-ShareAlike 4.0 International (CC BY-SA 4.0) license.
#
# WikiText-103 data (unchanged) is licensed by Salesforce.com, Inc. under the
# terms of the Creative Commons Attribution-ShareAlike 4.0 International
# (CC BY-SA 4.0) license. You can find details about CC BY-SA 4.0 at:
#
# https://creativecommons.org/licenses/by-sa/4.0/legalcode
#
# Freebase data is licensed by Google LLC under the terms of the Creative
# Commons CC BY 4.0 license. You may obtain a copy of the License at:
#
# https://creativecommons.org/licenses/by/4.0/legalcode
#
# ==============================================================================
"""WikiGraphs data modules."""
from . import dataset
from . import io_tools
from . import paired_dataset
from . import tokenizers
from . import tools
from . import wikitext
| deepmind-research-master | wikigraphs/wikigraphs/data/__init__.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# WikiGraphs is licensed under the terms of the Creative Commons
# Attribution-ShareAlike 4.0 International (CC BY-SA 4.0) license.
#
# WikiText-103 data (unchanged) is licensed by Salesforce.com, Inc. under the
# terms of the Creative Commons Attribution-ShareAlike 4.0 International
# (CC BY-SA 4.0) license. You can find details about CC BY-SA 4.0 at:
#
# https://creativecommons.org/licenses/by-sa/4.0/legalcode
#
# Freebase data is licensed by Google LLC under the terms of the Creative
# Commons CC BY 4.0 license. You may obtain a copy of the License at:
#
# https://creativecommons.org/licenses/by/4.0/legalcode
#
# ==============================================================================
"""Some tools for I/O."""
import gzip
import io
import os
import re
from typing import NamedTuple, List, Iterator
from absl import logging
def read_txt_file(file_path: str, encoding: str = 'utf-8') -> str:
"""Read a plain txt file."""
with open(file_path, 'rb') as f:
content = f.read()
return content.decode(encoding)
def write_txt_file(file_path: str, txt: str, encoding: str = 'utf-8'):
"""Write the given txt string to file."""
make_dir_if_necessary(file_path)
with open(file_path, 'wb') as f:
f.write(txt.encode(encoding, 'surrogatepass'))
def read_gzip_txt_file(file_path: str, encoding: str = 'utf-8') -> str:
"""Read gzipped txt file."""
with open(file_path, 'rb') as f:
content = f.read()
with gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb') as f:
content = f.read()
return content.decode(encoding)
def make_dir_if_necessary(output_path):
output_dir = os.path.dirname(output_path)
  if output_dir and not os.path.exists(output_dir):
os.makedirs(output_dir)
def write_lines_to_gzipped_file(file_path, lines):
make_dir_if_necessary(file_path)
with open(file_path, 'wb') as f_zip:
with gzip.GzipFile(fileobj=f_zip, mode='wb') as f:
f.write('\n'.join(lines).encode('utf-8'))
class Graph(NamedTuple):
title: str
center: str
edges: List[str]
def graphs_from_file(file_path: str) -> Iterator[Graph]:
"""Read freebase graphs from file.
Args:
file_path: path to the input `.gz` file that contains a list of graphs.
Yields:
    graphs: the graphs read from the file, one at a time.
"""
content = read_gzip_txt_file(file_path)
graph_header_sep_re = re.compile(
r'(<graph center=[^ ]+ title="[^"]+">\n)')
graph_header_re = re.compile(
r'<graph center=([^ ]+) title="([^"]+)">\n')
parts = graph_header_sep_re.split(content)
# Skip the first part which is empty
for i in range(1, len(parts), 2):
header, body = parts[i], parts[i + 1]
m = graph_header_re.match(header)
yield Graph(title=m.group(2),
center=m.group(1),
edges=body.strip().split('\n'))
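# Illustrative sketch (an assumption, not a normative spec) of the expected
# uncompressed layout of such a file, based on the regexes above:
#
#   <graph center=ns/m.0abc title="Example Title">
#   ...one edge per line...
#   <graph center=ns/m.0def title="Another Title">
#   ...one edge per line...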
_UNICODE_RE = re.compile(r'(\$[0-9A-Fa-f]{4})')
def normalize_freebase_string(s: str) -> str:
"""Expand the `$xxxx` escaped unicode characters in the input string."""
# '"' is escaped as '``', convert it back.
  s = s.replace('``', '"')
parts = _UNICODE_RE.split(s)
parts = [p if not _UNICODE_RE.match(p) else chr(int(p[1:], base=16))
for p in parts]
return ''.join(parts).replace('_', ' ')
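# A minimal illustrative sketch (hypothetical input): the escaped sequence
# `$00e9` decodes to the unicode character U+00E9 ('é'), and underscores
# become spaces, e.g.:
#
#   normalize_freebase_string('Caf$00e9_de_Flore')  # -> 'Café de Flore'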
class GraphTextPair(NamedTuple):
"""Text paired with raw graph represented as in `edges`."""
center_node: str
title: str
edges: List[str]
text: str
def pair2lines(pair):
lines = [f'<graph center={pair.center_node} title="{pair.title}">']
lines.append('<section id="text">')
lines.append(pair.text)
lines.append('<section id="edges">')
lines.extend(pair.edges)
return lines
def write_pairs_to_gzip_txt_file(file_path, pairs):
logging.info('Writing %d pairs to %s.', len(pairs), file_path)
lines = []
for p in pairs:
lines.extend(pair2lines(p))
write_lines_to_gzipped_file(file_path, lines)
def read_pairs_from_gzip_txt_file(file_path: str) -> Iterator[GraphTextPair]:
"""Read graph-text pairs from gzip txt files.
Args:
file_path: a `.gz` file of graph-text pairs written in the same format as
using the `write_pairs_to_gzip_txt_file` function.
Yields:
Graph-text pairs from this file.
"""
content = read_gzip_txt_file(file_path)
graph_header_sep_re = re.compile(
r'(<graph center=[^ ]+ title="[^"]+">)')
graph_header_re = re.compile(
r'<graph center=([^ ]+) title="([^"]+)">$')
section_sep_re = re.compile(r'\n(<section id="[^"]+">\n)')
parts = graph_header_sep_re.split(content)
# Skip the first part which is empty
for i in range(1, len(parts), 2):
header, body = parts[i], parts[i + 1]
m = graph_header_re.match(header)
# 5 parts total, empty first part, "text", text section, "edges", edges
# section.
section_parts = section_sep_re.split(body)
yield GraphTextPair(center_node=m.group(1),
title=m.group(2),
text=section_parts[2],
edges=section_parts[-1].strip().split('\n'))
| deepmind-research-master | wikigraphs/wikigraphs/data/io_tools.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# WikiGraphs is licensed under the terms of the Creative Commons
# Attribution-ShareAlike 4.0 International (CC BY-SA 4.0) license.
#
# WikiText-103 data (unchanged) is licensed by Salesforce.com, Inc. under the
# terms of the Creative Commons Attribution-ShareAlike 4.0 International
# (CC BY-SA 4.0) license. You can find details about CC BY-SA 4.0 at:
#
# https://creativecommons.org/licenses/by-sa/4.0/legalcode
#
# Freebase data is licensed by Google LLC under the terms of the Creative
# Commons CC BY 4.0 license. You may obtain a copy of the License at:
#
# https://creativecommons.org/licenses/by/4.0/legalcode
#
# ==============================================================================
"""Tests for wikigraphs.data.tools."""
from absl.testing import absltest
import numpy as np
from wikigraphs.data import tools
class ToolsTest(absltest.TestCase):
def test_padding(self):
np.testing.assert_array_equal(
tools.pad_to(np.arange(3), 5),
[0, 1, 2, 0, 0])
np.testing.assert_array_equal(
tools.pad_to(np.arange(3), 5, pad_value=-1),
[0, 1, 2, -1, -1])
np.testing.assert_array_equal(
tools.pad_to(np.arange(6).reshape(2, 3), 4, axis=0, pad_value=-1),
[[0, 1, 2],
[3, 4, 5],
[-1, -1, -1],
[-1, -1, -1]])
np.testing.assert_array_equal(
tools.pad_to(np.arange(6).reshape(2, 3), 4, axis=-1, pad_value=-1),
[[0, 1, 2, -1],
[3, 4, 5, -1]])
def test_dynamic_batch(self):
def dataset():
data = [[1, 2, 2, 2],
[1, 3, 3],
[1, 4]]
for d in data:
yield np.array(d, dtype=np.int32)
batches = list(tools.dynamic_batch(
dataset(), batch_size=2, timesteps=3, return_incomplete_batch=False))
self.assertLen(batches, 1)
np.testing.assert_array_equal(
batches[0]['obs'],
[[1, 2, 2], [1, 3, 3]])
np.testing.assert_array_equal(
batches[0]['should_reset'],
[[1, 0, 0], [1, 0, 0]])
batches = list(tools.dynamic_batch(
dataset(), batch_size=2, timesteps=3, return_incomplete_batch=True,
pad=True, pad_value=0))
# Note `return_incomplete_batch=False` drops all the incomplete batches,
# and this can be more than just the last batch.
self.assertLen(batches, 3)
np.testing.assert_array_equal(
batches[0]['obs'],
[[1, 2, 2], [1, 3, 3]])
np.testing.assert_array_equal(
batches[0]['should_reset'],
[[1, 0, 0], [1, 0, 0]])
np.testing.assert_array_equal(
batches[1]['obs'],
[[2, 2, 1], [3, 0, 0]])
np.testing.assert_array_equal(
batches[1]['should_reset'],
[[0, 0, 1], [0, 1, 0]])
np.testing.assert_array_equal(
batches[2]['obs'],
[[1, 4, 0], [0, 0, 0]])
np.testing.assert_array_equal(
batches[2]['should_reset'],
[[1, 0, 1], [1, 0, 0]])
with self.assertRaises(ValueError):
batches = list(tools.dynamic_batch(
dataset(), batch_size=2, timesteps=3, return_incomplete_batch=True,
pad=False))
def test_batch_graph_text_pairs(self):
def source():
yield (1, np.array([1, 1, 1, 1, 1], dtype=np.int32))
yield (2, np.array([2, 2], dtype=np.int32))
yield (3, np.array([3, 3, 3, 3, 3, 3], dtype=np.int32))
data_iter = tools.batch_graph_text_pairs(
source(), batch_size=2, timesteps=3, pad_value=0)
batches = list(data_iter)
self.assertLen(batches, 4)
batch = batches[0]
np.testing.assert_array_equal(
batch['obs'],
[[1, 1, 1],
[2, 2, 0]])
self.assertEqual(batch['graphs'], [1, 2])
np.testing.assert_array_equal(
batch['should_reset'],
[[1, 0, 0],
[1, 0, 0]])
batch = batches[1]
np.testing.assert_array_equal(
batch['obs'],
[[1, 1, 1],
[3, 3, 3]])
self.assertEqual(batch['graphs'], [1, 3])
np.testing.assert_array_equal(
batch['should_reset'],
[[0, 0, 0],
[1, 0, 0]])
batch = batches[2]
np.testing.assert_array_equal(
batch['obs'],
[[1, 0, 0],
[3, 3, 3]])
self.assertEqual(batch['graphs'], [1, 3])
np.testing.assert_array_equal(
batch['should_reset'],
[[0, 0, 0],
[0, 0, 0]])
batch = batches[3]
np.testing.assert_array_equal(
batch['obs'],
[[0, 0, 0],
[3, 3, 0]])
self.assertEqual(batch['graphs'], [None, 3])
np.testing.assert_array_equal(
batch['should_reset'],
[[1, 0, 0],
[0, 0, 0]])
def test_batch_graph_text_pairs_batch_size1(self):
def source():
yield (0, np.array([1, 2], dtype=np.int32))
yield (1, np.array([1, 2, 3, 4, 5, 6], dtype=np.int32))
data_iter = tools.batch_graph_text_pairs(
source(), batch_size=1, timesteps=3, pad_value=0)
batches = list(data_iter)
batch = batches[0]
np.testing.assert_array_equal(batch['obs'], [[1, 2, 0]])
self.assertEqual(batch['graphs'], [0])
np.testing.assert_array_equal(batch['should_reset'], [[1, 0, 0]])
batch = batches[1]
np.testing.assert_array_equal(batch['obs'], [[1, 2, 3]])
self.assertEqual(batch['graphs'], [1])
np.testing.assert_array_equal(batch['should_reset'], [[1, 0, 0]])
batch = batches[2]
np.testing.assert_array_equal(batch['obs'], [[3, 4, 5]])
self.assertEqual(batch['graphs'], [1])
np.testing.assert_array_equal(batch['should_reset'], [[0, 0, 0]])
batch = batches[3]
np.testing.assert_array_equal(batch['obs'], [[5, 6, 0]])
self.assertEqual(batch['graphs'], [1])
np.testing.assert_array_equal(batch['should_reset'], [[0, 0, 0]])
self.assertLen(batches, 4)
if __name__ == '__main__':
absltest.main()
| deepmind-research-master | wikigraphs/wikigraphs/data/tools_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# WikiGraphs is licensed under the terms of the Creative Commons
# Attribution-ShareAlike 4.0 International (CC BY-SA 4.0) license.
#
# WikiText-103 data (unchanged) is licensed by Salesforce.com, Inc. under the
# terms of the Creative Commons Attribution-ShareAlike 4.0 International
# (CC BY-SA 4.0) license. You can find details about CC BY-SA 4.0 at:
#
# https://creativecommons.org/licenses/by-sa/4.0/legalcode
#
# Freebase data is licensed by Google LLC under the terms of the Creative
# Commons CC BY 4.0 license. You may obtain a copy of the License at:
#
# https://creativecommons.org/licenses/by/4.0/legalcode
#
# ==============================================================================
"""Tools for accessing the graph-text paired datasets."""
import abc
import collections
from typing import List, Tuple, NamedTuple, Any, Dict, Optional, Union
from absl import logging
import jax.numpy as jnp
import jraph
import numpy as np
from wikigraphs.data import dataset
from wikigraphs.data import io_tools
from wikigraphs.data import tokenizers
from wikigraphs.data import tools
ArrayType = Union[np.ndarray, jnp.ndarray]
DATA_ROOT = '/tmp/data/wikigraphs'
class RawDataset(dataset.Dataset):
"""The untokenized raw dataset."""
def __init__(self,
subset: str = 'train',
shuffle_data: bool = False,
               data_dir: Optional[str] = None,
version: str = 'max256'):
"""Constructor.
Args:
subset: which subset to load.
shuffle_data: set to True to randomly shuffle the data.
data_dir: if provided this will be used instead of the default location to
look for data, it must contain files like `train.gz`, `valid.gz` and
`test.gz`.
version: which version of the data to load, this must be the name of a
directory in `DATA_ROOT`.
"""
super().__init__()
self._subset = subset
self._shuffle_data = shuffle_data
self._data_dir = data_dir or DATA_ROOT
self._dataset = None
allowed_versions = ('max256', 'max512', 'max1024')
if version not in allowed_versions:
raise ValueError(f'Version {version} not one of the allowed versions:'
f' {allowed_versions}.')
self._version = version
def _load_data(self):
"""Load and prepare the data iterator."""
if self._dataset is None:
self._dataset = list(io_tools.read_pairs_from_gzip_txt_file(
f'{self._data_dir}/{self._version}/{self._subset}.gz'))
def source():
n_pairs = len(self._dataset)
if self._shuffle_data:
idx = np.random.permutation(n_pairs)
else:
idx = np.arange(n_pairs)
for i in range(n_pairs):
yield self._dataset[idx[i]]
return source()
class Graph:
"""A convenience class for representing graphs."""
def __init__(self, nodes: List[str], edges: List[Tuple[int, int, str]]):
"""Construct a graph from a list of nodes and edges.
Args:
nodes: a list of node attributes, one for each node.
edges: a list of (source_node_id, target_node_id, edge_attribute) for each
edge.
"""
self._nodes = nodes
self._edges = edges
self._node2id = {n: i for i, n in enumerate(nodes)}
def nodes(self) -> List[str]:
return self._nodes
def edges(self) -> List[Tuple[int, int, str]]:
return self._edges
def node2id(self, node: str) -> int:
return self._node2id[node]
@classmethod
def from_edges(cls, edges: List[str]) -> 'Graph':
"""Build a graph instance from a list of edges."""
node2id = dict()
parsed_edges = []
next_node_id = 0
for e in edges:
src, edge, tgt = e.split('\t')[:3]
src_id = node2id.get(src, next_node_id)
if src_id == next_node_id:
node2id[src] = src_id
next_node_id += 1
tgt_id = node2id.get(tgt, next_node_id)
if tgt_id == next_node_id:
node2id[tgt] = tgt_id
next_node_id += 1
parsed_edges.append((src_id, tgt_id, edge))
id2node = {i: n for n, i in node2id.items()}
return Graph(nodes=[id2node[i] for i in range(next_node_id)],
edges=parsed_edges)
def to_edges(self) -> List[str]:
r"""Convert graph to a list of edges.
The converted list of edges should be compatible with the format specified
in io_tools and compatible with the `from_edges` method above.
Returns:
edges: one edge per line, with the (source, target, edge_type) separated
by `\t`.
"""
edges = []
for s, t, e in self._edges:
edges.append(f'{self._nodes[s]}\t{e}\t{self._nodes[t]}')
return edges
@classmethod
def subsample_nodes(
cls, graph: 'Graph', subsample_rate: float = 1.0, center_node: str = None
) -> 'Graph':
"""Subsample the nodes of a graph."""
graph_size = len(graph.nodes())
if subsample_rate == 1.0 or graph_size <= 1:
return graph
subsampled_nodes_id = np.arange(graph_size)
if subsample_rate < 1.0:
subsample_graph_size = int(subsample_rate * graph_size)
if center_node is not None:
# We need to keep the center node during subsampling
center_node_id = graph.node2id(center_node)
subsampled_nodes_id = subsampled_nodes_id[
subsampled_nodes_id != center_node_id]
subsample_graph_size = max(1, subsample_graph_size - 1)
subsampled_nodes_id = np.random.choice(
subsampled_nodes_id, subsample_graph_size, replace=False)
subsampled_nodes_id = np.append(subsampled_nodes_id, center_node_id)
else:
subsampled_nodes_id = np.random.choice(
subsampled_nodes_id, subsample_graph_size, replace=False)
subsampled_nodes_id = np.sort(subsampled_nodes_id)
map_subsampled_nodes_id = {
old_id: new_id for new_id, old_id in enumerate(subsampled_nodes_id)}
nodes = []
edges = []
for node_id, n in enumerate(graph.nodes()):
if node_id in subsampled_nodes_id:
nodes.append(n)
for out_node, in_node, e in graph.edges():
if out_node in subsampled_nodes_id and in_node in subsampled_nodes_id:
edges.append((map_subsampled_nodes_id[out_node],
map_subsampled_nodes_id[in_node], e))
return Graph(nodes=nodes, edges=edges)
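# Illustrative example (not part of the original module, mirrors the unit
# tests): edge strings are tab-separated "source\tedge\ttarget" triples, the
# same format produced by `to_edges` and consumed by `from_edges`.
def _graph_round_trip_example():
  edges = ['A\tE1\tB', 'A\tE2\tC']
  g = Graph.from_edges(edges)
  assert g.nodes() == ['A', 'B', 'C']
  assert g.edges() == [(0, 1, 'E1'), (0, 2, 'E2')]
  assert g.to_edges() == edges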
class ParsedGraphTextPair(NamedTuple):
"""Graph-text pair with graph parsed into a `Graph` instance."""
center_node: str
title: str
text: str
graph: Graph
class ParsedDataset(dataset.Dataset):
"""Raw dataset + parsing graphs into Graph instances."""
def __init__(self,
subset: str = 'train',
shuffle_data: bool = False,
data_dir: str = None,
version: str = 'max256'):
"""Constructor.
Args:
subset: which subset to load.
shuffle_data: set to True to randomly shuffle the data.
      data_dir: if provided, this will be used instead of the default location
        to look for data; it must contain files like `train.gz`, `valid.gz`
        and `test.gz`.
version: which version of the data to load, this must be the name of a
directory in `DATA_ROOT`.
"""
super().__init__()
self._raw_data = RawDataset(subset=subset, shuffle_data=False,
data_dir=data_dir, version=version)
self._shuffle_data = shuffle_data
self._dataset = None
def _load_data(self):
if self._dataset is None:
# pylint: disable=g-complex-comprehension
self._dataset = [ParsedGraphTextPair(center_node=pair.center_node,
title=pair.title,
text=pair.text,
graph=Graph.from_edges(pair.edges))
for pair in self._raw_data]
def source():
n_pairs = len(self._dataset)
if self._shuffle_data:
idx = np.random.permutation(n_pairs)
else:
idx = np.arange(n_pairs)
for i in range(n_pairs):
yield self._dataset[idx[i]]
return source()
class BaseGraph2TextDataset(dataset.Dataset):
"""Base dataset class for graph-to-text tasks."""
def __init__(self,
tokenizer: tokenizers.Tokenizer,
graph_tokenizer: Optional[tokenizers.GraphTokenizer] = None,
batch_size: int = 1,
timesteps: int = 128,
subset: str = 'train',
shuffle_data: bool = False,
repeat: bool = False,
version: str = 'max256',
data_dir: str = None,
subsample_nodes: float = 1.0,
graph_retrieval_dataset: bool = False,
debug: bool = False):
"""Constructor.
Args:
tokenizer: the tokenizer for text data.
graph_tokenizer: the tokenizer for graph data.
batch_size: number of sequences to put in a batch.
timesteps: number of tokens to put in a sequence in a batch.
subset: which subset to load.
shuffle_data: whether to shuffle data.
repeat: set to True to repeat the dataset infinitely, otherwise do only
one pass through the dataset.
version: which version of the data to load.
      data_dir: if set, load data from this directory instead of the default
        location, and ignore `version`.
subsample_nodes: the proportion of the nodes in a graph to keep.
graph_retrieval_dataset: whether to construct the dataset for graph
retrieval tasks.
debug: set to True to use debug mode and only load a small number of
examples.
"""
super().__init__()
self._parsed_data = ParsedDataset(subset=subset,
shuffle_data=False,
data_dir=data_dir,
version=version)
self._tokenizer = tokenizer
self._graph_tokenizer = graph_tokenizer
self._batch_size = batch_size
self._timesteps = timesteps
self._subset = subset
self._shuffle_data = shuffle_data
self._repeat = repeat
self._subsample_nodes = subsample_nodes
self._graph_retrieval_dataset = graph_retrieval_dataset
self._debug = debug
self._dataset = None
@property
def num_articles(self):
return self._num_articles
@abc.abstractmethod
def _process_graph(self, center_node: str, graph: Graph):
"""Process the graph part of a `ParsedGraphTextPair` instance."""
def _process_graph_text_pair(
self, pair: ParsedGraphTextPair) -> Tuple[Any, np.ndarray]:
"""Process the given graph-text pair and prepare one example.
Args:
pair: the input `ParsedGraphTextPair` instance.
Returns:
graph: the processed graph content.
text: the tokenized text, a sequence of token IDs.
"""
return (self._process_graph(pair.center_node, pair.graph),
self._tokenizer.encode(
pair.text, prepend_bos=True, append_eos=True))
def _load_data(self):
"""Prepare the data."""
if self._dataset is None:
if self._debug:
data = [next(self._parsed_data) for _ in range(10)]
else:
data = list(self._parsed_data)
self._dataset = [self._process_graph_text_pair(p) for p in data]
self._num_articles = len(self._dataset)
logging.info('Loaded a total of %d examples from %s set.',
self._num_articles, self._subset)
if self._graph_retrieval_dataset:
# For graph retrieval tasks we pair all texts and graphs in the dataset,
# and indicate their (text_id, graph_id)
retrieval_data = []
for i, (g1, _) in enumerate(self._dataset):
for j, (_, t2) in enumerate(self._dataset):
retrieval_data.append(((g1, t2), (i, j)))
self._dataset = retrieval_data
logging.info('Constructed %d pairs.', len(self._dataset))
def source():
n_examples = len(self._dataset)
if self._shuffle_data:
idx = np.random.permutation(n_examples)
else:
idx = np.arange(n_examples)
for i in range(n_examples):
yield self._dataset[idx[i]]
def maybe_repeated_source():
if self._repeat:
while True:
yield from source()
else:
yield from source()
data_iter = tools.batch_graph_text_pairs(
maybe_repeated_source(),
self._batch_size,
self._timesteps + 1,
pad_value=self._tokenizer.pad_token(),
seq_and_graph_id=self._graph_retrieval_dataset)
if self._graph_retrieval_dataset:
data_iter = map(lambda x: dict( # pylint: disable=g-long-lambda
obs=x['obs'][:, :-1],
target=x['obs'][:, 1:],
should_reset=x['should_reset'][:, :-1],
# If target is a <pad> token then that target should not be predicted.
mask=(x['obs'][:, 1:] != self._tokenizer.pad_token()).astype(
np.float32),
seq_id=x['seq_id'],
graph_id=x['graph_id'],
graphs=self._process_graph_batch(x['graphs']),
), data_iter)
else:
data_iter = map(lambda x: dict( # pylint: disable=g-long-lambda
obs=x['obs'][:, :-1],
target=x['obs'][:, 1:],
should_reset=x['should_reset'][:, :-1],
# If target is a <pad> token then that target should not be predicted.
mask=(x['obs'][:, 1:] != self._tokenizer.pad_token()).astype(
np.float32),
graphs=self._process_graph_batch(x['graphs']),
), data_iter)
    # Filter out batches that do not have targets.
# This may happen when an observation contains a single last token of the
# sequence, which was predicted as target in the previous batch, and only
# used as observation in this batch, without a matching target. In this
# case all the masks are 0, therefore this batch provides no training signal
# and we can safely remove this batch. This also avoids some potential
# downstream issues.
data_iter = filter(lambda x: x['mask'].sum() > 0, data_iter)
return data_iter
@abc.abstractmethod
def _process_graph_batch(self, graphs: List[Any]):
"""Process a batch of graph data.
Args:
graphs: a list of graph data, each as returned by `_process_graph`.
Returns:
processed_graphs: processed tensor(s) that can be directly fed into a
model.
"""
@abc.abstractmethod
def return_faux_batch(self) -> Dict[str, np.ndarray]:
"""Return a fake batch with the right shapes and dtypes."""
class TextOnlyDataset(BaseGraph2TextDataset):
"""Text-only version of the paired dataset."""
def __init__(self,
tokenizer: tokenizers.Tokenizer,
graph_tokenizer: Optional[tokenizers.GraphTokenizer] = None,
batch_size: int = 1,
timesteps: int = 128,
subset: str = 'train',
shuffle_data: bool = False,
repeat: bool = False,
version: str = 'max256',
data_dir: str = None,
debug: bool = False,
**kwargs):
"""Constructor.
Args:
tokenizer: the tokenizer for text data.
graph_tokenizer: not used, keeping it here for compatibility with other
graph2text datasets.
batch_size: number of sequences to put in a batch.
timesteps: number of tokens to put in a sequence in a batch.
subset: which subset to load.
shuffle_data: whether to shuffle data.
repeat: set to True to repeat the dataset infinitely, otherwise do only
one pass through the dataset.
version: which version of the data to load.
      data_dir: if set, load data from this directory instead of the default
        location, and ignore `version`.
debug: set to True to use debug mode and only load a small number of
examples.
**kwargs: other arguments (for interface compatibility).
"""
del graph_tokenizer
super().__init__(tokenizer=tokenizer,
graph_tokenizer=None,
batch_size=batch_size,
timesteps=timesteps,
subset=subset,
shuffle_data=shuffle_data,
repeat=repeat,
version=version,
data_dir=data_dir,
debug=debug)
def _process_graph_batch(self, graphs: List[Any]):
del graphs
return None
def _process_graph(self, center_node: str, graph: Graph):
del center_node
del graph
return None
def __next__(self):
batch = super().__next__()
# Data should be text-only.
del batch['graphs']
return batch
def return_faux_batch(self):
"""Return a fake batch with the right shapes and types."""
obs = np.zeros((self._batch_size, self._timesteps), dtype=np.int32)
target = np.zeros_like(obs)
should_reset = np.zeros_like(obs, dtype=np.float32)
mask = np.zeros_like(obs, dtype=np.float32)
return dict(obs=obs, target=target, should_reset=should_reset, mask=mask)
class Bow2TextDataset(BaseGraph2TextDataset):
"""Dataset for bag-of-words to text."""
def _process_graph(self, center_node: str, graph: Graph):
"""Process the graph part of a `ParsedGraphTextPair` instance."""
# We don't use center node in a bag-of-words representation
del center_node
if self._subsample_nodes < 1.0:
graph = Graph.subsample_nodes(graph, self._subsample_nodes)
bow = np.zeros(self._graph_tokenizer.vocab_size, dtype=np.int32)
for n in graph.nodes():
for t in self._graph_tokenizer.encode_node(n):
bow[t] += 1
for _, _, e in graph.edges():
for t in self._graph_tokenizer.encode_edge(e):
bow[t] += 1
return bow
def _process_graph_batch(self, graphs: List[Any]):
"""Process a batch of graph data.
Args:
graphs: a list of graph data, each as returned by `_process_graph`.
Returns:
processed_graphs: processed tensor(s) that can be directly fed into a
model.
"""
empty_graph_bow = np.zeros(self._graph_tokenizer.vocab_size, dtype=np.int32)
graphs = [g if g is not None else empty_graph_bow for g in graphs]
# B x [V] -> [B, V]
return np.stack(graphs, axis=0)
def return_faux_batch(self):
obs = np.zeros((self._batch_size, self._timesteps), dtype=np.int32)
target = np.zeros_like(obs)
should_reset = np.zeros_like(obs, dtype=np.float32)
mask = np.zeros_like(obs, dtype=np.float32)
graphs = np.zeros((self._batch_size, self._graph_tokenizer.vocab_size),
dtype=np.float32)
return dict(obs=obs, target=target, should_reset=should_reset, mask=mask,
graphs=graphs)
class Graph2TextDataset(BaseGraph2TextDataset):
"""Graph-to-text dataset.
This dataset encodes the graph nodes and edges using a bag-of-words
representation.
"""
def __init__(self,
tokenizer: tokenizers.Tokenizer,
graph_tokenizer: tokenizers.GraphTokenizer,
batch_size: int = 1,
timesteps: int = 128,
subset: str = 'train',
shuffle_data: bool = False,
repeat: bool = False,
version: str = 'max256',
data_dir: str = None,
subsample_nodes: float = 1.0,
graph_retrieval_dataset: bool = False,
debug: bool = False):
"""Constructor.
Args:
tokenizer: the tokenizer for text data.
graph_tokenizer: the tokenizer for graph data.
batch_size: number of sequences to put in a batch.
timesteps: number of tokens to put in a sequence in a batch.
subset: which subset to load.
shuffle_data: whether to shuffle data.
repeat: set to True to repeat the dataset infinitely, otherwise do only
one pass through the dataset.
version: which version of the data to load.
      data_dir: if set, load data from this directory instead of the default
        location, and ignore `version`.
subsample_nodes: the proportion of the nodes in a graph to keep.
graph_retrieval_dataset: whether to construct the dataset for graph
retrieval tasks.
debug: set to True to use debug mode and only load a small number of
examples.
"""
self._graph_feature_dim = graph_tokenizer.vocab_size
super().__init__(tokenizer=tokenizer,
graph_tokenizer=graph_tokenizer,
batch_size=batch_size,
timesteps=timesteps,
subset=subset,
shuffle_data=shuffle_data,
repeat=repeat,
version=version,
data_dir=data_dir,
subsample_nodes=subsample_nodes,
graph_retrieval_dataset=graph_retrieval_dataset,
debug=debug)
self._placeholder_graph = self._process_graph(
center_node='<pad>',
graph=Graph(nodes=['<pad>'], edges=[]))
def _process_graph(self, center_node: str, graph: Graph):
"""Process the graph part of a `ParsedGraphTextPair` instance."""
if self._subsample_nodes < 1.0:
graph = Graph.subsample_nodes(graph, self._subsample_nodes, center_node)
nodes = graph.nodes()
edges = graph.edges()
n_edges = len(edges)
sender = np.zeros(n_edges, dtype=np.int32)
receiver = np.zeros(n_edges, dtype=np.int32)
nodes_bow = []
edges_bow = []
for n in nodes:
bow = collections.defaultdict(int)
for t in self._graph_tokenizer.encode_node(n):
bow[t] += 1
nodes_bow.append(bow)
for i, (s, r, e) in enumerate(edges):
bow = collections.defaultdict(int)
for t in self._graph_tokenizer.encode_edge(e):
bow[t] += 1
edges_bow.append(bow)
sender[i] = s
receiver[i] = r
return (nodes_bow, edges_bow, sender, receiver, graph.node2id(center_node))
def _to_graph_with_features(
self, nodes_bow, edges_bow, sender, receiver, center_node_id):
"""Convert the input to a `jraph.GraphsTuple` instance."""
n_nodes = len(nodes_bow)
n_edges = len(edges_bow)
# +1 for the center node indicator
nodes = np.zeros((n_nodes, self._graph_feature_dim + 1), dtype=np.float32)
edges = np.zeros((n_edges, self._graph_feature_dim), dtype=np.float32)
nodes[center_node_id][-1] = 1
for i, bow in enumerate(nodes_bow):
for t, c in bow.items():
nodes[i][t] = c
for i, bow in enumerate(edges_bow):
for t, c in bow.items():
edges[i][t] = c
return jraph.GraphsTuple(
nodes=nodes, edges=edges, senders=sender, receivers=receiver,
globals=None, n_node=np.array([n_nodes], dtype=np.int32),
n_edge=np.array([n_edges], dtype=np.int32))
def _process_graph_batch(self, graphs: List[Any]):
"""Process a batch of graph data.
Args:
graphs: a list of graph data, each as returned by `_process_graph`.
Returns:
processed_graphs: a list of processed tensor(s).
"""
graphs = [g if g is not None else self._placeholder_graph for g in graphs]
return [self._to_graph_with_features(*g) for g in graphs]
def return_faux_batch(self) -> Dict[str, np.ndarray]:
"""Return a fake batch with the right shapes and dimensions."""
obs = np.zeros([self._batch_size, self._timesteps], dtype=np.int32)
target = np.zeros([self._batch_size, self._timesteps], dtype=np.int32)
should_reset = np.zeros_like(obs, np.float32)
mask = np.zeros_like(obs, np.float32)
# A batch should contain `batch_size` graphs. Here we make sure each graph
# has one node and one edge.
graphs = self._batch_size * [jraph.GraphsTuple(
nodes=np.zeros([1, self._graph_feature_dim + 1], dtype=np.float32),
edges=np.zeros([1, self._graph_feature_dim], dtype=np.float32),
senders=np.zeros([1], dtype=np.int32),
receivers=np.zeros([1], dtype=np.int32),
n_node=np.ones(1, dtype=np.int32),
n_edge=np.ones(1, dtype=np.int32),
globals=None)]
return dict(obs=obs, target=target, mask=mask, should_reset=should_reset,
graphs=graphs)
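# Usage sketch (illustrative only; the vocab and data paths are placeholders
# that must point to files prepared by the WikiGraphs data scripts):
#
#   tokenizer = tokenizers.WordTokenizer(
#       vocab_file='/tmp/data/wikitext-vocab.csv')
#   graph_tokenizer = tokenizers.GraphTokenizer(
#       vocab_file='/tmp/data/graph-vocab.csv')
#   data = Graph2TextDataset(tokenizer, graph_tokenizer, batch_size=4,
#                            timesteps=256, subset='valid')
#   batch = next(iter(data))
#   # `batch` holds [batch_size, timesteps] arrays 'obs', 'target',
#   # 'should_reset' and 'mask', plus a list of `jraph.GraphsTuple`s under
#   # 'graphs', one graph per sequence in the batch.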
| deepmind-research-master | wikigraphs/wikigraphs/data/paired_dataset.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# WikiGraphs is licensed under the terms of the Creative Commons
# Attribution-ShareAlike 4.0 International (CC BY-SA 4.0) license.
#
# WikiText-103 data (unchanged) is licensed by Salesforce.com, Inc. under the
# terms of the Creative Commons Attribution-ShareAlike 4.0 International
# (CC BY-SA 4.0) license. You can find details about CC BY-SA 4.0 at:
#
# https://creativecommons.org/licenses/by-sa/4.0/legalcode
#
# Freebase data is licensed by Google LLC under the terms of the Creative
# Commons CC BY 4.0 license. You may obtain a copy of the License at:
#
# https://creativecommons.org/licenses/by/4.0/legalcode
#
# ==============================================================================
"""Base class of the datasets."""
import abc
from typing import Any, Iterator
class Dataset(abc.ABC):
"""Base class for all datasets.
  All sub-classes should define `_load_data()`, which returns an iterator over
  the dataset; the base class's `__iter__` stores it in `self._data_iter`.
"""
def __init__(self):
"""Constructor."""
self._data_iter = None # An iterator produced by `self._load_data`.
@abc.abstractmethod
def _load_data(self) -> Iterator[Any]:
"""Prepare data for another pass through the dataset.
This method should return a generator in a child class.
"""
def __next__(self):
return next(self._data_iter)
def __iter__(self):
self._data_iter = self._load_data()
return self
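# Minimal sketch of a subclass (illustrative only): `_load_data` returns a
# fresh generator each time, and the base class's `__iter__` stores it in
# `self._data_iter`, so e.g. `list(_RangeDataset(3)) == [0, 1, 2]`.
class _RangeDataset(Dataset):
  """Toy dataset that yields the integers 0..n-1."""
  def __init__(self, n: int):
    super().__init__()
    self._n = n
  def _load_data(self) -> Iterator[Any]:
    return iter(range(self._n))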
| deepmind-research-master | wikigraphs/wikigraphs/data/dataset.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# WikiGraphs is licensed under the terms of the Creative Commons
# Attribution-ShareAlike 4.0 International (CC BY-SA 4.0) license.
#
# WikiText-103 data (unchanged) is licensed by Salesforce.com, Inc. under the
# terms of the Creative Commons Attribution-ShareAlike 4.0 International
# (CC BY-SA 4.0) license. You can find details about CC BY-SA 4.0 at:
#
# https://creativecommons.org/licenses/by-sa/4.0/legalcode
#
# Freebase data is licensed by Google LLC under the terms of the Creative
# Commons CC BY 4.0 license. You may obtain a copy of the License at:
#
# https://creativecommons.org/licenses/by/4.0/legalcode
#
# ==============================================================================
"""Wikitext-103 datasets."""
import re
from typing import NamedTuple, List
from absl import logging
import numpy as np
from wikigraphs.data import dataset
from wikigraphs.data import tokenizers
from wikigraphs.data import tools
# The data directory that contains subdirectories `wikitext-103` and
# `wikitext-103-raw`.
DATA_ROOT = '/tmp/data/wikitext-103'
class WikitextArticle(NamedTuple):
title: str
text: str
def articles_from_file(file_path: str) -> List[WikitextArticle]:
"""Read wikitext articles from file.
Args:
file_path: path to the input `.tokens` file.
Returns:
A list of `WikitextArticle` tuples.
"""
with open(file_path, mode='rb') as f:
content = f.read()
content = content.decode('utf-8')
title_re = re.compile(r'(\n = ([^=].*) = \n \n)')
parts = title_re.split(content)
# Skip the first part which is empty
return [WikitextArticle(title=parts[i+1], text=parts[i] + parts[i+2])
for i in range(1, len(parts), 3)]
class RawDataset(dataset.Dataset):
"""Raw text dataset for wikitext-103."""
def __init__(self,
subset: str = 'train',
shuffle_data: bool = False,
data_dir: str = None,
version: str = 'tokens'):
"""Constructor.
Args:
subset: which subset to load, one of {"train", "valid", "test"}.
shuffle_data: if set to True the data will be randomly shuffled.
data_dir: if provided will be used instead of the default `DATA_ROOT` as
the directory that contains the data.
version: one of {'tokens', 'raw'}
"""
super().__init__()
self._subset = subset
self._shuffle_data = shuffle_data
self._data_dir = data_dir or DATA_ROOT
self._dataset = None
allowed_versions = ('tokens', 'raw')
if version not in allowed_versions:
raise ValueError(f'Version must be one of {allowed_versions}.')
self._version = version
def _load_data(self):
"""Prepare data for another pass through the dataset."""
if self._dataset is None:
data_root = self._data_dir + ('-raw' if self._version == 'raw' else '')
self._dataset = articles_from_file(
f'{data_root}/wiki.{self._subset}.{self._version}')
def source():
n_articles = len(self._dataset)
if self._shuffle_data:
idx = np.random.permutation(n_articles)
else:
idx = np.arange(n_articles)
for i in range(n_articles):
yield self._dataset[idx[i]]
return source()
def normalize_title(title: str) -> str:
"""Normalize the wikitext article title by handling special characters."""
return title.replace(
'@-@', '-').replace('@,@', ',').replace('@.@', '.').replace(' ', '')
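# Illustrative example (not part of the original module): the wikitext escape
# tokens around punctuation are undone and all spaces are stripped.
def _normalize_title_example():
  assert normalize_title('Tower @-@ Bridge , London') == 'Tower-Bridge,London'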
class WikitextDataset(dataset.Dataset):
"""Tokenized dataset for wikitext-103."""
def __init__(self,
tokenizer: tokenizers.Tokenizer,
batch_size: int = 1,
timesteps: int = 128,
subset: str = 'train',
shuffle_data: bool = True,
data_dir: str = None,
repeat: bool = False,
debug: bool = False,
**kwargs):
"""Constructor.
Args:
tokenizer: a tokenizer for text data.
batch_size: number of sequences to put into a batch.
timesteps: length of the sequences.
subset: which subset to load, one of {"train", "valid", "test"}.
shuffle_data: if set to True the data will be randomly shuffled.
data_dir: if provided will be used instead of the default `DATA_ROOT` as
the directory that contains the data.
repeat: set to False to go through the data only once, otherwise go
through the data indefinitely.
debug: set to True to only load a small amount of data for fast debugging.
**kwargs: other arguments (for interface compatibility).
"""
super().__init__()
self._tokenizer = tokenizer
self._batch_size = batch_size
self._timesteps = timesteps
self._subset = subset
self._shuffle_data = shuffle_data
self._data_dir = data_dir
self._repeat = repeat
self._debug = debug
self._dataset = None
def _load_data(self):
"""Prepare data for one pass through the dataset."""
# Pre-tokenize everything in our dataset so we don't have to when going
# through the data more than once.
if not self._dataset:
raw_dataset = RawDataset(
subset=self._subset, shuffle_data=False, data_dir=self._data_dir)
if self._debug:
# Load a small number of examples for debugging.
self._dataset = [
self._tokenizer.encode(next(raw_dataset).text, prepend_bos=True)
for _ in range(5)]
else:
self._dataset = [self._tokenizer.encode(item.text, prepend_bos=True)
for item in raw_dataset]
logging.info('%s set loaded, total %d examples.',
self._subset, len(self._dataset))
    def source():
      if self._shuffle_data:
        idx = np.random.permutation(len(self._dataset))
      else:
        idx = np.arange(len(self._dataset))
      for i in idx:
        yield self._dataset[i]
def repeated_source():
if self._repeat:
while True:
yield from source()
else:
yield from source()
data_iter = tools.dynamic_batch(
repeated_source(),
self._batch_size,
self._timesteps + 1, # Extra token to count for the overlap.
return_incomplete_batch=True,
pad=True,
pad_value=self._tokenizer.pad_token())
data_iter = map(lambda x: dict( # pylint: disable=g-long-lambda
obs=x['obs'][:, :-1],
target=x['obs'][:, 1:],
should_reset=x['should_reset'][:, :-1],
mask=(x['obs'][:, 1:] != self._tokenizer.pad_token()).astype(
np.float32),
), data_iter)
return data_iter
def return_faux_batch(self):
"""Return a fake batch with the right shapes and dtypes."""
obs = np.zeros((self._batch_size, self._timesteps), dtype=np.int32)
target = np.zeros_like(obs, dtype=np.int32)
should_reset = np.zeros_like(obs, dtype=np.float32)
mask = np.zeros_like(obs, dtype=np.float32)
return dict(obs=obs, target=target, should_reset=should_reset, mask=mask)
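# Usage sketch (illustrative only; the vocab path is a placeholder):
#
#   tokenizer = tokenizers.WordTokenizer(
#       vocab_file='/tmp/data/wikitext-vocab.csv')
#   data = WikitextDataset(tokenizer, batch_size=4, timesteps=128,
#                          subset='valid', repeat=False)
#   for batch in data:
#     # batch['obs'][:, t] is the input token whose target is
#     # batch['target'][:, t]; padded positions have batch['mask'] == 0.
#     ...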
| deepmind-research-master | wikigraphs/wikigraphs/data/wikitext.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# WikiGraphs is licensed under the terms of the Creative Commons
# Attribution-ShareAlike 4.0 International (CC BY-SA 4.0) license.
#
# WikiText-103 data (unchanged) is licensed by Salesforce.com, Inc. under the
# terms of the Creative Commons Attribution-ShareAlike 4.0 International
# (CC BY-SA 4.0) license. You can find details about CC BY-SA 4.0 at:
#
# https://creativecommons.org/licenses/by-sa/4.0/legalcode
#
# Freebase data is licensed by Google LLC under the terms of the Creative
# Commons CC BY 4.0 license. You may obtain a copy of the License at:
#
# https://creativecommons.org/licenses/by/4.0/legalcode
#
# ==============================================================================
"""Tests for wikigraphs.data.paired_dataset."""
from absl.testing import absltest
import jraph
from wikigraphs.data import io_tools
from wikigraphs.data import paired_dataset
from wikigraphs.data import tokenizers
from wikigraphs.data import wikitext
WIKITEXT_ROOT = '/tmp/data/wikitext-103'
WIKIGRAPHS_ROOT = '/tmp/data/wikigraphs'
WIKITEXT_VOCAB_FILE = '/tmp/data/wikitext-vocab.csv'
GRAPH_VOCAB_FILE = '/tmp/data/graph-vocab.csv'
class PairedDatasetTest(absltest.TestCase):
def test_raw_paired_dataset_size(self):
dataset = paired_dataset.RawDataset(
subset='valid', shuffle_data=False, data_dir=WIKIGRAPHS_ROOT)
pairs = list(dataset)
self.assertLen(pairs, 48)
self.assertEqual(pairs[0].title, 'Homarus_gammarus')
self.assertEqual(pairs[-1].title, 'Rakie_Ayola')
# Make sure the content of the articles match the original
wikitext_set = wikitext.RawDataset(
subset='valid', shuffle_data=False, version='raw',
data_dir=WIKITEXT_ROOT)
title2article = {wikitext.normalize_title(a.title).replace(' ', ''): a.text
for a in wikitext_set}
for p in pairs:
title = io_tools.normalize_freebase_string(p.title).replace(' ', '')
article = title2article.get(title, None)
self.assertIsNotNone(article)
self.assertEqual(article, p.text)
def test_graph_from_edges(self):
edges = ['A\tE1\tB',
'A\tE2\tC',
'B\tE1\tC',
'C\tE3\tD',
'C\tE2\tE']
graph = paired_dataset.Graph.from_edges(edges)
self.assertEqual(graph.nodes(), ['A', 'B', 'C', 'D', 'E'])
self.assertEqual(graph.edges(), [(0, 1, 'E1'),
(0, 2, 'E2'),
(1, 2, 'E1'),
(2, 3, 'E3'),
(2, 4, 'E2')])
def test_graph_to_edges(self):
edges = ['A\tE1\tB',
'A\tE2\tC',
'B\tE1\tC',
'C\tE3\tD',
'C\tE2\tE']
graph = paired_dataset.Graph.from_edges(edges)
self.assertEqual(graph.to_edges(), edges)
def test_bow2text_dataset(self):
tokenizer = tokenizers.WordTokenizer(vocab_file=WIKITEXT_VOCAB_FILE)
graph_tokenizer = tokenizers.GraphTokenizer(vocab_file=GRAPH_VOCAB_FILE)
batch_size = 4
seq_len = 256
dataset = paired_dataset.Bow2TextDataset(
tokenizer,
graph_tokenizer,
batch_size=batch_size,
timesteps=seq_len,
subset='valid',
subsample_nodes=0.7,
repeat=False,
data_dir=WIKIGRAPHS_ROOT)
num_tokens = 0
for batch in dataset:
num_tokens += batch['mask'].sum()
self.assertEqual(batch['graphs'].shape,
(batch_size, graph_tokenizer.vocab_size))
raw_dataset = paired_dataset.RawDataset(subset='valid', shuffle_data=False)
raw_num_tokens = 0
n_pairs = 0
for pair in raw_dataset:
raw_num_tokens += len(tokenizer.encode(
pair.text, prepend_bos=True, append_eos=True))
n_pairs += 1
# The first token of each example is not counted by `mask` as it masks the
# targets, and the first token of each example never appears in the targets.
self.assertEqual(raw_num_tokens, num_tokens + n_pairs)
def test_graph2text_dataset(self):
tokenizer = tokenizers.WordTokenizer(vocab_file=WIKITEXT_VOCAB_FILE)
graph_tokenizer = tokenizers.GraphTokenizer(vocab_file=GRAPH_VOCAB_FILE)
batch_size = 4
seq_len = 256
dataset = paired_dataset.Graph2TextDataset(
tokenizer,
graph_tokenizer,
batch_size=batch_size,
timesteps=seq_len,
subsample_nodes=0.8,
subset='valid',
data_dir=WIKIGRAPHS_ROOT)
data_iter = iter(dataset)
batch = next(data_iter)
self.assertEqual(batch['obs'].shape, (batch_size, seq_len))
self.assertEqual(batch['target'].shape, (batch_size, seq_len))
self.assertEqual(batch['should_reset'].shape, (batch_size, seq_len))
self.assertEqual(batch['mask'].shape, (batch_size, seq_len))
self.assertIsInstance(batch['graphs'], list)
self.assertLen(batch['graphs'], batch_size)
for i in range(batch_size):
self.assertIsInstance(batch['graphs'][i], jraph.GraphsTuple)
# +1 for the center_node mask
self.assertEqual(
batch['graphs'][i].nodes.shape[-1], graph_tokenizer.vocab_size + 1)
self.assertEqual(
batch['graphs'][i].edges.shape[-1], graph_tokenizer.vocab_size)
n_edges = batch['graphs'][i].n_edge
self.assertEqual(batch['graphs'][i].senders.shape, (n_edges,))
self.assertEqual(batch['graphs'][i].receivers.shape, (n_edges,))
# Make sure the token count matches across the tokenized data and the raw
# data set.
num_tokens = 0
for batch in dataset:
num_tokens += batch['mask'].sum()
raw_dataset = paired_dataset.RawDataset(subset='valid', shuffle_data=False)
raw_num_tokens = 0
n_pairs = 0
for pair in raw_dataset:
raw_num_tokens += len(tokenizer.encode(
pair.text, prepend_bos=True, append_eos=True))
n_pairs += 1
# The first token of each example is not counted by `mask` as it masks the
# targets, and the first token of each example never appears in the targets.
self.assertEqual(raw_num_tokens, num_tokens + n_pairs)
def test_text_only_dataset(self):
tokenizer = tokenizers.WordTokenizer(vocab_file=WIKITEXT_VOCAB_FILE)
batch_size = 4
seq_len = 256
dataset = paired_dataset.TextOnlyDataset(
tokenizer,
batch_size=batch_size,
timesteps=seq_len,
subset='valid',
data_dir=WIKIGRAPHS_ROOT)
data_iter = iter(dataset)
batch = next(data_iter)
faux_batch = dataset.return_faux_batch()
self.assertCountEqual(list(batch.keys()),
['obs', 'target', 'should_reset', 'mask'])
self.assertCountEqual(list(faux_batch.keys()),
['obs', 'target', 'should_reset', 'mask'])
for k, v in batch.items():
faux_v = faux_batch[k]
self.assertEqual(v.shape, faux_v.shape)
self.assertEqual(v.dtype, faux_v.dtype)
self.assertEqual(batch['obs'].shape, (batch_size, seq_len))
self.assertEqual(batch['target'].shape, (batch_size, seq_len))
self.assertEqual(batch['should_reset'].shape, (batch_size, seq_len))
self.assertEqual(batch['mask'].shape, (batch_size, seq_len))
num_tokens = 0
for batch in dataset:
num_tokens += batch['mask'].sum()
raw_dataset = paired_dataset.RawDataset(subset='valid', shuffle_data=False)
raw_num_tokens = 0
n_pairs = 0
for pair in raw_dataset:
raw_num_tokens += len(tokenizer.encode(
pair.text, prepend_bos=True, append_eos=True))
n_pairs += 1
self.assertEqual(num_tokens + n_pairs, raw_num_tokens)
def test_bow_retrieval_dataset(self):
tokenizer = tokenizers.WordTokenizer(vocab_file=WIKITEXT_VOCAB_FILE)
graph_tokenizer = tokenizers.GraphTokenizer(vocab_file=GRAPH_VOCAB_FILE)
batch_size = 4
seq_len = 256
dataset = paired_dataset.Bow2TextDataset(
tokenizer,
graph_tokenizer,
batch_size=batch_size,
timesteps=seq_len,
subsample_nodes=0.8,
graph_retrieval_dataset=True,
subset='valid',
data_dir=WIKIGRAPHS_ROOT)
data_iter = iter(dataset)
batch = next(data_iter)
self.assertEqual(batch['obs'].shape, (batch_size, seq_len))
self.assertEqual(batch['target'].shape, (batch_size, seq_len))
self.assertEqual(batch['should_reset'].shape, (batch_size, seq_len))
self.assertEqual(batch['mask'].shape, (batch_size, seq_len))
self.assertEqual(batch['graph_id'].shape, (batch_size,))
self.assertEqual(batch['seq_id'].shape, (batch_size,))
def test_graph_retrieval_dataset(self):
tokenizer = tokenizers.WordTokenizer(vocab_file=WIKITEXT_VOCAB_FILE)
graph_tokenizer = tokenizers.GraphTokenizer(vocab_file=GRAPH_VOCAB_FILE)
batch_size = 4
seq_len = 256
dataset = paired_dataset.Graph2TextDataset(
tokenizer,
graph_tokenizer,
batch_size=batch_size,
timesteps=seq_len,
subsample_nodes=0.8,
graph_retrieval_dataset=True,
subset='valid',
data_dir=WIKIGRAPHS_ROOT)
data_iter = iter(dataset)
batch = next(data_iter)
self.assertEqual(batch['obs'].shape, (batch_size, seq_len))
self.assertEqual(batch['target'].shape, (batch_size, seq_len))
self.assertEqual(batch['should_reset'].shape, (batch_size, seq_len))
self.assertEqual(batch['mask'].shape, (batch_size, seq_len))
self.assertEqual(batch['graph_id'].shape, (batch_size,))
self.assertEqual(batch['seq_id'].shape, (batch_size,))
if __name__ == '__main__':
absltest.main()
| deepmind-research-master | wikigraphs/wikigraphs/data/paired_dataset_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# WikiGraphs is licensed under the terms of the Creative Commons
# Attribution-ShareAlike 4.0 International (CC BY-SA 4.0) license.
#
# WikiText-103 data (unchanged) is licensed by Salesforce.com, Inc. under the
# terms of the Creative Commons Attribution-ShareAlike 4.0 International
# (CC BY-SA 4.0) license. You can find details about CC BY-SA 4.0 at:
#
# https://creativecommons.org/licenses/by-sa/4.0/legalcode
#
# Freebase data is licensed by Google LLC under the terms of the Creative
# Commons CC BY 4.0 license. You may obtain a copy of the License at:
#
# https://creativecommons.org/licenses/by/4.0/legalcode
#
# ==============================================================================
"""Tokenizers for text data."""
import abc
import csv
import io
import re
from typing import List
import nltk
import numpy as np
from wikigraphs.data import io_tools
class Tokenizer(abc.ABC):
"""Base class for tokenizers."""
@abc.abstractmethod
def encode(self,
inputs: str,
prepend_bos: bool = False,
append_eos: bool = False) -> np.ndarray:
"""Encode input string into an array of token IDs.
Args:
inputs: a string.
prepend_bos: set to True to add <bos> token at the beginning of the token
sequence.
append_eos: set to True to add <eos> token at the end of the token
sequence.
Returns:
tokens: [n_tokens] int array.
"""
@abc.abstractmethod
def decode(self, inputs) -> str:
"""Decode a sequence of tokens back into a string.
Args:
inputs: array or list of ints.
Returns:
s: the decoded string using this tokenizer.
"""
@property
@abc.abstractmethod
def vocab_size(self) -> int:
"""Size of the vocabulary."""
@abc.abstractmethod
def pad_token(self) -> int:
"""ID of the <pad> token."""
@abc.abstractmethod
def bos_token(self) -> int:
"""ID of the <bos> token."""
class WordTokenizer(Tokenizer):
"""Word-level tokenizer for white-space separated text data."""
def __init__(self, vocab_file: str):
"""Constructor.
Args:
vocab_file: a csv vocab file.
"""
content = io_tools.read_txt_file(vocab_file, encoding='utf-8')
with io.StringIO(content) as f:
r = csv.reader(f)
vocab = [w for w, _ in r]
# Add pad and bos tokens to the vocab
to_add = ['<pad>', '<bos>']
if '<unk>' not in vocab:
to_add.append('<unk>')
vocab = to_add + vocab
# token-index mappings
self._t2i = {t: i for i, t in enumerate(vocab)}
self._i2t = {i: t for t, i in self._t2i.items()}
self._unk_token = self._t2i['<unk>']
self._bos_token = self._t2i['<bos>']
self._pad_token = self._t2i['<pad>']
@property
def vocab_size(self):
return len(self._t2i)
def encode(self, inputs, prepend_bos=False, append_eos=False):
tokens = [self._t2i.get(t, self._unk_token) for t in inputs.split(' ') if t]
if prepend_bos:
tokens = [self._bos_token] + tokens
if append_eos:
# Reuse <bos> as <eos>.
tokens.append(self._bos_token)
return np.array(tokens, dtype=np.int32)
def decode(self, inputs):
"""Decode a sequence of token IDs back into a string."""
# Remove the first <bos> token if there is any.
if inputs[0] == self._bos_token:
inputs = inputs[1:]
tokens = []
for i in inputs:
# Use <bos> also as <eos> and stop there.
if i == self._bos_token:
break
tokens.append(self._i2t[i])
return ' '.join(tokens)
def pad_token(self):
return self._pad_token
def bos_token(self):
return self._bos_token
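# Usage sketch (illustrative only; `vocab.csv` is a placeholder path). Note
# that <bos> doubles as <eos>, so decoding stops at the first <bos> after the
# start of the sequence:
#
#   tok = WordTokenizer('vocab.csv')
#   ids = tok.encode('the cat sat', prepend_bos=True, append_eos=True)
#   tok.decode(ids)  # -> 'the cat sat' (out-of-vocab words become '<unk>')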
class GraphTokenizer:
"""Tokenizer for the content on the graphs."""
def __init__(self, vocab_file: str):
"""Constructor.
Args:
vocab_file: path to a vocab file.
"""
content = io_tools.read_txt_file(vocab_file, encoding='utf-16')
vocab = content.split('\n')
vocab = ['<pad>', '<bos>', '<unk>'] + vocab
# token-index mappings
self._t2i = {t: i for i, t in enumerate(vocab)}
self._i2t = {i: t for t, i in self._t2i.items()}
self._unk_token = self._t2i['<unk>']
self._bos_token = self._t2i['<bos>']
self._pad_token = self._t2i['<pad>']
@property
def vocab_size(self):
return len(self._t2i)
def encode_node(self, txt: str) -> np.ndarray:
return np.array([self._t2i.get(t, self._unk_token)
for t in self.split_node(txt)])
def encode_edge(self, txt: str) -> np.ndarray:
return np.array([self._t2i.get(t, self._unk_token)
for t in self.split_edge(txt)])
def encode(self, inputs, prepend_bos=False, append_eos=False):
tokens = [self._t2i.get(t, self._unk_token) for t in inputs.split(' ') if t]
if prepend_bos:
tokens = [self._bos_token] + tokens
if append_eos:
# Reuse <bos> as <eos>.
tokens.append(self._bos_token)
return np.array(tokens, dtype=np.int32)
def decode(self, inputs):
"""Decode a sequence of token IDs back into a string."""
# Remove the first <bos> token if there is any.
if inputs[0] == self._bos_token:
inputs = inputs[1:]
tokens = []
for i in inputs:
# Use <bos> also as <eos> and stop there.
if i == self._bos_token:
break
tokens.append(self._i2t[i])
return ' '.join(tokens)
@classmethod
def split_node(cls, txt: str) -> List[str]:
"""Split a node string into a sequence of tokens."""
if txt[0] == '"' and txt[-1] == '"': # Node is a string literal.
tokens = nltk.wordpunct_tokenize(io_tools.normalize_freebase_string(
txt[1:-1].lower()))
for i, t in enumerate(tokens):
if t.isnumeric():
tokens[i] = '<number>'
return tokens
else: # If node is not a string literal it is always an entity.
return ['<entity>']
@classmethod
def split_edge(cls, txt: str) -> List[str]:
"""Split an edge string into a sequence of tokens."""
return re.split('[._ ]+', txt.lower().split('/')[1])
def pad_token(self):
return self._pad_token
def bos_token(self):
return self._bos_token
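# Illustrative examples for `split_node` / `split_edge` above. The exact
# Freebase formats are assumptions here: entity nodes are unquoted, string
# literals are wrapped in double quotes, and edge types look like
# 'ns/people.person.nationality'.
#
#   GraphTokenizer.split_node('ns/m.0abc')       # -> ['<entity>']
#   GraphTokenizer.split_node('"born in 1984"')  # -> ['born', 'in', '<number>']
#   GraphTokenizer.split_edge('ns/people.person.nationality')
#   # -> ['people', 'person', 'nationality']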
| deepmind-research-master | wikigraphs/wikigraphs/data/tokenizers.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the Geometric Manifold Component Estimator (GEOMANCER)."""
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from geomancer import geomancer
class GeomancerTest(parameterized.TestCase):
@parameterized.parameters(
{'zero_trace': False},
{'zero_trace': True})
def test_sym_op(self, zero_trace):
"""sym_op on tril(X) gives same result as QXQ' for symmetric X?"""
n = 5
x = np.random.randn(n, n)
x += x.T
if zero_trace:
np.fill_diagonal(x, np.diag(x)-np.trace(x)/n)
q, _ = np.linalg.qr(np.random.randn(n, n))
sym_q = geomancer.sym_op(q, zero_trace=zero_trace)
tril_x = x[np.tril_indices(n)]
if zero_trace:
tril_x = tril_x[:-1]
vec_y = sym_q @ tril_x
y = q @ x @ q.T
y_ = geomancer.vec_to_sym(vec_y, n, zero_trace=zero_trace)
np.testing.assert_allclose(y_, y)
def test_ffdiag(self):
k = 2
n = 5
w, _ = np.linalg.qr(np.random.randn(n, n))
psi = np.random.randn(k, n)
a = np.zeros((k, n, n))
for i in range(k):
a[i] = w @ np.diag(psi[i]) @ w.T
w_ = geomancer.ffdiag(a)
for i in range(k):
x = w_ @ a[i] @ w_.T
diag = np.diag(x).copy()
np.fill_diagonal(x, 1.0)
# check that x is diagonal
np.testing.assert_allclose(x, np.eye(n), rtol=1e-10, atol=1e-10)
self.assertTrue(np.all(np.min(
np.abs(diag[None, :] - psi[i][:, None]), axis=0) < 1e-10))
def test_make_nearest_neighbor_graph(self):
n = 100
# make points on a circle
data = np.zeros((n, 2))
for i in range(n):
data[i, 0] = np.sin(i*2*np.pi/n)
data[i, 1] = np.cos(i*2*np.pi/n)
graph = geomancer.make_nearest_neighbors_graph(data, 4, n=10)
for i in range(n):
self.assertLen(graph.rows[i], 4)
self.assertIn((i+1) % n, graph.rows[i])
self.assertIn((i+2) % n, graph.rows[i])
self.assertIn((i-1) % n, graph.rows[i])
self.assertIn((i-2) % n, graph.rows[i])
if __name__ == '__main__':
absltest.main()
| deepmind-research-master | geomancer/geomancer_test.py |
# Copyright 2020 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup for pip package."""
from setuptools import find_packages
from setuptools import setup
REQUIRED_PACKAGES = ['numpy', 'scipy', 'matplotlib', 'absl-py', 'tqdm']
setup(
name='geomancer',
version='0.1',
description='A library for the Geometric Manifold Component Estimator.',
url='https://github.com/deepmind/deepmind-research/geomancer',
author='DeepMind',
author_email='[email protected]',
# Contained modules and scripts.
packages=find_packages(),
install_requires=REQUIRED_PACKAGES,
platforms=['any'],
license='Apache 2.0',
)
| deepmind-research-master | geomancer/setup.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run GEOMANCER on products of synthetic manifolds."""
import re
from absl import app
from absl import flags
from absl import logging
import geomancer
from matplotlib import gridspec
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import special_ortho_group
from tqdm import tqdm
SPECIFICATION = flags.DEFINE_list(
name='specification', default=['S^2', 'S^2'], help='List of submanifolds')
NPTS = flags.DEFINE_integer(
name='npts', default=1000, help='Number of data points')
ROTATE = flags.DEFINE_boolean(
name='rotate', default=False, help='Apply random rotation to the data')
PLOT = flags.DEFINE_boolean(
name='plot', default=True, help='Whether to enable plotting')
def make_so_tangent(q):
"""Given an n x n orthonormal matrix, return a basis for its tangent space."""
n = q.shape[0]
assert np.allclose(q.T @ q, np.eye(n), atol=1e-4, rtol=1e-4)
a = np.zeros((n, n))
ii = 0
dq = np.zeros((n, n, n*(n-1)//2))
for i in range(n):
for j in range(i+1, n):
a[i, j] = 1
a[j, i] = -1
dq[..., ii] = a @ q # tangent vectors are skew-symmetric matrix times Q
a[i, j] = 0
a[j, i] = 0
ii += 1
# reshape and orthonormalize the result
return np.linalg.qr(np.reshape(dq, (n**2, n*(n-1)//2)))[0]
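# Illustrative check (not part of the original script): the tangent space of
# SO(3) at a rotation Q has dimension 3*(3-1)/2 = 3, and the returned basis
# has orthonormal columns in R^9.
def _so_tangent_example():
  q = special_ortho_group.rvs(3)
  basis = make_so_tangent(q)
  assert basis.shape == (9, 3)
  np.testing.assert_allclose(basis.T @ basis, np.eye(3), atol=1e-10)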
def make_sphere_tangent(x):
_, _, v = np.linalg.svd(x[None, :])
return v[:, 1:]
def make_true_tangents(spec, data):
"""Return a set of orthonormal bases, one for each submanifold."""
for i in range(spec.shape[1]):
assert spec[0, i] == 0 or spec[1, i] == 0
so_dim = sum(dim ** 2 for dim in spec[0])
sphere_dim = sum(dim+1 if dim > 0 else 0 for dim in spec[1])
assert so_dim + sphere_dim == data.shape[0]
ii = 0
tangents = []
for i in range(spec.shape[1]):
if spec[0, i] != 0:
dim = spec[0, i]
tangents.append(make_so_tangent(np.reshape(data[ii:ii+dim**2],
(dim, dim))))
ii += dim ** 2
else:
dim = spec[1, i]
tangents.append(make_sphere_tangent(data[ii:ii+dim+1]))
ii += dim + 1
tangents2 = []
for i in range(len(tangents)):
size1 = sum(x.shape[0] for x in tangents[:i])
size2 = sum(x.shape[0] for x in tangents[i+1:])
tangents2.append(np.concatenate(
(np.zeros((size1, tangents[i].shape[1])),
tangents[i],
np.zeros((size2, tangents[i].shape[1]))), axis=0))
return tangents2
def make_product_manifold(specification, npts):
"""Generate data from a product of manifolds with the given specification."""
data = []
tangents = []
latent_dim = 0
spec_array = np.zeros((2, len(specification)), dtype=np.int32)
for i, spec in enumerate(specification):
so_spec = re.search(r'SO\(([0-9]+)\)', spec) # matches "SO(<numbers>)"
sphere_spec = re.search(r'S\^([0-9]+)', spec) # matches "S^<numbers>"
if sphere_spec is not None:
dim = int(sphere_spec.group(1))
spec_array[1, i] = dim
latent_dim += dim
dat = np.random.randn(npts, dim+1)
dat /= np.tile(np.sqrt(np.sum(dat**2, axis=1)[..., None]),
[1, dim+1])
elif so_spec is not None:
dim = int(so_spec.group(1))
spec_array[0, i] = dim
latent_dim += dim * (dim - 1) // 2
dat = [np.ndarray.flatten(special_ortho_group.rvs(dim), order='C')
for _ in range(npts)]
dat = np.stack(dat)
else:
raise ValueError(f'Unrecognized manifold: {spec}')
data.append(dat)
data = np.concatenate(data, axis=1)
for i in range(spec_array.shape[1]):
if spec_array[0, i] != 0:
dim = spec_array[0, i]
tangents.append(np.zeros((npts, data.shape[1], dim * (dim - 1) // 2)))
elif spec_array[1, i] != 0:
dim = spec_array[1, i]
tangents.append(np.zeros((npts, data.shape[1], dim)))
for i in tqdm(range(npts)):
true_tangent = make_true_tangents(spec_array, data[i])
for j in range(len(specification)):
tangents[j][i] = true_tangent[j]
logging.info('Constructed data and true tangents for %s',
' x '.join(specification))
return data, latent_dim, tangents
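# Illustrative check (not part of the original script): shapes produced for a
# small product of a 2-sphere and SO(3).
def _product_manifold_shapes_example():
  data, latent_dim, tangents = make_product_manifold(['S^2', 'SO(3)'], npts=10)
  assert data.shape == (10, 3 + 9)  # S^2 embedded in R^3, SO(3) flattened.
  assert latent_dim == 2 + 3  # dim(S^2) + dim(SO(3)).
  assert [t.shape for t in tangents] == [(10, 12, 2), (10, 12, 3)]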
def main(_):
# Generate data and run GEOMANCER
data, dim, tangents = make_product_manifold(SPECIFICATION.value, NPTS.value)
if ROTATE.value:
rot, _ = np.linalg.qr(np.random.randn(data.shape[1], data.shape[1]))
data_rot = data @ rot.T
components, spectrum = geomancer.fit(data_rot, dim)
errors = geomancer.eval_unaligned(data_rot, components, data, tangents)
else:
components, spectrum = geomancer.fit(data, dim)
errors = geomancer.eval_aligned(components, tangents)
logging.info('Error between subspaces: %.2f +/- %.2f radians',
np.mean(errors),
np.std(errors))
if PLOT.value:
# Plot spectrum
plt.figure(figsize=(8, 6))
plt.scatter(np.arange(len(spectrum)), spectrum, s=100)
largest_gap = np.argmax(spectrum[1:]-spectrum[:-1]) + 1
plt.axvline(largest_gap, linewidth=2, c='r')
plt.xticks([])
plt.yticks(fontsize=18)
plt.xlabel('Index', fontsize=24)
plt.ylabel('Eigenvalue', fontsize=24)
plt.title('GeoManCEr Eigenvalue Spectrum', fontsize=24)
# Plot subspace bases
fig = plt.figure(figsize=(8, 6))
bases = components[0]
gs = gridspec.GridSpec(1, len(bases),
width_ratios=[b.shape[1] for b in bases])
for i in range(len(bases)):
ax = plt.subplot(gs[i])
ax.imshow(bases[i])
ax.set_xticks([])
ax.set_yticks([])
ax.set_title(r'$T_{\mathbf{x}_1}\mathcal{M}_%d$' % (i+1), fontsize=18)
fig.canvas.set_window_title('GeoManCEr Results')
# Plot ground truth
fig = plt.figure(figsize=(8, 6))
gs = gridspec.GridSpec(1, len(tangents),
width_ratios=[b.shape[2] for b in tangents])
for i, spec in enumerate(SPECIFICATION.value):
ax = plt.subplot(gs[i])
ax.imshow(tangents[i][0])
ax.set_xticks([])
ax.set_yticks([])
ax.set_title(r'$T_{\mathbf{x}_1}%s$' % spec, fontsize=18)
fig.canvas.set_window_title('Ground Truth')
plt.show()
if __name__ == '__main__':
app.run(main)
| deepmind-research-master | geomancer/train.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code for the Geometric Manifold Component Estimator (GEOMANCER)."""
import itertools
from absl import logging
import numpy as np
import scipy
import scipy.sparse
import scipy.sparse.linalg
from tqdm import tqdm
def sym_op(x, zero_trace=False):
"""Given X, makes L(A) = X @ A @ X' for symmetric matrices A.
If A is not symmetric, L(A) will return X @ (A_L + A_L') @ X' where A_L is
  the lower-triangular part of A (with the diagonal divided by 2).
Args:
x: The matrix from which to construct the operator
zero_trace (optional): If true, restrict the operator to only act on
matrices with zero trace, effectively reducing the dimensionality by one.
Returns:
A matrix Y such that vec(L(A)) = Y @ vec(A).
"""
n = x.shape[0]
# Remember to subtract off the diagonal once
xx = (np.einsum('ik,jl->ijkl', x, x) +
np.einsum('il,jk->ijkl', x, x) -
np.einsum('ik,jl,kl->ijkl', x, x, np.eye(n)))
xx = xx[np.tril_indices(n)]
xx = xx.transpose(1, 2, 0)
xx = xx[np.tril_indices(n)]
xx = xx.T
if zero_trace:
diag_idx = np.cumsum([0]+list(range(2, n)))
proj_op = np.eye(n*(n+1)//2)[:, :-1]
proj_op[-1, diag_idx] = -1
# multiply by operator that completes last element of diagonal
# for a zero-trace matrix
xx = xx @ proj_op
xx = xx[:-1]
return xx
def vec_to_sym(x, n, zero_trace=False):
y = np.zeros((n, n))
if zero_trace:
x = np.append(x, 0.0)
y[np.tril_indices(n)] = x
y += y.T
y[np.diag_indices(n)] /= 2.0
if zero_trace:
y[-1, -1] = -np.trace(y)
return y
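# Illustrative check (mirrors the unit test): applying `sym_op(q)` to the
# vectorized lower triangle of a symmetric A and mapping back with
# `vec_to_sym` reproduces q @ A @ q.T.
def _sym_op_example():
  n = 4
  a = np.random.randn(n, n)
  a += a.T  # Make A symmetric.
  q, _ = np.linalg.qr(np.random.randn(n, n))
  vec_y = sym_op(q) @ a[np.tril_indices(n)]
  np.testing.assert_allclose(vec_to_sym(vec_y, n), q @ a @ q.T)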
def ffdiag(data, lr=1.0, tol=1e-10, verbose=False, eig_init=False):
"""Orthogonal FFDiag algorithm of Ziehe et al 2004."""
n = data.shape[1]
k = data.shape[0]
c = data.copy()
if eig_init:
_, v = np.linalg.eig(data[0])
v = v.T
for i in range(k):
c[i] = v @ c[i] @ v.T
else:
v = np.eye(n)
err_ = np.inf
for t in range(10000):
w = np.zeros((n, n))
for i in range(n):
for j in range(i+1, n):
diag = c[:, i, i] - c[:, j, j]
w[i, j] = np.sum(c[:, i, j] * diag) / np.sum(diag ** 2)
w -= w.T
norm = np.linalg.svd(w, compute_uv=False).max()
if norm > lr:
w *= lr / norm
ew = scipy.linalg.expm(w)
v = ew @ v
for i in range(k):
c[i] = ew @ c[i] @ ew.T
cdiag = c.copy()
for i in range(n):
for j in range(k):
cdiag[j, i, i] = 0
err = np.linalg.norm(cdiag)
if verbose:
logging.info('Iter %d: %f', t, err)
if err_ - err < tol and err_ - err >= 0:
return v
err_ = err
return v
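# Illustrative check (mirrors the unit test): `ffdiag` recovers an orthogonal
# transform that jointly diagonalizes a commuting family of symmetric matrices
# built from a shared eigenbasis.
def _ffdiag_example():
  n, k = 5, 3
  w, _ = np.linalg.qr(np.random.randn(n, n))
  mats = np.stack([w @ np.diag(np.random.randn(n)) @ w.T for _ in range(k)])
  v = ffdiag(mats)
  for m in mats:
    d = v @ m @ v.T
    assert np.abs(d - np.diag(np.diag(d))).max() < 1e-6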
def avg_angle_between_subspaces(xs, ys):
"""Compute the error between two sets of subspaces."""
if len(xs) != len(ys):
return np.pi / 2 # largest possible angle
angles = []
for ys_perm in itertools.permutations(ys):
angles.append([])
for i in range(len(xs)):
if xs[i].shape[1] == ys_perm[i].shape[1]:
sigma = np.linalg.svd(xs[i].T @ ys_perm[i], compute_uv=False)
angles[-1].append(np.arccos(np.min(sigma)))
else:
angles[-1].append(np.pi / 2)
angles = np.array(angles)
return np.min(np.mean(angles, axis=1))
def make_nearest_neighbors_graph(data, k, n=1000):
"""Build exact k-nearest neighbors graph from numpy data.
Args:
    data: Data to compute nearest neighbors of; each row is one point.
k: number of nearest neighbors to compute
n (optional): number of neighbors to compute simultaneously
Returns:
A scipy sparse matrix in LIL format giving the symmetric nn graph.
"""
shape = data.shape
assert shape[0] % n == 0
nbr_graph = scipy.sparse.lil_matrix((shape[0], shape[0]))
norm = np.sum(data**2, axis=1)
cols = np.meshgrid(np.arange(n), np.ones(k+1))[0]
for i in tqdm(range(0, shape[0], n)):
dot = data @ data[i:i+n].T
dists = np.sqrt(np.abs(norm[:, None] - 2*dot + norm[i:i+n][None, :]))
idx = np.argpartition(dists, k, axis=0)[:k+1]
nbrs = idx[np.argsort(dists[idx, cols], axis=0), cols][1:]
for j in range(n):
nbr_graph[i+j, nbrs[:, j]] = 1
# Symmetrize graph
for i in tqdm(range(shape[0])):
for j in nbr_graph.rows[i]:
if nbr_graph[j, i] == 0:
nbr_graph[j, i] = nbr_graph[i, j]
logging.info('Symmetrized neighbor graph')
return nbr_graph
def make_tangents(data, neighbor_graph, k):
"""Construct all tangent vectors for the dataset."""
tangents = np.zeros((data.shape[0], k, data.shape[1]), dtype=np.float32)
for i in tqdm(range(data.shape[0])):
diff = data[neighbor_graph.rows[i]] - data[i]
_, _, u = np.linalg.svd(diff, full_matrices=False)
tangents[i] = u[:k]
logging.info('Computed all tangents')
return tangents
def make_connection(tangents, neighbor_graph):
"""Make connection matrices for all edges of the neighbor graph."""
connection = {}
for i in tqdm(range(tangents.shape[0])):
for j in neighbor_graph.rows[i]:
if j > i:
uy, _, ux = np.linalg.svd(tangents[j] @ tangents[i].T,
full_matrices=False)
conn = uy @ ux
connection[(i, j)] = conn
connection[(j, i)] = conn.T
logging.info('Constructed all connection matrices')
return connection
def make_laplacian(connection, neighbor_graph, sym=True, zero_trace=True):
"""Make symmetric zero-trace second-order graph connection Laplacian."""
n = neighbor_graph.shape[0]
k = list(connection.values())[0].shape[0]
bsz = (k*(k+1)//2 - 1 if zero_trace else k*(k+1)//2) if sym else k**2
data = np.zeros((neighbor_graph.nnz + n, bsz, bsz), dtype=np.float32)
indptr = []
indices = np.zeros(neighbor_graph.nnz + n)
index = 0
for i in tqdm(range(n)):
indptr.append(index)
data[index] = len(neighbor_graph.rows[i]) * np.eye(bsz)
indices[index] = i
index += 1
for j in neighbor_graph.rows[i]:
if sym:
kron = sym_op(connection[(j, i)], zero_trace=zero_trace)
else:
kron = np.kron(connection[(j, i)], connection[(j, i)])
data[index] = -kron
indices[index] = j
index += 1
indptr.append(index)
indptr = np.array(indptr)
laplacian = scipy.sparse.bsr_matrix((data, indices, indptr),
shape=(n*bsz, n*bsz))
logging.info('Built 2nd-order graph connection Laplacian.')
return laplacian
def cluster_subspaces(omega):
"""Cluster different dimensions from the eigenvectors of the Laplacian."""
w = ffdiag(omega) # simultaneous diagonalization
psi = np.zeros(omega.shape[:2])
for i in range(omega.shape[0]):
psi[i] = np.diag(w @ omega[i] @ w.T) # compute diagonals
# Compute cosine similarity of diagonal vectors
psi_outer = psi.T @ psi
psi_diag = np.diag(psi_outer)
cos_similarity = psi_outer / np.sqrt(np.outer(psi_diag, psi_diag))
adj = cos_similarity > 0.5 # adjacency matrix for graph of clusters
# Use graph Laplacian to find cliques
# (though a greedy algorithm could work too)
lapl = np.diag(np.sum(adj, axis=0)) - adj # graph Laplacian
d, v = np.linalg.eig(lapl)
# connected components of graph
cliques = np.abs(v[:, np.abs(d) < 1e-6]) > 1e-6
tangents = [w[cliques[:, i]] for i in range(sum(np.abs(d) < 1e-6))]
return tangents
def fit(data, k, gamma=None, nnbrs=None, neig=10, shard_size=1000):
"""The Geometric Manifold Component Estimator.
Args:
data: the dataset, a set of points sampled from a product manifold.
k: the dimensionality of the manifold.
gamma (optional): the threshold in the spectrum at which to cut off the
number of submanifolds.
nnbrs (optional): number of neighbors to use for each point.
neig (optional): the total number of eigenvectors to compute.
shard_size (optional): the size of shard to use in knn computation.
Returns:
A list of lists of subspace bases, one list for each element of the dataset,
and the spectrum of the 2nd-order graph Laplacian.
"""
if not nnbrs:
nnbrs = 2*k
neighbor_graph = make_nearest_neighbors_graph(data, nnbrs, n=shard_size)
tangents = make_tangents(data, neighbor_graph, k)
connection = make_connection(tangents, neighbor_graph)
laplacian = make_laplacian(connection, neighbor_graph)
eigvals, eigvecs = scipy.sparse.linalg.eigsh(laplacian, k=neig, which='SM')
logging.info('Computed bottom eigenvectors of 2nd-order Laplacian')
bsz = k*(k+1)//2 - 1 # Block size for the projected 2nd-order Laplacian
if gamma:
nm = np.argwhere(eigvals < gamma)[-1, 0] + 1
else: # If no threshold is provided, just use the largest gap in the spectrum
nm = np.argmax(eigvals[1:] - eigvals[:-1]) + 1
eigvecs = eigvecs.reshape(data.shape[0], bsz, neig)
omega = np.zeros((nm, k, k), dtype=np.float32)
components = []
for i in tqdm(range(data.shape[0])):
for j in range(nm):
omega[j] = vec_to_sym(eigvecs[i, :, j], k, zero_trace=True)
components.append([tangents[i].T @ x.T for x in cluster_subspaces(omega)])
logging.info('GEOMANCER completed')
return components, eigvals
def eval_aligned(tangents, true_tangents):
"""Evaluation for aligned data."""
errors = np.zeros(len(tangents))
for i in tqdm(range(len(tangents))):
errors[i] = avg_angle_between_subspaces([gt[i] for gt in true_tangents],
tangents[i])
logging.info('Computed angles between ground truth and GEOMANCER results')
return errors
def eval_unaligned(data, tangents, true_data, true_tangents, k=10, n=1000):
"""Evaluation for unaligned data."""
logging.info('Evaluating unaligned data')
errors = np.zeros(data.shape[0])
nbrs = make_nearest_neighbors_graph(true_data, k=k, n=n)
for i in tqdm(range(data.shape[0])):
tangent = np.concatenate(tangents[i], axis=1)
true_tangent = np.concatenate([t[i] for t in true_tangents], axis=1)
dx_true = (true_data[nbrs.rows[i]] - true_data[i]) @ true_tangent
dx_result = (data[nbrs.rows[i]] - data[i]) @ tangent
# compute canonical correlations between the two dxs
xx = dx_true.T @ dx_true
yy = dx_result.T @ dx_result
xy = dx_true.T @ dx_result
xx_ = np.linalg.inv(xx)
yy_ = np.linalg.inv(yy)
foo = scipy.linalg.sqrtm(xx_) @ xy @ scipy.linalg.sqrtm(yy_)
u, _, v = np.linalg.svd(foo)
# project subspaces for results and ground truth into aligned space
proj = [v @ tangent.T @ s for s in tangents[i]]
true_proj = [u.T @ true_tangent.T @ s[i] for s in true_tangents]
errors[i] = avg_angle_between_subspaces(proj, true_proj)
return errors
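# Hypothetical usage sketch: run the estimator end-to-end on points sampled from
# S^1 x S^1 embedded in R^4. The settings below (1000 points, shard_size=500) are
# illustrative assumptions chosen only so the shapes line up, not recommended values.
if __name__ == '__main__':
  rng = np.random.RandomState(0)
  thetas = rng.uniform(0, 2 * np.pi, size=(1000, 2))
  samples = np.stack([np.cos(thetas[:, 0]), np.sin(thetas[:, 0]),
                      np.cos(thetas[:, 1]), np.sin(thetas[:, 1])], axis=1)
  subspaces, spectrum = fit(samples, k=2, shard_size=500)
  logging.info('Recovered %d factors per point, spectrum: %s',
               len(subspaces[0]), spectrum)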
| deepmind-research-master | geomancer/geomancer.py |
from setuptools import setup, find_packages
setup(
name = 'tr-rosetta-pytorch',
packages = find_packages(),
include_package_data = True,
entry_points={
'console_scripts': [
'tr_rosetta = tr_rosetta_pytorch.cli:predict',
],
},
version = '0.0.3',
license='MIT',
description = 'trRosetta - Pytorch',
author = 'Phil Wang',
author_email = '[email protected]',
url = 'https://github.com/lucidrains/tr-rosetta-pytorch',
keywords = [
'artificial intelligence',
'protein folding',
'protein design'
],
install_requires=[
'einops>=0.3',
'fire',
'numpy',
'torch>=1.6'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| tr-rosetta-pytorch-main | setup.py |
from tr_rosetta_pytorch.tr_rosetta_pytorch import trRosettaNetwork
| tr-rosetta-pytorch-main | tr_rosetta_pytorch/__init__.py |
import fire
import torch
import tarfile
import numpy as np
from pathlib import Path
from tr_rosetta_pytorch.tr_rosetta_pytorch import trRosettaNetwork
from tr_rosetta_pytorch.utils import preprocess, d
# paths
CURRENT_PATH = Path(__file__).parent
DEFAULT_MODEL_PATH = CURRENT_PATH / 'models'
MODEL_PATH = DEFAULT_MODEL_PATH / 'models.tar.gz'
MODEL_FILES = [*Path(DEFAULT_MODEL_PATH).glob('*.pt')]
# extract model files if not extracted
if len(MODEL_FILES) == 0:
tar = tarfile.open(str(MODEL_PATH))
tar.extractall(DEFAULT_MODEL_PATH)
tar.close()
# prediction function
@torch.no_grad()
def get_ensembled_predictions(input_file, output_file=None, model_dir=DEFAULT_MODEL_PATH):
net = trRosettaNetwork()
i = preprocess(input_file)
if output_file is None:
input_path = Path(input_file)
output_file = f'{input_path.parents[0] / input_path.stem}.npz'
outputs = []
model_files = [*Path(model_dir).glob('*.pt')]
if len(model_files) == 0:
raise FileNotFoundError(f'No model files found in {model_dir}')
for model_file in model_files:
net.load_state_dict(torch.load(model_file, map_location=torch.device(d())))
net.to(d()).eval()
output = net(i)
outputs.append(output)
averaged_outputs = [torch.stack(model_output).mean(dim=0).cpu().numpy().squeeze(0).transpose(1,2,0) for model_output in zip(*outputs)]
# prob_theta, prob_phi, prob_distance, prob_omega
output_dict = dict(zip(['theta', 'phi', 'dist', 'omega'], averaged_outputs))
np.savez_compressed(output_file, **output_dict)
print(f'predictions for {input_file} saved to {output_file}')
def predict():
fire.Fire(get_ensembled_predictions)
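# Hypothetical command-line usage (file names below are illustrative): setup.py
# registers `tr_rosetta` as a console script pointing at this `predict` function,
# and Fire maps positional arguments onto get_ensembled_predictions, so invocations
# look like:
#   tr_rosetta input.a3m                # writes predictions to input.npz
#   tr_rosetta input.a3m out.npz        # explicit output path
# The saved .npz holds the ensemble-averaged 'theta', 'phi', 'dist' and 'omega' maps.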
| tr-rosetta-pytorch-main | tr_rosetta_pytorch/cli.py |
import string
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
def d(tensor=None):
if tensor is None:
return 'cuda' if torch.cuda.is_available() else 'cpu'
return 'cuda' if tensor.is_cuda else 'cpu'
# preprocessing fn
# read A3M and convert letters into
# integers in the 0..20 range
def parse_a3m(filename):
table = str.maketrans(dict.fromkeys(string.ascii_lowercase))
seqs = [line.strip().translate(table) for line in open(filename, 'r') if line[0] != '>']
alphabet = np.array(list("ARNDCQEGHILKMFPSTWYV-"), dtype='|S1').view(np.uint8)
msa = np.array([list(s) for s in seqs], dtype='|S1').view(np.uint8)
# convert letters into numbers
for i in range(alphabet.shape[0]):
msa[msa == alphabet[i]] = i
# treat all unknown characters as gaps
msa[msa > 20] = 20
return msa
# 1-hot MSA to PSSM
def msa2pssm(msa1hot, w):
beff = w.sum()
f_i = (w[:, None, None] * msa1hot).sum(dim=0) / beff + 1e-9
h_i = (-f_i * torch.log(f_i)).sum(dim=1)
return torch.cat((f_i, h_i[:, None]), dim=1)
# reweight MSA based on cutoff
def reweight(msa1hot, cutoff):
id_min = msa1hot.shape[1] * cutoff
id_mtx = torch.einsum('ikl,jkl->ij', msa1hot, msa1hot)
id_mask = id_mtx > id_min
w = 1. / id_mask.float().sum(dim=-1)
return w
# shrunk covariance inversion
def fast_dca(msa1hot, weights, penalty = 4.5):
device = msa1hot.device
nr, nc, ns = msa1hot.shape
x = msa1hot.view(nr, -1)
num_points = weights.sum() - torch.sqrt(weights.mean())
mean = (x * weights[:, None]).sum(dim=0, keepdims=True) / num_points
x = (x - mean) * torch.sqrt(weights[:, None])
cov = (x.t() @ x) / num_points
cov_reg = cov + torch.eye(nc * ns).to(device) * penalty / torch.sqrt(weights.sum())
inv_cov = torch.inverse(cov_reg)
x1 = inv_cov.view(nc, ns, nc, ns)
x2 = x1.transpose(1, 2).contiguous()
features = x2.reshape(nc, nc, ns * ns)
x3 = torch.sqrt((x1[:, :-1, :, :-1] ** 2).sum(dim=(1, 3))) * (1 - torch.eye(nc).to(device))
apc = x3.sum(dim=0, keepdims=True) * x3.sum(dim=1, keepdims=True) / x3.sum()
contacts = (x3 - apc) * (1 - torch.eye(nc).to(device))
return torch.cat((features, contacts[:, :, None]), dim=2)
def preprocess(msa_file, wmin=0.8, ns=21):
a3m = torch.from_numpy(parse_a3m(msa_file)).long()
nrow, ncol = a3m.shape
msa1hot = F.one_hot(a3m, ns).float().to(d())
w = reweight(msa1hot, wmin).float().to(d())
# 1d sequence
f1d_seq = msa1hot[0, :, :20].float()
f1d_pssm = msa2pssm(msa1hot, w)
f1d = torch.cat((f1d_seq, f1d_pssm), dim=1)
f1d = f1d[None, :, :].reshape((1, ncol, 42))
# 2d sequence
f2d_dca = fast_dca(msa1hot, w) if nrow > 1 else torch.zeros((ncol, ncol, 442)).float().to(d())
f2d_dca = f2d_dca[None, :, :, :]
f2d = torch.cat((
f1d[:, :, None, :].repeat(1, 1, ncol, 1),
f1d[:, None, :, :].repeat(1, ncol, 1, 1),
f2d_dca
), dim=-1)
f2d = f2d.view(1, ncol, ncol, 442 + 2*42)
return f2d.permute((0, 3, 2, 1))
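# Hypothetical smoke test (the tiny alignment below is made up): write a fake A3M
# file and check the shape of the pair features produced by preprocess.
if __name__ == '__main__':
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.a3m', delete=False) as tmp:
        tmp.write('>query\nMKV\n>hit\nMRV\n')
        msa_path = tmp.name
    features = preprocess(msa_path)
    # 442 coevolution features + 2 * 42 per-residue features = 526 channels
    print(features.shape)  # torch.Size([1, 526, 3, 3]) for this length-3 toy MSA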
| tr-rosetta-pytorch-main | tr_rosetta_pytorch/utils.py |
import torch
from torch import nn, einsum
import torch.nn.functional as F
def elu():
return nn.ELU(inplace=True)
def instance_norm(filters, eps=1e-6, **kwargs):
return nn.InstanceNorm2d(filters, affine=True, eps=eps, **kwargs)
def conv2d(in_chan, out_chan, kernel_size, dilation=1, **kwargs):
padding = dilation * (kernel_size - 1) // 2
return nn.Conv2d(in_chan, out_chan, kernel_size, padding=padding, dilation=dilation, **kwargs)
class trRosettaNetwork(nn.Module):
def __init__(self, filters=64, kernel=3, num_layers=61):
super().__init__()
self.filters = filters
self.kernel = kernel
self.num_layers = num_layers
self.first_block = nn.Sequential(
conv2d(442 + 2 * 42, filters, 1),
instance_norm(filters),
elu()
)
# stack of residual blocks with dilations
cycle_dilations = [1, 2, 4, 8, 16]
dilations = [cycle_dilations[i % len(cycle_dilations)] for i in range(num_layers)]
self.layers = nn.ModuleList([nn.Sequential(
conv2d(filters, filters, kernel, dilation=dilation),
instance_norm(filters),
elu(),
nn.Dropout(p=0.15),
conv2d(filters, filters, kernel, dilation=dilation),
instance_norm(filters)
) for dilation in dilations])
self.activate = elu()
# conv to anglegrams and distograms
self.to_prob_theta = nn.Sequential(conv2d(filters, 25, 1), nn.Softmax(dim=1))
self.to_prob_phi = nn.Sequential(conv2d(filters, 13, 1), nn.Softmax(dim=1))
self.to_distance = nn.Sequential(conv2d(filters, 37, 1), nn.Softmax(dim=1))
self.to_prob_bb = nn.Sequential(conv2d(filters, 3, 1), nn.Softmax(dim=1))
self.to_prob_omega = nn.Sequential(conv2d(filters, 25, 1), nn.Softmax(dim=1))
def forward(self, x):
x = self.first_block(x)
for layer in self.layers:
x = self.activate(x + layer(x))
prob_theta = self.to_prob_theta(x) # anglegrams for theta
prob_phi = self.to_prob_phi(x) # anglegrams for phi
x = 0.5 * (x + x.permute((0,1,3,2))) # symmetrize
prob_distance = self.to_distance(x) # distograms
# prob_bb = self.to_prob_bb(x) # beta-strand pairings (not used)
prob_omega = self.to_prob_omega(x) # anglegrams for omega
return prob_theta, prob_phi, prob_distance, prob_omega
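# Hypothetical shape check with an untrained, deliberately small network; the
# 526 input channels match the feature layout produced by utils.preprocess.
if __name__ == '__main__':
    net = trRosettaNetwork(filters=16, num_layers=4)
    pair_features = torch.randn(1, 526, 32, 32)   # batch, channels, L, L
    theta, phi, dist, omega = net(pair_features)
    print(theta.shape, phi.shape, dist.shape, omega.shape)
    # torch.Size([1, 25, 32, 32]) torch.Size([1, 13, 32, 32])
    # torch.Size([1, 37, 32, 32]) torch.Size([1, 25, 32, 32])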
| tr-rosetta-pytorch-main | tr_rosetta_pytorch/tr_rosetta_pytorch.py |
import torch
from triton_transformer import Transformer
assert torch.cuda.is_available()
# instantiate model and data
model = Transformer(
num_tokens = 256,
max_seq_len = 1024,
dim = 512,
depth = 6,
heads = 8,
dim_head = 64,
causal = True,
use_triton = False
).cuda()
x = torch.randint(0, 256, (1, 1024)).cuda()
labels = torch.randint(0, 256, (1, 1024)).cuda()
# forward and backward pass without triton
loss = model(x, labels = labels)
loss.backward()
loss = loss.clone()
emb_grad = model.token_emb.weight.grad.clone()
ln_weight_grad = model.norm.weight.grad.clone()
ln_bias_grad = model.norm.bias.grad.clone()
model.zero_grad()
# forward and backward pass with triton
triton_loss = model(x, labels = labels, use_triton = True)
triton_loss.backward()
triton_emb_grad = model.token_emb.weight.grad.clone()
triton_ln_weight_grad = model.norm.weight.grad.clone()
triton_ln_bias_grad = model.norm.bias.grad.clone()
# should be equal, for output and gradients on token embeddings
assert torch.allclose(loss.cpu(), triton_loss.cpu(), atol=1e-6), 'output is not the same'
assert torch.allclose(emb_grad.cpu(), triton_emb_grad.cpu(), atol=2e-6), 'token embedding grad is not the same'
assert torch.allclose(ln_weight_grad.cpu(), triton_ln_weight_grad.cpu(), atol=2e-6), 'layernorm weight grad is not the same'
assert torch.allclose(ln_bias_grad.cpu(), triton_ln_bias_grad.cpu(), atol=2e-6), 'layernorm bias grad is not the same'
print('succeeded')
| triton-transformer-main | assert.py |
from setuptools import setup, find_packages
setup(
name = 'triton-transformer',
packages = find_packages(),
version = '0.1.1',
license='MIT',
description = 'Transformer in Triton',
author = 'Phil Wang',
author_email = '[email protected]',
url = 'https://github.com/lucidrains/triton-transformer',
keywords = [
'artificial intelligence',
'attention mechanism',
'transformers'
],
install_requires=[
'einops',
'torch>=1.6',
'triton==1.0.1.dev20210924'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| triton-transformer-main | setup.py |
from triton_transformer import Transformer
from triton_transformer.autoregressive_wrapper import AutoregressiveWrapper
import random
import tqdm
import gzip
import numpy as np
import torch
import torch.optim as optim
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
# constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 4
GRADIENT_ACCUMULATE_EVERY = 4
LEARNING_RATE = 2e-4
VALIDATE_EVERY = 100
GENERATE_EVERY = 500
GENERATE_LENGTH = 512
SEQ_LEN = 512
# helpers
def cycle(loader):
while True:
for data in loader:
yield data
def decode_token(token):
return str(chr(max(32, token)))
def decode_tokens(tokens):
return ''.join(list(map(decode_token, tokens)))
# instantiate GPT-like decoder model
model = Transformer(
num_tokens = 256,
dim = 512,
max_seq_len = SEQ_LEN,
depth = 8,
heads = 8,
causal = True,
use_triton = True,
attn_dropout = 0.1,
ff_dropout = 0.1,
)
model = AutoregressiveWrapper(model)
model.cuda()
# prepare enwik8 data
with gzip.open('./data/enwik8.gz') as file:
X = np.frombuffer(file.read(int(95e6)), dtype=np.uint8).copy()  # np.fromstring is deprecated for binary data
trX, vaX = np.split(X, [int(90e6)])
data_train, data_val = torch.from_numpy(trX), torch.from_numpy(vaX)
class TextSamplerDataset(Dataset):
def __init__(self, data, seq_len):
super().__init__()
self.data = data
self.seq_len = seq_len
def __getitem__(self, index):
rand_start = torch.randint(0, self.data.size(0) - self.seq_len, (1,))
full_seq = self.data[rand_start: rand_start + self.seq_len + 1].long()
return full_seq.cuda()
def __len__(self):
return self.data.size(0) // self.seq_len
train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN)
train_loader = cycle(DataLoader(train_dataset, batch_size = BATCH_SIZE))
val_loader = cycle(DataLoader(val_dataset, batch_size = BATCH_SIZE))
# optimizer
optim = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
# training
for i in tqdm.tqdm(range(NUM_BATCHES), mininterval=10., desc='training'):
model.train()
for __ in range(GRADIENT_ACCUMULATE_EVERY):
loss = model(next(train_loader))
loss.backward()
print(f'training loss: {loss.item()}')
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
optim.step()
optim.zero_grad()
if i % VALIDATE_EVERY == 0:
model.eval()
with torch.no_grad():
loss = model(next(val_loader))
print(f'validation loss: {loss.item()}')
if i % GENERATE_EVERY == 0:
model.eval()
inp = random.choice(val_dataset)[:-1]
prime = decode_tokens(inp)
print('%s \n\n %s' % (prime, '*' * 100))
sample = model.generate(inp[None, ...], GENERATE_LENGTH)
output_str = decode_tokens(sample[0])
print(output_str)
| triton-transformer-main | train.py |
import torch
import torch.nn.functional as F
from einops import rearrange
import triton
import triton.language as tl
def cross_entropy_fn(logits, labels, ignore_index = 0., use_triton = False):
logits = rearrange(logits, 'b n c -> (b n) c')
labels = rearrange(labels, 'b n -> (b n)')
if use_triton:
loss = triton.ops.cross_entropy(logits, labels)
else:
loss = F.cross_entropy(logits, labels, reduction = 'none')
mask = (labels != ignore_index)
return loss[mask].mean()
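# Hypothetical CPU check of the non-triton fallback path; shapes are made up.
# Label 0 is skipped via ignore_index, matching its use as the padding token.
if __name__ == '__main__':
    logits = torch.randn(2, 8, 256)            # (batch, seq, num_tokens)
    labels = torch.randint(1, 256, (2, 8))     # no padding tokens in this toy batch
    print(cross_entropy_fn(logits, labels, ignore_index=0, use_triton=False))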
| triton-transformer-main | triton_transformer/cross_entropy.py |
import torch
from torch import autograd
import torch.nn.functional as F
import triton
import triton.language as tl
from triton_transformer.utils import calc_num_warps, exists
# todo, make this autotuneable
GAMMA_BLOCK_SIZE = 64
GAMMA_ROW_BLOCK_SIZE = 64
@triton.jit
def layernorm_kernel_forward_training(
output_ptr,
mean_centered_ptr,
normed_ptr,
input_ptr,
gamma_ptr,
input_row_stride,
gamma_row_stride,
output_row_stride,
mean_centered_row_stride,
normed_row_stride,
n_cols,
stable,
eps,
**meta
):
row_idx = tl.program_id(0)
BLOCK_SIZE = meta['BLOCK_SIZE']
row_start_ptr = input_ptr + row_idx * input_row_stride
gamma_row_start_ptr = gamma_ptr + row_idx * gamma_row_stride
col_offsets = tl.arange(0, BLOCK_SIZE)
input_ptrs = row_start_ptr + col_offsets
gamma_ptrs = gamma_row_start_ptr + col_offsets
mask = col_offsets < n_cols
row = tl.load(input_ptrs, mask=mask, other=0.)
gammas = tl.load(gamma_ptrs, mask=mask, other=0.)
if stable:
row_max = tl.max(tl.where(mask, row, float('-inf')), axis = 0)
row /= row_max
row_mean = tl.sum(row, axis = 0) / n_cols
row_mean_centered = tl.where(mask, row - row_mean, 0.)
row_var = tl.sum(row_mean_centered * row_mean_centered, axis = 0) / n_cols
inv_var = 1. / tl.sqrt(row_var + eps)
normed = row_mean_centered * inv_var
output = normed * gammas
output_row_start_ptr = output_ptr + row_idx * output_row_stride
output_ptrs = output_row_start_ptr + col_offsets
tl.store(output_ptrs, output, mask=mask)
mean_centered_row_start_ptr = mean_centered_ptr + row_idx * mean_centered_row_stride
mean_centered_ptrs = mean_centered_row_start_ptr + col_offsets
tl.store(mean_centered_ptrs, row_mean_centered, mask=mask)
normed_row_start_ptr = normed_ptr + row_idx * normed_row_stride
normed_ptrs = normed_row_start_ptr + col_offsets
tl.store(normed_ptrs, normed, mask=mask)
@triton.jit
def layernorm_kernel_forward_inference(
output_ptr,
input_ptr,
gamma_ptr,
input_row_stride,
gamma_row_stride,
output_row_stride,
n_cols,
stable,
eps,
**meta
):
row_idx = tl.program_id(0)
BLOCK_SIZE = meta['BLOCK_SIZE']
row_start_ptr = input_ptr + row_idx * input_row_stride
gamma_row_start_ptr = gamma_ptr + row_idx * gamma_row_stride
col_offsets = tl.arange(0, BLOCK_SIZE)
input_ptrs = row_start_ptr + col_offsets
gamma_ptrs = gamma_row_start_ptr + col_offsets
mask = col_offsets < n_cols
row = tl.load(input_ptrs, mask=mask, other=0.)
gammas = tl.load(gamma_ptrs, mask=mask, other=0.)
if stable:
row_max = tl.max(tl.where(mask, row, float('-inf')), axis = 0)
row /= row_max
row_mean = tl.sum(row, axis = 0) / n_cols
row_mean_centered = tl.where(mask, row - row_mean, 0.)
row_var = tl.sum(row_mean_centered * row_mean_centered, axis = 0) / n_cols
inv_var = 1. / tl.sqrt(row_var + eps)
normed = row_mean_centered * inv_var
output = normed * gammas
output_row_start_ptr = output_ptr + row_idx * output_row_stride
output_ptrs = output_row_start_ptr + col_offsets
tl.store(output_ptrs, output, mask=mask)
@triton.jit
def layernorm_kernel_backward(
output_ptr,
dy_ptr,
mean_centered_ptr,
output_row_stride,
dy_row_stride,
mean_centered_row_stride,
n_cols,
eps,
**meta
):
row_idx = tl.program_id(0)
BLOCK_SIZE = meta['BLOCK_SIZE']
dy_row_start_ptr = dy_ptr + row_idx * dy_row_stride
mean_centered_row_start_ptr = mean_centered_ptr + row_idx * mean_centered_row_stride
col_offsets = tl.arange(0, BLOCK_SIZE)
dy_ptrs = dy_row_start_ptr + col_offsets
mean_centered_ptrs = mean_centered_row_start_ptr + col_offsets
mask = col_offsets < n_cols
dy = tl.load(dy_ptrs, mask=mask, other=0.)
mean_centered = tl.load(mean_centered_ptrs, mask=mask, other=0.)
row_var = tl.sum(mean_centered * mean_centered, axis = 0) / n_cols
inv_var = 1. / tl.sqrt(row_var + eps)
normed = mean_centered * inv_var
output = 1. / n_cols * inv_var * (n_cols * dy - tl.sum(dy, axis = 0) - normed * tl.sum(dy * normed, axis = 0))
output_row_start_ptr = output_ptr + row_idx * output_row_stride
output_ptrs = output_row_start_ptr + col_offsets
tl.store(output_ptrs, output, mask=mask)
@triton.jit
def layernorm_gamma_kernel_backward(
dgamma_ptr,
norm_ptr,
dy_ptr,
norm_stride,
dy_stride,
dgamma_row_stride,
n_rows,
n_cols,
**meta
):
col_idx = tl.program_id(0)
row_idx = tl.program_id(1)
BLOCK_SIZE = meta['BLOCK_SIZE']
ROW_BLOCK_SIZE = meta['BLOCK_SIZE_ROW']
col_offsets = tl.arange(0, BLOCK_SIZE)
row_offsets = tl.arange(0, ROW_BLOCK_SIZE)
col_range = col_idx * BLOCK_SIZE + col_offsets
row_range = row_idx * ROW_BLOCK_SIZE + row_offsets
col_mask = col_range < n_cols
mask = (row_range < n_rows)[:, None] & col_mask[None, :]
dy_ptr += row_range[:, None] * dy_stride + col_range[None, :]
norm_ptr += row_range[:, None] * norm_stride + col_range[None, :]
dy = tl.load(dy_ptr, mask = mask, other = 0.)
norm = tl.load(norm_ptr, mask = mask, other = 0.)
dgamma = tl.sum(dy * norm, axis = 0)
dgamma_ptr += row_idx * dgamma_row_stride + col_range
tl.store(dgamma_ptr, dgamma, mask = col_mask)
class _layernorm(autograd.Function):
@classmethod
def forward(cls, ctx, x, gamma, eps, stable):
shape = x.shape
dim = shape[-1]
x = x.view(-1, dim)
n_rows, n_cols = x.shape
expanded_gamma = gamma[None, :].expand(n_rows, -1)
BLOCK_SIZE = triton.next_power_of_2(n_cols)
num_warps = calc_num_warps(BLOCK_SIZE)
out = torch.empty_like(x)
ctx.eps = eps
if x.requires_grad:
scaled_x = torch.empty_like(x)
normed = torch.empty_like(x)
layernorm_kernel_forward_training[(n_rows,)](
out,
scaled_x,
normed,
x,
expanded_gamma,
x.stride(0),
expanded_gamma.stride(0),
out.stride(0),
scaled_x.stride(0),
normed.stride(0),
n_cols,
stable,
eps,
num_warps = num_warps,
BLOCK_SIZE = BLOCK_SIZE,
)
ctx.save_for_backward(scaled_x, gamma, out)
else:
layernorm_kernel_forward_inference[(n_rows,)](
out,
x,
expanded_gamma,
x.stride(0),
expanded_gamma.stride(0),
out.stride(0),
n_cols,
stable,
eps,
num_warps = num_warps,
BLOCK_SIZE = BLOCK_SIZE,
)
return out.view(*shape)
@classmethod
def backward(cls, ctx, dy):
shape, device = dy.shape, dy.device
dim = shape[-1]
dy = dy.view(-1, dim)
scaled_x, gamma, normed = ctx.saved_tensors
n_rows, n_cols = dy.shape
num_col_programs = triton.cdiv(n_cols, GAMMA_BLOCK_SIZE)
num_row_programs = triton.cdiv(n_rows, GAMMA_ROW_BLOCK_SIZE)
dgamma = torch.empty((num_row_programs, n_cols), device = device)
layernorm_gamma_kernel_backward[(num_col_programs, num_row_programs)](
dgamma,
normed,
dy,
normed.stride(0),
dy.stride(0),
dgamma.stride(0),
n_rows,
n_cols,
num_warps = 4,
BLOCK_SIZE = GAMMA_BLOCK_SIZE,
BLOCK_SIZE_ROW = GAMMA_ROW_BLOCK_SIZE
)
dgamma = dgamma.sum(dim = 0)
dxhat = dy * gamma
dx = torch.empty_like(dy)
BLOCK_SIZE = triton.next_power_of_2(n_cols)
num_warps = calc_num_warps(BLOCK_SIZE)
layernorm_kernel_backward[(n_rows,)](
dx,
dxhat,
scaled_x,
dx.stride(0),
dxhat.stride(0),
scaled_x.stride(0),
n_cols,
ctx.eps,
num_warps = num_warps,
BLOCK_SIZE = BLOCK_SIZE,
)
dx = dx.view(*shape)
return dx, dgamma, None, None
def layernorm(x, gamma, eps = 1e-5, use_triton = False, stable = False):
if use_triton:
out = _layernorm.apply(x, gamma, eps, stable)
else:
if stable:
x = x / torch.amax(x, dim = -1, keepdim = True)
out = F.layer_norm(x, (x.shape[-1],), gamma, torch.zeros_like(gamma), eps = eps)
return out
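# Hypothetical CPU check of the use_triton=False branch, which is plain F.layer_norm
# with a zero bias; the triton kernels above require a CUDA device.
if __name__ == '__main__':
    x = torch.randn(2, 16, 64)
    gamma = torch.ones(64)
    out = layernorm(x, gamma, use_triton=False)
    print(out.shape, out.mean().item())   # (2, 16, 64), mean close to zero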
| triton-transformer-main | triton_transformer/layernorm.py |
import torch
from torch import nn
import torch.nn.functional as F
# helper function
def exists(val):
return val is not None
def eval_decorator(fn):
def inner(model, *args, **kwargs):
was_training = model.training
model.eval()
out = fn(model, *args, **kwargs)
model.train(was_training)
return out
return inner
# top k filtering
def top_k(logits, thres = 0.9):
k = int((1 - thres) * logits.shape[-1])
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
class AutoregressiveWrapper(nn.Module):
def __init__(self, net, pad_value = 0):
super().__init__()
self.pad_value = pad_value
self.net = net
self.max_seq_len = net.max_seq_len
@torch.no_grad()
@eval_decorator
def generate(self, start_tokens, seq_len, eos_token = None, temperature = 1., filter_thres = 0.9, **kwargs):
b, t, device = *start_tokens.shape, start_tokens.device
out = start_tokens
for _ in range(seq_len):
x = out[:, -self.max_seq_len:]
logits = self.net(x, **kwargs)[:, -1, :]
filtered_logits = top_k(logits, thres = filter_thres)
probs = F.softmax(filtered_logits / temperature, dim=-1)
sample = torch.multinomial(probs, 1)
out = torch.cat((out, sample), dim=-1)
if exists(eos_token):
is_eos_token = (out == eos_token)
if is_eos_token.any(dim = -1).all():
# mask out everything after the eos tokens
shifted_is_eos_tokens = F.pad(is_eos_token, (1, -1))
mask = shifted_is_eos_tokens.float().cumsum(dim = -1) >= 1
out = out.masked_fill(mask, self.pad_value)
break
out = out[:, t:]
return out
def forward(self, x, **kwargs):
x_inp, x_labels = x[:, :-1], x[:, 1:]
return self.net(x_inp, labels = x_labels, **kwargs)
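# Hypothetical sketch: the wrapper only needs a `net` exposing `max_seq_len` and
# mapping token ids (b, n) to logits (b, n, num_tokens), so a tiny stand-in model
# (not the repo's Transformer) is enough to exercise the sampling loop on CPU.
if __name__ == '__main__':
    class TinyLM(nn.Module):
        def __init__(self, num_tokens=32, max_seq_len=16):
            super().__init__()
            self.max_seq_len = max_seq_len
            self.emb = nn.Embedding(num_tokens, 32)
            self.to_logits = nn.Linear(32, num_tokens)
        def forward(self, x, **kwargs):
            return self.to_logits(self.emb(x))
    wrapper = AutoregressiveWrapper(TinyLM())
    prompt = torch.randint(0, 32, (1, 4))
    print(wrapper.generate(prompt, seq_len=8).shape)   # torch.Size([1, 8])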
| triton-transformer-main | triton_transformer/autoregressive_wrapper.py |
from triton_transformer.transformer import Transformer
| triton-transformer-main | triton_transformer/__init__.py |
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def calc_num_warps(block_size):
num_warps = 4
if block_size >= 2048:
num_warps = 8
if block_size >= 4096:
num_warps = 16
return num_warps
| triton-transformer-main | triton_transformer/utils.py |
from functools import partial
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange
from triton_transformer.layernorm import layernorm
from triton_transformer.softmax import softmax
from triton_transformer.cross_entropy import cross_entropy_fn
from triton_transformer.bmm import fused_relu_squared
from triton_transformer.dropout import dropout_fn
from triton_transformer.utils import exists, default
# classes
class PreNormResidual(nn.Module):
def __init__(self, dim, fn, use_triton = False):
super().__init__()
self.norm = nn.LayerNorm(dim)
self.fn = fn
self.use_triton = use_triton
def forward(self, x, **kwargs):
use_triton = kwargs.get('use_triton', self.use_triton)
normed = layernorm(x, self.norm.weight, use_triton = use_triton)
return self.fn(normed, **kwargs) + x
# helpers classes
class Attention(nn.Module):
def __init__(
self,
dim,
dim_head = 64,
heads = 8,
causal = False,
dropout = 0.,
use_triton = False
):
super().__init__()
self.use_triton = use_triton
self.heads = heads
self.scale = dim_head ** -0.5
self.causal = causal
inner_dim = dim_head * heads
self.dropout = dropout
self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)
self.to_out = nn.Linear(inner_dim, dim, bias = False)
def forward(self, x, mask = None, use_triton = None):
use_triton = default(use_triton, self.use_triton)
h = self.heads
q, k, v = self.to_qkv(x).chunk(3, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h = h), (q, k, v))
q = q * self.scale
sim = einsum('b i d, b j d -> b i j', q, k)
if exists(mask):
mask_value = -torch.finfo(sim.dtype).max
sim = sim.masked_fill(mask, mask_value)
attn = softmax(sim, causal = self.causal, use_triton = use_triton)
attn = dropout_fn(attn, self.dropout, use_triton = use_triton)
out = einsum('b i j, b j d -> b i d', attn, v)
out = rearrange(out, '(b h) n d -> b n (h d)', h = h)
out = self.to_out(out)
return dropout_fn(out, self.dropout, use_triton = use_triton)
class FeedForward(nn.Module):
def __init__(
self,
dim,
mult = 4,
dropout = 0.,
use_triton = False
):
super().__init__()
self.use_triton = use_triton
inner_dim = dim * mult
self.dropout = dropout
self.proj_in_weight = nn.Parameter(torch.randn(dim, inner_dim))
self.proj_out = nn.Linear(inner_dim, dim)
def forward(self, x, use_triton = None):
use_triton = default(use_triton, self.use_triton)
x = fused_relu_squared(x, self.proj_in_weight, use_triton = use_triton)
x = dropout_fn(x, self.dropout, use_triton = use_triton)
x = self.proj_out(x)
return x
# main class
class Transformer(nn.Module):
def __init__(
self,
*,
dim,
num_tokens,
max_seq_len,
depth,
causal = False,
heads = 8,
dim_head = 64,
ff_dropout = 0.,
ff_mult = 4,
attn_dropout = 0.,
use_triton = False
):
super().__init__()
self.max_seq_len = max_seq_len
self.token_emb = nn.Embedding(num_tokens, dim)
self.pos_emb = nn.Embedding(max_seq_len, dim)
self.layers = nn.ModuleList([])
wrapper = partial(PreNormResidual, dim)
for _ in range(depth):
self.layers.append(nn.ModuleList([
wrapper(Attention(dim, heads = heads, dim_head = dim_head, causal = causal, dropout = attn_dropout, use_triton = use_triton)),
wrapper(FeedForward(dim, dropout = ff_dropout, mult = ff_mult, use_triton = use_triton))
]))
self.norm = nn.LayerNorm(dim)
self.to_logits = nn.Linear(dim, num_tokens)
# mask
self.use_triton = use_triton
self.causal = causal
mask = torch.ones(max_seq_len, max_seq_len, dtype = torch.bool).triu(1) if causal else None
self.register_buffer('mask', mask, persistent = False)
def forward(
self,
x,
mask = None,
*,
labels = None,
use_triton = None
):
use_triton = default(use_triton, self.use_triton)
n, device = x.shape[1], x.device
# embed token and add positional embedding
x = self.token_emb(x)
pos_emb = self.pos_emb(torch.arange(n, device = device))
x = x + rearrange(pos_emb, 'n d -> () n d')
# generate mask, depending on whether autoregressive or not
assert not (self.causal and exists(mask)), 'mask is not needed during autoregressive mode'
if self.causal and not use_triton:
mask = self.mask[:n, :n]
mask = rearrange(mask, 'i j -> () i j')
elif not self.causal and exists(mask):
mask = rearrange(mask, 'b i -> b i ()') * rearrange(mask, 'b j -> b () j')
mask = ~mask
# go through layers
for attn, ff in self.layers:
x = attn(x, mask = mask, use_triton = use_triton)
x = ff(x, use_triton = use_triton)
x = layernorm(x, self.norm.weight, use_triton = use_triton, stable = True)
logits = self.to_logits(x)
if not exists(labels):
return logits
loss = cross_entropy_fn(logits, labels, ignore_index = 0, use_triton = use_triton)
return loss
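# Hypothetical CPU shape check: with use_triton=False every custom kernel falls back
# to its plain PyTorch implementation (the triton package still has to be importable).
# Model sizes below are illustrative assumptions, not recommended settings.
if __name__ == '__main__':
    model = Transformer(num_tokens=256, max_seq_len=64, dim=64, depth=2, heads=4,
                        dim_head=16, causal=True, use_triton=False)
    tokens = torch.randint(0, 256, (2, 64))
    print(model(tokens).shape)   # torch.Size([2, 64, 256])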
| triton-transformer-main | triton_transformer/transformer.py |
import torch
from torch import autograd
import torch.nn.functional as F
from triton_transformer.utils import calc_num_warps, exists
import triton
import triton.language as tl
@triton.autotune(
configs=[
triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 64 , 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64 , 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 32 , 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64 , 'BLOCK_SIZE_N': 32 , 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=5, num_warps=2),
triton.Config({'BLOCK_SIZE_M': 32 , 'BLOCK_SIZE_N': 64 , 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=5, num_warps=2),
],
key=['M', 'N', 'K'],
)
@triton.jit
def bmm_kernel(
x_ptr, y_ptr, o_ptr,
M, N, K,
stride_al, stride_am, stride_ak,
stride_bl, stride_bk, stride_bn,
stride_ol, stride_om, stride_on,
**meta,
):
BLOCK_SIZE_M = meta['BLOCK_SIZE_M']
BLOCK_SIZE_N = meta['BLOCK_SIZE_N']
BLOCK_SIZE_K = meta['BLOCK_SIZE_K']
GROUP_SIZE_M = 8
pid_batch = tl.program_id(0)
pid = tl.program_id(1)
num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)
num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
num_pid_in_group = GROUP_SIZE_M * num_pid_n
group_id = pid // num_pid_in_group
first_pid_m = group_id * GROUP_SIZE_M
group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
pid_m = first_pid_m + (pid % group_size_m)
pid_n = (pid % num_pid_in_group) // group_size_m
offs_am = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_bn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
offs_k = tl.arange(0, BLOCK_SIZE_K)
x_ptrs = x_ptr + (offs_am[:, None]*stride_am + offs_k [None, :]*stride_ak + pid_batch*stride_al)
y_ptrs = y_ptr + (offs_k [:, None]*stride_bk + offs_bn[None, :]*stride_bn + pid_batch*stride_bl)
o = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
for k in range(0, K, BLOCK_SIZE_K):
x = tl.load(x_ptrs)
y = tl.load(y_ptrs)
o += tl.dot(x, y)
x_ptrs += BLOCK_SIZE_K * stride_ak
y_ptrs += BLOCK_SIZE_K * stride_bk
if exists(meta['ACTIVATION']):
o = meta['ACTIVATION'](o)
offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
mask = (offs_m[:, None] < M) & (offs_n[None, :] < N)
o_ptrs = o_ptr + stride_om * offs_m[:, None] + stride_on * offs_n[None, :] + stride_ol * pid_batch
tl.store(o_ptrs, o, mask=mask)
def triton_bmm(x, y, activation = None):
B, M, K = x.shape
if y.ndim == 2:
y = y.unsqueeze(0).expand(B, -1, -1)
_, K, N = y.shape
assert (K % 32 == 0), "K must be divisible by 32"
o = torch.empty((B, M, N), device = x.device, dtype = x.dtype)
grid = lambda META: (
B, triton.cdiv(M, META['BLOCK_SIZE_M']) * triton.cdiv(N, META['BLOCK_SIZE_N']),
)
bmm_kernel[grid](
x, y, o,
M, N, K,
x.stride(0), x.stride(1), x.stride(2),
y.stride(0), y.stride(1), y.stride(2),
o.stride(0), o.stride(1), o.stride(2),
ACTIVATION = activation
)
return o
@triton.jit
def relu_squared_activation(x):
return tl.where(x > 0, x * x, 0.)
class _relu_squared(autograd.Function):
@classmethod
def forward(self, ctx, x, w):
o = triton_bmm(x, w, activation = relu_squared_activation)
if x.requires_grad:
ctx.save_for_backward(x, w, o)
return o
@classmethod
def backward(self, ctx, dy):
x, w, o = ctx.saved_tensors
dy = torch.sqrt(o) * 2 * dy
dx = triton_bmm(dy, w.t())
dw = triton_bmm(x.transpose(-1, -2), dy)
return dx, dw
triton_relu_squared = _relu_squared.apply
def fused_relu_squared(x, w, use_triton = False):
if use_triton:
return triton_relu_squared(x, w)
return F.relu(x @ w) ** 2
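# Hypothetical CPU check of the fallback path, which is just relu(x @ w) ** 2.
# The triton kernel path additionally needs CUDA and an inner dimension K divisible by 32.
if __name__ == '__main__':
    x = torch.randn(2, 8, 64)
    w = torch.randn(64, 128)
    out = fused_relu_squared(x, w, use_triton=False)
    print(out.shape, bool((out >= 0).all()))   # torch.Size([2, 8, 128]) True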
| triton-transformer-main | triton_transformer/bmm.py |
# https://triton-lang.org/getting-started/tutorials/04-low-memory-dropout.html#sphx-glr-getting-started-tutorials-04-low-memory-dropout-py
import torch
from torch import autograd
import torch.nn.functional as F
import triton
import triton.language as tl
from random import randrange
BLOCK_SIZE = 1024
@triton.jit
def _seeded_dropout(x_ptr, output_ptr, n_elements, p, seed, **meta):
BLOCK_SIZE = meta['BLOCK_SIZE']
pid = tl.program_id(axis=0)
block_start = pid * BLOCK_SIZE * 4
off0 = block_start + BLOCK_SIZE * 0 + tl.arange(0, BLOCK_SIZE)
off1 = block_start + BLOCK_SIZE * 1 + tl.arange(0, BLOCK_SIZE)
off2 = block_start + BLOCK_SIZE * 2 + tl.arange(0, BLOCK_SIZE)
off3 = block_start + BLOCK_SIZE * 3 + tl.arange(0, BLOCK_SIZE)
mask0 = off0 < n_elements
mask1 = off1 < n_elements
mask2 = off2 < n_elements
mask3 = off3 < n_elements
x0 = tl.load(x_ptr + off0, mask = mask0)
x1 = tl.load(x_ptr + off1, mask = mask1)
x2 = tl.load(x_ptr + off2, mask = mask2)
x3 = tl.load(x_ptr + off3, mask = mask3)
r0, r1, r2, r3 = tl.random.rand4x(seed, off0)
keep0, keep1, keep2, keep3 = r0 > p, r1 > p, r2 > p, r3 > p
o0 = tl.where(keep0, x0 / (1 - p), 0.0)
o1 = tl.where(keep1, x1 / (1 - p), 0.0)
o2 = tl.where(keep2, x2 / (1 - p), 0.0)
o3 = tl.where(keep3, x3 / (1 - p), 0.0)
tl.store(output_ptr + off0, o0, mask = mask0)
tl.store(output_ptr + off1, o1, mask = mask1)
tl.store(output_ptr + off2, o2, mask = mask2)
tl.store(output_ptr + off3, o3, mask = mask3)
def seeded_dropout(x, p, seed):
output = torch.empty_like(x)
n_elements = x.numel()
grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE'] * 4),)
_seeded_dropout[grid](x, output, n_elements, p, seed, BLOCK_SIZE = BLOCK_SIZE)
return output
class dropout_(autograd.Function):
@classmethod
def forward(cls, ctx, x, p):
seed = randrange(int(1e6))
ctx.p = p
ctx.seed = seed
return seeded_dropout(x, p, seed)
@classmethod
def backward(cls, ctx, dy):
p = ctx.p
seed = ctx.seed
return seeded_dropout(dy, p, seed), None
def dropout_fn(x, p, use_triton = False):
if p == 0. or not x.requires_grad:
return x
if not use_triton:
return F.dropout(x, p, training = True)
return dropout_.apply(x, p)
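# Hypothetical CPU check: dropout_fn is a no-op when p == 0 or the input does not
# require grad, and falls back to F.dropout when use_triton is False.
if __name__ == '__main__':
    x = torch.randn(4, 8, requires_grad=True)
    y = dropout_fn(x, 0.5, use_triton=False)
    print(float((y == 0).float().mean()))   # roughly 0.5 of the entries are dropped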
| triton-transformer-main | triton_transformer/dropout.py |
import torch
from torch import autograd
import torch.nn.functional as F
import triton
import triton.language as tl
from triton_transformer.utils import calc_num_warps
@triton.jit
def softmax_kernel_forward(
output_ptr,
input_ptr,
input_row_stride,
output_row_stride,
n_cols,
causal,
**meta
):
row_idx = tl.program_id(0)
BLOCK_SIZE = meta['BLOCK_SIZE']
row_start_ptr = input_ptr + row_idx * input_row_stride
col_offsets = tl.arange(0, BLOCK_SIZE)
input_ptrs = row_start_ptr + col_offsets
mask = col_offsets < n_cols
row = tl.load(input_ptrs, mask = mask, other = -float('inf'))
if causal:
causal_mask = col_offsets > (row_idx % n_cols)
row = row + tl.where(causal_mask, -float('inf'), 0.)
row_minus_max = row - tl.max(row, axis=0)
numerator = tl.exp(row_minus_max)
denominator = tl.sum(numerator, axis=0)
softmax_output = numerator / denominator
output_row_start_ptr = output_ptr + row_idx * output_row_stride
output_ptrs = output_row_start_ptr + col_offsets
tl.store(output_ptrs, softmax_output, mask = mask)
@triton.jit
def softmax_kernel_backward(
output_ptr,
input_ptr,
grad_ptr,
grad_row_stride,
input_row_stride,
output_row_stride,
n_cols,
**meta
):
row_idx = tl.program_id(0)
BLOCK_SIZE = meta['BLOCK_SIZE']
row_start_ptr = input_ptr + row_idx * input_row_stride
grad_row_start_ptr = grad_ptr + row_idx * grad_row_stride
col_offsets = tl.arange(0, BLOCK_SIZE)
input_ptrs = row_start_ptr + col_offsets
grad_ptrs = grad_row_start_ptr + col_offsets
mask = col_offsets < n_cols
probs_row = tl.load(input_ptrs, mask = mask, other = 0.)
grad_row = tl.load(grad_ptrs, mask = mask, other = 0.)
dxhat = probs_row * grad_row
softmax_grad_output = dxhat - probs_row * tl.sum(dxhat, axis = 0)
output_row_start_ptr = output_ptr + row_idx * output_row_stride
output_ptrs = output_row_start_ptr + col_offsets
tl.store(output_ptrs, softmax_grad_output, mask = mask)
class _softmax(autograd.Function):
@classmethod
def forward(self, ctx, x, causal):
shape = x.shape
x = x.view(-1, shape[-1])
n_rows, n_cols = x.shape
BLOCK_SIZE = triton.next_power_of_2(n_cols)
num_warps = calc_num_warps(BLOCK_SIZE)
y = torch.empty_like(x)
softmax_kernel_forward[(n_rows,)](
y,
x,
x.stride(0),
y.stride(0),
n_cols,
causal,
num_warps = num_warps,
BLOCK_SIZE = BLOCK_SIZE,
)
if x.requires_grad:
ctx.save_for_backward(y)
return y.view(*shape)
@classmethod
def backward(self, ctx, grad_probs):
shape = grad_probs.shape
probs, = ctx.saved_tensors
grad_probs = grad_probs.view(-1, grad_probs.shape[-1])
n_rows, n_cols = grad_probs.shape
BLOCK_SIZE = triton.next_power_of_2(n_cols)
num_warps = calc_num_warps(BLOCK_SIZE)
dx = torch.empty_like(probs)
softmax_kernel_backward[(n_rows,)](
dx,
probs,
grad_probs,
grad_probs.stride(0),
probs.stride(0),
dx.stride(0),
n_cols,
num_warps = num_warps,
BLOCK_SIZE = BLOCK_SIZE
)
return dx.view(*shape), None
triton_softmax = _softmax.apply
def softmax(x, causal = False, use_triton = False):
if use_triton:
return triton_softmax(x, causal)
else:
return F.softmax(x, dim = -1)
| triton-transformer-main | triton_transformer/softmax.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""configure script to get build parameters from user."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import errno
import os
import platform
import re
import subprocess
import sys
# pylint: disable=g-import-not-at-top
try:
from shutil import which
except ImportError:
from distutils.spawn import find_executable as which
# pylint: enable=g-import-not-at-top
_DEFAULT_CUDA_VERSION = '10'
_DEFAULT_CUDNN_VERSION = '7'
_DEFAULT_TENSORRT_VERSION = '5'
_DEFAULT_CUDA_COMPUTE_CAPABILITIES = '3.5,7.0'
_TF_OPENCL_VERSION = '1.2'
_DEFAULT_COMPUTECPP_TOOLKIT_PATH = '/usr/local/computecpp'
_DEFAULT_TRISYCL_INCLUDE_DIR = '/usr/local/triSYCL/include'
_SUPPORTED_ANDROID_NDK_VERSIONS = [10, 11, 12, 13, 14, 15, 16, 17, 18]
_DEFAULT_PROMPT_ASK_ATTEMPTS = 10
_TF_BAZELRC_FILENAME = '.tf_configure.bazelrc'
_TF_WORKSPACE_ROOT = ''
_TF_BAZELRC = ''
_TF_CURRENT_BAZEL_VERSION = None
_TF_MIN_BAZEL_VERSION = '0.24.1'
_TF_MAX_BAZEL_VERSION = '0.26.1'
NCCL_LIB_PATHS = [
'lib64/', 'lib/powerpc64le-linux-gnu/', 'lib/x86_64-linux-gnu/', ''
]
# List of files to configure when building Bazel on Apple platforms.
APPLE_BAZEL_FILES = [
'tensorflow/lite/experimental/ios/BUILD',
'tensorflow/lite/experimental/objc/BUILD',
'tensorflow/lite/experimental/swift/BUILD'
]
# List of files to move when building for iOS.
IOS_FILES = [
'tensorflow/lite/experimental/objc/TensorFlowLiteObjC.podspec',
'tensorflow/lite/experimental/swift/TensorFlowLiteSwift.podspec',
]
class UserInputError(Exception):
pass
def is_windows():
return platform.system() == 'Windows'
def is_linux():
return platform.system() == 'Linux'
def is_macos():
return platform.system() == 'Darwin'
def is_ppc64le():
return platform.machine() == 'ppc64le'
def is_cygwin():
return platform.system().startswith('CYGWIN_NT')
def get_input(question):
try:
try:
answer = raw_input(question)
except NameError:
answer = input(question) # pylint: disable=bad-builtin
except EOFError:
answer = ''
return answer
def symlink_force(target, link_name):
"""Force symlink, equivalent of 'ln -sf'.
Args:
target: items to link to.
link_name: name of the link.
"""
try:
os.symlink(target, link_name)
except OSError as e:
if e.errno == errno.EEXIST:
os.remove(link_name)
os.symlink(target, link_name)
else:
raise e
def sed_in_place(filename, old, new):
"""Replace old string with new string in file.
Args:
filename: string for filename.
old: string to replace.
new: new string to replace to.
"""
with open(filename, 'r') as f:
filedata = f.read()
newdata = filedata.replace(old, new)
with open(filename, 'w') as f:
f.write(newdata)
def write_to_bazelrc(line):
with open(_TF_BAZELRC, 'a') as f:
f.write(line + '\n')
def write_action_env_to_bazelrc(var_name, var):
write_to_bazelrc('build --action_env %s="%s"' % (var_name, str(var)))
def run_shell(cmd, allow_non_zero=False):
if allow_non_zero:
try:
output = subprocess.check_output(cmd)
except subprocess.CalledProcessError as e:
output = e.output
else:
output = subprocess.check_output(cmd)
return output.decode('UTF-8').strip()
def cygpath(path):
"""Convert path from posix to windows."""
return os.path.abspath(path).replace('\\', '/')
def get_python_path(environ_cp, python_bin_path):
"""Get the python site package paths."""
python_paths = []
if environ_cp.get('PYTHONPATH'):
python_paths = environ_cp.get('PYTHONPATH').split(':')
try:
library_paths = run_shell([
python_bin_path, '-c',
'import site; print("\\n".join(site.getsitepackages()))'
]).split('\n')
except subprocess.CalledProcessError:
library_paths = [
run_shell([
python_bin_path, '-c',
'from distutils.sysconfig import get_python_lib;'
'print(get_python_lib())'
])
]
all_paths = set(python_paths + library_paths)
paths = []
for path in all_paths:
if os.path.isdir(path):
paths.append(path)
return paths
def get_python_major_version(python_bin_path):
"""Get the python major version."""
return run_shell([python_bin_path, '-c', 'import sys; print(sys.version[0])'])
def setup_python(environ_cp):
"""Setup python related env variables."""
# Get PYTHON_BIN_PATH, default is the current running python.
default_python_bin_path = sys.executable
ask_python_bin_path = ('Please specify the location of python. [Default is '
'%s]: ') % default_python_bin_path
while True:
python_bin_path = get_from_env_or_user_or_default(environ_cp,
'PYTHON_BIN_PATH',
ask_python_bin_path,
default_python_bin_path)
# Check if the path is valid
if os.path.isfile(python_bin_path) and os.access(python_bin_path, os.X_OK):
break
elif not os.path.exists(python_bin_path):
print('Invalid python path: %s cannot be found.' % python_bin_path)
else:
print('%s is not executable. Is it the python binary?' % python_bin_path)
environ_cp['PYTHON_BIN_PATH'] = ''
# Convert python path to Windows style before checking lib and version
if is_windows() or is_cygwin():
python_bin_path = cygpath(python_bin_path)
# Get PYTHON_LIB_PATH
python_lib_path = environ_cp.get('PYTHON_LIB_PATH')
if not python_lib_path:
python_lib_paths = get_python_path(environ_cp, python_bin_path)
if environ_cp.get('USE_DEFAULT_PYTHON_LIB_PATH') == '1':
python_lib_path = python_lib_paths[0]
else:
print('Found possible Python library paths:\n %s' %
'\n '.join(python_lib_paths))
default_python_lib_path = python_lib_paths[0]
python_lib_path = get_input(
'Please input the desired Python library path to use. '
'Default is [%s]\n' % python_lib_paths[0])
if not python_lib_path:
python_lib_path = default_python_lib_path
environ_cp['PYTHON_LIB_PATH'] = python_lib_path
_ = get_python_major_version(python_bin_path)
# Convert python path to Windows style before writing into bazel.rc
if is_windows() or is_cygwin():
python_lib_path = cygpath(python_lib_path)
# Set-up env variables used by python_configure.bzl
write_action_env_to_bazelrc('PYTHON_BIN_PATH', python_bin_path)
write_action_env_to_bazelrc('PYTHON_LIB_PATH', python_lib_path)
write_to_bazelrc('build --python_path=\"%s"' % python_bin_path)
environ_cp['PYTHON_BIN_PATH'] = python_bin_path
# If the chosen python_lib_path is from a path specified in the PYTHONPATH
# variable, we need to tell bazel to include PYTHONPATH
if environ_cp.get('PYTHONPATH'):
python_paths = environ_cp.get('PYTHONPATH').split(':')
if python_lib_path in python_paths:
write_action_env_to_bazelrc('PYTHONPATH', environ_cp.get('PYTHONPATH'))
# Write tools/python_bin_path.sh
with open(
os.path.join(_TF_WORKSPACE_ROOT, 'tools', 'python_bin_path.sh'),
'w') as f:
f.write('export PYTHON_BIN_PATH="%s"' % python_bin_path)
def reset_tf_configure_bazelrc():
"""Reset file that contains customized config settings."""
open(_TF_BAZELRC, 'w').close()
def cleanup_makefile():
"""Delete any leftover BUILD files from the Makefile build.
These files could interfere with Bazel parsing.
"""
makefile_download_dir = os.path.join(_TF_WORKSPACE_ROOT, 'tensorflow',
'contrib', 'makefile', 'downloads')
if os.path.isdir(makefile_download_dir):
for root, _, filenames in os.walk(makefile_download_dir):
for f in filenames:
if f.endswith('BUILD'):
os.remove(os.path.join(root, f))
def get_var(environ_cp,
var_name,
query_item,
enabled_by_default,
question=None,
yes_reply=None,
no_reply=None):
"""Get boolean input from user.
If var_name is not set in env, ask user to enable query_item or not. If the
response is empty, use the default.
Args:
environ_cp: copy of the os.environ.
var_name: string for name of environment variable, e.g. "TF_NEED_CUDA".
query_item: string for feature related to the variable, e.g. "CUDA for
Nvidia GPUs".
enabled_by_default: boolean for default behavior.
question: optional string for how to ask for user input.
yes_reply: optional string for reply when feature is enabled.
no_reply: optional string for reply when feature is disabled.
Returns:
boolean value of the variable.
Raises:
UserInputError: if an environment variable is set, but it cannot be
interpreted as a boolean indicator, assume that the user has made a
scripting error, and will continue to provide invalid input.
Raise the error to avoid infinitely looping.
"""
if not question:
question = 'Do you wish to build TensorFlow with %s support?' % query_item
if not yes_reply:
yes_reply = '%s support will be enabled for TensorFlow.' % query_item
if not no_reply:
no_reply = 'No %s' % yes_reply
yes_reply += '\n'
no_reply += '\n'
if enabled_by_default:
question += ' [Y/n]: '
else:
question += ' [y/N]: '
var = environ_cp.get(var_name)
if var is not None:
var_content = var.strip().lower()
true_strings = ('1', 't', 'true', 'y', 'yes')
false_strings = ('0', 'f', 'false', 'n', 'no')
if var_content in true_strings:
var = True
elif var_content in false_strings:
var = False
else:
raise UserInputError(
'Environment variable %s must be set as a boolean indicator.\n'
'The following are accepted as TRUE : %s.\n'
'The following are accepted as FALSE: %s.\n'
'Current value is %s.' %
(var_name, ', '.join(true_strings), ', '.join(false_strings), var))
while var is None:
user_input_origin = get_input(question)
user_input = user_input_origin.strip().lower()
if user_input == 'y':
print(yes_reply)
var = True
elif user_input == 'n':
print(no_reply)
var = False
elif not user_input:
if enabled_by_default:
print(yes_reply)
var = True
else:
print(no_reply)
var = False
else:
print('Invalid selection: %s' % user_input_origin)
return var
def set_build_var(environ_cp,
var_name,
query_item,
option_name,
enabled_by_default,
bazel_config_name=None):
"""Set if query_item will be enabled for the build.
Ask user if query_item will be enabled. Default is used if no input is given.
Set subprocess environment variable and write to .bazelrc if enabled.
Args:
environ_cp: copy of the os.environ.
var_name: string for name of environment variable, e.g. "TF_NEED_CUDA".
query_item: string for feature related to the variable, e.g. "CUDA for
Nvidia GPUs".
option_name: string for option to define in .bazelrc.
enabled_by_default: boolean for default behavior.
bazel_config_name: Name for Bazel --config argument to enable build feature.
"""
var = str(int(get_var(environ_cp, var_name, query_item, enabled_by_default)))
environ_cp[var_name] = var
if var == '1':
write_to_bazelrc('build:%s --define %s=true' %
(bazel_config_name, option_name))
write_to_bazelrc('build --config=%s' % bazel_config_name)
elif bazel_config_name is not None:
# TODO(mikecase): Migrate all users of configure.py to use --config Bazel
# options and not to set build configs through environment variables.
write_to_bazelrc('build:%s --define %s=true' %
(bazel_config_name, option_name))
def set_action_env_var(environ_cp,
var_name,
query_item,
enabled_by_default,
question=None,
yes_reply=None,
no_reply=None,
bazel_config_name=None):
"""Set boolean action_env variable.
Ask user if query_item will be enabled. Default is used if no input is given.
Set environment variable and write to .bazelrc.
Args:
environ_cp: copy of the os.environ.
var_name: string for name of environment variable, e.g. "TF_NEED_CUDA".
query_item: string for feature related to the variable, e.g. "CUDA for
Nvidia GPUs".
enabled_by_default: boolean for default behavior.
question: optional string for how to ask for user input.
yes_reply: optional string for reply when feature is enabled.
no_reply: optional string for reply when feature is disabled.
bazel_config_name: adding config to .bazelrc instead of action_env.
"""
var = int(
get_var(environ_cp, var_name, query_item, enabled_by_default, question,
yes_reply, no_reply))
if not bazel_config_name:
write_action_env_to_bazelrc(var_name, var)
elif var:
write_to_bazelrc('build --config=%s' % bazel_config_name)
environ_cp[var_name] = str(var)
def convert_version_to_int(version):
"""Convert a version number to a integer that can be used to compare.
Version strings of the form X.Y, X.Y.Z and X.Y.Z-xxxxx are supported. The
'xxxxx' part, for instance 'homebrew' on OS/X, is ignored.
Args:
version: a version to be converted
Returns:
An integer if converted successfully, otherwise return None.
"""
version = version.split('-')[0]
version_segments = version.split('.')
# Treat "0.24" as "0.24.0"
if len(version_segments) == 2:
version_segments.append('0')
for seg in version_segments:
if not seg.isdigit():
return None
version_str = ''.join(['%03d' % int(seg) for seg in version_segments])
return int(version_str)
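# Worked examples for convert_version_to_int (illustrative):
#   '0.24.1'          -> 24001  ('000' + '024' + '001')
#   '0.26'            -> 26000  (treated as '0.26.0')
#   '0.25.2-homebrew' -> 25002  (suffix after '-' is ignored)
#   'latest'          -> None   (non-numeric segment)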
def check_bazel_version(min_version, max_version):
"""Check installed bazel version is between min_version and max_version.
Args:
min_version: string for minimum bazel version (must exist!).
max_version: string for maximum bazel version (must exist!).
Returns:
The bazel version detected.
"""
if which('bazel') is None:
print('Cannot find bazel. Please install bazel.')
sys.exit(0)
curr_version = run_shell(
['bazel', '--batch', '--bazelrc=/dev/null', 'version'])
for line in curr_version.split('\n'):
if 'Build label: ' in line:
curr_version = line.split('Build label: ')[1]
break
min_version_int = convert_version_to_int(min_version)
curr_version_int = convert_version_to_int(curr_version)
max_version_int = convert_version_to_int(max_version)
# Check if current bazel version can be detected properly.
if not curr_version_int:
print('WARNING: current bazel installation is not a release version.')
print('Make sure you are running at least bazel %s' % min_version)
return curr_version
print('You have bazel %s installed.' % curr_version)
if curr_version_int < min_version_int:
print('Please upgrade your bazel installation to version %s or higher to '
'build TensorFlow!' % min_version)
sys.exit(1)
if (curr_version_int > max_version_int and
'TF_IGNORE_MAX_BAZEL_VERSION' not in os.environ):
print('Please downgrade your bazel installation to version %s or lower to '
'build TensorFlow! To downgrade: download the installer for the old '
'version (from https://github.com/bazelbuild/bazel/releases) then '
'run the installer.' % max_version)
sys.exit(1)
return curr_version
def set_cc_opt_flags(environ_cp):
"""Set up architecture-dependent optimization flags.
Also append CC optimization flags to bazel.rc.
Args:
environ_cp: copy of the os.environ.
"""
if is_ppc64le():
# gcc on ppc64le does not support -march, use mcpu instead
default_cc_opt_flags = '-mcpu=native'
elif is_windows():
default_cc_opt_flags = '/arch:AVX'
else:
default_cc_opt_flags = '-march=native -Wno-sign-compare'
question = ('Please specify optimization flags to use during compilation when'
' bazel option "--config=opt" is specified [Default is %s]: '
) % default_cc_opt_flags
cc_opt_flags = get_from_env_or_user_or_default(environ_cp, 'CC_OPT_FLAGS',
question, default_cc_opt_flags)
for opt in cc_opt_flags.split():
write_to_bazelrc('build:opt --copt=%s' % opt)
# It should be safe on the same build host.
if not is_ppc64le() and not is_windows():
write_to_bazelrc('build:opt --host_copt=-march=native')
write_to_bazelrc('build:opt --define with_default_optimizations=true')
def set_tf_cuda_clang(environ_cp):
"""set TF_CUDA_CLANG action_env.
Args:
environ_cp: copy of the os.environ.
"""
question = 'Do you want to use clang as CUDA compiler?'
yes_reply = 'Clang will be used as CUDA compiler.'
no_reply = 'nvcc will be used as CUDA compiler.'
set_action_env_var(
environ_cp,
'TF_CUDA_CLANG',
None,
False,
question=question,
yes_reply=yes_reply,
no_reply=no_reply,
bazel_config_name='cuda_clang')
def set_tf_download_clang(environ_cp):
"""Set TF_DOWNLOAD_CLANG action_env."""
question = 'Do you wish to download a fresh release of clang? (Experimental)'
yes_reply = 'Clang will be downloaded and used to compile tensorflow.'
no_reply = 'Clang will not be downloaded.'
set_action_env_var(
environ_cp,
'TF_DOWNLOAD_CLANG',
None,
False,
question=question,
yes_reply=yes_reply,
no_reply=no_reply,
bazel_config_name='download_clang')
def get_from_env_or_user_or_default(environ_cp, var_name, ask_for_var,
var_default):
"""Get var_name either from env, or user or default.
If var_name has been set as environment variable, use the preset value, else
ask for user input. If no input is provided, the default is used.
Args:
environ_cp: copy of the os.environ.
var_name: string for name of environment variable, e.g. "TF_NEED_CUDA".
ask_for_var: string for how to ask for user input.
var_default: default value string.
Returns:
string value for var_name
"""
var = environ_cp.get(var_name)
if not var:
var = get_input(ask_for_var)
print('\n')
if not var:
var = var_default
return var
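# Illustrative example (hypothetical values, not part of the original script):
# the resolution order is environment variable, then interactive input, then
# the provided default.
#
#   environ_cp = {'TF_CUDA_VERSION': '10.1'}
#   get_from_env_or_user_or_default(environ_cp, 'TF_CUDA_VERSION',
#                                   'Which CUDA version? ', '10.0')
#   # -> '10.1' (taken from the environment; the user is never prompted)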
def set_clang_cuda_compiler_path(environ_cp):
"""Set CLANG_CUDA_COMPILER_PATH."""
default_clang_path = which('clang') or ''
ask_clang_path = ('Please specify which clang should be used as device and '
'host compiler. [Default is %s]: ') % default_clang_path
while True:
clang_cuda_compiler_path = get_from_env_or_user_or_default(
environ_cp, 'CLANG_CUDA_COMPILER_PATH', ask_clang_path,
default_clang_path)
if os.path.exists(clang_cuda_compiler_path):
break
# Reset and retry
print('Invalid clang path: %s cannot be found.' % clang_cuda_compiler_path)
environ_cp['CLANG_CUDA_COMPILER_PATH'] = ''
# Set CLANG_CUDA_COMPILER_PATH
environ_cp['CLANG_CUDA_COMPILER_PATH'] = clang_cuda_compiler_path
write_action_env_to_bazelrc('CLANG_CUDA_COMPILER_PATH',
clang_cuda_compiler_path)
def prompt_loop_or_load_from_env(environ_cp,
var_name,
var_default,
ask_for_var,
check_success,
error_msg,
suppress_default_error=False,
n_ask_attempts=_DEFAULT_PROMPT_ASK_ATTEMPTS):
"""Loop over user prompts for an ENV param until receiving a valid response.
For the env param var_name, read from the environment or verify user input
until receiving valid input. When done, set var_name in the environ_cp to its
new value.
Args:
environ_cp: (Dict) copy of the os.environ.
var_name: (String) string for name of environment variable, e.g. "TF_MYVAR".
var_default: (String) default value string.
ask_for_var: (String) string for how to ask for user input.
check_success: (Function) function that takes one argument and returns a
boolean. Should return True if the value provided is considered valid. May
contain a complex error message if error_msg does not provide enough
information. In that case, set suppress_default_error to True.
error_msg: (String) String with one and only one '%s'. Formatted with each
invalid response upon check_success(input) failure.
suppress_default_error: (Bool) Suppress the above error message in favor of
one from the check_success function.
n_ask_attempts: (Integer) Number of times to query for valid input before
raising an error and quitting.
Returns:
[String] The value of var_name after querying for input.
Raises:
    UserInputError: if a query has been attempted n_ask_attempts times without
      success. In that case the user is assumed to have made a scripting error
      and to keep providing invalid input, so the error is raised to avoid
      looping infinitely.
"""
default = environ_cp.get(var_name) or var_default
full_query = '%s [Default is %s]: ' % (
ask_for_var,
default,
)
for _ in range(n_ask_attempts):
val = get_from_env_or_user_or_default(environ_cp, var_name, full_query,
default)
if check_success(val):
break
if not suppress_default_error:
print(error_msg % val)
environ_cp[var_name] = ''
else:
raise UserInputError('Invalid %s setting was provided %d times in a row. '
'Assuming to be a scripting mistake.' %
(var_name, n_ask_attempts))
environ_cp[var_name] = val
return val
def create_android_ndk_rule(environ_cp):
"""Set ANDROID_NDK_HOME and write Android NDK WORKSPACE rule."""
if is_windows() or is_cygwin():
default_ndk_path = cygpath('%s/Android/Sdk/ndk-bundle' %
environ_cp['APPDATA'])
elif is_macos():
default_ndk_path = '%s/library/Android/Sdk/ndk-bundle' % environ_cp['HOME']
else:
default_ndk_path = '%s/Android/Sdk/ndk-bundle' % environ_cp['HOME']
def valid_ndk_path(path):
return (os.path.exists(path) and
os.path.exists(os.path.join(path, 'source.properties')))
android_ndk_home_path = prompt_loop_or_load_from_env(
environ_cp,
var_name='ANDROID_NDK_HOME',
var_default=default_ndk_path,
ask_for_var='Please specify the home path of the Android NDK to use.',
check_success=valid_ndk_path,
error_msg=('The path %s or its child file "source.properties" '
'does not exist.'))
write_action_env_to_bazelrc('ANDROID_NDK_HOME', android_ndk_home_path)
write_action_env_to_bazelrc(
'ANDROID_NDK_API_LEVEL',
get_ndk_api_level(environ_cp, android_ndk_home_path))
def create_android_sdk_rule(environ_cp):
"""Set Android variables and write Android SDK WORKSPACE rule."""
if is_windows() or is_cygwin():
default_sdk_path = cygpath('%s/Android/Sdk' % environ_cp['APPDATA'])
elif is_macos():
default_sdk_path = '%s/library/Android/Sdk' % environ_cp['HOME']
else:
default_sdk_path = '%s/Android/Sdk' % environ_cp['HOME']
def valid_sdk_path(path):
return (os.path.exists(path) and
os.path.exists(os.path.join(path, 'platforms')) and
os.path.exists(os.path.join(path, 'build-tools')))
android_sdk_home_path = prompt_loop_or_load_from_env(
environ_cp,
var_name='ANDROID_SDK_HOME',
var_default=default_sdk_path,
ask_for_var='Please specify the home path of the Android SDK to use.',
check_success=valid_sdk_path,
error_msg=('Either %s does not exist, or it does not contain the '
'subdirectories "platforms" and "build-tools".'))
platforms = os.path.join(android_sdk_home_path, 'platforms')
api_levels = sorted(os.listdir(platforms))
api_levels = [x.replace('android-', '') for x in api_levels]
def valid_api_level(api_level):
return os.path.exists(
os.path.join(android_sdk_home_path, 'platforms',
'android-' + api_level))
android_api_level = prompt_loop_or_load_from_env(
environ_cp,
var_name='ANDROID_API_LEVEL',
var_default=api_levels[-1],
ask_for_var=('Please specify the Android SDK API level to use. '
'[Available levels: %s]') % api_levels,
check_success=valid_api_level,
error_msg='Android-%s is not present in the SDK path.')
build_tools = os.path.join(android_sdk_home_path, 'build-tools')
versions = sorted(os.listdir(build_tools))
def valid_build_tools(version):
return os.path.exists(
os.path.join(android_sdk_home_path, 'build-tools', version))
android_build_tools_version = prompt_loop_or_load_from_env(
environ_cp,
var_name='ANDROID_BUILD_TOOLS_VERSION',
var_default=versions[-1],
ask_for_var=('Please specify an Android build tools version to use. '
'[Available versions: %s]') % versions,
check_success=valid_build_tools,
error_msg=('The selected SDK does not have build-tools version %s '
'available.'))
write_action_env_to_bazelrc('ANDROID_BUILD_TOOLS_VERSION',
android_build_tools_version)
write_action_env_to_bazelrc('ANDROID_SDK_API_LEVEL', android_api_level)
write_action_env_to_bazelrc('ANDROID_SDK_HOME', android_sdk_home_path)
def get_ndk_api_level(environ_cp, android_ndk_home_path):
"""Gets the appropriate NDK API level to use for the provided Android NDK path."""
# First check to see if we're using a blessed version of the NDK.
properties_path = '%s/source.properties' % android_ndk_home_path
if is_windows() or is_cygwin():
properties_path = cygpath(properties_path)
with open(properties_path, 'r') as f:
filedata = f.read()
revision = re.search(r'Pkg.Revision = (\d+)', filedata)
if revision:
ndk_version = revision.group(1)
else:
raise Exception('Unable to parse NDK revision.')
if int(ndk_version) not in _SUPPORTED_ANDROID_NDK_VERSIONS:
print('WARNING: The NDK version in %s is %s, which is not '
'supported by Bazel (officially supported versions: %s). Please use '
'another version. Compiling Android targets may result in confusing '
'errors.\n' %
(android_ndk_home_path, ndk_version, _SUPPORTED_ANDROID_NDK_VERSIONS))
# Now grab the NDK API level to use. Note that this is different from the
# SDK API level, as the NDK API level is effectively the *min* target SDK
# version.
platforms = os.path.join(android_ndk_home_path, 'platforms')
api_levels = sorted(os.listdir(platforms))
api_levels = [
x.replace('android-', '') for x in api_levels if 'android-' in x
]
def valid_api_level(api_level):
return os.path.exists(
os.path.join(android_ndk_home_path, 'platforms',
'android-' + api_level))
android_ndk_api_level = prompt_loop_or_load_from_env(
environ_cp,
var_name='ANDROID_NDK_API_LEVEL',
var_default='18', # 18 is required for GPU acceleration.
ask_for_var=('Please specify the (min) Android NDK API level to use. '
'[Available levels: %s]') % api_levels,
check_success=valid_api_level,
error_msg='Android-%s is not present in the NDK path.')
return android_ndk_api_level
def set_gcc_host_compiler_path(environ_cp):
"""Set GCC_HOST_COMPILER_PATH."""
default_gcc_host_compiler_path = which('gcc') or ''
cuda_bin_symlink = '%s/bin/gcc' % environ_cp.get('CUDA_TOOLKIT_PATH')
if os.path.islink(cuda_bin_symlink):
# os.readlink is only available in linux
default_gcc_host_compiler_path = os.path.realpath(cuda_bin_symlink)
gcc_host_compiler_path = prompt_loop_or_load_from_env(
environ_cp,
var_name='GCC_HOST_COMPILER_PATH',
var_default=default_gcc_host_compiler_path,
ask_for_var='Please specify which gcc should be used by nvcc as the host compiler.',
check_success=os.path.exists,
error_msg='Invalid gcc path. %s cannot be found.',
)
write_action_env_to_bazelrc('GCC_HOST_COMPILER_PATH', gcc_host_compiler_path)
def reformat_version_sequence(version_str, sequence_count):
"""Reformat the version string to have the given number of sequences.
For example:
Given (7, 2) -> 7.0
(7.0.1, 2) -> 7.0
(5, 1) -> 5
(5.0.3.2, 1) -> 5
Args:
version_str: String, the version string.
sequence_count: int, an integer.
Returns:
string, reformatted version string.
"""
v = version_str.split('.')
if len(v) < sequence_count:
v = v + (['0'] * (sequence_count - len(v)))
return '.'.join(v[:sequence_count])
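# Illustrative examples (not part of the original script), mirroring the
# docstring above:
#
#   reformat_version_sequence('7', 2)        # -> '7.0'
#   reformat_version_sequence('7.0.1', 2)    # -> '7.0'
#   reformat_version_sequence('5', 1)        # -> '5'
#   reformat_version_sequence('5.0.3.2', 1)  # -> '5'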
def set_tf_cuda_paths(environ_cp):
"""Set TF_CUDA_PATHS."""
ask_cuda_paths = (
'Please specify the comma-separated list of base paths to look for CUDA '
'libraries and headers. [Leave empty to use the default]: ')
tf_cuda_paths = get_from_env_or_user_or_default(environ_cp, 'TF_CUDA_PATHS',
ask_cuda_paths, '')
if tf_cuda_paths:
environ_cp['TF_CUDA_PATHS'] = tf_cuda_paths
def set_tf_cuda_version(environ_cp):
"""Set TF_CUDA_VERSION."""
ask_cuda_version = (
'Please specify the CUDA SDK version you want to use. '
'[Leave empty to default to CUDA %s]: ') % _DEFAULT_CUDA_VERSION
tf_cuda_version = get_from_env_or_user_or_default(environ_cp,
'TF_CUDA_VERSION',
ask_cuda_version,
_DEFAULT_CUDA_VERSION)
environ_cp['TF_CUDA_VERSION'] = tf_cuda_version
def set_tf_cudnn_version(environ_cp):
"""Set TF_CUDNN_VERSION."""
ask_cudnn_version = (
'Please specify the cuDNN version you want to use. '
'[Leave empty to default to cuDNN %s]: ') % _DEFAULT_CUDNN_VERSION
tf_cudnn_version = get_from_env_or_user_or_default(environ_cp,
'TF_CUDNN_VERSION',
ask_cudnn_version,
_DEFAULT_CUDNN_VERSION)
environ_cp['TF_CUDNN_VERSION'] = tf_cudnn_version
def is_cuda_compatible(lib, cuda_ver, cudnn_ver):
"""Check compatibility between given library and cudnn/cudart libraries."""
ldd_bin = which('ldd') or '/usr/bin/ldd'
ldd_out = run_shell([ldd_bin, lib], True)
ldd_out = ldd_out.split(os.linesep)
cudnn_pattern = re.compile('.*libcudnn.so\\.?(.*) =>.*$')
cuda_pattern = re.compile('.*libcudart.so\\.?(.*) =>.*$')
cudnn = None
cudart = None
cudnn_ok = True # assume no cudnn dependency by default
cuda_ok = True # assume no cuda dependency by default
for line in ldd_out:
if 'libcudnn.so' in line:
cudnn = cudnn_pattern.search(line)
cudnn_ok = False
elif 'libcudart.so' in line:
cudart = cuda_pattern.search(line)
cuda_ok = False
if cudnn and len(cudnn.group(1)):
cudnn = convert_version_to_int(cudnn.group(1))
if cudart and len(cudart.group(1)):
cudart = convert_version_to_int(cudart.group(1))
if cudnn is not None:
cudnn_ok = (cudnn == cudnn_ver)
if cudart is not None:
cuda_ok = (cudart == cuda_ver)
return cudnn_ok and cuda_ok
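# Illustrative sketch (hypothetical 'ldd' output, not part of the original
# script): for a library that links against cuDNN 7 and the CUDA 10.0 runtime,
# 'ldd' would print lines such as
#
#   libcudnn.so.7 => /usr/lib/x86_64-linux-gnu/libcudnn.so.7 (0x...)
#   libcudart.so.10.0 => /usr/local/cuda/lib64/libcudart.so.10.0 (0x...)
#
# from which is_cuda_compatible extracts '7' and '10.0' and compares them
# (after convert_version_to_int) against cudnn_ver and cuda_ver.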
def set_tf_tensorrt_version(environ_cp):
"""Set TF_TENSORRT_VERSION."""
if not is_linux():
raise ValueError('Currently TensorRT is only supported on Linux platform.')
if not int(environ_cp.get('TF_NEED_TENSORRT', False)):
return
ask_tensorrt_version = (
'Please specify the TensorRT version you want to use. '
'[Leave empty to default to TensorRT %s]: ') % _DEFAULT_TENSORRT_VERSION
tf_tensorrt_version = get_from_env_or_user_or_default(
environ_cp, 'TF_TENSORRT_VERSION', ask_tensorrt_version,
_DEFAULT_TENSORRT_VERSION)
environ_cp['TF_TENSORRT_VERSION'] = tf_tensorrt_version
def set_tf_nccl_version(environ_cp):
"""Set TF_NCCL_VERSION."""
if not is_linux():
raise ValueError('Currently NCCL is only supported on Linux platform.')
if 'TF_NCCL_VERSION' in environ_cp:
return
ask_nccl_version = (
'Please specify the locally installed NCCL version you want to use. '
'[Leave empty to use http://github.com/nvidia/nccl]: ')
tf_nccl_version = get_from_env_or_user_or_default(environ_cp,
'TF_NCCL_VERSION',
ask_nccl_version, '')
environ_cp['TF_NCCL_VERSION'] = tf_nccl_version
def get_native_cuda_compute_capabilities(environ_cp):
"""Get native cuda compute capabilities.
Args:
environ_cp: copy of the os.environ.
Returns:
string of native cuda compute capabilities, separated by comma.
"""
device_query_bin = os.path.join(
environ_cp.get('CUDA_TOOLKIT_PATH'), 'extras/demo_suite/deviceQuery')
if os.path.isfile(device_query_bin) and os.access(device_query_bin, os.X_OK):
try:
output = run_shell(device_query_bin).split('\n')
pattern = re.compile('[0-9]*\\.[0-9]*')
output = [pattern.search(x) for x in output if 'Capability' in x]
output = ','.join(x.group() for x in output if x is not None)
except subprocess.CalledProcessError:
output = ''
else:
output = ''
return output
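# Illustrative sketch (hypothetical deviceQuery output, not part of the
# original script): deviceQuery prints one line per GPU containing e.g.
# "CUDA Capability Major/Minor version number:    7.0"; the regex above picks
# out the "x.y" values and joins them into a comma-separated string such as
# "7.0,6.1".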
def set_tf_cuda_compute_capabilities(environ_cp):
"""Set TF_CUDA_COMPUTE_CAPABILITIES."""
while True:
native_cuda_compute_capabilities = get_native_cuda_compute_capabilities(
environ_cp)
if not native_cuda_compute_capabilities:
default_cuda_compute_capabilities = _DEFAULT_CUDA_COMPUTE_CAPABILITIES
else:
default_cuda_compute_capabilities = native_cuda_compute_capabilities
ask_cuda_compute_capabilities = (
'Please specify a list of comma-separated '
'CUDA compute capabilities you want to '
'build with.\nYou can find the compute '
'capability of your device at: '
'https://developer.nvidia.com/cuda-gpus.\nPlease'
' note that each additional compute '
'capability significantly increases your '
'build time and binary size, and that '
'TensorFlow only supports compute '
'capabilities >= 3.5 [Default is: %s]: ' %
default_cuda_compute_capabilities)
tf_cuda_compute_capabilities = get_from_env_or_user_or_default(
environ_cp, 'TF_CUDA_COMPUTE_CAPABILITIES',
ask_cuda_compute_capabilities, default_cuda_compute_capabilities)
    # Check whether all capabilities from the input are valid
all_valid = True
    # Remove all whitespace characters that users may have inserted by accident
    # before splitting the string, as whitespace would result in an error
tf_cuda_compute_capabilities = ''.join(tf_cuda_compute_capabilities.split())
for compute_capability in tf_cuda_compute_capabilities.split(','):
      m = re.match(r'[0-9]+\.[0-9]+', compute_capability)
if not m:
print('Invalid compute capability: %s' % compute_capability)
all_valid = False
else:
ver = float(m.group(0))
if ver < 3.0:
print('ERROR: TensorFlow only supports CUDA compute capabilities 3.0 '
'and higher. Please re-specify the list of compute '
'capabilities excluding version %s.' % ver)
all_valid = False
if ver < 3.5:
print('WARNING: XLA does not support CUDA compute capabilities '
'lower than 3.5. Disable XLA when running on older GPUs.')
if all_valid:
break
# Reset and Retry
environ_cp['TF_CUDA_COMPUTE_CAPABILITIES'] = ''
# Set TF_CUDA_COMPUTE_CAPABILITIES
environ_cp['TF_CUDA_COMPUTE_CAPABILITIES'] = tf_cuda_compute_capabilities
write_action_env_to_bazelrc('TF_CUDA_COMPUTE_CAPABILITIES',
tf_cuda_compute_capabilities)
def set_other_cuda_vars(environ_cp):
"""Set other CUDA related variables."""
# If CUDA is enabled, always use GPU during build and test.
if environ_cp.get('TF_CUDA_CLANG') == '1':
write_to_bazelrc('build --config=cuda_clang')
else:
write_to_bazelrc('build --config=cuda')
def set_host_cxx_compiler(environ_cp):
"""Set HOST_CXX_COMPILER."""
default_cxx_host_compiler = which('g++') or ''
host_cxx_compiler = prompt_loop_or_load_from_env(
environ_cp,
var_name='HOST_CXX_COMPILER',
var_default=default_cxx_host_compiler,
ask_for_var=('Please specify which C++ compiler should be used as the '
'host C++ compiler.'),
check_success=os.path.exists,
error_msg='Invalid C++ compiler path. %s cannot be found.',
)
write_action_env_to_bazelrc('HOST_CXX_COMPILER', host_cxx_compiler)
def set_host_c_compiler(environ_cp):
"""Set HOST_C_COMPILER."""
default_c_host_compiler = which('gcc') or ''
host_c_compiler = prompt_loop_or_load_from_env(
environ_cp,
var_name='HOST_C_COMPILER',
var_default=default_c_host_compiler,
ask_for_var=('Please specify which C compiler should be used as the host '
'C compiler.'),
check_success=os.path.exists,
error_msg='Invalid C compiler path. %s cannot be found.',
)
write_action_env_to_bazelrc('HOST_C_COMPILER', host_c_compiler)
def set_computecpp_toolkit_path(environ_cp):
"""Set COMPUTECPP_TOOLKIT_PATH."""
def toolkit_exists(toolkit_path):
"""Check if a computecpp toolkit path is valid."""
if is_linux():
sycl_rt_lib_path = 'lib/libComputeCpp.so'
else:
sycl_rt_lib_path = ''
sycl_rt_lib_path_full = os.path.join(toolkit_path, sycl_rt_lib_path)
exists = os.path.exists(sycl_rt_lib_path_full)
if not exists:
print('Invalid SYCL %s library path. %s cannot be found' %
(_TF_OPENCL_VERSION, sycl_rt_lib_path_full))
return exists
computecpp_toolkit_path = prompt_loop_or_load_from_env(
environ_cp,
var_name='COMPUTECPP_TOOLKIT_PATH',
var_default=_DEFAULT_COMPUTECPP_TOOLKIT_PATH,
ask_for_var=(
'Please specify the location where ComputeCpp for SYCL %s is '
'installed.' % _TF_OPENCL_VERSION),
check_success=toolkit_exists,
error_msg='Invalid SYCL compiler path. %s cannot be found.',
suppress_default_error=True)
write_action_env_to_bazelrc('COMPUTECPP_TOOLKIT_PATH',
computecpp_toolkit_path)
def set_trisycl_include_dir(environ_cp):
"""Set TRISYCL_INCLUDE_DIR."""
ask_trisycl_include_dir = ('Please specify the location of the triSYCL '
'include directory. (Use --config=sycl_trisycl '
'when building with Bazel) '
'[Default is %s]: ') % (
_DEFAULT_TRISYCL_INCLUDE_DIR)
while True:
trisycl_include_dir = get_from_env_or_user_or_default(
environ_cp, 'TRISYCL_INCLUDE_DIR', ask_trisycl_include_dir,
_DEFAULT_TRISYCL_INCLUDE_DIR)
if os.path.exists(trisycl_include_dir):
break
print('Invalid triSYCL include directory, %s cannot be found' %
(trisycl_include_dir))
# Set TRISYCL_INCLUDE_DIR
environ_cp['TRISYCL_INCLUDE_DIR'] = trisycl_include_dir
write_action_env_to_bazelrc('TRISYCL_INCLUDE_DIR', trisycl_include_dir)
def set_mpi_home(environ_cp):
"""Set MPI_HOME."""
default_mpi_home = which('mpirun') or which('mpiexec') or ''
default_mpi_home = os.path.dirname(os.path.dirname(default_mpi_home))
def valid_mpi_path(mpi_home):
exists = (
os.path.exists(os.path.join(mpi_home, 'include')) and
(os.path.exists(os.path.join(mpi_home, 'lib')) or
os.path.exists(os.path.join(mpi_home, 'lib64')) or
os.path.exists(os.path.join(mpi_home, 'lib32'))))
if not exists:
print(
'Invalid path to the MPI Toolkit. %s or %s or %s or %s cannot be found'
% (os.path.join(mpi_home, 'include'),
             os.path.join(mpi_home, 'lib'),
             os.path.join(mpi_home, 'lib64'),
             os.path.join(mpi_home, 'lib32')))
return exists
_ = prompt_loop_or_load_from_env(
environ_cp,
var_name='MPI_HOME',
var_default=default_mpi_home,
ask_for_var='Please specify the MPI toolkit folder.',
check_success=valid_mpi_path,
error_msg='',
suppress_default_error=True)
def set_other_mpi_vars(environ_cp):
"""Set other MPI related variables."""
# Link the MPI header files
mpi_home = environ_cp.get('MPI_HOME')
symlink_force('%s/include/mpi.h' % mpi_home, 'third_party/mpi/mpi.h')
  # Determine whether we are using OpenMPI or MVAPICH; they require different
  # header files to be included here to keep the bazel dependency checker happy
if os.path.exists(os.path.join(mpi_home, 'include/mpi_portable_platform.h')):
symlink_force(
os.path.join(mpi_home, 'include/mpi_portable_platform.h'),
'third_party/mpi/mpi_portable_platform.h')
# TODO(gunan): avoid editing files in configure
sed_in_place('third_party/mpi/mpi.bzl', 'MPI_LIB_IS_OPENMPI=False',
'MPI_LIB_IS_OPENMPI=True')
else:
# MVAPICH / MPICH
symlink_force(
os.path.join(mpi_home, 'include/mpio.h'), 'third_party/mpi/mpio.h')
symlink_force(
os.path.join(mpi_home, 'include/mpicxx.h'), 'third_party/mpi/mpicxx.h')
# TODO(gunan): avoid editing files in configure
sed_in_place('third_party/mpi/mpi.bzl', 'MPI_LIB_IS_OPENMPI=True',
'MPI_LIB_IS_OPENMPI=False')
if os.path.exists(os.path.join(mpi_home, 'lib/libmpi.so')):
symlink_force(
os.path.join(mpi_home, 'lib/libmpi.so'), 'third_party/mpi/libmpi.so')
elif os.path.exists(os.path.join(mpi_home, 'lib64/libmpi.so')):
symlink_force(
os.path.join(mpi_home, 'lib64/libmpi.so'), 'third_party/mpi/libmpi.so')
elif os.path.exists(os.path.join(mpi_home, 'lib32/libmpi.so')):
symlink_force(
os.path.join(mpi_home, 'lib32/libmpi.so'), 'third_party/mpi/libmpi.so')
else:
raise ValueError(
'Cannot find the MPI library file in %s/lib or %s/lib64 or %s/lib32' %
(mpi_home, mpi_home, mpi_home))
def system_specific_test_config(env):
"""Add default build and test flags required for TF tests to bazelrc."""
write_to_bazelrc('test --flaky_test_attempts=3')
write_to_bazelrc('test --test_size_filters=small,medium')
write_to_bazelrc(
'test --test_tag_filters=-benchmark-test,-no_oss,-oss_serial')
write_to_bazelrc('test --build_tag_filters=-benchmark-test,-no_oss')
if is_windows():
if env.get('TF_NEED_CUDA', None) == '1':
write_to_bazelrc(
'test --test_tag_filters=-no_windows,-no_windows_gpu,-no_gpu')
write_to_bazelrc(
'test --build_tag_filters=-no_windows,-no_windows_gpu,-no_gpu')
else:
write_to_bazelrc('test --test_tag_filters=-no_windows,-gpu')
write_to_bazelrc('test --build_tag_filters=-no_windows,-gpu')
elif is_macos():
write_to_bazelrc('test --test_tag_filters=-gpu,-nomac,-no_mac')
write_to_bazelrc('test --build_tag_filters=-gpu,-nomac,-no_mac')
elif is_linux():
if env.get('TF_NEED_CUDA', None) == '1':
write_to_bazelrc('test --test_tag_filters=-no_gpu')
write_to_bazelrc('test --build_tag_filters=-no_gpu')
write_to_bazelrc('test --test_env=LD_LIBRARY_PATH')
else:
write_to_bazelrc('test --test_tag_filters=-gpu')
write_to_bazelrc('test --build_tag_filters=-gpu')
def set_system_libs_flag(environ_cp):
syslibs = environ_cp.get('TF_SYSTEM_LIBS', '')
if syslibs:
if ',' in syslibs:
syslibs = ','.join(sorted(syslibs.split(',')))
else:
syslibs = ','.join(sorted(syslibs.split()))
write_action_env_to_bazelrc('TF_SYSTEM_LIBS', syslibs)
if 'PREFIX' in environ_cp:
write_to_bazelrc('build --define=PREFIX=%s' % environ_cp['PREFIX'])
if 'LIBDIR' in environ_cp:
write_to_bazelrc('build --define=LIBDIR=%s' % environ_cp['LIBDIR'])
if 'INCLUDEDIR' in environ_cp:
write_to_bazelrc('build --define=INCLUDEDIR=%s' % environ_cp['INCLUDEDIR'])
def set_windows_build_flags(environ_cp):
"""Set Windows specific build options."""
# The non-monolithic build is not supported yet
write_to_bazelrc('build --config monolithic')
# Suppress warning messages
write_to_bazelrc('build --copt=-w --host_copt=-w')
# Fix winsock2.h conflicts
write_to_bazelrc(
'build --copt=-DWIN32_LEAN_AND_MEAN --host_copt=-DWIN32_LEAN_AND_MEAN '
'--copt=-DNOGDI --host_copt=-DNOGDI')
# Output more verbose information when something goes wrong
write_to_bazelrc('build --verbose_failures')
  # The host and target platforms are the same in a Windows build, so we don't
  # have to distinguish between them. This avoids building the same targets twice.
write_to_bazelrc('build --distinct_host_configuration=false')
if get_var(
environ_cp, 'TF_OVERRIDE_EIGEN_STRONG_INLINE', 'Eigen strong inline',
True, ('Would you like to override eigen strong inline for some C++ '
'compilation to reduce the compilation time?'),
'Eigen strong inline overridden.', 'Not overriding eigen strong inline, '
'some compilations could take more than 20 mins.'):
# Due to a known MSVC compiler issue
# https://github.com/tensorflow/tensorflow/issues/10521
# Overriding eigen strong inline speeds up the compiling of
# conv_grad_ops_3d.cc and conv_ops_3d.cc by 20 minutes,
# but this also hurts the performance. Let users decide what they want.
write_to_bazelrc('build --define=override_eigen_strong_inline=true')
def config_info_line(name, help_text):
"""Helper function to print formatted help text for Bazel config options."""
print('\t--config=%-12s\t# %s' % (name, help_text))
def configure_ios():
"""Configures TensorFlow for iOS builds.
This function will only be executed if `is_macos()` is true.
"""
if not is_macos():
return
for filepath in APPLE_BAZEL_FILES:
existing_filepath = os.path.join(_TF_WORKSPACE_ROOT, filepath + '.apple')
renamed_filepath = os.path.join(_TF_WORKSPACE_ROOT, filepath)
symlink_force(existing_filepath, renamed_filepath)
for filepath in IOS_FILES:
filename = os.path.basename(filepath)
new_filepath = os.path.join(_TF_WORKSPACE_ROOT, filename)
symlink_force(filepath, new_filepath)
def validate_cuda_config(environ_cp):
"""Run find_cuda_config.py and return cuda_toolkit_path, or None."""
def maybe_encode_env(env):
"""Encodes unicode in env to str on Windows python 2.x."""
if not is_windows() or sys.version_info[0] != 2:
return env
for k, v in env.items():
if isinstance(k, unicode):
k = k.encode('ascii')
if isinstance(v, unicode):
v = v.encode('ascii')
env[k] = v
return env
cuda_libraries = ['cuda', 'cudnn']
if is_linux():
if int(environ_cp.get('TF_NEED_TENSORRT', False)):
cuda_libraries.append('tensorrt')
if environ_cp.get('TF_NCCL_VERSION', None):
cuda_libraries.append('nccl')
proc = subprocess.Popen(
[environ_cp['PYTHON_BIN_PATH'], 'third_party/gpus/find_cuda_config.py'] +
cuda_libraries,
stdout=subprocess.PIPE,
env=maybe_encode_env(environ_cp))
if proc.wait():
# Errors from find_cuda_config.py were sent to stderr.
print('Asking for detailed CUDA configuration...\n')
return False
config = dict(
tuple(line.decode('ascii').rstrip().split(': ')) for line in proc.stdout)
print('Found CUDA %s in:' % config['cuda_version'])
print(' %s' % config['cuda_library_dir'])
print(' %s' % config['cuda_include_dir'])
print('Found cuDNN %s in:' % config['cudnn_version'])
print(' %s' % config['cudnn_library_dir'])
print(' %s' % config['cudnn_include_dir'])
if 'tensorrt_version' in config:
print('Found TensorRT %s in:' % config['tensorrt_version'])
print(' %s' % config['tensorrt_library_dir'])
print(' %s' % config['tensorrt_include_dir'])
if config.get('nccl_version', None):
print('Found NCCL %s in:' % config['nccl_version'])
print(' %s' % config['nccl_library_dir'])
print(' %s' % config['nccl_include_dir'])
print('\n')
environ_cp['CUDA_TOOLKIT_PATH'] = config['cuda_toolkit_path']
return True
def main():
global _TF_WORKSPACE_ROOT
global _TF_BAZELRC
global _TF_CURRENT_BAZEL_VERSION
parser = argparse.ArgumentParser()
parser.add_argument(
'--workspace',
type=str,
default=os.path.abspath(os.path.dirname(__file__)),
help='The absolute path to your active Bazel workspace.')
args = parser.parse_args()
_TF_WORKSPACE_ROOT = args.workspace
_TF_BAZELRC = os.path.join(_TF_WORKSPACE_ROOT, _TF_BAZELRC_FILENAME)
  # Make a copy of os.environ so it is clear when functions are getting and
  # setting environment variables.
environ_cp = dict(os.environ)
current_bazel_version = check_bazel_version(_TF_MIN_BAZEL_VERSION,
_TF_MAX_BAZEL_VERSION)
_TF_CURRENT_BAZEL_VERSION = convert_version_to_int(current_bazel_version)
reset_tf_configure_bazelrc()
cleanup_makefile()
setup_python(environ_cp)
if is_windows():
environ_cp['TF_NEED_OPENCL_SYCL'] = '0'
environ_cp['TF_NEED_COMPUTECPP'] = '0'
environ_cp['TF_NEED_OPENCL'] = '0'
environ_cp['TF_CUDA_CLANG'] = '0'
environ_cp['TF_NEED_TENSORRT'] = '0'
# TODO(ibiryukov): Investigate using clang as a cpu or cuda compiler on
# Windows.
environ_cp['TF_DOWNLOAD_CLANG'] = '0'
environ_cp['TF_NEED_MPI'] = '0'
environ_cp['TF_SET_ANDROID_WORKSPACE'] = '0'
if is_macos():
environ_cp['TF_NEED_TENSORRT'] = '0'
else:
environ_cp['TF_CONFIGURE_IOS'] = '0'
# The numpy package on ppc64le uses OpenBLAS which has multi-threading
# issues that lead to incorrect answers. Set OMP_NUM_THREADS=1 at
# runtime to allow the Tensorflow testcases which compare numpy
# results to Tensorflow results to succeed.
if is_ppc64le():
write_action_env_to_bazelrc('OMP_NUM_THREADS', 1)
xla_enabled_by_default = is_linux()
set_build_var(environ_cp, 'TF_ENABLE_XLA', 'XLA JIT', 'with_xla_support',
xla_enabled_by_default, 'xla')
set_action_env_var(
environ_cp,
'TF_NEED_OPENCL_SYCL',
'OpenCL SYCL',
False,
bazel_config_name='sycl')
if environ_cp.get('TF_NEED_OPENCL_SYCL') == '1':
set_host_cxx_compiler(environ_cp)
set_host_c_compiler(environ_cp)
set_action_env_var(environ_cp, 'TF_NEED_COMPUTECPP', 'ComputeCPP', True)
if environ_cp.get('TF_NEED_COMPUTECPP') == '1':
set_computecpp_toolkit_path(environ_cp)
else:
set_trisycl_include_dir(environ_cp)
set_action_env_var(
environ_cp, 'TF_NEED_ROCM', 'ROCm', False, bazel_config_name='rocm')
if (environ_cp.get('TF_NEED_ROCM') == '1' and
'LD_LIBRARY_PATH' in environ_cp and
environ_cp.get('LD_LIBRARY_PATH') != '1'):
write_action_env_to_bazelrc('LD_LIBRARY_PATH',
environ_cp.get('LD_LIBRARY_PATH'))
environ_cp['TF_NEED_CUDA'] = str(
int(get_var(environ_cp, 'TF_NEED_CUDA', 'CUDA', False)))
if (environ_cp.get('TF_NEED_CUDA') == '1' and
'TF_CUDA_CONFIG_REPO' not in environ_cp):
set_action_env_var(
environ_cp,
'TF_NEED_TENSORRT',
'TensorRT',
False,
bazel_config_name='tensorrt')
environ_save = dict(environ_cp)
for _ in range(_DEFAULT_PROMPT_ASK_ATTEMPTS):
if validate_cuda_config(environ_cp):
cuda_env_names = [
'TF_CUDA_VERSION',
'TF_CUBLAS_VERSION',
'TF_CUDNN_VERSION',
'TF_TENSORRT_VERSION',
'TF_NCCL_VERSION',
'TF_CUDA_PATHS',
# Items below are for backwards compatibility when not using
# TF_CUDA_PATHS.
'CUDA_TOOLKIT_PATH',
'CUDNN_INSTALL_PATH',
'NCCL_INSTALL_PATH',
'NCCL_HDR_PATH',
'TENSORRT_INSTALL_PATH'
]
# Note: set_action_env_var above already writes to bazelrc.
for name in cuda_env_names:
if name in environ_cp:
write_action_env_to_bazelrc(name, environ_cp[name])
break
# Restore settings changed below if CUDA config could not be validated.
environ_cp = dict(environ_save)
set_tf_cuda_version(environ_cp)
set_tf_cudnn_version(environ_cp)
if is_linux():
set_tf_tensorrt_version(environ_cp)
set_tf_nccl_version(environ_cp)
set_tf_cuda_paths(environ_cp)
else:
raise UserInputError(
          'Invalid CUDA settings were provided %d '
'times in a row. Assuming to be a scripting mistake.' %
_DEFAULT_PROMPT_ASK_ATTEMPTS)
set_tf_cuda_compute_capabilities(environ_cp)
if 'LD_LIBRARY_PATH' in environ_cp and environ_cp.get(
'LD_LIBRARY_PATH') != '1':
write_action_env_to_bazelrc('LD_LIBRARY_PATH',
environ_cp.get('LD_LIBRARY_PATH'))
set_tf_cuda_clang(environ_cp)
if environ_cp.get('TF_CUDA_CLANG') == '1':
# Ask whether we should download the clang toolchain.
set_tf_download_clang(environ_cp)
if environ_cp.get('TF_DOWNLOAD_CLANG') != '1':
# Set up which clang we should use as the cuda / host compiler.
set_clang_cuda_compiler_path(environ_cp)
else:
# Use downloaded LLD for linking.
write_to_bazelrc('build:cuda_clang --config=download_clang_use_lld')
else:
# Set up which gcc nvcc should use as the host compiler
# No need to set this on Windows
if not is_windows():
set_gcc_host_compiler_path(environ_cp)
set_other_cuda_vars(environ_cp)
else:
# CUDA not required. Ask whether we should download the clang toolchain and
# use it for the CPU build.
set_tf_download_clang(environ_cp)
# SYCL / ROCm / CUDA are mutually exclusive.
# At most 1 GPU platform can be configured.
gpu_platform_count = 0
if environ_cp.get('TF_NEED_OPENCL_SYCL') == '1':
gpu_platform_count += 1
if environ_cp.get('TF_NEED_ROCM') == '1':
gpu_platform_count += 1
if environ_cp.get('TF_NEED_CUDA') == '1':
gpu_platform_count += 1
if gpu_platform_count >= 2:
    raise UserInputError('SYCL / CUDA / ROCm are mutually exclusive. '
'At most 1 GPU platform can be configured.')
set_build_var(environ_cp, 'TF_NEED_MPI', 'MPI', 'with_mpi_support', False)
if environ_cp.get('TF_NEED_MPI') == '1':
set_mpi_home(environ_cp)
set_other_mpi_vars(environ_cp)
set_cc_opt_flags(environ_cp)
set_system_libs_flag(environ_cp)
if is_windows():
set_windows_build_flags(environ_cp)
# Add a config option to build TensorFlow 2.0 API.
write_to_bazelrc('build:v2 --define=tf_api_version=2')
if get_var(environ_cp, 'TF_SET_ANDROID_WORKSPACE', 'android workspace', False,
('Would you like to interactively configure ./WORKSPACE for '
'Android builds?'), 'Searching for NDK and SDK installations.',
'Not configuring the WORKSPACE for Android builds.'):
create_android_ndk_rule(environ_cp)
create_android_sdk_rule(environ_cp)
system_specific_test_config(os.environ)
set_action_env_var(environ_cp, 'TF_CONFIGURE_IOS', 'iOS', False)
if environ_cp.get('TF_CONFIGURE_IOS') == '1':
configure_ios()
print('Preconfigured Bazel build configs. You can use any of the below by '
'adding "--config=<>" to your build command. See .bazelrc for more '
'details.')
config_info_line('mkl', 'Build with MKL support.')
config_info_line('monolithic', 'Config for mostly static monolithic build.')
config_info_line('gdr', 'Build with GDR support.')
config_info_line('verbs', 'Build with libverbs support.')
config_info_line('ngraph', 'Build with Intel nGraph support.')
config_info_line('numa', 'Build with NUMA support.')
config_info_line(
'dynamic_kernels',
'(Experimental) Build kernels into separate shared objects.')
config_info_line('v2', 'Build TensorFlow 2.x instead of 1.x.')
print('Preconfigured Bazel build configs to DISABLE default on features:')
config_info_line('noaws', 'Disable AWS S3 filesystem support.')
config_info_line('nogcp', 'Disable GCP support.')
config_info_line('nohdfs', 'Disable HDFS support.')
config_info_line('noignite', 'Disable Apache Ignite support.')
config_info_line('nokafka', 'Disable Apache Kafka support.')
config_info_line('nonccl', 'Disable NVIDIA NCCL support.')
if __name__ == '__main__':
main()
| tensorflow-master | configure.py |
tensorflow-master | third_party/__init__.py |
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Expands CMake variables in a text file."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import sys
_CMAKE_DEFINE_REGEX = re.compile(r"\s*#cmakedefine\s+([A-Za-z_0-9]*)(\s.*)?$")
_CMAKE_DEFINE01_REGEX = re.compile(r"\s*#cmakedefine01\s+([A-Za-z_0-9]*)")
_CMAKE_VAR_REGEX = re.compile(r"\${([A-Za-z_0-9]*)}")
def _parse_args(argv):
"""Parses arguments with the form KEY=VALUE into a dictionary."""
result = {}
for arg in argv:
k, v = arg.split("=")
result[k] = v
return result
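# Illustrative example (not part of the original script):
#
#   _parse_args(["FOO=1", "BAR=baz"])  # -> {"FOO": "1", "BAR": "baz"}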
def _expand_variables(input_str, cmake_vars):
"""Expands ${VARIABLE}s in 'input_str', using dictionary 'cmake_vars'.
Args:
input_str: the string containing ${VARIABLE} expressions to expand.
cmake_vars: a dictionary mapping variable names to their values.
Returns:
The expanded string.
"""
def replace(match):
if match.group(1) in cmake_vars:
return cmake_vars[match.group(1)]
return ""
return _CMAKE_VAR_REGEX.sub(replace, input_str)
def _expand_cmakedefines(line, cmake_vars):
"""Expands #cmakedefine declarations, using a dictionary 'cmake_vars'."""
# Handles #cmakedefine lines
match = _CMAKE_DEFINE_REGEX.match(line)
if match:
name = match.group(1)
suffix = match.group(2) or ""
if name in cmake_vars:
return "#define {}{}\n".format(name,
_expand_variables(suffix, cmake_vars))
else:
return "/* #undef {} */\n".format(name)
# Handles #cmakedefine01 lines
match = _CMAKE_DEFINE01_REGEX.match(line)
if match:
name = match.group(1)
value = cmake_vars.get(name, "0")
return "#define {} {}\n".format(name, value)
# Otherwise return the line unchanged.
return _expand_variables(line, cmake_vars)
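# Illustrative examples (hypothetical variable names, not part of the original
# script), assuming cmake_vars = {"HAVE_FOO": "1", "BAR": "2"}:
#
#   '#cmakedefine HAVE_FOO ${BAR}\n'  -> '#define HAVE_FOO 2\n'
#   '#cmakedefine HAVE_BAZ\n'         -> '/* #undef HAVE_BAZ */\n'
#   '#cmakedefine01 HAVE_FOO\n'       -> '#define HAVE_FOO 1\n'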
def main():
cmake_vars = _parse_args(sys.argv[1:])
for line in sys.stdin:
sys.stdout.write(_expand_cmakedefines(line, cmake_vars))
if __name__ == "__main__":
main()
| tensorflow-master | third_party/llvm/expand_cmake_vars.py |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Prints CUDA library and header directories and versions found on the system.
The script searches for CUDA library and header files on the system, inspects
them to determine their version and prints the configuration to stdout.
The paths to inspect and the required versions are specified through environment
variables. If no valid configuration is found, the script prints to stderr and
returns an error code.
The list of libraries to find is specified as arguments. Supported libraries are
CUDA (includes cuBLAS), cuDNN, NCCL, and TensorRT.
The script takes a list of base directories, specified through the TF_CUDA_PATHS
environment variable as a comma-separated list of globs. It looks for headers
and library files in a hard-coded set of subdirectories of these base paths.
If TF_CUDA_PATHS is not specified, an OS-specific default is used:
Linux: /usr/local/cuda, /usr, and paths from 'ldconfig -p'.
Windows: CUDA_PATH environment variable, or
C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\*
For backwards compatibility, some libraries also use alternative base
directories from other environment variables if they are specified. List of
library-specific environment variables:
Library Version env variable Additional base directories
----------------------------------------------------------------
CUDA TF_CUDA_VERSION CUDA_TOOLKIT_PATH
cuBLAS TF_CUBLAS_VERSION CUDA_TOOLKIT_PATH
cuDNN TF_CUDNN_VERSION CUDNN_INSTALL_PATH
NCCL TF_NCCL_VERSION NCCL_INSTALL_PATH, NCCL_HDR_PATH
TensorRT TF_TENSORRT_VERSION TENSORRT_INSTALL_PATH
Version environment variables can be of the form 'x' or 'x.y' to request a
specific version, or left empty or unspecified to accept any version.
The output of a found library is of the form:
tf_<library>_version: x.y.z
tf_<library>_header_dir: ...
tf_<library>_library_dir: ...
"""
import io
import os
import glob
import platform
import re
import subprocess
import sys
# pylint: disable=g-import-not-at-top
try:
from shutil import which
except ImportError:
from distutils.spawn import find_executable as which
# pylint: enable=g-import-not-at-top
class ConfigError(Exception):
pass
def _is_linux():
return platform.system() == "Linux"
def _is_windows():
return platform.system() == "Windows"
def _is_macos():
return platform.system() == "Darwin"
def _matches_version(actual_version, required_version):
"""Checks whether some version meets the requirements.
All elements of the required_version need to be present in the
actual_version.
  required_version  actual_version  result
  -----------------------------------------
  1                 1.1             True
  1.2               1               False
  1.2               1.3             False
  (empty)           1               True
Args:
required_version: The version specified by the user.
actual_version: The version detected from the CUDA installation.
Returns: Whether the actual version matches the required one.
"""
if actual_version is None:
return False
# Strip spaces from the versions.
actual_version = actual_version.strip()
required_version = required_version.strip()
return actual_version.startswith(required_version)
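# Illustrative examples (not part of the original script), matching the table
# in the docstring above:
#
#   _matches_version("1.1", "1")    # -> True
#   _matches_version("1", "1.2")    # -> False
#   _matches_version("1.3", "1.2")  # -> False
#   _matches_version("1", "")       # -> True  (an empty requirement accepts any version)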
def _at_least_version(actual_version, required_version):
actual = [int(v) for v in actual_version.split(".")]
required = [int(v) for v in required_version.split(".")]
return actual >= required
def _get_header_version(path, name):
"""Returns preprocessor defines in C header file."""
for line in io.open(path, "r", encoding="utf-8").readlines():
match = re.match("#define %s +(\d+)" % name, line)
if match:
return match.group(1)
return ""
def _cartesian_product(first, second):
"""Returns all path combinations of first and second."""
return [os.path.join(f, s) for f in first for s in second]
def _get_ld_config_paths():
"""Returns all directories from 'ldconfig -p'."""
if not _is_linux():
return []
ldconfig_path = which("ldconfig") or "/sbin/ldconfig"
output = subprocess.check_output([ldconfig_path, "-p"])
pattern = re.compile(".* => (.*)")
result = set()
for line in output.splitlines():
try:
match = pattern.match(line.decode("ascii"))
except UnicodeDecodeError:
match = False
if match:
result.add(os.path.dirname(match.group(1)))
return sorted(list(result))
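# Illustrative sketch (hypothetical 'ldconfig -p' output, not part of the
# original script): a line such as
#   "libcudart.so.10.0 (libc6,x86-64) => /usr/local/cuda/lib64/libcudart.so.10.0"
# contributes '/usr/local/cuda/lib64' to the returned set of directories.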
def _get_default_cuda_paths(cuda_version):
if not cuda_version:
cuda_version = "*"
elif not "." in cuda_version:
cuda_version = cuda_version + ".*"
if _is_windows():
return [
os.environ.get(
"CUDA_PATH",
"C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v%s\\" %
cuda_version)
]
return ["/usr/local/cuda-%s" % cuda_version, "/usr/local/cuda", "/usr"
] + _get_ld_config_paths()
def _header_paths():
"""Returns hard-coded set of relative paths to look for header files."""
return [
"",
"include",
"include/cuda",
"include/*-linux-gnu",
"extras/CUPTI/include",
"include/cuda/CUPTI",
]
def _library_paths():
"""Returns hard-coded set of relative paths to look for library files."""
return [
"",
"lib64",
"lib",
"lib/*-linux-gnu",
"lib/x64",
"extras/CUPTI/*",
]
def _not_found_error(base_paths, relative_paths, filepattern):
base_paths = "".join(["\n '%s'" % path for path in sorted(base_paths)])
relative_paths = "".join(["\n '%s'" % path for path in relative_paths])
return ConfigError(
"Could not find any %s in any subdirectory:%s\nof:%s\n" %
(filepattern, relative_paths, base_paths))
def _find_file(base_paths, relative_paths, filepattern):
for path in _cartesian_product(base_paths, relative_paths):
for file in glob.glob(os.path.join(path, filepattern)):
return file
raise _not_found_error(base_paths, relative_paths, filepattern)
def _find_library(base_paths, library_name, required_version):
"""Returns first valid path to the requested library."""
if _is_windows():
filepattern = library_name + ".lib"
elif _is_macos():
filepattern = "%s*.dylib" % (".".join(["lib" + library_name] +
required_version.split(".")[:1]))
else:
filepattern = ".".join(["lib" + library_name, "so"] +
required_version.split(".")[:1]) + "*"
return _find_file(base_paths, _library_paths(), filepattern)
def _find_versioned_file(base_paths, relative_paths, filepattern,
required_version, get_version):
"""Returns first valid path to a file that matches the requested version."""
for path in _cartesian_product(base_paths, relative_paths):
for file in glob.glob(os.path.join(path, filepattern)):
actual_version = get_version(file)
if _matches_version(actual_version, required_version):
return file, actual_version
raise _not_found_error(
base_paths, relative_paths,
filepattern + " matching version '%s'" % required_version)
def _find_header(base_paths, header_name, required_version, get_version):
"""Returns first valid path to a header that matches the requested version."""
return _find_versioned_file(base_paths, _header_paths(), header_name,
required_version, get_version)
def _find_cuda_config(base_paths, required_version):
def get_header_version(path):
version = int(_get_header_version(path, "CUDA_VERSION"))
if not version:
return None
return "%d.%d" % (version // 1000, version % 1000 // 10)
cuda_header_path, header_version = _find_header(base_paths, "cuda.h",
required_version,
get_header_version)
cuda_version = header_version # x.y, see above.
cuda_library_path = _find_library(base_paths, "cudart", cuda_version)
def get_nvcc_version(path):
pattern = "Cuda compilation tools, release \d+\.\d+, V(\d+\.\d+\.\d+)"
for line in subprocess.check_output([path, "--version"]).splitlines():
match = re.match(pattern, line.decode("ascii"))
if match:
return match.group(1)
return None
nvcc_name = "nvcc.exe" if _is_windows() else "nvcc"
nvcc_path, nvcc_version = _find_versioned_file(base_paths, [
"",
"bin",
], nvcc_name, cuda_version, get_nvcc_version)
nvvm_path = _find_file(base_paths, [
"nvvm/libdevice",
"share/cuda",
"lib/nvidia-cuda-toolkit/libdevice",
], "libdevice*.10.bc")
cupti_header_path = _find_file(base_paths, _header_paths(), "cupti.h")
cupti_library_path = _find_library(base_paths, "cupti", required_version)
cuda_binary_dir = os.path.dirname(nvcc_path)
nvvm_library_dir = os.path.dirname(nvvm_path)
# XLA requires the toolkit path to find ptxas and libdevice.
# TODO(csigg): pass in both directories instead.
cuda_toolkit_paths = (
os.path.normpath(os.path.join(cuda_binary_dir, "..")),
os.path.normpath(os.path.join(nvvm_library_dir, "../..")),
)
if cuda_toolkit_paths[0] != cuda_toolkit_paths[1]:
raise ConfigError("Inconsistent CUDA toolkit path: %s vs %s" %
cuda_toolkit_paths)
return {
"cuda_version": cuda_version,
"cuda_include_dir": os.path.dirname(cuda_header_path),
"cuda_library_dir": os.path.dirname(cuda_library_path),
"cuda_binary_dir": cuda_binary_dir,
"nvvm_library_dir": nvvm_library_dir,
"cupti_include_dir": os.path.dirname(cupti_header_path),
"cupti_library_dir": os.path.dirname(cupti_library_path),
"cuda_toolkit_path": cuda_toolkit_paths[0],
}
def _find_cublas_config(base_paths, required_version, cuda_version):
if _at_least_version(cuda_version, "10.1"):
def get_header_version(path):
version = (
_get_header_version(path, name)
for name in ("CUBLAS_VER_MAJOR", "CUBLAS_VER_MINOR",
"CUBLAS_VER_PATCH"))
return ".".join(version)
header_path, header_version = _find_header(base_paths, "cublas_api.h",
required_version,
get_header_version)
# cuBLAS uses the major version only.
cublas_version = header_version.split(".")[0]
if not _matches_version(cuda_version, cublas_version):
raise ConfigError("cuBLAS version %s does not match CUDA version %s" %
(cublas_version, cuda_version))
else:
# There is no version info available before CUDA 10.1, just find the file.
header_path = _find_file(base_paths, _header_paths(), "cublas_api.h")
# cuBLAS version is the same as CUDA version (x.y).
cublas_version = required_version
library_path = _find_library(base_paths, "cublas", cublas_version)
return {
"cublas_include_dir": os.path.dirname(header_path),
"cublas_library_dir": os.path.dirname(library_path),
}
def _find_cudnn_config(base_paths, required_version):
def get_header_version(path):
version = (
_get_header_version(path, name)
for name in ("CUDNN_MAJOR", "CUDNN_MINOR", "CUDNN_PATCHLEVEL"))
return ".".join(version)
header_path, header_version = _find_header(base_paths, "cudnn.h",
required_version,
get_header_version)
cudnn_version = header_version.split(".")[0]
library_path = _find_library(base_paths, "cudnn", cudnn_version)
return {
"cudnn_version": cudnn_version,
"cudnn_include_dir": os.path.dirname(header_path),
"cudnn_library_dir": os.path.dirname(library_path),
}
def _find_nccl_config(base_paths, required_version):
def get_header_version(path):
version = (
_get_header_version(path, name)
for name in ("NCCL_MAJOR", "NCCL_MINOR", "NCCL_PATCH"))
return ".".join(version)
header_path, header_version = _find_header(base_paths, "nccl.h",
required_version,
get_header_version)
nccl_version = header_version.split(".")[0]
library_path = _find_library(base_paths, "nccl", nccl_version)
return {
"nccl_version": nccl_version,
"nccl_include_dir": os.path.dirname(header_path),
"nccl_library_dir": os.path.dirname(library_path),
}
def _find_tensorrt_config(base_paths, required_version):
def get_header_version(path):
version = (
_get_header_version(path, name)
for name in ("NV_TENSORRT_MAJOR", "NV_TENSORRT_MINOR",
"NV_TENSORRT_PATCH"))
# `version` is a generator object, so we convert it to a list before using
      # it (multiple times below).
version = list(version)
if not all(version):
return None # Versions not found, make _matches_version returns False.
return ".".join(version)
try:
header_path, header_version = _find_header(base_paths, "NvInfer.h",
required_version,
get_header_version)
except ConfigError:
# TensorRT 6 moved the version information to NvInferVersion.h.
header_path, header_version = _find_header(base_paths, "NvInferVersion.h",
required_version,
get_header_version)
tensorrt_version = header_version.split(".")[0]
library_path = _find_library(base_paths, "nvinfer", tensorrt_version)
return {
"tensorrt_version": tensorrt_version,
"tensorrt_include_dir": os.path.dirname(header_path),
"tensorrt_library_dir": os.path.dirname(library_path),
}
def _list_from_env(env_name, default=[]):
"""Returns comma-separated list from environment variable."""
if env_name in os.environ:
return os.environ[env_name].split(",")
return default
def _get_legacy_path(env_name, default=[]):
"""Returns a path specified by a legacy environment variable.
CUDNN_INSTALL_PATH, NCCL_INSTALL_PATH, TENSORRT_INSTALL_PATH set to
'/usr/lib/x86_64-linux-gnu' would previously find both library and header
paths. Detect those and return '/usr', otherwise forward to _list_from_env().
"""
if env_name in os.environ:
match = re.match("^(/[^/ ]*)+/lib/\w+-linux-gnu/?$", os.environ[env_name])
if match:
return [match.group(1)]
return _list_from_env(env_name, default)
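# Illustrative example (not part of the original script): with
# CUDNN_INSTALL_PATH=/usr/lib/x86_64-linux-gnu in the environment,
# _get_legacy_path("CUDNN_INSTALL_PATH") returns ['/usr'], so both
# /usr/include and the /usr/lib/... directories are searched.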
def _normalize_path(path):
"""Returns normalized path, with forward slashes on Windows."""
path = os.path.normpath(path)
if _is_windows():
path = path.replace("\\", "/")
return path
def find_cuda_config():
"""Returns a dictionary of CUDA library and header file paths."""
libraries = [argv.lower() for argv in sys.argv[1:]]
cuda_version = os.environ.get("TF_CUDA_VERSION", "")
base_paths = _list_from_env("TF_CUDA_PATHS",
_get_default_cuda_paths(cuda_version))
base_paths = [path for path in base_paths if os.path.exists(path)]
result = {}
if "cuda" in libraries:
cuda_paths = _list_from_env("CUDA_TOOLKIT_PATH", base_paths)
result.update(_find_cuda_config(cuda_paths, cuda_version))
cuda_version = result["cuda_version"]
cublas_paths = base_paths
if tuple(int(v) for v in cuda_version.split(".")) < (10, 1):
# Before CUDA 10.1, cuBLAS was in the same directory as the toolkit.
cublas_paths = cuda_paths
cublas_version = os.environ.get("TF_CUBLAS_VERSION", "")
result.update(
_find_cublas_config(cublas_paths, cublas_version, cuda_version))
if "cudnn" in libraries:
cudnn_paths = _get_legacy_path("CUDNN_INSTALL_PATH", base_paths)
cudnn_version = os.environ.get("TF_CUDNN_VERSION", "")
result.update(_find_cudnn_config(cudnn_paths, cudnn_version))
if "nccl" in libraries:
nccl_paths = _get_legacy_path("NCCL_INSTALL_PATH", base_paths)
nccl_version = os.environ.get("TF_NCCL_VERSION", "")
result.update(_find_nccl_config(nccl_paths, nccl_version))
if "tensorrt" in libraries:
tensorrt_paths = _get_legacy_path("TENSORRT_INSTALL_PATH", base_paths)
tensorrt_version = os.environ.get("TF_TENSORRT_VERSION", "")
result.update(_find_tensorrt_config(tensorrt_paths, tensorrt_version))
for k, v in result.items():
if k.endswith("_dir") or k.endswith("_path"):
result[k] = _normalize_path(v)
return result
def main():
try:
for key, value in sorted(find_cuda_config().items()):
print("%s: %s" % (key, value))
except ConfigError as e:
sys.stderr.write(str(e))
sys.exit(1)
if __name__ == "__main__":
main()
| tensorflow-master | third_party/gpus/find_cuda_config.py |
r"""Implementation of SPINN in TensorFlow eager execution.
SPINN: Stack-Augmented Parser-Interpreter Neural Network.
This file contains the model definition and code for training the model.
The model definition is based on PyTorch implementation at:
https://github.com/jekbradbury/examples/tree/spinn/snli
which was released under a BSD 3-Clause License at:
https://github.com/jekbradbury/examples/blob/spinn/LICENSE:
Copyright (c) 2017,
All rights reserved.
See ./LICENSE for more details.
Instructions for use:
* See `README.md` for details on how to prepare the SNLI and GloVe data.
* Suppose you have prepared the data at "/tmp/spinn-data"; use the following
command to train the model:
```bash
python spinn.py --data_root /tmp/spinn-data --logdir /tmp/spinn-logs
```
Checkpoints and TensorBoard summaries will be written to "/tmp/spinn-logs".
References:
* Bowman, S.R., Gauthier, J., Rastogi A., Gupta, R., Manning, C.D., & Potts, C.
(2016). A Fast Unified Model for Parsing and Sentence Understanding.
https://arxiv.org/abs/1603.06021
* Bradbury, J. (2017). Recursive Neural Networks with PyTorch.
https://devblogs.nvidia.com/parallelforall/recursive-neural-networks-pytorch/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import itertools
import os
import sys
import time
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
import tensorflow.contrib.eager as tfe
from tensorflow.contrib.eager.python.examples.spinn import data
layers = tf.keras.layers
def _bundle(lstm_iter):
"""Concatenate a list of Tensors along 1st axis and split result into two.
Args:
lstm_iter: A `list` of `N` dense `Tensor`s, each of which has the shape
(R, 2 * M).
Returns:
A `list` of two dense `Tensor`s, each of which has the shape (N * R, M).
"""
return tf.split(tf.concat(lstm_iter, 0), 2, axis=1)
def _unbundle(state):
"""Concatenate a list of Tensors along 2nd axis and split result.
This is the inverse of `_bundle`.
Args:
state: A `list` of two dense `Tensor`s, each of which has the shape (R, M).
Returns:
A `list` of `R` dense `Tensors`, each of which has the shape (1, 2 * M).
"""
return tf.split(tf.concat(state, 1), state[0].shape[0], axis=0)
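# Illustrative shape check (not part of the original model code): with N = 3
# tensors of shape (R, 2 * M), _bundle returns two tensors of shape (N * R, M),
# and _unbundle inverts that back into N * R tensors of shape (1, 2 * M).
#
#   lstm_iter = [tf.zeros([2, 8]) for _ in range(3)]   # N=3, R=2, M=4
#   h, c = _bundle(lstm_iter)                          # each of shape (6, 4)
#   state = _unbundle([h, c])                          # 6 tensors of shape (1, 8)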
# pylint: disable=not-callable
class Reducer(tf.keras.Model):
"""A module that applies reduce operation on left and right vectors."""
def __init__(self, size, tracker_size=None):
super(Reducer, self).__init__()
self.left = layers.Dense(5 * size, activation=None)
self.right = layers.Dense(5 * size, activation=None, use_bias=False)
if tracker_size is not None:
self.track = layers.Dense(5 * size, activation=None, use_bias=False)
else:
self.track = None
def call(self, left_in, right_in, tracking=None):
"""Invoke forward pass of the Reduce module.
This method feeds a linear combination of `left_in`, `right_in` and
`tracking` into a Tree LSTM and returns the output of the Tree LSTM.
Args:
left_in: A list of length L. Each item is a dense `Tensor` with
the shape (1, n_dims). n_dims is the size of the embedding vector.
right_in: A list of the same length as `left_in`. Each item should have
the same shape as the items of `left_in`.
tracking: Optional list of the same length as `left_in`. Each item is a
dense `Tensor` with shape (1, tracker_size * 2). tracker_size is the
size of the Tracker's state vector.
Returns:
Output: A list of length batch_size. Each item has the shape (1, n_dims).
"""
left, right = _bundle(left_in), _bundle(right_in)
lstm_in = self.left(left[0]) + self.right(right[0])
if self.track and tracking:
lstm_in += self.track(_bundle(tracking)[0])
return _unbundle(self._tree_lstm(left[1], right[1], lstm_in))
def _tree_lstm(self, c1, c2, lstm_in):
a, i, f1, f2, o = tf.split(lstm_in, 5, axis=1)
c = tf.tanh(a) * tf.sigmoid(i) + tf.sigmoid(f1) * c1 + tf.sigmoid(f2) * c2
h = tf.sigmoid(o) * tf.tanh(c)
return h, c
class Tracker(tf.keras.Model):
"""A module that tracks the history of the sentence with an LSTM."""
def __init__(self, tracker_size, predict):
"""Constructor of Tracker.
Args:
tracker_size: Number of dimensions of the underlying `LSTMCell`.
predict: (`bool`) Whether prediction mode is enabled.
"""
super(Tracker, self).__init__()
self._rnn = tf.nn.rnn_cell.LSTMCell(tracker_size)
self._state_size = tracker_size
if predict:
self._transition = layers.Dense(4)
else:
self._transition = None
def reset_state(self):
self.state = None
def call(self, bufs, stacks):
"""Invoke the forward pass of the Tracker module.
This method feeds the concatenation of the top two elements of the stacks
into an LSTM cell and returns the resultant state of the LSTM cell.
Args:
bufs: A `list` of length batch_size. Each item is a `list` of
max_sequence_len (maximum sequence length of the batch). Each item
of the nested list is a dense `Tensor` of shape (1, d_proj), where
d_proj is the size of the word embedding vector or the size of the
vector space that the word embedding vector is projected to.
stacks: A `list` of size batch_size. Each item is a `list` of
variable length corresponding to the current height of the stack.
Each item of the nested list is a dense `Tensor` of shape (1, d_proj).
Returns:
1. A list of length batch_size. Each item is a dense `Tensor` of shape
(1, d_tracker * 2).
2. If under predict mode, result of applying a Dense layer on the
first state vector of the RNN. Else, `None`.
"""
buf = _bundle([buf[-1] for buf in bufs])[0]
stack1 = _bundle([stack[-1] for stack in stacks])[0]
stack2 = _bundle([stack[-2] for stack in stacks])[0]
x = tf.concat([buf, stack1, stack2], 1)
if self.state is None:
batch_size = int(x.shape[0])
zeros = tf.zeros((batch_size, self._state_size), dtype=tf.float32)
self.state = [zeros, zeros]
_, self.state = self._rnn(x, self.state)
unbundled = _unbundle(self.state)
if self._transition:
return unbundled, self._transition(self.state[0])
else:
return unbundled, None
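# A minimal usage sketch (illustrative only, not part of the original model):
# the Tracker reads the top of each buffer and the top two stack entries for
# every sentence in the batch and returns one (1, 2 * tracker_size) state per
# sentence. Assumes eager execution; d_proj = 6 here is an arbitrary toy size.
def _example_tracker_step():
  tracker = Tracker(tracker_size=4, predict=False)
  tracker.reset_state()
  bufs = [[tf.ones((1, 6))] for _ in range(2)]  # batch of 2 sentences
  stacks = [[tf.zeros((1, 6)), tf.zeros((1, 6))] for _ in range(2)]
  states, transition_logits = tracker(bufs, stacks=stacks)
  # states: list of 2 tensors, each (1, 8); transition_logits is None because
  # predict=False.
  return states, transition_logits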
class SPINN(tf.keras.Model):
"""Stack-augmented Parser-Interpreter Neural Network.
See https://arxiv.org/abs/1603.06021 for more details.
"""
def __init__(self, config):
"""Constructor of SPINN.
Args:
      config: A `namedtuple` with the following attributes.
d_proj - (`int`) number of dimensions of the vector space to project the
word embeddings to.
d_tracker - (`int`) number of dimensions of the Tracker's state vector.
        d_hidden - (`int`) number of dimensions of the hidden state, for the
Reducer module.
n_mlp_layers - (`int`) number of multi-layer perceptron layers to use to
convert the output of the `Feature` module to logits.
        predict - (`bool`) Whether the Tracker will make predictions.
"""
super(SPINN, self).__init__()
self.config = config
self.reducer = Reducer(config.d_hidden, config.d_tracker)
if config.d_tracker is not None:
self.tracker = Tracker(config.d_tracker, config.predict)
else:
self.tracker = None
def call(self, buffers, transitions, training=False):
"""Invoke the forward pass of the SPINN model.
Args:
buffers: Dense `Tensor` of shape
(max_sequence_len, batch_size, config.d_proj).
transitions: Dense `Tensor` with integer values that represent the parse
trees of the sentences. A value of 2 indicates "reduce"; a value of 3
indicates "shift". Shape: (max_sequence_len * 2 - 3, batch_size).
training: Whether the invocation is under training mode.
Returns:
Output `Tensor` of shape (batch_size, config.d_embed).
"""
max_sequence_len, batch_size, d_proj = (int(x) for x in buffers.shape)
# Split the buffers into left and right word items and put the initial
# items in a stack.
splitted = tf.split(
tf.reshape(tf.transpose(buffers, [1, 0, 2]), [-1, d_proj]),
max_sequence_len * batch_size, axis=0)
buffers = [splitted[k:k + max_sequence_len]
for k in xrange(0, len(splitted), max_sequence_len)]
stacks = [[buf[0], buf[0]] for buf in buffers]
if self.tracker:
# Reset tracker state for new batch.
self.tracker.reset_state()
num_transitions = transitions.shape[0]
# Iterate through transitions and perform the appropriate stack-pop, reduce
# and stack-push operations.
transitions = transitions.numpy()
for i in xrange(num_transitions):
trans = transitions[i]
if self.tracker:
# Invoke tracker to obtain the current tracker states for the sentences.
tracker_states, trans_hypothesis = self.tracker(buffers, stacks=stacks)
if trans_hypothesis:
trans = tf.argmax(trans_hypothesis, axis=-1)
else:
tracker_states = itertools.repeat(None)
lefts, rights, trackings = [], [], []
for transition, buf, stack, tracking in zip(
trans, buffers, stacks, tracker_states):
if int(transition) == 3: # Shift.
stack.append(buf.pop())
elif int(transition) == 2: # Reduce.
rights.append(stack.pop())
lefts.append(stack.pop())
trackings.append(tracking)
if rights:
reducer_output = self.reducer(lefts, rights, trackings)
reduced = iter(reducer_output)
for transition, stack in zip(trans, stacks):
if int(transition) == 2: # Reduce.
stack.append(next(reduced))
return _bundle([stack.pop() for stack in stacks])[0]
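# A toy trace (illustrative only, not part of the original model) of the
# transition convention documented in `SPINN.call`: 3 means "shift" (move the
# next buffer item onto the stack) and 2 means "reduce" (combine the top two
# stack items). Real buffers hold word vectors and reductions go through the
# Reducer module; here plain Python tuples stand in for both.
def _example_shift_reduce_trace():
  words = ["the", "cat", "sat"]
  transitions = [3, 3, 2, 3, 2]  # a binary parse of three tokens
  buf = list(reversed(words))  # so that pop() yields words left to right
  stack = []
  for trans in transitions:
    if trans == 3:  # shift
      stack.append(buf.pop())
    elif trans == 2:  # reduce
      right, left = stack.pop(), stack.pop()
      stack.append((left, right))
  return stack  # [(("the", "cat"), "sat")]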
class Perceptron(tf.keras.Model):
"""One layer of the SNLIClassifier multi-layer perceptron."""
def __init__(self, dimension, dropout_rate, previous_layer):
"""Configure the Perceptron."""
super(Perceptron, self).__init__()
self.dense = tf.keras.layers.Dense(dimension, activation=tf.nn.elu)
self.batchnorm = layers.BatchNormalization()
self.dropout = layers.Dropout(rate=dropout_rate)
self.previous_layer = previous_layer
def call(self, x, training):
"""Run previous Perceptron layers, then this one."""
x = self.previous_layer(x, training=training)
x = self.dense(x)
x = self.batchnorm(x, training=training)
x = self.dropout(x, training=training)
return x
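# A minimal sketch (illustrative only, not part of the original model) of how
# SNLIClassifier stacks Perceptron layers: each layer holds the previous one
# in `previous_layer`, with an identity lambda as the innermost layer, so
# calling the outermost Perceptron runs the whole MLP. Assumes eager execution.
def _example_stacked_perceptrons():
  mlp = lambda result, training: result  # innermost layer: identity
  for _ in range(2):
    mlp = Perceptron(dimension=8, dropout_rate=0.1, previous_layer=mlp)
  features = tf.ones((4, 16))  # a toy batch of 4 feature vectors
  return mlp(features, training=False)  # shape (4, 8)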
class SNLIClassifier(tf.keras.Model):
"""SNLI Classifier Model.
  A model aimed at solving the SNLI (Stanford Natural Language Inference)
task, using the SPINN model from above. For details of the task, see:
https://nlp.stanford.edu/projects/snli/
"""
  def __init__(self, config, embed):
    """Constructor of SNLIClassifier.
Args:
config: A namedtuple containing required configurations for the model. It
needs to have the following attributes.
projection - (`bool`) whether the word vectors are to be projected onto
another vector space (of `d_proj` dimensions).
d_proj - (`int`) number of dimensions of the vector space to project the
word embeddings to.
embed_dropout - (`float`) dropout rate for the word embedding vectors.
n_mlp_layers - (`int`) number of multi-layer perceptron (MLP) layers to
use to convert the output of the `Feature` module to logits.
mlp_dropout - (`float`) dropout rate of the MLP layers.
d_out - (`int`) number of dimensions of the final output of the MLP
layers.
lr - (`float`) learning rate.
      embed: An embedding matrix of shape (vocab_size, d_embed).
"""
super(SNLIClassifier, self).__init__()
self.config = config
self.embed = tf.constant(embed)
self.projection = layers.Dense(config.d_proj)
self.embed_bn = layers.BatchNormalization()
self.embed_dropout = layers.Dropout(rate=config.embed_dropout)
self.encoder = SPINN(config)
self.feature_bn = layers.BatchNormalization()
self.feature_dropout = layers.Dropout(rate=config.mlp_dropout)
current_mlp = lambda result, training: result
for _ in range(config.n_mlp_layers):
current_mlp = Perceptron(dimension=config.d_mlp,
dropout_rate=config.mlp_dropout,
previous_layer=current_mlp)
self.mlp = current_mlp
self.mlp_output = layers.Dense(
config.d_out,
kernel_initializer=tf.random_uniform_initializer(minval=-5e-3,
maxval=5e-3))
def call(self,
premise,
premise_transition,
hypothesis,
hypothesis_transition,
           training=False):
    """Invoke the forward pass of the SNLIClassifier model.
Args:
premise: The word indices of the premise sentences, with shape
(max_prem_seq_len, batch_size).
premise_transition: The transitions for the premise sentences, with shape
(max_prem_seq_len * 2 - 3, batch_size).
hypothesis: The word indices of the hypothesis sentences, with shape
(max_hypo_seq_len, batch_size).
hypothesis_transition: The transitions for the hypothesis sentences, with
shape (max_hypo_seq_len * 2 - 3, batch_size).
training: Whether the invocation is under training mode.
Returns:
The logits, as a dense `Tensor` of shape (batch_size, d_out), where d_out
is the size of the output vector.
"""
# Perform embedding lookup on the premise and hypothesis inputs, which have
# the word-index format.
premise_embed = tf.nn.embedding_lookup(self.embed, premise)
hypothesis_embed = tf.nn.embedding_lookup(self.embed, hypothesis)
if self.config.projection:
# Project the embedding vectors to another vector space.
premise_embed = self.projection(premise_embed)
hypothesis_embed = self.projection(hypothesis_embed)
# Perform batch normalization and dropout on the possibly projected word
# vectors.
premise_embed = self.embed_bn(premise_embed, training=training)
hypothesis_embed = self.embed_bn(hypothesis_embed, training=training)
premise_embed = self.embed_dropout(premise_embed, training=training)
hypothesis_embed = self.embed_dropout(hypothesis_embed, training=training)
# Run the batch-normalized and dropout-processed word vectors through the
# SPINN encoder.
premise = self.encoder(premise_embed, premise_transition,
training=training)
hypothesis = self.encoder(hypothesis_embed, hypothesis_transition,
training=training)
# Combine encoder outputs for premises and hypotheses into logits.
    # Then apply batch normalization and dropout on the logits.
logits = tf.concat(
[premise, hypothesis, premise - hypothesis, premise * hypothesis], 1)
logits = self.feature_dropout(
self.feature_bn(logits, training=training), training=training)
# Apply the multi-layer perceptron on the logits.
logits = self.mlp(logits, training=training)
logits = self.mlp_output(logits)
return logits
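# A minimal sketch (illustrative only, not part of the original model) of the
# feature vector built in `SNLIClassifier.call`: the two sentence encodings
# are concatenated with their difference and elementwise product, giving the
# MLP both the individual encodings and simple interaction terms.
def _example_sentence_pair_features():
  premise = tf.constant([[1.0, 2.0]])
  hypothesis = tf.constant([[0.5, -1.0]])
  return tf.concat(
      [premise, hypothesis, premise - hypothesis, premise * hypothesis],
      1)  # shape (1, 8)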
class SNLIClassifierTrainer(tfe.Checkpointable):
"""A class that coordinates the training of an SNLIClassifier."""
def __init__(self, snli_classifier, lr):
"""Constructor of SNLIClassifierTrainer.
Args:
snli_classifier: An instance of `SNLIClassifier`.
lr: Learning rate.
"""
self._model = snli_classifier
# Create a custom learning rate Variable for the RMSProp optimizer, because
# the learning rate needs to be manually decayed later (see
# decay_learning_rate()).
self._learning_rate = tf.Variable(lr, name="learning_rate")
self._optimizer = tf.train.RMSPropOptimizer(self._learning_rate,
epsilon=1e-6)
def loss(self, labels, logits):
"""Calculate the loss given a batch of data.
Args:
labels: The truth labels, with shape (batch_size,).
logits: The logits output from the forward pass of the SNLIClassifier
model, with shape (batch_size, d_out), where d_out is the output
dimension size of the SNLIClassifier.
Returns:
The loss value, as a scalar `Tensor`.
"""
return tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits))
def train_batch(self,
labels,
premise,
premise_transition,
hypothesis,
hypothesis_transition):
"""Train model on batch of data.
Args:
labels: The truth labels, with shape (batch_size,).
premise: The word indices of the premise sentences, with shape
(max_prem_seq_len, batch_size).
premise_transition: The transitions for the premise sentences, with shape
(max_prem_seq_len * 2 - 3, batch_size).
hypothesis: The word indices of the hypothesis sentences, with shape
(max_hypo_seq_len, batch_size).
hypothesis_transition: The transitions for the hypothesis sentences, with
shape (max_hypo_seq_len * 2 - 3, batch_size).
Returns:
1. loss value as a scalar `Tensor`.
2. logits as a dense `Tensor` of shape (batch_size, d_out), where d_out is
the output dimension size of the SNLIClassifier.
"""
with tf.GradientTape() as tape:
tape.watch(self._model.variables)
logits = self._model(premise,
premise_transition,
hypothesis,
hypothesis_transition,
training=True)
loss = self.loss(labels, logits)
gradients = tape.gradient(loss, self._model.variables)
self._optimizer.apply_gradients(zip(gradients, self._model.variables),
global_step=tf.train.get_global_step())
return loss, logits
def decay_learning_rate(self, decay_by):
"""Decay learning rate of the optimizer by factor decay_by."""
self._learning_rate.assign(self._learning_rate * decay_by)
print("Decayed learning rate of optimizer to: %s" %
self._learning_rate.numpy())
@property
def learning_rate(self):
return self._learning_rate
@property
def model(self):
return self._model
@property
def variables(self):
return (self._model.variables + [self.learning_rate] +
self._optimizer.variables())
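# A minimal sketch (illustrative only): the stepwise learning-rate schedule
# produced by SNLIClassifierTrainer.decay_learning_rate when it is applied at
# the end of every `lr_decay_every` epochs, using the flag defaults defined
# later in this file (lr=2e-3, lr_decay_by=0.75, lr_decay_every=1).
def _example_lr_schedule(num_epochs=4, lr=2e-3, decay_by=0.75):
  schedule = []
  for _ in range(num_epochs):
    schedule.append(lr)
    lr *= decay_by
  return schedule  # approximately [0.002, 0.0015, 0.001125, 0.00084375]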
def _batch_n_correct(logits, label):
"""Calculate number of correct predictions in a batch.
Args:
logits: A logits Tensor of shape `(batch_size, num_categories)` and dtype
`float32`.
label: A labels Tensor of shape `(batch_size,)` and dtype `int64`
Returns:
Number of correct predictions.
"""
return tf.reduce_sum(
tf.cast((tf.equal(
tf.argmax(logits, axis=1), label)), tf.float32)).numpy()
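# A minimal sketch (illustrative only): `_batch_n_correct` on a toy batch.
# The first and third argmax predictions match the labels, so it returns 2.0.
# Assumes eager execution, since the function calls .numpy().
def _example_batch_n_correct():
  logits = tf.constant([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])
  labels = tf.constant([1, 1, 1], dtype=tf.int64)
  return _batch_n_correct(logits, labels)  # 2.0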
def _evaluate_on_dataset(snli_data, batch_size, trainer, use_gpu):
"""Run evaluation on a dataset.
Args:
snli_data: The `data.SnliData` to use in this evaluation.
batch_size: The batch size to use during this evaluation.
    trainer: An instance of `SNLIClassifierTrainer` to use for this
evaluation.
use_gpu: Whether GPU is being used.
Returns:
1. Average loss across all examples of the dataset.
2. Average accuracy rate across all examples of the dataset.
"""
mean_loss = tfe.metrics.Mean()
accuracy = tfe.metrics.Accuracy()
for label, prem, prem_trans, hypo, hypo_trans in _get_dataset_iterator(
snli_data, batch_size):
if use_gpu:
label, prem, hypo = label.gpu(), prem.gpu(), hypo.gpu()
logits = trainer.model(prem, prem_trans, hypo, hypo_trans, training=False)
loss_val = trainer.loss(label, logits)
batch_size = tf.shape(label)[0]
mean_loss(loss_val, weights=batch_size.gpu() if use_gpu else batch_size)
accuracy(tf.argmax(logits, axis=1), label)
return mean_loss.result().numpy(), accuracy.result().numpy()
def _get_dataset_iterator(snli_data, batch_size):
"""Get a data iterator for a split of SNLI data.
Args:
snli_data: A `data.SnliData` object.
batch_size: The desired batch size.
Returns:
A dataset iterator.
"""
with tf.device("/device:CPU:0"):
# Some tf.data ops, such as ShuffleDataset, are available only on CPU.
dataset = tf.data.Dataset.from_generator(
snli_data.get_generator(batch_size),
(tf.int64, tf.int64, tf.int64, tf.int64, tf.int64))
dataset = dataset.shuffle(snli_data.num_batches(batch_size))
return tfe.Iterator(dataset)
def train_or_infer_spinn(embed,
word2index,
train_data,
dev_data,
test_data,
config):
"""Perform Training or Inference on a SPINN model.
Args:
embed: The embedding matrix as a float32 numpy array with shape
[vocabulary_size, word_vector_len]. word_vector_len is the length of a
word embedding vector.
word2index: A `dict` mapping word to word index.
train_data: An instance of `data.SnliData`, for the train split.
dev_data: Same as above, for the dev split.
test_data: Same as above, for the test split.
config: A configuration object. See the argument to this Python binary for
details.
Returns:
    If `config.inference_premise` and `config.inference_hypothesis` are not
`None`, i.e., inference mode: the logits for the possible labels of the
SNLI data set, as a `Tensor` of three floats.
else:
The trainer object.
Raises:
ValueError: if only one of config.inference_premise and
config.inference_hypothesis is specified.
"""
  # TODO(cais): Refactor this function into separate ones for training and
# inference.
use_gpu = tfe.num_gpus() > 0 and not config.force_cpu
device = "gpu:0" if use_gpu else "cpu:0"
print("Using device: %s" % device)
if ((config.inference_premise and not config.inference_hypothesis) or
(not config.inference_premise and config.inference_hypothesis)):
raise ValueError(
"--inference_premise and --inference_hypothesis must be both "
"specified or both unspecified, but only one is specified.")
if config.inference_premise:
# Inference mode.
inference_sentence_pair = [
data.encode_sentence(config.inference_premise, word2index),
data.encode_sentence(config.inference_hypothesis, word2index)]
else:
inference_sentence_pair = None
log_header = (
" Time Epoch Iteration Progress (%Epoch) Loss Dev/Loss"
" Accuracy Dev/Accuracy")
log_template = (
"{:>6.0f} {:>5.0f} {:>9.0f} {:>5.0f}/{:<5.0f} {:>7.0f}% {:>8.6f} {} "
"{:12.4f} {}")
dev_log_template = (
"{:>6.0f} {:>5.0f} {:>9.0f} {:>5.0f}/{:<5.0f} {:>7.0f}% {:>8.6f} "
"{:8.6f} {:12.4f} {:12.4f}")
summary_writer = tf.contrib.summary.create_file_writer(
config.logdir, flush_millis=10000)
with tf.device(device), \
summary_writer.as_default(), \
tf.contrib.summary.always_record_summaries():
model = SNLIClassifier(config, embed)
global_step = tf.train.get_or_create_global_step()
trainer = SNLIClassifierTrainer(model, config.lr)
checkpoint = tf.train.Checkpoint(trainer=trainer, global_step=global_step)
checkpoint.restore(tf.train.latest_checkpoint(config.logdir))
if inference_sentence_pair:
# Inference mode.
prem, prem_trans = inference_sentence_pair[0]
hypo, hypo_trans = inference_sentence_pair[1]
inference_logits = model(
tf.constant(prem), tf.constant(prem_trans),
tf.constant(hypo), tf.constant(hypo_trans), training=False)
inference_logits = inference_logits[0][1:]
max_index = tf.argmax(inference_logits)
print("\nInference logits:")
for i, (label, logit) in enumerate(
zip(data.POSSIBLE_LABELS, inference_logits)):
winner_tag = " (winner)" if max_index == i else ""
print(" {0:<16}{1:.6f}{2}".format(label + ":", logit, winner_tag))
return inference_logits
train_len = train_data.num_batches(config.batch_size)
start = time.time()
iterations = 0
mean_loss = tfe.metrics.Mean()
accuracy = tfe.metrics.Accuracy()
print(log_header)
for epoch in xrange(config.epochs):
batch_idx = 0
for label, prem, prem_trans, hypo, hypo_trans in _get_dataset_iterator(
train_data, config.batch_size):
if use_gpu:
label, prem, hypo = label.gpu(), prem.gpu(), hypo.gpu()
# prem_trans and hypo_trans are used for dynamic control flow and can
# remain on CPU. Same in _evaluate_on_dataset().
iterations += 1
batch_train_loss, batch_train_logits = trainer.train_batch(
label, prem, prem_trans, hypo, hypo_trans)
batch_size = tf.shape(label)[0]
mean_loss(batch_train_loss.numpy(),
weights=batch_size.gpu() if use_gpu else batch_size)
accuracy(tf.argmax(batch_train_logits, axis=1), label)
if iterations % config.save_every == 0:
checkpoint.save(os.path.join(config.logdir, "ckpt"))
if iterations % config.dev_every == 0:
dev_loss, dev_frac_correct = _evaluate_on_dataset(
dev_data, config.batch_size, trainer, use_gpu)
print(dev_log_template.format(
time.time() - start,
epoch, iterations, 1 + batch_idx, train_len,
100.0 * (1 + batch_idx) / train_len,
mean_loss.result(), dev_loss,
accuracy.result() * 100.0, dev_frac_correct * 100.0))
tf.contrib.summary.scalar("dev/loss", dev_loss)
tf.contrib.summary.scalar("dev/accuracy", dev_frac_correct)
elif iterations % config.log_every == 0:
mean_loss_val = mean_loss.result()
accuracy_val = accuracy.result()
print(log_template.format(
time.time() - start,
epoch, iterations, 1 + batch_idx, train_len,
100.0 * (1 + batch_idx) / train_len,
mean_loss_val, " " * 8, accuracy_val * 100.0, " " * 12))
tf.contrib.summary.scalar("train/loss", mean_loss_val)
tf.contrib.summary.scalar("train/accuracy", accuracy_val)
# Reset metrics.
mean_loss = tfe.metrics.Mean()
accuracy = tfe.metrics.Accuracy()
batch_idx += 1
if (epoch + 1) % config.lr_decay_every == 0:
trainer.decay_learning_rate(config.lr_decay_by)
test_loss, test_frac_correct = _evaluate_on_dataset(
test_data, config.batch_size, trainer, use_gpu)
print("Final test loss: %g; accuracy: %g%%" %
(test_loss, test_frac_correct * 100.0))
return trainer
def main(_):
config = FLAGS
# Load embedding vectors.
vocab = data.load_vocabulary(FLAGS.data_root)
word2index, embed = data.load_word_vectors(FLAGS.data_root, vocab)
if not (config.inference_premise or config.inference_hypothesis):
print("Loading train, dev and test data...")
train_data = data.SnliData(
os.path.join(FLAGS.data_root, "snli/snli_1.0/snli_1.0_train.txt"),
word2index, sentence_len_limit=FLAGS.sentence_len_limit)
dev_data = data.SnliData(
os.path.join(FLAGS.data_root, "snli/snli_1.0/snli_1.0_dev.txt"),
word2index, sentence_len_limit=FLAGS.sentence_len_limit)
test_data = data.SnliData(
os.path.join(FLAGS.data_root, "snli/snli_1.0/snli_1.0_test.txt"),
word2index, sentence_len_limit=FLAGS.sentence_len_limit)
else:
train_data = None
dev_data = None
test_data = None
train_or_infer_spinn(
embed, word2index, train_data, dev_data, test_data, config)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description=
"TensorFlow eager implementation of the SPINN SNLI classifier.")
parser.add_argument("--data_root", type=str, default="/tmp/spinn-data",
help="Root directory in which the training data and "
"embedding matrix are found. See README.md for how to "
"generate such a directory.")
parser.add_argument("--sentence_len_limit", type=int, default=-1,
help="Maximum allowed sentence length (# of words). "
"The default of -1 means unlimited.")
parser.add_argument("--logdir", type=str, default="/tmp/spinn-logs",
help="Directory in which summaries will be written for "
"TensorBoard.")
parser.add_argument("--inference_premise", type=str, default=None,
help="Premise sentence for inference. Must be "
"accompanied by --inference_hypothesis. If specified, "
"will override all training parameters and perform "
"inference.")
parser.add_argument("--inference_hypothesis", type=str, default=None,
help="Hypothesis sentence for inference. Must be "
"accompanied by --inference_premise. If specified, will "
"override all training parameters and perform inference.")
parser.add_argument("--epochs", type=int, default=50,
help="Number of epochs to train.")
parser.add_argument("--batch_size", type=int, default=128,
help="Batch size to use during training.")
parser.add_argument("--d_proj", type=int, default=600,
help="Dimensions to project the word embedding vectors "
"to.")
parser.add_argument("--d_hidden", type=int, default=300,
                      help="Size of the hidden layer of the sentence-encoding "
                      "Reducer module.")
parser.add_argument("--d_out", type=int, default=4,
help="Output dimensions of the SNLIClassifier.")
parser.add_argument("--d_mlp", type=int, default=1024,
                      help="Size of each layer of the multi-layer perceptron "
                      "of the SNLIClassifier.")
parser.add_argument("--n_mlp_layers", type=int, default=2,
                      help="Number of layers in the multi-layer perceptron "
                      "of the SNLIClassifier.")
parser.add_argument("--d_tracker", type=int, default=64,
help="Size of the tracker LSTM.")
parser.add_argument("--log_every", type=int, default=50,
help="Print log and write TensorBoard summary every _ "
"training batches.")
parser.add_argument("--lr", type=float, default=2e-3,
help="Initial learning rate.")
parser.add_argument("--lr_decay_by", type=float, default=0.75,
help="The ratio to multiply the learning rate by every "
"time the learning rate is decayed.")
parser.add_argument("--lr_decay_every", type=float, default=1,
help="Decay the learning rate every _ epoch(s).")
parser.add_argument("--dev_every", type=int, default=1000,
help="Run evaluation on the dev split every _ training "
"batches.")
parser.add_argument("--save_every", type=int, default=1000,
help="Save checkpoint every _ training batches.")
parser.add_argument("--embed_dropout", type=float, default=0.08,
help="Word embedding dropout rate.")
parser.add_argument("--mlp_dropout", type=float, default=0.07,
help="SNLIClassifier multi-layer perceptron dropout "
"rate.")
parser.add_argument("--no-projection", action="store_false",
dest="projection",
help="Whether word embedding vectors are projected to "
"another set of vectors (see d_proj).")
parser.add_argument("--predict_transitions", action="store_true",
dest="predict",
help="Whether the Tracker will perform prediction.")
parser.add_argument("--force_cpu", action="store_true", dest="force_cpu",
help="Force use CPU-only regardless of whether a GPU is "
"available.")
FLAGS, unparsed = parser.parse_known_args()
tfe.run(main=main, argv=[sys.argv[0]] + unparsed)
| tensorflow-master | third_party/examples/eager/spinn/spinn.py |
#!/usr/bin/env python
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Crosstool wrapper for compiling CUDA programs with nvcc on Windows.
DESCRIPTION:
This script is the Windows version of //third_party/gpus/crosstool/crosstool_wrapper_is_not_gcc
"""
from __future__ import print_function
from argparse import ArgumentParser
import os
import subprocess
import re
import sys
import pipes
# Template values set by cuda_autoconf.
CPU_COMPILER = ('/usr/bin/gcc')
GCC_HOST_COMPILER_PATH = ('/usr/bin/gcc')
NVCC_PATH = '/usr/local/cuda-10.0/bin/nvcc'
NVCC_VERSION = '10.0'
NVCC_TEMP_DIR = "C:\\Windows\\Temp\\nvcc_inter_files_tmp_dir"
supported_cuda_compute_capabilities = [ "3.0", "6.0" ]
def Log(s):
print('gpus/crosstool: {0}'.format(s))
def GetOptionValue(argv, option):
  """Extract the list of values for an option from argv.
  Args:
    argv: A list of strings, possibly the argv passed to main().
    option: The option whose value to extract, without the leading '/'.
Returns:
1. A list of values, either directly following the option,
(eg., /opt val1 val2) or values collected from multiple occurrences of
the option (eg., /opt val1 /opt val2).
2. The leftover options.
"""
parser = ArgumentParser(prefix_chars='/')
parser.add_argument('/' + option, nargs='*', action='append')
args, leftover = parser.parse_known_args(argv)
if args and vars(args)[option]:
return (sum(vars(args)[option], []), leftover)
return ([], leftover)
def _update_options(nvcc_options):
if NVCC_VERSION in ("7.0",):
return nvcc_options
update_options = { "relaxed-constexpr" : "expt-relaxed-constexpr" }
return [ update_options[opt] if opt in update_options else opt
for opt in nvcc_options ]
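# A minimal sketch (illustrative only, not used by the build): how deprecated
# nvcc flag spellings are rewritten. For any NVCC_VERSION other than "7.0",
# "relaxed-constexpr" is mapped to "expt-relaxed-constexpr" before
# GetNvccOptions prepends the leading "--"; unrecognized options pass through.
def _example_update_options():
  return _update_options(["relaxed-constexpr", "ftz=true"])
  # == ["expt-relaxed-constexpr", "ftz=true"]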
def GetNvccOptions(argv):
"""Collect the -nvcc_options values from argv.
Args:
argv: A list of strings, possibly the argv passed to main().
Returns:
    1. A list of strings that can be passed directly to nvcc.
2. The leftover options.
"""
parser = ArgumentParser()
parser.add_argument('-nvcc_options', nargs='*', action='append')
args, leftover = parser.parse_known_args(argv)
if args.nvcc_options:
options = _update_options(sum(args.nvcc_options, []))
return (['--' + a for a in options], leftover)
return ([], leftover)
def InvokeNvcc(argv, log=False):
"""Call nvcc with arguments assembled from argv.
Args:
argv: A list of strings, possibly the argv passed to main().
log: True if logging is requested.
Returns:
The return value of calling os.system('nvcc ' + args)
"""
src_files = [f for f in argv if
               re.search(r'\.cpp$|\.cc$|\.c$|\.cxx$|\.C$', f)]
if len(src_files) == 0:
    raise RuntimeError('No source files found for cuda compilation.')
out_file = [ f for f in argv if f.startswith('/Fo') ]
if len(out_file) != 1:
    raise RuntimeError('Please specify exactly one output file for cuda compilation.')
out = ['-o', out_file[0][len('/Fo'):]]
nvcc_compiler_options, argv = GetNvccOptions(argv)
opt_option, argv = GetOptionValue(argv, 'O')
opt = ['-g', '-G']
if (len(opt_option) > 0 and opt_option[0] != 'd'):
opt = ['-O2']
include_options, argv = GetOptionValue(argv, 'I')
includes = ["-I " + include for include in include_options]
defines, argv = GetOptionValue(argv, 'D')
defines = ['-D' + define for define in defines]
undefines, argv = GetOptionValue(argv, 'U')
undefines = ['-U' + define for define in undefines]
  # The rest of the unrecognized options should be passed to the host compiler
host_compiler_options = [option for option in argv if option not in (src_files + out_file)]
m_options = ["-m64"]
nvccopts = ['-D_FORCE_INLINES']
for capability in supported_cuda_compute_capabilities:
capability = capability.replace('.', '')
nvccopts += [r'-gencode=arch=compute_%s,"code=sm_%s,compute_%s"' % (
capability, capability, capability)]
nvccopts += nvcc_compiler_options
nvccopts += undefines
nvccopts += defines
nvccopts += m_options
nvccopts += ['--compiler-options="' + " ".join(host_compiler_options) + '"']
nvccopts += ['-x', 'cu'] + opt + includes + out + ['-c'] + src_files
# If we don't specify --keep-dir, nvcc will generate intermediate files under TEMP
# Put them under NVCC_TEMP_DIR instead, then Bazel can ignore files under NVCC_TEMP_DIR during dependency check
# http://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html#options-for-guiding-compiler-driver
# Different actions are sharing NVCC_TEMP_DIR, so we cannot remove it if the directory already exists.
if os.path.isfile(NVCC_TEMP_DIR):
os.remove(NVCC_TEMP_DIR)
if not os.path.exists(NVCC_TEMP_DIR):
os.makedirs(NVCC_TEMP_DIR)
nvccopts += ['--keep', '--keep-dir', NVCC_TEMP_DIR]
cmd = [NVCC_PATH] + nvccopts
if log:
Log(cmd)
proc = subprocess.Popen(cmd,
stdout=sys.stdout,
stderr=sys.stderr,
env=os.environ.copy(),
shell=True)
proc.wait()
return proc.returncode
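# A minimal sketch (illustrative only, not used by the build): how the compute
# capabilities listed at the top of this file expand into the -gencode flags
# that InvokeNvcc adds to every compilation.
def _example_gencode_flags():
  flags = []
  for capability in supported_cuda_compute_capabilities:
    capability = capability.replace('.', '')
    flags.append(r'-gencode=arch=compute_%s,"code=sm_%s,compute_%s"' % (
        capability, capability, capability))
  return flags
  # ['-gencode=arch=compute_30,"code=sm_30,compute_30"',
  #  '-gencode=arch=compute_60,"code=sm_60,compute_60"']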
def main():
parser = ArgumentParser()
parser.add_argument('-x', nargs=1)
parser.add_argument('--cuda_log', action='store_true')
args, leftover = parser.parse_known_args(sys.argv[1:])
if args.x and args.x[0] == 'cuda':
if args.cuda_log: Log('-x cuda')
leftover = [pipes.quote(s) for s in leftover]
if args.cuda_log: Log('using nvcc')
return InvokeNvcc(leftover, log=args.cuda_log)
# Strip our flags before passing through to the CPU compiler for files which
# are not -x cuda. We can't just pass 'leftover' because it also strips -x.
# We not only want to pass -x to the CPU compiler, but also keep it in its
# relative location in the argv list (the compiler is actually sensitive to
# this).
cpu_compiler_flags = [flag for flag in sys.argv[1:]
if not flag.startswith(('--cuda_log'))
and not flag.startswith(('-nvcc_options'))]
return subprocess.call([CPU_COMPILER] + cpu_compiler_flags)
if __name__ == '__main__':
sys.exit(main())
| tensorflow-master | third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010-nvcc-cuda10.0/windows/msvc_wrapper_for_nvcc.py |
#!/usr/bin/env python
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Crosstool wrapper for compiling CUDA programs with nvcc on Windows.
DESCRIPTION:
This script is the Windows version of //third_party/gpus/crosstool/crosstool_wrapper_is_not_gcc
"""
from __future__ import print_function
from argparse import ArgumentParser
import os
import subprocess
import re
import sys
import pipes
# Template values set by cuda_autoconf.
CPU_COMPILER = ('/usr/bin/gcc')
GCC_HOST_COMPILER_PATH = ('/usr/bin/gcc')
NVCC_PATH = '/usr/local/cuda-10.0/bin/nvcc'
NVCC_VERSION = '10.0'
NVCC_TEMP_DIR = "C:\\Windows\\Temp\\nvcc_inter_files_tmp_dir"
supported_cuda_compute_capabilities = [ "3.0", "6.0" ]
def Log(s):
print('gpus/crosstool: {0}'.format(s))
def GetOptionValue(argv, option):
  """Extract the list of values for an option from argv.
  Args:
    argv: A list of strings, possibly the argv passed to main().
    option: The option whose value to extract, without the leading '/'.
Returns:
1. A list of values, either directly following the option,
(eg., /opt val1 val2) or values collected from multiple occurrences of
the option (eg., /opt val1 /opt val2).
2. The leftover options.
"""
parser = ArgumentParser(prefix_chars='/')
parser.add_argument('/' + option, nargs='*', action='append')
args, leftover = parser.parse_known_args(argv)
if args and vars(args)[option]:
return (sum(vars(args)[option], []), leftover)
return ([], leftover)
def _update_options(nvcc_options):
if NVCC_VERSION in ("7.0",):
return nvcc_options
update_options = { "relaxed-constexpr" : "expt-relaxed-constexpr" }
return [ update_options[opt] if opt in update_options else opt
for opt in nvcc_options ]
def GetNvccOptions(argv):
"""Collect the -nvcc_options values from argv.
Args:
argv: A list of strings, possibly the argv passed to main().
Returns:
    1. A list of strings that can be passed directly to nvcc.
2. The leftover options.
"""
parser = ArgumentParser()
parser.add_argument('-nvcc_options', nargs='*', action='append')
args, leftover = parser.parse_known_args(argv)
if args.nvcc_options:
options = _update_options(sum(args.nvcc_options, []))
return (['--' + a for a in options], leftover)
return ([], leftover)
def InvokeNvcc(argv, log=False):
"""Call nvcc with arguments assembled from argv.
Args:
argv: A list of strings, possibly the argv passed to main().
log: True if logging is requested.
Returns:
The return value of calling os.system('nvcc ' + args)
"""
src_files = [f for f in argv if
               re.search(r'\.cpp$|\.cc$|\.c$|\.cxx$|\.C$', f)]
if len(src_files) == 0:
    raise RuntimeError('No source files found for cuda compilation.')
out_file = [ f for f in argv if f.startswith('/Fo') ]
if len(out_file) != 1:
    raise RuntimeError('Please specify exactly one output file for cuda compilation.')
out = ['-o', out_file[0][len('/Fo'):]]
nvcc_compiler_options, argv = GetNvccOptions(argv)
opt_option, argv = GetOptionValue(argv, 'O')
opt = ['-g', '-G']
if (len(opt_option) > 0 and opt_option[0] != 'd'):
opt = ['-O2']
include_options, argv = GetOptionValue(argv, 'I')
includes = ["-I " + include for include in include_options]
defines, argv = GetOptionValue(argv, 'D')
defines = ['-D' + define for define in defines]
undefines, argv = GetOptionValue(argv, 'U')
undefines = ['-U' + define for define in undefines]
  # The rest of the unrecognized options should be passed to the host compiler
host_compiler_options = [option for option in argv if option not in (src_files + out_file)]
m_options = ["-m64"]
nvccopts = ['-D_FORCE_INLINES']
for capability in supported_cuda_compute_capabilities:
capability = capability.replace('.', '')
nvccopts += [r'-gencode=arch=compute_%s,"code=sm_%s,compute_%s"' % (
capability, capability, capability)]
nvccopts += nvcc_compiler_options
nvccopts += undefines
nvccopts += defines
nvccopts += m_options
nvccopts += ['--compiler-options="' + " ".join(host_compiler_options) + '"']
nvccopts += ['-x', 'cu'] + opt + includes + out + ['-c'] + src_files
# If we don't specify --keep-dir, nvcc will generate intermediate files under TEMP
# Put them under NVCC_TEMP_DIR instead, then Bazel can ignore files under NVCC_TEMP_DIR during dependency check
# http://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html#options-for-guiding-compiler-driver
# Different actions are sharing NVCC_TEMP_DIR, so we cannot remove it if the directory already exists.
if os.path.isfile(NVCC_TEMP_DIR):
os.remove(NVCC_TEMP_DIR)
if not os.path.exists(NVCC_TEMP_DIR):
os.makedirs(NVCC_TEMP_DIR)
nvccopts += ['--keep', '--keep-dir', NVCC_TEMP_DIR]
cmd = [NVCC_PATH] + nvccopts
if log:
Log(cmd)
proc = subprocess.Popen(cmd,
stdout=sys.stdout,
stderr=sys.stderr,
env=os.environ.copy(),
shell=True)
proc.wait()
return proc.returncode
def main():
parser = ArgumentParser()
parser.add_argument('-x', nargs=1)
parser.add_argument('--cuda_log', action='store_true')
args, leftover = parser.parse_known_args(sys.argv[1:])
if args.x and args.x[0] == 'cuda':
if args.cuda_log: Log('-x cuda')
leftover = [pipes.quote(s) for s in leftover]
if args.cuda_log: Log('using nvcc')
return InvokeNvcc(leftover, log=args.cuda_log)
# Strip our flags before passing through to the CPU compiler for files which
# are not -x cuda. We can't just pass 'leftover' because it also strips -x.
# We not only want to pass -x to the CPU compiler, but also keep it in its
# relative location in the argv list (the compiler is actually sensitive to
# this).
cpu_compiler_flags = [flag for flag in sys.argv[1:]
if not flag.startswith(('--cuda_log'))
and not flag.startswith(('-nvcc_options'))]
return subprocess.call([CPU_COMPILER] + cpu_compiler_flags)
if __name__ == '__main__':
sys.exit(main())
| tensorflow-master | third_party/toolchains/preconfig/ubuntu14.04/gcc-nvcc-cuda10.0/windows/msvc_wrapper_for_nvcc.py |
#!/usr/bin/env python
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Crosstool wrapper for compiling CUDA programs with nvcc on Windows.
DESCRIPTION:
This script is the Windows version of //third_party/gpus/crosstool/crosstool_wrapper_is_not_gcc
"""
from __future__ import print_function
from argparse import ArgumentParser
import os
import subprocess
import re
import sys
import pipes
# Template values set by cuda_autoconf.
CPU_COMPILER = ('/opt/rh/devtoolset-7/root/usr/bin/gcc')
GCC_HOST_COMPILER_PATH = ('/opt/rh/devtoolset-7/root/usr/bin/gcc')
NVCC_PATH = '/usr/local/cuda-10.0/bin/nvcc'
NVCC_VERSION = '10.0'
NVCC_TEMP_DIR = "C:\\Windows\\Temp\\nvcc_inter_files_tmp_dir"
supported_cuda_compute_capabilities = [ "3.0", "6.0" ]
def Log(s):
print('gpus/crosstool: {0}'.format(s))
def GetOptionValue(argv, option):
  """Extract the list of values for an option from argv.
  Args:
    argv: A list of strings, possibly the argv passed to main().
    option: The option whose value to extract, without the leading '/'.
Returns:
1. A list of values, either directly following the option,
(eg., /opt val1 val2) or values collected from multiple occurrences of
the option (eg., /opt val1 /opt val2).
2. The leftover options.
"""
parser = ArgumentParser(prefix_chars='/')
parser.add_argument('/' + option, nargs='*', action='append')
args, leftover = parser.parse_known_args(argv)
if args and vars(args)[option]:
return (sum(vars(args)[option], []), leftover)
return ([], leftover)
def _update_options(nvcc_options):
if NVCC_VERSION in ("7.0",):
return nvcc_options
update_options = { "relaxed-constexpr" : "expt-relaxed-constexpr" }
return [ update_options[opt] if opt in update_options else opt
for opt in nvcc_options ]
def GetNvccOptions(argv):
"""Collect the -nvcc_options values from argv.
Args:
argv: A list of strings, possibly the argv passed to main().
Returns:
    1. A list of strings that can be passed directly to nvcc.
2. The leftover options.
"""
parser = ArgumentParser()
parser.add_argument('-nvcc_options', nargs='*', action='append')
args, leftover = parser.parse_known_args(argv)
if args.nvcc_options:
options = _update_options(sum(args.nvcc_options, []))
return (['--' + a for a in options], leftover)
return ([], leftover)
def InvokeNvcc(argv, log=False):
"""Call nvcc with arguments assembled from argv.
Args:
argv: A list of strings, possibly the argv passed to main().
log: True if logging is requested.
Returns:
The return value of calling os.system('nvcc ' + args)
"""
src_files = [f for f in argv if
               re.search(r'\.cpp$|\.cc$|\.c$|\.cxx$|\.C$', f)]
if len(src_files) == 0:
    raise RuntimeError('No source files found for cuda compilation.')
out_file = [ f for f in argv if f.startswith('/Fo') ]
if len(out_file) != 1:
    raise RuntimeError('Please specify exactly one output file for cuda compilation.')
out = ['-o', out_file[0][len('/Fo'):]]
nvcc_compiler_options, argv = GetNvccOptions(argv)
opt_option, argv = GetOptionValue(argv, 'O')
opt = ['-g', '-G']
if (len(opt_option) > 0 and opt_option[0] != 'd'):
opt = ['-O2']
include_options, argv = GetOptionValue(argv, 'I')
includes = ["-I " + include for include in include_options]
defines, argv = GetOptionValue(argv, 'D')
defines = ['-D' + define for define in defines]
undefines, argv = GetOptionValue(argv, 'U')
undefines = ['-U' + define for define in undefines]
  # The rest of the unrecognized options should be passed to the host compiler
host_compiler_options = [option for option in argv if option not in (src_files + out_file)]
m_options = ["-m64"]
nvccopts = ['-D_FORCE_INLINES']
for capability in supported_cuda_compute_capabilities:
capability = capability.replace('.', '')
nvccopts += [r'-gencode=arch=compute_%s,"code=sm_%s,compute_%s"' % (
capability, capability, capability)]
nvccopts += nvcc_compiler_options
nvccopts += undefines
nvccopts += defines
nvccopts += m_options
nvccopts += ['--compiler-options="' + " ".join(host_compiler_options) + '"']
nvccopts += ['-x', 'cu'] + opt + includes + out + ['-c'] + src_files
# If we don't specify --keep-dir, nvcc will generate intermediate files under TEMP
# Put them under NVCC_TEMP_DIR instead, then Bazel can ignore files under NVCC_TEMP_DIR during dependency check
# http://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html#options-for-guiding-compiler-driver
# Different actions are sharing NVCC_TEMP_DIR, so we cannot remove it if the directory already exists.
if os.path.isfile(NVCC_TEMP_DIR):
os.remove(NVCC_TEMP_DIR)
if not os.path.exists(NVCC_TEMP_DIR):
os.makedirs(NVCC_TEMP_DIR)
nvccopts += ['--keep', '--keep-dir', NVCC_TEMP_DIR]
cmd = [NVCC_PATH] + nvccopts
if log:
Log(cmd)
proc = subprocess.Popen(cmd,
stdout=sys.stdout,
stderr=sys.stderr,
env=os.environ.copy(),
shell=True)
proc.wait()
return proc.returncode
def main():
parser = ArgumentParser()
parser.add_argument('-x', nargs=1)
parser.add_argument('--cuda_log', action='store_true')
args, leftover = parser.parse_known_args(sys.argv[1:])
if args.x and args.x[0] == 'cuda':
if args.cuda_log: Log('-x cuda')
leftover = [pipes.quote(s) for s in leftover]
if args.cuda_log: Log('using nvcc')
return InvokeNvcc(leftover, log=args.cuda_log)
# Strip our flags before passing through to the CPU compiler for files which
# are not -x cuda. We can't just pass 'leftover' because it also strips -x.
# We not only want to pass -x to the CPU compiler, but also keep it in its
# relative location in the argv list (the compiler is actually sensitive to
# this).
cpu_compiler_flags = [flag for flag in sys.argv[1:]
if not flag.startswith(('--cuda_log'))
and not flag.startswith(('-nvcc_options'))]
return subprocess.call([CPU_COMPILER] + cpu_compiler_flags)
if __name__ == '__main__':
sys.exit(main())
| tensorflow-master | third_party/toolchains/preconfig/centos6/gcc7-nvcc-cuda10.0/windows/msvc_wrapper_for_nvcc.py |