# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup for pip package."""
from setuptools import setup
REQUIRED_PACKAGES = (
"absl-py",
"dataclasses",
"jax",
"networkx",
"numpy",
"ordered-set",
"typing",
)
LONG_DESCRIPTION = "\n".join([
"Kronecker-Factored Approximate Curvature (K-FAC) optimizer implemented in "
"JAX.",
"",
"Accompanying code for 'Better, Faster Fermionic Neural Networks'",
"James S. Spencer, David Pfau, Aleksandar Botev, and W. M. C. Foulkes.",
"https://arxiv.org/abs/2011.07125.",
])
setup(
name="kfac_ferminet_alpha",
version="0.0.1",
description="A K-FAC optimizer implemented in JAX",
long_description=LONG_DESCRIPTION,
url="https://github.com/deepmind/deepmind-research/kfac_ferminet_alpha",
author="DeepMind",
package_dir={"kfac_ferminet_alpha": "."},
packages=["kfac_ferminet_alpha"],
install_requires=REQUIRED_PACKAGES,
platforms=["any"],
license="Apache License, Version 2.0",
)
# ---- end of kfac_ferminet_alpha/setup.py (deepmind-research-master) ----
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of running KFAC."""
from absl import app
from absl import flags
import jax
import jax.numpy as jnp
import numpy as np
import kfac_ferminet_alpha
from kfac_ferminet_alpha import utils
TRAINING_STEPS = flags.DEFINE_integer(
name="training_steps",
default=100,
help="Number of training steps to perform")
BATCH_SIZE = flags.DEFINE_integer(
name="batch_size", default=128, help="Batch size")
LEARNING_RATE = flags.DEFINE_float(
name="learning_rate", default=1e-3, help="Learning rate")
L2_REG = flags.DEFINE_float(
name="l2_reg", default=1e-3, help="L2 regularization coefficient")
MOMENTUM = flags.DEFINE_float(
name="momentum", default=0.8, help="Momentum coefficient")
DAMPING = flags.DEFINE_float(
name="damping", default=1e-2, help="Damping coefficient")
MULTI_DEVICE = flags.DEFINE_bool(
name="multi_device",
default=False,
help="Whether the computation should be replicated across multiple devices")
SEED = flags.DEFINE_integer(name="seed", default=12412321, help="JAX RNG seed")
def glorot_uniform(shape, key):
dim_in = np.prod(shape[:-1])
dim_out = shape[-1]
c = jnp.sqrt(6 / (dim_in + dim_out))
return jax.random.uniform(key, shape=shape, minval=-c, maxval=c)
def fully_connected_layer(params, x):
w, b = params
return jnp.matmul(x, w) + b[None]
def model_init(rng_key, batch, encoder_sizes=(1000, 500, 250, 30)):
"""Initialize the standard autoencoder."""
x_size = batch.shape[-1]
decoder_sizes = encoder_sizes[len(encoder_sizes) - 2::-1]
sizes = (x_size,) + encoder_sizes + decoder_sizes + (x_size,)
keys = jax.random.split(rng_key, len(sizes) - 1)
params = []
for rng_key, dim_in, dim_out in zip(keys, sizes, sizes[1:]):
# Glorot uniform initialization
w = glorot_uniform((dim_in, dim_out), rng_key)
b = jnp.zeros([dim_out])
params.append((w, b))
return params, None
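# With the default `encoder_sizes`, `sizes` above expands to
# (x_size, 1000, 500, 250, 30, 250, 500, 1000, x_size): a symmetric
# autoencoder with a 30-unit bottleneck.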
def model_loss(params, inputs, l2_reg):
"""Evaluate the standard autoencoder."""
h = inputs.reshape([inputs.shape[0], -1])
for i, layer_params in enumerate(params):
h = fully_connected_layer(layer_params, h)
    # Every fourth layer (the bottleneck and the output layer) is linear;
    # all other layers use tanh.
if i % 4 != 3:
h = jnp.tanh(h)
l2_value = 0.5 * sum(jnp.square(p).sum() for p in jax.tree_leaves(params))
error = jax.nn.sigmoid(h) - inputs.reshape([inputs.shape[0], -1])
mean_squared_error = jnp.mean(jnp.sum(error * error, axis=1), axis=0)
regularized_loss = mean_squared_error + l2_reg * l2_value
return regularized_loss, dict(mean_squared_error=mean_squared_error)
def random_data(multi_device, batch_shape, rng):
  if multi_device:
    shape = (jax.local_device_count(),) + tuple(batch_shape)
else:
shape = tuple(batch_shape)
while True:
rng, key = jax.random.split(rng)
yield jax.random.normal(key, shape)
def main(argv):
del argv # Unused.
learning_rate = jnp.asarray([LEARNING_RATE.value])
momentum = jnp.asarray([MOMENTUM.value])
damping = jnp.asarray([DAMPING.value])
# RNG keys
global_step = jnp.zeros([])
rng = jax.random.PRNGKey(SEED.value)
params_key, opt_key, step_key, data_key = jax.random.split(rng, 4)
dataset = random_data(MULTI_DEVICE.value, (BATCH_SIZE.value, 20), data_key)
example_batch = next(dataset)
if MULTI_DEVICE.value:
global_step = utils.replicate_all_local_devices(global_step)
learning_rate = utils.replicate_all_local_devices(learning_rate)
momentum = utils.replicate_all_local_devices(momentum)
damping = utils.replicate_all_local_devices(damping)
params_key, opt_key = utils.replicate_all_local_devices(
(params_key, opt_key))
step_key = utils.make_different_rng_key_on_all_devices(step_key)
split_key = jax.pmap(lambda x: tuple(jax.random.split(x)))
jit_init_parameters_func = jax.pmap(model_init)
else:
split_key = jax.random.split
jit_init_parameters_func = jax.jit(model_init)
# Initialize or load parameters
params, func_state = jit_init_parameters_func(params_key, example_batch)
# Make optimizer
optim = kfac_ferminet_alpha.Optimizer(
value_and_grad_func=jax.value_and_grad(
lambda p, x: model_loss(p, x, L2_REG.value), has_aux=True),
l2_reg=L2_REG.value,
value_func_has_aux=True,
value_func_has_state=False,
value_func_has_rng=False,
learning_rate_schedule=None,
momentum_schedule=None,
damping_schedule=None,
norm_constraint=1.0,
num_burnin_steps=10,
)
# Initialize optimizer
opt_state = optim.init(params, opt_key, example_batch, func_state)
for t in range(TRAINING_STEPS.value):
step_key, key_t = split_key(step_key)
params, opt_state, stats = optim.step(
params,
opt_state,
key_t,
dataset,
learning_rate=learning_rate,
momentum=momentum,
damping=damping)
global_step = global_step + 1
# Log any of the statistics
print(f"iteration: {t}")
print(f"mini-batch loss = {stats['loss']}")
if "aux" in stats:
for k, v in stats["aux"].items():
print(f"{k} = {v}")
print("----")
if __name__ == "__main__":
app.run(main)
# ---- end of kfac_ferminet_alpha/example.py (deepmind-research-master) ----
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities related to multi-device operations."""
import collections
from typing import Any, Mapping, Optional, Sequence, Tuple, TypeVar, Union
import dataclasses
import jax
from jax import core
from jax import lax
import jax.numpy as jnp
from jax.scipy import linalg
import jax.tree_util as tree_util
T = TypeVar("T")
def wrap_if_pmap(p_func):
def p_func_if_pmap(obj, axis_name):
try:
core.axis_frame(axis_name)
return p_func(obj, axis_name)
except NameError:
return obj
return p_func_if_pmap
pmean_if_pmap = wrap_if_pmap(lax.pmean)
psum_if_pmap = wrap_if_pmap(lax.psum)
compute_mean = jax.pmap(lambda x: lax.pmean(x, "i"), axis_name="i")
compute_sum = jax.pmap(lambda x: lax.psum(x, "i"), axis_name="i")
def get_first(obj: T) -> T:
return jax.tree_map(lambda x: x[0], obj)
def get_mean(obj: T) -> T:
return get_first(compute_mean(obj))
def get_sum(obj: T) -> T:
return get_first(compute_sum(obj))
broadcast_all_local_devices = jax.pmap(lambda x: x)
def replicate_all_local_devices(obj: T) -> T:
n = jax.local_device_count()
obj_stacked = jax.tree_map(lambda x: jnp.stack([x] * n, axis=0), obj)
return broadcast_all_local_devices(obj_stacked)
def make_different_rng_key_on_all_devices(rng: jnp.ndarray) -> jnp.ndarray:
rng = jax.random.fold_in(rng, jax.host_id())
rng = jax.random.split(rng, jax.local_device_count())
return broadcast_all_local_devices(rng)
p_split = jax.pmap(lambda key: tuple(jax.random.split(key)))
def scalar_mul(obj: T, scalar: Union[float, jnp.ndarray]) -> T:
return jax.tree_map(lambda x: x * scalar, obj)
def scalar_div(obj: T, scalar: Union[float, jnp.ndarray]) -> T:
return jax.tree_map(lambda x: x / scalar, obj)
def make_func_args(params, func_state, rng, batch, has_state: bool,
has_rng: bool):
"""Correctly puts all arguments to the function together."""
func_args = (params,)
if has_state:
if func_state is None:
raise ValueError("The `func_state` is None, but the argument `has_state` "
"is True.")
func_args += (func_state,)
if has_rng:
if rng is None:
raise ValueError("The `rng` is None, but the argument `has_rng` is True.")
func_args += (rng,)
func_args += (batch,)
return func_args
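# For example (a sketch of the convention used throughout this package):
# with `has_state=True` and `has_rng=True` the assembled call is
# `func(params, func_state, rng, batch)`; flags that are False simply drop
# the corresponding argument from that ordering.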
def extract_func_outputs(
raw_outputs: Any,
has_aux: bool,
has_state: bool,
) -> Tuple[jnp.ndarray, Any, Any]:
"""Given the function output returns separately the loss, func_state, aux."""
if not has_aux and not has_state:
return raw_outputs, None, None
loss, other = raw_outputs
if has_aux and has_state:
func_state, aux = other
elif has_aux:
func_state, aux = None, other
else:
func_state, aux = other, None
return loss, func_state, aux
def inner_product(obj1: T, obj2: T) -> jnp.ndarray:
if jax.tree_structure(obj1) != jax.tree_structure(obj2):
raise ValueError("The two structures are not identical.")
elements_product = jax.tree_map(lambda x, y: jnp.sum(x * y), obj1, obj2)
return sum(jax.tree_flatten(elements_product)[0])
def psd_inv_cholesky(matrix: jnp.ndarray, damping: jnp.ndarray) -> jnp.ndarray:
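  """Computes `(matrix + damping * I)^-1` for a PSD matrix via a symmetric solve."""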
assert matrix.ndim == 2
identity = jnp.eye(matrix.shape[0])
matrix = matrix + damping * identity
return linalg.solve(matrix, identity, sym_pos=True)
def solve_maybe_small(a: jnp.ndarray, b: jnp.ndarray) -> jnp.ndarray:
"""Computes a^-1 b more efficiently for small matrices."""
assert a.shape[-1] == a.shape[-2] == b.shape[-1]
d = a.shape[-1]
  if d == 0:
    # Nothing to solve for an empty system; return the (empty) right-hand side.
    return b
elif d == 1:
return b / a[..., 0]
elif d == 2:
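    # Closed-form 2x2 solve via Cramer's rule.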
det = a[..., 0, 0] * a[..., 1, 1] - a[..., 0, 1] * a[..., 1, 0]
b_0 = a[..., 1, 1] * b[..., 0] - a[..., 0, 1] * b[..., 1]
b_1 = a[..., 0, 0] * b[..., 1] - a[..., 1, 0] * b[..., 0]
return jnp.stack([b_0, b_1], axis=-1) / det
elif d == 3:
raise NotImplementedError()
return jnp.linalg.solve(a, b)
def pi_adjusted_inverse(
factor_0: jnp.ndarray,
factor_1: jnp.ndarray,
damping: jnp.ndarray,
pmap_axis_name: str,
) -> Tuple[jnp.ndarray, jnp.ndarray]:
"""Performs inversion with pi-adjusted damping."""
# Compute the norms of each factor
norm_0 = jnp.trace(factor_0)
norm_1 = jnp.trace(factor_1)
  # We need to sync the norms here, because reductions are non-deterministic
  # by default on GPUs (for performance reasons). Hence, although factor_0
  # and factor_1 are synced across devices, the trace operation above can
  # still produce different answers on different devices.
norm_0, norm_1 = pmean_if_pmap((norm_0, norm_1), axis_name=pmap_axis_name)
# Compute the overall scale
scale = norm_0 * norm_1
def regular_inverse(
operand: Sequence[jnp.ndarray]) -> Tuple[jnp.ndarray, jnp.ndarray]:
factor0, factor1, norm0, norm1, s, d = operand
# Special cases with one or two scalar factors
if factor0.size == 1 and factor1.size == 1:
value = jnp.ones_like(factor0) / jnp.sqrt(s)
return value, value
if factor0.size == 1:
factor1_normed = factor1 / norm1
damping1 = d / norm1
factor1_inv = psd_inv_cholesky(factor1_normed, damping1)
return jnp.full((1, 1), s), factor1_inv
if factor1.size == 1:
factor0_normed = factor0 / norm0
damping0 = d / norm0
factor0_inv = psd_inv_cholesky(factor0_normed, damping0)
return factor0_inv, jnp.full((1, 1), s)
# Invert first factor
factor0_normed = factor0 / norm0
damping0 = jnp.sqrt(d * factor1.shape[0] / (s * factor0.shape[0]))
factor0_inv = psd_inv_cholesky(factor0_normed, damping0) / jnp.sqrt(s)
# Invert second factor
factor1_normed = factor1 / norm1
damping1 = jnp.sqrt(d * factor0.shape[0] / (s * factor1.shape[0]))
factor1_inv = psd_inv_cholesky(factor1_normed, damping1) / jnp.sqrt(s)
return factor0_inv, factor1_inv
def zero_inverse(
operand: Sequence[jnp.ndarray]) -> Tuple[jnp.ndarray, jnp.ndarray]:
return (jnp.eye(factor_0.shape[0]) / jnp.sqrt(operand[-1]),
jnp.eye(factor_1.shape[0]) / jnp.sqrt(operand[-1]))
  # In the special case where one of the factors is zero, the correct inverse
  # of `(0 kron A + lambda I)` is `(I / sqrt(lambda)) kron (I / sqrt(lambda))`.
  # However, because one of the norms is zero, `pi` and `1/pi` would be 0 and
  # infinity, leading to NaN values. Hence, we need to make this check
  # explicitly.
return lax.cond(
jnp.greater(scale, 0.0),
regular_inverse,
zero_inverse,
operand=(factor_0, factor_1, norm_0, norm_1, scale, damping))
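# For reference, this implements the pi-adjusted (factored Tikhonov) damping
# of Martens & Grosse (https://arxiv.org/abs/1503.05671): for a Kronecker
# product A kron B damped as (A kron B + d * I), the damping is split between
# the factors as
#   (A + sqrt(d) * pi * I) kron (B + sqrt(d) / pi * I),
# with pi = sqrt((tr(A) / dim(A)) / (tr(B) / dim(B))). The trace
# normalization above implements this idea in a rescaled form. A hypothetical
# single-device usage (the axis name is ignored outside of `pmap`):
#
#   a_inv, b_inv = pi_adjusted_inverse(
#       factor_0=jnp.eye(3) * 2.0,
#       factor_1=jnp.eye(5) * 4.0,
#       damping=jnp.asarray(0.01),
#       pmap_axis_name="kfac_axis")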
def convert_value_and_grad_to_value_func(
value_and_grad_func,
has_aux: bool = False,
):
"""Converts a value_and_grad function to value_func only."""
def value_func(*args, **kwargs):
out, _ = value_and_grad_func(*args, **kwargs)
if has_aux:
return out[0]
else:
return out
return value_func
def check_structure_shapes_and_dtype(obj1: T, obj2: T) -> None:
"""Verifies that the two objects have the same pytree structure."""
assert jax.tree_structure(obj1) == jax.tree_structure(obj2)
for v1, v2 in zip(jax.tree_flatten(obj1)[0], jax.tree_flatten(obj2)[0]):
assert v1.shape == v2.shape
assert v1.dtype == v2.dtype
def check_first_dim_is_batch_size(batch_size: int, *args: jnp.ndarray) -> None:
for i, arg in enumerate(args):
if arg.shape[0] != batch_size:
raise ValueError(f"Expecting first dimension of arg[{i}] with shape "
f"{arg.shape} to be equal to the batch size "
f"{batch_size}.")
def py_tree_registered_dataclass(cls, *args, **kwargs):
"""Creates a new dataclass type and registers it as a pytree node."""
dcls = dataclasses.dataclass(cls, *args, **kwargs)
tree_util.register_pytree_node(
dcls,
lambda instance: ( # pylint: disable=g-long-lambda
[getattr(instance, f.name)
for f in dataclasses.fields(instance)], None),
lambda _, instance_args: dcls(*instance_args))
return dcls
class WeightedMovingAverage:
"""A wrapped class for a variable for which we keep exponential moving average."""
def __init__(self, weight: jnp.ndarray, array: jnp.ndarray):
self._weight = weight
self._array = array
@staticmethod
def zero(shape: Sequence[int]) -> "WeightedMovingAverage":
return WeightedMovingAverage(weight=jnp.zeros([]), array=jnp.zeros(shape))
@property
def weight(self) -> jnp.ndarray:
return self._weight
@property
def value(self) -> jnp.ndarray:
return self._array / self._weight
@property
def raw_value(self) -> jnp.ndarray:
return self._array
def update(self, value: jnp.ndarray, old_weight_multiplier: float,
new_weight: float) -> None:
self._weight = old_weight_multiplier * self._weight + new_weight
self._array = old_weight_multiplier * self._array + new_weight * value
def sync(self, pmap_axis_name: str) -> None:
self._array = pmean_if_pmap(self._array, pmap_axis_name)
def __str__(self) -> str:
return (f"ExponentialMovingAverage(weight={self._weight}, "
f"array={self._array})")
def __repr__(self) -> str:
return self.__str__()
tree_util.register_pytree_node(
WeightedMovingAverage,
lambda instance: ((instance.weight, instance.raw_value), None),
lambda _, instance_args: WeightedMovingAverage(*instance_args),
)
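# A minimal usage sketch: the average tracks a weighted sum and its total
# weight, and `value` returns their ratio.
#
#   ema = WeightedMovingAverage.zero([2])
#   ema.update(jnp.ones([2]), old_weight_multiplier=0.95, new_weight=1.0)
#   ema.value  # == jnp.ones([2]), since weight == 1.0 and array == ones here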
class Stateful:
"""A class for stateful objects."""
def __init__(self, stateful_fields_names: Optional[Sequence[str]] = ()):
self.__stateful_fields_names = stateful_fields_names
def _add_stateful_fields_names(self, value: Sequence[str]) -> None:
self.__stateful_fields_names += tuple(value)
def get_state(self) -> Mapping[str, Any]:
"""Returns the state of the object."""
state = dict()
for name in self.__stateful_fields_names:
state[name] = Stateful._get_state_from_instance(getattr(self, name))
return state
def set_state(self, value):
"""Sets the state of the object with the provided value and returns the object."""
assert isinstance(value, dict)
for name in self.__stateful_fields_names:
setattr(self, name,
Stateful._set_state_to_instance(getattr(self, name), value[name]))
return self
def clear_state(self) -> None:
"""Clears the state of the object."""
for name in self.__stateful_fields_names:
setattr(self, name,
Stateful._clear_state_from_instance(getattr(self, name)))
def pop_state(self) -> Mapping[str, Any]:
"""Returns the current state of the object, while simultaneously clearing it."""
state = self.get_state()
self.clear_state()
return state
@staticmethod
def _get_state_from_instance(obj):
"""Recursively gets the state of the object and returns it."""
if isinstance(obj, Stateful):
return obj.get_state()
if isinstance(obj, list):
return [Stateful._get_state_from_instance(i) for i in obj]
if isinstance(obj, tuple):
return tuple(Stateful._get_state_from_instance(i) for i in obj)
if isinstance(obj, collections.OrderedDict):
return collections.OrderedDict(
(k, Stateful._get_state_from_instance(v)) for k, v in obj.items())
if isinstance(obj, dict):
return dict(
(k, Stateful._get_state_from_instance(v)) for k, v in obj.items())
return obj
@staticmethod
def _set_state_to_instance(obj, value):
"""Recursively sets the state of the object and returns it."""
if isinstance(obj, Stateful):
obj.set_state(value)
return obj
if isinstance(value, list):
if obj is None:
obj = [None] * len(value)
return [
Stateful._set_state_to_instance(obj_i, value_i)
for obj_i, value_i in zip(obj, value)
]
if isinstance(value, tuple):
if obj is None:
obj = [None] * len(value)
return tuple(
Stateful._set_state_to_instance(obj_i, value_i)
for obj_i, value_i in zip(obj, value))
if isinstance(value, collections.OrderedDict):
if obj is None:
obj = dict((k, None) for k in value)
return collections.OrderedDict(
(k, Stateful._set_state_to_instance(obj[k], value[k])) for k in obj)
if isinstance(value, dict):
obj = dict((k, None) for k in value)
return dict(
(k, Stateful._set_state_to_instance(obj[k], value[k])) for k in obj)
return value
@staticmethod
def _clear_state_from_instance(obj):
"""Recursively clears the state of the object and returns it."""
if isinstance(obj, Stateful):
obj.clear_state()
return obj
if isinstance(obj, list):
return [Stateful._clear_state_from_instance(obj_i) for obj_i in obj]
if isinstance(obj, tuple):
return tuple(Stateful._clear_state_from_instance(obj_i) for obj_i in obj)
if isinstance(obj, collections.OrderedDict):
return collections.OrderedDict(
(k, Stateful._clear_state_from_instance(obj[k])) for k in obj)
if isinstance(obj, dict):
return dict((k, Stateful._clear_state_from_instance(obj[k])) for k in obj)
return None
@staticmethod
def infer_class_state(class_type):
"""Infers a stateful class state attributes from class annotations."""
if not issubclass(class_type, Stateful):
raise ValueError(
f"In order to annotate a class as stateful it must inherit "
f"{Stateful!r}")
class_type = dataclasses.dataclass(
class_type, init=False, repr=False, eq=False) # pytype: disable=wrong-keyword-args
fields_names = tuple(field.name for field in dataclasses.fields(class_type))
original_init = getattr(class_type, "__init__", None)
if original_init is None:
def injected_init(self, *args, **kwargs):
super(self.__class__, self).__init__(*args, **kwargs) # pylint: disable=bad-super-call
Stateful._add_stateful_fields_names(self, fields_names)
for field_name in fields_names:
if getattr(self, field_name, None) is None:
setattr(self, field_name, None)
setattr(class_type, "__init__", injected_init)
else:
def injected_init(self, *args, **kwargs):
original_init(self, *args, **kwargs)
Stateful._add_stateful_fields_names(self, fields_names)
for field_name in fields_names:
if getattr(self, field_name, None) is None:
setattr(self, field_name, None)
setattr(class_type, "__init__", injected_init)
return class_type
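# A minimal sketch of how `Stateful.infer_class_state` is used elsewhere in
# this package (see `Optimizer` and `CurvatureEstimator`): annotated fields
# become the object's state, which can be popped and restored around jitted
# calls.
#
#   @Stateful.infer_class_state
#   class Counter(Stateful):
#     count: Any
#
#   c = Counter()
#   c.count = jnp.zeros([])
#   state = c.pop_state()  # {"count": ...}; also resets `count` to None
#   c.set_state(state)     # restores `count`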
def compute_sq_norm_relative_abs_diff(obj, pmap_axis_name):
sq_norm = inner_product(obj, obj)
synced_sq_norm = psum_if_pmap(sq_norm, pmap_axis_name)
synced_sq_norm = (synced_sq_norm - sq_norm) / (jax.device_count() - 1.0)
sq_norm_abs_diff = jnp.abs(sq_norm - synced_sq_norm)
return sq_norm_abs_diff / sq_norm
def product(iterable_object):
x = 1
for element in iterable_object:
x *= element
return x
# ---- end of kfac_ferminet_alpha/utils.py (deepmind-research-master) ----
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module for the main curvature optimizer class."""
from typing import Any, Callable, Iterator, Mapping, Optional, Sequence, Tuple, Union
import jax
import jax.lax as lax
import jax.numpy as jnp
import jax.random as jnr
from kfac_ferminet_alpha import estimator
from kfac_ferminet_alpha import tag_graph_matcher as tgm
from kfac_ferminet_alpha import utils
ScheduleType = Callable[[jnp.ndarray], Optional[jnp.ndarray]]
Parameters = Any
Batch = Any
FuncState = Any
State = Mapping[str, Any]
@utils.Stateful.infer_class_state
class Optimizer(utils.Stateful):
"""The default optimizer class."""
velocities: Parameters
estimator: estimator.CurvatureEstimator
step_counter: jnp.ndarray
def __init__(
self,
value_and_grad_func,
l2_reg: Union[float, jnp.ndarray],
value_func_has_aux: bool = False,
value_func_has_state: bool = False,
value_func_has_rng: bool = False,
learning_rate_schedule: Optional[ScheduleType] = None,
momentum_schedule: Optional[ScheduleType] = None,
damping_schedule: Optional[ScheduleType] = None,
min_damping: Union[float, jnp.ndarray] = 1e-8,
max_damping: Union[float, jnp.ndarray] = jnp.inf,
norm_constraint: Optional[Union[float, jnp.ndarray]] = None,
num_burnin_steps: int = 10,
estimation_mode: str = "fisher_gradients",
curvature_ema: Union[float, jnp.ndarray] = 0.95,
inverse_update_period: int = 5,
register_only_generic: bool = False,
layer_tag_to_block_cls: Optional[estimator.TagMapping] = None,
patterns_to_skip: Sequence[str] = (),
donate_parameters: bool = False,
donate_optimizer_state: bool = False,
donate_batch_inputs: bool = False,
donate_func_state: bool = False,
batch_process_func: Optional[Callable[[Any], Any]] = None,
multi_device: bool = False,
use_jax_cond: bool = True,
debug: bool = False,
pmap_axis_name="kfac_axis",
):
"""Initializes the K-FAC optimizer with the given settings.
Args:
      value_and_grad_func: Python callable. The function should return the
        value of the loss to be optimized and its gradients. If the argument
        `value_func_has_aux` is `False` then the interface should be:
          `loss, loss_grads = value_and_grad_func(params, batch)`.
        If `value_func_has_aux` is `True` then the interface should be:
          `(loss, aux), loss_grads = value_and_grad_func(params, batch)`.
l2_reg: Scalar. Set this value to tell the optimizer what L2
regularization coefficient you are using (if any). Note the coefficient
appears in the regularizer as coeff / 2 * sum(param**2). Note that the
user is still responsible for adding regularization to the loss.
value_func_has_aux: Boolean. Specifies whether the provided callable
`value_and_grad_func` returns the loss value only, or also some
auxiliary data. (Default: False)
      value_func_has_state: Boolean. Specifies whether the provided callable
        `value_and_grad_func` has a persistent state that is passed in, an
        updated version of which is also returned. (Default: False)
value_func_has_rng: Boolean. Specifies whether the provided callable
`value_and_grad_func` additionally takes as input an rng key.
(Default: False)
learning_rate_schedule: Callable. A schedule for the learning rate. This
should take as input the current step number and return a single
`jnp.ndarray` that represents the learning rate. (Default: None)
momentum_schedule: Callable. A schedule for the momentum. This should take
as input the current step number and return a single `jnp.ndarray`
that represents the momentum. (Default: None)
      damping_schedule: Callable. A schedule for the damping. This should take
        as input the current step number and return a single `jnp.ndarray`
        that represents the damping. (Default: None)
min_damping: Scalar. Minimum value the damping parameter can take. Note
that the default value of 1e-8 is quite arbitrary, and you may have to
adjust this up or down for your particular problem. If you are using a
non-zero value of l2_reg you *may* be able to set this to
zero. (Default: 1e-8)
max_damping: Scalar. Maximum value the damping parameter can take.
(Default: Infinity)
norm_constraint: Scalar. If specified, the update is scaled down so that
its approximate squared Fisher norm `v^T F v` is at most the specified
value.(Note that here `F` is the approximate curvature matrix, not the
exact.) (Default: None)
      num_burnin_steps: Int. At the start of optimization (i.e. on the first
        step), before performing the actual step, the optimizer will perform
        this many updates to the curvature approximation without updating the
        actual parameters. (Default: 10)
      estimation_mode: String. The type of estimator to use for the curvature
        matrix. Can be one of:
          * fisher_empirical
          * fisher_exact
          * fisher_gradients
          * fisher_curvature_prop
          * ggn_exact
          * ggn_curvature_prop
        See the doc-string for CurvatureEstimator (in estimator.py) for a more
        detailed description of these options. (Default: 'fisher_gradients').
curvature_ema: The decay factor used when calculating the covariance
estimate moving averages. (Default: 0.95)
      inverse_update_period: Int. The number of steps in between updates of
        the inverse curvature approximation. (Default: 5)
register_only_generic: Boolean. Whether when running the auto-tagger to
register only generic parameters, or allow it to use the graph matcher
to automatically pick up any kind of layer tags. (Default: False)
      layer_tag_to_block_cls: Dictionary. A mapping from layer tags to block
        classes, used to override the default choice of block approximation
        for those specific tags. See the doc-string for CurvatureEstimator
        (in estimator.py) for a more detailed description of this.
patterns_to_skip: Tuple. A list of any patterns that should be skipped by
the graph matcher when auto-tagging.
donate_parameters: Boolean. Whether to use jax's `donate_argnums` to
donate the parameter values of each call to `step`. Note that this
implies that you will not be able to access the old parameter values'
buffers after calling into `step`.
donate_optimizer_state: Boolean. Whether to use jax's `donate_argnums` to
donate the optimizer state of each call to `step`. Note that this
implies that you will not be able to access the old optimizer state
values' buffers after calling into `step`.
donate_batch_inputs: Boolean. Whether to use jax's `donate_argnums` to
donate the batch values of each call to `step`. Note that this implies
that you will not be able to access the old batch values' buffers after
calling into `step`.
donate_func_state: Boolean. Whether to use jax's `donate_argnums` to
donate the persistent function state of each call to `step`. Note that
this implies that you will not be able to access the old function state
values' buffers after calling into `step`.
      batch_process_func: Callable. A function to be called on each batch
        before it is fed to K-FAC on device. This could be useful for
        device-specific input optimizations.
multi_device: Boolean. Whether to use `pmap` and run the optimizer on
multiple devices. (Default: False)
use_jax_cond: Not used for the moment.
      debug: Boolean. If True, none of the step or init functions are jitted.
        Note that this also overrides `multi_device` and prevents using
        `pmap`. (Default: False)
      pmap_axis_name: String. The name of the `pmap` axis to use when
        `multi_device` is set to True. (Default: kfac_axis)
"""
super().__init__()
self.value_and_grad_func = value_and_grad_func
self.value_func_has_aux = value_func_has_aux
self.value_func_has_state = value_func_has_state
self.value_func_has_rng = value_func_has_rng
self.value_func = utils.convert_value_and_grad_to_value_func(
value_and_grad_func, has_aux=value_func_has_aux)
self.l2_reg = l2_reg
self.learning_rate_schedule = learning_rate_schedule
if momentum_schedule is not None:
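      # Wrap the momentum schedule so that the momentum is always zero on the
      # very first step (global_step == 0); see also `step` below, which uses
      # zero momentum on step 0 when no schedule is given.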
def schedule_with_first_step_zero(global_step: jnp.ndarray):
value = momentum_schedule(global_step)
check = jnp.equal(global_step, 0)
return check * jnp.zeros_like(value) + (1 - check) * value
self.momentum_schedule = schedule_with_first_step_zero
else:
self.momentum_schedule = None
self.damping_schedule = damping_schedule
self.min_damping = min_damping
self.max_damping = max_damping
self.norm_constraint = norm_constraint
self.num_burnin_steps = num_burnin_steps
self.estimation_mode = estimation_mode
self.curvature_ema = curvature_ema
self.inverse_update_period = inverse_update_period
self.register_only_generic = register_only_generic
self.layer_tag_to_block_cls = layer_tag_to_block_cls
self.patterns_to_skip = patterns_to_skip
self.donate_parameters = donate_parameters
self.donate_optimizer_state = donate_optimizer_state
self.donate_batch_inputs = donate_batch_inputs
self.donate_func_state = donate_func_state
self.batch_process_func = batch_process_func or (lambda x: x)
self.multi_device = multi_device
self.use_jax_cond = use_jax_cond
self.debug = debug
self.pmap_axis_name = pmap_axis_name if multi_device else None
self._rng_split = utils.p_split if multi_device else jnr.split
# Attributes filled in during self.init()
self.finalized = False
self.tagged_func = None
self.flat_params_shapes = None
self.params_treedef = None
# Special attributes related to jitting/pmap
self._jit_init = None
self._jit_burnin = None
self._jit_step = None
def finalize(
self,
params: Parameters,
rng: jnp.ndarray,
batch: Batch,
func_state: Optional[FuncState] = None,
) -> None:
"""Finalizes the optimizer by tracing the model function with the params and batch."""
if self.finalized:
raise ValueError("Optimizer has already been finalized.")
if self.multi_device:
# We assume that the parameters and batch are replicated, while tracing
# must happen with parameters for a single device call
params, rng, batch = jax.tree_map(lambda x: x[0], (params, rng, batch))
if func_state is not None:
func_state = jax.tree_map(lambda x: x[0], func_state)
batch = self.batch_process_func(batch)
# These are all tracing operations and we can run them with abstract values
func_args = utils.make_func_args(params, func_state, rng, batch,
self.value_func_has_state,
self.value_func_has_rng)
# Run all tracing with abstract values so no computation is done
flat_params, self.params_treedef = jax.tree_flatten(params)
self.flat_params_shapes = tuple(p.shape for p in flat_params)
self.tagged_func = tgm.auto_register_tags(
func=self.value_func,
func_args=func_args,
params_index=0,
register_only_generic=self.register_only_generic,
patterns_to_skip=self.patterns_to_skip)
self.estimator = estimator.CurvatureEstimator(
self.tagged_func,
func_args,
self.l2_reg,
self.estimation_mode,
layer_tag_to_block_cls=self.layer_tag_to_block_cls)
# Arguments: params, opt_state, rng, batch, func_state
donate_argnums = []
if self.donate_parameters:
donate_argnums.append(0)
if self.donate_optimizer_state:
donate_argnums.append(1)
if self.donate_batch_inputs:
donate_argnums.append(3)
if self.donate_func_state and self.value_func_has_state:
donate_argnums.append(4)
donate_argnums = tuple(donate_argnums)
if self.debug:
self._jit_init = self._init
self._jit_burnin = self._burnin
self._jit_step = self._step
elif self.multi_device:
self._jit_init = jax.pmap(
self._init, axis_name=self.pmap_axis_name, donate_argnums=[0])
# batch size is static argnum and is at index 5
self._jit_burnin = jax.pmap(
self._burnin,
axis_name=self.pmap_axis_name,
static_broadcasted_argnums=[5])
self._jit_step = jax.pmap(
self._step,
axis_name=self.pmap_axis_name,
donate_argnums=donate_argnums,
static_broadcasted_argnums=[5])
else:
self._jit_init = jax.jit(self._init, donate_argnums=[0])
# batch size is static argnum and is at index 5
self._jit_burnin = jax.jit(self._burnin, static_argnums=[5])
self._jit_step = jax.jit(
self._step, donate_argnums=donate_argnums, static_argnums=[5])
self.finalized = True
def _init(self, rng: jnp.ndarray) -> State:
"""This is the non-jitted version of initializing the state."""
flat_velocities = [jnp.zeros(shape) for shape in self.flat_params_shapes]
return dict(
velocities=jax.tree_unflatten(self.params_treedef, flat_velocities),
estimator=self.estimator.init(rng, None),
step_counter=jnp.asarray(0))
def verify_args_and_get_step_counter(
self,
params: Parameters,
state: State,
rng: jnp.ndarray,
data_iterator: Iterator[Batch],
func_state: Optional[FuncState] = None,
learning_rate: Optional[jnp.ndarray] = None,
momentum: Optional[jnp.ndarray] = None,
damping: Optional[jnp.ndarray] = None,
global_step_int: Optional[int] = None,
) -> int:
"""Verifies that the arguments passed to `Optimizer.step` are correct."""
if not self.finalized:
rng, rng_finalize = self._rng_split(rng)
self.finalize(params, rng_finalize, next(data_iterator), func_state)
# Verify correct arguments invocation
if self.learning_rate_schedule is not None and learning_rate is not None:
raise ValueError("When you have passed a `learning_rate_schedule` you "
"should not pass a value to the step function.")
if self.momentum_schedule is not None and momentum is not None:
raise ValueError("When you have passed a `momentum_schedule` you should "
"not pass a value to the step function.")
if self.damping_schedule is not None and damping is not None:
raise ValueError("When you have passed a `damping_schedule` you should "
"not pass a value to the step function.")
    # Do a burnin on the first iteration
if global_step_int is None:
if self.multi_device:
return int(utils.get_first(state["step_counter"]))
else:
return int(state["step_counter"])
return global_step_int
def _burnin(
self,
params: Parameters,
state: State,
rng: jnp.ndarray,
batch: Batch,
func_state: Optional[FuncState],
batch_size: Optional[int],
) -> Tuple[State, Optional[FuncState]]:
"""This is the non-jitted version of a single burnin step."""
self.set_state(state)
batch = self.batch_process_func(batch)
rng, func_rng = jnr.split(rng) if self.value_func_has_rng else (rng, None)
func_args = utils.make_func_args(params, func_state, func_rng, batch,
self.value_func_has_state,
self.value_func_has_rng)
# Compute batch size
if batch_size is None:
batch_size = jax.tree_flatten(batch)[0][0].shape[0]
# Update curvature estimate
ema_old, ema_new = 1.0, 1.0 / self.num_burnin_steps
self.estimator.update_curvature_matrix_estimate(ema_old, ema_new,
batch_size, rng, func_args,
self.pmap_axis_name)
if func_state is not None:
out, _ = self.value_and_grad_func(*func_args)
_, func_state, _ = utils.extract_func_outputs(out,
self.value_func_has_aux,
self.value_func_has_state)
return self.pop_state(), func_state
def _step(
self,
params: Parameters,
state: State,
rng: jnp.ndarray,
batch: Batch,
func_state: Optional[FuncState],
batch_size: Optional[int],
learning_rate: Optional[jnp.ndarray],
momentum: Optional[jnp.ndarray],
damping: Optional[jnp.ndarray],
) -> Union[Tuple[Parameters, State, FuncState, Mapping[str, jnp.ndarray]],
Tuple[Parameters, State, Mapping[str, jnp.ndarray]]]:
"""This is the non-jitted version of a single step."""
# Unpack and set the state
self.set_state(state)
if damping is not None:
assert self.estimator.damping is None
self.estimator.damping = damping
else:
assert self.estimator.damping is not None
# Preprocess the batch and construct correctly the function arguments
batch = self.batch_process_func(batch)
rng, func_rng = jnr.split(rng) if self.value_func_has_rng else (rng, None)
func_args = utils.make_func_args(params, func_state, func_rng, batch,
self.value_func_has_state,
self.value_func_has_rng)
# Compute the batch size
if batch_size is None:
batch_size = jax.tree_flatten(batch)[0][0].shape[0]
# Compute schedules if applicable
if self.learning_rate_schedule is not None:
assert learning_rate is None
learning_rate = self.learning_rate_schedule(self.step_counter)
else:
assert learning_rate is not None
if self.momentum_schedule is not None:
assert momentum is None
momentum = self.momentum_schedule(self.step_counter)
else:
assert momentum is not None
if self.damping_schedule is not None:
assert damping is None
damping = self.damping_schedule(self.step_counter)
else:
assert damping is not None
# Compute current loss and gradients
out, grads = self.value_and_grad_func(*func_args)
loss, new_func_state, aux = utils.extract_func_outputs(
out, self.value_func_has_aux, self.value_func_has_state)
# Sync loss and grads
loss, grads = utils.pmean_if_pmap((loss, grads), self.pmap_axis_name)
# Update curvature estimate
self.estimator.update_curvature_matrix_estimate(
self.curvature_ema,
1.0,
batch_size,
rng,
func_args,
self.pmap_axis_name,
)
# Optionally update the inverse estimate
self.estimator.set_state(
lax.cond(
self.step_counter % self.inverse_update_period == 0,
lambda s: self.estimator.update_curvature_estimate_inverse( # pylint: disable=g-long-lambda
self.pmap_axis_name, s),
lambda s: s,
self.estimator.pop_state()))
# Compute proposed directions
vectors = self.propose_directions(
grads,
self.velocities,
learning_rate,
momentum,
)
# The learning rate is defined as the negative of the coefficient by which
# we multiply the gradients, while the momentum is the coefficient by
# which we multiply the velocities.
neg_learning_rate = -learning_rate # pytype: disable=unsupported-operands # trace-all-classes
# Compute the coefficients of the update vectors
assert neg_learning_rate is not None and momentum is not None
coefficients = (neg_learning_rate, momentum)
# Update velocities and compute new delta
self.velocities, delta = self.velocities_and_delta(
self.velocities,
vectors,
coefficients,
)
# Update parameters: params = params + delta
params = jax.tree_map(jnp.add, params, delta)
# Optionally compute the reduction ratio and update the damping
self.estimator.damping = None
rho = jnp.nan
# Statistics with useful information
stats = dict()
stats["step"] = self.step_counter
stats["loss"] = loss
stats["learning_rate"] = -coefficients[0]
stats["momentum"] = coefficients[1]
stats["damping"] = damping
stats["rho"] = rho
if self.value_func_has_aux:
stats["aux"] = aux
self.step_counter = self.step_counter + 1
if self.value_func_has_state:
return params, self.pop_state(), new_func_state, stats
else:
assert new_func_state is None
return params, self.pop_state(), stats
def init(
self,
params: Parameters,
rng: jnp.ndarray,
batch: Batch,
func_state: Optional[FuncState] = None,
) -> State:
"""Initializes the optimizer and returns the appropriate optimizer state."""
if not self.finalized:
self.finalize(params, rng, batch, func_state)
return self._jit_init(rng)
def step(
self,
params: Parameters,
state: Mapping[str, Any],
rng: jnp.ndarray,
data_iterator: Iterator[Any],
func_state: Any = None,
learning_rate: Optional[jnp.ndarray] = None,
momentum: Optional[jnp.ndarray] = None,
damping: Optional[jnp.ndarray] = None,
batch_size: Optional[int] = None,
global_step_int: Optional[int] = None,
) -> Union[Tuple[Parameters, State, FuncState, Mapping[str, jnp.ndarray]],
Tuple[Parameters, State, Mapping[str, jnp.ndarray]]]:
"""Performs a single update step using the optimizer.
Args:
params: The parameters of the model.
state: The state of the optimizer.
rng: A Jax PRNG key.
data_iterator: An iterator that returns a batch of data.
func_state: Any function state that gets passed in and returned.
      learning_rate: This must be provided when `learning_rate_schedule=None`.
      momentum: This must be provided when `momentum_schedule=None`.
      damping: This must be provided when `damping_schedule=None`.
batch_size: The batch size to use for KFAC. The default behaviour when it
is None is to use the leading dimension of the first data array.
      global_step_int: The global step as a python int. Note that this must
        match the step internal to the optimizer that is part of its state.
Returns:
(params, state, stats)
where:
params: The updated model parameters.
state: The updated optimizer state.
stats: A dictionary of key statistics provided to be logged.
"""
step_counter_int = self.verify_args_and_get_step_counter(
params=params,
state=state,
rng=rng,
data_iterator=data_iterator,
func_state=func_state,
learning_rate=learning_rate,
momentum=momentum,
damping=damping,
global_step_int=global_step_int)
if step_counter_int == 0:
for _ in range(self.num_burnin_steps):
rng, rng_burn = self._rng_split(rng)
batch = next(data_iterator)
state, func_state = self._jit_burnin(params, state, rng_burn, batch,
func_state, batch_size)
# On the first step we always treat the momentum as 0.0
if self.momentum_schedule is None:
momentum = jnp.zeros([])
if self.multi_device:
momentum = utils.replicate_all_local_devices(momentum)
batch = next(data_iterator)
return self._jit_step(params, state, rng, batch, func_state, batch_size,
learning_rate, momentum, damping)
def propose_directions(
self,
grads: Parameters,
velocities: Parameters,
learning_rate: Optional[jnp.ndarray],
momentum: Optional[jnp.ndarray],
) -> Tuple[Parameters, Parameters]:
"""Computes the vector proposals for the next step."""
del momentum # not used in this, but could be used in subclasses
preconditioned_grads = self.estimator.multiply_matpower(grads, -1)
if self.norm_constraint is not None:
assert learning_rate is not None
sq_norm_grads = utils.inner_product(preconditioned_grads, grads)
sq_norm_scaled_grads = sq_norm_grads * learning_rate**2
      # We need to sync the norms here, because reductions are
      # non-deterministic by default on GPUs (for performance reasons). Hence,
      # although grads and preconditioned_grads are synced across devices, the
      # inner_product operation can still produce different answers on
      # different devices.
sq_norm_scaled_grads = utils.pmean_if_pmap(sq_norm_scaled_grads,
self.pmap_axis_name)
max_coefficient = jnp.sqrt(self.norm_constraint / sq_norm_scaled_grads)
coefficient = jnp.minimum(max_coefficient, 1)
preconditioned_grads = utils.scalar_mul(preconditioned_grads, coefficient)
return preconditioned_grads, velocities
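  # A note on the clipping above (a sketch of the norm constraint described
  # in the constructor): since `preconditioned_grads = F^-1 g`, the quantity
  # `sq_norm_scaled_grads = lr^2 * g^T F^-1 g` is the approximate squared
  # Fisher norm of the proposed update `-lr * F^-1 g`, and the update is
  # rescaled whenever that norm would exceed `norm_constraint`.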
def velocities_and_delta(
self,
velocities: Parameters,
vectors: Sequence[Parameters],
coefficients: Sequence[jnp.ndarray],
) -> Sequence[Parameters]:
"""Computes the new velocities and delta (update to parameters)."""
del velocities
assert len(vectors) == len(coefficients)
delta = utils.scalar_mul(vectors[0], coefficients[0])
for vi, wi in zip(vectors[1:], coefficients[1:]):
delta = jax.tree_map(jnp.add, delta, utils.scalar_mul(vi, wi))
return delta, delta
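  # Putting the pieces together, a single optimizer step implements (sketch):
  #   v_{t+1} = -lr * F^{-1} g + momentum * v_t
  #   theta_{t+1} = theta_t + v_{t+1}
  # where F is the block-diagonal, Kronecker-factored curvature estimate and
  # F^{-1} g is optionally rescaled by the norm constraint above.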
# ---- end of kfac_ferminet_alpha/optimizer.py (deepmind-research-master) ----
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines the high-level Fisher estimator class."""
import collections
from typing import Any, Callable, Mapping, Optional, Sequence, Union, TypeVar
import jax
import jax.numpy as jnp
import jax.random as jnr
import numpy as np
from kfac_ferminet_alpha import curvature_blocks
from kfac_ferminet_alpha import tracer
from kfac_ferminet_alpha import utils
_CurvatureBlock = curvature_blocks.CurvatureBlock
TagMapping = Mapping[str, curvature_blocks.CurvatureBlockCtor]
BlockVector = Sequence[jnp.ndarray]
_StructureT = TypeVar("_StructureT")
_OptionalStateT = TypeVar("_OptionalStateT", bound=Optional[Mapping[str, Any]])
@utils.Stateful.infer_class_state
class CurvatureEstimator(utils.Stateful):
"""Curvature estimator class supporting various curvature approximations."""
blocks: "collections.OrderedDict[str, _CurvatureBlock]"
damping: Optional[jnp.ndarray]
def __init__(self,
tagged_func: Callable[[Any], jnp.ndarray],
func_args: Sequence[Any],
l2_reg: Union[float, jnp.ndarray],
estimation_mode: str = "fisher_gradients",
params_index: int = 0,
layer_tag_to_block_cls: Optional[TagMapping] = None):
"""Create a FisherEstimator object.
Args:
      tagged_func: The function which evaluates the model, in which layer and
        loss tags have already been registered.
func_args: Arguments to trace the function for layer and loss tags.
l2_reg: Scalar. The L2 regularization coefficient, which represents
the following regularization function: `coefficient/2 ||theta||^2`.
      estimation_mode: The type of curvature estimator to use. One of:
        * 'fisher_gradients' - the basic estimation approach from the
          original K-FAC paper. (Default)
        * 'fisher_curvature_prop' - method which estimates the Fisher using
          self-products of random 1/-1 vectors times "half-factors" of the
          Fisher, as described here: https://arxiv.org/abs/1206.6464
        * 'fisher_exact' - is the obvious generalization of Curvature
          Propagation to compute the exact Fisher (modulo any additional
          diagonal or Kronecker approximations) by looping over one-hot
          vectors for each coordinate of the output instead of using 1/-1
          vectors. It is more expensive to compute than the other three
          options by a factor equal to the output dimension, roughly
          speaking.
        * 'fisher_empirical' - computes the 'empirical' Fisher information
          matrix (which uses the data's distribution for the targets, as
          opposed to the true Fisher which uses the model's distribution)
          and requires that each registered loss have specified targets.
        * 'ggn_curvature_prop' - Analogous to fisher_curvature_prop, but
          estimates the Generalized Gauss-Newton matrix (GGN).
        * 'ggn_exact' - Analogous to fisher_exact, but estimates the
          Generalized Gauss-Newton matrix (GGN).
params_index: The index of the arguments accepted by `func` which
correspond to parameters.
      layer_tag_to_block_cls: An optional dict mapping tags to specific
        classes of block approximations, used to override the default ones.
"""
if estimation_mode not in ("fisher_gradients", "fisher_empirical",
"fisher_exact", "fisher_curvature_prop",
"ggn_exact", "ggn_curvature_prop"):
raise ValueError(f"Unrecognised estimation_mode={estimation_mode}.")
super().__init__()
self.tagged_func = tagged_func
self.l2_reg = l2_reg
self.estimation_mode = estimation_mode
self.params_index = params_index
self.vjp = tracer.trace_estimator_vjp(self.tagged_func)
# Figure out the mapping from layer
self.layer_tag_to_block_cls = curvature_blocks.copy_default_tag_to_block()
if layer_tag_to_block_cls is None:
layer_tag_to_block_cls = dict()
layer_tag_to_block_cls = dict(**layer_tag_to_block_cls)
self.layer_tag_to_block_cls.update(layer_tag_to_block_cls)
# Create the blocks
self._in_tree = jax.tree_structure(func_args)
self._jaxpr = jax.make_jaxpr(self.tagged_func)(*func_args).jaxpr
self._layer_tags, self._loss_tags = tracer.extract_tags(self._jaxpr)
self.blocks = collections.OrderedDict()
counters = dict()
for eqn in self._layer_tags:
cls = self.layer_tag_to_block_cls[eqn.primitive.name]
c = counters.get(cls.__name__, 0)
self.blocks[cls.__name__ + "_" + str(c)] = cls(eqn)
counters[cls.__name__] = c + 1
@property
def diagonal_weight(self) -> jnp.ndarray:
return self.l2_reg + self.damping
def vectors_to_blocks(
self,
parameter_structured_vector: Any,
) -> Sequence[BlockVector]:
"""Splits the parameters to values for the corresponding blocks."""
in_vars = jax.tree_unflatten(self._in_tree, self._jaxpr.invars)
params_vars = in_vars[self.params_index]
params_vars_flat = jax.tree_flatten(params_vars)[0]
params_values_flat = jax.tree_flatten(parameter_structured_vector)[0]
assert len(params_vars_flat) == len(params_values_flat)
params_dict = dict(zip(params_vars_flat, params_values_flat))
per_block_vectors = []
for eqn in self._layer_tags:
if eqn.primitive.name == "generic_tag":
block_vars = eqn.invars
else:
block_vars = eqn.primitive.split_all_inputs(eqn.invars)[2] # pytype: disable=attribute-error # trace-all-classes
per_block_vectors.append(tuple(params_dict.pop(v) for v in block_vars))
if params_dict:
raise ValueError(f"From the parameters the following structure is not "
f"assigned to any block: {params_dict}. Most likely "
f"this part of the parameters is not part of the graph "
f"reaching the losses.")
return tuple(per_block_vectors)
def blocks_to_vectors(self, per_block_vectors: Sequence[BlockVector]) -> Any:
"""Reverses the function self.vectors_to_blocks."""
in_vars = jax.tree_unflatten(self._in_tree, self._jaxpr.invars)
params_vars = in_vars[self.params_index]
assigned_dict = dict()
for eqn, block_values in zip(self._layer_tags, per_block_vectors):
if eqn.primitive.name == "generic_tag":
block_params = eqn.invars
else:
block_params = eqn.primitive.split_all_inputs(eqn.invars)[2] # pytype: disable=attribute-error # trace-all-classes
assigned_dict.update(zip(block_params, block_values))
params_vars_flat, params_tree = jax.tree_flatten(params_vars)
params_values_flat = [assigned_dict[v] for v in params_vars_flat]
assert len(params_vars_flat) == len(params_values_flat)
return jax.tree_unflatten(params_tree, params_values_flat)
def init(
self,
rng: jnp.ndarray,
init_damping: Optional[jnp.ndarray],
) -> Mapping[str, Any]:
"""Returns an initialized variables for the curvature approximations and the inverses.."""
return dict(
blocks=collections.OrderedDict(
(name, block.init(block_rng)) #
for (name, block), block_rng #
in zip(self.blocks.items(), jnr.split(rng, len(self.blocks)))),
damping=init_damping)
@property
def mat_type(self) -> str:
return self.estimation_mode.split("_")[0]
def vec_block_apply(
self,
func: Callable[[_CurvatureBlock, BlockVector], BlockVector],
parameter_structured_vector: Any,
) -> Any:
"""Executes func for each approximation block on vectors."""
per_block_vectors = self.vectors_to_blocks(parameter_structured_vector)
assert len(per_block_vectors) == len(self.blocks)
results = jax.tree_map(func, tuple(self.blocks.values()),
per_block_vectors)
parameter_structured_result = self.blocks_to_vectors(results)
utils.check_structure_shapes_and_dtype(parameter_structured_vector,
parameter_structured_result)
return parameter_structured_result
def multiply_inverse(self, parameter_structured_vector: Any) -> Any:
"""Multiplies the vectors by the corresponding (damped) inverses of the blocks.
Args:
parameter_structured_vector: Structure equivalent to the parameters of the
model.
Returns:
      A structure identical to `parameter_structured_vector`, containing the product.
"""
return self.multiply_matpower(parameter_structured_vector, -1)
def multiply(self, parameter_structured_vector: Any) -> Any:
"""Multiplies the vectors by the corresponding (damped) blocks.
Args:
parameter_structured_vector: A vector in the same structure as the
parameters of the model.
Returns:
      A structure identical to `parameter_structured_vector`, containing the product.
"""
return self.multiply_matpower(parameter_structured_vector, 1)
def multiply_matpower(
self,
parameter_structured_vector: _StructureT,
exp: int,
) -> _StructureT:
"""Multiplies the vectors by the corresponding matrix powers of the blocks.
Args:
parameter_structured_vector: A vector in the same structure as the
parameters of the model.
      exp: The power to raise the blocks to before multiplying the vector by
        them.
Returns:
      A structure identical to `parameter_structured_vector`, containing the product.
"""
def func(block: _CurvatureBlock, vec: BlockVector) -> BlockVector:
return block.multiply_matpower(vec, exp, self.diagonal_weight)
return self.vec_block_apply(func, parameter_structured_vector)
def update_curvature_matrix_estimate(
self,
ema_old: Union[float, jnp.ndarray],
ema_new: Union[float, jnp.ndarray],
batch_size: int,
rng: jnp.ndarray,
func_args: Sequence[Any],
pmap_axis_name: str,
) -> None:
"""Updates the curvature estimate."""
# Compute the losses and the VJP function from the function inputs
losses, losses_vjp = self.vjp(func_args)
# Helper function that updates the blocks given a vjp vector
def _update_blocks(vjp_vec_, ema_old_, ema_new_):
blocks_info_ = losses_vjp(vjp_vec_)
for block_, block_info_ in zip(self.blocks.values(), blocks_info_):
block_.update_curvature_matrix_estimate(
info=block_info_,
batch_size=batch_size,
ema_old=ema_old_,
ema_new=ema_new_,
pmap_axis_name=pmap_axis_name)
if self.estimation_mode == "fisher_gradients":
keys = jnr.split(rng, len(losses)) if len(losses) > 1 else [rng]
vjp_vec = tuple(
loss.grad_of_evaluate_on_sample(key, coefficient_mode="sqrt")
for loss, key in zip(losses, keys))
_update_blocks(vjp_vec, ema_old, ema_new)
elif self.estimation_mode in ("fisher_curvature_prop",
"ggn_curvature_prop"):
keys = jnr.split(rng, len(losses)) if len(losses) > 1 else [rng]
vjp_vec = []
for loss, key in zip(losses, keys):
if self.estimation_mode == "fisher_curvature_prop":
random_b = jnr.bernoulli(key, shape=loss.fisher_factor_inner_shape())
vjp_vec.append(loss.multiply_fisher_factor(random_b * 2.0 - 1.0))
else:
random_b = jnr.bernoulli(key, shape=loss.ggn_factor_inner_shape())
vjp_vec.append(loss.multiply_ggn_factor(random_b * 2.0 - 1.0))
_update_blocks(tuple(vjp_vec), ema_old, ema_new)
elif self.estimation_mode in ("fisher_exact", "ggn_exact"):
# We use the following trick to simulate summation. The equation is:
# estimate = ema_old * estimate + ema_new * (sum_i estimate_index_i)
# weight = ema_old * weight + ema_new
# Instead we update the estimate n times with the following updates:
# for k = 1
# estimate_k = ema_old * estimate + (ema_new/n) * (n*estimate_index_k)
# weight_k = ema_old * weight + (ema_new/n)
# for k > 1:
# estimate_k = 1.0 * estimate_k-1 + (ema_new/n) * (n*estimate_index_k)
# weight_k = 1.0 * weight_k-1 + (ema_new/n)
# Which is mathematically equivalent to the original version.
zero_tangents = jax.tree_map(jnp.zeros_like,
list(loss.inputs for loss in losses))
if self.estimation_mode == "fisher_exact":
num_indices = [
(l, int(np.prod(l.fisher_factor_inner_shape[1:]))) for l in losses
]
else:
num_indices = [
(l, int(np.prod(l.ggn_factor_inner_shape()))) for l in losses
]
total_num_indices = sum(n for _, n in num_indices)
for i, (loss, loss_num_indices) in enumerate(num_indices):
for index in range(loss_num_indices):
vjp_vec = zero_tangents.copy()
if self.estimation_mode == "fisher_exact":
vjp_vec[i] = loss.multiply_fisher_factor_replicated_one_hot([index])
else:
vjp_vec[i] = loss.multiply_ggn_factor_replicated_one_hot([index])
if isinstance(vjp_vec[i], jnp.ndarray):
# In the special case of only one parameter, it still needs to be a
# tuple for the tangents.
vjp_vec[i] = (vjp_vec[i],)
vjp_vec[i] = jax.tree_map(lambda x: x * total_num_indices, vjp_vec[i])
_update_blocks(tuple(vjp_vec), ema_old, ema_new / total_num_indices)
ema_old = 1.0
elif self.estimation_mode == "fisher_empirical":
raise NotImplementedError()
else:
raise ValueError(f"Unrecognised estimation_mode={self.estimation_mode}")
def update_curvature_estimate_inverse(
self,
pmap_axis_name: str,
state: _OptionalStateT,
) -> _OptionalStateT:
if state is not None:
old_state = self.get_state()
self.set_state(state)
for block in self.blocks.values():
block.update_curvature_inverse_estimate(self.diagonal_weight,
pmap_axis_name)
if state is None:
return None
else:
state = self.pop_state()
self.set_state(old_state)
return state
# ---- end of kfac_ferminet_alpha/estimator.py (deepmind-research-master) ----
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for all of the different curvature blocks."""
import abc
from typing import Any, Callable, Dict, Mapping, MutableMapping, Optional, Sequence, Union
import jax
from jax import core
import jax.numpy as jnp
from kfac_ferminet_alpha import tag_graph_matcher as tgm
from kfac_ferminet_alpha import utils
_Arrays = Sequence[jnp.ndarray]
_BlockInfo = Mapping[str, Any]
class CurvatureBlock(utils.Stateful, abc.ABC):
"""Top level class."""
def __init__(self, layer_tag_eq: tgm.jax_core.JaxprEqn):
super(CurvatureBlock, self).__init__()
self._layer_tag_eq = layer_tag_eq
@property
def layer_tag_primitive(self) -> tgm.tags.LayerTag:
assert isinstance(self._layer_tag_eq.primitive, tgm.tags.LayerTag)
return self._layer_tag_eq.primitive
@property
def outputs_shapes(self) -> Sequence[Sequence[int]]:
output_vars = self.layer_tag_primitive.split_all_inputs(
self._layer_tag_eq.invars)[0]
return jax.tree_map(lambda x: x.aval.shape, output_vars)
@property
def inputs_shapes(self) -> Sequence[Sequence[int]]:
input_vars = self.layer_tag_primitive.split_all_inputs(
self._layer_tag_eq.invars)[1]
return jax.tree_map(lambda x: x.aval.shape, input_vars)
@property
def params_shapes(self) -> Sequence[Sequence[int]]:
params_vars = self.layer_tag_primitive.split_all_inputs(
self._layer_tag_eq.invars)[2]
return jax.tree_map(lambda x: x.aval.shape, params_vars)
@abc.abstractmethod
def init(self, rng: jnp.ndarray) -> MutableMapping[str, Any]:
"""This initializes/creates all of the arrays for the state of the block.
Usually this would include the arrays used for storing the curvature
approximation, as well as the arrays for storing any approximate
inverses/powers of the curvature block.
Args:
rng: The Jax PRNG key to use if any of the state is supposed to be
initialized randomly.
Returns:
A mutable mapping of the state.
"""
@abc.abstractmethod
def update_curvature_matrix_estimate(
self,
info: _BlockInfo,
batch_size: int,
ema_old: Union[float, jnp.ndarray],
ema_new: Union[float, jnp.ndarray],
pmap_axis_name: str
) -> None:
pass
@abc.abstractmethod
def update_curvature_inverse_estimate(
self,
diagonal_weight: Union[float, jnp.ndarray],
pmap_axis_name: str
) -> None:
pass
@abc.abstractmethod
def multiply_matpower(
self,
vec: _Arrays,
exp: Union[float, int],
diagonal_weight: Union[float, jnp.ndarray]
) -> _Arrays:
pass
CurvatureBlockCtor = Callable[[core.JaxprEqn], CurvatureBlock]
@utils.Stateful.infer_class_state
class NaiveDiagonal(CurvatureBlock):
"""The naively estimated diagonal block."""
diagonal_factor: utils.WeightedMovingAverage
def init(self, rng: jnp.ndarray) -> Dict[str, Any]:
del rng
return dict(
diagonal_factor=utils.WeightedMovingAverage.zero(
self.outputs_shapes[0])
)
def update_curvature_matrix_estimate(
self,
info: _BlockInfo,
batch_size: int,
ema_old: Union[float, jnp.ndarray],
ema_new: Union[float, jnp.ndarray],
pmap_axis_name: str
) -> None:
dw, = info["outputs_tangent"]
diagonal_update = dw * dw / batch_size
self.diagonal_factor.update(diagonal_update, ema_old, ema_new)
self.diagonal_factor.sync(pmap_axis_name)
def update_curvature_inverse_estimate(
self,
diagonal_weight: Union[float, jnp.ndarray],
pmap_axis_name: str
) -> None:
pass
def multiply_matpower(
self,
vec: _Arrays,
exp: Union[float, int],
diagonal_weight: Union[float, jnp.ndarray]
) -> _Arrays:
w, = vec
if exp == 1:
return w * (self.diagonal_factor.value + diagonal_weight),
elif exp == -1:
return w / (self.diagonal_factor.value + diagonal_weight),
else:
raise NotImplementedError()
@utils.Stateful.infer_class_state
class TwoKroneckerFactored(CurvatureBlock, abc.ABC):
"""A factor that is the Kronecker product of two matrices."""
inputs_factor: utils.WeightedMovingAverage
inputs_factor_inverse: jnp.ndarray
outputs_factor: utils.WeightedMovingAverage
outputs_factor_inverse: jnp.ndarray
extra_scale: Optional[Union[int, float, jnp.ndarray]]
@property
def has_bias(self) -> bool:
return len(self._layer_tag_eq.invars) == 4
@abc.abstractmethod
def input_size(self) -> int:
pass
@abc.abstractmethod
def output_size(self) -> int:
pass
def compute_extra_scale(self) -> Optional[Union[int, float, jnp.ndarray]]:
return 1
def init(self, rng: jnp.ndarray) -> Dict[str, Any]:
# The extra scale is technically a constant, but in general it could be
# useful for anyone examining the state to know it explicitly,
# hence we actually keep it as part of the state.
d_in = self.input_size()
d_out = self.output_size()
return dict(
inputs_factor=utils.WeightedMovingAverage.zero([d_in, d_in]),
inputs_factor_inverse=jnp.zeros([d_in, d_in]),
outputs_factor=utils.WeightedMovingAverage.zero([d_out, d_out]),
outputs_factor_inverse=jnp.zeros([d_out, d_out]),
extra_scale=self.compute_extra_scale()
)
def update_curvature_inverse_estimate(
self,
diagonal_weight: Union[float, jnp.ndarray],
pmap_axis_name: str
) -> None:
self.inputs_factor.sync(pmap_axis_name)
self.outputs_factor.sync(pmap_axis_name)
# This computes the approximate inverse factor using the pi-adjusted
# inversion from the original KFAC paper.
# Note that the damping is divided by extra_scale since:
# (s * A kron B + lambda I)^-1 = s^-1 (A kron B + s^-1 * lambda I)^-1
# And the extra division by the scale is included in `multiply_matpower`.
(self.inputs_factor_inverse,
self.outputs_factor_inverse) = utils.pi_adjusted_inverse(
factor_0=self.inputs_factor.value,
factor_1=self.outputs_factor.value,
damping=diagonal_weight / self.extra_scale,
pmap_axis_name=pmap_axis_name)
def multiply_matpower(
self,
vec: _Arrays,
exp: Union[float, int],
diagonal_weight: Union[float, jnp.ndarray]
) -> _Arrays:
if self.has_bias:
w, b = vec
vec = jnp.concatenate([w.reshape([-1, w.shape[-1]]), b[None]], axis=0)
else:
w, = vec
vec = w.reshape([-1, w.shape[-1]])
if exp == 1:
inputs_factor, outputs_factor = (self.inputs_factor.value,
self.outputs_factor.value)
scale = self.extra_scale
elif exp == -1:
inputs_factor, outputs_factor = (self.inputs_factor_inverse,
self.outputs_factor_inverse)
scale = 1.0 / self.extra_scale
diagonal_weight = 0
else:
raise NotImplementedError()
result = jnp.matmul(inputs_factor, vec)
result = jnp.matmul(result, outputs_factor)
result = result * scale + diagonal_weight * vec
if self.has_bias:
w_new, b_new = result[:-1], result[-1]
return w_new.reshape(w.shape), b_new
else:
return result.reshape(w.shape),
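# Sanity sketch for `multiply_matpower` above (illustrative only; the factor
# shapes and values below are made up). For inputs_factor A of shape
# [d_in, d_in], outputs_factor B of shape [d_out, d_out] and a weight matrix
# V of shape [d_in, d_out], the block computes A @ V @ B. By the standard
# identity vec(A V B) = (B^T kron A) vec(V), with column-major vectorization,
# this is exactly the action of the Kronecker product on the flattened
# weights:
#
#   import jax.numpy as jnp
#   A = jnp.arange(9.0).reshape([3, 3])
#   B = jnp.arange(16.0).reshape([4, 4])
#   V = jnp.arange(12.0).reshape([3, 4])
#   lhs = (A @ V @ B).T.flatten()            # column-major vec of A @ V @ B
#   rhs = jnp.kron(B.T, A) @ V.T.flatten()   # (B^T kron A) @ vec(V)
#   assert jnp.allclose(lhs, rhs)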
class DenseTwoKroneckerFactored(TwoKroneckerFactored):
"""Factor for a standard dense layer."""
def input_size(self) -> int:
if self.has_bias:
return self.params_shapes[0][0] + 1
else:
return self.params_shapes[0][0]
def output_size(self) -> int:
return self.params_shapes[0][1]
def update_curvature_matrix_estimate(
self,
info: _BlockInfo,
batch_size: int,
ema_old: Union[float, jnp.ndarray],
ema_new: Union[float, jnp.ndarray],
pmap_axis_name: str
) -> None:
del pmap_axis_name
(x,), (dy,) = info["inputs"], info["outputs_tangent"]
utils.check_first_dim_is_batch_size(batch_size, x, dy)
if self.has_bias:
x_one = jnp.ones_like(x[:, :1])
x = jnp.concatenate([x, x_one], axis=1)
input_stats = jnp.matmul(x.T, x) / batch_size
output_stats = jnp.matmul(dy.T, dy) / batch_size
self.inputs_factor.update(input_stats, ema_old, ema_new)
self.outputs_factor.update(output_stats, ema_old, ema_new)
@utils.Stateful.infer_class_state
class ScaleAndShiftDiagonal(CurvatureBlock):
"""A scale and shift block with a diagonal approximation to the curvature."""
scale_factor: Optional[utils.WeightedMovingAverage]
shift_factor: Optional[utils.WeightedMovingAverage]
@property
def has_scale(self) -> bool:
return self._layer_tag_eq.params["has_scale"]
@property
def has_shift(self) -> bool:
return self._layer_tag_eq.params["has_shift"]
def init(self, rng: jnp.ndarray) -> Dict[str, Any]:
del rng
if self.has_scale and self.has_shift:
return dict(
scale_factor=utils.WeightedMovingAverage.zero(
self.params_shapes[0]
),
shift_factor=utils.WeightedMovingAverage.zero(
self.params_shapes[1]
)
)
elif self.has_scale:
return dict(
scale_factor=utils.WeightedMovingAverage.zero(
self.params_shapes[0]
),
shift_factor=None
)
elif self.has_shift:
return dict(
scale_factor=None,
shift_factor=utils.WeightedMovingAverage.zero(
self.params_shapes[0]
),
)
else:
raise ValueError("Neither `has_scale` nor `has_shift`.")
def update_curvature_matrix_estimate(
self,
info: _BlockInfo,
batch_size: int,
ema_old: Union[float, jnp.ndarray],
ema_new: Union[float, jnp.ndarray],
pmap_axis_name: str
) -> None:
(x,), (dy,) = info["inputs"], info["outputs_tangent"]
utils.check_first_dim_is_batch_size(batch_size, x, dy)
if self.has_scale:
assert self.scale_factor is not None
scale_shape = info["params"][0].shape
full_scale_shape = (1,) * (len(x.shape) - len(scale_shape)) + scale_shape
axis = [i for i, s in enumerate(full_scale_shape) if s == 1 and i != 0]
d_scale = jnp.sum(x * dy, axis=axis)
scale_diag_update = jnp.sum(d_scale * d_scale, axis=0) / batch_size
self.scale_factor.update(scale_diag_update, ema_old, ema_new) # pytype: disable=attribute-error # trace-all-classes
self.scale_factor.sync(pmap_axis_name) # pytype: disable=attribute-error # trace-all-classes
if self.has_shift:
assert self.shift_factor is not None
shift_shape = info["params"][1].shape
full_shift_shape = (1,) * (len(x.shape) - len(shift_shape)) + shift_shape
axis = [i for i, s in enumerate(full_shift_shape) if s == 1 and i != 0]
d_shift = jnp.sum(dy, axis=axis)
shift_diag_update = jnp.sum(d_shift * d_shift, axis=0) / batch_size
self.shift_factor.update(shift_diag_update, ema_old, ema_new) # pytype: disable=attribute-error # trace-all-classes
self.shift_factor.sync(pmap_axis_name) # pytype: disable=attribute-error # trace-all-classes
def update_curvature_inverse_estimate(
self,
diagonal_weight: Union[float, jnp.ndarray],
pmap_axis_name: str
) -> None:
pass
def multiply_matpower(
self,
vec: _Arrays,
exp: Union[float, int],
diagonal_weight: Union[float, jnp.ndarray]
) -> _Arrays:
if self.has_scale and self.has_shift:
factors = (self.scale_factor.value, self.shift_factor.value) # pytype: disable=attribute-error # trace-all-classes
elif self.has_scale:
factors = (self.scale_factor.value,) # pytype: disable=attribute-error # trace-all-classes
elif self.has_shift:
factors = (self.shift_factor.value,) # pytype: disable=attribute-error # trace-all-classes
else:
raise ValueError("Neither `has_scale` nor `has_shift`.")
factors = jax.tree_map(lambda x: x + diagonal_weight, factors)
if exp == 1:
return jax.tree_map(jnp.multiply, vec, factors)
elif exp == -1:
return jax.tree_map(jnp.divide, vec, factors)
else:
raise NotImplementedError()
@utils.Stateful.infer_class_state
class ScaleAndShiftFull(CurvatureBlock):
"""A scale and shift block with full approximation to the curvature."""
factor: utils.WeightedMovingAverage
inverse_factor: jnp.ndarray
@property
def _has_scale(self) -> bool:
return self._layer_tag_eq.params["has_scale"]
@property
def _has_shift(self) -> bool:
return self._layer_tag_eq.params["has_shift"]
def init(self, rng: jnp.ndarray) -> Dict[str, Any]:
del rng
dims = sum(utils.product(shape) for shape in self.params_shapes)
return dict(
factor=utils.WeightedMovingAverage.zero([dims, dims]),
inverse_factor=jnp.zeros([dims, dims])
)
def update_curvature_matrix_estimate(
self,
info: _BlockInfo,
batch_size: int,
ema_old: Union[float, jnp.ndarray],
ema_new: Union[float, jnp.ndarray],
pmap_axis_name: str
) -> None:
del pmap_axis_name
(x,), (dy,) = info["inputs"], info["outputs_tangent"]
utils.check_first_dim_is_batch_size(batch_size, x, dy)
grads = list()
if self._has_scale:
# Scale gradients
scale_shape = info["params"][0].shape
full_scale_shape = (1,) * (len(x.shape) - len(scale_shape)) + scale_shape
axis = [i for i, s in enumerate(full_scale_shape) if s == 1 and i != 0]
d_scale = jnp.sum(x * dy, axis=axis)
d_scale = d_scale.reshape([batch_size, -1])
grads.append(d_scale)
if self._has_shift:
# Shift gradients
shift_shape = info["params"][1].shape
full_shift_shape = (1,) * (len(x.shape) - len(shift_shape)) + shift_shape
axis = [i for i, s in enumerate(full_shift_shape) if s == 1 and i != 0]
d_shift = jnp.sum(dy, axis=axis)
d_shift = d_shift.reshape([batch_size, -1])
grads.append(d_shift)
grads = jnp.concatenate(grads, axis=1)
factor_update = jnp.matmul(grads.T, grads) / batch_size
self.factor.update(factor_update, ema_old, ema_new)
def update_curvature_inverse_estimate(
self,
diagonal_weight: Union[float, jnp.ndarray],
pmap_axis_name: str
) -> None:
self.factor.sync(pmap_axis_name)
self.inverse_factor = utils.psd_inv_cholesky(self.factor.value,
diagonal_weight)
def multiply_matpower(
self,
vec: _Arrays,
exp: Union[float, int],
diagonal_weight: Union[float, jnp.ndarray]
) -> _Arrays:
# Remember the vector is a tuple of all parameters
if self._has_scale and self._has_shift:
flat_vec = jnp.concatenate([v.flatten() for v in vec])
else:
flat_vec = vec[0].flatten()
if exp == 1:
flat_result = (
jnp.matmul(self.factor.value, flat_vec) + diagonal_weight * flat_vec)
elif exp == -1:
flat_result = jnp.matmul(self.inverse_factor, flat_vec)
else:
raise NotImplementedError()
if self._has_scale and self._has_shift:
scale_dims = int(vec[0].size)
scale_result = flat_result[:scale_dims].reshape(vec[0].shape)
shift_result = flat_result[scale_dims:].reshape(vec[1].shape)
return scale_result, shift_result
else:
      return flat_result.reshape(vec[0].shape),
_default_tag_to_block: MutableMapping[str, CurvatureBlockCtor] = dict(
dense_tag=DenseTwoKroneckerFactored,
generic_tag=NaiveDiagonal,
scale_and_shift_tag=ScaleAndShiftDiagonal,
)
def copy_default_tag_to_block() -> MutableMapping[str, CurvatureBlockCtor]:
return dict(_default_tag_to_block)
def get_default_tag_to_block(tag_name: str) -> CurvatureBlockCtor:
return _default_tag_to_block[tag_name]
def set_default_tag_to_block(
tag_name: str,
block_class: CurvatureBlockCtor,
) -> None:
_default_tag_to_block[tag_name] = block_class
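# Example (illustrative sketch): swapping the default curvature approximation
# used for scale-and-shift layers to the full block defined above. Note that
# `set_default_tag_to_block` mutates the module-level registry, so this has to
# run before an estimator instantiates its blocks:
#
#   set_default_tag_to_block("scale_and_shift_tag", ScaleAndShiftFull)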
| deepmind-research-master | kfac_ferminet_alpha/curvature_blocks.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for the Jax tracer functionality for tags."""
import functools
from typing import Any, Callable, Sequence, Tuple
import jax
from jax import core
from jax import util as jax_util
import jax.numpy as jnp
from kfac_ferminet_alpha import layers_and_loss_tags as tags
from kfac_ferminet_alpha import tag_graph_matcher as tgm
from kfac_ferminet_alpha import utils
_Function = Callable[[Any], Any]
_Loss = tags.LossTag
def extract_tags(
jaxpr: core.Jaxpr
) -> Tuple[Sequence[core.JaxprEqn], Sequence[core.JaxprEqn]]:
"""Extracts all of the tag equations."""
# Loop through equations and evaluate primitives using `bind`
layer_tags = []
loss_tags = []
for eqn in jaxpr.eqns:
if isinstance(eqn.primitive, tags.LossTag):
loss_tags.append(eqn)
elif isinstance(eqn.primitive, tags.LayerTag):
layer_tags.append(eqn)
return tuple(layer_tags), tuple(loss_tags)
def construct_compute_losses_inputs(
jaxpr: core.Jaxpr,
consts: Tuple[Any],
num_losses: int,
primals: Any,
params_index: int) -> Callable[[Any], Sequence[Sequence[jnp.ndarray]]]:
"""Constructs a function that computes all of the inputs to all losses."""
primals_ = list(primals)
def forward_compute_losses(
params_primals: Any,
) -> Sequence[Sequence[jnp.ndarray]]:
primals_[params_index] = params_primals
flat_args = jax.tree_flatten(primals_)[0]
# Mapping from variable -> value
env = dict()
read = functools.partial(tgm.read_env, env)
write = functools.partial(tgm.write_env, env)
# Bind args and consts to environment
write(jax.core.unitvar, jax.core.unit)
jax_util.safe_map(write, jaxpr.invars, flat_args)
jax_util.safe_map(write, jaxpr.constvars, consts)
# Loop through equations and evaluate primitives using `bind`
losses_so_far = 0
loss_tags = []
for eqn in jaxpr.eqns:
tgm.evaluate_eqn(eqn, jax_util.safe_map(read, eqn.invars), write)
if isinstance(eqn.primitive, tags.LossTag):
loss_tags.append(eqn)
losses_so_far += 1
if num_losses is not None and losses_so_far == num_losses:
break
return tuple(tuple(read(v) for v in tag.invars) for tag in loss_tags)
# return tuple(jax_util.safe_map(read, tag.invars) for tag in loss_tags)
return forward_compute_losses
# We know when `.primitive` will be either a `LossTag` or a `LayerTag`, however
# pytype cannot infer its subclass, so we need to unbox it.
def _unbox_loss_tag(jaxpr_eqn: core.JaxprEqn) -> tags.LossTag:
assert isinstance(jaxpr_eqn.primitive, tags.LossTag)
return jaxpr_eqn.primitive
def _unbox_layer_tag(jaxpr_eqn: core.JaxprEqn) -> tags.LayerTag:
assert isinstance(jaxpr_eqn.primitive, tags.LayerTag)
return jaxpr_eqn.primitive
def trace_losses_matrix_vector_vjp(tagged_func: _Function,
params_index: int = 0):
"""Returns the Jacobian-transposed vector product (backward mode) function in equivalent form to jax.vjp."""
def vjp(*primals):
typed_jaxpr = jax.make_jaxpr(tagged_func)(*primals)
jaxpr, consts = typed_jaxpr.jaxpr, typed_jaxpr.literals
_, loss_jaxpr_eqns = extract_tags(jaxpr)
n = len(loss_jaxpr_eqns)
losses_func = construct_compute_losses_inputs(
jaxpr, consts, n, primals, params_index)
losses_inputs, full_vjp_func = jax.vjp(losses_func, primals[params_index])
losses = []
for jaxpr_eqn, inputs in zip(loss_jaxpr_eqns, losses_inputs):
loss_tag = _unbox_loss_tag(jaxpr_eqn)
losses.append(loss_tag.loss(*inputs, weight=jaxpr_eqn.params["weight"]))
losses = tuple(losses)
def vjp_func(tangents):
flat_tangents = jax.tree_flatten(tangents)[0]
loss_invars = []
loss_targets = []
for jaxpr_eqn, inputs in zip(loss_jaxpr_eqns, losses_inputs):
num_inputs = _unbox_loss_tag(jaxpr_eqn).num_inputs
loss_invars.append(tuple(jaxpr_eqn.invars[:num_inputs]))
loss_targets.append(inputs[num_inputs:])
treedef = jax.tree_structure(loss_invars)
tangents = jax.tree_unflatten(treedef, flat_tangents)
      # Since the losses can also take targets as inputs, and we don't want
      # this function to compute the vjp w.r.t. those (i.e. the user should not
      # be providing tangent vectors for the targets, only for the inputs), we
      # have to manually fill in these "extra" tangents with zeros.
targets_tangents = jax.tree_map(jnp.zeros_like, loss_targets)
tangents = tuple(ti + tti for ti, tti in zip(tangents, targets_tangents))
input_tangents = full_vjp_func(tangents)[0]
return input_tangents,
return losses, vjp_func
return vjp
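# Example usage (illustrative sketch; `tagged_model`, `params`, `data` and the
# loss tangents are hypothetical placeholders for a loss-tagged function and
# its inputs):
#
#   vjp = trace_losses_matrix_vector_vjp(tagged_model, params_index=0)
#   losses, vjp_func = vjp(params, data)
#   (params_tangents,) = vjp_func(losses_inputs_tangents)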
def trace_losses_matrix_vector_jvp(
tagged_func: _Function,
params_index: int = 0):
"""Returns the Jacobian vector product (forward mode) function in equivalent form to jax.jvp."""
def jvp(primals, params_tangents):
typed_jaxpr = jax.make_jaxpr(tagged_func)(*primals)
jaxpr, consts = typed_jaxpr.jaxpr, typed_jaxpr.literals
_, loss_tags = extract_tags(jaxpr)
n = len(loss_tags)
losses_func = construct_compute_losses_inputs(jaxpr, consts, n,
primals, params_index)
primals = (primals[params_index],)
tangents = (params_tangents,)
(primals_out, tangents_out) = jax.jvp(losses_func, primals, tangents)
tangents_out = tuple(tuple(t[:tag.primitive.num_inputs])
for t, tag in zip(tangents_out, loss_tags))
losses = tuple(tag.primitive.loss(*inputs, weight=tag.params["weight"])
for tag, inputs in zip(loss_tags, primals_out))
return losses, tangents_out
return jvp
def trace_losses_matrix_vector_hvp(tagged_func, params_index=0):
"""Returns the Hessian vector product function of **the tagged losses**, rather than the output value of `tagged_func`."""
  # The function uses reverse-over-reverse mode: it takes the gradient of the
  # inner product of the loss gradients with the tangent vectors.
def hvp(primals, params_tangents):
typed_jaxpr = jax.make_jaxpr(tagged_func)(*primals)
jaxpr, consts = typed_jaxpr.jaxpr, typed_jaxpr.literals
_, loss_tags = extract_tags(jaxpr)
n = len(loss_tags)
losses_func = construct_compute_losses_inputs(
jaxpr, consts, n, primals, params_index)
def losses_sum(param_primals):
loss_inputs = losses_func(param_primals)
losses = [
_unbox_loss_tag(jaxpr_eqn).loss(
*inputs, weight=jaxpr_eqn.params["weight"])
for jaxpr_eqn, inputs in zip(loss_tags, loss_inputs)
]
      # Summing the evaluated losses lets us use jax.grad rather than jax.vjp
      # for taking derivatives.
return sum(jnp.sum(loss.evaluate(None)) for loss in losses)
def grads_times_tangents(params_primals):
grads = jax.grad(losses_sum)(params_primals)
return utils.inner_product(grads, params_tangents)
return jax.grad(grads_times_tangents)(primals[params_index])
return hvp
def trace_estimator_vjp(tagged_func: _Function) -> _Function:
"""Creates the function needed for an estimator of curvature matrices.
Args:
tagged_func: An function that has been annotated with tags both for layers
and losses.
Returns:
A function with the same signatures as `tagged_func`, which when provided
with inputs returns two things:
      1. The instances of all loss objects that are tagged.
      2. A second function, which when provided with tangent vectors for each
         of the loss instances' inputs, returns for every tagged layer a
         dictionary containing the following elements:
           inputs - The primal values of the inputs to the layer.
           outputs - The primal values of the outputs of the layer.
           params - The primal values of the layer parameters.
           inputs_tangent - The tangent values of the layer inputs, given the
             provided tangents of the losses.
           outputs_tangent - The tangent values of the layer outputs, given
             the provided tangents of the losses.
           params_tangent - The tangent values of the layer parameters, given
             the provided tangents of the losses.
"""
def full_vjp_func(func_args):
# Trace the tagged function
typed_jaxpr = jax.make_jaxpr(tagged_func)(*func_args)
jaxpr, consts = typed_jaxpr.jaxpr, typed_jaxpr.literals
layer_tags, loss_tags = extract_tags(jaxpr)
layer_vars_flat = jax.tree_flatten([tag.invars for tag in layer_tags])[0]
layer_input_vars = tuple(set(layer_vars_flat))
def forward():
own_func_args = func_args
# Mapping from variable -> value
env = dict()
read = functools.partial(tgm.read_env, env)
write = functools.partial(tgm.write_env, env)
# Bind args and consts to environment
write(jax.core.unitvar, jax.core.unit)
jax_util.safe_map(write, jaxpr.invars, jax.tree_flatten(own_func_args)[0])
jax_util.safe_map(write, jaxpr.constvars, consts)
# Loop through equations and evaluate primitives using `bind`
num_losses_passed = 0
for eqn in jaxpr.eqns:
tgm.evaluate_eqn(eqn, jax_util.safe_map(read, eqn.invars), write)
if isinstance(eqn.primitive, tags.LossTag):
num_losses_passed += 1
if num_losses_passed == len(loss_tags):
break
if num_losses_passed != len(loss_tags):
raise ValueError("This should be unreachable.")
return jax_util.safe_map(read, layer_input_vars)
def forward_aux(aux):
own_func_args = func_args
# Mapping from variable -> value
env = dict()
read = functools.partial(tgm.read_env, env)
def write(var, val):
if not isinstance(var, (jax.core.Literal, jax.core.UnitVar)):
val = val + aux[var] if var in aux else val
env[var] = val
# Bind args and consts to environment
write(jax.core.unitvar, jax.core.unit)
jax_util.safe_map(write, jaxpr.invars, jax.tree_flatten(own_func_args)[0])
jax_util.safe_map(write, jaxpr.constvars, consts)
# Loop through equations and evaluate primitives using `bind`
num_losses_passed = 0
losses_inputs_values = []
losses_kwargs_values = []
for eqn in jaxpr.eqns:
input_values = jax_util.safe_map(read, eqn.invars)
tgm.evaluate_eqn(eqn, input_values, write)
if isinstance(eqn.primitive, tags.LossTag):
loss = eqn.primitive.loss(*input_values, weight=eqn.params["weight"])
losses_inputs_values.append(loss.inputs)
losses_kwargs_values.append(dict(
targets=loss.targets,
weight=eqn.params["weight"]
))
num_losses_passed += 1
if num_losses_passed == len(loss_tags):
break
if num_losses_passed != len(loss_tags):
raise ValueError("This should be unreachable.")
# Read the inputs to the loss functions, but also return the target values
return tuple(losses_inputs_values), tuple(losses_kwargs_values)
layer_input_values = forward()
primals_dict = dict(zip(layer_input_vars, layer_input_values))
primals_dict.update(zip(jaxpr.invars, jax.tree_flatten(func_args)[0]))
aux_values = jax.tree_map(jnp.zeros_like, layer_input_values)
aux_dict = dict(zip(layer_input_vars, aux_values))
losses_args, aux_vjp, losses_kwargs = jax.vjp(forward_aux, aux_dict,
has_aux=True)
losses = tuple(tag.primitive.loss(*inputs, **kwargs)
for tag, inputs, kwargs in
zip(loss_tags, losses_args, losses_kwargs))
def vjp_func(tangents):
all_tangents = aux_vjp(tangents)
tangents_dict, inputs_tangents = all_tangents[0], all_tangents[1:]
inputs_tangents = jax.tree_flatten(inputs_tangents)[0]
tangents_dict.update(zip(jaxpr.invars, inputs_tangents))
read_primals = functools.partial(tgm.read_env, primals_dict)
read_tangents = functools.partial(tgm.read_env, tangents_dict)
layers_info = []
for jaxpr_eqn in layer_tags:
layer_tag = _unbox_layer_tag(jaxpr_eqn)
info = dict()
primals = jax_util.safe_map(read_primals, tuple(jaxpr_eqn.invars))
(
info["outputs"],
info["inputs"],
info["params"],
) = layer_tag.split_all_inputs(primals)
tangents = jax_util.safe_map(read_tangents, tuple(jaxpr_eqn.invars))
(
info["outputs_tangent"],
info["inputs_tangent"],
info["params_tangent"],
) = layer_tag.split_all_inputs(tangents)
layers_info.append(info)
return tuple(layers_info)
return losses, vjp_func
return full_vjp_func
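# Example usage (illustrative sketch; `tagged_model`, `params`, `data` and the
# loss tangents are hypothetical). Note that the returned function takes all
# of the model's arguments as a single tuple:
#
#   losses, layers_vjp = trace_estimator_vjp(tagged_model)((params, data))
#   layers_info = layers_vjp(loss_tangents)  # one info dict per tagged layer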
| deepmind-research-master | kfac_ferminet_alpha/tracer.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module for tagging and graph manipulation."""
import collections
import functools
import itertools
from typing import Any, NamedTuple, Sequence
from absl import logging
import jax
from jax import core as jax_core
from jax import lax
from jax import util as jax_util
from jax.interpreters import partial_eval as pe
import jax.numpy as jnp
import networkx as nx
from networkx.algorithms import isomorphism
import numpy as np
import ordered_set
from kfac_ferminet_alpha import layers_and_loss_tags as tags
USE_NETWORKX = False
def match_nodes(g1, g2, mapping, node1, node2):
"""Matching nodes when doing graph search."""
if not kfac_node_match(g1.nodes[node1], g2.nodes[node2]):
return False
# Check predecessors
p1 = set(n for n in g1.predecessors(node1) if n in mapping.keys())
p2 = set(n for n in g2.predecessors(node2) if n in mapping.values())
if len(p1) != len(p2):
return False
for p1_i in p1:
if mapping[p1_i] not in p2:
return False
# Check successors
s1 = set(n for n in g1.successors(node1) if n in mapping.keys())
s2 = set(n for n in g2.successors(node2) if n in mapping.values())
if len(s1) != len(s2):
return False
for s1_i in s1:
if mapping[s1_i] not in s2:
return False
return True
def generate_candidates(g1, g2, mapping, node1, node2):
"""Generates the initial candidates for graph search."""
# Check predecessors
p1 = set(n for n in g1.predecessors(node1) if n not in mapping.keys())
p2 = set(n for n in g2.predecessors(node2) if n not in mapping.values())
candidates = ordered_set.OrderedSet(itertools.product(p1, p2))
s1 = set(n for n in g1.successors(node1) if n not in mapping.keys())
s2 = set(n for n in g2.successors(node2) if n not in mapping.values())
candidates.update(list(itertools.product(s1, s2)))
return candidates
def find_mappings(pattern, graph, mapping, terminals):
"""Finds all mappings from graph search of the pattern."""
if len(mapping) == len(pattern):
for k, v in terminals.items():
v.add(mapping[k])
return [frozenset(mapping.items())]
mappings = set()
nodes_list = list(mapping.keys())
for node1 in reversed(nodes_list):
for s1 in pattern.successors(node1):
if s1 not in mapping.keys():
for s2 in graph.successors(mapping[node1]):
if s2 not in mapping.values():
if s1 not in terminals or s2 not in terminals[s1]:
if match_nodes(pattern, graph, mapping, s1, s2):
mapping[s1] = s2
mappings.update(
find_mappings(pattern, graph, mapping, terminals))
mapping.pop(s1)
for p1 in pattern.predecessors(node1):
if p1 not in mapping.keys():
for p2 in graph.predecessors(mapping[node1]):
if p2 not in mapping.values():
if p1 not in terminals or p2 not in terminals[p1]:
if match_nodes(pattern, graph, mapping, p1, p2):
mapping[p1] = p2
mappings.update(
find_mappings(pattern, graph, mapping, terminals))
mapping.pop(p1)
return mappings
def match_pattern(pattern, graph):
"""Given a pattern returns all matches inside the graph."""
if USE_NETWORKX:
matcher = isomorphism.GraphMatcher(
graph, pattern, node_match=kfac_node_match)
mappings = list(
dict((k, v)
for v, k in mapping.items())
for mapping in matcher.subgraph_isomorphisms_iter())
else:
mapping = collections.OrderedDict()
params1 = [n for n in pattern.nodes if pattern.nodes[n]["op"] == "param"]
params2 = [n for n in graph.nodes if graph.nodes[n]["op"] == "param"]
terminals = {
n: set() for n in pattern.nodes if not list(pattern.successors(n))
}
mappings = set()
for node1, node2 in itertools.product(params1, params2):
mapping[node1] = node2
mappings.update(find_mappings(pattern, graph, mapping, terminals))
mapping.pop(node1)
for v in terminals.values():
v.clear()
mappings = list(dict(mapping) for mapping in mappings)
var_mappings = []
for mapping in mappings:
var_mappings.append(dict())
for k, v in mapping.items():
cond = pattern.nodes[k]["op"] in ("param", "array")
source = pattern.nodes[k]["var"] if cond else k
target = graph.nodes[v]["var"] if cond else graph.nodes[v]["eqn"]
var_mappings[-1][source] = target
return var_mappings
def read_env(env, var):
# Literals are values baked into the Jaxpr
if isinstance(var, jax.core.Literal):
return var.val
return env[var]
def write_env(env, var, val):
env[var] = val
def abstract_single_value(value):
if isinstance(value, jnp.ndarray):
value = jax.ShapedArray(np.shape(value), np.result_type(value))
return pe.PartialVal.unknown(value)
else:
return value
def abstract_args(args):
return jax.tree_map(abstract_single_value, args)
def evaluate_eqn(eqn, in_values, write_func):
"""Evaluate a single Jax equation and writes the outputs."""
in_values = list(in_values)
# This is logic specifically to handle `xla_call`
call_jaxpr, params = jax.core.extract_call_jaxpr(eqn.primitive, eqn.params)
if call_jaxpr:
subfuns = [
jax.core.lu.wrap_init(
functools.partial(jax.core.eval_jaxpr, call_jaxpr, ()))
]
else:
subfuns = []
ans = eqn.primitive.bind(*(subfuns + in_values), **params)
if eqn.primitive.multiple_results:
jax_util.safe_map(write_func, eqn.outvars, ans)
else:
write_func(eqn.outvars[0], ans)
return ans
def clean_jaxpr_eqns(jaxpr, preserve_tags=True):
"""Performs dead code elimination on the jaxpr, preserving loss and layer tags."""
eqns = []
dependants = set(jaxpr.outvars)
for eqn in reversed(jaxpr.eqns):
check = False
for v in eqn.outvars:
if v in dependants:
dependants.remove(v)
check = True
if isinstance(eqn.primitive, (tags.LossTag, tags.LayerTag)):
check = check or preserve_tags
if check:
eqns.append(eqn)
new_dependants = set(
v for v in eqn.invars if not isinstance(v, jax_core.Literal))
dependants = dependants.union(new_dependants)
# Dependants should only be invars
dependants = dependants - set(jaxpr.invars + jaxpr.constvars)
if dependants:
raise ValueError("Something went wrong with the dead code elimination.")
return reversed(eqns)
def broadcast_merger(f):
"""Transforms `f` into a function where all consecutive broadcasts are merged."""
def merged_func(*func_args):
typed_jaxpr, out_avals = jax.make_jaxpr(f, return_shape=True)(*func_args)
out_tree = jax.tree_structure(out_avals)
jaxpr, consts = typed_jaxpr.jaxpr, typed_jaxpr.literals
# Mapping from variable -> value
env = dict()
read = functools.partial(read_env, env)
write = functools.partial(write_env, env)
# Bind args and consts to environment
flat_args = jax.tree_flatten(func_args)[0]
write(jax.core.unitvar, jax.core.unit)
jax_util.safe_map(write, jaxpr.invars, flat_args)
jax_util.safe_map(write, jaxpr.constvars, consts)
# Loop through equations and evaluate primitives using `bind`
broadcasts_outputs = dict()
for eqn in clean_jaxpr_eqns(jaxpr):
# We ignore broadcasting of constants
if (eqn.primitive.name == "broadcast_in_dim" and
not all(isinstance(v, jax_core.Literal) for v in eqn.invars)):
if eqn.invars[0] in broadcasts_outputs:
x, dims = broadcasts_outputs[eqn.invars[0]]
kept_dims = eqn.params["broadcast_dimensions"]
kept_dims = [kept_dims[d] for d in dims]
y = lax.broadcast_in_dim(x, eqn.params["shape"], kept_dims)
jax_util.safe_map(write, eqn.outvars, [y])
broadcasts_outputs[eqn.outvars[0]] = (x, kept_dims)
else:
inputs = jax_util.safe_map(read, eqn.invars)
evaluate_eqn(eqn, inputs, write)
broadcasts_outputs[eqn.outvars[0]] = (
inputs[0], eqn.params["broadcast_dimensions"])
else:
evaluate_eqn(eqn, jax_util.safe_map(read, eqn.invars), write)
return jax.tree_unflatten(out_tree, jax_util.safe_map(read, jaxpr.outvars))
return merged_func
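# Example (illustrative): two chained broadcasts such as
#
#   y = lax.broadcast_in_dim(x, shape=(2, 3), broadcast_dimensions=(1,))
#   z = lax.broadcast_in_dim(y, shape=(4, 2, 3), broadcast_dimensions=(1, 2))
#
# are merged by `broadcast_merger` into the single equivalent operation
#
#   z = lax.broadcast_in_dim(x, shape=(4, 2, 3), broadcast_dimensions=(2,))
#
# which makes the graph patterns used by the matcher below easier to match.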
class JaxGraph(NamedTuple):
jaxpr: Any
consts: Any
params: Any
params_tree: Any
in_tree: Any
out_tree: Any
digraph: nx.DiGraph
tagging_func: Any
SPECIAL_OP_COMPARE_RULES = dict()
def default_compare(node1, node2):
if node1["op"] != node2["op"]:
return False
params1, params2 = node1["eqn"].params, node2["eqn"].params
if set(params1.keys()) != set(params2.keys()):
return False
for k in params1.keys():
if params1[k] != params2[k]:
return False
return True
def reshape_compare(node1, node2):
"""Compares two reshape nodes."""
assert node1["op"] == node2["op"] == "reshape"
params1, params2 = node1["eqn"].params, node2["eqn"].params
if params1["dimensions"] != params2["dimensions"]:
return False
return True
def broadcast_in_dim_compare(node1, node2):
"""Compares two reshape nodes."""
assert node1["op"] == node2["op"] == "broadcast_in_dim"
return True
def conv_compare(node1, node2):
"""Compares two conv_general_dialted nodes."""
assert node1["op"] == node2["op"] == "conv_general_dilated"
params1, params2 = node1["eqn"].params, node2["eqn"].params
for k in ("window_strides", "padding", "lhs_dilation", "rhs_dilation",
"lhs_shape", "rhs_shape"):
if len(params1[k]) != len(params2[k]):
return False
if (len(params1["dimension_numbers"].lhs_spec) != #
len(params2["dimension_numbers"].lhs_spec)):
return False
if (len(params1["dimension_numbers"].rhs_spec) != #
len(params2["dimension_numbers"].rhs_spec)):
return False
if (len(params1["dimension_numbers"].out_spec) != #
len(params2["dimension_numbers"].out_spec)):
return False
if ((params1["feature_group_count"] > 1) != #
(params2["feature_group_count"] > 1)):
return False
if ((params1["batch_group_count"] > 1) != #
(params2["batch_group_count"] > 1)):
return False
return True
SPECIAL_OP_COMPARE_RULES["reshape"] = reshape_compare
SPECIAL_OP_COMPARE_RULES["broadcast_in_dim"] = broadcast_in_dim_compare
SPECIAL_OP_COMPARE_RULES["conv_general_dilated"] = conv_compare
def kfac_node_match(node1, node2):
"""Checks if two nodes are equivalent."""
# Parameters match with each other and nothing else
if node1["op"] == "param" and node2["op"] == "param":
return True
# return node1["rank"] == node2["rank"]
if node1["op"] == "param" or node2["op"] == "param":
return False
# Arrays always match each other and nothing else
if node1["op"] == "array" and node2["op"] == "array":
return True
if node1["op"] == "array" or node2["op"] == "array":
return False
# Operators match first on name
if node1["op"] != node2["op"]:
return False
compare = SPECIAL_OP_COMPARE_RULES.get(node1["op"], default_compare)
return compare(node1, node2)
def var_to_str(var):
"""Returns a string representation of the variable of a Jax expression."""
if isinstance(var, jax.core.Literal):
return str(var)
elif isinstance(var, jax.core.UnitVar):
return "*"
elif not isinstance(var, jax.core.Var):
raise ValueError(f"Idk what to do with this {type(var)}?")
c = int(var.count)
if c == -1:
return "_"
str_rep = ""
while c > 25:
str_rep += chr(c % 26 + ord("a"))
c = c // 26
str_rep += chr(c + ord("a"))
return str_rep[::-1]
def extract_param_vars_flat(jaxpr, in_tree, params_index):
if params_index is None:
params_index = []
elif isinstance(params_index, int):
params_index = [params_index]
in_vars = jax.tree_unflatten(in_tree, jaxpr.invars)
return jax.tree_flatten([in_vars[i] for i in params_index])
def fill_jaxpr_to_graph(graph, jaxpr, in_vars=None, out_vars=None):
"""Fills the graph with the jaxpr."""
in_vars = in_vars or [var_to_str(v) for v in jaxpr.invars + jaxpr.constvars]
in_map = dict(zip(jaxpr.invars + jaxpr.constvars, in_vars))
out_vars = out_vars or [var_to_str(v) for v in jaxpr.outvars]
out_map = dict(zip(jaxpr.outvars, out_vars))
for eqn in jaxpr.eqns:
in_vars = []
for v in eqn.invars:
if isinstance(v, (jax.core.Literal, jax.core.UnitVar)):
in_vars.append(var_to_str(v))
else:
in_vars.append(in_map.get(v, var_to_str(v)))
out_vars = [out_map.get(v, var_to_str(v)) for v in eqn.outvars]
in_str = ",".join(in_vars)
out_str = ",".join(out_vars)
if isinstance(eqn.primitive, tags.LossTag):
func_name = "__loss_tag"
elif isinstance(eqn.primitive, tags.LayerTag):
func_name = "__layer_tag"
else:
func_name = eqn.primitive.name
node_c = f"{func_name}({in_str})->{out_str}"
graph.add_node(node_c, op=eqn.primitive.name, eqn=eqn)
# Create incoming edges
for v, name in zip(eqn.invars, in_vars):
if (not isinstance(v, jax.core.Literal) and
not isinstance(v, jax.core.UnitVar)):
graph.add_edge(name, node_c)
# Create output nodes and edges
for v, name in zip(eqn.outvars, out_vars):
graph.add_node(name, op="array", var=v)
graph.add_edge(node_c, name)
def create_digraph(jaxpr, params):
"""Creates a directed graph from the given jaxpr and parameters."""
graph = nx.DiGraph()
# Create input nodes
for v in jaxpr.invars + jaxpr.constvars:
if v in params:
graph.add_node(var_to_str(v), op="param", var=v)
else:
graph.add_node(var_to_str(v), op="array", var=v)
fill_jaxpr_to_graph(graph, jaxpr)
return graph
def function_to_jax_graph(func, args, params_index, tagging_func=None):
"""Creates a `JaxGraph` instance from the provided function."""
in_tree = jax.tree_structure(args)
typed_jaxpr = jax.make_jaxpr(func)(*args)
jaxpr, consts = typed_jaxpr.jaxpr, typed_jaxpr.literals
params, params_tree = extract_param_vars_flat(jaxpr, in_tree, params_index)
digraph = create_digraph(jaxpr, params)
if tagging_func is not None:
tagging_func = functools.partial(tagging_func, jaxpr)
return JaxGraph(
jaxpr=jaxpr,
consts=consts,
params=params,
params_tree=params_tree,
in_tree=in_tree,
out_tree=None,
digraph=digraph,
tagging_func=tagging_func)
def print_nice_jaxpr(jaxpr):
for eqn in jaxpr.eqns:
print(tuple(eqn.invars), "->", eqn.primitive.name, tuple(eqn.outvars))
def auto_register_tags(func,
func_args,
params_index: int = 0,
register_only_generic: bool = False,
compute_only_loss_tags: bool = True,
patterns_to_skip: Sequence[str] = ()):
"""Transform the function to one that is populated with tags."""
func = broadcast_merger(func)
graph = function_to_jax_graph(func, func_args, params_index=params_index)
matches = dict()
# Extract the tagged losses variables and all their ancestors
loss_output_vars = []
num_losses = 0
loss_ancestors = set()
for node in graph.digraph.nodes:
if node.startswith("__loss_tag"):
num_losses += 1
ancestors = nx.ancestors(graph.digraph, node)
ancestors.add(node)
for output_node in node.split("->")[-1].split(","):
ancestors.add(output_node)
loss_output_vars.append(graph.digraph.nodes[output_node]["var"])
loss_ancestors = loss_ancestors.union(ancestors)
loss_output_vars = tuple(loss_output_vars)
# Extract the sub-graph that leads to losses
sub_graph = nx.induced_subgraph(graph.digraph, loss_ancestors)
# First collect all parameters that are already part of a layer tag
tagged_params = dict()
pattern_counters = dict()
for tag_node in (
node for node in sub_graph.nodes if node.startswith("__layer_tag")):
inputs = graph.digraph.nodes[tag_node]["eqn"].invars
tag_instance = graph.digraph.nodes[tag_node]["eqn"].primitive
if tag_instance.name == "generic_tag":
tag_params = tag_instance.split_all_inputs(inputs)[0]
else:
tag_params = tag_instance.split_all_inputs(inputs)[2]
pattern_number = pattern_counters.get(tag_instance.name, 0)
for param in tag_params:
if param not in graph.params:
raise ValueError(f"You have registered a layer tag with parameter "
f"that is not part of the parameters at index "
f"{params_index}.")
if param in tagged_params:
raise ValueError(f"You have registered twice the parameter {param}.")
tagged_params[param] = f"Manual[{tag_instance.name}_{pattern_number}]"
if tag_instance.name not in pattern_counters:
pattern_counters[tag_instance.name] = 1
else:
pattern_counters[tag_instance.name] += 1
if not register_only_generic:
for pattern_name, patterns in get_graph_patterns():
if pattern_name in patterns_to_skip:
logging.info("Skipping graph pattern %s", pattern_name)
continue
logging.info("Matching graph pattern %s", pattern_name)
for pattern in patterns:
for match_map in match_pattern(pattern.digraph, sub_graph):
if len(pattern.jaxpr.outvars) > 1:
raise NotImplementedError()
output = pattern.jaxpr.outvars[0]
if matches.get(match_map[output]) is not None:
raise ValueError(f"Found more than one match for equation "
f"{match_map[output]}. Examine the jaxpr:\n "
f"{graph.jaxpr}")
# Mark the parameters as already tagged
match_params = set()
match_params_already_tagged = False
for param in match_map.values():
if param in graph.params:
match_params.add(param)
if param in tagged_params.keys():
match_params_already_tagged = True
# Register the match only if no parameters are already registered
if not match_params_already_tagged:
matches[match_map[output]] = (match_map, pattern.tagging_func)
pattern_number = pattern_counters.get(pattern_name, 0)
for param in match_params:
tagged_params[param] = f"Auto[{pattern_name}_{pattern_number}]"
if pattern_name not in pattern_counters:
pattern_counters[pattern_name] = 1
else:
pattern_counters[pattern_name] += 1
# Mark remaining parameters as orphans
orphan_params = sorted(
set(graph.params) - set(tagged_params.keys()), key=lambda v: v.count)
params_regs = [tagged_params.get(p, "Orphan") for p in graph.params]
params_regs = jax.tree_unflatten(graph.params_tree, params_regs)
logging.info("=" * 50)
logging.info("Graph parameter registrations:")
logging.info(params_regs)
logging.info("=" * 50)
# Construct a function with all of the extra tag registrations
@functools.wraps(func)
def wrapped_auto_registered(*args):
flat_args, _ = jax.tree_flatten(args)
# Mapping from variable -> value
env = {}
read = functools.partial(read_env, env)
write = functools.partial(write_env, env)
def tag(var):
if matches.get(var) is not None:
inv_map, tagging_func = matches[var]
var_map = {k: v for k, v in inv_map.items() if not isinstance(k, str)}
val_map = jax.tree_map(read, var_map)
val = tagging_func(inv_map, val_map)
env[var] = val
# Bind args and consts to environment
write(jax.core.unitvar, jax.core.unit)
jax_util.safe_map(write, graph.jaxpr.invars, flat_args)
jax_util.safe_map(write, graph.jaxpr.constvars, graph.consts)
# Register any orphan parameters as generic
for param_var in orphan_params:
write(param_var, tags.register_generic(read(param_var)))
# Set the correct output variables
if compute_only_loss_tags:
output_vars = loss_output_vars
out_tree = jax.tree_structure(loss_output_vars)
else:
output_vars = graph.jaxpr.outvars
out_tree = graph.out_tree
# Loop through equations and evaluate primitives using `bind`
losses_evaluated = 0
for eqn in graph.jaxpr.eqns:
evaluate_eqn(eqn, jax_util.safe_map(read, eqn.invars), write)
jax_util.safe_map(tag, eqn.outvars)
# If we want to output only tagged losses
if isinstance(eqn.primitive, tags.LossTag):
losses_evaluated += 1
if compute_only_loss_tags and num_losses == losses_evaluated:
break
outputs = jax_util.safe_map(read, output_vars)
return jax.tree_unflatten(out_tree, outputs)
return wrapped_auto_registered
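# Example usage (illustrative sketch; `loss_fn`, `params` and `data` are
# hypothetical placeholders for an untagged model function and its inputs):
#
#   tagged_fn = auto_register_tags(loss_fn, (params, data), params_index=0)
#   loss_outputs = tagged_fn(params, data)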
# Registered graphs
NAME_TO_JAX_GRAPH = dict()
DEFERRED_REGISTRATIONS = []
def register_function(name, func, tagging_func, example_args, params_index,
precedence):
"""Registers a function as a pattern in the graph matcher registry.
  The graph matcher needs to trace the full function at least once, which
  means you need to provide it with dummy arguments. The exact shapes of the
  arguments do not matter, as the graph matcher ignores their values; however,
  their ranks do. In particular, if any broadcasting happens you should
  register the pattern with every possible broadcast configuration. As general
  advice, avoid dimensions of size 1 unless you want the pattern to
  specifically match that, as some operations, like squeeze for example, can
  behave specially in that case.
Args:
name: The name of the pattern that is being registered to.
func: The function that performs the computation.
tagging_func: Function that correctly creates the tag.
example_args: Example arguments that can be inputted into `func`.
params_index: Specifies at which index of the `example_args` are considered
a parameter.
precedence: This specifies what precedence the graph matcher is going to
assign to the provided pattern. The graph matcher will go from lowest to
highest precedence, randomly breaking ties, when matching. Note that the
pattern that matches a parameter with the lowest precedence will get
    registered and no other will. This is specifically useful when there are
    patterns for a layer both with and without bias, in which case the
    with-bias registration should always have the lower precedence.
"""
# This is required because we can not use Jax before InitGoogle() runs
def register():
jnp_args = jax.tree_map(jnp.asarray, example_args)
graph = function_to_jax_graph(
func, jnp_args, params_index=params_index, tagging_func=tagging_func)
if NAME_TO_JAX_GRAPH.get(name) is None:
NAME_TO_JAX_GRAPH[name] = (precedence, [])
assert precedence == NAME_TO_JAX_GRAPH[name][0]
NAME_TO_JAX_GRAPH[name][1].append(graph)
DEFERRED_REGISTRATIONS.append(register)
def get_graph_patterns():
"""Returns all graph patterns sorted by their precedence."""
while DEFERRED_REGISTRATIONS:
DEFERRED_REGISTRATIONS.pop()()
return [(name, pattern) for name, (_, pattern) in sorted(
NAME_TO_JAX_GRAPH.items(), key=lambda pair: pair[1][0])]
# Dense with bias
register_function(
"dense_with_bias",
tags.dense_func,
tags.dense_tagging,
[np.zeros([11, 13]), [np.zeros([13, 7]), np.zeros([7])]],
params_index=1,
precedence=0)
# Dense without bias
register_function(
"dense_no_bias",
tags.dense_func,
tags.dense_tagging, [np.zeros([11, 13]), [np.zeros([13, 7])]],
params_index=1,
precedence=1)
# Conv2d with bias
register_function(
"conv2d_with_bias",
tags.conv2d_func,
tags.conv2d_tagging,
[np.zeros([2, 8, 8, 5]), [np.zeros([3, 3, 5, 4]),
np.zeros([4])]],
params_index=1,
precedence=0)
# Conv2d without bias
register_function(
"conv2d_no_bias",
tags.conv2d_func,
tags.conv2d_tagging, [np.zeros([2, 8, 8, 5]), [np.zeros([3, 3, 5, 4])]],
params_index=1,
precedence=1)
# Standard scale and shift with both scale and shift
register_function(
"scale_and_shift",
functools.partial(
tags.scale_and_shift_func, has_scale=True, has_shift=True),
functools.partial(
tags.scale_and_shift_tagging, has_scale=True, has_shift=True),
[np.zeros([2, 13]), [np.zeros([13]), np.zeros([13])]],
params_index=1,
precedence=0)
# Same but no broadcasting
register_function(
"scale_and_shift",
functools.partial(
tags.scale_and_shift_func, has_scale=True, has_shift=True),
functools.partial(
tags.scale_and_shift_tagging, has_scale=True, has_shift=True),
[np.zeros([13]), [np.zeros([13]), np.zeros([13])]],
params_index=1,
precedence=0)
# Scale and shift as implemented in batch norm layers in Haiku
register_function(
"scale_and_shift",
tags.batch_norm_func,
functools.partial(
tags.batch_norm_tagging_func, has_scale=True, has_shift=True),
[[np.zeros([2, 13]), np.zeros([13])], [np.zeros([13]),
np.zeros([13])]],
params_index=1,
precedence=0)
# Same but no broadcasting
register_function(
"scale_and_shift",
tags.batch_norm_func,
functools.partial(
tags.batch_norm_tagging_func, has_scale=True, has_shift=True),
[[np.zeros([13]), np.zeros([13])], [np.zeros([13]),
np.zeros([13])]],
params_index=1,
precedence=0)
# Only scale
register_function(
"scale_only",
functools.partial(
tags.scale_and_shift_func, has_scale=True, has_shift=False),
functools.partial(
tags.scale_and_shift_tagging, has_scale=True, has_shift=False),
[np.zeros([2, 13]), [np.zeros([13])]],
params_index=1,
precedence=1)
| deepmind-research-master | kfac_ferminet_alpha/tag_graph_matcher.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from absl.testing import absltest
import jax
import jax.numpy as jnp
import jax.random as jnr
from kfac_ferminet_alpha import layers_and_loss_tags
from kfac_ferminet_alpha import loss_functions
from kfac_ferminet_alpha import tag_graph_matcher
from kfac_ferminet_alpha.tests import common
def tagged_autoencoder(all_params, x_in):
h_in = x_in
layers_values = []
for i, params in enumerate(all_params):
h_out = common.fully_connected_layer(params, h_in)
h_out = layers_and_loss_tags.register_dense(h_out, h_in, params[0],
params[1],)
layers_values.append((h_out, h_in))
# Last layer does not have a nonlinearity
if i % 4 != 3:
h_in = jnp.tanh(h_out)
else:
h_in = h_out
  h1, t1 = loss_functions.register_normal_predictive_distribution(
      h_in, targets=x_in, weight=1.0)
  h2, t2 = loss_functions.register_normal_predictive_distribution(
      h_in, targets=x_in, weight=0.1)
  return [[h1, t1], [h2, t2]]
class TestGraphMatcher(unittest.TestCase):
"""Class for running all of the tests for integrating the systems."""
def _test_jaxpr(self, init_func, model_func, tagged_model, data_shape):
data_shape = tuple(data_shape)
rng_key = jnr.PRNGKey(12345)
init_key, data_key = jnr.split(rng_key)
params = init_func(init_key, data_shape)
data = jnr.normal(data_key, (11,) + data_shape)
func = tag_graph_matcher.auto_register_tags(model_func, (params, data))
jaxpr = jax.make_jaxpr(func)(params, data).jaxpr
tagged_jaxpr = jax.make_jaxpr(tagged_model)(params, data).jaxpr
self.assertEqual(len(jaxpr.invars), len(tagged_jaxpr.invars))
self.assertEqual(len(jaxpr.constvars), len(tagged_jaxpr.constvars))
self.assertEqual(len(jaxpr.outvars), len(tagged_jaxpr.outvars))
for eq, tagged_eq in zip(jaxpr.eqns, tagged_jaxpr.eqns):
eq_in_vars = [v for v in eq.invars if not isinstance(v, jax.core.UnitVar)]
tagged_in_vars = [
v for v in tagged_eq.invars if not isinstance(v, jax.core.UnitVar)
]
self.assertEqual(len(eq_in_vars), len(tagged_in_vars))
self.assertEqual(len(eq.outvars), len(tagged_eq.outvars))
self.assertEqual(eq.primitive, tagged_eq.primitive)
for variable, t_variable in zip(eq_in_vars + eq.outvars,
tagged_in_vars + tagged_eq.outvars):
if isinstance(variable, jax.core.Literal):
self.assertEqual(variable.aval, t_variable.aval)
else:
          self.assertEqual(variable.count, t_variable.count)
def test_autoencoder(self):
self._test_jaxpr(common.init_autoencoder, common.autoencoder,
tagged_autoencoder, [784])
if __name__ == "__main__":
absltest.main()
| deepmind-research-master | kfac_ferminet_alpha/tests/graph_matcher_test.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from absl.testing import absltest
import jax
import jax.numpy as jnp
import jax.random as jnr
import numpy as np
from kfac_ferminet_alpha import loss_functions
from kfac_ferminet_alpha import tag_graph_matcher as tgm
from kfac_ferminet_alpha import tracer
from kfac_ferminet_alpha import utils
from kfac_ferminet_alpha.tests import common
def autoencoder_aux(all_aux, all_params, x_in):
h_in = x_in
layers_values = []
for i, (params, aux) in enumerate(zip(all_params, all_aux)):
h_out = common.fully_connected_layer(params, h_in + aux[1]) + aux[0]
layers_values.append((h_out, h_in))
# Last layer does not have a nonlinearity
if i % 4 != 3:
h_in = jnp.tanh(h_out)
else:
h_in = h_out
h1, _ = loss_functions.register_normal_predictive_distribution(h_in, x_in)
h2, _ = loss_functions.register_normal_predictive_distribution(
h_in, targets=x_in, weight=0.1)
l1 = (h1 - x_in)**2 + jnp.log(jnp.pi) / 2
l2 = (h2 - x_in)**2 + jnp.log(jnp.pi) / 2
return [l1, l2 * 0.1], layers_values
class TestTracer(unittest.TestCase):
"""Class for running all of the tests for integrating the systems."""
@staticmethod
def generate_data(init_func, func, data_shape, rng_key):
n = 3
rng_key, key = jnr.split(rng_key)
params = init_func(key, data_shape)
rng_key, key = jnr.split(rng_key)
p_tangents = init_func(key, data_shape)
rng_key, key = jnr.split(rng_key)
data = jnr.normal(key, [n] + data_shape)
loss_vals, layer_vals = func(params, data)
h = layer_vals[-1][0]
keys = jnr.split(key, len(loss_vals))
h_tangents = tuple(jnr.normal(key, shape=h.shape) for key in keys)
return params, data, p_tangents, h_tangents
def assertStructureAllClose(self, x, y, rtol=1E-5, atol=1E-5, **kwargs):
x_v, x_tree = jax.tree_flatten(x)
y_v, y_tree = jax.tree_flatten(y)
self.assertEqual(x_tree, y_tree)
for xi, yi in zip(x_v, y_v):
self.assertEqual(xi.shape, yi.shape)
self.assertEqual(xi.dtype, yi.dtype)
np.testing.assert_allclose(xi, yi, rtol=rtol, atol=atol, **kwargs)
  def test_tracer_jvp(self):
init_func = common.init_autoencoder
func = common.autoencoder
data_shape = [784]
rng_key = jnr.PRNGKey(12345)
params, data, p_tangents, _ = self.generate_data(init_func, func,
data_shape, rng_key)
def no_data_func(args):
outputs = func(args, data)
return outputs[0], outputs[1][-1][0]
# True computation
(primals_out, tangents_out) = jax.jvp(no_data_func, [params], [p_tangents])
loss_vals, _ = primals_out
_, h_tangents = tangents_out
loss_tangents = ((h_tangents,),) * len(loss_vals)
# Tracer computation
tracer_jvp = tracer.trace_losses_matrix_vector_jvp(func)
tracer_losses, tracer_loss_tangents = tracer_jvp((params, data), p_tangents)
tracer_losses = [loss.evaluate(None) for loss in tracer_losses]
self.assertStructureAllClose(loss_vals, tracer_losses)
self.assertStructureAllClose(loss_tangents, tracer_loss_tangents)
def test_tracer_vjp(self):
init_func = common.init_autoencoder
func = common.autoencoder
data_shape = [784]
rng_key = jnr.PRNGKey(12345)
params, data, _, h_tangents = self.generate_data(init_func, func,
data_shape, rng_key)
def no_data_func(args):
outputs = func(args, data)
return outputs[0], outputs[1][-1][0]
# True computation
(loss_vals, _), vjp_func = jax.vjp(no_data_func, params)
loss_tangents = jax.tree_map(jnp.zeros_like, loss_vals)
summed_h_tangents = sum(jax.tree_flatten(h_tangents)[0])
p_tangents = vjp_func((loss_tangents, summed_h_tangents))
# Tracer computation
trace_vjp = tracer.trace_losses_matrix_vector_vjp(func)
tracer_losses, tracer_vjp_func = trace_vjp(params, data)
tracer_losses = [loss.evaluate(None) for loss in tracer_losses]
tracer_p_tangents = tracer_vjp_func(h_tangents)
self.assertStructureAllClose(loss_vals, tracer_losses)
self.assertStructureAllClose(p_tangents, tracer_p_tangents, atol=3e-6)
def test_tracer_hvp(self):
init_func = common.init_autoencoder
func = common.autoencoder
data_shape = [784]
rng_key = jnr.PRNGKey(12345)
params, data, p_tangents, _ = self.generate_data(init_func, func,
data_shape, rng_key)
def no_data_func(args):
outputs = func(args, data)
return sum(jax.tree_map(jnp.sum, outputs[0]))
# True computation
grad_func = jax.grad(no_data_func)
def grad_time_tangents(args):
return utils.inner_product(grad_func(args), p_tangents)
hvp = jax.grad(grad_time_tangents)
hvp_vectors = hvp(params)
# Tracer computation
tracer_hvp = tracer.trace_losses_matrix_vector_hvp(func)
tracer_hvp_vectors = tracer_hvp((params, data), p_tangents)
self.assertStructureAllClose(hvp_vectors, tracer_hvp_vectors, atol=1e-4)
def test_trace_estimator(self):
init_func = common.init_autoencoder
func = common.autoencoder
aux_func = autoencoder_aux
data_shape = [784]
rng_key = jnr.PRNGKey(12345)
params, data, _, h_tangents = self.generate_data(init_func, func,
data_shape, rng_key)
def aux_last_layer(aux, args):
outs = aux_func(aux, args, data)
return outs[1][-1][0]
# True computation
loss_vals, layer_vals = func(params, data)
aux_vals = jax.tree_map(jnp.zeros_like, layer_vals)
_, vjp = jax.vjp(aux_last_layer, aux_vals, params)
summed_h_tangents = sum(jax.tree_flatten(h_tangents)[0])
aux_tangents, p_tangents = vjp(summed_h_tangents)
layers_info = []
for aux_p, p_p in zip(layer_vals, params):
info = dict()
info["outputs"] = (aux_p[0],)
info["inputs"] = (aux_p[1],)
info["params"] = (p_p[0], p_p[1])
layers_info.append(info)
for i, (aux_t, p_t) in enumerate(zip(aux_tangents, p_tangents)):
info = dict()
info["outputs_tangent"] = (aux_t[0],)
info["inputs_tangent"] = (aux_t[1],)
info["params_tangent"] = (p_t[0], p_t[1])
layers_info[i].update(info)
layers_info = tuple(layers_info)
func = tgm.auto_register_tags(func, (params, data))
tracer_vjp = tracer.trace_estimator_vjp(func)
tracer_losses, tracer_vjp_func = tracer_vjp((params, data))
tracer_losses = [loss.evaluate(None) for loss in tracer_losses]
tracer_outputs = tracer_vjp_func((h_tangents[:1], h_tangents[1:]))
self.assertStructureAllClose(loss_vals, tracer_losses)
self.assertStructureAllClose(tracer_outputs, layers_info, atol=3e-6)
if __name__ == "__main__":
absltest.main()
| deepmind-research-master | kfac_ferminet_alpha/tests/tracer_test.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common functions used across more than one test."""
import jax
import jax.numpy as jnp
import jax.random as jnr
from kfac_ferminet_alpha import loss_functions
def fully_connected_layer(params, x):
w, b = params
return jnp.matmul(x, w) + b[None]
def init_autoencoder(key, data_shape):
"""Initialize the standard autoencoder."""
assert len(data_shape) == 1
x_size = data_shape[0]
sizes = [x_size, 1000, 500, 250, 30, 250, 500, 1000, x_size]
keys = jnr.split(key, len(sizes) - 1)
params = []
for key, dim_in, dim_out in zip(keys, sizes, sizes[1:]):
# Glorot uniform initialization
c = jnp.sqrt(6 / (dim_in + dim_out))
w = jax.random.uniform(key, shape=(dim_in, dim_out), minval=-c, maxval=c)
b = jnp.zeros([dim_out])
params.append((w, b))
return params
def autoencoder(all_params, x_in):
"""Evaluate the standard autoencoder.
  Note that the objective of this autoencoder is not standard, but rather a
  sum of two weighted squared-error (Gaussian) losses. The reason for this is
  to test handling of multiple registered losses.
Args:
all_params: All parameter values.
x_in: Inputs to the network.
Returns:
The value of the two losses and intermediate layer values.
"""
h_in = x_in
layers_values = []
for i, params in enumerate(all_params):
h_out = fully_connected_layer(params, h_in)
layers_values.append((h_out, h_in))
    # The bottleneck and final layers do not have a nonlinearity
if i % 4 != 3:
# h_in = nn.leaky_relu(h_out)
h_in = jnp.tanh(h_out)
else:
h_in = h_out
h1, _ = loss_functions.register_normal_predictive_distribution(h_in, x_in)
h2, _ = loss_functions.register_normal_predictive_distribution(
h_in, targets=x_in, weight=0.1)
l1 = (h1 - x_in)**2 + jnp.log(jnp.pi) / 2
l1 = jnp.sum(l1, axis=-1)
l2 = (h2 - x_in)**2 + jnp.log(jnp.pi) / 2
l2 = jnp.sum(l2, axis=-1)
return [l1, l2 * 0.1], layers_values
| deepmind-research-master | kfac_ferminet_alpha/tests/common.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Decoder architectures to be used with VAE."""
import abc
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
class DecoderBase(hk.Module):
"""Base class for decoder network classes."""
def __init__(self, obs_var: float):
"""Class initializer.
Args:
      obs_var: observation variance of the dataset.
"""
super().__init__()
self._obs_var = obs_var
@abc.abstractmethod
def __call__(self, z: jnp.ndarray) -> jnp.ndarray:
"""Reconstruct from a given latent sample.
Args:
z: latent samples of shape (batch_size, latent_dim)
Returns:
Reconstruction with shape (batch_size, ...).
"""
def data_fidelity(
self,
input_data: jnp.ndarray,
recons: jnp.ndarray,
) -> jnp.ndarray:
"""Compute Data fidelity (recons loss) for given input and recons.
Args:
input_data: Input batch of shape (batch_size, ...).
recons: Reconstruction of the input data. An array with the same shape as
`input_data.data`.
Returns:
Computed data fidelity term across batch of data. An array of shape
`(batch_size,)`.
"""
error = (input_data - recons).reshape(input_data.shape[0], -1)
return -0.5 * jnp.sum(jnp.square(error), axis=1) / self._obs_var
class ColorMnistMLPDecoder(DecoderBase):
"""MLP decoder for Color Mnist."""
_hidden_units = (200, 200, 200, 200)
  _image_dims = (28, 28, 3)  # Dimensions of a single ColorMnist image.
def __call__(self, z: jnp.ndarray) -> jnp.ndarray:
"""Reconstruct with given latent sample.
Args:
z: latent samples of shape (batch_size, latent_dim)
Returns:
      Reconstructed data of shape (batch_size, 28, 28, 3).
"""
out = z
for units in self._hidden_units:
out = hk.Linear(units)(out)
out = jax.nn.relu(out)
    out = hk.Linear(np.prod(self._image_dims))(out)
out = jax.nn.sigmoid(out)
return jnp.reshape(out, (-1,) + self._image_dims)
| deepmind-research-master | avae/decoders.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Checkpointing functionality."""
import os
from typing import Any, Mapping, Optional
from absl import logging
import dill
import jax
import jax.numpy as jnp
class Checkpointer:
"""A checkpoint saving and loading class."""
def __init__(self, checkpoint_dir: str, filename: str):
"""Class initializer.
Args:
checkpoint_dir: Checkpoint directory.
filename: Filename of checkpoint in checkpoint directory.
"""
self._checkpoint_dir = checkpoint_dir
if not os.path.isdir(self._checkpoint_dir):
os.mkdir(self._checkpoint_dir)
self._checkpoint_path = os.path.join(self._checkpoint_dir, filename)
def save_checkpoint(
self,
experiment_state: Mapping[str, jnp.ndarray],
opt_state: Mapping[str, jnp.ndarray],
step: int,
extra_checkpoint_info: Optional[Mapping[str, Any]] = None) -> None:
"""Save checkpoint with experiment state and step information.
Args:
experiment_state: Experiment params to be stored.
opt_state: Optimizer state to be stored.
step: Training iteration step.
extra_checkpoint_info: Extra information to be stored.
"""
if jax.host_id() != 0:
return
checkpoint_data = dict(
experiment_state=jax.tree_map(jax.device_get, experiment_state),
opt_state=jax.tree_map(jax.device_get, opt_state),
step=step)
if extra_checkpoint_info is not None:
for key in extra_checkpoint_info:
checkpoint_data[key] = extra_checkpoint_info[key]
with open(self._checkpoint_path, 'wb') as checkpoint_file:
dill.dump(checkpoint_data, checkpoint_file, protocol=2)
def load_checkpoint(
self) -> Optional[Mapping[str, Mapping[str, jnp.ndarray]]]:
"""Load and return checkpoint data.
Returns:
Loaded checkpoint if it exists else returns None.
"""
if os.path.exists(self._checkpoint_path):
with open(self._checkpoint_path, 'rb') as checkpoint_file:
checkpoint_data = dill.load(checkpoint_file)
logging.info('Loading checkpoint from %s, saved at step %d',
self._checkpoint_path, checkpoint_data['step'])
return checkpoint_data
else:
logging.warning('No pre-saved checkpoint found at %s',
self._checkpoint_path)
return None
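# A minimal usage sketch (illustrative; the directory, filename and the
# `params` / `opt_state` values here are assumptions, not part of this
# module):
#
#   ckpt = Checkpointer('/tmp/my_experiment', 'latest.ckpt')
#   restored = ckpt.load_checkpoint()
#   start_step = restored['step'] if restored is not None else 0
#   ckpt.save_checkpoint(params, opt_state, step=start_step)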
| deepmind-research-master | avae/checkpointer.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Encoder architectures to be used with VAE."""
import abc
from typing import Generic, TypeVar
import haiku as hk
import jax
import jax.numpy as jnp
from avae import types
_Params = TypeVar('_Params')
class EncoderBase(hk.Module, Generic[_Params]):
"""Abstract class for encoder architectures."""
def __init__(self, latent_dim: int):
"""Class initializer.
Args:
latent_dim: Latent dimensions of the model.
"""
super().__init__()
self._latent_dim = latent_dim
@abc.abstractmethod
def __call__(self, input_data: jnp.ndarray) -> _Params:
"""Return posterior distribution over latents.
Args:
input_data: Input batch of shape (batch_size, ...).
Returns:
Parameters of the posterior distribution over the latents.
"""
@abc.abstractmethod
def sample(self, posterior: _Params, key: jnp.ndarray) -> jnp.ndarray:
"""Sample from the given posterior distribution.
Args:
posterior: Parameters of posterior distribution over the latents.
key: Random number generator key.
Returns:
Sample from the posterior distribution over latents,
shape[batch_size, latent_dim]
"""
class ColorMnistMLPEncoder(EncoderBase[types.NormalParams]):
"""MLP encoder for ColorMnist."""
_hidden_units = (200, 200, 200, 200)
def __call__(
self, input_data: jnp.ndarray) -> types.NormalParams:
"""Return posterior distribution over latents.
Args:
input_data: Input batch of shape (batch_size, ...).
Returns:
Posterior distribution over the latents.
"""
out = hk.Flatten()(input_data)
for units in self._hidden_units:
out = hk.Linear(units)(out)
out = jax.nn.relu(out)
out = hk.Linear(2 * self._latent_dim)(out)
return _normal_params_from_logits(out)
def sample(
self,
posterior: types.NormalParams,
key: jnp.ndarray,
) -> jnp.ndarray:
"""Sample from the given normal posterior (mean, var) distribution.
Args:
posterior: Posterior over the latents.
key: Random number generator key.
Returns:
Sample from the posterior distribution over latents,
shape[batch_size, latent_dim]
"""
    eps = jax.random.normal(
        key, shape=(posterior.mean.shape[0], self._latent_dim))
    # Reparameterization: scale the unit Gaussian noise by the standard
    # deviation, consistent with `variance` as used in the KL term.
    return posterior.mean + eps * jnp.sqrt(posterior.variance)
def _normal_params_from_logits(
logits: jnp.ndarray) -> types.NormalParams:
"""Construct mean and variance of normal distribution from given logits."""
mean, log_variance = jnp.split(logits, 2, axis=1)
variance = jnp.exp(log_variance)
return types.NormalParams(mean=mean, variance=variance)
| deepmind-research-master | avae/encoders.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Useful dataclasses types used across the code."""
from typing import Optional
import dataclasses
import jax.numpy as jnp
import numpy as np
@dataclasses.dataclass(frozen=True)
class ELBOOutputs:
elbo: jnp.ndarray
data_fidelity: jnp.ndarray
kl: jnp.ndarray
@dataclasses.dataclass(frozen=True)
class LabelledData:
"""A batch of labelled examples.
Attributes:
data: Array of shape (batch_size, ...).
label: Array of shape (batch_size, ...).
"""
data: np.ndarray
label: Optional[np.ndarray]
@dataclasses.dataclass(frozen=True)
class NormalParams:
"""Parameters of a normal distribution.
Attributes:
mean: Array of shape (batch_size, latent_dim).
variance: Array of shape (batch_size, latent_dim).
"""
mean: jnp.ndarray
variance: jnp.ndarray
| deepmind-research-master | avae/types.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Various KL implementations in JAX."""
import jax.numpy as jnp
def kl_p_with_uniform_normal(mean: jnp.ndarray,
variance: jnp.ndarray) -> jnp.ndarray:
r"""KL between p_dist with uniform normal prior.
Args:
mean: Mean of the gaussian distribution, shape (latent_dims,)
variance: Variance of the gaussian distribution, shape (latent_dims,)
Returns:
KL divergence KL(P||N(0, 1)) shape ()
"""
if len(variance.shape) == 2:
# If `variance` is a full covariance matrix
variance_trace = jnp.trace(variance)
_, ldet1 = jnp.linalg.slogdet(variance)
else:
variance_trace = jnp.sum(variance)
ldet1 = jnp.sum(jnp.log(variance))
mean_contribution = jnp.sum(jnp.square(mean))
res = -ldet1
res += variance_trace + mean_contribution - mean.shape[0]
return res * 0.5
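# A minimal, illustrative sanity check, runnable as a script: the KL of the
# standard normal against itself is exactly zero, since the function computes
# KL = 0.5 * (tr(V) + ||mu||^2 - d - log(det(V))).
if __name__ == '__main__':
  print(kl_p_with_uniform_normal(jnp.zeros([4]), jnp.ones([4])))  # 0.0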
| deepmind-research-master | avae/kl.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""VAE style training."""
from typing import Any, Callable, Iterator, Sequence, Mapping, Tuple, Optional
import haiku as hk
import jax
import jax.numpy as jnp
import optax
from avae import checkpointer
from avae import types
def train(
train_data_iterator: Iterator[types.LabelledData],
test_data_iterator: Iterator[types.LabelledData],
elbo_fun: hk.Transformed,
learning_rate: float,
checkpoint_dir: str,
checkpoint_filename: str,
checkpoint_every: int,
test_every: int,
iterations: int,
rng_seed: int,
test_functions: Optional[Sequence[Callable[[Mapping[str, jnp.ndarray]],
Tuple[str, float]]]] = None,
extra_checkpoint_info: Optional[Mapping[str, Any]] = None):
"""Train VAE with given data iterator and elbo definition.
Args:
train_data_iterator: Iterator of batched training data.
test_data_iterator: Iterator of batched testing data.
    elbo_fun: Haiku transformed function returning elbo.
learning_rate: Learning rate to be used with optimizer.
checkpoint_dir: Path of the checkpoint directory.
checkpoint_filename: Filename of the checkpoint.
checkpoint_every: Checkpoint every N iterations.
test_every: Test and log results every N iterations.
iterations: Number of training iterations to perform.
rng_seed: Seed for random number generator.
    test_functions: Iterable of test functions; each takes the current params
      and returns a (name, value) pair to print at test and log time.
extra_checkpoint_info: Extra info to put inside saved checkpoint.
"""
rng_seq = hk.PRNGSequence(jax.random.PRNGKey(rng_seed))
opt_init, opt_update = optax.chain(
# Set the parameters of Adam. Note the learning_rate is not here.
optax.scale_by_adam(b1=0.9, b2=0.999, eps=1e-8),
# Put a minus sign to *minimise* the loss.
optax.scale(-learning_rate))
@jax.jit
def loss(params, key, data):
elbo_outputs = elbo_fun.apply(params, key, data)
return -jnp.mean(elbo_outputs.elbo)
@jax.jit
def loss_test(params, key, data):
elbo_output = elbo_fun.apply(params, key, data)
return (-jnp.mean(elbo_output.elbo), jnp.mean(elbo_output.data_fidelity),
jnp.mean(elbo_output.kl))
@jax.jit
def update_step(params, key, data, opt_state):
grads = jax.grad(loss, has_aux=False)(params, key, data)
updates, opt_state = opt_update(grads, opt_state, params)
params = optax.apply_updates(params, updates)
return params, opt_state
exp_checkpointer = checkpointer.Checkpointer(
checkpoint_dir, checkpoint_filename)
experiment_data = exp_checkpointer.load_checkpoint()
if experiment_data is not None:
start = experiment_data['step']
params = experiment_data['experiment_state']
opt_state = experiment_data['opt_state']
else:
start = 0
params = elbo_fun.init(
next(rng_seq), next(train_data_iterator).data)
opt_state = opt_init(params)
for step in range(start, iterations, 1):
if step % test_every == 0:
test_loss, ll, kl = loss_test(params, next(rng_seq),
next(test_data_iterator).data)
output_message = (f'Step {step} elbo {-test_loss:0.2f} LL {ll:0.2f} '
f'KL {kl:0.2f}')
if test_functions:
for test_function in test_functions:
name, test_output = test_function(params)
output_message += f' {name}: {test_output:0.2f}'
print(output_message)
params, opt_state = update_step(params, next(rng_seq),
next(train_data_iterator).data, opt_state)
if step % checkpoint_every == 0:
exp_checkpointer.save_checkpoint(
params, opt_state, step, extra_checkpoint_info)
| deepmind-research-master | avae/train.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataset iterators Mnist, ColorMnist."""
import enum
import jax.numpy as jnp
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
from avae import types
class Dataset(enum.Enum):
color_mnist = enum.auto()
class MnistDataIterator(object):
"""Mnist data iterator class.
Data is obtained as dataclass, types.LabelledData.
"""
def __init__(self, subset: str, batch_size: int):
"""Class initializer.
Args:
subset: Subset of dataset.
batch_size: Batch size of the returned dataset iterator.
"""
datasets = tfds.load('mnist')
train_dataset = datasets[subset]
def _map_fun(x):
return {'data': tf.cast(x['image'], tf.float32) / 255.0,
'label': x['label']}
train_dataset = train_dataset.map(_map_fun)
train_dataset = train_dataset.batch(batch_size,
drop_remainder=True).repeat()
self._iterator = iter(tfds.as_numpy(train_dataset))
self._batch_size = batch_size
def __iter__(self):
return self
def __next__(self) -> types.LabelledData:
return types.LabelledData(**next(self._iterator))
class ColorMnistDataIterator(MnistDataIterator):
"""Color Mnist data iterator.
  Each ColorMnist image is of shape (28, 28, 3). A ColorMnist digit can take 7
  different colors, obtained by turning the RGB channels on and off (every
  combination except the one with all channels off).
  Data is obtained as dataclass, types.LabelledData.
"""
def __next__(self) -> types.LabelledData:
mnist_batch = next(self._iterator)
mnist_image = mnist_batch['data']
# Colors are supported by turning off and on RGB channels.
# Thus possible colors are
# [black (excluded), red, green, yellow, blue, magenta, cyan, white]
# color_id takes value from [1,8)
color_id = np.random.randint(7, size=self._batch_size) + 1
red_channel_bool = np.mod(color_id, 2)
red_channel_bool = jnp.reshape(red_channel_bool, [-1, 1, 1, 1])
blue_channel_bool = np.floor_divide(color_id, 4)
blue_channel_bool = jnp.reshape(blue_channel_bool, [-1, 1, 1, 1])
green_channel_bool = np.mod(np.floor_divide(color_id, 2), 2)
green_channel_bool = jnp.reshape(green_channel_bool, [-1, 1, 1, 1])
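    # Illustrative example: color_id = 6 gives red = 6 % 2 = 0,
    # green = (6 // 2) % 2 = 1 and blue = 6 // 4 = 1, i.e. a digit with the
    # green and blue channels switched on.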
color_mnist_image = jnp.stack([
jnp.multiply(red_channel_bool, mnist_image),
jnp.multiply(blue_channel_bool, mnist_image),
jnp.multiply(green_channel_bool, mnist_image)], axis=3)
color_mnist_image = jnp.reshape(color_mnist_image, [-1, 28, 28, 3])
    # Color ids take values in [1, 8). Note that the label returned below is
    # the digit label from the underlying MNIST batch; no color label is
    # attached.
return types.LabelledData(
data=color_mnist_image, label=mnist_batch['label'])
| deepmind-research-master | avae/data_iterators.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main file for training VAE/AVAE."""
import enum
from absl import app
from absl import flags
import haiku as hk
from avae import data_iterators
from avae import decoders
from avae import encoders
from avae import train
from avae import vae
class Model(enum.Enum):
vae = enum.auto()
avae = enum.auto()
class EncoderArch(enum.Enum):
color_mnist_mlp_encoder = 'ColorMnistMLPEncoder'
class DecoderArch(enum.Enum):
color_mnist_mlp_decoder = 'ColorMnistMLPDecoder'
_DATASET = flags.DEFINE_enum_class(
'dataset', data_iterators.Dataset.color_mnist, data_iterators.Dataset,
'Dataset to train on')
_LATENT_DIM = flags.DEFINE_integer('latent_dim', 32,
'Number of latent dimensions.')
_TRAIN_BATCH_SIZE = flags.DEFINE_integer('train_batch_size', 64,
'Train batch size.')
_TEST_BATCH_SIZE = flags.DEFINE_integer('test_batch_size', 64,
'Testing batch size.')
_TEST_EVERY = flags.DEFINE_integer('test_every', 1000,
'Test every N iterations.')
_ITERATIONS = flags.DEFINE_integer('iterations', 102000,
'Number of training iterations.')
_OBS_VAR = flags.DEFINE_float('obs_var', 0.5,
'Observation variance of the data. (Default 0.5)')
_MODEL = flags.DEFINE_enum_class('model', Model.avae, Model,
'Model used for training.')
_RHO = flags.DEFINE_float('rho', 0.8, 'Rho parameter used with AVAE or SE.')
_LEARNING_RATE = flags.DEFINE_float('learning_rate', 1e-4, 'Learning rate.')
_RNG_SEED = flags.DEFINE_integer('rng_seed', 0,
'Seed for random number generator.')
_CHECKPOINT_DIR = flags.DEFINE_string('checkpoint_dir', '/tmp/',
'Directory for checkpointing.')
_CHECKPOINT_FILENAME = flags.DEFINE_string(
'checkpoint_filename', 'color_mnist_avae_mlp', 'Checkpoint filename.')
_CHECKPOINT_EVERY = flags.DEFINE_integer(
'checkpoint_every', 1000, 'Checkpoint every N steps.')
_ENCODER = flags.DEFINE_enum_class(
'encoder', EncoderArch.color_mnist_mlp_encoder, EncoderArch,
'Encoder class name.')
_DECODER = flags.DEFINE_enum_class(
'decoder', DecoderArch.color_mnist_mlp_decoder, DecoderArch,
'Decoder class name.')
def main(_):
if _DATASET.value is data_iterators.Dataset.color_mnist:
train_data_iterator = iter(
data_iterators.ColorMnistDataIterator('train', _TRAIN_BATCH_SIZE.value))
test_data_iterator = iter(
data_iterators.ColorMnistDataIterator('test', _TEST_BATCH_SIZE.value))
def _elbo_fun(input_data):
if _ENCODER.value is EncoderArch.color_mnist_mlp_encoder:
encoder = encoders.ColorMnistMLPEncoder(_LATENT_DIM.value)
if _DECODER.value is DecoderArch.color_mnist_mlp_decoder:
decoder = decoders.ColorMnistMLPDecoder(_OBS_VAR.value)
vae_obj = vae.VAE(encoder, decoder, _RHO.value)
if _MODEL.value is Model.vae:
return vae_obj.vae_elbo(input_data, hk.next_rng_key())
else:
return vae_obj.avae_elbo(input_data, hk.next_rng_key())
elbo_fun = hk.transform(_elbo_fun)
extra_checkpoint_info = {
'dataset': _DATASET.value.name,
'encoder': _ENCODER.value.name,
'decoder': _DECODER.value.name,
'obs_var': _OBS_VAR.value,
'rho': _RHO.value,
'latent_dim': _LATENT_DIM.value,
}
train.train(
train_data_iterator=train_data_iterator,
test_data_iterator=test_data_iterator,
elbo_fun=elbo_fun,
learning_rate=_LEARNING_RATE.value,
checkpoint_dir=_CHECKPOINT_DIR.value,
checkpoint_filename=_CHECKPOINT_FILENAME.value,
checkpoint_every=_CHECKPOINT_EVERY.value,
test_every=_TEST_EVERY.value,
iterations=_ITERATIONS.value,
rng_seed=_RNG_SEED.value,
extra_checkpoint_info=extra_checkpoint_info)
if __name__ == '__main__':
app.run(main)
| deepmind-research-master | avae/train_main.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Standard VAE class."""
from typing import Optional
import jax
import jax.numpy as jnp
from avae import decoders
from avae import encoders
from avae import kl
from avae import types
class VAE:
"""VAE class.
  This class defines the ELBOs used for training VAE and AVAE models. It also
  provides a __call__ method for forward passes through the VAE.
"""
def __init__(self, encoder: encoders.EncoderBase,
decoder: decoders.DecoderBase, rho: Optional[float] = None):
"""Class initializer.
Args:
encoder: Encoder network architecture.
decoder: Decoder network architecture.
rho: Rho parameter used in AVAE training.
"""
self._encoder = encoder
self._decoder = decoder
self._rho = rho
def vae_elbo(
self, input_data: jnp.ndarray,
key: jnp.ndarray) -> types.ELBOOutputs:
"""ELBO for training VAE.
Args:
input_data: Input batch of shape (batch_size, ...).
key: Key for random number generator.
Returns:
      Computed VAE Elbo as type types.ELBOOutputs.
"""
posterior = self._encoder(input_data)
samples = self._encoder.sample(posterior, key)
    kls = jax.vmap(kl.kl_p_with_uniform_normal, [0, 0])(
        posterior.mean, posterior.variance)
recons = self._decoder(samples)
data_fidelity = self._decoder.data_fidelity(input_data, recons)
elbo = data_fidelity - kls
return types.ELBOOutputs(elbo, data_fidelity, kls)
def avae_elbo(
self, input_data: jnp.ndarray,
key: jnp.ndarray) -> types.ELBOOutputs:
"""ELBO for training AVAE model.
Args:
input_data: Input batch of shape (batch_size, ...).
key: Key for random number generator.
Returns:
      Computed AVAE Elbo as type types.ELBOOutputs. All arrays have the batch
      dimension intact.
"""
aux_images = jax.lax.stop_gradient(self(input_data, key))
posterior = self._encoder(input_data)
samples = self._encoder.sample(posterior, key)
kls = jax.vmap(kl.kl_p_with_uniform_normal, [0, 0])(
posterior.mean, posterior.variance)
recons = self._decoder(samples)
data_fidelity = self._decoder.data_fidelity(input_data, recons)
elbo = data_fidelity - kls
aux_posterior = self._encoder(aux_images)
latent_mean = posterior.mean
latent_var = posterior.variance
aux_latent_mean = aux_posterior.mean
aux_latent_var = aux_posterior.variance
latent_dim = latent_mean.shape[1]
def _reduce(x):
return jnp.mean(jnp.sum(x, axis=1))
# Computation of <log p(Z_aux | Z)>.
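    # Under the two Gaussian posteriors, E[(z_aux - rho * z)^2] decomposes
    # per dimension as var_aux + rho^2 * var + (mean_aux - rho * mean)^2,
    # which is the quadratic term of log N(z_aux; rho * z, (1 - rho^2) * I)
    # accumulated below.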
expected_log_conditional = (
aux_latent_var + jnp.square(self._rho) * latent_var +
jnp.square(aux_latent_mean - self._rho * latent_mean))
expected_log_conditional = _reduce(expected_log_conditional)
expected_log_conditional /= 2.0 * (1.0 - jnp.square(self._rho))
expected_log_conditional = (latent_dim *
jnp.log(1.0 / (2 * jnp.pi)) -
expected_log_conditional)
elbo += expected_log_conditional
# Entropy of Z_aux
elbo += _reduce(0.5 * jnp.log(2 * jnp.pi * jnp.e * aux_latent_var))
return types.ELBOOutputs(elbo, data_fidelity, kls)
def __call__(
self, input_data: jnp.ndarray,
key: jnp.ndarray) -> jnp.ndarray:
"""Reconstruction of the input data.
Args:
input_data: Input batch of shape (batch_size, ...).
key: Key for random number generator.
Returns:
Reconstruction of the input data as jnp.ndarray of shape
[batch_dim, observation_dims].
"""
posterior = self._encoder(input_data)
samples = self._encoder.sample(posterior, key)
recons = self._decoder(samples)
return recons
| deepmind-research-master | avae/vae.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example code for running model on CLEVRER."""
import json
from absl import app
from absl import flags
import numpy as np
import tensorflow.compat.v1 as tf
from object_attention_for_reasoning import model as modellib
BATCH_SIZE = 1
NUM_FRAMES = 25
NUM_OBJECTS = 8
_BASE_DIR = flags.DEFINE_string(
"base_dir", "./clevrer_monet_latents",
"Directory containing checkpoints and MONet latents.")
_SCENE_IDX = flags.DEFINE_integer(
    "scene_idx", 1000, "Scene index of the CLEVRER video.")
def load_monet_latents(base_dir, scene_index):
filename = f"{base_dir}/train/{scene_index}.npz"
with open(filename, "rb") as f:
return np.load(f)
def _split_string(s):
"""Splits string to words and standardize alphabet."""
return s.lower().replace("?", "").split()
def _pad(array, length):
"""Pad an array to desired length."""
return np.pad(array, [(0, length - array.shape[0])], mode="constant")
def encode_sentence(token_map, sentence, pad_length):
"""Encode CLEVRER question/choice sentences as sequence of token ids."""
ret = np.array(
[token_map["question_vocab"][w] for w in _split_string(sentence)],
np.int32)
return _pad(ret, pad_length)
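# Illustrative example (the token ids shown are hypothetical): with
# pad_length = 6, "What color is it?" maps to
# [id_what, id_color, id_is, id_it, 0, 0].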
def encode_choices(token_map, choices):
"""Encode CLEVRER choices."""
arrays = [encode_sentence(token_map, choice["choice"],
modellib.MAX_CHOICE_LENGTH)
for choice in choices]
return _pad(np.stack(arrays, axis=0), modellib.NUM_CHOICES)
def main(unused_argv):
base_dir = _BASE_DIR.value
with open(f"{base_dir}/vocab.json", "rb") as f:
token_map = json.load(f)
reverse_answer_lookup = {v: k for k, v in token_map["answer_vocab"].items()}
with open(f"{base_dir}/train.json", "rb") as f:
questions_data = json.load(f)
tf.reset_default_graph()
model = modellib.ClevrerTransformerModel(**modellib.PRETRAINED_MODEL_CONFIG)
inputs_descriptive = {
"monet_latents": tf.placeholder(
tf.float32,
[BATCH_SIZE, NUM_FRAMES, NUM_OBJECTS, modellib.EMBED_DIM]),
"question": tf.placeholder(
tf.int32, [BATCH_SIZE, modellib.MAX_QUESTION_LENGTH]),
}
inputs_mc = {
"monet_latents": tf.placeholder(
tf.float32,
[BATCH_SIZE, NUM_FRAMES, NUM_OBJECTS, modellib.EMBED_DIM]),
"question": tf.placeholder(tf.int32,
[BATCH_SIZE, modellib.MAX_QUESTION_LENGTH]),
"choices": tf.placeholder(
tf.int32, [BATCH_SIZE, modellib.NUM_CHOICES,
modellib.MAX_CHOICE_LENGTH]),
}
output_descriptive = model.apply_model_descriptive(inputs_descriptive)
output_mc = model.apply_model_mc(inputs_mc)
# Restore from checkpoint
saver = tf.train.Saver()
checkpoint_dir = f"{base_dir}/checkpoints/"
sess = tf.train.SingularMonitoredSession(checkpoint_dir=checkpoint_dir)
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
saver.restore(sess, ckpt.model_checkpoint_path)
def eval_descriptive(monet_latents, question_json):
# CLEVRER provides videos with 128 frames. In our model, we subsample 25
# frames (as was done in Yi et al (2020)).
# For training, we randomize the choice of 25 frames, and for evaluation, we
# sample the 25 frames as evenly as possible.
# We do that by doing strided sampling of the frames.
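    # For example, CLEVRER's 128 frames give divmod(128, 25) = (5, 3), so
    # monet_latents[:-3:5] picks every 5th of the first 125 frames, yielding
    # exactly 25.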
stride, rem = divmod(monet_latents.shape[0], NUM_FRAMES)
monet_latents = monet_latents[None, :-rem:stride]
assert monet_latents.shape[1] == NUM_FRAMES
question = encode_sentence(token_map, question_json["question"],
modellib.MAX_QUESTION_LENGTH)
batched_question = np.expand_dims(question, axis=0)
logits = sess.run(output_descriptive, feed_dict={
inputs_descriptive["monet_latents"]: monet_latents,
inputs_descriptive["question"]: batched_question,
})
descriptive_answer = np.argmax(logits)
return reverse_answer_lookup[descriptive_answer]
def eval_mc(monet_latents, question_json):
stride, rem = divmod(monet_latents.shape[0], NUM_FRAMES)
monet_latents = monet_latents[None, :-rem:stride]
assert monet_latents.shape[1] == NUM_FRAMES
question = encode_sentence(
token_map, question_json["question"], modellib.MAX_QUESTION_LENGTH)
choices = encode_choices(
token_map, question_json["choices"])
mc_answer = sess.run(output_mc, feed_dict={
inputs_mc["monet_latents"]: monet_latents,
inputs_mc["question"]: np.expand_dims(question, axis=0),
inputs_mc["choices"]: np.expand_dims(choices, axis=0),
})
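    # The model emits one logit per choice; a positive logit means the model
    # predicts that choice is correct.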
return mc_answer >= 0
sample_scene_idx = _SCENE_IDX.value
question_json = questions_data[sample_scene_idx]["questions"][0]
print("Descriptive Question: ", question_json["question"])
print("Model Answer: ",
eval_descriptive(load_monet_latents(base_dir, sample_scene_idx),
question_json))
print("True Answer: ", question_json["answer"])
question_json = questions_data[sample_scene_idx]["questions"][-1]
print("Multiple-Choice Question: ", question_json["question"])
for i, choice_json in enumerate(question_json["choices"]):
print(f"{i+1}) {choice_json['choice']}")
print("Model Answer: ",
eval_mc(load_monet_latents(base_dir, sample_scene_idx), question_json))
print("True Answer: ",
[choice_json["answer"] for choice_json in question_json["choices"]])
if __name__ == "__main__":
app.run(main)
| deepmind-research-master | object_attention_for_reasoning/run_model.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model code. Provided settings are identical to what was used in the paper."""
import sonnet as snt
import tensorflow.compat.v1 as tf
from object_attention_for_reasoning import transformer
QUESTION_VOCAB_SIZE = 82
ANSWER_VOCAB_SIZE = 22
MAX_QUESTION_LENGTH = 20
MAX_CHOICE_LENGTH = 12
NUM_CHOICES = 4
EMBED_DIM = 16
PRETRAINED_MODEL_CONFIG = dict(
use_relative_positions=True,
shuffle_objects=True,
transformer_layers=28,
head_size=128,
num_heads=10,
embed_dim=EMBED_DIM,
)
def append_ids(tensor, id_vector, axis):
id_vector = tf.constant(id_vector, tf.float32)
for a in range(len(tensor.shape)):
if a != axis:
id_vector = tf.expand_dims(id_vector, axis=a)
tiling_vector = [s if i != axis else 1 for i, s in enumerate(tensor.shape)]
id_tensor = tf.tile(id_vector, tiling_vector)
return tf.concat([tensor, id_tensor], axis=axis)
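# Illustrative example: appending id_vector [1, 0] along axis 2 to a tensor
# of shape [B, N, D] produces shape [B, N, D + 2], tagging every position
# with the same two-dimensional type marker (used below to distinguish
# language tokens from vision slots).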
class ClevrerTransformerModel(object):
"""Model from Ding et al. 2020 (https://arxiv.org/abs/2012.08508)."""
def __init__(self, use_relative_positions, shuffle_objects,
transformer_layers, num_heads, head_size, embed_dim):
"""Instantiate Sonnet modules."""
self._embed_dim = embed_dim
self._embed = snt.Embed(QUESTION_VOCAB_SIZE, embed_dim - 2)
self._shuffle_objects = shuffle_objects
self._memory_transformer = transformer.TransformerTower(
value_size=embed_dim + 2,
num_heads=num_heads,
num_layers=transformer_layers,
use_relative_positions=use_relative_positions,
causal=False)
self._final_layer_mc = snt.Sequential(
[snt.Linear(head_size), tf.nn.relu, snt.Linear(1)])
self._final_layer_descriptive = snt.Sequential(
[snt.Linear(head_size), tf.nn.relu,
snt.Linear(ANSWER_VOCAB_SIZE)])
self._dummy = tf.get_variable("dummy", [embed_dim + 2], tf.float32,
initializer=tf.zeros_initializer)
self._infill_linear = snt.Linear(embed_dim + 2)
self._mask_embedding = tf.get_variable(
"mask", [embed_dim + 2], tf.float32, initializer=tf.zeros_initializer)
def _apply_transformers(self, lang_embedding, vision_embedding):
"""Applies transformer to language and vision input.
Args:
lang_embedding: tensor,
vision_embedding: tensor, "validation", or "test".
Returns:
tuple, output at dummy token, all output embeddings, infill loss
"""
def _unroll(tensor):
"""Unroll the time dimension into the object dimension."""
return tf.reshape(
tensor, [tensor.shape[0], -1, tensor.shape[3]])
words = append_ids(lang_embedding, [1, 0], axis=2)
dummy_word = tf.tile(self._dummy[None, None, :], [tf.shape(words)[0], 1, 1])
vision_embedding = append_ids(vision_embedding, [0, 1], axis=3)
vision_over_time = _unroll(vision_embedding)
transformer_input = tf.concat([dummy_word, words, vision_over_time], axis=1)
output, _ = self._memory_transformer(transformer_input,
is_training=False)
return output[:, 0, :]
def apply_model_descriptive(self, inputs):
"""Applies model to CLEVRER descriptive questions.
Args:
inputs: dict of form: {
"question": tf.int32 tensor of shape [batch, MAX_QUESTION_LENGTH],
"monet_latents": tf.float32 tensor of shape [batch, frames, 8, 16],
}
Returns:
Tensor of shape [batch, ANSWER_VOCAB_SIZE], representing logits for each
possible answer word.
"""
question = inputs["question"]
# Shape: [batch, question_len, embed_dim-2]
question_embedding = self._embed(question)
# Shape: [batch, question_len, embed_dim]
question_embedding = append_ids(question_embedding, [0, 1], 2)
choices_embedding = self._embed(
tf.zeros([question.shape[0], MAX_CHOICE_LENGTH], tf.int64))
choices_embedding = append_ids(choices_embedding, [0, 1], 2)
    # Shape: [batch, question_len + choice_len, embed_dim]
lang_embedding = tf.concat([question_embedding, choices_embedding], axis=1)
# Shape: [batch, frames, num_objects, embed_dim]
vision_embedding = inputs["monet_latents"]
if self._shuffle_objects:
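      # tf.random.shuffle permutes axis 0, so transpose the object dimension
      # to the front, shuffle, and transpose back: object slot identity is
      # randomized, but consistently across frames and the batch.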
vision_embedding = tf.transpose(vision_embedding, [2, 1, 0, 3])
vision_embedding = tf.random.shuffle(vision_embedding)
vision_embedding = tf.transpose(vision_embedding, [2, 1, 0, 3])
output = self._apply_transformers(lang_embedding, vision_embedding)
output = self._final_layer_descriptive(output)
return output
def apply_model_mc(self, inputs):
"""Applies model to CLEVRER multiple-choice questions.
Args:
inputs: dict of form: {
"question": tf.int32 tensor of shape [batch, MAX_QUESTION_LENGTH],
"choices": tf.int32 tensor of shape [batch, 4, MAX_CHOICE_LENGTH],
"monet_latents": tf.float32 tensor of shape [batch, frames, 8, 16],
}
Returns:
Tensor of shape [batch, 4], representing logits for each choice
"""
question = inputs["question"]
choices = inputs["choices"]
# Shape: [batch, question_len, embed_dim-2]
question_embedding = self._embed(question)
# Shape: [batch, question_len, embed_dim]
question_embedding = append_ids(question_embedding, [1, 0], 2)
# Shape: [batch, choices, choice_len, embed_dim-2]
choices_embedding = snt.BatchApply(self._embed)(choices)
# Shape: [batch, choices, choice_len, embed_dim]
choices_embedding = append_ids(choices_embedding, [0, 1], 3)
# Shape: [batch, choices, question_len + choice_len, embed_dim]
lang_embedding = tf.concat([
tf.tile(question_embedding[:, None],
[1, choices_embedding.shape[1], 1, 1]),
choices_embedding], axis=2)
# Shape: [batch, frames, num_objects, embed_dim]
vision_embedding = inputs["monet_latents"]
if self._shuffle_objects:
vision_embedding = tf.transpose(vision_embedding, [2, 1, 0, 3])
vision_embedding = tf.random.shuffle(vision_embedding)
vision_embedding = tf.transpose(vision_embedding, [2, 1, 0, 3])
output_per_choice = []
for c in range(NUM_CHOICES):
output = self._apply_transformers(
lang_embedding[:, c, :, :], vision_embedding)
output_per_choice.append(output)
output = tf.stack(output_per_choice, axis=1)
output = tf.squeeze(snt.BatchApply(self._final_layer_mc)(output), axis=2)
return output
| deepmind-research-master | object_attention_for_reasoning/model.py |
# Fork of Sonnet transformer model with small modifications
#
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Implementation of Transformer networks.
Size glossary:
* Batch size (B).
* Sequence length (N).
* Memory size (M). The size of the optional memory, passed in via `state`.
* Number of heads (H): the number of attention heads.
* Value size (V): the size of each value embedding per head.
* Key size (K): the size of each key embedding per head. Equally, the size
of each query embedding per head. Typically K <= V.
* Embedding size (HV). The size of the activation or embedding relating to
each input between layers. Equal to value_size * num_heads.
* All attention size (F). The size of all attention activations over every
head.
* QKV size (F / H): The size of the query, key and value per head. Equal to
2K + V or equivalently F / H.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
from sonnet.python.modules import base
from sonnet.python.modules import basic
from sonnet.python.modules import layer_norm as snt_ln
from sonnet.python.modules import util
from sonnet.python.modules.nets import mlp as snt_mlp
import tensorflow.compat.v1 as tf
AttentionState = collections.namedtuple('AttentionState',
('queries', 'keys', 'values', 'logits',
'weights', 'embeddings', 'read_words'))
CompressedMemoryState = collections.namedtuple(
'CompressedMemoryState', ('episodic_memory', 'compressed_memory', 'index'))
def rel_shift(position_logits):
"""Shifting of logits for relative attention.
Args:
position_logits: A tensor of shape [B, H, N, N + M].
Returns:
The shifted logits. Example, for input (H=1, B=1):
[5, 4, 3, 2, 1]
[5, 4, 3, 2, 1]
[5, 4, 3, 2, 1]
[5, 4, 3, 2, 1]
[5, 4, 3, 2, 1]
the function outputs:
[1, 0, 5, 4, 3]
[2, 1, 0, 5, 4]
[3, 2, 1, 0, 5]
[4, 3, 2, 1, 0]
[5, 4, 3, 2, 1]
Raises:
ValueError if position_logits is not 4D.
Note: this is not an exact shift as the upper triangle is non-zero. This
works as intended in the causally-masked case. If this is used with un-masked
attention, we'd want these to also be zero.
"""
if position_logits.get_shape().ndims != 4:
raise ValueError('Expected 4D position logits.')
input_shape = position_logits.shape
batch_size = input_shape[0]
num_heads = input_shape[1]
t1 = input_shape[2]
t2 = input_shape[3]
# We prepend zeros on the final timescale dimension.
to_pad = tf.zeros([batch_size, num_heads, t1, 1])
position_logits = tf.concat([to_pad, position_logits], -1)
# Reshape trick to shift input.
position_logits = tf.reshape(position_logits,
[batch_size, num_heads, t2 + 1, t1])
# Remove extra time dimension and re-shape.
position_logits = position_logits[:, :, 1:]
position_logits = tf.reshape(position_logits, input_shape)
return position_logits
def _layer_norm(inputs):
if inputs.get_shape().ndims > 2:
return basic.BatchApply(snt_ln.LayerNorm())(inputs)
else:
return snt_ln.LayerNorm()(inputs)
def _concat_and_slice(prev_memory, new_memory):
original_memory_size = prev_memory.get_shape().as_list()[1]
concat_memory = tf.concat([prev_memory, new_memory], 1)
memory = concat_memory[:, -original_memory_size:]
return memory, concat_memory
def simple_attention(queries, keys, values):
logits = tf.matmul(queries, keys, transpose_b=True)
weights = tf.nn.softmax(logits)
return tf.matmul(weights, values)
class ResidualDropoutWrapper(base.AbstractModule):
"""Wrapper class that applies residual connections, dropout and layer norm.
By default applies a relu to the module output before the other operations.
"""
def __init__(self,
layer,
dropout_rate,
layer_norm='input',
name='residual_dropout_wrapper'):
self._module = layer
self._dropout_rate = dropout_rate
self._layer_norm = layer_norm
super(ResidualDropoutWrapper, self).__init__(name=name)
def _build(self, inputs, *args, **kwargs):
if self._layer_norm in ('both', 'input'):
normed_inputs = _layer_norm(inputs)
else:
normed_inputs = inputs
module_output = self._module(normed_inputs, *args, **kwargs)
module_state = None
# If module outputs multiple items, assumes (output, state) tuple.
if isinstance(module_output, tuple):
module_output, module_state = module_output
if kwargs['is_training']: # kwargs must contain is_training.
module_output = tf.nn.dropout(module_output, rate=self._dropout_rate)
output = inputs + module_output
if self._layer_norm in ('both', 'output'):
output = _layer_norm(output)
if module_state is None:
return output
else:
return output, module_state
def future_mask(chunk_size, dtype):
"""Creates attention mask to ensure an element i cannot attend to j > i."""
square = tf.ones([chunk_size, chunk_size], dtype=dtype)
# Create upper diagonal matrix and remove diagonal entries (allow self-attn).
mask = tf.matrix_band_part(square, 0, -1) - tf.matrix_band_part(square, 0, 0)
# Multiply by -1e6 and expand to broadcast with [B, H, N, N] logits.
mask = -1e6 * tf.reshape(mask, [1, 1, chunk_size, chunk_size])
return mask
def _memory_size(state):
if isinstance(state, CompressedMemoryState):
return (state.episodic_memory.get_shape().as_list()[1] +
state.compressed_memory.get_shape().as_list()[1])
else:
return state.get_shape().as_list()[1]
def create_mask(inputs, state, equal_window):
"""Creates mask for future sequence positions.
Args:
inputs: inputs tensor of shape [B, N, D]
state: optional tensor of shape [B, M, D], CompressedMemoryState or a list
where the ith entry corresponds to the ith layer's state.
equal_window: if True, then each activation has an equally-sized attention
window of length 'M'. This only makes sense if a state is given.
Returns:
Float tensor of shape [1, 1, N, N + M], to be summed with logits.
"""
chunk_size = inputs.get_shape().as_list()[1]
dtype = inputs.dtype
mask = future_mask(chunk_size, dtype)
if state is not None:
if isinstance(state, (tuple, list)):
largest_memory_layer = np.argmax([_memory_size(s) for s in state])
state = state[largest_memory_layer]
mem_size = _memory_size(state)
mask = tf.concat(
[tf.zeros([1, 1, chunk_size, mem_size], dtype=dtype), mask], 3)
if equal_window:
attn_mask = tf.ones([chunk_size, chunk_size], dtype=dtype)
mask_dia = tf.cast(tf.matrix_band_part(attn_mask, 0, 0), dtype=dtype)
mask_l = tf.cast(tf.matrix_band_part(attn_mask, -1, 0), dtype=dtype)
start_mask = tf.reshape(mask_l - mask_dia,
[1, 1, chunk_size, chunk_size]) * -1e6
mask = tf.concat(
[mask[:, :, :, :chunk_size] + start_mask, mask[:, :, :, chunk_size:]],
3)
return mask
def default_mlp(hidden_sizes, activate_final=False, init_std=2., **kwargs):
"""Standard batch-applied MLP for transformer modules."""
init = {'w': tf.variance_scaling_initializer(init_std, distribution='normal')}
mlp = snt_mlp.MLP(
hidden_sizes,
activate_final=activate_final,
use_dropout=True,
initializers=init,
**kwargs)
return basic.BatchApply(mlp)
def get_position_encodings(sequence_length,
hidden_size,
clamp_value,
max_timescale=10000.,
min_timescale=2.0):
"""Creates sinusoidal encodings of shape [1, N + M, D]."""
# NOTE: when not using relative position encodings, min_timescale must be 2.0
# and hidden_size must be an even number. Otherwise, the dimensions do not
# match.
pos_seq = tf.range(sequence_length - 1, -1, -1.0)
if clamp_value > 0:
pos_seq = tf.minimum(pos_seq, clamp_value)
freqs = tf.range(0, hidden_size, min_timescale)
inv_freq = 1 / (max_timescale**(freqs / hidden_size))
sinusoid_inp = tf.einsum('i,j->ij', pos_seq, inv_freq)
pos_emb = tf.concat([tf.sin(sinusoid_inp), tf.cos(sinusoid_inp)], -1)
pos_emb = tf.expand_dims(pos_emb, 0)
output_dim = pos_emb.get_shape().as_list()[-1]
if output_dim != hidden_size:
raise ValueError(
'position embedding dimension ({}) does not match that of the input ({}).'
.format(output_dim, hidden_size))
return pos_emb
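# Illustrative example: with hidden_size = 8 and min_timescale = 2, `freqs`
# is [0, 2, 4, 6], `sinusoid_inp` has 4 columns, and concatenating sin and
# cos restores the full 8 dimensions, matching hidden_size.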
class MultiheadAttention(base.AbstractModule):
"""Implements multi-head attention with optional state context."""
def __init__(self,
value_size,
key_size,
num_heads,
mask=None,
scaling=True,
positional_encodings=None,
use_relative_positions=False,
init_std=2.,
name='multihead_attention'):
"""Creates a MultiheadAttention module.
Args:
value_size: V parameter. See size glossary in class docstring.
key_size: K parameter. See size glossary in class docstring.
num_heads: The number of independent queries per timestep.
mask: Optional mask to attention logits. This can prevent attending to
future positions or unused memory slots.
scaling: Whether to scale the attention logits.
positional_encodings: Either None (none given), or an iterable of
`(key_positional_encodings, query_positional_encodings)` tuples, where
the first encodings in the list indicate the oldest entries in memory
and the final encodings indicate the newest entries in memory and the
sequence.
use_relative_positions: If True then relative positions are incorporated,
vs absolute, into the attention logits. This is done exactly as
described in the TransformerXL, Dai et al. 2019.
init_std: scaling of standard deviation for weight matrices init.
name: Name of module.
"""
super(MultiheadAttention, self).__init__(name=name)
self._value_size = value_size
self._key_size = key_size
self._sizes = {
'value': self._value_size,
'key': self._key_size,
'query': self._key_size,
'relative_keys': self._key_size,
'relative_keys_0': self._key_size,
}
self._num_heads = num_heads
self._mask = mask
self._scaling = scaling
self._positional_encodings = positional_encodings
self._use_relative_positions = use_relative_positions
self._init = {'w': tf.variance_scaling_initializer(init_std)}
@util.reuse_variables
def multihead_linear(self, inputs, name):
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
hidden_size = self._sizes[name]
input_size = inputs.shape[-1].value
w = tf.get_variable(
'linear/w',
shape=[input_size, self._num_heads * hidden_size],
initializer=self._init['w'])
w = tf.reshape(w, [input_size, self._num_heads, hidden_size])
out = tf.einsum('bij,jhk->bhik', inputs, w)
return out
def _build(self,
inputs,
query_inputs=None,
state=None,
is_training=False,
dropout_keep_prob=0.5,
key_value_inputs=None):
"""Calculates multi-layer self attention.
Args:
inputs: Tensor of shape [batch_size, num_steps, output_dim_size]. Inputs
used as the query, key, and value to the attention layer.
query_inputs: optional Tensor of shape [batch_size, num_steps,
output_dim_size]. Query inputs to the attention layer. Set when
query_inputs is different from the inputs argument.
state: optional CompressedMemoryState or a Tensor of shape [batch_size,
memory_size, dim_size] concatenated to the inputs. Set when attend to
the memory from previous steps.
is_training: if currently training.
dropout_keep_prob: dropout rate applied to attention weights.
key_value_inputs: optional Tensor of shape [batch_size, num_steps,
output_dim_size]. It is used as the key and value of the multihead
attention. Set when the key and value are different from the inputs
argument.
Returns:
output: the result Tensor of shape
[batch_size, num_steps, output_dim_size].
attention_state: named tuple of AttentionState.
"""
if key_value_inputs is not None and state is not None:
raise ValueError('Only one of the key_value_input and state is needed.')
embedding_size = self._value_size * self._num_heads
q_inputs = inputs if query_inputs is None else query_inputs
# Denoted by L. If query_inputs is None, L = N.
_, query_size = q_inputs.get_shape().as_list()[:2]
if key_value_inputs is not None:
k_inputs = key_value_inputs
v_inputs = k_inputs
elif state is not None:
if isinstance(state, CompressedMemoryState):
state_memory_list = [state.compressed_memory, state.episodic_memory]
else:
state_memory_list = [state]
k_inputs = tf.concat(state_memory_list + [inputs], 1)
v_inputs = k_inputs
else:
k_inputs = inputs
v_inputs = inputs
# Batch size denoted by B
batch_size = tf.shape(inputs)[0]
# Chunk_size denoted by N
chunk_size = inputs.get_shape().as_list()[1]
# Denoted by N + M
att_size = k_inputs.get_shape().as_list()[1]
if self._positional_encodings and not self._use_relative_positions:
if len(self._positional_encodings) != 1:
raise ValueError(
'Absolute positional encodings only supported for 1 memory. '
'Found %i.' % len(self._positional_encodings))
key_positions, query_positions = self._positional_encodings[0]
k_inputs += key_positions
q_inputs += query_positions
# [B, H, L, K]
q = self.multihead_linear(q_inputs, 'query')
# [B, H, N + M, K]
k = self.multihead_linear(k_inputs, 'key')
# [B, H, N + M, V]
v = self.multihead_linear(v_inputs, 'value')
# Scaling the dot-product
if self._scaling:
q *= self._key_size**-0.5
# [B, H, L, N + M]
if self._use_relative_positions:
r_w_bias = tf.get_variable(
'r_w_bias', [1, self._num_heads, 1, self._key_size],
dtype=inputs.dtype)
content_logits = tf.matmul(q + r_w_bias, k, transpose_b=True)
all_relative_logits = []
# Loop over multiple positional encodings, for the case of multiple
# memory types.
for i, positional_encodings in enumerate(self._positional_encodings):
key_positions, query_positions = positional_encodings
if key_positions.get_shape().as_list()[-1] != att_size:
key_positions = key_positions[:, -att_size:] # Crop to layer mem size
is_final = i == len(self._positional_encodings) - 1
suffix = '' if is_final else '_%d' % i
relative_keys = self.multihead_linear(
key_positions, name='relative_keys' + suffix)
# [B, H, N, D]
r_r_bias = tf.get_variable(
'r_r_bias' + suffix, [1, self._num_heads, 1, self._key_size],
dtype=inputs.dtype)
relative_keys = tf.tile(relative_keys, [batch_size, 1, 1, 1])
relative_logits = tf.matmul(
q + r_r_bias, relative_keys, transpose_b=True)
relative_logits = rel_shift(relative_logits)
if not is_final: # Include relative positions for input sequence.
relative_logits = relative_logits[:, :, :, :-chunk_size]
all_relative_logits.append(relative_logits)
all_relative_logits = tf.concat(all_relative_logits, 3)
logits = content_logits + all_relative_logits
else:
# [B, H, N, N + M]
logits = tf.matmul(q, k, transpose_b=True)
content_logits = logits
if self._mask is not None:
if self._mask.get_shape().as_list()[-1] != att_size:
mask = self._mask[:, :, :, -att_size:]
else:
mask = self._mask
logits += mask
weights = tf.nn.softmax(logits)
if is_training:
weights = tf.nn.dropout(weights, dropout_keep_prob)
# [B, L, H, V], where V is value_size
output_transpose = tf.einsum('bhij,bhjk->bihk', weights, v)
# [B, L, H, V] -> [B, L, HV]
attended_inputs = basic.BatchReshape([query_size, embedding_size])(
output_transpose)
# Apply final mlp to mix information between heads.
output = basic.BatchApply(basic.Linear(embedding_size))(attended_inputs)
attention_state = AttentionState(
queries=q,
keys=k,
values=v,
weights=weights,
logits=content_logits,
embeddings=inputs,
read_words=output)
return output, attention_state
class TransformerTower(base.AbstractModule):
"""Transformer tower.
Deep residual network using blocks of attention and MLPs, specified in
Vaswani et al. 2017.
"""
def __init__(self,
value_size,
num_heads,
num_layers,
causal=True,
key_size=None,
shared_attention=False,
output_size=None,
               mlp_hidden_sizes=(1024,),
dropout_rate=0.1,
use_relative_positions=True,
clamp_time_range=0,
same_attention_length=False,
layer_norm='input',
name='transformer_tower'):
"""Initializes TransformerTower.
Args:
value_size: dimensionality of values per-head.
num_heads: number of attention heads.
num_layers: number of transformer blocks, where each block contains a
multi-head attention layer and an MLP.
causal: if True, applies a causal mask.
      key_size: optional dimensionality of keys. If unspecified, it is set to
        `value_size`.
shared_attention: if True, attention params are shared across all layers.
output_size: if set, the desired output dimensionality. By default the
output size is `value_size` x `num_heads`.
mlp_hidden_sizes: tuple containing dimensionality of mlp layer(s). If
multiple values are specified, the mlp contains multiple layers for each
transformer block.
dropout_rate: dropout rate applied to hidden activations, attention, and
positional encodings.
      use_relative_positions: if False, applies absolute positional encodings.
        If True, uses relative positional encodings from Dai et al. 2019.
clamp_time_range: clamps max temporal positional encoding if specified.
      same_attention_length: if True, attention is masked so that each
        position attends over the same number of positions.
layer_norm: Where to apply layer-norm in Transformer block. Can be one of
'input' (Vaswani et al. 2017), 'output', or 'both'.
name: name of variable scope.
"""
super(TransformerTower, self).__init__(name=name)
self._causal = causal
self._mask = None
if key_size is None:
key_size = value_size
self._key_size = key_size
self._value_size = value_size
self._shared_attention = shared_attention
self._num_heads = num_heads
self._num_layers = num_layers
self._output_size = output_size
self._embedding_size = self._value_size * self._num_heads
self._mlp_hidden_sizes = list(mlp_hidden_sizes) + [self._embedding_size]
self._multihead_attention = None
self._object_embeddings = None
self._dropout_rate = dropout_rate
self._positional_encodings = None
self._use_relative_positions = use_relative_positions
self._clamp_time_range = clamp_time_range
self._same_attention_length = same_attention_length
self._layer_norm = layer_norm
self._attention_modules = []
self._object_mlps = []
def get_sublayers(self, is_training):
if self._multihead_attention is None or not self._shared_attention:
attention_module = MultiheadAttention(
value_size=self._value_size,
key_size=self._key_size,
num_heads=self._num_heads,
mask=self._mask,
positional_encodings=self._positional_encodings,
use_relative_positions=self._use_relative_positions,
init_std=2. / np.sqrt(self._num_layers),
)
self._multihead_attention = ResidualDropoutWrapper(
attention_module, self._dropout_rate, layer_norm=self._layer_norm)
mlp = default_mlp(
self._mlp_hidden_sizes, init_std=2. / np.sqrt(self._num_layers))
object_mlp = ResidualDropoutWrapper(
mlp, self._dropout_rate, layer_norm=self._layer_norm)
self._attention_modules.append(attention_module)
self._object_mlps.append(mlp)
return self._multihead_attention, object_mlp
def _build(self,
inputs,
state=None,
condition=None,
is_training=True,
final_layer_key_value_inputs=None):
"""Calculates multi-layer self attention and mlp transformation.
Args:
inputs: Tensor of shape [batch_size, num_steps, dim_size].
state: optional list of length num_layers of tensors of shape
[batch_size, memory_size, dim_size].
condition: optional tensor to condition on. The shape is shape
[batch_size, dim_size].
is_training: If true, dropout is applied.
final_layer_key_value_inputs: optional Tensor to be used as the key and
value for the final multi-head attention layer of shape
[batch_size, num_steps, dim_size]. Useful when the tower is a Seq2Seq
decoder and it can attend to encoder outputs.
Returns:
output: tensor of shape [batch_size, num_steps, output_dim_size].
state: list of length `num_layers` containing AttentionState tuples.
"""
# inputs: [B, N, F]
if final_layer_key_value_inputs is not None and state is not None and len(
state) == (self._num_layers - 1):
      raise ValueError('When final_layer_key_value_inputs is set, exclude '
                       'the state of the last layer.')
if condition is not None:
condition_tile = tf.tile(
tf.expand_dims(condition, 1), [1, tf.shape(inputs)[1], 1])
inputs = tf.concat([inputs, condition_tile], -1)
# Map inputs to be of `embedding_size` dimension.
if inputs.get_shape().as_list()[-1] != self._embedding_size:
inputs = default_mlp([self._embedding_size], activate_final=True)(
inputs,
is_training=is_training,
dropout_keep_prob=1 - self._dropout_rate)
if state is None:
memory_sizes = [0]
elif isinstance(state[0], CompressedMemoryState):
cm_mem_size = max(_memory_size(s.compressed_memory) for s in state)
em_mem_size = max(_memory_size(s.episodic_memory) for s in state)
memory_sizes = [cm_mem_size, em_mem_size]
else:
memory_sizes = [max([_memory_size(s) for s in state])]
chunk_size = inputs.get_shape().as_list()[1]
self._positional_encodings = []
# Creates positional encodings for different memory types.
for i, memory_size in enumerate(memory_sizes):
seq_len = chunk_size + memory_size
key_positions = get_position_encodings(
sequence_length=seq_len,
hidden_size=inputs.get_shape().as_list()[2],
clamp_value=self._clamp_time_range,
)
if is_training:
key_positions = tf.nn.dropout(key_positions, rate=self._dropout_rate)
key_positions = tf.cast(key_positions, dtype=inputs.dtype)
query_positions = key_positions[:, -chunk_size:, :]
self._positional_encodings.append((key_positions, query_positions))
if self._causal:
self._mask = create_mask(inputs, state, self._same_attention_length)
layer_i_inputs = inputs
attention_states = []
key_value_inputs = None
for i in range(self._num_layers):
with tf.variable_scope('layer_%d' % i, reuse=tf.AUTO_REUSE):
multihead_attention, object_mlp = self.get_sublayers(is_training)
# Multihead attention with residuals.
state_i = None if state is None else state[i]
if i == (self._num_layers -
1) and final_layer_key_value_inputs is not None:
          # When final_layer_key_value_inputs is set, the final layer of
          # attention uses it as the key & value, so no state is needed.
key_value_inputs = final_layer_key_value_inputs
state_i = None
attention_outputs, attention_state = multihead_attention(
layer_i_inputs,
state=state_i,
is_training=is_training,
dropout_keep_prob=1. - self._dropout_rate,
key_value_inputs=key_value_inputs)
attention_states.append(attention_state)
# Feed-forward with residuals.
output = object_mlp(
attention_outputs,
is_training=is_training,
dropout_keep_prob=1 - self._dropout_rate)
layer_i_inputs = output
if self._output_size is not None:
      output = basic.BatchApply(
          basic.Linear(self._output_size, use_bias=False))(output)
return output, attention_states
def attention_module(self, i):
"""Returns the i-th layer attention module."""
return self._attention_modules[i]
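def _transformer_tower_example():
  """Minimal usage sketch for `TransformerTower`.

  An illustrative example, not part of the original module. It assumes a
  TF1-style graph is being built, matching the `tf.get_variable` /
  `tf.variable_scope` API used throughout this file.
  """
  tower = TransformerTower(value_size=32, num_heads=4, num_layers=2)
  # [B, N, F] with F == num_heads * value_size, so no input projection is
  # needed before the first block.
  inputs = tf.placeholder(tf.float32, shape=[8, 16, 128])
  output, attention_states = tower(inputs, is_training=True)
  assert output.get_shape().as_list() == [8, 16, 128]
  assert len(attention_states) == 2  # One AttentionState per layer.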
| deepmind-research-master | object_attention_for_reasoning/transformer.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenizer implementation mapping strings to their UTF-8 bytes."""
from typing import Union
import numpy as np
class BytesTokenizer:
"""Tokenizes string to utf-8 bytes."""
def __init__(self):
self._num_reserved_tokens = 6 # PAD, BOS, EOS, MASK, CLS, SEP
def to_string(self, inputs: np.ndarray) -> str:
inputs_no_special = (
inputs[inputs >= self._num_reserved_tokens] - self._num_reserved_tokens)
decoded_bytes = inputs_no_special.astype(np.uint8).tobytes()
return decoded_bytes.decode('utf-8', errors='replace')
def to_int(self, inputs: Union[str, bytes]) -> np.ndarray:
if isinstance(inputs, str):
inputs = inputs.encode('utf-8')
encoded = np.frombuffer(inputs, np.uint8).astype(np.int32)
encoded = encoded + self._num_reserved_tokens
return encoded.astype(np.int32)
@property
def vocab_size(self) -> int:
return 256 + self._num_reserved_tokens
@property
def pad_token(self) -> int:
return 0
@property
def bos_token(self) -> int:
return 1
@property
def eos_token(self) -> int:
return 2
@property
def mask_token(self) -> int:
return 3
@property
def cls_token(self) -> int:
return 4
@property
def sep_token(self) -> int:
return 5
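def _bytes_tokenizer_example():
  """Minimal round-trip sketch (illustrative, not part of the original file)."""
  tokenizer = BytesTokenizer()
  ids = tokenizer.to_int('hello')  # Each UTF-8 byte, shifted up by 6.
  wrapped = np.concatenate([[tokenizer.bos_token], ids, [tokenizer.eos_token]])
  # to_string drops the reserved tokens before decoding back to text.
  assert tokenizer.to_string(wrapped) == 'hello'
  assert tokenizer.vocab_size == 262  # 256 byte values + 6 reserved tokens.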
| deepmind-research-master | perceiver/bytes_tokenizer.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Perceiver architecture and components."""
import abc
import math
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
from perceiver import io_processors
from perceiver import position_encoding
# -----------------------------------------------------------
# ---------------------- Primitives -----------------------
# -----------------------------------------------------------
def attend(q, k, v, dropout_prob=0.0, attention_mask=None):
"""Computes multi-head attention using a query, key and value.
Args:
q: Query with shape [batch, q_indices, num_heads, head_dim].
k: Key with shape [batch, kv_indices, num_heads, head_dim].
v: Value with shape [batch, kv_indices, num_heads, head_dim].
dropout_prob: dropout probability on the attention weights.
    attention_mask: Array of shape [batch, q_indices, kv_indices] indicating
      which attentions are valid.
Returns:
Output of the attention with shape [batch, q_indices, hiddens]
"""
batch, q_indices, num_heads, q_head_dim = q.shape
_, _, _, v_head_dim = v.shape
hiddens = num_heads * v_head_dim
attention = jnp.einsum('bthd,bThd->bhtT', q, k)
scale = 1. / math.sqrt(q_head_dim)
attention *= scale
if attention_mask is not None:
# Use large_k instead of np.NINF because np.NINF breaks for causal-masked
# left-padded sampling.
large_k = jnp.array(1e4 if attention.dtype == jnp.float16 else 1e30,
dtype=attention.dtype)
attention = jnp.where(attention_mask[:, None, :, :], attention,
-large_k)
normalized = jax.nn.softmax(attention)
if dropout_prob > 0:
normalized = hk.dropout(hk.next_rng_key(), dropout_prob, normalized)
summed = jnp.einsum('bhtT,bThd->bthd', normalized, v)
summed = jnp.reshape(summed, [batch, q_indices, hiddens])
if attention_mask is not None:
    # If an entire row of logits is masked (i.e. a query attends to no valid
    # tokens), the softmax yields a uniform row and we would obtain non-zero
    # outputs where they should be zero. Force those outputs to zero.
wipe_attn = jnp.all(
attention_mask == 0, axis=2, keepdims=True) # shape (B, T, 1)
summed = jnp.where(wipe_attn, jnp.zeros_like(summed), summed)
return summed
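def _attend_shape_example():
  """Illustrative shape check for `attend` (not part of the original file).

  With dropout_prob=0 and no mask, `attend` is a pure jnp function, so it can
  run outside of an hk.transform.
  """
  q = jnp.ones([2, 5, 8, 16])  # [batch, q_indices, num_heads, head_dim]
  k = jnp.ones([2, 7, 8, 16])  # [batch, kv_indices, num_heads, head_dim]
  v = jnp.ones([2, 7, 8, 16])
  out = attend(q, k, v)
  assert out.shape == (2, 5, 8 * 16)  # hiddens = num_heads * v_head_dim.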
def conv_1d(
output_channels,
init_scale=1.0,
with_bias=True,
name=None):
"""A 1D convolution."""
return hk.Linear(
output_size=output_channels,
with_bias=with_bias,
w_init=hk.initializers.VarianceScaling(init_scale),
name=name)
def layer_norm(x, name=None):
return hk.LayerNorm(axis=-1, create_scale=True, create_offset=True,
name=name)(x)
def make_cross_attention_mask(query_mask, kv_mask):
batch_size, query_len = query_mask.shape
_, key_len = kv_mask.shape
mask = jax.vmap(jnp.outer)(query_mask, kv_mask)
assert mask.shape == (batch_size, query_len, key_len)
return mask
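def _cross_attention_mask_example():
  """Illustrative example for `make_cross_attention_mask` (not part of the
  original file): the mask is a per-example outer product."""
  query_mask = jnp.array([[1, 1, 0]])  # [batch=1, query_len=3]
  kv_mask = jnp.array([[1, 0]])  # [batch=1, key_len=2]
  mask = make_cross_attention_mask(query_mask, kv_mask)
  assert mask.shape == (1, 3, 2)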
# -----------------------------------------------------------
# ----------------------- Modules -------------------------
# -----------------------------------------------------------
class Attention(hk.Module):
"""Multi-headed {cross, self}-attention."""
def __init__(self,
num_heads=8,
init_scale=1.0,
with_final_bias=True,
final_init_scale_multiplier=1.,
dropout_prob=0.0,
qk_channels=None,
v_channels=None,
output_channels=None,
name=None):
super(Attention, self).__init__(name=name)
self._num_heads = num_heads
self._init_scale = init_scale
self._with_final_bias = with_final_bias
self._final_init_scale = final_init_scale_multiplier * init_scale
self._dropout_prob = dropout_prob
# If none of these are passed, the Q input determines the output shape:
self._qk_channels = qk_channels
self._v_channels = v_channels
self._output_channels = output_channels
def __call__(self, inputs_q, inputs_kv, attention_mask=None):
# Q and K must have the same number of channels.
# Default to preserving Q's input's shape.
if self._qk_channels is None:
self._qk_channels = inputs_q.shape[-1]
# V's num_channels determines the shape of the output of QKV-attention.
# Default to the same number of channels used in the key-query operation.
if self._v_channels is None:
self._v_channels = self._qk_channels
# Project the output of QKV attention to a desired number of channels.
# Default to the same number as the output of the QKV attention operation.
if self._output_channels is None:
self._output_channels = self._v_channels
if self._qk_channels % self._num_heads != 0:
raise ValueError(f'qk_channels ({self._qk_channels}) must be divisible by'
f' num_heads ({self._num_heads}).')
if self._v_channels % self._num_heads != 0:
raise ValueError(f'v_channels ({self._v_channels}) must be divisible by'
f' num_heads ({self._num_heads}).')
qk_channels_per_head = self._qk_channels // self._num_heads
v_channels_per_head = self._v_channels // self._num_heads
# Project QKV to a common feature dimension.
q = conv_1d(self._qk_channels, init_scale=self._init_scale)(inputs_q)
k = conv_1d(self._qk_channels, init_scale=self._init_scale)(inputs_kv)
v = conv_1d(self._v_channels, init_scale=self._init_scale)(inputs_kv)
# Reshape channels for multi-head attention.
batch, q_time, _ = q.shape
_, kv_time, _ = k.shape
q = jnp.reshape(q, [batch, q_time, self._num_heads, qk_channels_per_head])
k = jnp.reshape(k, [batch, kv_time, self._num_heads, qk_channels_per_head])
v = jnp.reshape(v, [batch, kv_time, self._num_heads, v_channels_per_head])
result = attend(q, k, v, dropout_prob=self._dropout_prob,
attention_mask=attention_mask)
return conv_1d(
self._output_channels,
with_bias=self._with_final_bias,
init_scale=self._final_init_scale)(result)
class MLP(hk.Module):
"""A Transformer-style dense module to follow attention."""
def __init__(self,
widening_factor=4,
dropout_prob=0.0,
init_scale=1.,
name=None):
super(MLP, self).__init__(name=name)
self._widening_factor = widening_factor
self._dropout_prob = dropout_prob
self._init_scale = init_scale
def __call__(self, x, *, is_training):
dropout_prob = self._dropout_prob if is_training else 0.0
output_channels = x.shape[-1]
x = conv_1d(
output_channels=self._widening_factor * output_channels,
init_scale=self._init_scale)(x)
x = jax.nn.gelu(x)
x = conv_1d(
output_channels=output_channels,
init_scale=self._init_scale)(x)
return hk.dropout(hk.next_rng_key(), dropout_prob, x)
class SelfAttention(hk.Module):
"""A self-attention module, including a dense block."""
def __init__(self,
widening_factor=4,
dropout_prob=0.0,
dropout_attn_prob=0.0,
num_heads=8,
att_init_scale=1.0,
dense_init_scale=1.0,
qk_channels=None,
v_channels=None,
name=None):
super(SelfAttention, self).__init__(name=name)
self._widening_factor = widening_factor
self._dropout_prob = dropout_prob
self._dropout_attn_prob = dropout_attn_prob
self._num_heads = num_heads
self._att_init_scale = att_init_scale
self._dense_init_scale = dense_init_scale
self._qk_channels = qk_channels
self._v_channels = v_channels
def __call__(self,
inputs,
*,
attention_mask=None,
is_training):
dropout_prob = self._dropout_prob if is_training else 0.0
dropout_attn_prob = self._dropout_attn_prob if is_training else 0.0
x = inputs
qkv_inputs = layer_norm(inputs)
attention = Attention(
num_heads=self._num_heads,
init_scale=self._att_init_scale,
qk_channels=self._qk_channels,
v_channels=self._v_channels,
dropout_prob=dropout_attn_prob)(qkv_inputs, qkv_inputs,
attention_mask=attention_mask)
attention = hk.dropout(hk.next_rng_key(), dropout_prob, attention)
x += attention
x += MLP(
widening_factor=self._widening_factor,
dropout_prob=dropout_prob,
init_scale=self._dense_init_scale)(
layer_norm(x), is_training=is_training)
return x
class CrossAttention(hk.Module):
"""A cross-attention module, including a dense block."""
def __init__(self,
widening_factor=1,
dropout_prob=0.0,
dropout_attn_prob=0.0,
num_heads=8,
att_init_scale=1.0,
dense_init_scale=1.0,
shape_for_attn='kv',
use_query_residual=True,
qk_channels=None,
v_channels=None,
name=None):
super(CrossAttention, self).__init__(name=name)
self._widening_factor = widening_factor
self._dropout_prob = dropout_prob
self._dropout_attn_prob = dropout_attn_prob
self._num_heads = num_heads
self._att_init_scale = att_init_scale
self._dense_init_scale = dense_init_scale
self._shape_for_attn = shape_for_attn
self._use_query_residual = use_query_residual
self._qk_channels = qk_channels
self._v_channels = v_channels
def __call__(self,
inputs_q,
inputs_kv,
*,
attention_mask=None,
is_training):
dropout_prob = self._dropout_prob if is_training else 0.0
dropout_attn_prob = self._dropout_attn_prob if is_training else 0.0
output_channels = inputs_q.shape[-1]
if self._shape_for_attn == 'q':
qk_channels = inputs_q.shape[-1]
elif self._shape_for_attn == 'kv':
qk_channels = inputs_kv.shape[-1]
else:
raise ValueError(f'Unknown value {self._shape_for_attn} for '
'shape_for_attention.')
v_channels = None
if self._qk_channels is not None:
qk_channels = self._qk_channels
if self._v_channels is not None:
v_channels = self._v_channels
attention = Attention(
num_heads=self._num_heads,
init_scale=self._att_init_scale,
dropout_prob=dropout_attn_prob,
qk_channels=qk_channels,
v_channels=v_channels,
output_channels=output_channels)(layer_norm(inputs_q),
layer_norm(inputs_kv),
attention_mask=attention_mask)
attention = hk.dropout(hk.next_rng_key(), dropout_prob, attention)
# Optionally include a residual to the query.
# Consider omitting the residual if the semantics of query and output
# are different, e.g. if queries are positions and outputs are pixels.
if self._use_query_residual:
x = inputs_q + attention
else:
x = attention
x += MLP(
widening_factor=self._widening_factor,
dropout_prob=dropout_prob,
init_scale=self._dense_init_scale)(
layer_norm(x), is_training=is_training)
return x
# -----------------------------------------------------------
# ----------------------- Perceiver -----------------------
# -----------------------------------------------------------
class Perceiver(hk.Module):
"""The Perceiver: a scalable, fully attentional architecture."""
def __init__(
self,
encoder,
decoder,
input_preprocessor=None,
output_postprocessor=None,
name='perceiver'):
super().__init__(name=name)
# Feature and task parameters:
self._input_preprocessor = input_preprocessor
self._output_postprocessor = output_postprocessor
self._decoder = decoder
self._encoder = encoder
def __call__(self, inputs, *, is_training, subsampled_output_points=None,
pos=None, input_mask=None, query_mask=None):
if self._input_preprocessor:
network_input_is_1d = self._encoder._input_is_1d
inputs, modality_sizes, inputs_without_pos = self._input_preprocessor(
inputs, pos=pos, is_training=is_training,
network_input_is_1d=network_input_is_1d)
else:
modality_sizes = None
inputs_without_pos = None
# Get the queries for encoder and decoder cross-attends.
encoder_query = self._encoder.latents(inputs)
decoder_query = self._decoder.decoder_query(
inputs, modality_sizes, inputs_without_pos,
subsampled_points=subsampled_output_points)
# Run the network forward:
z = self._encoder(inputs, encoder_query,
is_training=is_training, input_mask=input_mask)
_, output_modality_sizes = self._decoder.output_shape(
inputs)
output_modality_sizes = output_modality_sizes or modality_sizes
outputs = self._decoder(
decoder_query, z, is_training=is_training, query_mask=query_mask)
if self._output_postprocessor:
outputs = self._output_postprocessor(outputs, is_training=is_training,
modality_sizes=output_modality_sizes)
return outputs
class PerceiverEncoder(hk.Module):
"""The Perceiver Encoder: a scalable, fully attentional encoder."""
def __init__(
self,
# The encoder has a total of
# num_self_attends_per_block * num_blocks
# self-attend layers. We share weights between blocks.
num_self_attends_per_block=6,
num_blocks=8,
z_index_dim=512,
num_z_channels=1024,
qk_channels=None,
v_channels=None,
num_cross_attend_heads=1,
num_self_attend_heads=8,
cross_attend_widening_factor=1,
self_attend_widening_factor=1,
dropout_prob=0.0,
z_pos_enc_init_scale=0.02,
cross_attention_shape_for_attn='kv',
use_query_residual=True,
name='perceiver_encoder'):
super().__init__(name=name)
# Check that we can use multihead-attention with these shapes.
if num_z_channels % num_self_attend_heads != 0:
raise ValueError(f'num_z_channels ({num_z_channels}) must be divisible by'
f' num_self_attend_heads ({num_self_attend_heads}).')
if num_z_channels % num_cross_attend_heads != 0:
raise ValueError(f'num_z_channels ({num_z_channels}) must be divisible by'
f' num_cross_attend_heads ({num_cross_attend_heads}).')
self._input_is_1d = True
self._num_blocks = num_blocks
# Construct the latent array initial state.
self.z_pos_enc = position_encoding.TrainablePositionEncoding(
index_dim=z_index_dim,
num_channels=num_z_channels,
init_scale=z_pos_enc_init_scale)
# Construct the cross attend:
self.cross_attend = CrossAttention(
dropout_prob=dropout_prob,
num_heads=num_cross_attend_heads,
widening_factor=cross_attend_widening_factor,
shape_for_attn=cross_attention_shape_for_attn,
qk_channels=qk_channels,
v_channels=v_channels,
use_query_residual=use_query_residual)
# Construct the block of self-attend layers.
# We get deeper architectures by applying this block more than once.
self.self_attends = []
for _ in range(num_self_attends_per_block):
self_attend = SelfAttention(
num_heads=num_self_attend_heads,
dropout_prob=dropout_prob,
qk_channels=qk_channels,
v_channels=v_channels,
widening_factor=self_attend_widening_factor)
self.self_attends.append(self_attend)
def latents(self, inputs):
# Initialize the latent array for the initial cross-attend.
return self.z_pos_enc(batch_size=inputs.shape[0])
def __call__(self, inputs, z, *, is_training, input_mask=None):
attention_mask = None
if input_mask is not None:
attention_mask = make_cross_attention_mask(
query_mask=jnp.ones(z.shape[:2], dtype=jnp.int32),
kv_mask=input_mask)
z = self.cross_attend(z, inputs, is_training=is_training,
attention_mask=attention_mask)
for _ in range(self._num_blocks):
for self_attend in self.self_attends:
z = self_attend(z, is_training=is_training)
return z
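def _perceiver_encoder_example():
  """Illustrative sketch (not part of the original file).

  Encodes pre-embedded inputs of shape [batch, num_points, channels] into a
  [batch, z_index_dim, num_z_channels] latent under hk.transform.
  """
  def forward(inputs):
    encoder = PerceiverEncoder(
        num_self_attends_per_block=1,
        num_blocks=1,
        z_index_dim=8,
        num_z_channels=32,
        num_self_attend_heads=4)
    return encoder(inputs, encoder.latents(inputs), is_training=False)

  forward_fn = hk.transform(forward)
  inputs = jnp.zeros([2, 100, 16])
  params = forward_fn.init(jax.random.PRNGKey(0), inputs)
  z = forward_fn.apply(params, jax.random.PRNGKey(0), inputs)
  assert z.shape == (2, 8, 32)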
class AbstractPerceiverDecoder(hk.Module, metaclass=abc.ABCMeta):
"""Abstract Perceiver decoder."""
@abc.abstractmethod
def decoder_query(self, inputs, modality_sizes=None, inputs_without_pos=None,
subsampled_points=None):
raise NotImplementedError
@abc.abstractmethod
def output_shape(self, inputs):
raise NotImplementedError
@abc.abstractmethod
def __call__(self, query, z, *, is_training, query_mask=None):
raise NotImplementedError
class ProjectionDecoder(AbstractPerceiverDecoder):
"""Baseline projection decoder (no cross-attention)."""
def __init__(
self,
num_classes,
final_avg_before_project=False,
name='projection_decoder'):
super().__init__(name=name)
self._final_avg_before_project = final_avg_before_project
self._num_classes = num_classes
self.final_layer = hk.Linear(
num_classes, w_init=jnp.zeros, name='logits')
def decoder_query(self, inputs, modality_sizes=None, inputs_without_pos=None,
subsampled_points=None):
return None
def output_shape(self, inputs):
return ((inputs.shape[0], self._num_classes), None)
def __call__(self, query, z, *, is_training, query_mask=None):
# b x n_z x c -> b x c
z = jnp.mean(z, axis=1, dtype=z.dtype)
# b x c -> b x n_logits
logits = self.final_layer(z)
return logits
class BasicDecoder(AbstractPerceiverDecoder):
"""Cross-attention-based decoder."""
def __init__(self,
output_num_channels,
position_encoding_type='trainable',
# Ignored if position_encoding_type == 'none':
output_index_dims=None,
subsampled_index_dims=None,
num_z_channels=1024,
qk_channels=None,
v_channels=None,
use_query_residual=False,
output_w_init=None,
concat_preprocessed_input=False,
num_heads=1,
name='basic_decoder',
final_project=True,
**position_encoding_kwargs):
super().__init__(name=name)
self._position_encoding_type = position_encoding_type
# If `none`, the decoder will not construct any position encodings.
    # You should construct your own when querying the decoder.
self.output_pos_enc = None
if self._position_encoding_type != 'none':
self.output_pos_enc = position_encoding.build_position_encoding(
position_encoding_type,
index_dims=output_index_dims,
**position_encoding_kwargs)
self._output_index_dim = output_index_dims
if subsampled_index_dims is None:
subsampled_index_dims = output_index_dims
self._subsampled_index_dims = subsampled_index_dims
self._output_num_channels = output_num_channels
self._output_w_init = output_w_init
self._use_query_residual = use_query_residual
self._qk_channels = qk_channels
self._v_channels = v_channels
self._final_project = final_project
self._num_heads = num_heads
self._concat_preprocessed_input = concat_preprocessed_input
def output_shape(self, inputs):
    return ((inputs.shape[0], self._subsampled_index_dims,
             self._output_num_channels), None)
def decoder_query(self, inputs, modality_sizes=None,
inputs_without_pos=None, subsampled_points=None):
assert self._position_encoding_type != 'none' # Queries come from elsewhere
if subsampled_points is not None:
# unravel_index returns a tuple (x_idx, y_idx, ...)
# stack to get the [n, d] tensor of coordinates
pos = jnp.stack(
jnp.unravel_index(subsampled_points, self._output_index_dim),
axis=1)
# Map these coordinates to [-1, 1]
pos = -1 + 2 * pos / jnp.array(self._output_index_dim)[None, :]
pos = jnp.broadcast_to(pos[None],
[inputs.shape[0], pos.shape[0], pos.shape[1]])
pos_emb = self.output_pos_enc(
batch_size=inputs.shape[0],
pos=pos)
pos_emb = jnp.reshape(pos_emb, [pos_emb.shape[0], -1, pos_emb.shape[-1]])
else:
pos_emb = self.output_pos_enc(batch_size=inputs.shape[0])
if self._concat_preprocessed_input:
if inputs_without_pos is None:
raise ValueError('Value is required for inputs_without_pos if'
' concat_preprocessed_input is True')
pos_emb = jnp.concatenate([inputs_without_pos, pos_emb], axis=-1)
return pos_emb
def __call__(self, query, z, *, is_training,
query_mask=None):
# Cross-attention decoding.
# key, value: B x N x K; query: B x M x K
# Attention maps -> B x N x M
# Output -> B x M x K
# Construct cross attention and linear layer lazily, in case we don't need
# them.
attention_mask = None
if query_mask is not None:
attention_mask = make_cross_attention_mask(
query_mask=query_mask,
kv_mask=jnp.ones(z.shape[:2], dtype=jnp.int32))
decoding_cross_attn = CrossAttention(
dropout_prob=0.0,
num_heads=self._num_heads,
widening_factor=1,
shape_for_attn='kv',
qk_channels=self._qk_channels,
v_channels=self._v_channels,
use_query_residual=self._use_query_residual)
final_layer = hk.Linear(
self._output_num_channels, w_init=self._output_w_init, name='output')
output = decoding_cross_attn(query, z, is_training=is_training,
attention_mask=attention_mask)
if self._final_project:
output = final_layer(output)
return output
class ClassificationDecoder(AbstractPerceiverDecoder):
"""Cross-attention based classification decoder.
Light-weight wrapper of `BasicDecoder` for logit output.
"""
def __init__(self,
num_classes,
name='classification_decoder',
**decoder_kwargs):
super().__init__(name=name)
self._num_classes = num_classes
self.decoder = BasicDecoder(
output_index_dims=(1,), # Predict a single logit array.
output_num_channels=num_classes,
**decoder_kwargs)
def decoder_query(self, inputs, modality_sizes=None,
inputs_without_pos=None, subsampled_points=None):
return self.decoder.decoder_query(inputs, modality_sizes,
inputs_without_pos,
subsampled_points=subsampled_points)
def output_shape(self, inputs):
return (inputs.shape[0], self._num_classes), None
def __call__(self, query, z, *, is_training, query_mask=None):
# B x 1 x num_classes -> B x num_classes
logits = self.decoder(query, z, is_training=is_training)
return logits[:, 0, :]
class MultimodalDecoder(AbstractPerceiverDecoder):
"""Multimodal decoding by composing uni-modal decoders.
The modalities argument of the constructor is a dictionary mapping modality
name to the decoder of that modality. That decoder will be used to construct
queries for that modality. However, there is a shared cross attention across
all modalities, using the concatenated per-modality query vectors.
"""
def __init__(self, modalities, num_outputs, output_num_channels,
min_padding_size=2,
subsampled_index_dims=None,
name='multimodal_decoder', **decoder_kwargs):
super().__init__(name=name)
self._modalities = modalities
self._subsampled_index_dims = subsampled_index_dims
self._min_padding_size = min_padding_size
self._output_num_channels = output_num_channels
self._num_outputs = num_outputs
self._decoder = BasicDecoder(
output_index_dims=(num_outputs,),
output_num_channels=output_num_channels,
position_encoding_type='none',
**decoder_kwargs)
def decoder_query(self, inputs, modality_sizes, inputs_without_pos=None,
subsampled_points=None):
# Partition the flat inputs among the different modalities
inputs = io_processors.restructure(modality_sizes, inputs)
# Obtain modality-specific decoders' queries
subsampled_points = subsampled_points or dict()
decoder_queries = dict()
for modality, decoder in self._modalities.items():
# Get input_without_pos for this modality if it exists.
input_without_pos = None
if inputs_without_pos is not None:
input_without_pos = inputs_without_pos.get(modality, None)
decoder_queries[modality] = decoder.decoder_query(
inputs=inputs[modality],
modality_sizes=None,
inputs_without_pos=input_without_pos,
subsampled_points=subsampled_points.get(modality, None)
)
    # Pad all queries with trainable position encodings so that they all have
    # the same number of channels.
num_channels = (max(query.shape[2] for query in decoder_queries.values())
+ self._min_padding_size)
def embed(modality, x):
x = jnp.reshape(x, [x.shape[0], np.prod(x.shape[1:-1]), x.shape[-1]])
pos = position_encoding.TrainablePositionEncoding(
1, num_channels=num_channels - x.shape[2],
init_scale=0.02, name=f'{modality}_padding')(x.shape[0])
pos = jnp.broadcast_to(
pos, [x.shape[0], x.shape[1], num_channels - x.shape[2]])
return jnp.concatenate([x, pos], axis=2)
# Apply a predictable ordering to the modalities
return jnp.concatenate([
embed(modality, decoder_queries[modality])
for modality in sorted(self._modalities.keys())
], axis=1)
def output_shape(self, inputs):
if self._subsampled_index_dims is not None:
subsampled_index_dims = sum(self._subsampled_index_dims.values())
else:
subsampled_index_dims = self._num_outputs
return ((inputs.shape[0], subsampled_index_dims, self._output_num_channels),
self._subsampled_index_dims)
def __call__(self, query, z, *, is_training, query_mask=None):
    # -> B x num_outputs x output_num_channels
return self._decoder(query, z, is_training=is_training)
class BasicVideoAutoencodingDecoder(AbstractPerceiverDecoder):
"""Cross-attention based video-autoencoding decoder.
Light-weight wrapper of `BasicDecoder` with video reshaping logic.
"""
def __init__(self,
output_shape,
position_encoding_type,
name='basic_video_autoencoding_decoder',
**decoder_kwargs):
super().__init__(name=name)
if len(output_shape) != 4: # B, T, H, W
raise ValueError(f'Expected rank 4 output_shape, got {output_shape}.')
# Build the decoder components:
self._output_shape = output_shape
self._output_num_channels = decoder_kwargs['output_num_channels']
self.decoder = BasicDecoder(
output_index_dims=self._output_shape[1:4], # T*H*W
position_encoding_type=position_encoding_type,
**decoder_kwargs)
def decoder_query(self, inputs, modality_sizes=None,
inputs_without_pos=None, subsampled_points=None):
return self.decoder.decoder_query(inputs,
modality_sizes=modality_sizes,
inputs_without_pos=inputs_without_pos,
subsampled_points=subsampled_points)
def output_shape(self, inputs):
return ([inputs.shape[0]] + self._output_shape[1:] +
[self._output_num_channels], None)
def __call__(self, query, z, *, is_training, query_mask=None):
output = self.decoder(query, z, is_training=is_training)
output = jnp.reshape(output, self._output_shape + [output.shape[-1]])
return output
class FlowDecoder(AbstractPerceiverDecoder):
"""Cross-attention based flow decoder."""
def __init__(self,
output_image_shape,
output_num_channels=2,
rescale_factor=100.0,
name='flow_decoder',
**decoder_kwargs):
super().__init__(name=name)
self._output_image_shape = output_image_shape
self._output_num_channels = output_num_channels
self._rescale_factor = rescale_factor
self.decoder = BasicDecoder(
output_num_channels=output_num_channels,
**decoder_kwargs)
def output_shape(self, inputs):
# The channel dimensions of output here don't necessarily correspond to
# (u, v) of flow: they may contain dims needed for the post-processor.
return ((inputs.shape[0],) + tuple(self._output_image_shape) + (
self._output_num_channels,), None)
def decoder_query(
self, inputs, modality_sizes=None, inputs_without_pos=None,
subsampled_points=None):
if subsampled_points is not None:
raise ValueError("FlowDecoder doesn't support subsampling yet.")
# assumes merged in time
return inputs
def __call__(self, query, z, *, is_training, query_mask=None):
# Output flow and rescale.
preds = self.decoder(query, z, is_training=is_training)
preds /= self._rescale_factor
return preds.reshape([preds.shape[0]] + list(self._output_image_shape) +
[preds.shape[-1]])
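def _perceiver_classifier_example():
  """Illustrative end-to-end sketch (not part of the original file).

  Wires a small PerceiverEncoder and ClassificationDecoder together under
  hk.transform. Inputs are assumed to be pre-embedded, i.e. no input
  preprocessor is used.
  """
  def forward(inputs):
    encoder = PerceiverEncoder(
        num_self_attends_per_block=2,
        num_blocks=1,
        z_index_dim=16,
        num_z_channels=64,
        num_self_attend_heads=4)
    decoder = ClassificationDecoder(
        num_classes=10,
        trainable_position_encoding_kwargs=dict(num_channels=64))
    model = Perceiver(encoder=encoder, decoder=decoder)
    return model(inputs, is_training=False)

  forward_fn = hk.transform(forward)
  inputs = jnp.zeros([2, 32, 8])  # [batch, num_points, channels]
  params = forward_fn.init(jax.random.PRNGKey(0), inputs)
  logits = forward_fn.apply(params, jax.random.PRNGKey(0), inputs)
  assert logits.shape == (2, 10)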
| deepmind-research-master | perceiver/perceiver.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Position encodings and utilities."""
import abc
import functools
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
def generate_fourier_features(
pos, num_bands, max_resolution=(224, 224),
concat_pos=True, sine_only=False):
"""Generate a Fourier frequency position encoding with linear spacing.
Args:
pos: The position of n points in d dimensional space.
A jnp array of shape [n, d].
num_bands: The number of bands (K) to use.
    max_resolution: The maximum resolution (i.e. the number of pixels per
      dim). A tuple giving the resolution for each dimension.
    concat_pos: Whether to concatenate the raw input positions to the Fourier
      features.
    sine_only: Whether to use a single phase (sin) or two (sin/cos) for each
      frequency band.
Returns:
    embedding: A jnp array of shape [n, n_channels]. If concat_pos is True
      and sine_only is False, output dimensions are ordered as:
[dim_1, dim_2, ..., dim_d,
sin(pi*f_1*dim_1), ..., sin(pi*f_K*dim_1), ...,
sin(pi*f_1*dim_d), ..., sin(pi*f_K*dim_d),
cos(pi*f_1*dim_1), ..., cos(pi*f_K*dim_1), ...,
cos(pi*f_1*dim_d), ..., cos(pi*f_K*dim_d)],
where dim_i is pos[:, i] and f_k is the kth frequency band.
"""
min_freq = 1.0
# Nyquist frequency at the target resolution:
freq_bands = jnp.stack([
jnp.linspace(min_freq, res / 2, num=num_bands, endpoint=True)
for res in max_resolution], axis=0)
# Get frequency bands for each spatial dimension.
# Output is size [n, d * num_bands]
per_pos_features = pos[:, :, None] * freq_bands[None, :, :]
per_pos_features = jnp.reshape(per_pos_features,
[-1, np.prod(per_pos_features.shape[1:])])
if sine_only:
# Output is size [n, d * num_bands]
per_pos_features = jnp.sin(jnp.pi * (per_pos_features))
else:
# Output is size [n, 2 * d * num_bands]
per_pos_features = jnp.concatenate(
[jnp.sin(jnp.pi * per_pos_features),
jnp.cos(jnp.pi * per_pos_features)], axis=-1)
# Concatenate the raw input positions.
if concat_pos:
# Adds d bands to the encoding.
per_pos_features = jnp.concatenate([pos, per_pos_features], axis=-1)
return per_pos_features
def build_linear_positions(index_dims, output_range=(-1.0, 1.0)):
"""Generate an array of position indices for an N-D input array.
Args:
index_dims: The shape of the index dimensions of the input array.
output_range: The min and max values taken by each input index dimension.
Returns:
A jnp array of shape [index_dims[0], index_dims[1], .., index_dims[-1], N].
"""
def _linspace(n_xels_per_dim):
return jnp.linspace(
output_range[0], output_range[1],
num=n_xels_per_dim,
endpoint=True, dtype=jnp.float32)
dim_ranges = [
_linspace(n_xels_per_dim) for n_xels_per_dim in index_dims]
array_index_grid = jnp.meshgrid(*dim_ranges, indexing='ij')
return jnp.stack(array_index_grid, axis=-1)
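def _fourier_features_example():
  """Illustrative shape check (not part of the original file)."""
  # 16 points on a 4x4 grid, each with 2 coordinates in [-1, 1].
  pos = jnp.reshape(build_linear_positions((4, 4)), [-1, 2])
  feats = generate_fourier_features(pos, num_bands=8, max_resolution=(4, 4))
  # 2 raw dims + 2 dims * 8 bands * 2 phases (sin, cos) = 34 channels.
  assert feats.shape == (16, 34)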
class AbstractPositionEncoding(hk.Module, metaclass=abc.ABCMeta):
"""Abstract Perceiver decoder."""
@abc.abstractmethod
def __call__(self, batch_size, pos):
raise NotImplementedError
class TrainablePositionEncoding(AbstractPositionEncoding):
"""Trainable position encoding."""
def __init__(self, index_dim, num_channels=128, init_scale=0.02, name=None):
super(TrainablePositionEncoding, self).__init__(name=name)
self._index_dim = index_dim
self._num_channels = num_channels
self._init_scale = init_scale
def __call__(self, batch_size, pos=None):
del pos # Unused.
pos_embs = hk.get_parameter(
'pos_embs', [self._index_dim, self._num_channels],
init=hk.initializers.TruncatedNormal(stddev=self._init_scale))
if batch_size is not None:
pos_embs = jnp.broadcast_to(
pos_embs[None, :, :], (batch_size,) + pos_embs.shape)
return pos_embs
def _check_or_build_spatial_positions(pos, index_dims, batch_size):
"""Checks or builds spatial position features (x, y, ...).
Args:
pos: None, or an array of position features. If None, position features
are built. Otherwise, their size is checked.
index_dims: An iterable giving the spatial/index size of the data to be
featurized.
batch_size: The batch size of the data to be featurized.
Returns:
    An array of position features of shape
    [batch_size, prod(index_dims), len(index_dims)].
"""
if pos is None:
pos = build_linear_positions(index_dims)
pos = jnp.broadcast_to(pos[None], (batch_size,) + pos.shape)
pos = jnp.reshape(pos, [batch_size, np.prod(index_dims), -1])
else:
# Just a warning label: you probably don't want your spatial features to
# have a different spatial layout than your pos coordinate system.
# But feel free to override if you think it'll work!
assert pos.shape[-1] == len(index_dims)
return pos
class FourierPositionEncoding(AbstractPositionEncoding):
"""Fourier (Sinusoidal) position encoding."""
def __init__(self, index_dims, num_bands, concat_pos=True,
max_resolution=None, sine_only=False, name=None):
super(FourierPositionEncoding, self).__init__(name=name)
self._num_bands = num_bands
self._concat_pos = concat_pos
self._sine_only = sine_only
self._index_dims = index_dims
# Use the index dims as the maximum resolution if it's not provided.
self._max_resolution = max_resolution or index_dims
def __call__(self, batch_size, pos=None):
pos = _check_or_build_spatial_positions(pos, self._index_dims, batch_size)
build_ff_fn = functools.partial(
generate_fourier_features,
num_bands=self._num_bands,
max_resolution=self._max_resolution,
concat_pos=self._concat_pos,
sine_only=self._sine_only)
return jax.vmap(build_ff_fn, 0, 0)(pos)
class PositionEncodingProjector(AbstractPositionEncoding):
"""Projects a position encoding to a target size."""
def __init__(self, output_size, base_position_encoding, name=None):
super(PositionEncodingProjector, self).__init__(name=name)
self._output_size = output_size
self._base_position_encoding = base_position_encoding
def __call__(self, batch_size, pos=None):
base_pos = self._base_position_encoding(batch_size, pos)
projected_pos = hk.Linear(output_size=self._output_size)(base_pos)
return projected_pos
def build_position_encoding(
position_encoding_type,
index_dims,
project_pos_dim=-1,
trainable_position_encoding_kwargs=None,
fourier_position_encoding_kwargs=None,
name=None):
"""Builds the position encoding."""
if position_encoding_type == 'trainable':
assert trainable_position_encoding_kwargs is not None
output_pos_enc = TrainablePositionEncoding(
# Construct 1D features:
index_dim=np.prod(index_dims),
name=name,
**trainable_position_encoding_kwargs)
elif position_encoding_type == 'fourier':
assert fourier_position_encoding_kwargs is not None
output_pos_enc = FourierPositionEncoding(
index_dims=index_dims,
name=name,
**fourier_position_encoding_kwargs)
else:
raise ValueError(f'Unknown position encoding: {position_encoding_type}.')
if project_pos_dim > 0:
# Project the position encoding to a target dimension:
output_pos_enc = PositionEncodingProjector(
output_size=project_pos_dim,
base_position_encoding=output_pos_enc)
return output_pos_enc
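def _build_position_encoding_example():
  """Illustrative sketch (not part of the original file): both encoding types
  yield [batch, prod(index_dims), channels]-shaped features."""
  def forward():
    trainable = build_position_encoding(
        'trainable', index_dims=(4, 4),
        trainable_position_encoding_kwargs=dict(num_channels=8))
    fourier = build_position_encoding(
        'fourier', index_dims=(4, 4),
        fourier_position_encoding_kwargs=dict(num_bands=4))
    return trainable(batch_size=2), fourier(batch_size=2)

  forward_fn = hk.transform(forward)
  params = forward_fn.init(jax.random.PRNGKey(0))
  trainable_pos, fourier_pos = forward_fn.apply(params, None)
  assert trainable_pos.shape == (2, 16, 8)
  # 2 raw dims + 2 dims * 4 bands * 2 phases = 18 channels.
  assert fourier_pos.shape == (2, 16, 18)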
| deepmind-research-master | perceiver/position_encoding.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for io_processors."""
import numpy as np
import tensorflow as tf
from perceiver import io_processors
def _create_test_image(shape):
image = np.arange(np.prod(np.array(shape)))
return np.reshape(image, shape)
def test_space_to_depth_image():
image_shape = (2, 3 * 5, 3 * 7, 11)
image = _create_test_image(image_shape)
output = io_processors.space_to_depth(image, spatial_block_size=3)
assert output.shape == (2, 5, 7, 3 * 3 * 11)
def test_space_to_depth_video():
image_shape = (2, 5 * 7, 3 * 11, 3 * 13, 17)
image = _create_test_image(image_shape)
output = io_processors.space_to_depth(image, spatial_block_size=3,
temporal_block_size=5)
assert output.shape == (2, 7, 11, 13, 5 * 3 * 3 * 17)
def test_reverse_space_to_depth_image():
image_shape = (2, 5, 7, 3 * 3 * 11)
image = _create_test_image(image_shape)
output = io_processors.reverse_space_to_depth(image, spatial_block_size=3)
assert output.shape == (2, 3 * 5, 3 * 7, 11)
def test_reverse_space_to_depth_video():
image_shape = (2, 7, 11, 13, 5 * 3 * 3 * 17)
image = _create_test_image(image_shape)
output = io_processors.reverse_space_to_depth(
image, spatial_block_size=3, temporal_block_size=5)
assert output.shape == (2, 5 * 7, 3 * 11, 3 * 13, 17)
def test_extract_patches():
image_shape = (2, 5, 7, 3)
image = _create_test_image(image_shape)
sizes = [1, 2, 3, 1]
strides = [1, 1, 2, 1]
rates = [1, 2, 1, 1]
for padding in ["VALID", "SAME"]:
jax_patches = io_processors.extract_patches(
image, sizes=sizes, strides=strides, rates=rates, padding=padding)
tf_patches = tf.image.extract_patches(
image, sizes=sizes, strides=strides, rates=rates, padding=padding)
assert np.array_equal(
np.array(jax_patches),
tf_patches.numpy())
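def test_patches_for_flow():
  # Additional shape check (an illustrative sketch, not part of the original
  # test suite): patches_for_flow pads H and W by 1 and extracts 3x3 patches
  # per frame, so spatial dims are preserved and channels grow 9x.
  image_shape = (2, 2, 5, 7, 3)  # [B, T, H, W, C]
  image = _create_test_image(image_shape).astype(np.float32)
  output = io_processors.patches_for_flow(image)
  assert output.shape == (2, 2, 5, 7, 3 * 3 * 3)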
| deepmind-research-master | perceiver/io_processors_test.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IO pre- and post-processors for Perceiver."""
import functools
import math
from typing import Any, Callable, Mapping, Optional, Sequence, Tuple
import einops
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
from perceiver import position_encoding
ModalitySizeT = Mapping[str, int]
PreprocessorOutputT = Tuple[jnp.ndarray, Optional[jnp.ndarray], jnp.ndarray]
PreprocessorT = Callable[..., PreprocessorOutputT]
PostprocessorT = Callable[..., Any]
def reverse_space_to_depth(
frames: jnp.ndarray,
temporal_block_size: int = 1,
spatial_block_size: int = 1) -> jnp.ndarray:
"""Reverse space to depth transform."""
if len(frames.shape) == 4:
return einops.rearrange(
frames, 'b h w (dh dw c) -> b (h dh) (w dw) c',
dh=spatial_block_size, dw=spatial_block_size)
elif len(frames.shape) == 5:
return einops.rearrange(
frames, 'b t h w (dt dh dw c) -> b (t dt) (h dh) (w dw) c',
dt=temporal_block_size, dh=spatial_block_size, dw=spatial_block_size)
else:
raise ValueError(
'Frames should be of rank 4 (batch, height, width, channels)'
' or rank 5 (batch, time, height, width, channels)')
def space_to_depth(
frames: jnp.ndarray,
temporal_block_size: int = 1,
spatial_block_size: int = 1) -> jnp.ndarray:
"""Space to depth transform."""
if len(frames.shape) == 4:
return einops.rearrange(
frames, 'b (h dh) (w dw) c -> b h w (dh dw c)',
dh=spatial_block_size, dw=spatial_block_size)
elif len(frames.shape) == 5:
return einops.rearrange(
frames, 'b (t dt) (h dh) (w dw) c -> b t h w (dt dh dw c)',
dt=temporal_block_size, dh=spatial_block_size, dw=spatial_block_size)
else:
raise ValueError(
'Frames should be of rank 4 (batch, height, width, channels)'
' or rank 5 (batch, time, height, width, channels)')
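def _space_to_depth_example():
  """Illustrative round-trip sketch (not part of the original file)."""
  frames = jnp.zeros([2, 8, 8, 3])
  packed = space_to_depth(frames, spatial_block_size=2)
  assert packed.shape == (2, 4, 4, 2 * 2 * 3)
  restored = reverse_space_to_depth(packed, spatial_block_size=2)
  assert restored.shape == frames.shape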
def extract_patches(images: jnp.ndarray,
sizes: Sequence[int],
strides: Sequence[int],
rates: Sequence[int],
padding: str = 'VALID') -> jnp.ndarray:
"""Extract patches from images.
  This function is a wrapper around jax.lax.conv_general_dilated_patches
  that conforms to the same interface as tf.image.extract_patches.
The function extracts patches of shape sizes from the input images in the same
manner as a convolution with kernel of shape sizes, stride equal to strides,
and the given padding scheme.
The patches are stacked in the channel dimension.
Args:
images: input batch of images of shape [B, H, W, C].
sizes: size of extracted patches. Must be [1, size_rows, size_cols, 1].
strides: strides, must be [1, stride_rows, stride_cols, 1].
rates: sampling rate (as in dilated convolutions),
must be [1, rate_rows, rate_cols, 1].
padding: padding algorithm to use.
Returns:
Tensor of shape [B, patch_rows, patch_cols, size_rows * size_cols * C]
"""
if len(sizes) != 4 or sizes[0] != 1 or sizes[3] != 1:
raise ValueError(
f'Shape of sizes must be [1, size_rows, size_cols, 1], got {sizes}.')
if len(strides) != 4 or strides[0] != 1 or strides[3] != 1:
raise ValueError(
        f'Shape of strides must be [1, stride_rows, stride_cols, 1], '
f'got {strides}.')
if len(rates) != 4 or rates[0] != 1 or rates[3] != 1:
raise ValueError(
        f'Shape of rates must be [1, rate_rows, rate_cols, 1], got {rates}.')
if images.ndim != 4:
raise ValueError(
f'Rank of images must be 4 (got tensor of shape {jnp.shape(images)})')
# Rearrange axes of images to NCHW for conv_general_dilated_patches
images = einops.rearrange(images, 'n h w c -> n c h w')
channels = images.shape[1]
patches = jax.lax.conv_general_dilated_patches(
images, sizes[1:-1], strides[1:-1], padding, rhs_dilation=rates[1:-1])
# conv_general_dilated_patches returns patches in channel-major order.
# Rearrange to match interface of tf.image.extract_patches.
patches = einops.rearrange(patches, 'n (c ph pw) h w -> n h w (ph pw c)',
c=channels, ph=sizes[1], pw=sizes[2])
return patches
def patches_for_flow(inputs: jnp.ndarray) -> jnp.ndarray:
"""Extract 3x3x2 image patches for flow inputs."""
def pad_and_extract_patches(inputs):
padded_inputs = jnp.pad(inputs, [[0, 0], [1, 1], [1, 1], [0, 0]],
mode='constant')
return extract_patches(
padded_inputs,
sizes=[1, 3, 3, 1],
strides=[1, 1, 1, 1],
padding='VALID',
rates=[1, 1, 1, 1])
return jax.vmap(pad_and_extract_patches, in_axes=1, out_axes=1)(inputs)
# ------------------------------------------------------------
# ------------------- Up/down-sampling ---------------------
# ------------------------------------------------------------
class Conv2DDownsample(hk.Module):
"""Downsamples 4x by applying a 2D convolution and doing max pooling."""
def __init__(
self,
num_layers: int = 1,
num_channels: int = 64,
use_batchnorm: bool = True,
bn_config: Optional[Mapping[str, float]] = None,
name: Optional[str] = None,
):
"""Constructs a Conv2DDownsample model.
Args:
num_layers: The number of conv->max_pool layers.
num_channels: The number of conv output channels.
use_batchnorm: Whether to use batchnorm.
bn_config: A dictionary of two elements, ``decay_rate`` and ``eps`` to be
passed on to the :class:`~haiku.BatchNorm` layers. By default the
``decay_rate`` is ``0.9`` and ``eps`` is ``1e-5``.
name: Name of the module.
"""
super().__init__(name=name)
self._num_layers = num_layers
self._use_batchnorm = use_batchnorm
bn_config = dict(bn_config or {})
bn_config.setdefault('decay_rate', 0.9)
bn_config.setdefault('eps', 1e-5)
bn_config.setdefault('create_scale', True)
bn_config.setdefault('create_offset', True)
self.layers = []
for _ in range(self._num_layers):
conv = hk.Conv2D(
output_channels=num_channels,
kernel_shape=7,
stride=2,
with_bias=False,
padding='SAME',
name='conv')
if use_batchnorm:
batchnorm = hk.BatchNorm(name='batchnorm', **bn_config)
else:
batchnorm = None
self.layers.append(dict(conv=conv, batchnorm=batchnorm))
def __call__(self, inputs: jnp.ndarray, *,
is_training: bool,
test_local_stats: bool = False) -> jnp.ndarray:
out = inputs
for layer in self.layers:
out = layer['conv'](out)
if layer['batchnorm'] is not None:
out = layer['batchnorm'](out, is_training, test_local_stats)
out = jax.nn.relu(out)
out = hk.max_pool(out,
window_shape=(1, 3, 3, 1),
strides=(1, 2, 2, 1),
padding='SAME')
return out
class Conv2DUpsample(hk.Module):
"""Upsamples 4x using 2 2D transposed convolutions."""
def __init__(
self,
n_outputs: int,
name: Optional[str] = None,
):
"""Constructs a Conv2DUpsample model.
Args:
n_outputs: The number of output channels of the module.
name: Name of the module.
"""
super().__init__(name=name)
self.transp_conv1 = hk.Conv2DTranspose(
output_channels=n_outputs*2,
kernel_shape=4,
stride=2,
with_bias=True,
padding='SAME',
name='transp_conv_1')
self.transp_conv2 = hk.Conv2DTranspose(
output_channels=n_outputs,
kernel_shape=4,
stride=2,
with_bias=True,
padding='SAME',
name='transp_conv_2')
def __call__(self, inputs: jnp.ndarray, *,
is_training: bool,
test_local_stats: bool = False) -> jnp.ndarray:
out = inputs
out = self.transp_conv1(out)
out = jax.nn.relu(out)
out = self.transp_conv2(out)
return out
class Conv3DUpsample(hk.Module):
"""Simple convolutional auto-encoder."""
def __init__(self,
n_outputs: int,
n_time_upsamples: int = 2,
n_space_upsamples: int = 4,
name: Optional[str] = None):
super().__init__(name=name)
self._n_outputs = n_outputs
self._n_time_upsamples = n_time_upsamples
self._n_space_upsamples = n_space_upsamples
def __call__(self, x: jnp.ndarray, *, is_training: bool) -> jnp.ndarray:
n_upsamples = max(self._n_time_upsamples, self._n_space_upsamples)
time_stride = 2
space_stride = 2
for i in range(n_upsamples):
if i >= self._n_time_upsamples:
time_stride = 1
if i >= self._n_space_upsamples:
space_stride = 1
channels = self._n_outputs * pow(2, n_upsamples - 1 - i)
x = hk.Conv3DTranspose(output_channels=channels,
stride=[time_stride, space_stride, space_stride],
kernel_shape=[4, 4, 4],
name=f'conv3d_transpose_{i}')(x)
if i != n_upsamples - 1:
x = jax.nn.relu(x)
return x
class ImagePreprocessor(hk.Module):
"""Image preprocessing for Perceiver Encoder."""
def __init__(
self,
prep_type='conv',
spatial_downsample: int = 4,
temporal_downsample: int = 1,
position_encoding_type: str = 'fourier',
n_extra_pos_mlp: int = 0,
num_channels: int = 64,
conv_after_patching: bool = False,
conv2d_use_batchnorm: bool = True,
concat_or_add_pos: str = 'concat',
name: Optional[str] = None,
**position_encoding_kwargs):
super().__init__(name=name)
if prep_type not in ('conv', 'patches', 'pixels', 'conv1x1'):
raise ValueError('Invalid prep_type!')
if concat_or_add_pos not in ['concat', 'add']:
raise ValueError(
f'Invalid value {concat_or_add_pos} for concat_or_add_pos.')
self._prep_type = prep_type
self._spatial_downsample = spatial_downsample
self._temporal_downsample = temporal_downsample
self._concat_or_add_pos = concat_or_add_pos
self._conv_after_patching = conv_after_patching
self._num_channels = num_channels
if self._prep_type == 'conv':
      # Downsampling with conv is currently restricted to powers of 4 in
      # space and a factor of 1 in time.
convnet_num_layers = math.log(spatial_downsample, 4)
convnet_num_layers_is_int = (
convnet_num_layers == np.round(convnet_num_layers))
if not convnet_num_layers_is_int or temporal_downsample != 1:
raise ValueError('Only powers of 4 expected for spatial '
'and 1 expected for temporal '
'downsampling with conv.')
self.convnet = Conv2DDownsample(
num_layers=int(convnet_num_layers),
num_channels=num_channels,
use_batchnorm=conv2d_use_batchnorm)
elif self._prep_type == 'conv1x1':
assert temporal_downsample == 1, 'conv1x1 does not downsample in time.'
self.convnet_1x1 = hk.Conv2D(
num_channels, kernel_shape=[1, 1],
# spatial_downsample is unconstrained for 1x1 convolutions.
stride=[spatial_downsample, spatial_downsample])
# Partially construct the positional encoding function.
# We fully construct it when we know the input size.
self._positional_encoding_ctor = functools.partial(
position_encoding.build_position_encoding,
position_encoding_type=position_encoding_type,
**position_encoding_kwargs)
# Stack MLPs to get a deeper positional embedding.
self._n_extra_pos_mlp = n_extra_pos_mlp
def _build_network_inputs(
self, inputs: jnp.ndarray, pos: jnp.ndarray,
network_input_is_1d: bool = True) -> Tuple[jnp.ndarray, jnp.ndarray]:
"""Construct the final input, including position encoding."""
batch_size = inputs.shape[0]
index_dims = inputs.shape[1:-1]
# Reshape input features to a 1D index dimension if necessary.
if len(inputs.shape) > 3 and network_input_is_1d:
inputs = jnp.reshape(
inputs, [batch_size, np.prod(index_dims), -1])
# Construct the position encoding.
pos_enc = self._positional_encoding_ctor(
index_dims=index_dims)(batch_size=batch_size, pos=pos)
    for i in range(self._n_extra_pos_mlp):
      pos_enc += hk.Linear(pos_enc.shape[-1])(pos_enc)
      if i < (self._n_extra_pos_mlp - 1):
        pos_enc = jax.nn.relu(pos_enc)
if not network_input_is_1d:
# Reshape pos to match the input feature shape
# if the network takes non-1D inputs
sh = inputs.shape
pos_enc = jnp.reshape(pos_enc, list(sh)[:-1]+[-1])
if self._concat_or_add_pos == 'concat':
inputs_with_pos = jnp.concatenate([inputs, pos_enc], axis=-1)
elif self._concat_or_add_pos == 'add':
inputs_with_pos = inputs + pos_enc
return inputs_with_pos, inputs
def __call__(
self, inputs: jnp.ndarray, *,
is_training: bool,
pos: Optional[jnp.ndarray] = None,
network_input_is_1d: bool = True) -> PreprocessorOutputT:
if self._prep_type == 'conv':
# Convnet image featurization.
# Downsamples spatially by a factor of 4
conv = self.convnet
if len(inputs.shape) == 5:
conv = hk.BatchApply(conv)
inputs = conv(inputs, is_training=is_training)
elif self._prep_type == 'conv1x1':
      # Map inputs to self._num_channels dims (64 by default) with a 1x1 conv.
conv = self.convnet_1x1
if len(inputs.shape) == 5:
conv = hk.BatchApply(conv)
inputs = conv(inputs)
elif self._prep_type == 'patches':
# Space2depth featurization.
# Video: B x T x H x W x C
inputs = space_to_depth(
inputs,
temporal_block_size=self._temporal_downsample,
spatial_block_size=self._spatial_downsample)
if inputs.ndim == 5 and inputs.shape[1] == 1:
        # For flow inputs: drop the singleton time dimension.
inputs = jnp.squeeze(inputs, axis=1)
if self._conv_after_patching:
inputs = hk.Linear(self._num_channels, name='patches_linear')(inputs)
elif self._prep_type == 'pixels':
# if requested, downsamples in the crudest way
if inputs.ndim == 4:
inputs = inputs[:,
::self._spatial_downsample, ::self._spatial_downsample]
elif inputs.ndim == 5:
inputs = inputs[:, ::self._temporal_downsample,
::self._spatial_downsample, ::self._spatial_downsample]
else:
raise ValueError('Unsupported data format for pixels.')
inputs, inputs_without_pos = self._build_network_inputs(
inputs, pos, network_input_is_1d)
modality_sizes = None # Size for each modality, only needed for multimodal
return inputs, modality_sizes, inputs_without_pos
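# Example usage (a minimal sketch, not part of the original file; the
# position-encoding kwargs mirror those in train/experiment.py and are
# illustrative only):
#
#   def forward_fn(images, is_training):  # images: [B, H, W, 3]
#     preprocessor = ImagePreprocessor(
#         prep_type='pixels',
#         position_encoding_type='fourier',
#         fourier_position_encoding_kwargs=dict(
#             num_bands=64, max_resolution=(224, 224),
#             sine_only=False, concat_pos=True))
#     inputs, _, _ = preprocessor(images, is_training=is_training)
#     return inputs  # [B, H*W, C] with Fourier features concatenated.
#
#   forward = hk.transform_with_state(forward_fn)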
class ImagePostprocessor(hk.Module):
"""Image postprocessing for Perceiver."""
def __init__(
self,
postproc_type: str = 'pixels',
spatial_upsample: int = 1,
temporal_upsample: int = 1,
n_outputs: int = -1, # only relevant for 'conv1x1', 'conv', and 'raft'
input_reshape_size: Optional[Sequence[int]] = None,
name: Optional[str] = None):
super().__init__(name=name)
if postproc_type not in ('conv', 'patches', 'pixels', 'raft', 'conv1x1'):
raise ValueError('Invalid postproc_type!')
# Architecture parameters:
self._postproc_type = postproc_type
self._temporal_upsample = temporal_upsample
self._spatial_upsample = spatial_upsample
self._input_reshape_size = input_reshape_size
if self._postproc_type == 'pixels':
# No postprocessing.
if self._temporal_upsample != 1 or self._spatial_upsample != 1:
raise ValueError('Pixels postprocessing should not currently upsample.')
elif self._postproc_type == 'conv1x1':
assert self._temporal_upsample == 1, 'conv1x1 does not upsample in time.'
if n_outputs == -1:
        raise ValueError(
            "Expected a value for n_outputs with 'conv1x1' postprocessing.")
self.conv1x1 = hk.Conv2D(
n_outputs, kernel_shape=[1, 1],
# spatial_downsample is unconstrained for 1x1 convolutions.
stride=[self._spatial_upsample, self._spatial_upsample])
elif self._postproc_type == 'conv':
if n_outputs == -1:
        raise ValueError(
            "Expected a value for n_outputs with 'conv' postprocessing.")
if self._temporal_upsample != 1:
def int_log2(x):
return int(np.round(np.log(x) / np.log(2)))
self.convnet = Conv3DUpsample(
n_outputs, int_log2(temporal_upsample), int_log2(spatial_upsample))
else:
self.convnet = Conv2DUpsample(n_outputs)
def __call__(
self, inputs: jnp.ndarray, *,
is_training: bool,
pos: Optional[jnp.ndarray] = None,
modality_sizes: Optional[ModalitySizeT] = None) -> jnp.ndarray:
if self._input_reshape_size is not None:
inputs = jnp.reshape(
inputs,
[inputs.shape[0]] + list(self._input_reshape_size)
+ [inputs.shape[-1]])
if self._postproc_type == 'conv' or self._postproc_type == 'raft':
      # Convnet image featurization. Note that `self.convnet` is only
      # constructed for the 'conv' postproc_type in __init__.
conv = self.convnet
if len(inputs.shape) == 5 and self._temporal_upsample == 1:
conv = hk.BatchApply(conv)
inputs = conv(inputs, is_training=is_training)
elif self._postproc_type == 'conv1x1':
inputs = self.conv1x1(inputs)
elif self._postproc_type == 'patches':
inputs = reverse_space_to_depth(
inputs, self._temporal_upsample, self._spatial_upsample)
return inputs
class OneHotPreprocessor(hk.Module):
"""One-hot preprocessor for Perceiver Encoder."""
def __init__(self, name: Optional[str] = None):
super().__init__(name=name)
def __call__(self, inputs: jnp.ndarray, *,
is_training: bool,
pos: Optional[jnp.ndarray] = None,
network_input_is_1d: bool = True) -> PreprocessorOutputT:
# Add a dummy index dimension.
inputs = inputs[:, None, :]
# No position encodings, so the 1st (input) and 3rd (inputs_without_pos)
# outputs are identical.
return inputs, None, inputs
class AudioPreprocessor(hk.Module):
"""Audio preprocessing for Perceiver Encoder."""
def __init__(
self,
prep_type: str = 'patches',
samples_per_patch: int = 96,
position_encoding_type: str = 'fourier',
n_extra_pos_mlp: int = 0,
concat_or_add_pos: str = 'concat',
name: Optional[str] = None,
**position_encoding_kwargs):
super().__init__(name=name)
if prep_type not in ('patches',):
raise ValueError('Invalid prep_type!')
if concat_or_add_pos not in ['concat', 'add']:
raise ValueError(
f'Invalid value {concat_or_add_pos} for concat_or_add_pos.')
self._samples_per_patch = samples_per_patch
self._concat_or_add_pos = concat_or_add_pos
# Partially construct the positional encoding function.
# We fully construct it when we know the input size.
self._positional_encoding_ctor = functools.partial(
position_encoding.build_position_encoding,
position_encoding_type=position_encoding_type,
**position_encoding_kwargs)
# for deeper positional embeddings
self._n_extra_pos_mlp = n_extra_pos_mlp
def _build_network_inputs(
self, inputs: jnp.ndarray,
pos: jnp.ndarray) -> Tuple[jnp.ndarray, jnp.ndarray]:
"""Construct the final input, including position encoding."""
batch_size = inputs.shape[0]
index_dims = inputs.shape[1:-1]
# Construct the position encoding.
pos_enc = self._positional_encoding_ctor(
index_dims=index_dims)(batch_size=batch_size, pos=pos)
for i in range(0, self._n_extra_pos_mlp):
pos_enc += hk.Linear(pos_enc.shape[-1])(pos_enc)
if i < (self._n_extra_pos_mlp-1):
pos_enc = jax.nn.relu(pos_enc)
if self._concat_or_add_pos == 'concat':
inputs_with_pos = jnp.concatenate([inputs, pos_enc], axis=-1)
elif self._concat_or_add_pos == 'add':
inputs_with_pos = inputs + pos_enc
return inputs_with_pos, inputs
def __call__(self, inputs: jnp.ndarray, *,
is_training: bool,
pos: Optional[jnp.ndarray] = None,
network_input_is_1d: bool = True) -> PreprocessorOutputT:
inputs = jnp.reshape(inputs, [inputs.shape[0], -1,
self._samples_per_patch])
inputs, inputs_without_pos = self._build_network_inputs(inputs, pos)
modality_sizes = None # Size for each modality, only needed for multimodal
return inputs, modality_sizes, inputs_without_pos
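# A hedged shape example (not part of the original file): with the default
# samples_per_patch=96, an audio batch of shape [B, 48000, 1] is reshaped to
# [B, 500, 96] before position encodings are concatenated, i.e. each index
# position covers 96 consecutive samples.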
class AudioPostprocessor(hk.Module):
"""Audio postprocessing for Perceiver."""
def __init__(
self,
postproc_type: str = 'patches', # 'conv', 'patches', 'pixels'
samples_per_patch: int = 96,
name: Optional[str] = None):
super().__init__(name=name)
if postproc_type not in ('patches',):
raise ValueError('Invalid postproc_type!')
self._samples_per_patch = samples_per_patch
# Architecture parameters:
self._postproc_type = postproc_type
def __call__(self, inputs: jnp.ndarray, *,
is_training: bool,
pos: Optional[jnp.ndarray] = None,
modality_sizes: Optional[ModalitySizeT] = None) -> jnp.ndarray:
out = hk.Linear(self._samples_per_patch)(inputs)
return jnp.reshape(out, [inputs.shape[0], -1])
class IdentityPostprocessor(hk.Module):
"""Passes through the inputs unchanged."""
def __init__(self, name: Optional[str] = None):
super().__init__(name=name)
def __call__(self, inputs: jnp.ndarray, *,
is_training: bool,
pos: Optional[jnp.ndarray] = None,
modality_sizes: Optional[ModalitySizeT] = None) -> jnp.ndarray:
return inputs
def restructure(modality_sizes: ModalitySizeT,
inputs: jnp.ndarray) -> Mapping[str, jnp.ndarray]:
"""Partitions a [B, N, C] tensor into tensors for each modality.
Args:
    modality_sizes: dict specifying the size of each modality
inputs: input tensor
Returns:
dict mapping name of modality to its associated tensor.
"""
outputs = {}
index = 0
# Apply a predictable ordering to the modalities
for modality in sorted(modality_sizes.keys()):
size = modality_sizes[modality]
inp = inputs[:, index:index + size]
index += size
outputs[modality] = inp
return outputs
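# A hedged worked example (not part of the original file): with
# modality_sizes = {'audio': 500, 'image': 50} and inputs of shape
# [B, 550, C], restructure returns {'audio': inputs[:, 0:500],
# 'image': inputs[:, 500:550]}; modalities are sliced in sorted-name order.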
class MultimodalPreprocessor(hk.Module):
"""Multimodal preprocessing for Perceiver Encoder.
  Inputs for each modality are preprocessed, then padded with trainable
  position embeddings so that all modalities have the same number of channels.
"""
def __init__(
self,
modalities: Mapping[str, PreprocessorT],
mask_probs: Optional[Mapping[str, float]] = None,
min_padding_size: int = 2,
name: Optional[str] = None):
"""Constructor.
Args:
modalities: dict mapping modality name to preprocessor
mask_probs: dict mapping modality name to masking probability of that
modality
min_padding_size: the minimum padding size for all modalities.
The final output will have num_channels equal to the maximum channels
across all modalities plus min_padding_size.
name: name of module
"""
super().__init__(name=name)
self._modalities = modalities
self._min_padding_size = min_padding_size
self._mask_probs = mask_probs
def __call__(self, inputs: jnp.ndarray, *,
is_training: bool,
pos: Optional[jnp.ndarray] = None,
network_input_is_1d: bool = True) -> PreprocessorOutputT:
outputs = {}
inputs_without_pos = {}
for modality, preprocessor in self._modalities.items():
outputs[modality], _, inputs_without_pos[modality] = preprocessor(
inputs[modality], is_training=is_training, pos=pos,
network_input_is_1d=network_input_is_1d)
common_channel_size = (max(o.shape[2] for o in outputs.values())
+ self._min_padding_size)
padded = {}
modality_sizes = {}
for modality, output in outputs.items():
pos_enc = position_encoding.TrainablePositionEncoding(
1, num_channels=common_channel_size-output.shape[2],
init_scale=0.02, name=f'{modality}_padding')
padding = jnp.broadcast_to(
pos_enc(batch_size=output.shape[0]),
[output.shape[0], output.shape[1],
common_channel_size-output.shape[2]])
output_padded = jnp.concatenate([output, padding], axis=2)
if self._mask_probs is not None:
# Randomly mask out each token corresponding to this modality
mask_token = position_encoding.TrainablePositionEncoding(
1, num_channels=output_padded.shape[2],
init_scale=0.02, name=f'{modality}_mask_token')(output.shape[0])
mask_prob = self._mask_probs[modality]
rng = hk.next_rng_key()
mask = jax.random.bernoulli(rng, mask_prob,
shape=[output.shape[0], output.shape[1]])
mask = jnp.expand_dims(mask, axis=2)
output_padded = (1 - mask) * output_padded + mask * mask_token
padded[modality] = output_padded
modality_sizes[modality] = output_padded.shape[1]
# Apply a predictable ordering to the modalities
padded_ls = [padded[k] for k in sorted(padded.keys())]
return (jnp.concatenate(padded_ls, axis=1),
modality_sizes,
inputs_without_pos)
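# A hedged illustration of the channel padding above (not part of the
# original file): if preprocessed audio tokens have 97 channels and image
# tokens have 243, then with min_padding_size=2 the common channel size is
# 243 + 2 = 245, so audio tokens receive 148 channels of trainable padding
# and image tokens receive 2.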
class MultimodalPostprocessor(hk.Module):
"""Multimodal postprocessing for Perceiver."""
def __init__(
self,
modalities: Mapping[str, PostprocessorT],
input_is_dict: bool = False,
name: Optional[str] = None):
"""Constructor.
Args:
      modalities: dict mapping modality name to the postprocessor for that
        modality
input_is_dict: If True, input is assumed to be dictionary structured,
and outputs keep the same dictionary shape. If False, input is a tensor
which is sliced up during postprocessing by `modality_sizes`.
name: name of the module
"""
super().__init__(name=name)
self._modalities = modalities
self._input_is_dict = input_is_dict
def __call__(
self, inputs: jnp.ndarray, *,
is_training: bool,
pos: Optional[jnp.ndarray] = None,
modality_sizes: Optional[ModalitySizeT] = None) -> Mapping[str,
jnp.ndarray]:
if not self._input_is_dict:
# Slice up modalities by their sizes.
assert modality_sizes is not None
inputs = restructure(modality_sizes=modality_sizes, inputs=inputs)
outputs = {modality: postprocessor(
inputs[modality], is_training=is_training, pos=pos, modality_sizes=None)
for modality, postprocessor in self._modalities.items()}
return outputs
class ClassificationPostprocessor(hk.Module):
"""Classification postprocessing for Perceiver."""
def __init__(
self,
num_classes: int,
name: Optional[str] = None):
super().__init__(name=name)
self._num_classes = num_classes
def __call__(self, inputs: jnp.ndarray, *,
is_training: bool,
pos: Optional[jnp.ndarray] = None,
modality_sizes: Optional[ModalitySizeT] = None) -> jnp.ndarray:
logits = hk.Linear(self._num_classes)(inputs)
return logits[:, 0, :]
class ProjectionPostprocessor(hk.Module):
"""Projection postprocessing for Perceiver."""
def __init__(
self,
num_outputs: int,
name: Optional[str] = None):
super().__init__(name=name)
self._num_outputs = num_outputs
def __call__(self, inputs: jnp.ndarray, *,
is_training: bool,
pos: Optional[jnp.ndarray] = None,
modality_sizes: Optional[ModalitySizeT] = None) -> jnp.ndarray:
logits = hk.Linear(self._num_outputs)(inputs)
return logits
class EmbeddingDecoder(hk.Module):
"""Haiku module to decode embeddings."""
def __init__(self, embedding_matrix: jnp.ndarray, name='embedding_decoder'):
"""Constructs the module.
Args:
embedding_matrix: Array of shape [vocab_size, d_model].
name: Name of the module.
"""
super().__init__(name=name)
self._embedding_matrix = embedding_matrix
self._vocab_size, self._d_model = embedding_matrix.shape
def __call__(self, embeddings: jnp.ndarray) -> jnp.ndarray:
batch_size, seq_len, _ = embeddings.shape
output = jnp.matmul(
embeddings.reshape([-1, self._d_model]), # Flatten batch dim
jnp.transpose(self._embedding_matrix))
bias = hk.get_parameter('bias', shape=[self._vocab_size], init=jnp.zeros)
output = output + bias
return output.reshape([batch_size, seq_len, self._vocab_size])
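# Example usage (a minimal sketch, not part of the original file; the vocab
# and model sizes are illustrative): tying the decoder to an encoder-side
# embedding inside a Haiku transform.
#
#   def forward_fn(token_ids):                        # [B, T] int32
#     embedder = hk.Embed(vocab_size=262, embed_dim=768)
#     embeddings = embedder(token_ids)                # [B, T, 768]
#     logits = EmbeddingDecoder(embedder.embeddings)(embeddings)
#     return logits                                   # [B, T, 262]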
| deepmind-research-master | perceiver/io_processors.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ImageNet dataset with pre-processing and augmentation.
Deng, et al CVPR 2009 - ImageNet: A large-scale hierarchical image database.
https://image-net.org/
"""
import enum
from typing import Any, Generator, Mapping, Optional, Sequence, Text, Tuple
import jax
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
import tensorflow_probability as tfp
from perceiver.train import autoaugment
Batch = Mapping[Text, np.ndarray]
MEAN_RGB = (0.485 * 255, 0.456 * 255, 0.406 * 255)
STDDEV_RGB = (0.229 * 255, 0.224 * 255, 0.225 * 255)
AUTOTUNE = tf.data.experimental.AUTOTUNE
INPUT_DIM = 224 # The number of pixels in the image resize.
class Split(enum.Enum):
"""ImageNet dataset split."""
TRAIN = 1
TRAIN_AND_VALID = 2
VALID = 3
TEST = 4
@classmethod
def from_string(cls, name: Text) -> 'Split':
return {'TRAIN': Split.TRAIN, 'TRAIN_AND_VALID': Split.TRAIN_AND_VALID,
'VALID': Split.VALID, 'VALIDATION': Split.VALID,
'TEST': Split.TEST}[name.upper()]
@property
def num_examples(self):
return {Split.TRAIN_AND_VALID: 1281167, Split.TRAIN: 1271167,
Split.VALID: 10000, Split.TEST: 50000}[self]
def load(
split: Split,
*,
is_training: bool,
# batch_dims should be:
# [device_count, per_device_batch_size] or [total_batch_size]
batch_dims: Sequence[int],
augmentation_settings: Mapping[str, Any],
# The shape to which images are resized.
im_dim: int = INPUT_DIM,
threadpool_size: int = 48,
max_intra_op_parallelism: int = 1,
) -> Generator[Batch, None, None]:
"""Loads the given split of the dataset."""
start, end = _shard(split, jax.host_id(), jax.host_count())
im_size = (im_dim, im_dim)
total_batch_size = np.prod(batch_dims)
tfds_split = tfds.core.ReadInstruction(_to_tfds_split(split),
from_=start, to=end, unit='abs')
ds = tfds.load('imagenet2012:5.*.*', split=tfds_split,
decoders={'image': tfds.decode.SkipDecoding()})
options = tf.data.Options()
options.experimental_threading.private_threadpool_size = threadpool_size
options.experimental_threading.max_intra_op_parallelism = (
max_intra_op_parallelism)
options.experimental_optimization.map_parallelization = True
if is_training:
options.experimental_deterministic = False
ds = ds.with_options(options)
if is_training:
if jax.host_count() > 1:
# Only cache if we are reading a subset of the dataset.
ds = ds.cache()
ds = ds.repeat()
ds = ds.shuffle(buffer_size=10 * total_batch_size, seed=0)
else:
if split.num_examples % total_batch_size != 0:
      raise ValueError(
          f'Test/valid set size must be divisible by the total batch size '
          f'({total_batch_size}).')
def crop_augment_preprocess(example):
image, _ = _preprocess_image(
example['image'], is_training, im_size, augmentation_settings)
label = tf.cast(example['label'], tf.int32)
out = {'images': image, 'labels': label}
if is_training:
if augmentation_settings['cutmix']:
out['mask'] = cutmix_padding(*im_size)
out['cutmix_ratio'] = tf.reduce_mean(out['mask'])
if augmentation_settings['mixup_alpha'] is not None:
beta = tfp.distributions.Beta(
augmentation_settings['mixup_alpha'],
augmentation_settings['mixup_alpha'])
out['mixup_ratio'] = beta.sample()
return out
ds = ds.map(crop_augment_preprocess, num_parallel_calls=AUTOTUNE)
# Mixup/cutmix by temporarily batching (using the per-device batch size):
use_cutmix = augmentation_settings['cutmix']
use_mixup = augmentation_settings['mixup_alpha'] is not None
if is_training and (use_cutmix or use_mixup):
inner_batch_size = batch_dims[-1]
# Apply mixup, cutmix, or mixup + cutmix on batched data.
# We use data from 2 batches to produce 1 mixed batch.
ds = ds.batch(inner_batch_size * 2)
if not use_cutmix and use_mixup:
ds = ds.map(my_mixup, num_parallel_calls=AUTOTUNE)
elif use_cutmix and not use_mixup:
ds = ds.map(my_cutmix, num_parallel_calls=AUTOTUNE)
elif use_cutmix and use_mixup:
ds = ds.map(my_mixup_cutmix, num_parallel_calls=AUTOTUNE)
# Unbatch for further processing.
ds = ds.unbatch()
for batch_size in reversed(batch_dims):
ds = ds.batch(batch_size)
ds = ds.prefetch(AUTOTUNE)
yield from tfds.as_numpy(ds)
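# Example usage (a minimal sketch, not part of the original file): the VALID
# split has 10000 examples, so a total batch size of 8 divides it evenly.
#
#   batches = load(
#       Split.VALID,
#       is_training=False,
#       batch_dims=[8],
#       augmentation_settings=dict(
#           cutmix=False, mixup_alpha=None, randaugment=None))
#   batch = next(batches)  # {'images': [8, 224, 224, 3], 'labels': [8]}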
# cutmix_padding, my_cutmix, my_mixup, and my_mixup_cutmix taken from:
# https://github.com/deepmind/deepmind-research/blob/master/nfnets/dataset.py
def cutmix_padding(h, w):
"""Returns image mask for CutMix.
Taken from (https://github.com/google/edward2/blob/master/experimental
/marginalization_mixup/data_utils.py#L367)
Args:
h: image height.
w: image width.
"""
r_x = tf.random.uniform([], 0, w, tf.int32)
r_y = tf.random.uniform([], 0, h, tf.int32)
# Beta dist in paper, but they used Beta(1,1) which is just uniform.
image1_proportion = tf.random.uniform([])
patch_length_ratio = tf.math.sqrt(1 - image1_proportion)
r_w = tf.cast(patch_length_ratio * tf.cast(w, tf.float32), tf.int32)
r_h = tf.cast(patch_length_ratio * tf.cast(h, tf.float32), tf.int32)
bbx1 = tf.clip_by_value(tf.cast(r_x - r_w // 2, tf.int32), 0, w)
bby1 = tf.clip_by_value(tf.cast(r_y - r_h // 2, tf.int32), 0, h)
bbx2 = tf.clip_by_value(tf.cast(r_x + r_w // 2, tf.int32), 0, w)
bby2 = tf.clip_by_value(tf.cast(r_y + r_h // 2, tf.int32), 0, h)
# Create the binary mask.
pad_left = bbx1
pad_top = bby1
pad_right = tf.maximum(w - bbx2, 0)
pad_bottom = tf.maximum(h - bby2, 0)
r_h = bby2 - bby1
r_w = bbx2 - bbx1
mask = tf.pad(
tf.ones((r_h, r_w)),
paddings=[[pad_top, pad_bottom], [pad_left, pad_right]],
mode='CONSTANT',
constant_values=0)
mask.set_shape((h, w))
return mask[..., None] # Add channel dim.
def my_cutmix(batch):
"""Apply CutMix: https://arxiv.org/abs/1905.04899."""
batch = dict(**batch)
bs = tf.shape(batch['images'])[0] // 2
mask = batch['mask'][:bs]
images = (mask * batch['images'][:bs] + (1.0 - mask) * batch['images'][bs:])
mix_labels = batch['labels'][bs:]
labels = batch['labels'][:bs]
ratio = batch['cutmix_ratio'][:bs]
return {'images': images, 'labels': labels,
'mix_labels': mix_labels, 'ratio': ratio}
def my_mixup(batch):
"""Apply mixup: https://arxiv.org/abs/1710.09412."""
batch = dict(**batch)
bs = tf.shape(batch['images'])[0] // 2
ratio = batch['mixup_ratio'][:bs, None, None, None]
images = (ratio * batch['images'][:bs] + (1.0 - ratio) * batch['images'][bs:])
mix_labels = batch['labels'][bs:]
labels = batch['labels'][:bs]
  ratio = ratio[..., 0, 0, 0]  # Squeeze the broadcast dims back out.
return {'images': images, 'labels': labels,
'mix_labels': mix_labels, 'ratio': ratio}
def my_mixup_cutmix(batch):
"""Apply mixup to half the batch, and cutmix to the other."""
batch = dict(**batch)
bs = tf.shape(batch['images'])[0] // 4
mixup_ratio = batch['mixup_ratio'][:bs, None, None, None]
mixup_images = (mixup_ratio * batch['images'][:bs]
+ (1.0 - mixup_ratio) * batch['images'][bs:2*bs])
mixup_labels = batch['labels'][:bs]
mixup_mix_labels = batch['labels'][bs:2*bs]
cutmix_mask = batch['mask'][2*bs:3*bs]
cutmix_images = (cutmix_mask * batch['images'][2*bs:3*bs]
+ (1.0 - cutmix_mask) * batch['images'][-bs:])
cutmix_labels = batch['labels'][2*bs:3*bs]
cutmix_mix_labels = batch['labels'][-bs:]
cutmix_ratio = batch['cutmix_ratio'][2*bs : 3*bs]
return {'images': tf.concat([mixup_images, cutmix_images], axis=0),
'labels': tf.concat([mixup_labels, cutmix_labels], axis=0),
'mix_labels': tf.concat([mixup_mix_labels, cutmix_mix_labels], 0),
'ratio': tf.concat([mixup_ratio[..., 0, 0, 0], cutmix_ratio], axis=0)}
def _to_tfds_split(split: Split) -> tfds.Split:
"""Returns the TFDS split appropriately sharded."""
# NOTE: Imagenet did not release labels for the test split used in the
# competition, so it has been typical at DeepMind to consider the VALID
# split the TEST split and to reserve 10k images from TRAIN for VALID.
if split in (
Split.TRAIN, Split.TRAIN_AND_VALID, Split.VALID):
return tfds.Split.TRAIN
else:
assert split == Split.TEST
return tfds.Split.VALIDATION
def _shard(
split: Split, shard_index: int, num_shards: int) -> Tuple[int, int]:
"""Returns [start, end) for the given shard index."""
assert shard_index < num_shards
arange = np.arange(split.num_examples)
shard_range = np.array_split(arange, num_shards)[shard_index]
start, end = shard_range[0], (shard_range[-1] + 1)
if split == Split.TRAIN:
# Note that our TRAIN=TFDS_TRAIN[10000:] and VALID=TFDS_TRAIN[:10000].
offset = Split.VALID.num_examples
start += offset
end += offset
return start, end
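# A hedged worked example (not part of the original file): with 2 hosts,
# _shard(Split.VALID, 0, 2) -> (0, 5000) and _shard(Split.VALID, 1, 2) ->
# (5000, 10000). For Split.TRAIN both bounds are shifted by the 10000 VALID
# examples carved out of the front of the TFDS TRAIN split.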
def _preprocess_image(
image_bytes: tf.Tensor,
is_training: bool,
image_size: Sequence[int],
augmentation_settings: Mapping[str, Any],
) -> Tuple[tf.Tensor, tf.Tensor]:
"""Returns processed and resized images."""
# Get the image crop.
if is_training:
image, im_shape = _decode_and_random_crop(image_bytes)
image = tf.image.random_flip_left_right(image)
else:
image, im_shape = _decode_and_center_crop(image_bytes)
assert image.dtype == tf.uint8
# Optionally apply RandAugment: https://arxiv.org/abs/1909.13719
if is_training:
if augmentation_settings['randaugment'] is not None:
# Input and output images are dtype uint8.
image = autoaugment.distort_image_with_randaugment(
image,
num_layers=augmentation_settings['randaugment']['num_layers'],
magnitude=augmentation_settings['randaugment']['magnitude'])
# Resize and normalize the image crop.
# NOTE: Bicubic resize (1) casts uint8 to float32 and (2) resizes without
# clamping overshoots. This means values returned will be outside the range
# [0.0, 255.0] (e.g. we have observed outputs in the range [-51.1, 336.6]).
image = tf.image.resize(
image, image_size, tf.image.ResizeMethod.BICUBIC)
image = _normalize_image(image)
return image, im_shape
def _normalize_image(image: tf.Tensor) -> tf.Tensor:
"""Normalize the image to zero mean and unit variance."""
image -= tf.constant(MEAN_RGB, shape=[1, 1, 3], dtype=image.dtype)
image /= tf.constant(STDDEV_RGB, shape=[1, 1, 3], dtype=image.dtype)
return image
def _distorted_bounding_box_crop(
image_bytes: tf.Tensor,
*,
jpeg_shape: tf.Tensor,
bbox: tf.Tensor,
min_object_covered: float,
aspect_ratio_range: Tuple[float, float],
area_range: Tuple[float, float],
max_attempts: int,
) -> Tuple[tf.Tensor, tf.Tensor]:
"""Generates cropped_image using one of the bboxes randomly distorted."""
bbox_begin, bbox_size, _ = tf.image.sample_distorted_bounding_box(
jpeg_shape,
bounding_boxes=bbox,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range,
max_attempts=max_attempts,
use_image_if_no_bounding_boxes=True)
# Crop the image to the specified bounding box.
offset_y, offset_x, _ = tf.unstack(bbox_begin)
target_height, target_width, _ = tf.unstack(bbox_size)
crop_window = [offset_y, offset_x, target_height, target_width]
if image_bytes.dtype == tf.dtypes.string:
image = tf.image.decode_and_crop_jpeg(image_bytes,
tf.stack(crop_window),
channels=3)
else:
image = tf.image.crop_to_bounding_box(image_bytes, *crop_window)
im_shape = tf.stack([target_height, target_width])
return image, im_shape
def _decode_whole_image(image_bytes: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]:
image = tf.io.decode_jpeg(image_bytes, channels=3)
im_shape = tf.io.extract_jpeg_shape(image_bytes, output_type=tf.int32)
return image, im_shape
def _decode_and_random_crop(
image_bytes: tf.Tensor
) -> Tuple[tf.Tensor, tf.Tensor]:
"""Make a random crop of INPUT_DIM."""
if image_bytes.dtype == tf.dtypes.string:
jpeg_shape = tf.image.extract_jpeg_shape(image_bytes)
else:
jpeg_shape = tf.shape(image_bytes)
bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
image, im_shape = _distorted_bounding_box_crop(
image_bytes,
jpeg_shape=jpeg_shape,
bbox=bbox,
min_object_covered=0.1,
aspect_ratio_range=(3 / 4, 4 / 3),
area_range=(0.08, 1.0),
max_attempts=10)
  if tf.reduce_all(tf.equal(jpeg_shape, tf.shape(image))):
    # The random crop failed (the whole image was returned), so fall back to
    # a center crop.
    image, im_shape = _decode_and_center_crop(image_bytes, jpeg_shape)
return image, im_shape
def _center_crop(image, crop_dim):
"""Center crops an image to a target dimension."""
image_height = image.shape[0]
image_width = image.shape[1]
offset_height = ((image_height - crop_dim) + 1) // 2
offset_width = ((image_width - crop_dim) + 1) // 2
return tf.image.crop_to_bounding_box(
image, offset_height, offset_width, crop_dim, crop_dim)
def _decode_and_center_crop(
image_bytes: tf.Tensor,
jpeg_shape: Optional[tf.Tensor] = None,
) -> Tuple[tf.Tensor, tf.Tensor]:
"""Crops to center of image with padding then scales."""
if jpeg_shape is None:
if image_bytes.dtype == tf.dtypes.string:
jpeg_shape = tf.image.extract_jpeg_shape(image_bytes)
else:
jpeg_shape = tf.shape(image_bytes)
image_height = jpeg_shape[0]
image_width = jpeg_shape[1]
padded_center_crop_size = tf.cast(
((INPUT_DIM / (INPUT_DIM + 32)) *
tf.cast(tf.minimum(image_height, image_width), tf.float32)), tf.int32)
offset_height = ((image_height - padded_center_crop_size) + 1) // 2
offset_width = ((image_width - padded_center_crop_size) + 1) // 2
crop_window = [offset_height, offset_width,
padded_center_crop_size, padded_center_crop_size]
if image_bytes.dtype == tf.dtypes.string:
image = tf.image.decode_and_crop_jpeg(image_bytes,
tf.stack(crop_window),
channels=3)
else:
image = tf.image.crop_to_bounding_box(image_bytes, *crop_window)
im_shape = tf.stack([padded_center_crop_size, padded_center_crop_size])
return image, im_shape
| deepmind-research-master | perceiver/train/dataset.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A reference training pipeline for Perceiver/Perceiver IO on ImageNet.
We use the Jaxline (https://github.com/deepmind/jaxline) training framework.
Two sets of hyperparameters are provided: the hyperparameters we used for the
Perceiver IO paper, and scaled-down hyperparameters for local testing.
This script should run out-of-the-box with the local hyperparameters.
The scaled-up hyperparameters require a distributed learning setup to run,
and this script will need to be adapted to your specific setup.
"""
import functools
from typing import Generator, Mapping, Text, Tuple
from absl import app
from absl import flags
from absl import logging
import haiku as hk
import jax
import jax.numpy as jnp
from jaxline import base_config
from jaxline import experiment
from jaxline import platform
from jaxline import utils as jl_utils
from ml_collections import config_dict
import numpy as np
import optax
from perceiver import io_processors
from perceiver import perceiver
from perceiver.train import dataset
from perceiver.train import utils
FLAGS = flags.FLAGS
OptState = Tuple[optax.TraceState, optax.ScaleByScheduleState, optax.ScaleState]
Scalars = Mapping[Text, jnp.ndarray]
N_TRAIN_EXAMPLES = dataset.Split.TRAIN_AND_VALID.num_examples
N_CLASSES = 1000
# Only local/debug parameters are supported out of the box.
# To use the scaled-up hyperparameters, please adapt this script to your
# training setup and set this flag to False
IS_LOCAL = True
def get_training_steps(batch_size, n_epochs):
return (N_TRAIN_EXAMPLES * n_epochs) // batch_size
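# A hedged worked example (not part of the original file): with the local
# debug settings below (train_batch_size=2, n_epochs=110),
# get_training_steps(2, 110) = (1281167 * 110) // 2 = 70464185 steps.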
def get_config():
"""Return config object for training."""
use_debug_settings = IS_LOCAL
config = base_config.get_base_config()
# Experiment config.
local_batch_size = 2
# Modify this to adapt to your custom distributed learning setup
num_devices = 1
config.train_batch_size = local_batch_size * num_devices
config.n_epochs = 110
def _default_or_debug(default_value, debug_value):
return debug_value if use_debug_settings else default_value
n_train_examples = N_TRAIN_EXAMPLES
num_classes = N_CLASSES
config.experiment_kwargs = config_dict.ConfigDict(
dict(
config=dict(
optimizer=dict(
base_lr=5e-4,
max_norm=10.0, # < 0 to turn off.
schedule_type='constant_cosine',
weight_decay=1e-1,
decay_pos_embs=True,
scale_by_batch=True,
cosine_decay_kwargs=dict(
init_value=0.0,
warmup_epochs=0,
end_value=0.0,
),
step_decay_kwargs=dict(
decay_boundaries=[0.5, 0.8, 0.95],
decay_rate=0.1,
),
constant_cosine_decay_kwargs=dict(
constant_fraction=0.5,
end_value=0.0,
),
optimizer='lamb',
# Optimizer-specific kwargs:
adam_kwargs=dict(
b1=0.9,
b2=0.999,
eps=1e-8,
),
lamb_kwargs=dict(
b1=0.9,
b2=0.999,
eps=1e-6,
),
),
# Don't specify output_channels - it's not used for
# classifiers.
model=dict(
perceiver_kwargs=dict(
input_preprocessor=dict(
prep_type='pixels',
# Channels for conv/conv1x1 preprocessing:
num_channels=64,
# -------------------------
# Position encoding arguments:
# -------------------------
position_encoding_type='fourier',
concat_or_add_pos='concat',
spatial_downsample=1,
# If >0, project position to this size:
project_pos_dim=-1,
trainable_position_encoding_kwargs=dict(
num_channels=258, # Match default # for Fourier.
init_scale=0.02,
),
fourier_position_encoding_kwargs=dict(
num_bands=64,
max_resolution=(224, 224),
sine_only=False,
concat_pos=True,
),
),
encoder=dict(
num_self_attends_per_block=_default_or_debug(6, 2),
# Weights won't be shared if num_blocks is set to 1.
num_blocks=_default_or_debug(8, 2),
z_index_dim=512,
num_z_channels=1024,
num_cross_attend_heads=1,
num_self_attend_heads=8,
cross_attend_widening_factor=1,
self_attend_widening_factor=1,
dropout_prob=0.0,
# Position encoding for the latent array.
z_pos_enc_init_scale=0.02,
cross_attention_shape_for_attn='kv',
use_query_residual=True,
),
decoder=dict(
num_z_channels=1024,
use_query_residual=True,
# Position encoding for the output logits.
position_encoding_type='trainable',
trainable_position_encoding_kwargs=dict(
num_channels=1024,
init_scale=0.02,
),
),
),
),
training=dict(
images_per_epoch=n_train_examples,
label_smoothing=0.1,
n_epochs=config.get_oneway_ref('n_epochs'),
batch_size=config.get_oneway_ref('train_batch_size')
),
data=dict(
num_classes=num_classes,
# Run on smaller images to debug.
im_dim=_default_or_debug(224, 32),
augmentation=dict(
# Typical randaug params:
# num_layers in [1, 3]
# magnitude in [5, 30]
# Set randaugment to None to disable.
randaugment=dict(
num_layers=4,
magnitude=5),
cutmix=True,
# Mixup alpha should be in [0, 1].
# Set to None to disable.
mixup_alpha=0.2,
),
),
evaluation=dict(
subset='test',
batch_size=2,
),
)
)
)
# Training loop config.
config.training_steps = get_training_steps(
config.get_oneway_ref('train_batch_size'),
config.get_oneway_ref('n_epochs'))
config.log_train_data_interval = 60
config.log_tensors_interval = 60
config.save_checkpoint_interval = 300
config.eval_specific_checkpoint_dir = ''
config.best_model_eval_metric = 'eval_top_1_acc'
  config.checkpoint_dir = '/tmp/perceiver_imagenet_checkpoints'
config.train_checkpoint_all_hosts = False
# Prevents accidentally setting keys that aren't recognized (e.g. in tests).
config.lock()
return config
class Experiment(experiment.AbstractExperiment):
"""ImageNet experiment."""
# A map from object properties that will be checkpointed to their name
# in a checkpoint. Currently we assume that these are all sharded
# device arrays.
CHECKPOINT_ATTRS = {
'_params': 'params',
'_state': 'state',
'_opt_state': 'opt_state',
}
def __init__(self, mode, init_rng, config):
"""Initializes experiment."""
super(Experiment, self).__init__(mode=mode, init_rng=init_rng)
self.mode = mode
self.init_rng = init_rng
self.config = config
# Checkpointed experiment state.
self._params = None
self._state = None
self._opt_state = None
# Input pipelines.
self._train_input = None
self._eval_input = None
self.forward = hk.transform_with_state(self._forward_fn)
# NOTE: We "donate" the `params, state, opt_state` arguments which allows
# JAX (on some backends) to reuse the device memory associated with these
# inputs to store the outputs of our function (which also start with
# `params, state, opt_state`).
self._update_func = jax.pmap(self._update_func, axis_name='i',
donate_argnums=(0, 1, 2))
self._eval_batch = jax.jit(self._eval_batch)
def _forward_fn(
self,
inputs: dataset.Batch,
is_training: bool,
) -> jnp.ndarray:
images = inputs['images']
perceiver_kwargs = self.config.model.perceiver_kwargs
input_preprocessor = io_processors.ImagePreprocessor(
**perceiver_kwargs['input_preprocessor'])
encoder = perceiver.PerceiverEncoder(**perceiver_kwargs['encoder'])
decoder = perceiver.ClassificationDecoder(
self.config.data.num_classes,
**perceiver_kwargs['decoder'])
model = perceiver.Perceiver(
encoder=encoder,
decoder=decoder,
input_preprocessor=input_preprocessor)
return model(images, is_training=is_training)
# _ _
# | |_ _ __ __ _(_)_ __
# | __| '__/ _` | | '_ \
# | |_| | | (_| | | | | |
# \__|_| \__,_|_|_| |_|
#
def step(self, global_step: int, rng: jnp.ndarray,
*unused_args, **unused_kwargs):
"""See base class."""
if self._train_input is None:
self._initialize_train()
inputs = next(self._train_input)
self._params, self._state, self._opt_state, scalars = (
self._update_func(
self._params, self._state, self._opt_state, inputs, rng, global_step
))
scalars = jl_utils.get_first(scalars)
return scalars
def _initialize_train(self):
self._train_input = jl_utils.py_prefetch(self._build_train_input)
total_batch_size = self.config.training.batch_size
steps_per_epoch = (
self.config.training.images_per_epoch / self.config.training.batch_size)
total_steps = self.config.training.n_epochs * steps_per_epoch
    # Build the learning rate schedule.
self._lr_schedule = utils.get_learning_rate_schedule(
total_batch_size, steps_per_epoch, total_steps, self.config.optimizer)
self._optimizer = utils.make_optimizer(
self.config.optimizer,
self._lr_schedule)
# Check we haven't already restored params
if self._params is None:
logging.info('Initializing parameters.')
inputs = next(self._train_input)
init_net = jax.pmap(lambda *a: self.forward.init(*a, is_training=True))
init_opt = jax.pmap(self._optimizer.init)
# Init uses the same RNG key on all hosts+devices to ensure everyone
# computes the same initial state.
init_rng = jl_utils.bcast_local_devices(self.init_rng)
self._params, self._state = init_net(init_rng, inputs)
self._opt_state = init_opt(self._params)
def _load_data(self, split, is_training, batch_dims):
"""Wrapper for dataset loading."""
return dataset.load(
split=split,
is_training=is_training,
batch_dims=batch_dims,
im_dim=self.config.data.im_dim,
augmentation_settings=self.config.data.augmentation,
)
def _build_train_input(self) -> Generator[dataset.Batch, None, None]:
"""See base class."""
num_devices = jax.device_count()
global_batch_size = self.config.training.batch_size
per_device_batch_size, ragged = divmod(global_batch_size, num_devices)
if ragged:
raise ValueError(
f'Global batch size {global_batch_size} must be divisible by '
f'num devices {num_devices}')
split = dataset.Split.TRAIN_AND_VALID
return self._load_data(
split=split,
is_training=True,
batch_dims=[jax.local_device_count(), per_device_batch_size])
def _one_hot(self, value):
"""One-hot encoding potentially over a sequence of labels."""
y = jax.nn.one_hot(value, self.config.data.num_classes)
return y
def _loss_fn(
self,
params: hk.Params,
state: hk.State,
inputs: dataset.Batch,
rng: jnp.ndarray,
) -> Tuple[jnp.ndarray, Tuple[Scalars, hk.State]]:
logits, state = self.forward.apply(
params, state, rng, inputs, is_training=True)
label = self._one_hot(inputs['labels'])
# Handle cutmix/mixup label mixing:
if 'mix_labels' in inputs:
logging.info('Using mixup or cutmix!')
mix_label = self._one_hot(inputs['mix_labels'])
mix_ratio = inputs['ratio'][:, None]
label = mix_ratio * label + (1. - mix_ratio) * mix_label
# Apply label-smoothing to one-hot labels.
label_smoothing = self.config.training.label_smoothing
if not (label_smoothing >= 0. and label_smoothing < 1.):
raise ValueError(
f"'label_smoothing is {label_smoothing} and should be in [0, 1)")
if label_smoothing > 0:
smooth_positives = 1. - label_smoothing
smooth_negatives = label_smoothing / self.config.data.num_classes
label = smooth_positives * label + smooth_negatives
loss_w_batch = utils.softmax_cross_entropy(logits, label)
loss = jnp.mean(loss_w_batch, dtype=loss_w_batch.dtype)
scaled_loss = loss / jax.device_count()
metrics = utils.topk_correct(logits, inputs['labels'], prefix='')
metrics = jax.tree_map(jnp.mean, metrics)
top_1_acc = metrics['top_1_acc']
top_5_acc = metrics['top_5_acc']
loss_scalars = dict(
loss=loss,
top_1_acc=top_1_acc,
top_5_acc=top_5_acc,
)
return scaled_loss, (loss_scalars, state)
def _update_func(
self,
params: hk.Params,
state: hk.State,
opt_state: OptState,
inputs: dataset.Batch,
rng: jnp.ndarray,
global_step: int,
) -> Tuple[hk.Params, hk.State, OptState, Scalars]:
"""Applies an update to parameters and returns new state."""
# This function computes the gradient of the first output of loss_fn and
# passes through the other arguments unchanged.
grad_loss_fn = jax.grad(self._loss_fn, has_aux=True)
scaled_grads, (loss_scalars, state) = grad_loss_fn(
params, state, inputs, rng)
grads = jax.lax.psum(scaled_grads, axis_name='i')
# Grab the learning rate to log before performing the step.
learning_rate = self._lr_schedule(global_step)
# Compute and apply updates via our optimizer.
updates, opt_state = self._optimizer.update(grads, opt_state, params)
params = optax.apply_updates(params, updates)
    # Count parameters (for logging only).
    n_params = sum(
        np.prod(p.shape) for p in jax.tree_util.tree_leaves(params))
# Scalars to log (note: we log the mean across all hosts/devices).
scalars = {'learning_rate': learning_rate,
'n_params (M)': float(n_params/1e6),
'global_gradient_norm': optax.global_norm(grads)}
loss_scalars = {f'train_{k}': v for k, v in loss_scalars.items()}
scalars.update(loss_scalars)
scalars = jax.lax.pmean(scalars, axis_name='i')
return params, state, opt_state, scalars
# _
# _____ ____ _| |
# / _ \ \ / / _` | |
# | __/\ V / (_| | |
# \___| \_/ \__,_|_|
#
def evaluate(self, global_step, rng, **unused_args):
"""See base class."""
global_step = np.array(jl_utils.get_first(global_step))
scalars = jax.device_get(self._eval_epoch(jl_utils.get_first(rng)))
logging.info('[Step %d] Eval scalars: %s', global_step, scalars)
return scalars
def _eval_batch(
self,
params: hk.Params,
state: hk.State,
inputs: dataset.Batch,
rng: jnp.ndarray,
) -> Scalars:
"""Evaluates a batch."""
logits, _ = self.forward.apply(
params, state, rng, inputs, is_training=False)
labels = self._one_hot(inputs['labels'])
loss = utils.softmax_cross_entropy(logits, labels)
metrics = utils.topk_correct(logits, inputs['labels'], prefix='')
metrics = jax.tree_map(jnp.mean, metrics)
top_1_acc = metrics['top_1_acc']
top_5_acc = metrics['top_5_acc']
bs = logits.shape[0]
top_1_acc = jnp.expand_dims(top_1_acc, axis=0) * bs
top_5_acc = jnp.expand_dims(top_5_acc, axis=0) * bs
# NOTE: Returned values will be summed and finally divided by num_samples.
return {
'eval_loss': loss,
'eval_top_1_acc': top_1_acc, 'eval_top_5_acc': top_5_acc}
def _build_eval_input(self) -> Generator[dataset.Batch, None, None]:
split = dataset.Split.from_string(self.config.evaluation.subset)
return self._load_data(
split=split,
is_training=False,
batch_dims=[self.config.evaluation.batch_size])
def _eval_epoch(self, rng):
"""Evaluates an epoch."""
num_samples = 0.
summed_scalars = None
params = jl_utils.get_first(self._params)
state = jl_utils.get_first(self._state)
for inputs in self._build_eval_input():
num_samples += inputs['labels'].shape[0]
scalars = self._eval_batch(params, state, inputs, rng)
# Accumulate the sum of scalars for each step.
scalars = jax.tree_map(lambda x: jnp.sum(x, axis=0), scalars)
if summed_scalars is None:
summed_scalars = scalars
else:
summed_scalars = jax.tree_map(jnp.add, summed_scalars, scalars)
mean_scalars = jax.tree_map(lambda x: x / num_samples, summed_scalars)
return mean_scalars
if __name__ == '__main__':
flags.mark_flag_as_required('config')
app.run(functools.partial(platform.main, Experiment))
| deepmind-research-master | perceiver/train/experiment.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities."""
from typing import Callable, List, Mapping, NamedTuple, Optional, Tuple, Union
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
import optax
Batch = Mapping[str, np.ndarray]
OptState = Tuple[optax.TraceState, optax.ScaleByScheduleState, optax.ScaleState]
Scalars = Mapping[str, jnp.ndarray]
ParamsOrState = Union[hk.Params, hk.State]
NORM_NAMES = ['layer_norm', 'batchnorm']
# any_in and topk_correct taken from
# https://github.com/deepmind/deepmind-research/blob/master/nfnets/utils.py
@jax.vmap
def any_in(prediction, target):
"""For each row in a and b, checks if any element of a is in b."""
return jnp.isin(prediction, target)
def topk_correct(logits, labels, mask=None, prefix='', topk=(1, 5)):
"""Calculate top-k error for multiple k values."""
metrics = {}
argsorted_logits = jnp.argsort(logits)
for k in topk:
pred_labels = argsorted_logits[..., -k:]
# Get the number of examples where the label is in the top-k predictions
correct = any_in(pred_labels, labels).any(axis=-1).astype(jnp.float32)
if mask is not None:
correct *= mask
metrics[f'{prefix}top_{k}_acc'] = correct
return metrics
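# A hedged worked example (not part of the original file):
#
#   logits = jnp.array([[0.1, 0.2, 0.9, 0.4, 0.3, 0.0]])
#   topk_correct(logits, jnp.array([2]))  # label 2 is the arg-max
#   # -> {'top_1_acc': array([1.]), 'top_5_acc': array([1.])}
#   topk_correct(logits, jnp.array([5]))  # label 5 has the lowest logit
#   # -> {'top_1_acc': array([0.]), 'top_5_acc': array([0.])}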
def softmax_cross_entropy(logits, labels):
"""Computes softmax cross entropy given logits and one-hot class labels.
Args:
logits: Logit output values.
labels: Ground truth one-hot-encoded labels.
Returns:
    Loss values of shape `labels.shape[:-1]` (one scalar per example).
"""
return jnp.asarray(optax.softmax_cross_entropy(logits, labels))
def _get_batch_scaled_lr(total_batch_size, lr, scale_by_batch=True):
# This is the linear scaling rule in Section 5.1 of
# https://arxiv.org/pdf/1706.02677.pdf.
if scale_by_batch:
lr = (lr * total_batch_size) / 256
return lr
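# A hedged worked example (not part of the original file): with base_lr=5e-4
# (the value used in train/experiment.py) and a total batch size of 1024, the
# scaled learning rate is 5e-4 * 1024 / 256 = 2e-3.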
def get_learning_rate_schedule(
total_batch_size, steps_per_epoch, total_steps, optimizer_config):
"""Build the learning rate schedule function."""
base_lr = _get_batch_scaled_lr(total_batch_size, optimizer_config.base_lr,
optimizer_config.scale_by_batch)
schedule_type = optimizer_config.schedule_type
if schedule_type == 'steps':
boundaries = optimizer_config.step_decay_kwargs.decay_boundaries
boundaries.sort()
decay_rate = optimizer_config.step_decay_kwargs.decay_rate
boundaries_and_scales = {
int(boundary * total_steps): decay_rate for boundary in boundaries}
schedule_fn = optax.piecewise_constant_schedule(
init_value=base_lr, boundaries_and_scales=boundaries_and_scales)
elif schedule_type == 'cosine':
warmup_steps = (optimizer_config.cosine_decay_kwargs.warmup_epochs
* steps_per_epoch)
# Batch scale the other lr values as well:
init_value = _get_batch_scaled_lr(
total_batch_size,
optimizer_config.cosine_decay_kwargs.init_value,
optimizer_config.scale_by_batch)
end_value = _get_batch_scaled_lr(
total_batch_size,
optimizer_config.cosine_decay_kwargs.end_value,
optimizer_config.scale_by_batch)
schedule_fn = optax.warmup_cosine_decay_schedule(
init_value=init_value,
peak_value=base_lr,
warmup_steps=warmup_steps,
decay_steps=total_steps,
end_value=end_value)
elif schedule_type == 'constant_cosine':
# Convert end_value to alpha, used by cosine_decay_schedule.
alpha = optimizer_config.constant_cosine_decay_kwargs.end_value / base_lr
# Number of steps spent in constant phase.
constant_steps = int(
optimizer_config.constant_cosine_decay_kwargs.constant_fraction
* total_steps)
decay_steps = total_steps - constant_steps
constant_phase = optax.constant_schedule(value=base_lr)
decay_phase = optax.cosine_decay_schedule(
init_value=base_lr,
decay_steps=decay_steps,
alpha=alpha)
schedule_fn = optax.join_schedules(
schedules=[constant_phase, decay_phase],
boundaries=[constant_steps])
else:
raise ValueError(f'Unknown learning rate schedule: {schedule_type}')
return schedule_fn
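# Example usage (a minimal sketch, not part of the original file; assumes
# `ml_collections.config_dict` is available and the illustrative step counts
# roughly match batch size 1024 on ImageNet):
#
#   from ml_collections import config_dict
#   optimizer_config = config_dict.ConfigDict(dict(
#       base_lr=5e-4, scale_by_batch=True, schedule_type='constant_cosine',
#       constant_cosine_decay_kwargs=dict(constant_fraction=0.5,
#                                         end_value=0.0)))
#   schedule_fn = get_learning_rate_schedule(
#       total_batch_size=1024, steps_per_epoch=1251, total_steps=137610,
#       optimizer_config=optimizer_config)
#   schedule_fn(0)  # -> 2e-3 throughout the constant phase.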
def _weight_decay_exclude(
exclude_names: Optional[List[str]] = None
) -> Callable[[str, str, jnp.ndarray], bool]:
"""Logic for deciding which parameters to include for weight decay..
Args:
exclude_names: an optional list of names to include for weight_decay. ['w']
by default.
Returns:
A predicate that returns True for params that need to be excluded from
weight_decay.
"""
# By default weight_decay the weights but not the biases.
if not exclude_names:
exclude_names = ['b']
def exclude(module_name: str, name: str, value: jnp.array):
del value
# Do not weight decay the parameters of normalization blocks.
    if any(norm_name in module_name for norm_name in NORM_NAMES):
return True
else:
return name in exclude_names
return exclude
class AddWeightDecayState(NamedTuple):
"""Stateless transformation."""
def add_weight_decay(
weight_decay: float,
exclude_names: Optional[List[str]] = None) -> optax.GradientTransformation:
"""Add parameter scaled by `weight_decay` to the `updates`.
Same as optax.add_decayed_weights but can exclude parameters by name.
Args:
weight_decay: weight_decay coefficient.
exclude_names: an optional list of names to exclude for weight_decay. ['b']
by default.
Returns:
An (init_fn, update_fn) tuple.
"""
def init_fn(_):
return AddWeightDecayState()
def update_fn(updates, state, params):
exclude = _weight_decay_exclude(exclude_names=exclude_names)
u_ex, u_in = hk.data_structures.partition(exclude, updates)
_, p_in = hk.data_structures.partition(exclude, params)
u_in = jax.tree_map(lambda g, p: g + weight_decay * p, u_in, p_in)
updates = hk.data_structures.merge(u_ex, u_in)
return updates, state
return optax.GradientTransformation(init_fn, update_fn)
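# Example usage (a minimal sketch, not part of the original file): the same
# pattern `make_optimizer` uses below, shown with Adam and a fixed step size.
#
#   tx = optax.chain(
#       optax.scale_by_adam(),
#       add_weight_decay(1e-4, exclude_names=['b']),
#       optax.scale(-1e-3))
#
# Biases and normalization parameters are left undecayed by the exclusion
# predicate.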
def make_optimizer(optimizer_config, lr_schedule):
"""Construct the optax optimizer with given LR schedule."""
if (optimizer_config.get('decay_pos_embs') is None or
optimizer_config.decay_pos_embs):
# Decay learned position embeddings by default.
weight_decay_exclude_names = ['b']
else:
weight_decay_exclude_names = ['pos_embs', 'b']
optax_chain = []
if optimizer_config.max_norm > 0:
optax_chain.append(
optax.clip_by_global_norm(optimizer_config.max_norm))
if optimizer_config.optimizer == 'adam':
# See: https://arxiv.org/abs/1412.6980
optax_chain.extend([
optax.scale_by_adam(**optimizer_config.adam_kwargs),
add_weight_decay(
optimizer_config.weight_decay,
exclude_names=weight_decay_exclude_names)
])
elif optimizer_config.optimizer == 'lamb':
# See: https://arxiv.org/abs/1904.00962
optax_chain.extend([
optax.scale_by_adam(**optimizer_config.lamb_kwargs),
add_weight_decay(
optimizer_config.weight_decay,
exclude_names=weight_decay_exclude_names),
optax.scale_by_trust_ratio()
])
else:
raise ValueError(f'Undefined optimizer {optimizer_config.optimizer}')
# Scale by the (negative) learning rate.
optax_chain.extend([
optax.scale_by_schedule(lr_schedule),
optax.scale(-1),
])
return optax.chain(*optax_chain)
| deepmind-research-master | perceiver/train/utils.py |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""AutoAugment and RandAugment policies for enhanced image preprocessing.
AutoAugment Reference: https://arxiv.org/abs/1805.09501
RandAugment Reference: https://arxiv.org/abs/1909.13719
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import math
from ml_collections import config_dict
import tensorflow.compat.v1 as tf
from tensorflow_addons import image as contrib_image
# pylint: disable=deprecated-method
# This signifies the max integer that the controller RNN could predict for the
# augmentation scheme.
_MAX_LEVEL = 10.
def policy_v0():
"""Autoaugment policy that was used in AutoAugment Paper."""
# Each tuple is an augmentation operation of the form
# (operation, probability, magnitude). Each element in policy is a
# sub-policy that will be applied sequentially on the image.
policy = [
[('Equalize', 0.8, 1), ('ShearY', 0.8, 4)],
[('Color', 0.4, 9), ('Equalize', 0.6, 3)],
[('Color', 0.4, 1), ('Rotate', 0.6, 8)],
[('Solarize', 0.8, 3), ('Equalize', 0.4, 7)],
[('Solarize', 0.4, 2), ('Solarize', 0.6, 2)],
[('Color', 0.2, 0), ('Equalize', 0.8, 8)],
[('Equalize', 0.4, 8), ('SolarizeAdd', 0.8, 3)],
[('ShearX', 0.2, 9), ('Rotate', 0.6, 8)],
[('Color', 0.6, 1), ('Equalize', 1.0, 2)],
[('Invert', 0.4, 9), ('Rotate', 0.6, 0)],
[('Equalize', 1.0, 9), ('ShearY', 0.6, 3)],
[('Color', 0.4, 7), ('Equalize', 0.6, 0)],
[('Posterize', 0.4, 6), ('AutoContrast', 0.4, 7)],
[('Solarize', 0.6, 8), ('Color', 0.6, 9)],
[('Solarize', 0.2, 4), ('Rotate', 0.8, 9)],
[('Rotate', 1.0, 7), ('TranslateY', 0.8, 9)],
[('ShearX', 0.0, 0), ('Solarize', 0.8, 4)],
[('ShearY', 0.8, 0), ('Color', 0.6, 4)],
[('Color', 1.0, 0), ('Rotate', 0.6, 2)],
[('Equalize', 0.8, 4), ('Equalize', 0.0, 8)],
[('Equalize', 1.0, 4), ('AutoContrast', 0.6, 2)],
[('ShearY', 0.4, 7), ('SolarizeAdd', 0.6, 7)],
[('Posterize', 0.8, 2), ('Solarize', 0.6, 10)],
[('Solarize', 0.6, 8), ('Equalize', 0.6, 1)],
[('Color', 0.8, 6), ('Rotate', 0.4, 5)],
]
return policy
def policy_vtest():
"""Autoaugment test policy for debugging."""
# Each tuple is an augmentation operation of the form
# (operation, probability, magnitude). Each element in policy is a
# sub-policy that will be applied sequentially on the image.
policy = [
[('TranslateX', 1.0, 4), ('Equalize', 1.0, 10)],
]
return policy
def blend(image1, image2, factor):
"""Blend image1 and image2 using 'factor'.
Factor can be above 0.0. A value of 0.0 means only image1 is used.
A value of 1.0 means only image2 is used. A value between 0.0 and
1.0 means we linearly interpolate the pixel values between the two
images. A value greater than 1.0 "extrapolates" the difference
between the two pixel values, and we clip the results to values
between 0 and 255.
Args:
image1: An image Tensor of type uint8.
image2: An image Tensor of type uint8.
factor: A floating point value above 0.0.
Returns:
A blended image Tensor of type uint8.
"""
if factor == 0.0:
return tf.convert_to_tensor(image1)
if factor == 1.0:
return tf.convert_to_tensor(image2)
image1 = tf.to_float(image1)
image2 = tf.to_float(image2)
difference = image2 - image1
scaled = factor * difference
# Do addition in float.
temp = tf.to_float(image1) + scaled
# Interpolate
  if 0.0 < factor < 1.0:
# Interpolation means we always stay within 0 and 255.
return tf.cast(temp, tf.uint8)
# Extrapolate:
#
# We need to clip and then cast.
return tf.cast(tf.clip_by_value(temp, 0.0, 255.0), tf.uint8)
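# A hedged worked example (not part of the original file): for pixel values
# image1=100 and image2=200, blend(image1, image2, 0.5) -> 150
# (interpolation), while blend(image1, image2, 1.5) -> 250 (extrapolation;
# results are clipped to [0, 255]).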
def cutout(image, pad_size, replace=0):
"""Apply cutout (https://arxiv.org/abs/1708.04552) to image.
  This operation applies a (2*pad_size x 2*pad_size) mask of zeros to
  a random location within `image`. The pixel values filled in will be of
  the value `replace`. The location where the mask will be applied is
  chosen uniformly at random over the whole image.
Args:
image: An image Tensor of type uint8.
    pad_size: Specifies the size of the zero mask applied to the image;
      the generated mask will be of size (2*pad_size x 2*pad_size).
replace: What pixel value to fill in the image in the area that has
the cutout mask applied to it.
Returns:
An image Tensor that is of type uint8.
"""
image_height = tf.shape(image)[0]
image_width = tf.shape(image)[1]
# Sample the center location in the image where the zero mask will be applied.
cutout_center_height = tf.random_uniform(
shape=[], minval=0, maxval=image_height,
dtype=tf.int32)
cutout_center_width = tf.random_uniform(
shape=[], minval=0, maxval=image_width,
dtype=tf.int32)
lower_pad = tf.maximum(0, cutout_center_height - pad_size)
upper_pad = tf.maximum(0, image_height - cutout_center_height - pad_size)
left_pad = tf.maximum(0, cutout_center_width - pad_size)
right_pad = tf.maximum(0, image_width - cutout_center_width - pad_size)
cutout_shape = [image_height - (lower_pad + upper_pad),
image_width - (left_pad + right_pad)]
padding_dims = [[lower_pad, upper_pad], [left_pad, right_pad]]
mask = tf.pad(
tf.zeros(cutout_shape, dtype=image.dtype),
padding_dims, constant_values=1)
mask = tf.expand_dims(mask, -1)
mask = tf.tile(mask, [1, 1, 3])
image = tf.where(
tf.equal(mask, 0),
tf.ones_like(image, dtype=image.dtype) * replace,
image)
return image
def solarize(image, threshold=128):
# For each pixel in the image, select the pixel
# if the value is less than the threshold.
# Otherwise, subtract the pixel from 255.
return tf.where(image < threshold, image, 255 - image)
def solarize_add(image, addition=0, threshold=128):
# For each pixel in the image less than threshold
# we add 'addition' amount to it and then clip the
# pixel value to be between 0 and 255. The value
# of 'addition' is between -128 and 128.
added_image = tf.cast(image, tf.int64) + addition
added_image = tf.cast(tf.clip_by_value(added_image, 0, 255), tf.uint8)
return tf.where(image < threshold, added_image, image)
def color(image, factor):
"""Equivalent of PIL Color."""
degenerate = tf.image.grayscale_to_rgb(tf.image.rgb_to_grayscale(image))
return blend(degenerate, image, factor)
def contrast(image, factor):
"""Equivalent of PIL Contrast."""
degenerate = tf.image.rgb_to_grayscale(image)
# Cast before calling tf.histogram.
degenerate = tf.cast(degenerate, tf.int32)
# Compute the grayscale histogram, then compute the mean pixel value,
# and create a constant image size of that value. Use that as the
# blending degenerate target of the original image.
hist = tf.histogram_fixed_width(degenerate, [0, 255], nbins=256)
mean = tf.reduce_sum(tf.cast(hist, tf.float32)) / 256.0
degenerate = tf.ones_like(degenerate, dtype=tf.float32) * mean
degenerate = tf.clip_by_value(degenerate, 0.0, 255.0)
degenerate = tf.image.grayscale_to_rgb(tf.cast(degenerate, tf.uint8))
return blend(degenerate, image, factor)
def brightness(image, factor):
"""Equivalent of PIL Brightness."""
degenerate = tf.zeros_like(image)
return blend(degenerate, image, factor)
def posterize(image, bits):
"""Equivalent of PIL Posterize."""
shift = 8 - bits
return tf.bitwise.left_shift(tf.bitwise.right_shift(image, shift), shift)
def rotate(image, degrees, replace):
"""Rotates the image by degrees either clockwise or counterclockwise.
Args:
image: An image Tensor of type uint8.
degrees: Float, a scalar angle in degrees to rotate all images by. If
degrees is positive the image will be rotated clockwise otherwise it will
be rotated counterclockwise.
replace: A one or three value 1D tensor to fill empty pixels caused by
the rotate operation.
Returns:
The rotated version of image.
"""
# Convert from degrees to radians.
degrees_to_radians = math.pi / 180.0
radians = degrees * degrees_to_radians
# In practice, we should randomize the rotation degrees by flipping
# it negatively half the time, but that's done on 'degrees' outside
# of the function.
image = contrib_image.rotate(wrap(image), radians)
return unwrap(image, replace)
def translate_x(image, pixels, replace):
"""Equivalent of PIL Translate in X dimension."""
image = contrib_image.translate(wrap(image), [-pixels, 0])
return unwrap(image, replace)
def translate_y(image, pixels, replace):
"""Equivalent of PIL Translate in Y dimension."""
image = contrib_image.translate(wrap(image), [0, -pixels])
return unwrap(image, replace)
def shear_x(image, level, replace):
"""Equivalent of PIL Shearing in X dimension."""
# Shear parallel to x axis is a projective transform
# with a matrix form of:
# [1 level
# 0 1].
image = contrib_image.transform(
wrap(image), [1., level, 0., 0., 1., 0., 0., 0.])
return unwrap(image, replace)
def shear_y(image, level, replace):
"""Equivalent of PIL Shearing in Y dimension."""
# Shear parallel to y axis is a projective transform
# with a matrix form of:
# [1 0
# level 1].
image = contrib_image.transform(
wrap(image), [1., 0., 0., level, 1., 0., 0., 0.])
return unwrap(image, replace)
def autocontrast(image):
"""Implements Autocontrast function from PIL using TF ops.
Args:
image: A 3D uint8 tensor.
Returns:
The image after it has had autocontrast applied to it and will be of type
uint8.
"""
def scale_channel(image):
"""Scale the 2D image using the autocontrast rule."""
    # A possibly cheaper version can be done using cumsum/unique_with_counts
    # over the histogram values, rather than iterating over the entire image,
    # to compute mins and maxes.
lo = tf.to_float(tf.reduce_min(image))
hi = tf.to_float(tf.reduce_max(image))
# Scale the image, making the lowest value 0 and the highest value 255.
def scale_values(im):
scale = 255.0 / (hi - lo)
offset = -lo * scale
im = tf.to_float(im) * scale + offset
im = tf.clip_by_value(im, 0.0, 255.0)
return tf.cast(im, tf.uint8)
result = tf.cond(hi > lo, lambda: scale_values(image), lambda: image)
return result
# Assumes RGB for now. Scales each channel independently
# and then stacks the result.
s1 = scale_channel(image[:, :, 0])
s2 = scale_channel(image[:, :, 1])
s3 = scale_channel(image[:, :, 2])
image = tf.stack([s1, s2, s3], 2)
return image
def sharpness(image, factor):
"""Implements Sharpness function from PIL using TF ops."""
orig_image = image
image = tf.cast(image, tf.float32)
# Make image 4D for conv operation.
image = tf.expand_dims(image, 0)
# SMOOTH PIL Kernel.
kernel = tf.constant(
[[1, 1, 1], [1, 5, 1], [1, 1, 1]], dtype=tf.float32,
shape=[3, 3, 1, 1]) / 13.
# Tile across channel dimension.
kernel = tf.tile(kernel, [1, 1, 3, 1])
strides = [1, 1, 1, 1]
with tf.device('/cpu:0'):
# Some augmentation that uses depth-wise conv will cause crashing when
# training on GPU. See (b/156242594) for details.
degenerate = tf.nn.depthwise_conv2d(
image, kernel, strides, padding='VALID', rate=[1, 1])
degenerate = tf.clip_by_value(degenerate, 0.0, 255.0)
degenerate = tf.squeeze(tf.cast(degenerate, tf.uint8), [0])
# For the borders of the resulting image, fill in the values of the
# original image.
mask = tf.ones_like(degenerate)
padded_mask = tf.pad(mask, [[1, 1], [1, 1], [0, 0]])
padded_degenerate = tf.pad(degenerate, [[1, 1], [1, 1], [0, 0]])
result = tf.where(tf.equal(padded_mask, 1), padded_degenerate, orig_image)
# Blend the final result.
return blend(result, orig_image, factor)
def equalize(image):
"""Implements Equalize function from PIL using TF ops."""
def scale_channel(im, c):
"""Scale the data in the channel to implement equalize."""
im = tf.cast(im[:, :, c], tf.int32)
# Compute the histogram of the image channel.
histo = tf.histogram_fixed_width(im, [0, 255], nbins=256)
    # For the purposes of computing the step, keep only the nonzero values.
nonzero = tf.where(tf.not_equal(histo, 0))
nonzero_histo = tf.reshape(tf.gather(histo, nonzero), [-1])
step = (tf.reduce_sum(nonzero_histo) - nonzero_histo[-1]) // 255
def build_lut(histo, step):
      # Compute the cumulative sum, shifting by step // 2
      # and then normalizing by step.
lut = (tf.cumsum(histo) + (step // 2)) // step
# Shift lut, prepending with 0.
lut = tf.concat([[0], lut[:-1]], 0)
# Clip the counts to be in range. This is done
# in the C code for image.point.
return tf.clip_by_value(lut, 0, 255)
# If step is zero, return the original image. Otherwise, build
# lut from the full histogram and step and then index from it.
result = tf.cond(tf.equal(step, 0),
lambda: im,
lambda: tf.gather(build_lut(histo, step), im))
return tf.cast(result, tf.uint8)
# Assumes RGB for now. Scales each channel independently
# and then stacks the result.
s1 = scale_channel(image, 0)
s2 = scale_channel(image, 1)
s3 = scale_channel(image, 2)
image = tf.stack([s1, s2, s3], 2)
return image
def invert(image):
"""Inverts the image pixels."""
image = tf.convert_to_tensor(image)
return 255 - image
def wrap(image):
"""Returns 'image' with an extra channel set to all 1s."""
shape = tf.shape(image)
extended_channel = tf.ones([shape[0], shape[1], 1], image.dtype)
extended = tf.concat([image, extended_channel], 2)
return extended
def unwrap(image, replace):
"""Unwraps an image produced by wrap.
  Wherever the last (alpha) channel is 0 at a spatial position, the other
  three channels at that position are set to `replace` (grayed out when
  `replace` is 128). Operations like translate and shear on a wrapped
  Tensor will leave 0s in empty locations. Some transformations look
  at the intensity of values to do preprocessing, and we want these
  empty pixels to assume the 'average' value, rather than pure black.
Args:
image: A 3D Image Tensor with 4 channels.
replace: A one or three value 1D tensor to fill empty pixels.
Returns:
image: A 3D image Tensor with 3 channels.
"""
image_shape = tf.shape(image)
# Flatten the spatial dimensions.
flattened_image = tf.reshape(image, [-1, image_shape[2]])
# Find all pixels where the last channel is zero.
alpha_channel = flattened_image[:, 3]
replace = tf.concat([replace, tf.ones([1], image.dtype)], 0)
# Where they are zero, fill them in with 'replace'.
flattened_image = tf.where(
tf.equal(alpha_channel, 0),
tf.ones_like(flattened_image, dtype=image.dtype) * replace,
flattened_image)
image = tf.reshape(flattened_image, image_shape)
image = tf.slice(image, [0, 0, 0], [image_shape[0], image_shape[1], 3])
return image
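# Illustrative example of the wrap()/unwrap() round trip: a translate on a
# wrapped image leaves 0s in the extra channel wherever new pixels were
# shifted in, and unwrap() then fills exactly those locations with `replace`:
#   shifted = contrib_image.translate(wrap(image), [10, 0])
#   image = unwrap(shifted, [128, 128, 128])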
NAME_TO_FUNC = {
'AutoContrast': autocontrast,
'Equalize': equalize,
'Invert': invert,
'Rotate': rotate,
'Posterize': posterize,
'Solarize': solarize,
'SolarizeAdd': solarize_add,
'Color': color,
'Contrast': contrast,
'Brightness': brightness,
'Sharpness': sharpness,
'ShearX': shear_x,
'ShearY': shear_y,
'TranslateX': translate_x,
'TranslateY': translate_y,
'Cutout': cutout,
}
def _randomly_negate_tensor(tensor):
"""With 50% prob turn the tensor negative."""
should_flip = tf.cast(tf.floor(tf.random_uniform([]) + 0.5), tf.bool)
final_tensor = tf.cond(should_flip, lambda: tensor, lambda: -tensor)
return final_tensor
def _rotate_level_to_arg(level):
level = (level/_MAX_LEVEL) * 30.
level = _randomly_negate_tensor(level)
return (level,)
def _shrink_level_to_arg(level):
"""Converts level to ratio by which we shrink the image content."""
if level == 0:
return (1.0,) # if level is zero, do not shrink the image
# Maximum shrinking ratio is 2.9.
level = 2. / (_MAX_LEVEL / level) + 0.9
return (level,)
def _enhance_level_to_arg(level):
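  # Maps level in [0, _MAX_LEVEL] to a PIL-style enhancement factor in
  # [0.1, 1.9], where a factor of 1.0 leaves the image unchanged.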
return ((level/_MAX_LEVEL) * 1.8 + 0.1,)
def _shear_level_to_arg(level):
level = (level/_MAX_LEVEL) * 0.3
# Flip level to negative with 50% chance.
level = _randomly_negate_tensor(level)
return (level,)
def _translate_level_to_arg(level, translate_const):
level = (level/_MAX_LEVEL) * float(translate_const)
# Flip level to negative with 50% chance.
level = _randomly_negate_tensor(level)
return (level,)
def level_to_arg(hparams):
return {
'AutoContrast': lambda level: (),
'Equalize': lambda level: (),
'Invert': lambda level: (),
'Rotate': _rotate_level_to_arg,
'Posterize': lambda level: (int((level/_MAX_LEVEL) * 4),),
'Solarize': lambda level: (int((level/_MAX_LEVEL) * 256),),
'SolarizeAdd': lambda level: (int((level/_MAX_LEVEL) * 110),),
'Color': _enhance_level_to_arg,
'Contrast': _enhance_level_to_arg,
'Brightness': _enhance_level_to_arg,
'Sharpness': _enhance_level_to_arg,
'ShearX': _shear_level_to_arg,
'ShearY': _shear_level_to_arg,
'Cutout': lambda level: (int((level/_MAX_LEVEL) * hparams.cutout_const),),
# pylint:disable=g-long-lambda
'TranslateX': lambda level: _translate_level_to_arg(
level, hparams.translate_const),
'TranslateY': lambda level: _translate_level_to_arg(
level, hparams.translate_const),
# pylint:enable=g-long-lambda
}
def _parse_policy_info(name, prob, level, replace_value, augmentation_hparams):
"""Return the function that corresponds to `name` and update `level` param."""
func = NAME_TO_FUNC[name]
args = level_to_arg(augmentation_hparams)[name](level)
# Check to see if prob is passed into function. This is used for operations
# where we alter bboxes independently.
# pytype:disable=wrong-arg-types
if 'prob' in inspect.getargspec(func)[0]:
args = tuple([prob] + list(args))
# pytype:enable=wrong-arg-types
# Add in replace arg if it is required for the function that is being called.
# pytype:disable=wrong-arg-types
if 'replace' in inspect.getargspec(func)[0]:
# Make sure replace is the final argument
assert 'replace' == inspect.getargspec(func)[0][-1]
args = tuple(list(args) + [replace_value])
# pytype:enable=wrong-arg-types
return (func, prob, args)
def _apply_func_with_prob(func, image, args, prob):
"""Apply `func` to image w/ `args` as input with probability `prob`."""
assert isinstance(args, tuple)
# If prob is a function argument, then this randomness is being handled
# inside the function, so make sure it is always called.
# pytype:disable=wrong-arg-types
if 'prob' in inspect.getargspec(func)[0]:
prob = 1.0
# pytype:enable=wrong-arg-types
# Apply the function with probability `prob`.
should_apply_op = tf.cast(
tf.floor(tf.random_uniform([], dtype=tf.float32) + prob), tf.bool)
augmented_image = tf.cond(
should_apply_op,
lambda: func(image, *args),
lambda: image)
return augmented_image
def select_and_apply_random_policy(policies, image):
"""Select a random policy from `policies` and apply it to `image`."""
policy_to_select = tf.random_uniform([], maxval=len(policies), dtype=tf.int32)
# Note that using tf.case instead of tf.conds would result in significantly
# larger graphs and would even break export for some larger policies.
for (i, policy) in enumerate(policies):
image = tf.cond(
tf.equal(i, policy_to_select),
lambda selected_policy=policy: selected_policy(image),
lambda: image)
return image
def build_and_apply_nas_policy(policies, image,
augmentation_hparams):
"""Build a policy from the given policies passed in and apply to image.
Args:
policies: list of lists of tuples in the form `(func, prob, level)`, `func`
is a string name of the augmentation function, `prob` is the probability
of applying the `func` operation, `level` is the input argument for
`func`.
image: tf.Tensor that the resulting policy will be applied to.
augmentation_hparams: Hparams associated with the NAS learned policy.
Returns:
A version of image that now has data augmentation applied to it based on
    the `policies` passed into the function.
"""
replace_value = [128, 128, 128]
# func is the string name of the augmentation function, prob is the
# probability of applying the operation and level is the parameter associated
# with the tf op.
# tf_policies are functions that take in an image and return an augmented
# image.
tf_policies = []
for policy in policies:
tf_policy = []
# Link string name to the correct python function and make sure the correct
# argument is passed into that function.
for policy_info in policy:
policy_info = list(policy_info) + [replace_value, augmentation_hparams]
tf_policy.append(_parse_policy_info(*policy_info))
    # Now build the tf policy that will apply the augmentation procedure
    # to the image.
def make_final_policy(tf_policy_):
def final_policy(image_):
for func, prob, args in tf_policy_:
image_ = _apply_func_with_prob(
func, image_, args, prob)
return image_
return final_policy
tf_policies.append(make_final_policy(tf_policy))
augmented_image = select_and_apply_random_policy(
tf_policies, image)
return augmented_image
def distort_image_with_autoaugment(image, augmentation_name):
"""Applies the AutoAugment policy to `image`.
AutoAugment is from the paper: https://arxiv.org/abs/1805.09501.
Args:
image: `Tensor` of shape [height, width, 3] representing an image.
    augmentation_name: The name of the AutoAugment policy to use. The
      available options are `v0` and `test`. `v0` is the policy used for all
      of the results in the paper.
  Returns:
    The augmented version of `image`.
  """
available_policies = {'v0': policy_v0,
'test': policy_vtest}
if augmentation_name not in available_policies:
raise ValueError('Invalid augmentation_name: {}'.format(augmentation_name))
policy = available_policies[augmentation_name]()
# Hparams that will be used for AutoAugment.
augmentation_hparams = config_dict.ConfigDict(dict(
cutout_const=100, translate_const=250))
return build_and_apply_nas_policy(policy, image, augmentation_hparams)
def distort_image_with_randaugment(image, num_layers, magnitude):
"""Applies the RandAugment policy to `image`.
  RandAugment is from the paper https://arxiv.org/abs/1909.13719.
Args:
image: `Tensor` of shape [height, width, 3] representing an image.
num_layers: Integer, the number of augmentation transformations to apply
sequentially to an image. Represented as (N) in the paper. Usually best
values will be in the range [1, 3].
magnitude: Integer, shared magnitude across all augmentation operations.
Represented as (M) in the paper. Usually best values are in the range
[5, 30].
Returns:
The augmented version of `image`.
"""
replace_value = [128] * 3
tf.logging.info('Using RandAug.')
augmentation_hparams = config_dict.ConfigDict(dict(
cutout_const=40, translate_const=100))
available_ops = [
'AutoContrast', 'Equalize', 'Invert', 'Rotate', 'Posterize',
'Solarize', 'Color', 'Contrast', 'Brightness', 'Sharpness',
'ShearX', 'ShearY', 'TranslateX', 'TranslateY', 'Cutout', 'SolarizeAdd']
for layer_num in range(num_layers):
op_to_select = tf.random_uniform(
[], maxval=len(available_ops), dtype=tf.int32)
random_magnitude = float(magnitude)
with tf.name_scope('randaug_layer_{}'.format(layer_num)):
for (i, op_name) in enumerate(available_ops):
prob = tf.random_uniform([], minval=0.2, maxval=0.8, dtype=tf.float32)
func, _, args = _parse_policy_info(op_name, prob, random_magnitude,
replace_value, augmentation_hparams)
image = tf.cond(
tf.equal(i, op_to_select),
# pylint:disable=g-long-lambda
lambda selected_func=func, selected_args=args: selected_func(
image, *selected_args),
# pylint:enable=g-long-lambda
lambda: image)
return image
| deepmind-research-master | perceiver/train/autoaugment.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Atari RL Unplugged datasets.
Examples in the dataset represent SARSA transitions stored during a
DQN training run as described in https://arxiv.org/pdf/1907.04543.
For every training run we have recorded all 50 million transitions corresponding
to 200 million environment steps (4x factor because of frame skipping). There
are 5 separate datasets for each of the 45 games.
Every transition in the dataset is a tuple containing the following features:
* o_t: Observation at time t. Observations have been processed using the
canonical Atari frame processing, including 4x frame stacking. The shape
of a single observation is [84, 84, 4].
* a_t: Action taken at time t.
* r_t: Reward after a_t.
* d_t: Discount after a_t.
* o_tp1: Observation at time t+1.
* a_tp1: Action at time t+1.
* extras:
* episode_id: Episode identifier.
* episode_return: Total episode return computed using per-step [-1, 1]
clipping.
"""
import functools
import os
from typing import Dict
from acme import wrappers
import dm_env
from dm_env import specs
from dopamine.discrete_domains import atari_lib
import reverb
import tensorflow as tf
# 9 tuning games.
TUNING_SUITE = [
'BeamRider',
'DemonAttack',
'DoubleDunk',
'IceHockey',
'MsPacman',
'Pooyan',
'RoadRunner',
'Robotank',
'Zaxxon',
]
# 36 testing games.
TESTING_SUITE = [
'Alien',
'Amidar',
'Assault',
'Asterix',
'Atlantis',
'BankHeist',
'BattleZone',
'Boxing',
'Breakout',
'Carnival',
'Centipede',
'ChopperCommand',
'CrazyClimber',
'Enduro',
'FishingDerby',
'Freeway',
'Frostbite',
'Gopher',
'Gravitar',
'Hero',
'Jamesbond',
'Kangaroo',
'Krull',
'KungFuMaster',
'NameThisGame',
'Phoenix',
'Pong',
'Qbert',
'Riverraid',
'Seaquest',
'SpaceInvaders',
'StarGunner',
'TimePilot',
'UpNDown',
'VideoPinball',
'WizardOfWor',
'YarsRevenge',
]
# Total of 45 games.
ALL = TUNING_SUITE + TESTING_SUITE
def _decode_frames(pngs: tf.Tensor):
"""Decode PNGs.
Args:
pngs: String Tensor of size (4,) containing PNG encoded images.
Returns:
4 84x84 grayscale images packed in a (84, 84, 4) uint8 Tensor.
"""
# Statically unroll png decoding
frames = [tf.image.decode_png(pngs[i], channels=1) for i in range(4)]
frames = tf.concat(frames, axis=2)
frames.set_shape((84, 84, 4))
return frames
def _make_reverb_sample(o_t: tf.Tensor,
a_t: tf.Tensor,
r_t: tf.Tensor,
d_t: tf.Tensor,
o_tp1: tf.Tensor,
a_tp1: tf.Tensor,
extras: Dict[str, tf.Tensor]) -> reverb.ReplaySample:
"""Create Reverb sample with offline data.
Args:
o_t: Observation at time t.
a_t: Action at time t.
r_t: Reward at time t.
d_t: Discount at time t.
o_tp1: Observation at time t+1.
a_tp1: Action at time t+1.
extras: Dictionary with extra features.
Returns:
Replay sample with fake info: key=0, probability=1, table_size=0.
"""
info = reverb.SampleInfo(
key=tf.constant(0, tf.uint64),
probability=tf.constant(1.0, tf.float64),
table_size=tf.constant(0, tf.int64),
priority=tf.constant(1.0, tf.float64),
times_sampled=tf.constant(1, tf.int32))
data = (o_t, a_t, r_t, d_t, o_tp1, a_tp1, extras)
return reverb.ReplaySample(info=info, data=data)
def _tf_example_to_reverb_sample(tf_example: tf.train.Example
) -> reverb.ReplaySample:
"""Create a Reverb replay sample from a TF example."""
# Parse tf.Example.
feature_description = {
'o_t': tf.io.FixedLenFeature([4], tf.string),
'o_tp1': tf.io.FixedLenFeature([4], tf.string),
'a_t': tf.io.FixedLenFeature([], tf.int64),
'a_tp1': tf.io.FixedLenFeature([], tf.int64),
'r_t': tf.io.FixedLenFeature([], tf.float32),
'd_t': tf.io.FixedLenFeature([], tf.float32),
'episode_id': tf.io.FixedLenFeature([], tf.int64),
'episode_return': tf.io.FixedLenFeature([], tf.float32),
}
data = tf.io.parse_single_example(tf_example, feature_description)
# Process data.
o_t = _decode_frames(data['o_t'])
o_tp1 = _decode_frames(data['o_tp1'])
a_t = tf.cast(data['a_t'], tf.int32)
a_tp1 = tf.cast(data['a_tp1'], tf.int32)
episode_id = tf.bitcast(data['episode_id'], tf.uint64)
# Build Reverb replay sample.
extras = {
'episode_id': episode_id,
'return': data['episode_return']
}
return _make_reverb_sample(o_t, a_t, data['r_t'], data['d_t'], o_tp1, a_tp1,
extras)
def dataset(path: str,
game: str,
run: int,
num_shards: int = 100,
shuffle_buffer_size: int = 100000) -> tf.data.Dataset:
"""TF dataset of Atari SARSA tuples."""
path = os.path.join(path, f'{game}/run_{run}')
filenames = [f'{path}-{i:05d}-of-{num_shards:05d}' for i in range(num_shards)]
file_ds = tf.data.Dataset.from_tensor_slices(filenames)
file_ds = file_ds.repeat().shuffle(num_shards)
example_ds = file_ds.interleave(
functools.partial(tf.data.TFRecordDataset, compression_type='GZIP'),
cycle_length=tf.data.experimental.AUTOTUNE,
block_length=5)
example_ds = example_ds.shuffle(shuffle_buffer_size)
return example_ds.map(_tf_example_to_reverb_sample,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
class AtariDopamineWrapper(dm_env.Environment):
"""Wrapper for Atari Dopamine environmnet."""
def __init__(self, env, max_episode_steps=108000):
self._env = env
self._max_episode_steps = max_episode_steps
self._episode_steps = 0
    self._reset_next_step = True
def reset(self):
self._episode_steps = 0
self._reset_next_step = False
observation = self._env.reset()
return dm_env.restart(observation.squeeze(-1))
def step(self, action):
if self._reset_next_step:
return self.reset()
observation, reward, terminal, _ = self._env.step(action.item())
observation = observation.squeeze(-1)
discount = 1 - float(terminal)
self._episode_steps += 1
if terminal:
      self._reset_next_step = True
return dm_env.termination(reward, observation)
elif self._episode_steps == self._max_episode_steps:
      self._reset_next_step = True
return dm_env.truncation(reward, observation, discount)
else:
return dm_env.transition(reward, observation, discount)
def observation_spec(self):
space = self._env.observation_space
return specs.Array(space.shape[:-1], space.dtype)
def action_spec(self):
return specs.DiscreteArray(self._env.action_space.n)
def environment(game: str) -> dm_env.Environment:
"""Atari environment."""
env = atari_lib.create_atari_environment(game_name=game,
sticky_actions=True)
env = AtariDopamineWrapper(env)
env = wrappers.FrameStackingWrapper(env, num_frames=4)
return wrappers.SinglePrecisionWrapper(env)
| deepmind-research-master | rl_unplugged/atari.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Real World RL for RL Unplugged datasets.
Examples in the dataset represent SARS transitions stored when running a
partially online trained agent as described in https://arxiv.org/abs/1904.12901.
We release 8 datasets in total -- with no combined challenge and easy combined
challenge on the cartpole, walker, quadruped, and humanoid tasks. For details
on how the dataset was generated, please refer to the paper.
Every transition in the dataset is a tuple containing the following features:
* o_t: Observation at time t.
* a_t: Action taken at time t.
* r_t: Reward at time t.
* d_t: Discount at time t.
* o_tp1: Observation at time t+1.
* a_tp1: Action taken at time t+1. For the last timestep, this is set
  equal to the last action.
Note that this serves as an example. For optimal data loading speed, consider
separating out data preprocessing from the data loading loop during training,
e.g. saving the preprocessed data.
"""
import collections
import functools
import os
from typing import Any, Dict, Optional, Sequence
from acme import wrappers
import dm_env
import realworldrl_suite.environments as rwrl_envs
import reverb
import tensorflow as tf
import tree
DELIMITER = ':'
# Control suite tasks have 1000 timesteps per episode. One additional timestep
# accounts for the very first observation where no action has been taken yet.
DEFAULT_NUM_TIMESTEPS = 1001
def _decombine_key(k: str, delimiter: str = DELIMITER) -> Sequence[str]:
return k.split(delimiter)
def tf_example_to_feature_description(example,
num_timesteps=DEFAULT_NUM_TIMESTEPS):
"""Takes a string tensor encoding an tf example and returns its features."""
if not tf.executing_eagerly():
    raise AssertionError(
        'tf_example_to_feature_description() only works under eager mode.')
example = tf.train.Example.FromString(example.numpy())
ret = {}
for k, v in example.features.feature.items():
l = len(v.float_list.value)
if l % num_timesteps:
raise ValueError('Unexpected feature length %d. It should be divisible '
'by num_timesteps: %d' % (l, num_timesteps))
size = l // num_timesteps
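    # E.g. a flat feature of 17017 floats with num_timesteps=1001 parses into
    # a [1001, 17] tensor (17 values per timestep).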
ret[k] = tf.io.FixedLenFeature([num_timesteps, size], tf.float32)
return ret
def tree_deflatten_with_delimiter(
flat_dict: Dict[str, Any], delimiter: str = DELIMITER) -> Dict[str, Any]:
"""De-flattens a dict to its originally nested structure.
Does the opposite of {combine_nested_keys(k) :v
for k, v in tree.flatten_with_path(nested_dicts)}
Example: {'a:b': 1} -> {'a': {'b': 1}}
Args:
flat_dict: the keys of which equals the `path` separated by `delimiter`.
delimiter: the delimiter that separates the keys of the nested dict.
Returns:
An un-flattened dict.
"""
root = collections.defaultdict(dict)
for delimited_key, v in flat_dict.items():
keys = _decombine_key(delimited_key, delimiter=delimiter)
node = root
for k in keys[:-1]:
node = node[k]
node[keys[-1]] = v
return dict(root)
def get_slice_of_nested(nested: Dict[str, Any], start: int,
end: int) -> Dict[str, Any]:
return tree.map_structure(lambda item: item[start:end], nested)
def repeat_last_and_append_to_nested(nested: Dict[str, Any]) -> Dict[str, Any]:
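  # E.g. per-timestep actions [a_0, ..., a_T] become [a_0, ..., a_T, a_T]:
  # the final entry is duplicated so the "next action" sequence built below
  # has the same length as the others.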
return tree.map_structure(
lambda item: tf.concat((item, item[-1:]), axis=0), nested)
def tf_example_to_reverb_sample(example,
feature_description,
num_timesteps=DEFAULT_NUM_TIMESTEPS):
"""Converts the episode encoded as a tf example into SARSA reverb samples."""
example = tf.io.parse_single_example(example, feature_description)
kv = tree_deflatten_with_delimiter(example)
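  # Slice the episode so that element i of the tuple below lines up as
  # (o_i, a_{i+1}, r_{i+1}, d_{i+1}, o_{i+1}, a_{i+2}), where the action
  # stored at step t is the one taken after observing step t-1.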
output = (
get_slice_of_nested(kv['observation'], 0, num_timesteps - 1),
get_slice_of_nested(kv['action'], 1, num_timesteps),
kv['reward'][1:num_timesteps],
# The two fields below aren't needed for learning,
# but are kept here to be compatible with acme learner format.
kv['discount'][1:num_timesteps],
get_slice_of_nested(kv['observation'], 1, num_timesteps),
repeat_last_and_append_to_nested(
get_slice_of_nested(kv['action'], 2, num_timesteps)))
ret = tf.data.Dataset.from_tensor_slices(output)
ret = ret.map(lambda *x: reverb.ReplaySample(info=b'None', data=x)) # pytype: disable=wrong-arg-types
return ret
def dataset(path: str,
combined_challenge: str,
domain: str,
task: str,
difficulty: str,
num_shards: int = 100,
shuffle_buffer_size: int = 100000) -> tf.data.Dataset:
"""TF dataset of RWRL SARSA tuples."""
path = os.path.join(
path,
f'combined_challenge_{combined_challenge}/{domain}/{task}/'
f'offline_rl_challenge_{difficulty}'
)
filenames = [
f'{path}/episodes.tfrecord-{i:05d}-of-{num_shards:05d}'
for i in range(num_shards)
]
file_ds = tf.data.Dataset.from_tensor_slices(filenames)
file_ds = file_ds.repeat().shuffle(num_shards)
tf_example_ds = file_ds.interleave(
tf.data.TFRecordDataset,
cycle_length=tf.data.experimental.AUTOTUNE,
block_length=5)
# Take one item to get the output types and shapes.
example_item = None
for example_item in tf.data.TFRecordDataset(filenames[:1]).take(1):
break
if example_item is None:
raise ValueError('Empty dataset')
feature_description = tf_example_to_feature_description(example_item)
reverb_ds = tf_example_ds.interleave(
functools.partial(
tf_example_to_reverb_sample, feature_description=feature_description),
num_parallel_calls=tf.data.experimental.AUTOTUNE,
deterministic=False)
reverb_ds = reverb_ds.prefetch(100)
reverb_ds = reverb_ds.shuffle(shuffle_buffer_size)
return reverb_ds
def environment(
combined_challenge: str,
domain: str,
task: str,
log_output: Optional[str] = None,
environment_kwargs: Optional[Dict[str, Any]] = None) -> dm_env.Environment:
"""RWRL environment."""
env = rwrl_envs.load(
domain_name=domain,
task_name=task,
log_output=log_output,
environment_kwargs=environment_kwargs,
combined_challenge=combined_challenge)
return wrappers.SinglePrecisionWrapper(env)
| deepmind-research-master | rl_unplugged/rwrl.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Atari dataset example.
Instructions:
> mkdir -p /tmp/dataset/Asterix
> gsutil cp gs://rl_unplugged/atari/Asterix/run_1-00000-of-00100 \
/tmp/dataset/Asterix/run_1-00000-of-00001
> python atari_example.py --path=/tmp/dataset --game=Asterix
"""
from absl import app
from absl import flags
from acme import specs
import tree
from rl_unplugged import atari
flags.DEFINE_string('path', '/tmp/dataset', 'Path to dataset.')
flags.DEFINE_string('game', 'Asterix', 'Game.')
FLAGS = flags.FLAGS
def main(_):
ds = atari.dataset(FLAGS.path, FLAGS.game, 1,
num_shards=1,
shuffle_buffer_size=1)
for sample in ds.take(1):
print('Data spec')
print(tree.map_structure(lambda x: (x.dtype, x.shape), sample.data))
env = atari.environment(FLAGS.game)
print('Environment spec')
print(specs.make_environment_spec(env))
print('Environment observation')
timestep = env.reset()
print(tree.map_structure(lambda x: (x.dtype, x.shape), timestep.observation))
if __name__ == '__main__':
app.run(main)
| deepmind-research-master | rl_unplugged/atari_example.py |
# pylint: disable=line-too-long
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""RWRL dataset example.
Instructions:
> export TMP_PATH=/tmp/dataset/rwrl
> export DATA_PATH=combined_challenge_easy/quadruped/walk/offline_rl_challenge_easy
> mkdir -p $TMP_PATH/$DATA_PATH
> gsutil cp gs://rl_unplugged/rwrl/$DATA_PATH/episodes.tfrecord-00001-of-00015 \
$TMP_PATH/$DATA_PATH/episodes.tfrecord-00000-of-00001
> python rwrl_example.py --path=$TMP_PATH
"""
# pylint: enable=line-too-long
from absl import app
from absl import flags
import tree
from rl_unplugged import rwrl
flags.DEFINE_string('path', '/tmp/dataset', 'Path to dataset.')
def main(_):
ds = rwrl.dataset(
flags.FLAGS.path,
combined_challenge='easy',
domain='quadruped',
task='walk',
difficulty='easy',
num_shards=1,
shuffle_buffer_size=1)
for replay_sample in ds.take(1):
print(tree.map_structure(lambda x: (x.dtype, x.shape), replay_sample.data))
break
if __name__ == '__main__':
app.run(main)
| deepmind-research-master | rl_unplugged/rwrl_example.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Networks used for training agents.
"""
from acme.tf import networks as acme_networks
from acme.tf import utils as tf2_utils
import numpy as np
import sonnet as snt
import tensorflow as tf
def instance_norm_and_elu(x):
  """Applies instance normalization over the spatial dims, followed by ELU."""
  mean = tf.reduce_mean(x, axis=[1, 2], keepdims=True)
  x_ = x - mean
  var = tf.reduce_mean(x_**2, axis=[1, 2], keepdims=True)
  # Normalize by the standard deviation (epsilon added for stability).
  x_norm = x_ / tf.sqrt(var + 1e-6)
  return tf.nn.elu(x_norm)
class ControlNetwork(snt.Module):
"""Image, proprio and optionally action encoder used for actors and critics.
"""
def __init__(self,
proprio_encoder_size: int,
proprio_keys=None,
activation=tf.nn.elu):
"""Creates a ControlNetwork.
Args:
proprio_encoder_size: Size of the linear layer for the proprio encoder.
proprio_keys: Optional list of names of proprioceptive observations.
Defaults to all observations. Note that if this is specified, any
observation not contained in proprio_keys will be ignored by the agent.
activation: Linear layer activation function.
"""
super().__init__(name='control_network')
self._activation = activation
self._proprio_keys = proprio_keys
self._proprio_encoder = acme_networks.LayerNormMLP([proprio_encoder_size])
def __call__(self, inputs, action: tf.Tensor = None, task=None):
"""Evaluates the ControlNetwork.
Args:
inputs: A dictionary of agent observation tensors.
action: Agent actions.
task: Optional encoding of the task.
    Raises:
      ValueError: if no proprio input is provided.
      ValueError: if some proprio input looks suspiciously like pixel inputs.
Returns:
Processed network output.
"""
if not isinstance(inputs, dict):
inputs = {'inputs': inputs}
proprio_input = []
# By default, treat all observations as proprioceptive.
if self._proprio_keys is None:
self._proprio_keys = list(sorted(inputs.keys()))
for key in self._proprio_keys:
proprio_input.append(snt.Flatten()(inputs[key]))
if np.prod(inputs[key].shape[1:]) > 32*32*3:
raise ValueError(
'This input does not resemble a proprioceptive '
'state: {} with shape {}'.format(
key, inputs[key].shape))
# Append optional action input (i.e. for critic networks).
if action is not None:
proprio_input.append(action)
proprio_input = tf2_utils.batch_concat(proprio_input)
proprio_state = self._proprio_encoder(proprio_input)
return proprio_state
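# Illustrative usage sketch (the observation keys below are hypothetical):
#   torso = ControlNetwork(proprio_encoder_size=512,
#                          proprio_keys=['position', 'velocity'])
#   embedding = torso(observation_dict)              # actor-style call
#   q_input = torso(observation_dict, action=a_tm1)  # critic-style call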
| deepmind-research-master | rl_unplugged/networks.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Control RL Unplugged datasets.
Examples in the dataset represent sequences stored when running a partially
trained agent (trained online) as described in
https://arxiv.org/abs/2006.13888.
Every dataset has a SARSA version. For environments that we believe may
require a recurrent agent, we also include a version of the dataset with
overlapping sequences of length 40.
Datasets for the dm_control_suite environments only include proprio
observations, while datasets for dm_locomotion include both pixel and proprio
observations.
"""
import collections
import functools
import os
from typing import Dict, Optional, Tuple, Set
from acme import wrappers
from acme.adders import reverb as adders
from dm_control import composer
from dm_control import suite
from dm_control.composer.variation import colors
from dm_control.composer.variation import distributions
from dm_control.locomotion import arenas
from dm_control.locomotion import props
from dm_control.locomotion import tasks
from dm_control.locomotion import walkers
from dm_env import specs
import numpy as np
import reverb
import tensorflow as tf
import tree
def _build_rodent_escape_env():
"""Build environment where a rodent escapes from a bowl."""
walker = walkers.Rat(
observable_options={'egocentric_camera': dict(enabled=True)},
)
arena = arenas.bowl.Bowl(
size=(20., 20.),
aesthetic='outdoor_natural')
locomotion_task = tasks.escape.Escape(
walker=walker,
arena=arena,
physics_timestep=0.001,
control_timestep=.02)
raw_env = composer.Environment(
time_limit=20,
task=locomotion_task,
strip_singleton_obs_buffer_dim=True)
return raw_env
def _build_rodent_maze_env():
"""Build environment where a rodent runs to targets."""
walker = walkers.Rat(
observable_options={'egocentric_camera': dict(enabled=True)},
)
wall_textures = arenas.labmaze_textures.WallTextures(
style='style_01')
arena = arenas.mazes.RandomMazeWithTargets(
x_cells=11,
y_cells=11,
xy_scale=.5,
z_height=.3,
max_rooms=4,
room_min_size=4,
room_max_size=5,
spawns_per_room=1,
targets_per_room=3,
wall_textures=wall_textures,
aesthetic='outdoor_natural')
rodent_task = tasks.random_goal_maze.ManyGoalsMaze(
walker=walker,
maze_arena=arena,
target_builder=functools.partial(
props.target_sphere.TargetSphere,
radius=0.05,
height_above_ground=.125,
rgb1=(0, 0, 0.4),
rgb2=(0, 0, 0.7)),
target_reward_scale=50.,
contact_termination=False,
control_timestep=.02,
physics_timestep=0.001)
raw_env = composer.Environment(
time_limit=30,
task=rodent_task,
strip_singleton_obs_buffer_dim=True)
return raw_env
def _build_rodent_corridor_gaps():
"""Build environment where a rodent runs over gaps."""
walker = walkers.Rat(
observable_options={'egocentric_camera': dict(enabled=True)},
)
platform_length = distributions.Uniform(low=0.4, high=0.8)
gap_length = distributions.Uniform(low=0.05, high=0.2)
arena = arenas.corridors.GapsCorridor(
corridor_width=2,
platform_length=platform_length,
gap_length=gap_length,
corridor_length=40,
aesthetic='outdoor_natural')
rodent_task = tasks.corridors.RunThroughCorridor(
walker=walker,
arena=arena,
walker_spawn_position=(5, 0, 0),
walker_spawn_rotation=0,
target_velocity=1.0,
contact_termination=False,
terminate_at_height=-0.3,
physics_timestep=0.001,
control_timestep=.02)
raw_env = composer.Environment(
time_limit=30,
task=rodent_task,
strip_singleton_obs_buffer_dim=True)
return raw_env
def _build_rodent_two_touch_env():
"""Build environment where a rodent touches targets."""
walker = walkers.Rat(
observable_options={'egocentric_camera': dict(enabled=True)},
)
arena_floor = arenas.floors.Floor(
size=(10., 10.), aesthetic='outdoor_natural')
task_reach = tasks.reach.TwoTouch(
walker=walker,
arena=arena_floor,
target_builders=[
functools.partial(
props.target_sphere.TargetSphereTwoTouch,
radius=0.025),
],
randomize_spawn_rotation=True,
target_type_rewards=[25.],
shuffle_target_builders=False,
target_area=(1.5, 1.5),
physics_timestep=0.001,
control_timestep=.02)
raw_env = composer.Environment(
time_limit=30,
task=task_reach,
strip_singleton_obs_buffer_dim=True)
return raw_env
def _build_humanoid_walls_env():
"""Build humanoid walker walls environment."""
walker = walkers.CMUHumanoidPositionControlled(
name='walker',
observable_options={'egocentric_camera': dict(enabled=True)},
)
wall_width = distributions.Uniform(low=1, high=7)
wall_height = distributions.Uniform(low=2.5, high=4.0)
swap_wall_side = distributions.Bernoulli(prob=0.5)
wall_r = distributions.Uniform(low=0.5, high=0.6)
wall_g = distributions.Uniform(low=0.21, high=0.41)
wall_rgba = colors.RgbVariation(r=wall_r, g=wall_g, b=0, alpha=1)
arena = arenas.WallsCorridor(
wall_gap=5.0,
wall_width=wall_width,
wall_height=wall_height,
swap_wall_side=swap_wall_side,
wall_rgba=wall_rgba,
corridor_width=10,
corridor_length=100)
humanoid_task = tasks.RunThroughCorridor(
walker=walker,
arena=arena,
walker_spawn_rotation=1.57, # pi / 2
physics_timestep=0.005,
control_timestep=0.03)
raw_env = composer.Environment(
time_limit=30,
task=humanoid_task,
strip_singleton_obs_buffer_dim=True)
return raw_env
def _build_humanoid_corridor_env():
"""Build humanoid walker walls environment."""
walker = walkers.CMUHumanoidPositionControlled(
name='walker',
observable_options={'egocentric_camera': dict(enabled=True)},
)
arena = arenas.EmptyCorridor(
corridor_width=10,
corridor_length=100)
humanoid_task = tasks.RunThroughCorridor(
walker=walker,
arena=arena,
walker_spawn_rotation=1.57, # pi / 2
physics_timestep=0.005,
control_timestep=0.03)
raw_env = composer.Environment(
time_limit=30,
task=humanoid_task,
strip_singleton_obs_buffer_dim=True)
return raw_env
def _build_humanoid_corridor_gaps():
"""Build humanoid walker walls environment."""
walker = walkers.CMUHumanoidPositionControlled(
name='walker',
observable_options={'egocentric_camera': dict(enabled=True)},
)
platform_length = distributions.Uniform(low=0.3, high=2.5)
gap_length = distributions.Uniform(low=0.75, high=1.25)
arena = arenas.GapsCorridor(
corridor_width=10,
platform_length=platform_length,
gap_length=gap_length,
corridor_length=100)
humanoid_task = tasks.RunThroughCorridor(
walker=walker,
arena=arena,
walker_spawn_position=(2, 0, 0),
walker_spawn_rotation=1.57, # pi / 2
physics_timestep=0.005,
control_timestep=0.03)
raw_env = composer.Environment(
time_limit=30,
task=humanoid_task,
strip_singleton_obs_buffer_dim=True)
return raw_env
class MujocoActionNormalizer(wrappers.EnvironmentWrapper):
"""Rescale actions to [-1, 1] range for mujoco physics engine.
For control environments whose actions have bounded range in [-1, 1], this
adaptor rescale actions to the desired range. This allows actor network to
output unscaled actions for better gradient dynamics.
"""
def __init__(self, environment, rescale='clip'):
super().__init__(environment)
self._rescale = rescale
def step(self, action):
"""Rescale actions to [-1, 1] range before stepping wrapped environment."""
if self._rescale == 'tanh':
scaled_actions = tree.map_structure(np.tanh, action)
elif self._rescale == 'clip':
scaled_actions = tree.map_structure(lambda a: np.clip(a, -1., 1.), action)
else:
raise ValueError('Unrecognized scaling option: %s' % self._rescale)
return self._environment.step(scaled_actions)
class NormilizeActionSpecWrapper(wrappers.EnvironmentWrapper):
"""Turn each dimension of the actions into the range of [-1, 1]."""
def __init__(self, environment):
super().__init__(environment)
action_spec = environment.action_spec()
self._scale = action_spec.maximum - action_spec.minimum
self._offset = action_spec.minimum
minimum = action_spec.minimum * 0 - 1.
maximum = action_spec.minimum * 0 + 1.
self._action_spec = specs.BoundedArray(
action_spec.shape,
action_spec.dtype,
minimum,
maximum,
name=action_spec.name)
def _from_normal_actions(self, actions):
actions = 0.5 * (actions + 1.0) # a_t is now in the range [0, 1]
# scale range to [minimum, maximum]
return actions * self._scale + self._offset
def step(self, action):
action = self._from_normal_actions(action)
return self._environment.step(action)
def action_spec(self):
return self._action_spec
class FilterObservationsWrapper(wrappers.EnvironmentWrapper):
"""Filter out all the observations not specified to this wrapper."""
def __init__(self, environment, observations_to_keep):
super().__init__(environment)
self._observations_to_keep = observations_to_keep
spec = self._environment.observation_spec()
filtered = [(k, spec[k]) for k in observations_to_keep]
self._observation_spec = collections.OrderedDict(filtered)
def _filter_observation(self, timestep):
observation = timestep.observation
filtered = [(k, observation[k]) for k in self._observations_to_keep]
return timestep._replace(observation=collections.OrderedDict(filtered))
def step(self, action):
return self._filter_observation(self._environment.step(action))
def reset(self):
return self._filter_observation(self._environment.reset())
def observation_spec(self):
return self._observation_spec
class ControlSuite:
"""Create bits needed to run agents on an Control Suite dataset."""
def __init__(self, task_name='humanoid_run'):
"""Initializes datasets/environments for the Deepmind Control suite.
Args:
      task_name: task name. Must be one of:
finger_turn_hard, manipulator_insert_peg, humanoid_run,
cartpole_swingup, cheetah_run, fish_swim, manipulator_insert_ball,
walker_stand, walker_walk
"""
self.task_name = task_name
self._uint8_features = set([])
self._environment = None
if task_name == 'swim':
self._domain_name = 'fish'
self._task_name = 'swim'
self._shapes = {
'observation/target': (3,),
'observation/velocity': (13,),
'observation/upright': (1,),
'observation/joint_angles': (7,),
'action': (5,),
'discount': (),
'reward': (),
'episodic_reward': (),
'step_type': ()
}
elif task_name == 'humanoid_run':
self._domain_name = 'humanoid'
self._task_name = 'run'
self._shapes = {
'observation/velocity': (27,),
'observation/com_velocity': (3,),
'observation/torso_vertical': (3,),
'observation/extremities': (12,),
'observation/head_height': (1,),
'observation/joint_angles': (21,),
'action': (21,),
'discount': (),
'reward': (),
'episodic_reward': (),
'step_type': ()
}
elif task_name == 'manipulator_insert_ball':
self._domain_name = 'manipulator'
self._task_name = 'insert_ball'
self._shapes = {
'observation/arm_pos': (16,),
'observation/arm_vel': (8,),
'observation/touch': (5,),
'observation/hand_pos': (4,),
'observation/object_pos': (4,),
'observation/object_vel': (3,),
'observation/target_pos': (4,),
'action': (5,),
'discount': (),
'reward': (),
'episodic_reward': (),
'step_type': ()}
elif task_name == 'manipulator_insert_peg':
self._domain_name = 'manipulator'
self._task_name = 'insert_peg'
self._shapes = {
'observation/arm_pos': (16,),
'observation/arm_vel': (8,),
'observation/touch': (5,),
'observation/hand_pos': (4,),
'observation/object_pos': (4,),
'observation/object_vel': (3,),
'observation/target_pos': (4,),
'episodic_reward': (),
'action': (5,),
'discount': (),
'reward': (),
'step_type': ()}
elif task_name == 'cartpole_swingup':
self._domain_name = 'cartpole'
self._task_name = 'swingup'
self._shapes = {
'observation/position': (3,),
'observation/velocity': (2,),
'action': (1,),
'discount': (),
'reward': (),
'episodic_reward': (),
'step_type': ()}
elif task_name == 'walker_walk':
self._domain_name = 'walker'
self._task_name = 'walk'
self._shapes = {
'observation/orientations': (14,),
'observation/velocity': (9,),
'observation/height': (1,),
'action': (6,),
'discount': (),
'reward': (),
'episodic_reward': (),
'step_type': ()}
elif task_name == 'walker_stand':
self._domain_name = 'walker'
self._task_name = 'stand'
self._shapes = {
'observation/orientations': (14,),
'observation/velocity': (9,),
'observation/height': (1,),
'action': (6,),
'discount': (),
'reward': (),
'episodic_reward': (),
'step_type': ()}
elif task_name == 'cheetah_run':
self._domain_name = 'cheetah'
self._task_name = 'run'
self._shapes = {
'observation/position': (8,),
'observation/velocity': (9,),
'action': (6,),
'discount': (),
'reward': (),
'episodic_reward': (),
'step_type': ()}
elif task_name == 'finger_turn_hard':
self._domain_name = 'finger'
self._task_name = 'turn_hard'
self._shapes = {
'observation/position': (4,),
'observation/velocity': (3,),
'observation/touch': (2,),
'observation/target_position': (2,),
'observation/dist_to_target': (1,),
'action': (2,),
'discount': (),
'reward': (),
'episodic_reward': (),
'step_type': ()}
else:
raise ValueError('Task \'{}\' not found.'.format(task_name))
self._data_path = 'dm_control_suite/{}/train'.format(task_name)
@property
def shapes(self):
return self._shapes
@property
def data_path(self):
return self._data_path
@property
def uint8_features(self):
return self._uint8_features
@property
def environment(self):
"""Build and return the environment."""
if self._environment is not None:
return self._environment
self._environment = suite.load(
domain_name=self._domain_name,
task_name=self._task_name)
self._environment = wrappers.SinglePrecisionWrapper(self._environment)
self._environment = NormilizeActionSpecWrapper(self._environment)
return self._environment
class CmuThirdParty:
"""Create bits needed to run agents on an locomotion humanoid dataset."""
def __init__(self, task_name='humanoid_walls'):
# 'humanoid_corridor|humanoid_gaps|humanoid_walls'
self._task_name = task_name
self._pixel_keys = self.get_pixel_keys()
self._uint8_features = set(['observation/walker/egocentric_camera'])
self.additional_paths = {}
self._proprio_keys = [
'walker/joints_vel',
'walker/sensors_velocimeter',
'walker/sensors_gyro',
'walker/joints_pos',
'walker/world_zaxis',
'walker/body_height',
'walker/sensors_accelerometer',
'walker/end_effectors_pos'
]
self._shapes = {
'observation/walker/joints_vel': (56,),
'observation/walker/sensors_velocimeter': (3,),
'observation/walker/sensors_gyro': (3,),
'observation/walker/joints_pos': (56,),
'observation/walker/world_zaxis': (3,),
'observation/walker/body_height': (1,),
'observation/walker/sensors_accelerometer': (3,),
'observation/walker/end_effectors_pos': (12,),
'observation/walker/egocentric_camera': (
64,
64,
3,
),
'action': (56,),
'discount': (),
'reward': (),
'episodic_reward': (),
'step_type': ()
}
if task_name == 'humanoid_corridor':
self._data_path = 'dm_locomotion/humanoid_corridor/seq2/train'
elif task_name == 'humanoid_gaps':
self._data_path = 'dm_locomotion/humanoid_gaps/seq2/train'
elif task_name == 'humanoid_walls':
self._data_path = 'dm_locomotion/humanoid_walls/seq40/train'
else:
raise ValueError('Task \'{}\' not found.'.format(task_name))
@staticmethod
def get_pixel_keys():
return ('walker/egocentric_camera',)
@property
def uint8_features(self):
return self._uint8_features
@property
def shapes(self):
return self._shapes
@property
def data_path(self):
return self._data_path
@property
def environment(self):
"""Build and return the environment."""
if self._task_name == 'humanoid_corridor':
self._environment = _build_humanoid_corridor_env()
elif self._task_name == 'humanoid_gaps':
self._environment = _build_humanoid_corridor_gaps()
elif self._task_name == 'humanoid_walls':
self._environment = _build_humanoid_walls_env()
self._environment = NormilizeActionSpecWrapper(self._environment)
self._environment = MujocoActionNormalizer(
environment=self._environment, rescale='clip')
self._environment = wrappers.SinglePrecisionWrapper(self._environment)
all_observations = list(self._proprio_keys) + list(self._pixel_keys)
self._environment = FilterObservationsWrapper(self._environment,
all_observations)
return self._environment
class Rodent:
"""Create bits needed to run agents on an Rodent dataset."""
def __init__(self, task_name='rodent_gaps'):
# 'rodent_escape|rodent_two_touch|rodent_gaps|rodent_mazes'
self._task_name = task_name
self._pixel_keys = self.get_pixel_keys()
self._uint8_features = set(['observation/walker/egocentric_camera'])
self._proprio_keys = [
'walker/joints_pos', 'walker/joints_vel', 'walker/tendons_pos',
'walker/tendons_vel', 'walker/appendages_pos', 'walker/world_zaxis',
'walker/sensors_accelerometer', 'walker/sensors_velocimeter',
'walker/sensors_gyro', 'walker/sensors_touch',
]
self._shapes = {
'observation/walker/joints_pos': (30,),
'observation/walker/joints_vel': (30,),
'observation/walker/tendons_pos': (8,),
'observation/walker/tendons_vel': (8,),
'observation/walker/appendages_pos': (15,),
'observation/walker/world_zaxis': (3,),
'observation/walker/sensors_accelerometer': (3,),
'observation/walker/sensors_velocimeter': (3,),
'observation/walker/sensors_gyro': (3,),
'observation/walker/sensors_touch': (4,),
'observation/walker/egocentric_camera': (64, 64, 3),
'action': (38,),
'discount': (),
'reward': (),
'step_type': ()
}
if task_name == 'rodent_gaps':
self._data_path = 'dm_locomotion/rodent_gaps/seq2/train'
elif task_name == 'rodent_escape':
self._data_path = 'dm_locomotion/rodent_bowl_escape/seq2/train'
elif task_name == 'rodent_two_touch':
self._data_path = 'dm_locomotion/rodent_two_touch/seq40/train'
elif task_name == 'rodent_mazes':
self._data_path = 'dm_locomotion/rodent_mazes/seq40/train'
else:
raise ValueError('Task \'{}\' not found.'.format(task_name))
@staticmethod
def get_pixel_keys():
return ('walker/egocentric_camera',)
@property
def shapes(self):
return self._shapes
@property
def uint8_features(self):
return self._uint8_features
@property
def data_path(self):
return self._data_path
@property
def environment(self):
"""Return environment."""
if self._task_name == 'rodent_escape':
self._environment = _build_rodent_escape_env()
elif self._task_name == 'rodent_gaps':
self._environment = _build_rodent_corridor_gaps()
elif self._task_name == 'rodent_two_touch':
self._environment = _build_rodent_two_touch_env()
elif self._task_name == 'rodent_mazes':
self._environment = _build_rodent_maze_env()
self._environment = NormilizeActionSpecWrapper(self._environment)
self._environment = MujocoActionNormalizer(
environment=self._environment, rescale='clip')
self._environment = wrappers.SinglePrecisionWrapper(self._environment)
all_observations = list(self._proprio_keys) + list(self._pixel_keys)
self._environment = FilterObservationsWrapper(self._environment,
all_observations)
return self._environment
def _parse_seq_tf_example(example, uint8_features, shapes):
"""Parse tf.Example containing one or two episode steps."""
def to_feature(key, shape):
if key in uint8_features:
return tf.io.FixedLenSequenceFeature(
shape=[], dtype=tf.string, allow_missing=True)
else:
return tf.io.FixedLenSequenceFeature(
shape=shape, dtype=tf.float32, allow_missing=True)
feature_map = {}
for k, v in shapes.items():
feature_map[k] = to_feature(k, v)
parsed = tf.io.parse_single_example(example, features=feature_map)
observation = {}
restructured = {}
for k in parsed.keys():
if 'observation' not in k:
restructured[k] = parsed[k]
continue
if k in uint8_features:
observation[k.replace('observation/', '')] = tf.reshape(
tf.io.decode_raw(parsed[k], out_type=tf.uint8), (-1,) + shapes[k])
else:
observation[k.replace('observation/', '')] = parsed[k]
restructured['observation'] = observation
restructured['length'] = tf.shape(restructured['action'])[0]
return restructured
def _build_sequence_example(sequences):
"""Convert raw sequences into a Reverb sequence sample."""
data = adders.Step(
observation=sequences['observation'],
action=sequences['action'],
reward=sequences['reward'],
discount=sequences['discount'],
start_of_episode=(),
extras=())
info = reverb.SampleInfo(
key=tf.constant(0, tf.uint64),
probability=tf.constant(1.0, tf.float64),
table_size=tf.constant(0, tf.int64),
priority=tf.constant(1.0, tf.float64),
      times_sampled=tf.constant(1, tf.int32))
return reverb.ReplaySample(info=info, data=data)
def _build_sarsa_example(sequences):
"""Convert raw sequences into a Reverb n-step SARSA sample."""
o_tm1 = tree.map_structure(lambda t: t[0], sequences['observation'])
o_t = tree.map_structure(lambda t: t[1], sequences['observation'])
a_tm1 = tree.map_structure(lambda t: t[0], sequences['action'])
a_t = tree.map_structure(lambda t: t[1], sequences['action'])
r_t = tree.map_structure(lambda t: t[0], sequences['reward'])
p_t = tree.map_structure(lambda t: t[0], sequences['discount'])
info = reverb.SampleInfo(
key=tf.constant(0, tf.uint64),
probability=tf.constant(1.0, tf.float64),
table_size=tf.constant(0, tf.int64),
priority=tf.constant(1.0, tf.float64),
      times_sampled=tf.constant(1, tf.int32))
return reverb.ReplaySample(info=info, data=(o_tm1, a_tm1, r_t, p_t, o_t, a_t))
def _padded_batch(example_ds, batch_size, shapes, drop_remainder=False):
"""Batch data while handling unequal lengths."""
padded_shapes = {}
padded_shapes['observation'] = {}
for k, v in shapes.items():
if 'observation' in k:
padded_shapes['observation'][
k.replace('observation/', '')] = (-1,) + v
else:
padded_shapes[k] = (-1,) + v
padded_shapes['length'] = ()
return example_ds.padded_batch(batch_size,
padded_shapes=padded_shapes,
drop_remainder=drop_remainder)
def dataset(root_path: str,
data_path: str,
shapes: Dict[str, Tuple[int]],
num_threads: int,
batch_size: int,
uint8_features: Optional[Set[str]] = None,
num_shards: int = 100,
shuffle_buffer_size: int = 100000,
sarsa: bool = True) -> tf.data.Dataset:
"""Create tf dataset for training."""
uint8_features = uint8_features if uint8_features else {}
path = os.path.join(root_path, data_path)
filenames = [f'{path}-{i:05d}-of-{num_shards:05d}' for i in range(num_shards)]
file_ds = tf.data.Dataset.from_tensor_slices(filenames)
file_ds = file_ds.repeat().shuffle(num_shards)
example_ds = file_ds.interleave(
functools.partial(tf.data.TFRecordDataset, compression_type='GZIP'),
cycle_length=tf.data.experimental.AUTOTUNE,
block_length=5)
example_ds = example_ds.shuffle(shuffle_buffer_size)
def map_func(example):
example = _parse_seq_tf_example(example, uint8_features, shapes)
return example
example_ds = example_ds.map(map_func, num_parallel_calls=num_threads)
example_ds = example_ds.repeat().shuffle(batch_size * 10)
if sarsa:
example_ds = example_ds.map(
_build_sarsa_example,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
    example_ds = example_ds.batch(batch_size)
else:
example_ds = _padded_batch(
example_ds, batch_size, shapes, drop_remainder=True)
example_ds = example_ds.map(
_build_sequence_example,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
example_ds = example_ds.prefetch(tf.data.experimental.AUTOTUNE)
return example_ds
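# Example usage (a sketch mirroring dm_control_suite_example.py; `task` is
# assumed to be one of the task objects defined above, e.g. ControlSuite,
# whose `shapes`/`uint8_features` match the recorded data):
#
#   ds = dataset(
#       root_path='/tmp/dataset',
#       data_path=task.data_path,
#       shapes=task.shapes,
#       num_threads=1,
#       batch_size=2,
#       uint8_features=task.uint8_features,
#       num_shards=1,
#       sarsa=True)
#   for sample in ds.take(1):
#     o_tm1, a_tm1, r_t, p_t, o_t, a_t = sample.data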
| deepmind-research-master | rl_unplugged/dm_control_suite.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""DM control suite and locomotion dataset examples.
Instructions:
> export TMP_PATH=/tmp/dataset
> export TASK_NAME=humanoid_run
> mkdir -p $TMP_PATH/dm_control_suite/$TASK_NAME
> gsutil cp gs://rl_unplugged/dm_control_suite/$TASK_NAME/train-00000-of-00100 \
    $TMP_PATH/dm_control_suite/$TASK_NAME/train-00000-of-00001
> python dm_control_suite_example.py --path=$TMP_PATH \
--task_class=control_suite --task_name=$TASK_NAME
"""
from absl import app
from absl import flags
import tree
from rl_unplugged import dm_control_suite
flags.DEFINE_string('path', '/tmp/dataset', 'Path to dataset.')
flags.DEFINE_string('task_name', 'humanoid_run', 'Game.')
flags.DEFINE_enum('task_class', 'control_suite',
['humanoid', 'rodent', 'control_suite'],
'Task classes.')
FLAGS = flags.FLAGS
def main(_):
if FLAGS.task_class == 'control_suite':
task = dm_control_suite.ControlSuite(task_name=FLAGS.task_name)
elif FLAGS.task_class == 'humanoid':
task = dm_control_suite.CmuThirdParty(task_name=FLAGS.task_name)
elif FLAGS.task_class == 'rodent':
task = dm_control_suite.Rodent(task_name=FLAGS.task_name)
ds = dm_control_suite.dataset(root_path=FLAGS.path,
data_path=task.data_path,
shapes=task.shapes,
num_threads=1,
batch_size=2,
uint8_features=task.uint8_features,
num_shards=1,
shuffle_buffer_size=10)
for sample in ds.take(1):
print('Data spec')
print(tree.map_structure(lambda x: (x.dtype, x.shape), sample.data))
environment = task.environment
timestep = environment.reset()
print(tree.map_structure(lambda x: (x.dtype, x.shape), timestep.observation))
if __name__ == '__main__':
app.run(main)
| deepmind-research-master | rl_unplugged/dm_control_suite_example.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# WikiGraphs is licensed under the terms of the Creative Commons
# Attribution-ShareAlike 4.0 International (CC BY-SA 4.0) license.
#
# WikiText-103 data (unchanged) is licensed by Salesforce.com, Inc. under the
# terms of the Creative Commons Attribution-ShareAlike 4.0 International
# (CC BY-SA 4.0) license. You can find details about CC BY-SA 4.0 at:
#
# https://creativecommons.org/licenses/by-sa/4.0/legalcode
#
# Freebase data is licensed by Google LLC under the terms of the Creative
# Commons CC BY 4.0 license. You may obtain a copy of the License at:
#
# https://creativecommons.org/licenses/by/4.0/legalcode
#
# ==============================================================================
"""Setup for pip package."""
from setuptools import find_packages
from setuptools import setup
setup(
name='wikigraphs',
version='0.1.0',
description='A Wikipedia - knowledge graph paired dataset.',
url='https://github.com/deepmind/deepmind-research/tree/master/wikigraphs',
author='DeepMind',
author_email='[email protected]',
packages=find_packages(),
license='Apache 2.0',
)
| deepmind-research-master | wikigraphs/setup.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# WikiGraphs is licensed under the terms of the Creative Commons
# Attribution-ShareAlike 4.0 International (CC BY-SA 4.0) license.
#
# WikiText-103 data (unchanged) is licensed by Salesforce.com, Inc. under the
# terms of the Creative Commons Attribution-ShareAlike 4.0 International
# (CC BY-SA 4.0) license. You can find details about CC BY-SA 4.0 at:
#
# https://creativecommons.org/licenses/by-sa/4.0/legalcode
#
# Freebase data is licensed by Google LLC under the terms of the Creative
# Commons CC BY 4.0 license. You may obtain a copy of the License at:
#
# https://creativecommons.org/licenses/by/4.0/legalcode
#
# ==============================================================================
"""Data Parallel Updater for Graph2text data."""
import functools
import os
import pickle
from absl import logging
import haiku as hk
import jax
from jax.tree_util import tree_map
import numpy as np
import optax
def call_fn_with_state_keys(jit_fn, state, other_inputs, keys):
"""Executes `jit_fn`, filtering out all keys except some subset."""
state = state.copy()
extra_state = {}
for k in list(state.keys()):
if k not in keys:
extra_state[k] = state.pop(k)
return jit_fn(state, *other_inputs), extra_state
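# A minimal sketch of how the helper above is used by the Updater below: the
# pmapped function only sees the state keys it was compiled for, and
# everything else is split off and merged back by the caller:
#
#   (new_state, out), extra = call_fn_with_state_keys(
#       update_fn, state, [batch],
#       keys={'state', 'params', 'rng', 'replicated_step', 'opt_state'})
#   new_state.update(extra)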
class Updater:
"""Graph2text model updater with multi-GPU support."""
def __init__(self, loss_fn, optimizer, devices=None, has_graph=False):
self._net_init_fn, self._apply_fn = hk.transform_with_state(
functools.partial(loss_fn, is_training=True))
_, self._eval_apply_fn = hk.transform_with_state(
functools.partial(loss_fn, is_training=False))
if optimizer is None:
optimizer = optax.identity()
self._optimizer = optimizer
self._num_devices = jax.local_device_count()
if devices is None:
devices = []
for host_id in range(jax.process_count()):
for device_id in jax.local_devices(host_id):
devices.append(device_id)
else:
self._num_devices = min(self._num_devices, len(devices))
def _pmap(f, static_broadcasted_argnums=()):
return jax.pmap(f, axis_name='i', devices=devices,
static_broadcasted_argnums=static_broadcasted_argnums)
def handle_graph_size(fn):
def _fn(*args):
batch = args[-1].copy()
max_graph_size = batch['max_graph_size']
del batch['max_graph_size']
args = args[:-1] + (batch, max_graph_size)
return fn(*args)
return _fn
# Try to jit.
if has_graph:
# If the model contains full graphs, we need to set the max_graph_size
# as a statically broadcasted argument.
self._init_fn = handle_graph_size(_pmap(self._init, 4))
self._update_fn = handle_graph_size(_pmap(self._update, 2))
self._eval_fn = handle_graph_size(_pmap(self._eval, 2))
else:
self._init_fn = _pmap(self._init)
self._update_fn = _pmap(self._update)
self._eval_fn = _pmap(self._eval)
def _init(self, master_rng, params, network_state, data, max_graph_size=None):
"""Initializes state of the updater."""
out_rng, init_rng = jax.random.split(master_rng)
if max_graph_size is not None:
new_params, new_network_state = self._net_init_fn(
init_rng, data, max_graph_size)
else:
new_params, new_network_state = self._net_init_fn(init_rng, data)
if params is None:
params = new_params
if network_state is None:
network_state = new_network_state
opt_state = self._optimizer.init(params)
return dict(
replicated_step=0,
rng=out_rng,
state=network_state,
opt_state=opt_state,
params=params,
)
def init(self, master_rng, data, params=None, network_state=None,
replicated_params=False):
"""Initializes state of the updater."""
data = self._preprocess(data)
rngs = np.array([master_rng] * self._num_devices)
if not replicated_params and params is not None:
      params = tree_map(
lambda x: np.array([x] * self._num_devices), params)
state = self._init_fn(rngs, params, network_state, data)
state['step'] = np.array(0, dtype=np.int64)
# Wait for initialization to finish before starting training to keep
# memory usage low.
flat_params = jax.tree_leaves(state['params'])
if flat_params:
      flat_params[0].block_until_ready()
return state
def _update(self, state, data, max_graph_size=None):
"""Updates parameters."""
replicated_step = state['replicated_step']
rng = state['rng']
opt_state = state['opt_state']
params = state['params']
net_state = state['state']
rng, new_rng = jax.random.split(rng)
rng = jax.random.fold_in(rng, jax.lax.axis_index('i'))
def _loss(params, state, batch, rng):
if max_graph_size is not None:
(loss, metrics), state = self._apply_fn(params, state, rng, batch,
max_graph_size)
else:
(loss, metrics), state = self._apply_fn(params, state, rng, batch)
return loss, (metrics, state)
(loss, (metrics, new_net_state)), g = jax.value_and_grad(
_loss, has_aux=True)(params, net_state, data, rng)
g = jax.lax.pmean(g, axis_name='i')
loss = jax.lax.pmean(loss, axis_name='i')
metrics = jax.lax.pmean(metrics, axis_name='i')
updates, new_opt_state = self._optimizer.update(g, opt_state, params)
new_params = optax.apply_updates(params, updates)
new_state = dict(
replicated_step=replicated_step + 1,
rng=new_rng,
state=new_net_state,
opt_state=new_opt_state,
params=new_params,
)
metrics['loss'] = loss
metrics['step'] = replicated_step
return new_state, metrics
def update(self, state, data):
"""Updates the state using some data and returns metrics."""
data = self._preprocess(data)
(state, out), extra_state = call_fn_with_state_keys(
self._update_fn, state, [data], keys=set([
'state', 'params', 'rng', 'replicated_step', 'opt_state']))
state.update(extra_state)
state['step'] += 1
return state, tree_map(lambda x: x[0], out)
def _eval(self, state, data, max_graph_size=None):
"""Evaluates the current state on the given data."""
if max_graph_size is not None:
(loss, metrics), new_state = self._eval_apply_fn(
state['params'], state['state'], state['rng'], data, max_graph_size)
else:
(loss, metrics), new_state = self._eval_apply_fn(
state['params'], state['state'], state['rng'], data)
state['state'] = new_state
loss = jax.lax.pmean(loss, axis_name='i')
metrics = jax.lax.pmean(metrics, axis_name='i')
metrics['loss'] = loss
metrics['step'] = state['replicated_step']
return state, metrics
def eval_return_state(self, state, data):
"""Returns metrics without updating the model."""
data = self._preprocess(data)
(state, out), extra_state = call_fn_with_state_keys(
self._eval_fn, state, [data], keys=set([
'state', 'params', 'rng', 'replicated_step']))
state.update(extra_state)
return state, tree_map(lambda x: x[0], out)
def eval(self, state, data):
"""Returns metrics without updating the model."""
_, out = self.eval_return_state(state, data)
return out
def _preprocess(self, data):
"""Reshapes input so that it can be distributed across multiple cores."""
multi_inputs = data.copy()
def add_core_dimension(x):
if np.isscalar(x):
return x
if x.shape[0] % self._num_devices != 0:
raise ValueError(f'The batch size must be a multiple of the number of'
f' devices. Got batch size = {x.shape[0]} and number'
f' of devices = {self._num_devices}.')
prefix = (self._num_devices, x.shape[0] // self._num_devices)
return np.reshape(x, prefix + x.shape[1:])
multi_inputs = tree_map(add_core_dimension, multi_inputs)
return multi_inputs
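  # For example, with 2 local devices a batch entry of shape (8, T) is
  # reshaped to (2, 4, T) so jax.pmap can map the leading axis over devices;
  # scalar entries (e.g. 'max_graph_size') are passed through unchanged.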
def params(self, state):
"""Returns model parameters."""
return tree_map(lambda x: x[0], state['params'])
def opt_state(self, state):
"""Returns the state of the optimiser."""
return tree_map(lambda x: x[0], state['opt_state'])
def network_state(self, state):
"""Returns the model's state."""
return tree_map(lambda x: x[0], state['state'])
def to_checkpoint_state(self, state):
"""Transforms the updater state into a checkpointable state."""
checkpoint_state = state.copy()
# Wrapper around checkpoint_state['step'] so we can get [0].
checkpoint_state['step'] = checkpoint_state['step'][np.newaxis]
# Unstack the replicated contents.
checkpoint_state = tree_map(lambda x: x[0], checkpoint_state)
return checkpoint_state
def from_checkpoint_state(self, checkpoint_state):
"""Initializes the updater state from the checkpointed state."""
# Expand the checkpoint so we have a copy for each device.
state = tree_map(lambda x: np.stack(jax.local_device_count() * [x]),
checkpoint_state)
state['step'] = state['step'][0] # Undo stacking for step.
return state
class CheckpointingUpdater:
"""A checkpointing wrapper around an Updater."""
def __init__(self,
inner: Updater,
checkpoint_dir: str):
self._inner = inner
self._checkpoint_dir = checkpoint_dir
def _checkpoint_paths(self):
    return sorted(
        p for p in os.listdir(self._checkpoint_dir) if 'checkpoint' in p)
def init(self, rng, data, params=None, network_state=None):
"""Initialize experiment state."""
if not os.path.exists(self._checkpoint_dir) or not self._checkpoint_paths():
os.makedirs(self._checkpoint_dir, exist_ok=True)
return self._inner.init(rng, data, params, network_state)
return self.load_checkpoint()
def init_from_checkpoint(self, rng, data, checkpoint_state):
params = self._inner.params(checkpoint_state)
network_state = None
return self._inner.init(rng, data, params, network_state)
def eval_return_state(self, state, data):
return self._inner.eval_return_state(state, data)
def save_checkpoint(self, state):
path = os.path.join(self._checkpoint_dir, 'checkpoint.pkl')
logging.info('Serializing experiment state to %s', path)
checkpoint_state = self._inner.to_checkpoint_state(jax.device_get(state))
with open(path, 'wb') as f:
pickle.dump(checkpoint_state, f)
def load_checkpoint(self):
checkpoint = os.path.join(self._checkpoint_dir,
self._checkpoint_paths()[-1])
logging.info('Loading checkpoint from %s', checkpoint)
with open(checkpoint, 'rb') as f:
state = pickle.load(f)
return self._inner.from_checkpoint_state(state)
def update(self, state, data):
"""Update experiment state."""
state, out = self._inner.update(state, data)
return state, out
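# Example wiring (a sketch under assumed shapes; `loss_fn` must be a
# Haiku-transformable function of the form loss_fn(batch, is_training) ->
# (loss, metrics_dict), as expected by Updater above):
#
#   updater = CheckpointingUpdater(
#       Updater(loss_fn, optax.adam(1e-3), devices=jax.local_devices()),
#       checkpoint_dir='/tmp/ckpt')
#   state = updater.init(jax.random.PRNGKey(0), first_batch)
#   state, metrics = updater.update(state, next_batch)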
| deepmind-research-master | wikigraphs/updaters.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# WikiGraphs is licensed under the terms of the Creative Commons
# Attribution-ShareAlike 4.0 International (CC BY-SA 4.0) license.
#
# WikiText-103 data (unchanged) is licensed by Salesforce.com, Inc. under the
# terms of the Creative Commons Attribution-ShareAlike 4.0 International
# (CC BY-SA 4.0) license. You can find details about CC BY-SA 4.0 at:
#
# https://creativecommons.org/licenses/by-sa/4.0/legalcode
#
# Freebase data is licensed by Google LLC under the terms of the Creative
# Commons CC BY 4.0 license. You may obtain a copy of the License at:
#
# https://creativecommons.org/licenses/by/4.0/legalcode
#
# ==============================================================================
"""Utility functions for the training script."""
import collections
import math
from absl import flags
from absl import logging
import jax.numpy as jnp
import jraph
import numpy as np
import sklearn.metrics
from wikigraphs.data import paired_dataset as pd
from wikigraphs.data import tokenizers
from wikigraphs.data import wikitext as wt
from wikigraphs.model import graph_net as gn
from wikigraphs.model import sampler as transformer_sampler
from wikigraphs.model import transformer
FLAGS = flags.FLAGS
VOCAB_FILES_MAP = {
'wikitext': '/tmp/data/wikitext-vocab.csv',
'freebase2wikitext': '/tmp/data/text-vocab.csv',
}
GRAPH_VOCAB_FILE = '/tmp/data/graph-vocab.csv'
def init_tokenizer(dataset_name):
"""Initialie the tokenizer."""
logging.info('Loading tokenizer...')
tokenizer = tokenizers.WordTokenizer(VOCAB_FILES_MAP[dataset_name])
logging.info('Vocab size: %d', tokenizer.vocab_size)
return tokenizer
def init_graph_tokenizer():
"""Initialie the tokenizer."""
logging.info('Loading graph tokenizer...')
tokenizer = tokenizers.GraphTokenizer(GRAPH_VOCAB_FILE)
logging.info('Vocab size: %d', tokenizer.vocab_size)
return tokenizer
def get_dataset_class(dataset_name, model_type, job_mode='train'):
"""Get the dataset class used for all jobs."""
if dataset_name == 'freebase2wikitext':
if model_type == 'bow2text':
return pd.Bow2TextDataset
    elif model_type == 'graph2text':
      return pd.Graph2TextDataset
    elif model_type == 'text':
if job_mode in ['train', 'eval']:
return pd.TextOnlyDataset
else:
# for sampling: taking the unique graphs for a fair comparison
return pd.Bow2TextDataset
else:
# Add other graph2text data here.
raise NotImplementedError()
else:
def dataset(graph_tokenizer, *args, **kwargs):
del graph_tokenizer
return wt.Dataset(*args, **kwargs)
return dataset
def preprocess(batch, model_type, num_devices=1):
"""Preprocess the batch before sending to the model."""
if model_type == 'text':
if 'graphs' in batch:
del batch['graphs']
elif model_type == 'bow2text':
# Do nothing, bow2text data is already in a good form.
pass
else: # graph2text
if num_devices == 1:
graphs = gn.pad_graphs(jraph.batch(batch['graphs']))
else:
# We need to first batch graphs into num_devices batchs.
graphs = gn.batch_graphs_by_device(batch['graphs'], num_devices)
# Then we pad them to the maximum graph size in the batch and concat.
# This way graphs can be distributed to each device through pmap.
graphs = gn.pad_graphs_by_device(graphs)
max_graph_size = gn.pad_size(graphs.n_node.max())
batch.update({
'graphs': graphs,
'max_graph_size': max_graph_size})
return batch
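# For example, with model_type='graph2text' and num_devices=2, a batch of 4
# graphs is split into 2 per-device jraph batches, each padded to a common
# size so every device sees identically shaped inputs, and 'max_graph_size'
# is added so it can be passed as a static argument to the pmapped model.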
def text_model_fn(vocab_size):
return transformer.TransformerXL(
vocab_size=vocab_size,
emb_dim=FLAGS.emb_dim,
num_layers=FLAGS.num_layers,
num_heads=FLAGS.num_heads,
dropout_prob=FLAGS.dropout,
dropout_attn_prob=FLAGS.dropout_attn,
self_att_init_scale=FLAGS.self_att_init_scale,
dense_init_scale=FLAGS.dense_init_scale,
dense_dim=FLAGS.dense_dim,
tail_shrink_factor=FLAGS.tail_shrink_factor,
relative_pos_clamp_len=FLAGS.clamp_len or None)
def graph2text_model_fn(vocab_size):
"""Get graph2text transformer model."""
return transformer.Graph2TextTransformer(
vocab_size=vocab_size,
emb_dim=FLAGS.emb_dim,
num_layers=FLAGS.num_layers,
num_heads=FLAGS.num_heads,
dropout_prob=FLAGS.dropout,
dropout_attn_prob=FLAGS.dropout_attn,
self_att_init_scale=FLAGS.self_att_init_scale,
dense_init_scale=FLAGS.dense_init_scale,
dense_dim=FLAGS.dense_dim,
tail_shrink_factor=FLAGS.tail_shrink_factor,
relative_pos_clamp_len=FLAGS.clamp_len or None,
gnn_embed_dim=FLAGS.gnn_embed_dim,
gnn_num_layers=FLAGS.gnn_num_layers,
gnn_layer_norm=FLAGS.gnn_layer_norm)
def bow2text_model_fn(vocab_size):
"""Get the bow2text model."""
return transformer.Bow2TextTransformer(
vocab_size=vocab_size,
emb_dim=FLAGS.emb_dim,
num_layers=FLAGS.num_layers,
num_heads=FLAGS.num_heads,
dropout_prob=FLAGS.dropout,
dropout_attn_prob=FLAGS.dropout_attn,
self_att_init_scale=FLAGS.self_att_init_scale,
dense_init_scale=FLAGS.dense_init_scale,
dense_dim=FLAGS.dense_dim,
tail_shrink_factor=FLAGS.tail_shrink_factor,
relative_pos_clamp_len=FLAGS.clamp_len or None,
bow_embedding_dim=FLAGS.bow_embedding_dim,
bow_n_tokens=FLAGS.bow_n_tokens)
def build_loss_fn(vocab_size, cache_steps):
"""Build the appropriate loss function according to the configs."""
if FLAGS.model_type == 'text':
def loss_fn(data, is_training=True):
return text_model_fn(vocab_size=vocab_size).loss(
data['obs'], data['target'], data['mask'],
is_training=is_training,
should_reset=data['should_reset'],
cache_steps=cache_steps)
elif FLAGS.model_type == 'graph2text':
def loss_fn(data, max_graph_size, is_training=True):
return graph2text_model_fn(vocab_size=vocab_size).loss(
data['graphs'], max_graph_size, True,
data['obs'], data['target'], data['mask'],
is_training=is_training,
should_reset=data['should_reset'],
cache_steps=cache_steps)
elif FLAGS.model_type == 'bow2text':
def loss_fn(data, is_training=True):
return bow2text_model_fn(vocab_size=vocab_size).loss(
data['graphs'], data['obs'], data['target'], data['mask'],
is_training=is_training,
should_reset=data['should_reset'],
cache_steps=cache_steps)
else:
raise ValueError(f'Unknown model type "{FLAGS.model_type}".')
return loss_fn
def build_sampler(tokenizer, device=None):
"""Build the appropriate sampler according to the configs."""
if FLAGS.model_type == 'text':
model_fn = lambda prompts: text_model_fn(tokenizer.vocab_size)( # pylint: disable=g-long-lambda
prompts, is_training=False, cache_steps=FLAGS.sample_memory_size)
sampler_class = transformer_sampler.TransformerXLSampler
elif FLAGS.model_type == 'graph2text':
def model_fn(graphs, max_graph_size, prompts):
return graph2text_model_fn(tokenizer.vocab_size)(
graphs, max_graph_size, True, prompts, is_training=False,
cache_steps=FLAGS.sample_memory_size)
sampler_class = transformer_sampler.Graph2TextTransformerSampler
elif FLAGS.model_type == 'bow2text':
def model_fn(graphs, prompts):
return bow2text_model_fn(tokenizer.vocab_size)(
graphs, prompts, is_training=False,
cache_steps=FLAGS.sample_memory_size)
    sampler_class = transformer_sampler.Bow2TextTransformerSampler
  else:
    raise ValueError(f'Unknown model type "{FLAGS.model_type}".')
  sampler = sampler_class(model_fn, FLAGS.sampling_temperature, device)
return sampler
def schedule(i, lr_schedule, init_lr, min_lr_ratio, max_steps):
if lr_schedule == 'cosine':
cosine_decay = 0.5 * (1 + jnp.cos(jnp.pi * i / max_steps))
decayed = (1 - min_lr_ratio) * cosine_decay + min_lr_ratio
return init_lr * decayed
else:
return jnp.where(
i > 350000, init_lr / 3**3,
jnp.where(i > 250000, init_lr / 3**2,
jnp.where(i > 150000, init_lr / 3, init_lr)))
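# For example, with lr_schedule='cosine', init_lr=2.5e-4, min_lr_ratio=0.0
# and max_steps=200_000, the rate decays smoothly from 2.5e-4 at step 0
# towards init_lr * min_lr_ratio at max_steps; the non-cosine branch instead
# keeps init_lr until step 150k, then steps down by a further factor of 3 at
# steps 150k, 250k and 350k.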
def evaluate(eval_set, initial_state, updater, eval_batch_size=1,
preprocess_fn=None, max_eval_samples=-1,
print_progress_every=None):
"""Evaluate a model on given dataset."""
total_losses = []
total_counts = []
token_accuracy = []
seq_accuracy = []
state = initial_state
step = state['step']
for i, batch in enumerate(eval_set):
state, eval_out = updater.eval_return_state(state, preprocess_fn(batch))
total_losses.append(eval_out['total_loss'])
total_counts.append(eval_out['total_count'])
token_accuracy.append(
eval_out['token_accuracy'] * eval_out['total_count'])
seq_accuracy.append(eval_out['seq_accuracy'])
if print_progress_every and (i + 1) % print_progress_every == 0:
total_loss = float(jnp.array(total_losses).sum())
total_count = float(jnp.array(total_counts).sum())
avg_loss = total_loss / total_count
bpc = avg_loss * np.log2(np.e)
perplexity = np.exp(avg_loss)
logging.info(
'Evaluated %d batches, total tokens %d, average loss %g,'
' bpc %g, perplexity %g.',
i + 1, total_count, avg_loss, bpc, perplexity)
if 0 < max_eval_samples <= (i + 1) * eval_batch_size:
break
total_loss = jnp.array(total_losses).sum()
total_count = jnp.array(total_counts).sum()
avg_loss = total_loss / total_count
eval_out = dict(total_loss=float(total_loss),
total_count=float(total_count),
loss=float(avg_loss),
token_accuracy=float(
jnp.array(token_accuracy).sum() / total_count),
seq_accuracy=float(
jnp.array(seq_accuracy).sum() / len(seq_accuracy)),
step=float(step),
bits_per_token=float(avg_loss) * np.log2(np.e),
perplexity=np.exp(float(avg_loss)))
return eval_out, state
def extract_title(text, tokenizer):
r"""Extract the title in the text.
  Wikitext articles are in the format `\n = TITLE = \n \n ...`. We extract
  the title as the tokens from the start up to where `\n \n` first appears.
Args:
text: tokenized input text using `tokenizer`.
tokenizer: text tokenizer.
Returns:
title_end_idx: a numpy.array of shape (batch_size,), it indicates the index
in `text` that marks the end of the title.
"""
batch_size, text_length = text.shape
title_end_idx = np.ones(batch_size, dtype=np.int32)
newline_token = tokenizer.encode('\n')[0]
for b in range(batch_size):
prev_token = 1 # start tokens
for i in range(1, text_length): # skip start token
# when we first see '\n \n', that is the title
if prev_token == newline_token and text[b, i] == newline_token:
title_end_idx[b] = i
break
else:
prev_token = text[b, i]
return title_end_idx
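# For example, for tokenized text starting '<bos> \n = Title = \n \n ...',
# the returned index points at the second '\n' of the first '\n \n' pair, so
# text[b, 1:title_end_idx[b] + 1] covers '\n = Title = \n \n'.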
def construct_prompts(text, batch_size, sample_length, tokenizer, prompt_title):
"""Construct prompts for text generation.
Args:
text: tokenized input text using `tokenizer`.
batch_size: the size of the batch.
sample_length: the length of the sample to be generated.
tokenizer: text tokenizer.
prompt_title: whether to return a prompt with the title of the `text`.
Returns:
prompts: a numpy.array of shape [batch_size, sample_length], in which -1
indicates tokens that need to be generated using the sampler.
"""
prompts = -np.ones((batch_size, sample_length), dtype=np.int32)
prompts[:, 0] = tokenizer.bos_token()
if prompt_title and text is not None:
title_end_idx = extract_title(text, tokenizer)
for i in range(batch_size):
prompts[i, 1:title_end_idx[i]+1] = text[i, 1:title_end_idx[i]+1]
return prompts
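# For example, with batch_size=1, sample_length=6 and prompt_title=False, the
# prompt is [[bos, -1, -1, -1, -1, -1]]; the sampler later fills in every -1
# position with generated tokens.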
def generate_samples(params, tokenizer, sampler, model_type, prompts, graphs):
"""Generate a batch of samples using a sampler."""
if model_type == 'text':
samples = sampler.sample(params, prompts)
elif model_type == 'graph2text':
samples = sampler.sample(params, prompts, graphs, pad=True)
elif model_type == 'bow2text':
samples = sampler.sample(params, prompts, graphs)
else:
raise ValueError(f'Unknown model_type {model_type}')
return [tokenizer.decode(s) for s in samples], samples
def take_unique_graphs(data_iter, model_type):
"""Filter data such that it only returns batches with unique graphs."""
prev_graphs = None
for batch in data_iter:
graphs = batch.get('graphs', None)
# If there's no graph in batch, don't do any filtering
if graphs is None:
yield batch
else:
if prev_graphs is None:
prev_graphs = graphs
yield batch
else:
if model_type == 'graph2text':
not_same_graph = (prev_graphs.nodes.shape != graphs.nodes.shape or
not (prev_graphs.nodes == graphs.nodes).all())
else:
not_same_graph = (prev_graphs.shape != graphs.shape or
not (prev_graphs == graphs).all())
if not_same_graph:
prev_graphs = graphs
yield batch
def compute_map_sklearn(pred, gt):
"""Computes mAP using scikit-learn."""
assert len(gt.shape) == len(pred.shape) == 2, (
'gt should be a one-hot encoding with the same shape as pred')
ap = [
sklearn.metrics.average_precision_score(
gt[c, :], pred[c, :], average=None)
for c in range(gt.shape[0])
]
return sum(ap) / len(ap)
def compute_recall_at_k(pred, k=1):
"""Computes recall@1 score."""
num_articles = pred.shape[1]
return sklearn.metrics.top_k_accuracy_score(
np.arange(num_articles), pred, k=k)
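# For example, with pred of shape [num_articles, num_articles] where row i
# scores every candidate for query i, recall@k is the fraction of rows whose
# true index i appears among the k highest-scoring candidates.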
def compute_text_graph_relevance(
eval_set, initial_state, updater, eval_batch_size=1, preprocess_fn=None,
print_progress_every=None):
"""Compute the text and graph relevance a model on given dataset."""
assert eval_batch_size == 1
num_articles = eval_set.num_articles
tokens_count = np.zeros((num_articles, num_articles))
log_probs = np.zeros((num_articles, num_articles)) # [graphs, texts]
state = initial_state
for i, batch in enumerate(eval_set):
state, eval_out = updater.eval_return_state(state, preprocess_fn(batch))
graph_id = batch['graph_id'][0]
seq_id = batch['seq_id'][0]
tokens_count[graph_id, seq_id] += eval_out['total_count']
log_probs[graph_id, seq_id] += eval_out['log_probs']
if print_progress_every is not None and (i + 1) % print_progress_every == 0:
logging.info('Evaluated %d samples', i + 1)
log_probs_per_token = log_probs / tokens_count
labels = np.eye(num_articles)
eval_out = dict(
log_probs=log_probs,
tokens_count=tokens_count,
log_probs_per_token=log_probs_per_token,
text2graph_recall_at_1=compute_recall_at_k(log_probs_per_token.T, k=1),
text2graph_recall_at_5=compute_recall_at_k(log_probs_per_token.T, k=5),
text2graph_map=compute_map_sklearn(log_probs_per_token.T, labels),
graph2text_recall_at_1=compute_recall_at_k(log_probs_per_token, k=1),
graph2text_recall_at_5=compute_recall_at_k(log_probs_per_token, k=5),
graph2text_map=compute_map_sklearn(log_probs_per_token, labels))
return eval_out, state
def _get_ngrams(segment, max_order):
"""Extracts all n-grams upto a given maximum order from an input segment.
Args:
segment: text segment from which n-grams will be extracted.
max_order: maximum length in tokens of the n-grams returned by this
methods.
Returns:
The Counter containing all n-grams upto max_order in segment
with a count of how many times each n-gram occurred.
"""
ngram_counts = collections.Counter()
for order in range(1, max_order + 1):
for i in range(0, len(segment) - order + 1):
ngram = tuple(segment[i:i+order])
ngram_counts[ngram] += 1
return ngram_counts
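# A quick sanity check of the helper above:
#
#   _get_ngrams(['a', 'b', 'a'], max_order=2)
#   => Counter({('a',): 2, ('b',): 1, ('a', 'b'): 1, ('b', 'a'): 1})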
def compute_bleu(reference_corpus, translation_corpus, max_order=4,
smooth=False):
"""Computes BLEU score of translated segments against one or more references.
Originally from tensor2tensor/tensor2tensor/utils/bleu_hook.py
Args:
reference_corpus: list of lists of references for each translation. Each
reference should be tokenized into a list of tokens.
translation_corpus: list of translations to score. Each translation
should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
BLEU score and n-gram precisions.
"""
matches_by_order = [0] * max_order
possible_matches_by_order = [0] * max_order
reference_length = 0
translation_length = 0
for (references, translation) in zip(reference_corpus,
translation_corpus):
reference_length += min(len(r) for r in references)
translation_length += len(translation)
merged_ref_ngram_counts = collections.Counter()
for reference in references:
merged_ref_ngram_counts |= _get_ngrams(reference, max_order)
translation_ngram_counts = _get_ngrams(translation, max_order)
overlap = translation_ngram_counts & merged_ref_ngram_counts
for ngram in overlap:
matches_by_order[len(ngram)-1] += overlap[ngram]
for order in range(1, max_order+1):
possible_matches = len(translation) - order + 1
if possible_matches > 0:
possible_matches_by_order[order-1] += possible_matches
precisions = [0] * max_order
for i in range(0, max_order):
if smooth:
precisions[i] = ((matches_by_order[i] + 1.) /
(possible_matches_by_order[i] + 1.))
else:
if possible_matches_by_order[i] > 0:
precisions[i] = (float(matches_by_order[i]) /
possible_matches_by_order[i])
else:
precisions[i] = 0.0
if min(precisions) > 0:
p_log_sum = sum((1. / max_order) * math.log(p) for p in precisions)
geo_mean = math.exp(p_log_sum)
else:
geo_mean = 0
ratio = float(translation_length) / reference_length
  if ratio <= 0.0:
    bp = 0.0
  elif ratio >= 1.0:
    bp = 1.0
  else:
    bp = math.exp(1 - 1. / ratio)
bleu = geo_mean * bp
return bleu, precisions
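# A toy check of the function above (each translation is paired with a list
# of tokenized references):
#
#   bleu, precisions = compute_bleu(
#       reference_corpus=[[['the', 'cat', 'sat']]],
#       translation_corpus=[['the', 'cat', 'sat']],
#       max_order=2)
#   # An exact match yields bleu == 1.0 and precisions == [1.0, 1.0].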
| deepmind-research-master | wikigraphs/utils.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# WikiGraphs is licensed under the terms of the Creative Commons
# Attribution-ShareAlike 4.0 International (CC BY-SA 4.0) license.
#
# WikiText-103 data (unchanged) is licensed by Salesforce.com, Inc. under the
# terms of the Creative Commons Attribution-ShareAlike 4.0 International
# (CC BY-SA 4.0) license. You can find details about CC BY-SA 4.0 at:
#
# https://creativecommons.org/licenses/by-sa/4.0/legalcode
#
# Freebase data is licensed by Google LLC under the terms of the Creative
# Commons CC BY 4.0 license. You may obtain a copy of the License at:
#
# https://creativecommons.org/licenses/by/4.0/legalcode
#
# ==============================================================================
"""Train a transformer for language modeling on Wikitext-103."""
import concurrent.futures
import functools
import os
import pickle
import time
from absl import app
from absl import flags
from absl import logging
import jax
import jraph
import numpy as np
import optax
from updaters import CheckpointingUpdater
from updaters import Updater
import utils
# Train
flags.DEFINE_integer('train_batch_size', 4, '(Per-Device) batch size for'
' training.')
flags.DEFINE_integer('train_timesteps', 150, 'Sequence length to learn on')
flags.DEFINE_integer('train_memory_size', 150, 'Memory size for transformer XL')
flags.DEFINE_bool('debug', False, 'Whether to turn on debugging mode')
flags.DEFINE_string('job_mode', 'train',
'One of `train`, `eval`, `sample`, `retrieve`.')
flags.DEFINE_integer('random_seed', 42, 'Random seed id.')
flags.DEFINE_integer('num_gpus', 8, 'Number of GPUs for training.')
# Eval
flags.DEFINE_integer('eval_batch_size', 1, 'Evaluation batch size')
flags.DEFINE_string('eval_subset', 'valid', 'Which subset to evaluate on,'
' one of `valid`, `test`.')
flags.DEFINE_integer('eval_every', 10, 'Evaluation frequency.')
flags.DEFINE_integer('eval_timesteps', 64, 'Sequence length to learn on')
flags.DEFINE_integer('eval_memory_size', 640, 'Memory size for transformer XL')
flags.DEFINE_integer('max_eval_samples', -1, 'Max number of eval samples. Set'
' as -1 to use the entire eval set.')
# Model
flags.DEFINE_integer('emb_dim', 410, 'model width')
flags.DEFINE_integer('num_heads', 10, 'Number of attention heads')
flags.DEFINE_integer('num_layers', 16, 'Number of transformer layers')
flags.DEFINE_integer('dense_dim', 2100, 'Size of dense hidden layer.')
flags.DEFINE_integer('tail_shrink_factor', 4,
'Low-frequency vocabulary shrinkage factor in adaptive'
' softmax.')
flags.DEFINE_string('emb_type', 'adaptive_softmax', 'Type of the word embedding'
' layer.')
flags.DEFINE_integer('clamp_len', 400, 'Clamp length for transformer XL.')
flags.DEFINE_float('dropout', 0.1, 'Dropout rate for the transformer layers.')
flags.DEFINE_float('dropout_attn', 0.0, 'Dropout rate for the attention'
' weights.')
flags.DEFINE_float('self_att_init_scale', 0.02,
                   'Self attention module initialization scale.')
flags.DEFINE_float('dense_init_scale', 0.02,
                   'Dense module initialization scale.')
# Graph neural net configs
flags.DEFINE_string('gnn_embed_type', 'adaptive', 'Token embedding type for the'
' graph.')
flags.DEFINE_integer('gnn_embed_dim', 128, 'Graph node embedding size.')
flags.DEFINE_integer('gnn_num_layers', 1, 'Number of layers in the GNN.')
flags.DEFINE_bool('gnn_layer_norm', True, 'Whether to use layer norm in GNN.')
# Bag-of-words to text configs
flags.DEFINE_integer('bow_embedding_dim', 256, 'Size of the bow embeddings.')
flags.DEFINE_integer('bow_n_tokens', 1, 'Number of tokens to use for the'
' bow2text model.')
# Sampling
flags.DEFINE_float('sampling_temperature', 0.8, 'Temperature used for'
' sampling. Sampling becomes more deterministic with a'
' lower temperature. Setting temperature to 1.0 samples'
' from the model distribution.')
flags.DEFINE_bool('prompt_title', False, 'Whether to prompt title when sample')
flags.DEFINE_integer('sample_length', 512, 'Length of samples.')
flags.DEFINE_integer('sample_memory_size', 640, 'Memory size for sampling.')
flags.DEFINE_integer('num_samples', 1000, 'Maximum number of samples to'
' generate.')
# Optimization
flags.DEFINE_float('init_lr', 0.00025, 'Initial learning rate.')
flags.DEFINE_float('min_lr_ratio', 0.0, 'Minimum learning rate as a ratio of'
' `init_lr`.')
flags.DEFINE_string('lr_schedule', 'cosine', 'One of `default`, `cosine`.')
flags.DEFINE_float('grad_clip', 0.25, 'Maximum gradient norm allowed for'
' clipping, set to a very large number to disable clipping.')
flags.DEFINE_integer('max_steps', 200_000, 'Number of training steps.')
flags.DEFINE_string('checkpoint_dir', '/tmp/graph2text',
'Directory to store checkpoints.')
# Data
flags.DEFINE_string('dataset', 'freebase2wikitext', 'Which dataset to train on,'
' one of "wikitext", "freebase2wikitext".')
flags.DEFINE_string('model_type', 'graph2text', 'One of "text", "graph2text",'
' "bow2text".')
flags.DEFINE_string('graph_data_version', 'max256', 'One of "max256", "max512",'
' "max1024".')
flags.DEFINE_integer('log_every', 50, 'Log every this many steps.')
flags.DEFINE_integer('ckpt_every', 1000, 'Checkpoint every this many steps.')
FLAGS = flags.FLAGS
def _preprocess(batch, num_devices=1):
return utils.preprocess(batch, FLAGS.model_type, num_devices)
def _train(updater, train_dataset, num_devices):
"""Train the transformer model."""
# Initialize parameters.
logging.info('Initializing parameters...')
rng = jax.random.PRNGKey(FLAGS.random_seed)
state = updater.init(
rng, _preprocess(train_dataset.return_faux_batch(), num_devices))
logging.info('Starting train loop...')
prev_time = time.time()
while True:
data = next(train_dataset)
state, metrics = updater.update(state, _preprocess(data, num_devices))
# We use JAX runahead to mask data preprocessing and JAX dispatch overheads.
# Using values from state/metrics too often will block the runahead and can
# cause these overheads to become more prominent.
step = np.array(metrics['step'])
if step % FLAGS.log_every == 0:
steps_per_sec = FLAGS.log_every / (time.time() - prev_time)
prev_time = time.time()
metrics.update({'steps_per_sec': steps_per_sec})
logging.info({k: float(v) for k, v in metrics.items()})
if step % FLAGS.ckpt_every == 0:
updater.save_checkpoint(state)
if step > FLAGS.max_steps:
break
def _eval(updater, eval_dataset):
"""Evaluate the transformer model."""
checkpoint_state = updater.load_checkpoint()
rng = jax.random.PRNGKey(FLAGS.random_seed)
state = updater.init_from_checkpoint(
rng, _preprocess(eval_dataset.return_faux_batch()), checkpoint_state)
eval_out, state = utils.evaluate(
eval_dataset, state, updater, FLAGS.eval_batch_size, _preprocess,
FLAGS.max_eval_samples, print_progress_every=20)
logging.info('Eval output: %s', eval_out)
def _retrieve(updater, eval_dataset):
"""Graph and text retrieval using the transformer model."""
checkpoint_state = updater.load_checkpoint()
rng = jax.random.PRNGKey(FLAGS.random_seed)
state = updater.init_from_checkpoint(
rng, _preprocess(eval_dataset.return_faux_batch()), checkpoint_state)
retrieval_out, _ = utils.compute_text_graph_relevance(
eval_dataset, state, updater, preprocess_fn=_preprocess,
print_progress_every=20)
logging.info('Retrieval output: %s', retrieval_out)
def _sample(eval_dataset, tokenizer, devices, batch_size=1):
"""Evaluate the graph2text transformer."""
checkpoint_dir = os.path.join(FLAGS.checkpoint_dir, 'checkpoint.pkl')
logging.info('Loading checkpoint from %s', checkpoint_dir)
with open(checkpoint_dir, 'rb') as f:
state = pickle.load(f)
if FLAGS.model_type == 'graph2text':
# process list of graphs into a batch
eval_dataset = map(lambda x: dict( # pylint: disable=g-long-lambda
obs=x['obs'],
target=x['target'],
should_reset=x['should_reset'],
mask=x['mask'],
graphs=jraph.batch(x['graphs']),
), eval_dataset)
eval_dataset = utils.take_unique_graphs(eval_dataset, FLAGS.model_type)
samplers = []
for device in devices:
sampler = utils.build_sampler(tokenizer, device=device)
samplers.append(sampler)
step = state['step']
params = state['params']
sample_logger = []
with concurrent.futures.ThreadPoolExecutor(
max_workers=len(samplers)) as executor:
futures = dict()
for sampler in samplers:
batch = next(eval_dataset)
prompts = utils.construct_prompts(
batch['obs'], batch_size, FLAGS.sample_length, tokenizer,
prompt_title=FLAGS.prompt_title)
if FLAGS.model_type in ['graph2text', 'bow2text']:
future = executor.submit(
utils.generate_samples, params, tokenizer, sampler,
model_type=FLAGS.model_type, prompts=prompts,
graphs=batch['graphs'])
futures[future] = (sampler, batch['graphs'], batch['obs'])
else:
future = executor.submit(
utils.generate_samples, params, tokenizer, sampler,
model_type=FLAGS.model_type, prompts=prompts, graphs=None)
futures[future] = (sampler, batch['obs'])
n_samples = 0
while n_samples < FLAGS.num_samples:
for future, future_items in list(futures.items()):
if not future.done():
continue
samples, tokens = future.result()
if FLAGS.model_type == 'graph2text':
sampler, graphs, text = future_items
graphs = jraph.unbatch(graphs)
elif FLAGS.model_type == 'bow2text':
sampler, graphs, text = future_items
else:
sampler, text = future_items
if FLAGS.model_type in ['graph2text', 'bow2text']:
for s, g, tk, txt in zip(samples, graphs, tokens, text):
            # Log every generated sample together with its graph and the
            # ground-truth text it is paired with.
logging.info('[step %d]', step)
logging.info('graph=\n%r', g)
logging.info('sample=\n%s', s)
if FLAGS.model_type == 'graph2text':
sample_logger.append({
'step': step,
'sample': s,
'sample_tokens': tk,
'ground_truth_text': txt,
})
elif FLAGS.model_type == 'bow2text':
sample_logger.append({
'step': step,
'bow': g,
'sample': s,
'sample_tokens': tk,
'ground_truth_text': txt,
})
else:
for s, tk, txt in zip(samples, tokens, text):
            # Log every generated sample together with the ground-truth text
            # it is paired with.
logging.info('[step %d]', step)
logging.info('sample=\n%s', s)
sample_logger.append({
'step': step,
'sample': s,
'sample_tokens': tk,
'ground_truth_text': txt,
})
n_samples += len(samples)
logging.info('Finished generating %d samples', n_samples)
del futures[future]
if n_samples < FLAGS.num_samples:
batch = next(eval_dataset)
prompts = utils.construct_prompts(
batch['obs'], batch_size, FLAGS.sample_length, tokenizer,
prompt_title=FLAGS.prompt_title)
if FLAGS.model_type in ['graph2text', 'bow2text']:
future = executor.submit(
utils.generate_samples, params, tokenizer, sampler,
model_type=FLAGS.model_type, prompts=prompts,
graphs=batch['graphs'])
futures[future] = (sampler, batch['graphs'], batch['obs'])
else:
future = executor.submit(
utils.generate_samples, params, tokenizer, sampler,
model_type=FLAGS.model_type, prompts=prompts, graphs=None)
futures[future] = (sampler, batch['obs'])
logging.info('Finished')
path = os.path.join(FLAGS.checkpoint_dir, 'samples.pkl')
with open(path, 'wb') as f:
pickle.dump(dict(samples=sample_logger), f)
logging.info('Samples saved to %s', path)
def main(_):
# Create the dataset.
tokenizer = utils.init_tokenizer(FLAGS.dataset)
graph_tokenizer = utils.init_graph_tokenizer()
dataset_class = utils.get_dataset_class(FLAGS.dataset, FLAGS.model_type)
  has_graph = FLAGS.model_type == 'graph2text'
local_devices = jax.local_devices()
num_gpus = min(FLAGS.num_gpus, len(local_devices))
if FLAGS.job_mode == 'train':
train_dataset = dataset_class(
tokenizer=tokenizer,
graph_tokenizer=graph_tokenizer,
batch_size=FLAGS.train_batch_size,
subset='train',
timesteps=FLAGS.train_timesteps,
version=FLAGS.graph_data_version,
shuffle_data=True,
repeat=True,
debug=FLAGS.debug)
train_iter = iter(train_dataset)
loss_fn = utils.build_loss_fn(vocab_size=tokenizer.vocab_size,
cache_steps=FLAGS.train_memory_size)
optimizer = optax.chain(
optax.clip_by_global_norm(FLAGS.grad_clip),
optax.scale_by_adam(),
optax.scale_by_schedule(functools.partial(
utils.schedule,
lr_schedule=FLAGS.lr_schedule,
init_lr=FLAGS.init_lr,
min_lr_ratio=FLAGS.min_lr_ratio,
max_steps=FLAGS.max_steps)),
optax.scale(-1))
optimizer = optax.apply_if_finite(optimizer, max_consecutive_errors=5)
updater = Updater(loss_fn, optimizer,
devices=local_devices[:num_gpus],
has_graph=has_graph)
updater = CheckpointingUpdater(updater, FLAGS.checkpoint_dir)
_train(updater, train_iter, num_gpus)
elif FLAGS.job_mode == 'eval':
eval_dataset = dataset_class(
tokenizer=tokenizer,
graph_tokenizer=graph_tokenizer,
batch_size=FLAGS.eval_batch_size,
subset=FLAGS.eval_subset,
timesteps=FLAGS.eval_timesteps,
version=FLAGS.graph_data_version,
shuffle_data=False,
repeat=False,
debug=FLAGS.debug)
eval_iter = iter(eval_dataset)
loss_fn = utils.build_loss_fn(vocab_size=tokenizer.vocab_size,
cache_steps=FLAGS.eval_memory_size)
# only use one device for evaluation
devices = local_devices[:1]
updater = Updater(loss_fn, optimizer=None, devices=devices,
has_graph=has_graph)
updater = CheckpointingUpdater(updater, FLAGS.checkpoint_dir)
_eval(updater, eval_iter)
elif FLAGS.job_mode == 'sample':
eval_dataset = dataset_class(
tokenizer=tokenizer,
graph_tokenizer=graph_tokenizer,
batch_size=1,
subset=FLAGS.eval_subset,
timesteps=FLAGS.sample_length,
version=FLAGS.graph_data_version,
shuffle_data=False,
repeat=True,
debug=FLAGS.debug)
eval_iter = iter(eval_dataset)
_sample(eval_iter, tokenizer, local_devices[:num_gpus])
elif FLAGS.job_mode == 'retrieve':
eval_dataset = dataset_class(
tokenizer=tokenizer,
graph_tokenizer=graph_tokenizer,
batch_size=1,
subset=FLAGS.eval_subset,
timesteps=FLAGS.eval_timesteps,
version=FLAGS.graph_data_version,
shuffle_data=False,
repeat=False,
graph_retrieval_dataset=True,
debug=FLAGS.debug)
eval_iter = iter(eval_dataset)
loss_fn = utils.build_loss_fn(vocab_size=tokenizer.vocab_size,
cache_steps=FLAGS.eval_memory_size)
# only use one device for evaluation
devices = local_devices[:1]
updater = Updater(loss_fn, optimizer=None, devices=devices,
has_graph=has_graph)
updater = CheckpointingUpdater(updater, FLAGS.checkpoint_dir)
_retrieve(updater, eval_iter)
if __name__ == '__main__':
app.run(main)
| deepmind-research-master | wikigraphs/main.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# WikiGraphs is licensed under the terms of the Creative Commons
# Attribution-ShareAlike 4.0 International (CC BY-SA 4.0) license.
#
# WikiText-103 data (unchanged) is licensed by Salesforce.com, Inc. under the
# terms of the Creative Commons Attribution-ShareAlike 4.0 International
# (CC BY-SA 4.0) license. You can find details about CC BY-SA 4.0 at:
#
# https://creativecommons.org/licenses/by-sa/4.0/legalcode
#
# Freebase data is licensed by Google LLC under the terms of the Creative
# Commons CC BY 4.0 license. You may obtain a copy of the License at:
#
# https://creativecommons.org/licenses/by/4.0/legalcode
#
# ==============================================================================
"""Script for building vocabulary files for datasets."""
import collections
import csv
import enum
import io
import os
from typing import List, Tuple
from absl import app
from absl import flags
from absl import logging
from wikigraphs.data import io_tools
from wikigraphs.data import paired_dataset
from wikigraphs.data import tokenizers
from wikigraphs.data import wikitext
class DatasetType(enum.Enum):
text = 1
graph = 2
wikitext = 3
FLAGS = flags.FLAGS
flags.DEFINE_string('data_dir', '', 'Path to the directory that contains the'
' unzipped wikitext-103 data.')
flags.DEFINE_string('vocab_file_path', '', 'Path to the output vocab file.')
flags.DEFINE_enum_class('data_type', DatasetType.wikitext, DatasetType,
'One of {`wikitext`, `graph`, `text`}.')
flags.DEFINE_integer('threshold', 1, 'Frequency threshold for a word to be'
' included in the vocabulary.')
flags.DEFINE_string('version', 'max256', 'Which version of paired data to use.')
def get_vocab(dataset: wikitext.RawDataset) -> List[Tuple[str, int]]:
"""Build vocabulary, return (word, count) tuples sorted by count."""
vocab = collections.defaultdict(int)
for pair in dataset:
for t in pair.text.split(' '):
if t:
vocab[t] += 1
return sorted(vocab.items(), key=lambda t: -t[1])
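# For example, a dataset whose concatenated article text is 'the cat the'
# yields [('the', 2), ('cat', 1)].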
def write_vocab(vocab: List[Tuple[str, int]], output_path: str):
"""Write a vocab list to a file."""
output_dir = os.path.dirname(output_path)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
with open(output_path, mode='wb') as f_:
with io.TextIOWrapper(f_, encoding='utf-8') as f:
w = csv.writer(f)
w.writerows(vocab)
def build_wikitext_vocab():
logging.info('Loading the dataset.')
dataset = wikitext.RawDataset(subset='train', data_dir=FLAGS.data_dir)
logging.info('Building the vocab.')
vocab = get_vocab(dataset)
logging.info('Finished, vocab size %d, total number of tokens %d',
len(vocab), sum([c for _, c in vocab]))
logging.info('Writing the vocab to %s', FLAGS.vocab_file_path)
write_vocab(vocab, FLAGS.vocab_file_path)
def build_graph_vocab():
"""Build vocabulary for graph data."""
logging.info('Loading the dataset.')
dataset = paired_dataset.ParsedDataset(
subset='train', data_dir=FLAGS.data_dir, version=FLAGS.version)
logging.info('Building graph vocab.')
vocab = collections.defaultdict(int)
for pair in dataset:
graph = pair.graph
for n in graph.nodes():
for t in tokenizers.GraphTokenizer.split_node(n):
if t:
vocab[t] += 1
for _, _, e in graph.edges():
for t in tokenizers.GraphTokenizer.split_edge(e):
if t:
vocab[t] += 1
vocab = sorted(vocab.items(), key=lambda t: -t[1])
vocab = [k for k, v in vocab if v >= FLAGS.threshold]
logging.info('Finished, vocab size %d.', len(vocab))
logging.info('Writing the vocab to %s.', FLAGS.vocab_file_path)
io_tools.write_txt_file(FLAGS.vocab_file_path, '\n'.join(vocab),
# Some unicode characters requires utf-16 to encode.
encoding='utf-16')
def build_text_vocab():
"""Build vocabulary for the text part of the graph-to-text data."""
logging.info('Loading the dataset.')
dataset = paired_dataset.ParsedDataset(
subset='train', data_dir=FLAGS.data_dir, version=FLAGS.version)
logging.info('Building text vocab.')
vocab = collections.defaultdict(int)
for pair in dataset:
for t in pair.text.split(' '):
if t:
vocab[t] += 1
vocab = sorted(vocab.items(), key=lambda t: -t[1])
logging.info('Finished, vocab size %d, total number of tokens %d.',
len(vocab), sum([v for _, v in vocab]))
vocab = [(k, v) for k, v in vocab if v >= FLAGS.threshold]
logging.info('After filtering, vocab size %d.', len(vocab))
logging.info('Writing the vocab to %s.', FLAGS.vocab_file_path)
write_vocab(vocab, FLAGS.vocab_file_path)
def main(_):
if FLAGS.data_type == DatasetType.wikitext:
build_wikitext_vocab()
elif FLAGS.data_type == DatasetType.text:
build_text_vocab()
elif FLAGS.data_type == DatasetType.graph:
build_graph_vocab()
else:
raise ValueError(f'Unknown data type {FLAGS.data_type}.')
if __name__ == '__main__':
app.run(main)
| deepmind-research-master | wikigraphs/scripts/build_vocab.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# WikiGraphs is licensed under the terms of the Creative Commons
# Attribution-ShareAlike 4.0 International (CC BY-SA 4.0) license.
#
# WikiText-103 data (unchanged) is licensed by Salesforce.com, Inc. under the
# terms of the Creative Commons Attribution-ShareAlike 4.0 International
# (CC BY-SA 4.0) license. You can find details about CC BY-SA 4.0 at:
#
# https://creativecommons.org/licenses/by-sa/4.0/legalcode
#
# Freebase data is licensed by Google LLC under the terms of the Creative
# Commons CC BY 4.0 license. You may obtain a copy of the License at:
#
# https://creativecommons.org/licenses/by/4.0/legalcode
#
# ==============================================================================
r"""Tool to visualize graphs.
You need to have the command line tool `dot` installed locally, for example by
`sudo apt-get install graphviz`.
Example usage:
python visualize_graph.py \
--logtostderr --graph_ids=0:48 --truncate_limit=500 --layout=fdp
"""
import html
import os
import textwrap
from absl import app
from absl import flags
from absl import logging
from wikigraphs.data import io_tools
from wikigraphs.data import paired_dataset as pd
FLAGS = flags.FLAGS
flags.DEFINE_string('subset', 'valid', 'Which subset to choose graphs from.')
flags.DEFINE_string('graph_ids', '', 'A comma-separated string of graph IDs'
' (0-based), for example `1,2,3`. Or alternatively a'
' range, e.g. `0:10` which is equivalent to'
' `0,1,2,3,...,9`.')
flags.DEFINE_string('version', 'max256', 'Which version of data to load.')
flags.DEFINE_string('data_dir', '', 'Path to a directory that contains the raw'
' paired data, if provided.')
flags.DEFINE_string('output_dir', '/tmp/graph_vis', 'Output directory to save'
' the visualized graphs.')
flags.DEFINE_integer('truncate_limit', -1, 'Maximum length for graph nodes in'
' visualization.')
flags.DEFINE_string('layout', 'fdp', 'Which one of the dot layout to use.')
def truncate(s: str) -> str:
if FLAGS.truncate_limit > 0 and len(s) > FLAGS.truncate_limit:
s = s[:FLAGS.truncate_limit] + '...'
return s
def format_label(s: str, width: int = 40) -> str:
"""Format a node / edge label."""
s = io_tools.normalize_freebase_string(s)
s = truncate(s)
lines = s.split('\\n')
output_lines = []
for line in lines:
line = html.escape(line)
if width > 0:
output_lines += textwrap.wrap(line, width)
else:
output_lines.append(line)
return '<' + '<br/>'.join(output_lines) + '>'
def graph_to_dot(graph_text_pair: io_tools.GraphTextPair) -> str:
"""Convert a graph to a dot file."""
dot = ['digraph {', 'node [shape=rect];']
graph = pd.Graph.from_edges(graph_text_pair.edges)
center_node_id = graph.node2id(graph_text_pair.center_node)
for i, n in enumerate(graph.nodes()):
color = '#f5dc98' if i == center_node_id else (
'#b0ffad' if not(n[0] == '"' and n[-1] == '"') else '#ffffff')
label = format_label(n)
dot.append(f'{i} [ label = {label}, fillcolor="{color}", style="filled"];')
for i, j, e in graph.edges():
dot.append(f'{i} -> {j} [ label = {format_label(e, width=0)} ];')
dot.append('}')
return '\n'.join(dot)
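# A sketch of the output for a tiny two-node graph (labels abbreviated; the
# center node gets the '#f5dc98' fill):
#
#   digraph {
#   node [shape=rect];
#   0 [ label = <...>, fillcolor="#f5dc98", style="filled"];
#   1 [ label = <"a literal">, fillcolor="#ffffff", style="filled"];
#   0 -> 1 [ label = <...> ];
#   }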
def visualize_graph(graph_text_pair: io_tools.GraphTextPair,
graph_id: int,
output_dir: str):
"""Visualize a graph and save the visualization to the specified directory."""
dot = graph_to_dot(graph_text_pair)
output_file = os.path.join(output_dir, f'{graph_id}.dot')
logging.info('Writing output to %s', output_file)
with open(output_file, 'w') as f:
f.write(dot)
pdf_output = os.path.join(output_dir, f'{graph_id}.pdf')
os.system(f'dot -K{FLAGS.layout} -Tpdf -o {pdf_output} {output_file}')
def main(_):
logging.info('Loading the %s set of data.', FLAGS.subset)
pairs = list(pd.RawDataset(subset=FLAGS.subset,
data_dir=FLAGS.data_dir or None,
shuffle_data=False,
version=FLAGS.version))
  logging.info('Loaded %d graph-text pairs.', len(pairs))
if ':' in FLAGS.graph_ids:
start, end = [int(i) for i in FLAGS.graph_ids.split(':')]
graph_ids = list(range(start, end))
else:
graph_ids = [int(i) for i in FLAGS.graph_ids.split(',')]
logging.info('Visualizing graphs with ID %r', graph_ids)
if not os.path.exists(FLAGS.output_dir):
os.makedirs(FLAGS.output_dir)
for gid in graph_ids:
visualize_graph(pairs[gid], gid, FLAGS.output_dir)
if __name__ == '__main__':
app.run(main)
| deepmind-research-master | wikigraphs/scripts/visualize_graph.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# WikiGraphs is licensed under the terms of the Creative Commons
# Attribution-ShareAlike 4.0 International (CC BY-SA 4.0) license.
#
# WikiText-103 data (unchanged) is licensed by Salesforce.com, Inc. under the
# terms of the Creative Commons Attribution-ShareAlike 4.0 International
# (CC BY-SA 4.0) license. You can find details about CC BY-SA 4.0 at:
#
# https://creativecommons.org/licenses/by-sa/4.0/legalcode
#
# Freebase data is licensed by Google LLC under the terms of the Creative
# Commons CC BY 4.0 license. You may obtain a copy of the License at:
#
# https://creativecommons.org/licenses/by/4.0/legalcode
#
# ==============================================================================
"""Preprocess freebase data and pair with wikitext."""
import os
from absl import app
from absl import flags
from absl import logging
from wikigraphs.data import io_tools
from wikigraphs.data import wikitext
FLAGS = flags.FLAGS
flags.DEFINE_string('freebase_dir', '', 'Directory that contains Freebase'
                    ' graphs.')
flags.DEFINE_string('output_dir', '', 'Path to output directory to store the'
' paired dataset.')
def pair_graphs_with_wikitext(subset: str, graph_dir: str, output_dir: str):
"""Pair graphs with wikitext articles, and write to output directory."""
logging.info('Pairing graphs from the %s set from %s with wikitext.',
subset, graph_dir)
graphs = list(io_tools.graphs_from_file(
os.path.join(graph_dir, f'{subset}.gz')))
title2graph = {
io_tools.normalize_freebase_string(g.title).replace(' ', ''): g
for g in graphs}
n_graphs = len(graphs)
# Use raw version of the wikitext data as the tokenized version has <unk> in
# titles which is bad for matching. We will handle the <unk>s through the
# tokenizer to make sure our data are equivalent to that of the tokenized
# version of wikitext-103.
wikitext_articles = list(wikitext.RawDataset(subset=subset, version='raw'))
n_wiki = len(wikitext_articles)
logging.info('Loaded %d graphs and %d wikitext articles in total.',
n_graphs, n_wiki)
  # Keep track of the article titles in the dataset. Unfortunately wikitext-103
  # has about 1% duplicated articles, so we need to take care of that.
retrieved_titles = set()
pairs = []
n_duplicates = 0
for a in wikitext_articles:
title = wikitext.normalize_title(a.title).replace(' ', '')
g = title2graph.get(title, None)
if g is not None:
if title not in retrieved_titles:
retrieved_titles.add(title)
pairs.append(io_tools.GraphTextPair(
center_node=g.center,
title=g.title,
edges=g.edges,
text=a.text))
else:
n_duplicates += 1
n_pairs = len(pairs)
logging.info('Matched %d/%d = %.1f%% of wikitext articles,'
' and %d/%d = %.1f%% of graphs.',
n_pairs, n_wiki, float(n_pairs) / n_wiki * 100,
n_pairs, n_graphs, float(n_pairs) / n_graphs * 100)
logging.info('Detected %d/%d = %.1f%% of duplicated wikitext articles.',
n_duplicates, n_wiki, float(n_duplicates) / n_wiki * 100)
io_tools.write_pairs_to_gzip_txt_file(
os.path.join(output_dir, f'{subset}.gz'), pairs)
def main(_):
for subset in ['train', 'valid', 'test']:
pair_graphs_with_wikitext(subset, FLAGS.freebase_dir, FLAGS.output_dir)
if __name__ == '__main__':
app.run(main)
| deepmind-research-master | wikigraphs/scripts/freebase_preprocess.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# WikiGraphs is licensed under the terms of the Creative Commons
# Attribution-ShareAlike 4.0 International (CC BY-SA 4.0) license.
#
# WikiText-103 data (unchanged) is licensed by Salesforce.com, Inc. under the
# terms of the Creative Commons Attribution-ShareAlike 4.0 International
# (CC BY-SA 4.0) license. You can find details about CC BY-SA 4.0 at:
#
# https://creativecommons.org/licenses/by-sa/4.0/legalcode
#
# Freebase data is licensed by Google LLC under the terms of the Creative
# Commons CC BY 4.0 license. You may obtain a copy of the License at:
#
# https://creativecommons.org/licenses/by/4.0/legalcode
#
# ==============================================================================
"""Compute the bleu score on generated text and the ground truth."""
import math
import os
import pickle
from absl import app
from absl import flags
from absl import logging
import numpy as np
import utils
flags.DEFINE_string('checkpoint_dir', '/tmp/transformerXL',
'Checkpoint directory to load saved samples.')
flags.DEFINE_string('dataset', 'freebase2wikitext', 'Which dataset the model'
                    ' is trained on, one of "wikitext", "freebase2wikitext".')
FLAGS = flags.FLAGS
def group_samples(samples, tokenizer):
"""Groups generated and ground truth texts."""
groups = {}
for i, row in enumerate(samples):
gt = tokenizer.decode(row['ground_truth_text'])
sample = tokenizer.decode(row['sample_tokens'])
if gt not in groups:
groups[gt] = (gt.split(), [sample.split()])
else:
groups[gt][-1].append(sample.split())
if (i + 1) % 100 == 0:
logging.info('Processed %d samples', i + 1)
return groups
def eval_samples(raw_samples, tokenizer):
"""Evaluates generated samples."""
gt_refs = []
samples = []
groups = group_samples(raw_samples, tokenizer)
groups = list(groups.values())
avg_group_size = np.mean([len(g[-1]) for g in groups])
logging.info('Average samples per example: %.2f', avg_group_size)
avg_group_size = int(math.ceil(avg_group_size))
  for gt, sample_group in groups:
    gt_refs.append(gt)
    samples.append(sample_group)
gt_bleu, gt_n_grams = utils.compute_bleu(samples, gt_refs)
logging.info('Processed %d samples in total.', sum([len(s) for s in samples]))
flat_samples = []
for s in samples:
flat_samples.extend(s)
logging.info('Average sample len: %.2f',
np.mean([len(s) for s in flat_samples]))
logging.info('Average ground-truth len: %.2f',
np.mean([len(gt) for gt in gt_refs]))
logging.info('Ground-truth BLEU: %6.2f, n-gram precision: (%s)',
gt_bleu * 100,
', '.join(['%6.2f%%' % (s * 100) for s in gt_n_grams]))
def main(_):
tokenizer = utils.init_tokenizer(FLAGS.dataset)
  samples_path = os.path.join(FLAGS.checkpoint_dir, 'samples.pkl')
  logging.info('Loading samples from %s', samples_path)
  with open(samples_path, 'rb') as f:
    samples = pickle.load(f)['samples']
eval_samples(samples, tokenizer)
if __name__ == '__main__':
app.run(main)
| deepmind-research-master | wikigraphs/scripts/compute_blue_score.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# WikiGraphs is licensed under the terms of the Creative Commons
# Attribution-ShareAlike 4.0 International (CC BY-SA 4.0) license.
#
# WikiText-103 data (unchanged) is licensed by Salesforce.com, Inc. under the
# terms of the Creative Commons Attribution-ShareAlike 4.0 International
# (CC BY-SA 4.0) license. You can find details about CC BY-SA 4.0 at:
#
# https://creativecommons.org/licenses/by-sa/4.0/legalcode
#
# Freebase data is licensed by Google LLC under the terms of the Creative
# Commons CC BY 4.0 license. You may obtain a copy of the License at:
#
# https://creativecommons.org/licenses/by/4.0/legalcode
#
# ==============================================================================
"""Transformer blocks."""
import math
from typing import Callable, Optional
import haiku as hk
from haiku import initializers as init
import jax
import jax.numpy as jnp
from wikigraphs.model.embedding import RelativePositionEmbedding
def conv1d(x, num_units, init_scale=0.02, with_bias=True):
return hk.Conv1D(
output_channels=num_units, kernel_shape=1, with_bias=with_bias,
w_init=init.RandomNormal(stddev=init_scale))(x)
def layer_norm(x):
return hk.LayerNorm(axis=-1, create_scale=True, create_offset=True)(x)
class FeedForwardBlock(hk.Module):
"""Feed forward block."""
def __init__(self,
dense_dim: int = 2100,
dropout_prob: float = 0.1,
init_scale: float = 1.,
name: Optional[str] = None):
"""Initializes a FeedForwardBlock.
Args:
dense_dim: feature size of the feedforward block.
dropout_prob: dropout probability.
init_scale: the initialization scale of the VarianceScaling used for the
feedforward layer.
name: Optional name for this Haiku module.
"""
super(FeedForwardBlock, self).__init__(name=name)
self._dense_dim = dense_dim
self._dropout_prob = dropout_prob
self._init_scale = init_scale
def __call__(self, x: jnp.ndarray) -> jnp.ndarray:
hiddens = x.shape[-1]
x = conv1d(x, num_units=self._dense_dim, init_scale=self._init_scale)
x = jax.nn.relu(x)
x = hk.dropout(hk.next_rng_key(), self._dropout_prob, x)
x = conv1d(x, num_units=hiddens, init_scale=self._init_scale)
return hk.dropout(hk.next_rng_key(), self._dropout_prob, x)
def get_reset_attention_mask(should_reset: jnp.ndarray) -> jnp.ndarray:
"""Maps a reset token vector into an attention mask that consists of blocks.
A sequence of should reset tokens such as:
[0, 1, 0, 1, 0, 0]
transforms into an attention mask such as:
[[1, 0, 0, 0, 0, 0],
[0, 1, 1, 0, 0, 0],
[0, 1, 1, 0, 0, 0],
[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 1, 1]]
Args:
should_reset: Reset tokens with shape [batch, timesteps].
Returns:
attention_mask: Attention mask with shape [batch, timesteps, timesteps].
"""
should_reset = jnp.cumsum(should_reset, axis=-1)
attention_mask = should_reset[:, :, None] == should_reset[:, None, :]
return attention_mask.astype(jnp.float32)
def attend(q: jnp.ndarray,
k: jnp.ndarray,
v: jnp.ndarray,
mask: Optional[jnp.ndarray] = None,
attend_fn:
Optional[Callable[[jnp.ndarray, jnp.ndarray], jnp.ndarray]] = None,
dropout_prob: float = 0.0,
extra_k: Optional[jnp.ndarray] = None,
extra_v: Optional[jnp.ndarray] = None,
extra_mask: Optional[jnp.ndarray] = None) -> jnp.ndarray:
"""Computes multi-head attention using the given query, key and value.
Args:
q: Query with shape [batch, q_timesteps, num_heads, head_dim].
k: Key with shape [batch, timesteps, num_heads, head_dim].
v: Value with shape [batch, timesteps, num_heads, head_dim].
mask: Attention mask to apply [batch, 1, q_timesteps, timesteps].
    attend_fn: An optionally defined attend function. The default attend_fn
      is jnp.einsum('bthd,bThd->bhtT', q, k).
dropout_prob: dropout probability on the attention weights.
extra_k: Extra keys to attend to, if provided. Note the extra keys and
values do not apply the specified attention_fn, but instead use the
default dot-product attention. [batch, timesteps_extra, num_heads,
head_dim].
extra_v: Extra values to attend to, if provided. [batch, timesteps_extra,
num_heads, head_dim].
extra_mask: Extra attention mask to apply on the extra inputs [batch, 1,
q_timesteps, timesteps_extra].
Returns:
Output of the attention with shape [batch, timesteps, hiddens]
"""
infinity_proxy = 1e9
batch, q_time, num_heads, head_dim = q.shape
hiddens = num_heads * head_dim
_, kv_time, _, _ = k.shape
expected_kv_shape = (batch, kv_time, num_heads, head_dim)
if k.shape != expected_kv_shape:
raise ValueError(
f'Expected key shape {expected_kv_shape} but got shape {k.shape}')
if v.shape != expected_kv_shape:
raise ValueError(
f'Expected value shape {expected_kv_shape} but got shape {v.shape}')
if attend_fn is not None:
attention = attend_fn(q, k)
else:
attention = jnp.einsum('bthd,bThd->bhtT', q, k)
if mask is not None:
attention = attention * mask - infinity_proxy * (1 - mask)
if extra_k is not None and extra_v is not None:
extra_time = extra_k.shape[1]
expected_extra_shape = (batch, extra_time, num_heads, head_dim)
if extra_k.shape != expected_extra_shape:
raise ValueError(
f'Expected extra key shape {expected_extra_shape} but got'
f' {extra_k.shape}')
if extra_v.shape != expected_extra_shape:
raise ValueError(
f'Expected extra value shape {expected_extra_shape} but got'
f' {extra_v.shape}')
# [B, H, t, T']
extra_attention = jnp.einsum('bthd,bThd->bhtT', q, extra_k)
if extra_mask is not None:
extra_attention = extra_attention * extra_mask - infinity_proxy * (
1 - extra_mask)
# [B, H, t, T+T']
attention = jnp.concatenate([attention, extra_attention], axis=-1)
# [B, T+T', H, D]
v = jnp.concatenate([v, extra_v], axis=1)
scale = 1. / math.sqrt(head_dim)
attention *= scale
normalized = jax.nn.softmax(attention)
if dropout_prob > 0:
normalized = hk.dropout(hk.next_rng_key(), dropout_prob, normalized)
summed = jnp.einsum('bhtT,bThd->bthd', normalized, v)
return jnp.reshape(summed, [batch, q_time, hiddens])
class Attention(hk.Module):
"""Attention with memory (https://arxiv.org/abs/1901.02860).
This implementation leverages the `state` in Haiku, in which the inputs are
stored as `states`. At each step, these states in memory are updated with a
rolling window.
"""
def __init__(self,
r_w_bias: Optional[jnp.ndarray] = None,
r_r_bias: Optional[jnp.ndarray] = None,
num_heads: int = 8,
init_scale: float = 1.0,
with_final_bias: bool = False,
final_init_scale_multiplier: float = 1.,
relative_pos_clamp_len: Optional[int] = None,
dropout_prob: float = 0.0,
name: Optional[str] = None):
"""Initializes a Attention module.
Args:
r_w_bias: global content bias.
r_r_bias: global positional bias.
num_heads: number of attention heads.
init_scale: the initialization scale of the VarianceScaling used for the
linear layer.
with_final_bias: whether to let final layer have biases.
final_init_scale_multiplier: how much to scale the initialization scale of
the output layer.
relative_pos_clamp_len: clamp length of the relative position embeddings.
dropout_prob: dropout probability.
name: Optional name for this Haiku module.
"""
super(Attention, self).__init__(name=name)
self._r_w_bias = r_w_bias
self._r_r_bias = r_r_bias
self._num_heads = num_heads
self._init_scale = init_scale
self._with_final_bias = with_final_bias
self._final_init_scale = final_init_scale_multiplier * init_scale
self._relative_pos_clamp_len = relative_pos_clamp_len
self._dropout_prob = dropout_prob
def _update_cache(self,
key: jnp.ndarray,
value: jnp.ndarray,
cache_steps: Optional[int] = None,
axis: int = 1) -> jnp.ndarray:
"""Update the cache stored in hk.state."""
cache_shape = list(value.shape)
value_steps = cache_shape[axis]
if cache_steps is not None:
cache_shape[axis] += cache_steps
cache = hk.get_state(
key, shape=cache_shape, dtype=value.dtype, init=jnp.zeros)
    # Overwrite at index 0, then rotate timesteps left so the cache stays
    # ordered oldest-to-newest, with what was just inserted at the end.
value = jax.lax.dynamic_update_slice(
cache, value, jnp.zeros(len(cache_shape), dtype=jnp.int32))
value = jnp.roll(value, -value_steps, axis)
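    # E.g. (hypothetical sizes) with cache_steps=2 and value_steps=3 the cache
    # has 5 slots; after the overwrite-and-roll, slots [0:2] hold the tail of
    # the old cache and slots [2:5] hold the newly inserted values.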
hk.set_state(key, value)
return value
def _update_memory(self,
mem: jnp.ndarray,
mask: jnp.ndarray,
input_length: int,
cache_steps: int,
should_reset: jnp.ndarray) -> jnp.ndarray:
"""Logic for using and updating cached activations."""
batch_size = mem.shape[0]
if cache_steps > 0:
# Tells us how much of the cache should be used.
cache_progress_idx = hk.get_state(
'cache_progress_idx', [batch_size], dtype=jnp.int32, init=jnp.zeros)
hk.set_state('cache_progress_idx', cache_progress_idx + input_length)
mem = self._update_cache('mem', mem, cache_steps=cache_steps)
if mask is None:
mask = jnp.ones((batch_size, 1, input_length, input_length))
cache_mask = (jnp.arange(cache_steps - 1, -1, -1)[None, None, None, :]
< cache_progress_idx[:, None, None, None])
cache_mask = jnp.broadcast_to(
cache_mask, (batch_size, 1, input_length, cache_steps))
mask = jnp.concatenate([cache_mask, mask], axis=-1)
if should_reset is not None:
if cache_steps > 0:
should_reset = self._update_cache('should_reset', should_reset,
cache_steps=cache_steps)
reset_mask = get_reset_attention_mask(should_reset)[:, None, :, :]
mask *= reset_mask[:, :, cache_steps:, :]
return mem, mask
def __call__(self,
x: jnp.ndarray,
mask: Optional[jnp.ndarray] = None,
should_reset: Optional[jnp.ndarray] = None,
cache_steps: int = 0,
extra: Optional[jnp.ndarray] = None,
extra_mask: Optional[jnp.ndarray] = None) -> jnp.ndarray:
"""Compute the multi-head attention.
Args:
x: input [batch, x_timesteps, in_dim].
mask: attention mask [batch, 1, x_timesteps, y_timesteps].
should_reset: reset marker [batch, timesteps].
cache_steps: number of timesteps in the cache.
extra: if provided should be extra key-value input
[batch, extra_timesteps, in_dim'].
extra_mask: if provided should be the mask for extra key-value input,
[batch, extra_timesteps].
Returns:
output: attention output [batch, x_timesteps, in_dim].
"""
hiddens_in = x.shape[-1]
steps = x.shape[1]
qkv_hiddens = hiddens_in
y, mask = self._update_memory(x, mask, steps, cache_steps, should_reset)
q = conv1d(x, qkv_hiddens, init_scale=self._init_scale, with_bias=False)
k = conv1d(y, qkv_hiddens, init_scale=self._init_scale, with_bias=False)
v = conv1d(y, qkv_hiddens, init_scale=self._init_scale, with_bias=False)
batch, q_time, _ = q.shape
_, kv_time, _ = k.shape
head_dim = qkv_hiddens // self._num_heads
assert qkv_hiddens % self._num_heads == 0, 'Head dim should be an integer.'
q = jnp.reshape(q, [batch, q_time, self._num_heads, head_dim])
k = jnp.reshape(k, [batch, kv_time, self._num_heads, head_dim])
v = jnp.reshape(v, [batch, kv_time, self._num_heads, head_dim])
attend_fn = RelativePositionEmbedding(
dim=qkv_hiddens, dropout_rate=self._dropout_prob,
r_w_bias=self._r_w_bias, r_r_bias=self._r_r_bias,
init_scale=self._init_scale, clamp_len=self._relative_pos_clamp_len)
if extra is not None:
extra_k = conv1d(extra, qkv_hiddens, init_scale=self._init_scale,
with_bias=False)
extra_v = conv1d(extra, qkv_hiddens, init_scale=self._init_scale,
with_bias=False)
extra_time = extra.shape[1]
extra_k = jnp.reshape(
extra_k, [batch, extra_time, self._num_heads, head_dim])
extra_v = jnp.reshape(
extra_v, [batch, extra_time, self._num_heads, head_dim])
if extra_mask is not None:
extra_mask = extra_mask[:, None, None, :]
attn_vec = attend(q, k, v, mask=mask, attend_fn=attend_fn,
dropout_prob=self._dropout_prob,
extra_k=extra_k, extra_v=extra_v, extra_mask=extra_mask)
else:
attn_vec = attend(q, k, v, mask=mask, attend_fn=attend_fn,
dropout_prob=self._dropout_prob)
attn_out = conv1d(attn_vec, hiddens_in, with_bias=self._with_final_bias,
init_scale=self._final_init_scale)
return hk.dropout(hk.next_rng_key(), self._dropout_prob, attn_out)
class SelfAttentionBlock(hk.Module):
"""Self attention block."""
def __init__(self,
r_w_bias: Optional[jnp.ndarray] = None,
r_r_bias: Optional[jnp.ndarray] = None,
causal: bool = False,
num_heads: int = 8,
dropout_prob: float = 0.1,
dropout_attn_prob: float = 0.0,
init_scale: float = 1.0,
relative_pos_clamp_len: Optional[int] = None,
name: Optional[str] = None):
"""Initializes a SelfAttentionBlock.
Args:
r_w_bias: global content bias.
r_r_bias: global positional bias.
causal: whether to apply a causal mask to the input.
num_heads: number of attention heads.
dropout_prob: dropout probability.
dropout_attn_prob: dropout probability of the attention module.
init_scale: the initialization scale of the VarianceScaling used for the
linear layer.
relative_pos_clamp_len: clamp length of the relative position embeddings.
name: Optional name for this Haiku module.
"""
super(SelfAttentionBlock, self).__init__(name=name)
self._r_w_bias = r_w_bias
self._r_r_bias = r_r_bias
self._causal = causal
self._num_heads = num_heads
self._dropout_prob = dropout_prob
self._dropout_attn_prob = dropout_attn_prob
self._init_scale = init_scale
self._relative_pos_clamp_len = relative_pos_clamp_len
def __call__(self,
x: jnp.ndarray,
mask: Optional[jnp.ndarray] = None,
should_reset: Optional[jnp.ndarray] = None,
cache_steps: int = 0,
extra: Optional[jnp.ndarray] = None,
extra_mask: Optional[jnp.ndarray] = None) -> jnp.ndarray:
"""Computes the outputs of the self attention block.
Args:
x: query input [batch, x_timesteps, in_dim].
mask: attention mask [batch, 1, 1, x_timesteps].
should_reset: reset marker [batch, timesteps].
cache_steps: number of timesteps in the cache.
extra: if provided should be extra key-value input
[batch, extra_timesteps, in_dim'].
extra_mask: if provided should be the mask for extra key-value input,
[batch, extra_timesteps].
Returns:
output: block output [batch, x_timesteps, in_dim].
"""
if self._causal:
timesteps = x.shape[1]
batch_size = x.shape[0]
t = jnp.arange(timesteps, dtype=jnp.int32)
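      # Lower-triangular mask: query position t may only attend to key
      # positions <= t.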
causal_mask = (t[:, None] >= t[None, :])[None, None, :, :]
causal_mask = causal_mask.astype(x.dtype)
if mask is None:
mask = jnp.broadcast_to(
causal_mask, (batch_size, 1, timesteps, timesteps))
else:
mask *= causal_mask
x = Attention(
self._r_w_bias,
self._r_r_bias,
num_heads=self._num_heads,
init_scale=self._init_scale,
relative_pos_clamp_len=self._relative_pos_clamp_len,
dropout_prob=self._dropout_attn_prob)(
x, mask=mask, should_reset=should_reset,
cache_steps=cache_steps, extra=extra, extra_mask=extra_mask)
else:
x = Attention(
self._r_w_bias,
self._r_r_bias,
num_heads=self._num_heads,
init_scale=self._init_scale,
dropout_prob=self._dropout_attn_prob)(
x, mask=mask, extra=extra, extra_mask=extra_mask)
return hk.dropout(hk.next_rng_key(), self._dropout_prob, x)
class GPT2Block(hk.Module):
"""GPT-2 style transformer block with memory."""
def __init__(self,
r_w_bias: Optional[jnp.ndarray] = None,
r_r_bias: Optional[jnp.ndarray] = None,
causal: bool = True,
dense_dim: int = 2100,
dropout_prob: float = 0.1,
dropout_attn_prob: float = 0.0,
num_heads: int = 8,
self_att_init_scale: float = 0.02,
dense_init_scale: float = 0.02,
relative_pos_clamp_len: Optional[int] = None,
name: Optional[str] = None):
"""Initializes a GPT2Block.
Args:
r_w_bias: global content bias.
r_r_bias: global positional bias.
causal: whether to apply a causal mask to the input.
dense_dim: feature size of the feedforward block.
dropout_prob: dropout probability.
dropout_attn_prob: dropout probability of the attention module.
num_heads: number of attention heads.
self_att_init_scale: the initialization scale of the VarianceScaling
used for the linear layer in the attention module.
dense_init_scale: the initialization scale of the VarianceScaling
used for the linear layer in the feedforward module.
relative_pos_clamp_len: clamp length of the relative position embeddings.
name: Optional name for this Haiku module.
"""
super(GPT2Block, self).__init__(name=name)
self._r_w_bias = r_w_bias
self._r_r_bias = r_r_bias
self._causal = causal
self._dense_dim = dense_dim
self._dropout_prob = dropout_prob
self._dropout_attn_prob = dropout_attn_prob
self._num_heads = num_heads
self._self_att_init_scale = self_att_init_scale
self._dense_init_scale = dense_init_scale
self._relative_pos_clamp_len = relative_pos_clamp_len
def __call__(self,
x: jnp.ndarray,
mask: Optional[jnp.ndarray] = None,
is_training: bool = True,
should_reset: Optional[jnp.ndarray] = None,
cache_steps: int = 0,
extra: Optional[jnp.ndarray] = None,
extra_mask: Optional[jnp.ndarray] = None) -> jnp.ndarray:
"""Computes the outputs of the GPT-2 block.
Args:
x: query input [batch, x_timesteps, in_dim].
mask: attention mask [batch, 1, 1, x_timesteps].
is_training: whether the current stage is training or not.
should_reset: reset marker [batch, timesteps].
cache_steps: number of timesteps in the cache.
extra: if provided should be extra key-value input
[batch, extra_timesteps, in_dim'].
extra_mask: if provided should be the mask for extra key-value input,
[batch, extra_timesteps].
Returns:
output: block output [batch, x_timesteps, in_dim].
"""
dropout_prob = self._dropout_prob if is_training else 0.0
dropout_attn_prob = self._dropout_attn_prob if is_training else 0.0
x = layer_norm(x + SelfAttentionBlock(
self._r_w_bias,
self._r_r_bias,
causal=self._causal,
num_heads=self._num_heads,
dropout_prob=dropout_prob,
dropout_attn_prob=dropout_attn_prob,
init_scale=self._self_att_init_scale,
relative_pos_clamp_len=self._relative_pos_clamp_len)(
x, mask=mask, should_reset=should_reset,
cache_steps=cache_steps, extra=extra, extra_mask=extra_mask))
x = layer_norm(x + FeedForwardBlock(
dense_dim=self._dense_dim,
dropout_prob=dropout_prob,
init_scale=self._dense_init_scale)(x))
return x
| deepmind-research-master | wikigraphs/wikigraphs/model/transformer_block.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# WikiGraphs is licensed under the terms of the Creative Commons
# Attribution-ShareAlike 4.0 International (CC BY-SA 4.0) license.
#
# WikiText-103 data (unchanged) is licensed by Salesforce.com, Inc. under the
# terms of the Creative Commons Attribution-ShareAlike 4.0 International
# (CC BY-SA 4.0) license. You can find details about CC BY-SA 4.0 at:
#
# https://creativecommons.org/licenses/by-sa/4.0/legalcode
#
# Freebase data is licensed by Google LLC under the terms of the Creative
# Commons CC BY 4.0 license. You may obtain a copy of the License at:
#
# https://creativecommons.org/licenses/by/4.0/legalcode
#
# ==============================================================================
"""Tests for wikigraphs.model.transformer."""
from absl import logging
from absl.testing import absltest
import haiku as hk
import jax
import jax.numpy as jnp
import jraph
import numpy as np
import optax
from wikigraphs.model import embedding
from wikigraphs.model import transformer as models
def tree_size(nest):
return sum(x.size for x in jax.tree_util.tree_leaves(nest))
class TransformerXlTest(absltest.TestCase):
def test_transformer_param_count(self):
seqs = np.array([[1, 2, 3, 0, 0],
[3, 3, 5, 1, 2]], dtype=np.int32)
x = seqs[:, :-1]
y = seqs[:, 1:]
vocab_size = 267_735
def forward(inputs, labels):
input_mask = (labels != 0).astype(jnp.float32)
model = models.TransformerXL(
vocab_size=vocab_size,
emb_dim=210,
num_layers=2,
num_heads=10,
dropout_prob=0.0,
dropout_attn_prob=0.0,
self_att_init_scale=0.02,
dense_init_scale=0.02,
dense_dim=2100,
cutoffs=(20000, 40000, 200000), # WikiText-103
relative_pos_clamp_len=None,
)
return model.loss(inputs, labels, mask=input_mask, cache_steps=2)
init_fn, apply_fn = hk.transform_with_state(forward)
key = hk.PRNGSequence(8)
params, state = init_fn(next(key), x, y)
out, _ = apply_fn(params, state, next(key), x, y)
loss, metrics = out
logging.info('loss: %g', loss)
logging.info('metrics: %r', metrics)
param_count = tree_size(params)
self.assertEqual(param_count, 58_704_438)
def test_transformer_with_extra_runs(self):
extra = np.array([[1, 1, 0, 0],
[2, 2, 2, 2],
[3, 3, 3, 0]], dtype=np.int32)
seqs = np.array([[1, 2, 3, 0, 0],
[2, 4, 5, 6, 0],
[3, 3, 5, 1, 2]], dtype=np.int32)
x = seqs[:, :-1]
y = seqs[:, 1:]
vocab_size = seqs.max() + 1
extra_vocab_size = extra.max() + 1
def forward(inputs, labels, extra):
input_mask = (labels != 0).astype(jnp.float32)
extra_mask = (extra != 0).astype(jnp.float32)
extra = hk.Embed(vocab_size=extra_vocab_size, embed_dim=16)(extra)
model = models.TransformerXL(
vocab_size=vocab_size,
emb_dim=16,
num_layers=2,
num_heads=4,
cutoffs=[],
)
return model.loss(inputs, labels, mask=input_mask,
extra=extra, extra_mask=extra_mask)
init_fn, apply_fn = hk.transform_with_state(forward)
key = hk.PRNGSequence(8)
params, state = init_fn(next(key), x, y, extra)
out, _ = apply_fn(params, state, next(key), x, y, extra)
loss, metrics = out
logging.info('loss: %g', loss)
logging.info('metrics: %r', metrics)
def test_graph_embedding_model_runs(self):
graph = jraph.GraphsTuple(
nodes=np.array([[0, 1, 1],
[1, 2, 0],
[0, 3, 0],
[0, 4, 4]], dtype=np.float32),
edges=np.array([[1, 1],
[2, 2],
[3, 3]], dtype=np.float32),
senders=np.array([0, 1, 2], dtype=np.int32),
receivers=np.array([1, 2, 3], dtype=np.int32),
n_node=np.array([4], dtype=np.int32),
n_edge=np.array([3], dtype=np.int32),
globals=None)
embed_dim = 3
def forward(graph):
return embedding.GraphEmbeddingModel(embed_dim=3, num_layers=2)(graph)
init_fn, apply_fn = hk.without_apply_rng(hk.transform(forward))
key = hk.PRNGSequence(8)
params = init_fn(next(key), graph)
out = apply_fn(params, graph)
self.assertEqual(out.nodes.shape, (graph.nodes.shape[0], embed_dim))
self.assertEqual(out.edges.shape, (graph.edges.shape[0], embed_dim))
np.testing.assert_array_equal(out.senders, graph.senders)
np.testing.assert_array_equal(out.receivers, graph.receivers)
np.testing.assert_array_equal(out.n_node, graph.n_node)
def test_unpack_and_pad(self):
x = np.array([1, 1, 2, 2, 2, 3, 4, 4], dtype=np.float32)
s = np.array([2, 3, 1, 2], dtype=np.int32)
tensors, mask = models.unpack_and_pad(x, s, pad_size=s.max(), pad_value=0)
np.testing.assert_array_equal(
tensors,
[[1, 1, 0],
[2, 2, 2],
[3, 0, 0],
[4, 4, 0]])
np.testing.assert_array_equal(
mask,
[[1, 1, 0],
[1, 1, 1],
[1, 0, 0],
[1, 1, 0]])
# [n, 1] tensor
x = np.array([1, 1, 2, 2, 2, 3, 4, 4], dtype=np.float32)[:, None]
s = np.array([2, 3, 1, 2], dtype=np.int32)
tensors, mask = models.unpack_and_pad(x, s, pad_size=s.max(), pad_value=0)
np.testing.assert_array_equal(
tensors,
np.array([[1, 1, 0],
[2, 2, 2],
[3, 0, 0],
[4, 4, 0]])[:, :, None])
np.testing.assert_array_equal(
mask,
[[1, 1, 0],
[1, 1, 1],
[1, 0, 0],
[1, 1, 0]])
def test_graph_conditioned_transformer_runs(self):
graphs = jraph.GraphsTuple(
nodes=np.ones((4, 3), dtype=np.float32),
edges=np.ones((3, 1), dtype=np.float32),
senders=np.array([0, 2, 3], dtype=np.int32),
receivers=np.array([1, 3, 2], dtype=np.int32),
n_node=np.array([2, 2], dtype=np.int32),
n_edge=np.array([1, 2], dtype=np.int32),
globals=None,
)
seqs = np.array([[1, 1, 0],
[2, 2, 2]], dtype=np.int32)
vocab_size = seqs.max() + 1
embed_dim = 8
x = seqs[:, :-1]
y = seqs[:, 1:]
def forward(graphs, inputs, labels):
graphs = models.GraphEmbeddingModel(embed_dim=embed_dim,
num_layers=2)(graphs)
extra, extra_mask = models.unpack_and_pad(graphs.nodes,
graphs.n_node,
graphs.n_node.max())
input_mask = (labels != 0).astype(jnp.float32)
transformer = models.TransformerXL(vocab_size=vocab_size,
emb_dim=embed_dim,
num_layers=2,
num_heads=4,
cutoffs=[])
return transformer.loss(inputs, labels, mask=input_mask, extra=extra,
extra_mask=extra_mask)
init_fn, apply_fn = hk.transform_with_state(forward)
key = hk.PRNGSequence(8)
params, state = init_fn(next(key), graphs, x, y)
out, _ = apply_fn(params, state, next(key), graphs, x, y)
loss, metrics = out
logging.info('loss: %g', loss)
logging.info('metrics: %r', metrics)
def test_graph_conditioned_transformer_learns(self):
graphs = jraph.GraphsTuple(
nodes=np.ones((4, 3), dtype=np.float32),
edges=np.ones((3, 1), dtype=np.float32),
senders=np.array([0, 2, 3], dtype=np.int32),
receivers=np.array([1, 3, 2], dtype=np.int32),
n_node=np.array([2, 2], dtype=np.int32),
n_edge=np.array([1, 2], dtype=np.int32),
globals=None,
)
seqs = np.array([[1, 2, 2, 0],
[1, 3, 3, 3]], dtype=np.int32)
vocab_size = seqs.max() + 1
embed_dim = 8
max_graph_size = graphs.n_node.max()
logging.info('Training seqs: %r', seqs)
x = seqs[:, :-1]
y = seqs[:, 1:]
def model_fn(vocab_size, embed_dim):
return models.Graph2TextTransformer(
vocab_size=vocab_size,
emb_dim=embed_dim,
num_layers=2,
num_heads=4,
cutoffs=[],
gnn_embed_dim=embed_dim,
gnn_num_layers=2)
def forward(graphs, inputs, labels, max_graph_size):
input_mask = (labels != 0).astype(jnp.float32)
return model_fn(vocab_size, embed_dim).loss(
graphs, max_graph_size, False, inputs, labels, mask=input_mask)
init_fn, apply_fn = hk.transform_with_state(forward)
rng = hk.PRNGSequence(8)
params, state = init_fn(next(rng), graphs, x, y, max_graph_size)
def apply(*args, **kwargs):
out, state = apply_fn(*args, **kwargs)
return out[0], (out[1], state)
apply = jax.jit(apply, static_argnums=6)
optimizer = optax.chain(
optax.scale_by_adam(),
optax.scale(-1e-3))
opt_state = optimizer.init(params)
for i in range(500):
(loss, model_state), grad = jax.value_and_grad(apply, has_aux=True)(
params, state, next(rng), graphs, x, y, max_graph_size)
metrics, state = model_state
updates, opt_state = optimizer.update(grad, opt_state, params)
params = optax.apply_updates(params, updates)
if (i + 1) % 100 == 0:
logging.info(
'Step %d, %r', i + 1, {k: float(v) for k, v in metrics.items()})
logging.info('Loss: %.8f', loss)
self.assertLess(loss, 1.0)
def test_bow_transformer_runs(self):
bow = np.array([[0, 0, 1, 0, 2, 0, 0, 1],
[0, 1, 0, 0, 1, 0, 1, 0],
[1, 0, 0, 0, 1, 0, 0, 1]], dtype=np.int32)
seqs = np.array([[1, 2, 3, 0, 0],
[2, 4, 5, 6, 0],
[3, 3, 5, 1, 2]], dtype=np.int32)
x = seqs[:, :-1]
y = seqs[:, 1:]
vocab_size = seqs.max() + 1
def forward(bow, inputs, labels):
model = models.Bow2TextTransformer(
vocab_size=vocab_size,
emb_dim=16,
num_layers=2,
num_heads=4,
cutoffs=[])
return model.loss(bow, inputs, labels)
init_fn, apply_fn = hk.transform_with_state(forward)
key = hk.PRNGSequence(8)
params, state = init_fn(next(key), bow, x, y)
out, _ = apply_fn(params, state, next(key), bow, x, y)
loss, metrics = out
logging.info('loss: %g', loss)
logging.info('metrics: %r', metrics)
def test_bow_transformer_learns(self):
bow = np.array([[0, 0, 1, 0, 2, 0, 0, 1],
[0, 1, 0, 0, 1, 0, 1, 0],
[1, 0, 0, 0, 1, 0, 0, 1]], dtype=np.int32)
seqs = np.array([[1, 2, 2, 3, 0, 0],
[1, 2, 4, 5, 6, 0],
[1, 3, 3, 5, 4, 2]], dtype=np.int32)
x = seqs[:, :-1]
y = seqs[:, 1:]
vocab_size = seqs.max() + 1
def model_fn():
return models.Bow2TextTransformer(
vocab_size=vocab_size,
emb_dim=16,
num_layers=2,
num_heads=4,
cutoffs=[])
def loss_fn(bow, inputs, labels):
mask = (labels != 0).astype(jnp.float32)
return model_fn().loss(bow, inputs, labels, mask=mask)
init_fn, apply_fn = hk.transform_with_state(loss_fn)
key = hk.PRNGSequence(8)
params, state = init_fn(next(key), bow, x, y)
def apply(*args, **kwargs):
out, state = apply_fn(*args, **kwargs)
return out[0], (out[1], state)
value_and_grad = jax.jit(jax.value_and_grad(apply, has_aux=True))
optimizer = optax.chain(
optax.scale_by_adam(),
optax.scale(-1e-3))
opt_state = optimizer.init(params)
for i in range(800):
(loss, model_state), grad = value_and_grad(
params, state, next(key), bow, x, y)
metrics, state = model_state
updates, opt_state = optimizer.update(grad, opt_state, params)
params = optax.apply_updates(params, updates)
if (i + 1) % 100 == 0:
logging.info('Step %d, %r', i + 1,
{k: float(v) for k, v in metrics.items()})
logging.info('Loss: %.8f', loss)
self.assertLess(loss, 0.1)
if __name__ == '__main__':
absltest.main()
| deepmind-research-master | wikigraphs/wikigraphs/model/transformer_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# WikiGraphs is licensed under the terms of the Creative Commons
# Attribution-ShareAlike 4.0 International (CC BY-SA 4.0) license.
#
# WikiText-103 data (unchanged) is licensed by Salesforce.com, Inc. under the
# terms of the Creative Commons Attribution-ShareAlike 4.0 International
# (CC BY-SA 4.0) license. You can find details about CC BY-SA 4.0 at:
#
# https://creativecommons.org/licenses/by-sa/4.0/legalcode
#
# Freebase data is licensed by Google LLC under the terms of the Creative
# Commons CC BY 4.0 license. You may obtain a copy of the License at:
#
# https://creativecommons.org/licenses/by/4.0/legalcode
#
# ==============================================================================
"""Transformer embedding modules."""
from typing import List, Optional
import haiku as hk
from haiku import initializers as init
import jax
import jax.numpy as jnp
import jraph
from wikigraphs.model import graph_net as gn
def get_pos_start(timesteps: int, batch_size: int) -> jnp.ndarray:
"""Find the right slice of positional embeddings for incremental sampling."""
pos_start = hk.get_state(
'cache_progress_idx', [batch_size], dtype=jnp.int32, init=jnp.zeros)
hk.set_state('cache_progress_idx', pos_start + timesteps)
return pos_start
class SinusoidalPositionEmbedding(hk.Module):
"""Position encoding, using mixture of sinusoidal signals."""
def __init__(self,
dim: int,
cache_steps: int = 0,
reverse_order: bool = False,
clamp_len: Optional[int] = None,
name: Optional[str] = None):
"""Initialize a SinusoidalPositionEmbedding.
Args:
dim: Embedding dimension.
cache_steps: The length of the memory.
reverse_order: If set to True, position index is reversed.
clamp_len: position beyond clamp_len will be reset to clamp_len, default
to not clamping.
name: Optional name for this Haiku module.
"""
super(SinusoidalPositionEmbedding, self).__init__(name=name)
self._dim = dim
self._cache_steps = cache_steps
self._reverse_order = reverse_order
self._clamp_len = clamp_len
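    # Standard sinusoidal frequencies: 1 / 10000^(2i / dim) for
    # i = 0, ..., dim // 2 - 1.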
self._inv_freq = 1.0 / (
10000 ** (jnp.arange(0, dim, 2).astype(jnp.float32) / dim))
def __call__(self, timesteps: int, batch_size: int) -> jnp.ndarray:
"""Computes the sinusoidal position embedding.
Args:
timesteps: The length of the sequence.
batch_size: The size of the batch.
Returns:
Sinusoidal position embedding.
"""
full_length = timesteps + self._cache_steps
if self._reverse_order:
positions = jnp.arange(full_length - 1, -1, -1)
positions = jnp.repeat(positions[None, :], batch_size, axis=0)
else:
if self._cache_steps > 0:
positions = (get_pos_start(timesteps, batch_size)[:, None]
+ jnp.arange(timesteps)[None, :])
else:
positions = jnp.arange(0, full_length)
positions = jnp.repeat(positions[None, :], batch_size, axis=0)
if self._clamp_len is not None:
positions = jnp.minimum(positions, self._clamp_len)
scaled_time = positions[:, :, None] * self._inv_freq[None, None, :]
return jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=2)
def relative_shift(x: jnp.ndarray) -> jnp.ndarray:
"""Shift the relative logits."""
x_shape = list(x.shape)
x = jnp.pad(x, [[0, 0], [0, 0], [0, 0], [1, 0]])
x = jnp.reshape(
x, [x_shape[0], x_shape[1], x_shape[3] + 1, x_shape[2]])[:, :, 1:, :]
x = jnp.reshape(x, x_shape)
return x
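# A worked example (hypothetical values) of the Transformer-XL shift trick
# above: for x of shape [1, 1, 2, 4] with rows [1, 2, 3, 4] and [5, 6, 7, 8],
# relative_shift(x) returns rows [2, 3, 4, 0] and [5, 6, 7, 8]. Row t is
# shifted left by (T - 1 - t) positions, realigning the relative-position
# logits so each query row lines up with its key offsets; entries that spill
# past the boundary are ignored under the attention mask.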
class RelativePositionEmbedding(hk.Module):
"""Position encoding, using relative positions than absolute positions."""
def __init__(self,
dim: int,
dropout_rate: float,
r_w_bias: jnp.ndarray,
r_r_bias: jnp.ndarray,
init_scale: float = 0.02,
clamp_len: Optional[int] = None,
name: Optional[str] = None):
"""Initialize a RelativePositionEmbedding.
Args:
dim: Embedding dimension.
dropout_rate: dropout rate.
r_w_bias: global content bias.
r_r_bias: global positional bias.
init_scale: the initialization scale of the RandomNormal used for the
linear layer.
clamp_len: position beyond clamp_len will be reset to clamp_len, default
to not clamping.
name: Optional name for this Haiku module.
"""
super(RelativePositionEmbedding, self).__init__(name=name)
self._dim = dim
self._dropout_rate = dropout_rate
self._r_w_bias = r_w_bias
self._r_r_bias = r_r_bias
self._init_scale = init_scale
self._sinusoidal_pos_emb = SinusoidalPositionEmbedding(
dim=dim,
reverse_order=True,
clamp_len=clamp_len,
name=name)
def __call__(self, q: jnp.ndarray, k: jnp.ndarray) -> jnp.ndarray:
"""Computes the relative position embedding.
Args:
q: The query.
k: The key.
Returns:
Relative position embedding.
"""
# Use key instead of query to obtain the length.
batch_size, key_length, num_heads, head_dim = list(k.shape)
# Content based addressing and global content bias
content_score = jnp.einsum('bthd,bThd->bhtT', q + self._r_w_bias, k)
# Relative position encoding
positional_encodings = self._sinusoidal_pos_emb(key_length, batch_size)
positional_encodings = hk.dropout(hk.next_rng_key(), self._dropout_rate,
positional_encodings)
rel_pos_emb = hk.Conv1D(
output_channels=self._dim, kernel_shape=1, with_bias=False,
w_init=init.RandomNormal(stddev=self._init_scale))(positional_encodings)
rel_pos_emb = jnp.reshape(rel_pos_emb, [
batch_size, key_length, num_heads, head_dim])
# Content dependent positional bias and global positional bias
rel_pos_score = jnp.einsum('bthd,bThd->bhtT', q + self._r_r_bias,
rel_pos_emb)
rel_pos_score = relative_shift(rel_pos_score)
assert content_score.shape == rel_pos_score.shape
return content_score + rel_pos_score
def hierarchical_logprobs(
logits: jnp.ndarray,
class_logits: jnp.ndarray,
cutoffs: List[int]) -> jnp.ndarray:
"""Hierarchical log-probs for adaptive softmax."""
sizes = [y - x for x, y in zip(cutoffs[:-1], cutoffs[1:])]
num_tails = len(sizes) - 1
split_logits = jnp.split(logits, cutoffs[1:-1], axis=-1)
all_head_logits = jnp.concatenate([split_logits[0], class_logits], -1)
# Mask out item 0, the NULL token
all_head_logits += jnp.concatenate(
[jnp.ones([1], dtype=logits.dtype) * -10,
jnp.zeros([sizes[0] + num_tails - 1], dtype=logits.dtype)], 0)
all_head_logprobs = jax.nn.log_softmax(all_head_logits)
head_logprobs, class_logprobs = jnp.split(all_head_logprobs,
[sizes[0]], axis=-1)
tail_logprobs = []
for i, tail_size in enumerate(sizes[1:]): # pylint: disable=unused-variable
tail_logprobs += [jax.nn.log_softmax(split_logits[i + 1])
+ class_logprobs[..., [i]]]
return jnp.concatenate([head_logprobs] + tail_logprobs, -1)
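# Illustrative (hypothetical) sizes: with vocab_size 10 and cutoffs
# [0, 4, 7, 10], `sizes` is [4, 3, 3]; the head softmax covers tokens 0-3 plus
# one class logit per tail cluster, and tokens in cluster [4, 7) receive
# log_softmax(tail_logits) + class_logprobs[..., [0]] (similarly for [7, 10)
# with class index 1).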
class AdaptiveSoftmaxEmbedding(hk.Module):
"""Adaptive inputs and softmax (https://arxiv.org/abs/1809.10853)."""
def __init__(self,
dim: int,
vocab_size: int,
cutoffs: List[int],
tail_shrink_factor: int = 4,
hierarchical: bool = True,
init_std: float = 0.02,
init_proj_std: float = 0.01,
dtype: jnp.dtype = jnp.float32,
name: Optional[str] = None):
"""Initialize a AdaptiveSoftmaxEmbedding.
Args:
dim: dimensionality of the hidden space.
vocab_size: the size of the vocabulary.
cutoffs: the cutoff indices of the vocabulary used for the adaptive
softmax embedding.
tail_shrink_factor: how many times to shrink the hidden dimensionality
for low-frequency vocabulary after each cutoff.
hierarchical: whether to use hierarchical softmax.
init_std: standard deviation of the Normal distribution used to initialize
the embedding weights.
init_proj_std: standard deviation of the Normal distribution used to
initialize the projection weights.
dtype: Optional data type default to jnp.float32.
name: Optional name for this Haiku module.
"""
super(AdaptiveSoftmaxEmbedding, self).__init__(name=name)
self._hidden_size = dim
self._vocab_size = vocab_size
self._cutoffs = [0] + list(cutoffs) + [self._vocab_size]
self._tail_shrink_factor = tail_shrink_factor
self._hierarchical = hierarchical
self._dtype = dtype
self._embeddings = []
self._projections = []
self._bias = hk.get_parameter(
'bias', [self._vocab_size], dtype=self._dtype, init=jnp.zeros)
l_cutoffs = self._cutoffs[:-1]
r_cutoffs = self._cutoffs[1:]
for i, (l_cutoff, r_cutoff) in enumerate(zip(l_cutoffs, r_cutoffs)):
hidden_size = self._hidden_size // (self._tail_shrink_factor ** i)
embedding = hk.get_parameter(
f'embeddings_{l_cutoff}_{r_cutoff}',
[r_cutoff - l_cutoff, hidden_size],
dtype=self._dtype,
init=hk.initializers.RandomNormal(stddev=init_std))
self._embeddings += [embedding]
if self._tail_shrink_factor != 1:
projection = hk.get_parameter(
f'projection_{l_cutoff}_{r_cutoff}',
[hidden_size, self._hidden_size],
dtype=self._dtype,
init=hk.initializers.RandomNormal(stddev=init_proj_std))
self._projections += [projection]
if self._tail_shrink_factor != 1:
self._output_projection = hk.get_parameter(
'output_head_projection',
[self._hidden_size, self._hidden_size],
dtype=self._dtype,
init=hk.initializers.RandomNormal(stddev=init_proj_std))
if self._hierarchical:
self._class_weights = hk.get_parameter(
'tail_class_weights',
[self._hidden_size, len(cutoffs)],
init=hk.initializers.RandomNormal(stddev=init_std))
self._class_bias = hk.get_parameter(
'tail_class_bias',
[len(cutoffs)],
dtype=self._dtype,
init=jnp.zeros)
@hk.transparent
def build_embeddings(self):
"""Builds input embeddings."""
if self._projections:
embedding_mat = [
jnp.dot(emb, proj) for emb, proj in zip(self._embeddings,
self._projections)]
else:
embedding_mat = self._embeddings
input_embeddings = jnp.concatenate(embedding_mat, 0)
return input_embeddings
@hk.transparent
def build_output_embeddings(self):
"""Builds separate output embeddings."""
if self._projections:
projections = [self._output_projection] + self._projections[1:]
embedding_mat = [jnp.dot(emb, proj)
for emb, proj in zip(self._embeddings, projections)]
else:
embedding_mat = self._embeddings
output_embeddings = jnp.concatenate(embedding_mat, 0)
return jnp.transpose(output_embeddings)
def embed_input(self, input_tokens: jnp.ndarray) -> jnp.ndarray:
"""Embeds the input."""
assert jnp.issubdtype(input_tokens.dtype, jnp.integer)
input_embeddings = self.build_embeddings()
embedded_inputs = input_embeddings[input_tokens]
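    # Scale embeddings by sqrt(hidden_size), following the original
    # Transformer.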
return embedded_inputs * self._hidden_size ** 0.5
def embed_output(self, inputs: jnp.ndarray) -> jnp.ndarray:
"""Outputs logits."""
output_embs = self.build_output_embeddings()
logits = jnp.einsum('btd,dv->btv', inputs, output_embs) + self._bias
if self._hierarchical:
class_logits = jnp.dot(inputs, self._class_weights) + self._class_bias
logprobs = hierarchical_logprobs(logits, class_logits, self._cutoffs)
return logprobs
else:
return logits
class GraphEmbeddingModel(hk.Module):
"""A single graph network for embedding graph data."""
def __init__(self,
embed_dim: int,
num_layers: int,
msg_hidden_size_factor: int = 2,
use_layer_norm: bool = False,
name: Optional[str] = None):
"""Constructor.
Args:
embed_dim: node embedding size.
num_layers: number of message passing layers to use.
msg_hidden_size_factor: size of the message network hiddens as a factor
of embed_dim.
use_layer_norm: whether to apply layer norm on node updates.
name: optional name for this module.
"""
super().__init__(name=name)
self._embed_dim = embed_dim
self._num_layers = num_layers
self._msg_hidden_size_factor = msg_hidden_size_factor
self._use_layer_norm = use_layer_norm
def __call__(self, graphs: jraph.GraphsTuple) -> jraph.GraphsTuple:
"""Compute embeddings for each node in the graphs.
Args:
graphs: a set of graphs batched into a single graph. The nodes and edges
are represented as feature tensors.
Returns:
graphs: new graph with node embeddings updated (shape [n_nodes,
embed_dim]).
"""
nodes = hk.Linear(self._embed_dim)(graphs.nodes)
edges = hk.Linear(self._embed_dim)(graphs.edges)
nodes = hk.LayerNorm(axis=-1, create_scale=True, create_offset=True)(
jax.nn.gelu(nodes))
edges = hk.LayerNorm(axis=-1, create_scale=True, create_offset=True)(
jax.nn.gelu(edges))
graphs = graphs._replace(nodes=nodes, edges=edges)
graphs = gn.SimpleGraphNet(
num_layers=self._num_layers,
msg_hidden_size_factor=self._msg_hidden_size_factor,
layer_norm=self._use_layer_norm)(graphs)
return graphs
| deepmind-research-master | wikigraphs/wikigraphs/model/embedding.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# WikiGraphs is licensed under the terms of the Creative Commons
# Attribution-ShareAlike 4.0 International (CC BY-SA 4.0) license.
#
# WikiText-103 data (unchanged) is licensed by Salesforce.com, Inc. under the
# terms of the Creative Commons Attribution-ShareAlike 4.0 International
# (CC BY-SA 4.0) license. You can find details about CC BY-SA 4.0 at:
#
# https://creativecommons.org/licenses/by-sa/4.0/legalcode
#
# Freebase data is licensed by Google LLC under the terms of the Creative
# Commons CC BY 4.0 license. You may obtain a copy of the License at:
#
# https://creativecommons.org/licenses/by/4.0/legalcode
#
# ==============================================================================
"""Graph net utils."""
from typing import Union, List, Optional
import haiku as hk
import jax
import jax.numpy as jnp
import jraph
import numpy as np
ArrayType = Union[np.ndarray, jnp.ndarray]
def pad_size(in_size: int) -> int:
  """Returns the smallest power of 2 greater than or equal to `in_size`."""
  out_size = 1
  while out_size < in_size:
    out_size *= 2
  return out_size
def pad_graphs(
graphs: jraph.GraphsTuple,
pad_n_nodes: Optional[int] = None,
pad_n_edges: Optional[int] = None) -> jraph.GraphsTuple:
"""Pad graphs to have a canonical number of nodes and edges.
  Here we pad the number of nodes and number of edges to powers of 2 by adding
  a placeholder graph to the end of the batch, so that the batch gets at most
  2x as large as before, and the number of graphs increases by 1.
  Note this method always adds at least one new node to the placeholder graph,
  to make sure any padding edges added are valid.
Args:
graphs: a batch of graphs.
pad_n_nodes: (optional) number of nodes to pad to.
pad_n_edges: (optional) number of edges to pad to.
Returns:
padded: the input batch padded to canonical sizes.
"""
n_nodes, node_dim = graphs.nodes.shape
n_edges, edge_dim = graphs.edges.shape
# Add at least one extra node to the placeholder graph.
if pad_n_nodes is None:
pad_n_nodes = pad_size(n_nodes + 1)
if pad_n_edges is None:
pad_n_edges = pad_size(n_edges)
nodes = np.concatenate([
graphs.nodes,
np.zeros((pad_n_nodes - n_nodes, node_dim), dtype=graphs.nodes.dtype)
], axis=0)
edges = np.concatenate([
graphs.edges,
np.zeros((pad_n_edges - n_edges, edge_dim), dtype=graphs.edges.dtype)
], axis=0)
# Add padding edges
senders = np.concatenate([
graphs.senders,
np.full(pad_n_edges - n_edges, n_nodes, dtype=graphs.senders.dtype)
], axis=0)
receivers = np.concatenate([
graphs.receivers,
np.full(pad_n_edges - n_edges, n_nodes, dtype=graphs.receivers.dtype)
], axis=0)
n_node = np.concatenate([
graphs.n_node, np.full(1, pad_n_nodes - n_nodes)], axis=0)
n_edge = np.concatenate([
graphs.n_edge, np.full(1, pad_n_edges - n_edges)], axis=0)
return jraph.GraphsTuple(
nodes=nodes, edges=edges, senders=senders, receivers=receivers,
n_node=n_node, n_edge=n_edge, globals=None)
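# Sketch of the behaviour above with hypothetical sizes: a batch with 5 nodes
# and 3 edges is padded to pad_size(5 + 1) = 8 nodes and pad_size(3) = 4
# edges; the 3 extra nodes and 1 extra padding edge (pointing at the first
# placeholder node) form the single appended placeholder graph, so n_node
# gains exactly one entry.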
def batch_graphs_by_device(
graphs: List[jraph.GraphsTuple],
num_devices: int
) -> List[jraph.GraphsTuple]:
"""Batch a list of graphs into num_devices batched graphs.
The input graphs are grouped into num_devices groups. Within each group the
graphs are merged. This is needed for parallelizing the graphs using pmap.
Args:
graphs: a list of graphs to be merged.
num_devices: the number of local devices.
Returns:
graph: a size num_devices list of merged graphs.
"""
bs = len(graphs)
assert bs % num_devices == 0, (
'Batch size {} is not divisible by {} devices.'.format(bs, num_devices))
bs_per_device = bs // num_devices
graphs_on_devices = []
for i in range(num_devices):
graphs_on_device_i = graphs[i*bs_per_device:(i+1)*bs_per_device]
graphs_on_device_i = jraph.batch(graphs_on_device_i)
graphs_on_devices.append(graphs_on_device_i)
return graphs_on_devices
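# For example (hypothetical setup): 8 graphs on 4 local devices yield 4 merged
# GraphsTuples of 2 graphs each, which can then be padded and stacked for
# jax.pmap.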
def pad_graphs_by_device(graphs: List[jraph.GraphsTuple]) -> jraph.GraphsTuple:
"""Pad and concatenate the list of graphs.
Each graph in the list is padded according to the maximum n_nodes and n_edges
in the list, such that all graphs have the same length. Then they are
  concatenated. This is needed for pmap.
Args:
graphs: a list of graphs.
Returns:
graph: a single padded and merged graph.
"""
# Add at least one extra node to the placeholder graph.
pad_n_nodes = pad_size(max([g.nodes.shape[0] for g in graphs]) + 1)
pad_n_edges = pad_size(max([g.edges.shape[0] for g in graphs]))
padded_graphs = [pad_graphs(g, pad_n_nodes, pad_n_edges) for g in graphs]
nodes = []
edges = []
senders = []
receivers = []
n_node = []
n_edge = []
for g in padded_graphs:
assert g.nodes.shape[0] == pad_n_nodes
assert g.edges.shape[0] == pad_n_edges
assert g.senders.size == pad_n_edges
assert g.receivers.size == pad_n_edges
assert g.n_node.size == padded_graphs[0].n_node.size
assert g.n_edge.size == padded_graphs[0].n_edge.size
nodes.append(g.nodes)
edges.append(g.edges)
senders.append(g.senders)
receivers.append(g.receivers)
n_node.append(g.n_node)
n_edge.append(g.n_edge)
return jraph.GraphsTuple(
nodes=np.concatenate(nodes, axis=0),
edges=np.concatenate(edges, axis=0),
senders=np.concatenate(senders, axis=0),
receivers=np.concatenate(receivers, axis=0),
n_node=np.concatenate(n_node, axis=0),
n_edge=np.concatenate(n_edge, axis=0),
globals=None)
class MLPMessagePassingLayer(hk.Module):
"""Message passing layer implemented as MLPs."""
def __init__(self,
node_hidden_sizes: List[int],
msg_hidden_sizes: List[int],
residual: bool = True,
layer_norm: bool = False,
name: Optional[str] = None):
"""Constructor.
Args:
node_hidden_sizes: hidden sizes for the node update model.
msg_hidden_sizes: hidden sizes for the edge message model.
      residual: set to True to use residual connections; this also means the
        input dimension is appended to `node_hidden_sizes` as the output size.
layer_norm: whether to apply layer norm on the node representations.
name: name for this module.
"""
super().__init__(name=name)
self._node_hidden_sizes = node_hidden_sizes
self._msg_hidden_sizes = msg_hidden_sizes
self._residual = residual
self._layer_norm = layer_norm
def _compute_messages(self, graph: jraph.GraphsTuple) -> ArrayType:
"""Compute the messages on each edge."""
x = jnp.concatenate([graph.nodes[graph.senders],
graph.nodes[graph.receivers],
graph.edges], axis=-1)
return hk.nets.MLP(self._msg_hidden_sizes, activate_final=True)(x)
def _update_nodes(self, graph: jraph.GraphsTuple,
messages: ArrayType) -> ArrayType:
"""Compute updated node representations."""
x = jax.ops.segment_sum(messages, graph.receivers,
num_segments=graph.nodes.shape[0])
x = jnp.concatenate([graph.nodes, x], axis=-1)
layer_sizes = self._node_hidden_sizes[:]
if self._residual:
layer_sizes += [graph.nodes.shape[-1]]
x = hk.nets.MLP(layer_sizes, activate_final=False)(x)
if self._layer_norm:
x = hk.LayerNorm(axis=-1, create_scale=True, create_offset=True)(x)
if self._residual:
return graph.nodes + x
else:
return x
def __call__(self, graph: jraph.GraphsTuple) -> jraph.GraphsTuple:
"""Apply this layer on the input graph."""
messages = self._compute_messages(graph)
updated_nodes = self._update_nodes(graph, messages)
return graph._replace(nodes=updated_nodes)
class SimpleGraphNet(hk.Module):
"""A simple graph net module, a stack of message passing layers."""
def __init__(self,
num_layers: int,
msg_hidden_size_factor: int = 2,
layer_norm: bool = False,
name: Optional[str] = None):
"""Constructor.
Args:
num_layers: number of message passing layers in the network.
msg_hidden_size_factor: size of message module hidden sizes as a factor of
the input node feature dimensionality.
layer_norm: whether to apply layer norm on node updates.
name: name of this module.
"""
super().__init__(name=name)
self._num_layers = num_layers
self._msg_hidden_size_factor = msg_hidden_size_factor
self._layer_norm = layer_norm
def __call__(self, graph: jraph.GraphsTuple) -> jraph.GraphsTuple:
"""Run the simple graph net on the input data.
Args:
graph: input graph.
Returns:
graph: output graph.
"""
input_node_dim = graph.nodes.shape[-1]
msg_hidden_size = input_node_dim * self._msg_hidden_size_factor
for _ in range(self._num_layers):
graph = MLPMessagePassingLayer(
node_hidden_sizes=[],
msg_hidden_sizes=[msg_hidden_size],
layer_norm=self._layer_norm)(graph)
return graph
def add_reverse_edges(graph: jraph.GraphsTuple) -> jraph.GraphsTuple:
"""Add edges in the reverse direction, copy edge features."""
senders = np.concatenate([graph.senders, graph.receivers], axis=0)
receivers = np.concatenate([graph.receivers, graph.senders], axis=0)
edges = np.concatenate([graph.edges, graph.edges], axis=0)
return graph._replace(senders=senders, receivers=receivers, edges=edges)
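# Example (a minimal sketch with assumed values, not from the original tests).
# Note that `n_edge` is not updated by this function, so downstream code that
# relies on `n_edge` must account for the doubled edge count itself:
#   g = jraph.GraphsTuple(
#       nodes=np.zeros((2, 1)), edges=np.ones((1, 1)),
#       senders=np.array([0]), receivers=np.array([1]),
#       n_node=np.array([2]), n_edge=np.array([1]), globals=None)
#   g2 = add_reverse_edges(g)
#   # g2.senders   -> [0, 1]
#   # g2.receivers -> [1, 0]
#   # g2.edges     -> original edge features, duplicated for the reverse edges.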
| deepmind-research-master | wikigraphs/wikigraphs/model/graph_net.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# WikiGraphs is licensed under the terms of the Creative Commons
# Attribution-ShareAlike 4.0 International (CC BY-SA 4.0) license.
#
# WikiText-103 data (unchanged) is licensed by Salesforce.com, Inc. under the
# terms of the Creative Commons Attribution-ShareAlike 4.0 International
# (CC BY-SA 4.0) license. You can find details about CC BY-SA 4.0 at:
#
# https://creativecommons.org/licenses/by-sa/4.0/legalcode
#
# Freebase data is licensed by Google LLC under the terms of the Creative
# Commons CC BY 4.0 license. You may obtain a copy of the License at:
#
# https://creativecommons.org/licenses/by/4.0/legalcode
#
# ==============================================================================
"""WikiGraphs model modules."""
from . import embedding
from . import graph_net
from . import sampler
from . import transformer
from . import transformer_block
| deepmind-research-master | wikigraphs/wikigraphs/model/__init__.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# WikiGraphs is licensed under the terms of the Creative Commons
# Attribution-ShareAlike 4.0 International (CC BY-SA 4.0) license.
#
# WikiText-103 data (unchanged) is licensed by Salesforce.com, Inc. under the
# terms of the Creative Commons Attribution-ShareAlike 4.0 International
# (CC BY-SA 4.0) license. You can find details about CC BY-SA 4.0 at:
#
# https://creativecommons.org/licenses/by-sa/4.0/legalcode
#
# Freebase data is licensed by Google LLC under the terms of the Creative
# Commons CC BY 4.0 license. You may obtain a copy of the License at:
#
# https://creativecommons.org/licenses/by/4.0/legalcode
#
# ==============================================================================
"""Tests for wikigraphs.model.sampler."""
from absl.testing import absltest
import jraph
import numpy as np
from wikigraphs.model import sampler
from wikigraphs.model import transformer as models
class SamplerTest(absltest.TestCase):
def test_uncond_sampler_runs(self):
prompt = np.array([[0, 1, 2, -1, -1],
[0, 1, 2, -1, -1]], dtype=np.int32)
vocab_size = prompt.max() + 1
bos_token = 0
memory_size = 2
params = None
def model_fn(x):
return models.TransformerXL(
vocab_size=vocab_size,
emb_dim=8,
num_layers=2,
num_heads=4,
cutoffs=[])(x, is_training=False, cache_steps=memory_size)
uncond_sampler = sampler.TransformerXLSampler(model_fn)
sample = uncond_sampler.sample(params, prompt)
self.assertTrue((sample[:, 0] == bos_token).all())
self.assertTrue((sample != -1).all())
self.assertEqual(sample.shape, prompt.shape)
sample2 = uncond_sampler.sample(params, prompt)
self.assertTrue((sample2[:, 0] == bos_token).all())
self.assertTrue((sample2 != -1).all())
self.assertEqual(sample2.shape, prompt.shape)
self.assertTrue((sample != sample2).any())
def test_bow2text_sampler_runs(self):
bow = np.array([[0, 0, 1, 0, 2, 0, 0, 1],
[0, 1, 0, 0, 1, 0, 1, 0]], dtype=np.int32)
prompt = np.array([[0, 1, 2, -1, -1, -1],
[0, 1, 2, -1, -1, -1]], dtype=np.int32)
vocab_size = prompt.max() + 1
bos_token = 0
memory_size = 2
params = None
def model_fn(bow, x):
return models.Bow2TextTransformer(
vocab_size=vocab_size,
emb_dim=16,
num_layers=2,
num_heads=4,
cutoffs=[])(bow, x, is_training=False, cache_steps=memory_size)
bow_sampler = sampler.Bow2TextTransformerSampler(model_fn)
sample = bow_sampler.sample(params, prompt, bow)
self.assertTrue((sample[:, 0] == bos_token).all())
self.assertTrue((sample != -1).all())
self.assertEqual(sample.shape, prompt.shape)
sample2 = bow_sampler.sample(params, prompt, bow)
self.assertTrue((sample2[:, 0] == bos_token).all())
self.assertTrue((sample2 != -1).all())
self.assertEqual(sample2.shape, prompt.shape)
self.assertTrue((sample != sample2).any())
def test_graph2text_sampler_runs(self):
graphs = jraph.GraphsTuple(
nodes=np.ones((4, 3), dtype=np.float32),
edges=np.ones((3, 1), dtype=np.float32),
senders=np.array([0, 2, 3], dtype=np.int32),
receivers=np.array([1, 3, 2], dtype=np.int32),
n_node=np.array([2, 2], dtype=np.int32),
n_edge=np.array([1, 2], dtype=np.int32),
globals=None,
)
prompt = np.array([[0, 1, 2, -1, -1, -1],
[0, 1, 2, -1, -1, -1]], dtype=np.int32)
vocab_size = prompt.max() + 1
bos_token = 0
memory_size = 2
params = None
def model_fn(graphs, max_graph_size, x):
return models.Graph2TextTransformer(
vocab_size=vocab_size,
emb_dim=8,
num_layers=2,
num_heads=4,
cutoffs=[],
gnn_embed_dim=8,
gnn_num_layers=2)(
graphs, max_graph_size, True, x,
is_training=False, cache_steps=memory_size)
graph_sampler = sampler.Graph2TextTransformerSampler(model_fn)
sample = graph_sampler.sample(params, prompt, graphs)
self.assertTrue((sample[:, 0] == bos_token).all())
self.assertTrue((sample != -1).all())
self.assertEqual(sample.shape, prompt.shape)
sample2 = graph_sampler.sample(params, prompt, graphs)
self.assertTrue((sample2[:, 0] == bos_token).all())
self.assertTrue((sample2 != -1).all())
self.assertEqual(sample2.shape, prompt.shape)
self.assertTrue((sample != sample2).any())
if __name__ == '__main__':
absltest.main()
| deepmind-research-master | wikigraphs/wikigraphs/model/sampler_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# WikiGraphs is licensed under the terms of the Creative Commons
# Attribution-ShareAlike 4.0 International (CC BY-SA 4.0) license.
#
# WikiText-103 data (unchanged) is licensed by Salesforce.com, Inc. under the
# terms of the Creative Commons Attribution-ShareAlike 4.0 International
# (CC BY-SA 4.0) license. You can find details about CC BY-SA 4.0 at:
#
# https://creativecommons.org/licenses/by-sa/4.0/legalcode
#
# Freebase data is licensed by Google LLC under the terms of the Creative
# Commons CC BY 4.0 license. You may obtain a copy of the License at:
#
# https://creativecommons.org/licenses/by/4.0/legalcode
#
# ==============================================================================
"""Jax implementation of the Transformer-XL model."""
from typing import Dict, List, Optional, Tuple
import haiku as hk
from haiku import initializers as init
import jax
import jax.numpy as jnp
import jraph
import numpy as np
from wikigraphs.model import transformer_block
from wikigraphs.model.embedding import AdaptiveSoftmaxEmbedding
from wikigraphs.model.embedding import GraphEmbeddingModel
# For WikiText-103
DEFAULT_CUTOFFS = (20000 + 1, 40000 + 1, 200000 + 1)
def sequence_prediction_metrics(
logits: jnp.ndarray,
labels: jnp.ndarray,
mask: Optional[jnp.ndarray] = None
) -> Dict[str, float]:
"""Compute the metrics for sequence prediction.
Args:
logits: [B, T, V] array of logits.
labels: [B, T] array of labels.
mask: [B, T] array of binary masks, if provided.
Returns:
metrics: a dictionary of metrics.
"""
vocab_size = logits.shape[-1]
logps = jax.nn.log_softmax(logits)
labels_one_hot = hk.one_hot(labels, vocab_size)
class_logps = jnp.sum(logps * labels_one_hot, axis=-1)
prediction_correct = jnp.argmax(logits, axis=-1) == labels
if mask is not None:
masked_logps = mask * class_logps
total_count = jnp.sum(mask)
tokens_correct = jnp.sum(prediction_correct * mask)
seq_correct = jnp.all(
jnp.logical_or(prediction_correct, jnp.logical_not(mask)), axis=-1)
else:
masked_logps = class_logps
total_count = np.prod(class_logps.shape)
tokens_correct = jnp.sum(prediction_correct)
seq_correct = jnp.all(prediction_correct, axis=-1)
token_accuracy = tokens_correct.astype(jnp.float32) / total_count
seq_accuracy = jnp.mean(seq_correct)
log_probs = jnp.mean(jnp.sum(masked_logps, axis=-1))
total_loss = -jnp.sum(masked_logps)
loss = total_loss / total_count
return dict(
loss=loss,
total_loss=total_loss,
total_count=total_count,
token_accuracy=token_accuracy,
seq_accuracy=seq_accuracy,
log_probs=log_probs,
)
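# Example (a minimal sketch with assumed values): with uniform logits over 3
# classes, each token's log-probability is log(1/3), so:
#   logits = jnp.zeros((1, 2, 3))
#   labels = jnp.array([[0, 1]])
#   m = sequence_prediction_metrics(logits, labels)
#   # m['total_count'] == 2 and m['loss'] == -log(1/3) ~= 1.0986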
class TransformerXL(hk.Module):
"""TransformerXL language model with memory using GPT2 blocks.
TransformerXL: https://arxiv.org/abs/1901.02860
GPT-2: http://www.persagen.com/files/misc/radford2019language.pdf
"""
def __init__(self,
vocab_size: int = 256,
emb_dim: int = 256,
num_layers: int = 10,
num_heads: int = 8,
dropout_prob: float = 0.1,
dropout_attn_prob: float = 0.0,
self_att_init_scale: float = 0.02,
dense_init_scale: float = 0.02,
dense_dim: int = 2100,
cutoffs: List[int] = DEFAULT_CUTOFFS,
tail_shrink_factor: int = 1,
relative_pos_clamp_len: Optional[int] = None,
name: Optional[str] = None):
"""Initialize a TransformerXL.
Args:
vocab_size: the size of the vocabulary.
emb_dim: the dimensionality of the embeddings.
num_layers: number of transformer blocks.
num_heads: number of attention heads.
dropout_prob: dropout probability.
dropout_attn_prob: dropout probability of the attention module.
self_att_init_scale: the initialization scale of the VarianceScaling
used for the linear layer in the attention module.
dense_init_scale: the initialization scale of the VarianceScaling
used for the linear layer in the feedforward module.
dense_dim: feature size of the feedforward block.
cutoffs: the cutoff indices of the vocabulary used for the adaptive
softmax embedding.
tail_shrink_factor: how many times to shrink the hidden dimensionality
for low-frequency vocabulary after each cutoff in the adaptive softmax
embedding.
relative_pos_clamp_len: clamp length of the relative position embeddings.
name: Optional name for this Haiku module.
"""
super().__init__(name=name)
self._vocab_size = vocab_size
self._emb_dim = emb_dim
self._num_layers = num_layers
self._num_heads = num_heads
self._dropout_prob = dropout_prob
self._dropout_attn_prob = dropout_attn_prob
self._self_att_init_scale = self_att_init_scale
self._dense_init_scale = dense_init_scale
self._dense_dim = dense_dim
self._relative_pos_clamp_len = relative_pos_clamp_len
self._io_emb = AdaptiveSoftmaxEmbedding(
emb_dim, vocab_size, cutoffs=cutoffs,
tail_shrink_factor=tail_shrink_factor)
def __call__(self,
x: jnp.ndarray,
mask: Optional[jnp.ndarray] = None,
is_training: bool = True,
should_reset: Optional[jnp.ndarray] = None,
cache_steps: int = 0,
extra: Optional[jnp.ndarray] = None,
extra_mask: Optional[jnp.ndarray] = None) -> jnp.ndarray:
"""Computes the outputs of the TransformerXL.
Args:
x: [batch, timesteps]. Inputs at time step t.
      mask: [batch, timesteps]. Indicates which tokens are to be predicted; in
        other words it corresponds to the non-pad tokens in x_{t+1}.
is_training: whether the current stage is training or not.
should_reset: reset marker [batch, timesteps].
cache_steps: number of timesteps in the cache.
extra: if provided should be extra key-value input
[batch, extra_timesteps, in_dim].
extra_mask: if provided should be the mask for extra key-value input,
[batch, extra_timesteps].
Returns:
      output: transformer output logits [batch, timesteps, vocab_size].
"""
if cache_steps == 0:
cache_steps = x.shape[1]
if should_reset is None:
should_reset = jnp.where(x == 1, 1, 0)
h = self._io_emb.embed_input(x)
if mask is not None:
attention_mask = mask[:, None, None, :]
else:
attention_mask = None
head_dim = self._emb_dim // self._num_heads
    assert self._emb_dim % self._num_heads == 0, (
        'emb_dim must be divisible by num_heads.')
# Biases for relative position embedding shared across all layers
r_w_bias = hk.get_parameter(
'r_w_bias', [1, 1, self._num_heads, head_dim],
init=init.RandomNormal(stddev=self._self_att_init_scale))
r_r_bias = hk.get_parameter(
'r_r_bias', [1, 1, self._num_heads, head_dim],
init=init.RandomNormal(stddev=self._self_att_init_scale))
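    # In the notation of the Transformer-XL paper (Dai et al., 2019),
    # `r_w_bias` plays the role of the content bias `u` and `r_r_bias` the
    # relative-position bias `v` in the decomposed attention score:
    #   A_{i,j} = q_i.k_j + q_i.(W_r r_{i-j}) + u.k_j + v.(W_r r_{i-j})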
for i in range(self._num_layers):
if mask is not None:
h *= mask[:, :, None]
h = transformer_block.GPT2Block(
r_w_bias=r_w_bias,
r_r_bias=r_r_bias,
causal=True,
dense_dim=self._dense_dim,
dropout_prob=self._dropout_prob,
dropout_attn_prob=self._dropout_attn_prob,
num_heads=self._num_heads,
self_att_init_scale=self._self_att_init_scale,
dense_init_scale=self._dense_init_scale,
relative_pos_clamp_len=self._relative_pos_clamp_len,
name='transformer_block_{}'.format(i),
)(
h, mask=attention_mask, is_training=is_training,
should_reset=should_reset, cache_steps=cache_steps,
extra=extra, extra_mask=extra_mask)
if mask is not None:
h *= mask[:, :, None]
return self._io_emb.embed_output(h)
def loss(self,
inputs: jnp.ndarray,
labels: jnp.ndarray,
mask: Optional[jnp.ndarray] = None,
is_training: bool = True,
should_reset: Optional[jnp.ndarray] = None,
cache_steps: int = 0,
extra: Optional[jnp.ndarray] = None,
extra_mask: Optional[jnp.ndarray] = None
) -> Tuple[float, Dict[str, float]]:
"""Computes the loss of the TransformerXL.
Args:
inputs: [batch, timesteps].
labels: [batch, timesteps].
      mask: [batch, timesteps]. Indicates which tokens are to be predicted; in
        other words it corresponds to the non-pad tokens in the `labels`.
is_training: whether the current stage is training or not.
should_reset: reset marker [batch, timesteps].
cache_steps: number of timesteps in the cache.
extra: if provided should be extra key-value input
[batch, extra_timesteps, in_dim].
extra_mask: if provided should be the mask for extra key-value input,
[batch, extra_timesteps].
Returns:
output: loss and a dict containing metrics.
"""
# [B, T, V]
logits = self(inputs, mask=mask, is_training=is_training,
should_reset=should_reset, cache_steps=cache_steps,
extra=extra, extra_mask=extra_mask)
metrics = sequence_prediction_metrics(logits, labels, mask)
return metrics['loss'], metrics
def repeat_rows(a: jnp.ndarray, repeats: int, out_length: int) -> jnp.ndarray:
"""Repeat rows of input tensor a.
Output is
[a[0],
a[0],
...
a[0], # A total of repeats[0] copies of a[0].
a[1],
a[1],
...,
a[1], # A total of repeats[1] copies of a[1].
...
a[n-1]], # A total of repeats[n-1] copies of a[n-1].
Args:
a: [n_rows, ...] input tensor.
repeats: [n_rows] int tensor, the number of repeats for each row.
    out_length: number of rows in the output; it should equal sum(repeats),
      and is provided as a static value for jit.
Returns:
out: [out_length, ...] output tensor.
"""
a = jnp.asarray(a)
n = a.shape[0]
assert n == repeats.size
chunk_start = jnp.cumsum(repeats)
idx = jnp.sum(jnp.arange(out_length)[:, None] >= chunk_start[None, :],
axis=-1)
return a[idx]
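# Example (a minimal sketch with assumed values):
#   a = jnp.array([[1.], [2.], [3.]])
#   repeats = jnp.array([2, 0, 1])
#   repeat_rows(a, repeats, out_length=3)
#   # -> [[1.], [1.], [3.]]  (two copies of a[0], none of a[1], one of a[2])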
def unpack_and_pad(
packed: jnp.ndarray,
split_sizes: jnp.ndarray,
pad_size: int,
pad_value: int = 0) -> Tuple[jnp.ndarray, jnp.ndarray]:
"""Unpack and pad tensors to a standard size.
Args:
packed: a [total_size, ...] tensor, which contains n individual tensors
concatenated along the 0-th axis.
split_sizes: size [n] int tensor, size of each individual tensor.
pad_size: size for each split to pad to.
pad_value: the value to use for padding.
Returns:
tensors: [n, pad_size, ...] tensor, tensors[i] is the i-th individual tensor
padded to pad_size length.
    mask: [n, pad_size] mask tensor; True marks valid entries, False marks
      padding.
"""
in_shape = list(packed.shape)
total_size = in_shape[0]
n_splits = split_sizes.shape[0]
idx = jnp.arange(pad_size)
masks = split_sizes[:, None] > idx[None, :]
out_shape = in_shape[:]
out_shape[0] = n_splits * pad_size
out = jnp.full(out_shape, pad_value, dtype=packed.dtype)
# Index for the rows of `packed`:
# Define split_start[k] = sum_{i=0}^{k-1} split_sizes[i], which is the
  # starting index of split k. So if split_start[k] <= i < split_start[k+1],
  # then row i belongs to split k. We therefore have:
# idx[i] = k * pad_size + i - split_start[k]
cumsum = jnp.concatenate([jnp.array([0], dtype=split_sizes.dtype),
jnp.cumsum(split_sizes)[:-1]])
idx = jnp.arange(total_size)
idx += repeat_rows(jnp.arange(n_splits), split_sizes, total_size) * pad_size
idx -= repeat_rows(cumsum, split_sizes, total_size)
out = out.at[idx].set(packed)
out = out.reshape([n_splits, pad_size] + out_shape[1:])
return out, masks
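# Example (a minimal sketch with assumed values): two tensors of sizes 2 and 3
# packed along axis 0, each padded to 4 rows:
#   packed = jnp.arange(5)[:, None]
#   out, mask = unpack_and_pad(packed, jnp.array([2, 3]), pad_size=4)
#   # out[:, :, 0] -> [[0, 1, 0, 0], [2, 3, 4, 0]]
#   # mask         -> [[True, True, False, False], [True, True, True, False]]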
class Graph2TextTransformer(hk.Module):
"""A graph2text TransformerXL model.
  It embeds the graph with a simple graph neural network model and passes the
  graph embeddings to the TransformerXL model, where they serve as extra
  inputs to attend to in addition to the text embedding inputs.
"""
def __init__(self,
*transformer_args,
gnn_embed_dim: int = 128,
gnn_num_layers: int = 5,
gnn_layer_norm: bool = False,
name: Optional[str] = None,
**transformer_kwargs):
"""Constructor.
Args:
*transformer_args: args for the transformer module.
gnn_embed_dim: node embedding size.
gnn_num_layers: number of message passing layers to use.
gnn_layer_norm: whether to use layer norm in the GNN.
name: optional name for this module.
**transformer_kwargs: kwargs for the transformer module.
"""
super().__init__(name=name)
self._transformer = TransformerXL(*transformer_args, **transformer_kwargs)
self._gnn = GraphEmbeddingModel(
embed_dim=gnn_embed_dim,
num_layers=gnn_num_layers,
use_layer_norm=gnn_layer_norm)
def _encode_graphs(self,
graphs: jraph.GraphsTuple,
pad_n_nodes: Optional[int] = None,
padded: bool = False) -> Tuple[jnp.ndarray, jnp.ndarray]:
"""Encode graphs so that it can be used in the transformer.
Args:
      graphs: a batch of graphs packed into a jraph.GraphsTuple.
pad_n_nodes: size for each node to pad to.
      padded: whether the graph batch includes a padding graph, which will be
        removed from the output.
Returns:
      tensors: unpacked and padded graph node embeddings.
      mask: mask tensor; True marks valid node entries, False marks padding.
"""
graphs = self._gnn(graphs)
if pad_n_nodes is None:
pad_n_nodes = graphs.n_node.max()
out, mask = unpack_and_pad(graphs.nodes, graphs.n_node, pad_n_nodes)
if padded:
# Remove the padding graph from the batch
return out[:-1], mask[:-1]
else:
return out, mask
def __call__(self,
graphs: jraph.GraphsTuple,
pad_n_nodes: int,
batch_padded: bool,
*args, **kwargs):
"""Computes the outputs of the graph2text TransformerXL.
Args:
      graphs: a batch of graphs as a jraph.GraphsTuple.
pad_n_nodes: size for each node to pad to.
batch_padded: whether the graph batch is padded or not.
*args: args to the TransformerXL model.
**kwargs: kwargs to the TransformerXL model.
Returns:
      output: transformer output logits [batch, timesteps, vocab_size].
"""
extra, extra_mask = self._encode_graphs(graphs, pad_n_nodes, batch_padded)
return self._transformer(
*args, extra=extra, extra_mask=extra_mask, **kwargs)
def loss(self,
graphs: jraph.GraphsTuple,
pad_n_nodes: int,
batch_padded: bool,
inputs: jnp.ndarray,
labels: jnp.ndarray,
mask: jnp.ndarray,
**kwargs):
"""Computes the loss of the graph2text TransformerXL.
Args:
      graphs: a batch of graphs as a jraph.GraphsTuple.
pad_n_nodes: size for each node to pad to.
batch_padded: whether the graph batch is padded or not.
inputs: [batch, timesteps].
labels: [batch, timesteps].
mask: [batch, timesteps].
**kwargs: kwargs to the TransformerXL model.
Returns:
output: loss and a dict containing metrics.
"""
extra, extra_mask = self._encode_graphs(graphs, pad_n_nodes, batch_padded)
return self._transformer.loss(
inputs, labels, mask, extra=extra, extra_mask=extra_mask, **kwargs)
class Bow2TextTransformer(hk.Module):
"""A bag-of-words to text TransformerXL model.
This model embeds bag-of-words into vectors and the text transformer can then
condition on these vectors to generate text.
More specifically, the bow embedded vectors will be treated as extra tokens
that the transformer can attend to, in addition to the text data it is already
modelling.
  To make the model more expressive, we allow each bag-of-words to be embedded
  into potentially more than one vector, and the transformer treats these as
  correspondingly many extra tokens.
"""
def __init__(self,
*transformer_args,
bow_embedding_dim: int = 256,
bow_n_tokens: int = 1,
name: Optional[str] = None,
**transformer_kwargs):
"""Constructor.
Args:
*transformer_args: the TransformerXL constructor arguments.
bow_embedding_dim: dimensionality for the bag-of-words embeddings.
bow_n_tokens: number of extra tokens to create for the bag-of-words
representations.
name: optional name for this module.
**transformer_kwargs: kwargs for the transformer module.
"""
super().__init__(name=name)
self._transformer = TransformerXL(*transformer_args, **transformer_kwargs)
self._bow_embedding_dim = bow_embedding_dim
self._bow_n_tokens = bow_n_tokens
def _encode_bow(self, bow: jnp.ndarray) -> jnp.ndarray:
"""Encode the bag-of-words into tensors that can be used by the transormer.
Args:
bow: a [batch_size, bow_vocab_size] tensor, each row is a bow vector.
Returns:
embeddings: [batch_size, bow_n_tokens, bow_embedding_dim] tensor.
"""
batch_size = bow.shape[0]
bow = bow.astype(jnp.float32)
# [B, D * n]
embeddings = hk.Linear(self._bow_embedding_dim * self._bow_n_tokens)(bow)
embeddings = transformer_block.layer_norm(jax.nn.gelu(embeddings))
return jnp.reshape(
embeddings, [batch_size, self._bow_n_tokens, self._bow_embedding_dim])
def __call__(self, bow: jnp.ndarray, *args, **kwargs):
"""Compute the output of this bag-of-words-to-text transformer model.
Args:
bow: a [batch_size, bow_vocab_size] tensor, each row is a bow vector.
*args: args to the TransformerXL model.
**kwargs: kwargs to the TransformerXL model.
Returns:
      output: transformer output logits [batch, timesteps, vocab_size].
"""
return self._transformer(*args, extra=self._encode_bow(bow), **kwargs)
def loss(self, bow: jnp.ndarray, *args, **kwargs):
"""Computes the loss of the graph2text TransformerXL.
Args:
bow: a [batch_size, bow_vocab_size] tensor, each row is a bow vector.
*args: args to the TransformerXL model.
**kwargs: kwargs to the TransformerXL model.
Returns:
output: loss and a dict containing metrics.
"""
return self._transformer.loss(*args, extra=self._encode_bow(bow), **kwargs)
| deepmind-research-master | wikigraphs/wikigraphs/model/transformer.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# WikiGraphs is licensed under the terms of the Creative Commons
# Attribution-ShareAlike 4.0 International (CC BY-SA 4.0) license.
#
# WikiText-103 data (unchanged) is licensed by Salesforce.com, Inc. under the
# terms of the Creative Commons Attribution-ShareAlike 4.0 International
# (CC BY-SA 4.0) license. You can find details about CC BY-SA 4.0 at:
#
# https://creativecommons.org/licenses/by-sa/4.0/legalcode
#
# Freebase data is licensed by Google LLC under the terms of the Creative
# Commons CC BY 4.0 license. You may obtain a copy of the License at:
#
# https://creativecommons.org/licenses/by/4.0/legalcode
#
# ==============================================================================
"""Tests for wikigraphs.model.graph_net."""
from absl import logging
from absl.testing import absltest
import haiku as hk
import jax
import jax.numpy as jnp
import jraph
import numpy as np
import optax
from wikigraphs.model import graph_net as gn
class GraphNetTest(absltest.TestCase):
def test_node_classification(self):
# If node has more than 2 neighbors --> class 1, otherwise class 0.
# Graph structure:
# 1 4
# | \ / |
# | 0 - 3 |
# | / \ |
# 2 5
edges = np.array([
[0, 1],
[1, 2],
[2, 0],
[0, 3],
[3, 4],
[4, 5],
[5, 3],
], dtype=np.int32)
n_node = edges.max() + 1
n_edge = edges.shape[0]
g = jraph.GraphsTuple(
senders=edges[:, 0],
receivers=edges[:, 1],
edges=np.ones((edges.shape[0], 1), dtype=np.float32),
nodes=np.ones((n_node, 1), dtype=np.float32),
n_node=np.array([n_node], dtype=np.int32),
n_edge=np.array([n_edge], dtype=np.int32),
globals=None)
g = gn.add_reverse_edges(g)
targets = np.array([1, 0, 0, 1, 0, 0], dtype=np.int32)
n_classes = 2
def forward(graph, targets):
model = gn.SimpleGraphNet(num_layers=5, layer_norm=False)
graph = model(graph)
nodes = graph.nodes
logits = hk.Linear(n_classes)(nodes)
pred = logits.argmax(axis=-1)
accuracy = (pred == targets).mean()
targets = jax.nn.one_hot(targets, n_classes, dtype=jnp.float32)
return -jnp.mean(jnp.sum(
jax.nn.log_softmax(logits, axis=-1) * targets, axis=-1)), accuracy
init_fn, apply_fn = hk.without_apply_rng(hk.transform(forward))
rng = hk.PRNGSequence(0)
params = init_fn(next(rng), g, targets)
optimizer = optax.chain(
optax.scale_by_adam(),
optax.scale(-1e-3))
opt_state = optimizer.init(params)
apply_fn = jax.jit(apply_fn)
for i in range(500):
(loss, acc), grad = jax.value_and_grad(apply_fn,
has_aux=True)(params, g, targets)
updates, opt_state = optimizer.update(grad, opt_state, params)
params = optax.apply_updates(params, updates)
if (i + 1) % 100 == 0:
logging.info('Step %d, loss %.8f, accuracy %.4f', i + 1, loss, acc)
self.assertLess(loss, 0.01)
self.assertEqual(acc, 1.0)
def test_pad_size(self):
self.assertEqual(gn.pad_size(1), 1)
self.assertEqual(gn.pad_size(5), 8)
self.assertEqual(gn.pad_size(7), 8)
self.assertEqual(gn.pad_size(101), 128)
def test_pad_graphs(self):
# No new edges to add
graphs = jraph.GraphsTuple(
nodes=np.arange(6)[:, None],
edges=np.arange(4)[:, None],
senders=np.array([0, 2, 3, 4]),
receivers=np.array([1, 3, 4, 5]),
n_node=np.array([2, 4]),
n_edge=np.array([1, 3]),
globals=None)
padded = gn.pad_graphs(graphs)
np.testing.assert_array_equal(
padded.nodes,
np.array([0, 1, 2, 3, 4, 5, 0, 0])[:, None])
np.testing.assert_array_equal(padded.edges, graphs.edges)
np.testing.assert_array_equal(padded.senders, graphs.senders)
np.testing.assert_array_equal(padded.receivers, graphs.receivers)
np.testing.assert_array_equal(padded.n_node, [2, 4, 2])
np.testing.assert_array_equal(padded.n_edge, [1, 3, 0])
# Add just a single default node
graphs = jraph.GraphsTuple(
nodes=np.arange(7)[:, None],
edges=np.arange(5)[:, None],
senders=np.array([0, 2, 3, 5, 6]),
receivers=np.array([1, 3, 4, 6, 5]),
n_node=np.array([2, 3, 2]),
n_edge=np.array([1, 2, 2]),
globals=None)
padded = gn.pad_graphs(graphs)
np.testing.assert_array_equal(
padded.nodes,
np.array([0, 1, 2, 3, 4, 5, 6, 0])[:, None])
np.testing.assert_array_equal(
padded.edges,
np.array([0, 1, 2, 3, 4, 0, 0, 0])[:, None])
np.testing.assert_array_equal(
padded.senders,
[0, 2, 3, 5, 6, 7, 7, 7])
np.testing.assert_array_equal(
padded.receivers,
[1, 3, 4, 6, 5, 7, 7, 7])
np.testing.assert_array_equal(
padded.n_node, [2, 3, 2, 1])
np.testing.assert_array_equal(
padded.n_edge, [1, 2, 2, 3])
# Num. nodes is a power of 2 but we still pad at least one extra node
graphs = jraph.GraphsTuple(
nodes=np.arange(8)[:, None],
edges=np.arange(5)[:, None],
senders=np.array([0, 2, 3, 5, 6]),
receivers=np.array([1, 3, 4, 6, 7]),
n_node=np.array([2, 3, 3]),
n_edge=np.array([1, 2, 2]),
globals=None)
padded = gn.pad_graphs(graphs)
np.testing.assert_array_equal(
padded.nodes,
np.array([0, 1, 2, 3, 4, 5, 6, 7, 0, 0, 0, 0, 0, 0, 0, 0])[:, None])
np.testing.assert_array_equal(
padded.edges,
np.array([0, 1, 2, 3, 4, 0, 0, 0])[:, None])
np.testing.assert_array_equal(
padded.senders,
[0, 2, 3, 5, 6, 8, 8, 8])
np.testing.assert_array_equal(
padded.receivers,
[1, 3, 4, 6, 7, 8, 8, 8])
np.testing.assert_array_equal(
padded.n_node, [2, 3, 3, 8])
np.testing.assert_array_equal(
padded.n_edge, [1, 2, 2, 3])
def test_batch_graphs_by_device(self):
# batch 4 graphs for 2 devices
num_devices = 2
graphs = [
jraph.GraphsTuple(
nodes=np.arange(2)[:, None],
edges=np.arange(2)[:, None],
senders=np.array([0, 1]),
receivers=np.array([1, 0]),
n_node=np.array([2]),
n_edge=np.array([2]),
globals=None),
jraph.GraphsTuple(
nodes=np.arange(3)[:, None],
edges=np.arange(1)[:, None],
senders=np.array([2]),
receivers=np.array([0]),
n_node=np.array([3]),
n_edge=np.array([1]),
globals=None),
jraph.GraphsTuple(
nodes=np.arange(4)[:, None],
edges=np.arange(2)[:, None],
senders=np.array([1, 0]),
receivers=np.array([2, 3]),
n_node=np.array([4]),
n_edge=np.array([2]),
globals=None),
jraph.GraphsTuple(
nodes=np.arange(5)[:, None],
edges=np.arange(3)[:, None],
senders=np.array([2, 1, 3]),
receivers=np.array([1, 4, 0]),
n_node=np.array([5]),
n_edge=np.array([3]),
globals=None),
]
batched = gn.batch_graphs_by_device(graphs, num_devices)
self.assertLen(batched, num_devices)
np.testing.assert_array_equal(
batched[0].nodes,
np.array([0, 1, 0, 1, 2])[:, None])
np.testing.assert_array_equal(
batched[0].edges,
np.array([0, 1, 0])[:, None])
np.testing.assert_array_equal(
batched[0].senders,
np.array([0, 1, 4]))
np.testing.assert_array_equal(
batched[0].receivers,
np.array([1, 0, 2]))
np.testing.assert_array_equal(
batched[0].n_node,
np.array([2, 3]))
np.testing.assert_array_equal(
batched[0].n_edge,
np.array([2, 1]))
np.testing.assert_array_equal(
batched[1].nodes,
np.array([0, 1, 2, 3, 0, 1, 2, 3, 4])[:, None])
np.testing.assert_array_equal(
batched[1].edges,
np.array([0, 1, 0, 1, 2])[:, None])
np.testing.assert_array_equal(
batched[1].senders,
np.array([1, 0, 6, 5, 7]))
np.testing.assert_array_equal(
batched[1].receivers,
np.array([2, 3, 5, 8, 4]))
np.testing.assert_array_equal(
batched[1].n_node,
np.array([4, 5]))
np.testing.assert_array_equal(
batched[1].n_edge,
np.array([2, 3]))
def test_pad_graphs_by_device(self):
graphs = [
jraph.GraphsTuple(
nodes=np.arange(5)[:, None], # pad to 8
edges=np.arange(3)[:, None], # pad to 4
senders=np.array([0, 1, 4]), # pad to 4
receivers=np.array([1, 0, 2]), # pad to 4
n_node=np.array([2, 3]), # pad to 3
n_edge=np.array([2, 1]), # pad to 3
globals=None),
jraph.GraphsTuple(
nodes=np.arange(4)[:, None], # pad to 8
edges=np.arange(1)[:, None], # pad to 4
senders=np.array([1]), # pad to 4
receivers=np.array([0]), # pad to 4
n_node=np.array([2, 2]), # pad to 3
n_edge=np.array([1, 0]), # pad to 3
globals=None),
]
padded = gn.pad_graphs_by_device(graphs)
np.testing.assert_array_equal(
padded.nodes,
np.array([0, 1, 2, 3, 4, 0, 0, 0,
0, 1, 2, 3, 0, 0, 0, 0])[:, None])
np.testing.assert_array_equal(
padded.edges,
np.array([0, 1, 2, 0, 0, 0, 0, 0])[:, None])
np.testing.assert_array_equal(
padded.senders,
np.array([0, 1, 4, 5, 1, 4, 4, 4]))
np.testing.assert_array_equal(
padded.receivers,
np.array([1, 0, 2, 5, 0, 4, 4, 4]))
np.testing.assert_array_equal(
padded.n_node,
np.array([2, 3, 3, 2, 2, 4]))
np.testing.assert_array_equal(
padded.n_edge,
np.array([2, 1, 1, 1, 0, 3]))
if __name__ == '__main__':
absltest.main()
| deepmind-research-master | wikigraphs/wikigraphs/model/graph_net_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# WikiGraphs is licensed under the terms of the Creative Commons
# Attribution-ShareAlike 4.0 International (CC BY-SA 4.0) license.
#
# WikiText-103 data (unchanged) is licensed by Salesforce.com, Inc. under the
# terms of the Creative Commons Attribution-ShareAlike 4.0 International
# (CC BY-SA 4.0) license. You can find details about CC BY-SA 4.0 at:
#
# https://creativecommons.org/licenses/by-sa/4.0/legalcode
#
# Freebase data is licensed by Google LLC under the terms of the Creative
# Commons CC BY 4.0 license. You may obtain a copy of the License at:
#
# https://creativecommons.org/licenses/by/4.0/legalcode
#
# ==============================================================================
"""Samplers for the graph2text transformers."""
import abc
from typing import Any, Optional, Mapping
import haiku as hk
import jax
import jax.numpy as jnp
import jraph
import numpy as np
from wikigraphs.model import graph_net as gn
class BaseSampler(abc.ABC):
"""Base class for transformer samplers."""
def __init__(self,
model_fn,
temperature: float = 1.0,
device: Optional[Any] = None,
rng: Optional[np.ndarray] = None):
"""Constructor.
Args:
model_fn: a transformer language model defined in model.transformer.
temperature: sampling temperature.
device: the sampler will run on this device if provided.
rng: random number generator.
"""
self._temperature = temperature
self._device = device or jax.local_devices()[0]
init_fn, apply_fn = hk.transform_with_state(model_fn)
if rng is None:
rng = jax.random.PRNGKey(np.random.randint(2**32))
rng = jax.random.fold_in(rng, jax.host_id())
self._rng = rng
self._init_state = None
self._jit_model(init_fn, apply_fn)
def _jit_model(self, init_fn, apply_fn):
"""Jit the `init_fn` and `apply_fn`."""
pass
@abc.abstractmethod
def _sample(self,
params: Mapping[str, Any],
state: Mapping[str, Any],
rng: jnp.ndarray,
x: jnp.ndarray,
**kwargs) -> np.ndarray:
"""Generate samples.
Args:
params: parameters of the transformer.
state: state of the transformer.
rng: random number generator.
x: a prompt of shape [batch_size, sample_len], in which an entry of -1
        indicates it will be generated at that place. Otherwise it acts as the
prompt.
**kwargs: additional inputs.
Returns:
output: [batch_size, sample_len] tensor, the generated sequence.
"""
@abc.abstractmethod
def sample(self,
params: Mapping[str, Any],
x: jnp.ndarray,
**kwargs) -> jnp.ndarray:
"""Generate samples based on the given parameters and prompts.
Args:
params: parameters of the transformer.
x: a prompt of shape [batch_size, sample_len], in which an entry of -1
        indicates it will be generated at that place. Otherwise it acts as the
prompt.
**kwargs: additional inputs.
Returns:
output: the generated sequence.
"""
class TransformerXLSampler(BaseSampler):
"""Sampling from the TransformerXL model."""
def _jit_model(self, init_fn, apply_fn):
"""Jit `init_fn` and `apply_fn`, the latter is used in `self._sample`."""
self._init_fn = jax.jit(init_fn, device=self._device)
self._apply_fn = apply_fn
self._sample_fn = jax.jit(self._sample, device=self._device)
def _sample(self,
params: Mapping[str, Any],
state: Mapping[str, Any],
rng: jnp.ndarray,
x: jnp.ndarray) -> np.ndarray:
"""Generate unconditional samples.
Args:
params: parameters of the transformer.
state: state of the transformer.
rng: random number generator.
x: a prompt of shape [batch_size, sample_len], in which an entry of -1
        indicates it will be generated at that place. Otherwise it acts as the
prompt.
Returns:
output: [batch_size, sample_len] tensor, the generated sequence.
"""
batch_size, sample_len = x.shape
def one_step(params, state, rng, i, x):
step_sample = jax.lax.dynamic_slice(x, [0, i], [batch_size, 1])
rng, rng_ = jax.random.split(rng)
# step_sample shape is [batch_size, 1].
logits, state = self._apply_fn(params, state, rng_, step_sample)
rng, rng_ = jax.random.split(rng)
step_sample = jax.random.categorical(rng_, logits / self._temperature)
      update = jnp.where(
          x[:, i + 1] < 0, step_sample[:, 0], x[:, i + 1])[:, None]
x = jax.lax.dynamic_update_slice(x, update, [0, i + 1])
return state, rng, x
def loop_body(i, data):
state, rng, x = data
return one_step(params, state, rng, i, x)
_, _, x = jax.lax.fori_loop(0, sample_len - 1, loop_body,
(state, rng, x))
return x
def sample(self,
params: Mapping[str, Any],
x: jnp.ndarray) -> jnp.ndarray:
"""Generate samples based on the given graphs and parameters.
Args:
params: parameters of the transformer.
x: a prompt of shape [batch_size, sample_len], in which an entry of -1
        indicates it will be generated at that place. Otherwise it acts as the
prompt.
Returns:
output: the generated sequence.
"""
if self._init_state is None:
self._rng, rng = jax.random.split(self._rng)
self._init_params, self._init_state = self._init_fn(rng, x[:, :1])
if params is None:
params = self._init_params
self._rng, rng = jax.random.split(self._rng)
sample = self._sample_fn(params, self._init_state, rng, x)
return sample
class Bow2TextTransformerSampler(BaseSampler):
"""Sampling from the TransformerXL model."""
def _jit_model(self, init_fn, apply_fn):
"""Jit `init_fn` and `apply_fn`, the latter is used in `self._sample`."""
self._init_fn = jax.jit(init_fn, device=self._device)
self._apply_fn = apply_fn
self._sample_fn = jax.jit(self._sample, device=self._device)
def _sample(self,
params: Mapping[str, Any],
state: Mapping[str, Any],
rng: jnp.ndarray,
bow: jnp.ndarray,
x: jnp.ndarray) -> np.ndarray:
"""Generate samples conditioned on the bag-of-words of the graph.
Args:
params: parameters of the transformer.
state: state of the transformer.
rng: random number generator.
bow: a [batch_size, bow_vocab_size] tensor, each row is a bow vector.
x: a prompt of shape [batch_size, sample_len], in which an entry of -1
        indicates it will be generated at that place. Otherwise it acts as the
prompt.
Returns:
output: [batch_size, sample_len] tensor, the generated sequence.
"""
batch_size, sample_len = x.shape
def one_step(params, state, rng, i, x):
step_sample = jax.lax.dynamic_slice(x, [0, i], [batch_size, 1])
rng, rng_ = jax.random.split(rng)
# step_sample shape is [batch_size, 1].
logits, state = self._apply_fn(params, state, rng_, bow, step_sample)
rng, rng_ = jax.random.split(rng)
step_sample = jax.random.categorical(rng_, logits / self._temperature)
      update = jnp.where(
          x[:, i + 1] < 0, step_sample[:, 0], x[:, i + 1])[:, None]
x = jax.lax.dynamic_update_slice(x, update, [0, i + 1])
return state, rng, x
def loop_body(i, data):
state, rng, x = data
return one_step(params, state, rng, i, x)
_, _, x = jax.lax.fori_loop(0, sample_len - 1, loop_body,
(state, rng, x))
return x
def sample(self,
params: Mapping[str, Any],
x: jnp.ndarray,
bow: jnp.ndarray) -> jnp.ndarray:
"""Generate samples based on the given graphs and parameters.
Args:
params: parameters of the transformer.
x: a prompt of shape [batch_size, sample_len], in which an entry of -1
        indicates it will be generated at that place. Otherwise it acts as the
prompt.
bow: a [batch_size, bow_vocab_size] tensor, each row is a bow vector.
Returns:
output: the generated sequence.
"""
if self._init_state is None:
self._rng, rng = jax.random.split(self._rng)
self._init_params, self._init_state = self._init_fn(rng, bow, x[:, :1])
if params is None:
params = self._init_params
self._rng, rng = jax.random.split(self._rng)
sample = self._sample_fn(params, self._init_state, rng, bow, x)
return sample
class Graph2TextTransformerSampler(BaseSampler):
"""Sampling from the Graph2Text TransformerXL model."""
def _jit_model(self, init_fn, apply_fn):
"""Jit `init_fn` and `apply_fn`, the latter is used in `self._sample`."""
# `pad_n_nodes` is set as a static argument.
self._init_fn = jax.jit(init_fn, device=self._device, static_argnums=2)
self._apply_fn = apply_fn
self._sample_fn = jax.jit(self._sample, device=self._device,
static_argnums=4)
def _sample(self,
params: Mapping[str, Any],
state: Mapping[str, Any],
rng: jnp.ndarray,
graphs: jraph.GraphsTuple,
pad_n_nodes: int,
x: jnp.ndarray) -> np.ndarray:
"""Generate samples conditioned on the bag-of-words reprensation of graph.
Args:
params: parameters of the transformer.
state: state of the transformer.
rng: random number generator.
      graphs: a batch of graphs as a jraph.GraphsTuple.
pad_n_nodes: size for each node to pad to.
x: a prompt of shape [batch_size, sample_len], in which an entry of -1
        indicates it will be generated at that place. Otherwise it acts as the
prompt.
Returns:
output: [batch_size, sample_len] tensor, the generated sequence.
"""
batch_size, sample_len = x.shape
def one_step(params, state, rng, i, x):
step_sample = jax.lax.dynamic_slice(x, [0, i], [batch_size, 1])
rng, rng_ = jax.random.split(rng)
# step_sample shape is [batch_size, 1].
logits, state = self._apply_fn(
params, state, rng_, graphs, pad_n_nodes, step_sample)
rng, rng_ = jax.random.split(rng)
step_sample = jax.random.categorical(rng_, logits / self._temperature)
      update = jnp.where(
          x[:, i + 1] < 0, step_sample[:, 0], x[:, i + 1])[:, None]
x = jax.lax.dynamic_update_slice(x, update, [0, i + 1])
return state, rng, x
def loop_body(i, data):
state, rng, x = data
return one_step(params, state, rng, i, x)
_, _, x = jax.lax.fori_loop(0, sample_len - 1, loop_body,
(state, rng, x))
return x
def sample(self,
params: Mapping[str, Any],
x: jnp.ndarray,
graphs: jraph.GraphsTuple,
pad: bool = True) -> jnp.ndarray:
"""Generate samples based on the given graphs and parameters.
Args:
params: parameters of the transformer.
x: a prompt of shape [batch_size, sample_len], in which an entry of -1
        indicates it will be generated at that place. Otherwise it acts as the
prompt.
      graphs: a batch of graphs as a jraph.GraphsTuple.
pad: whether to pad the graph nodes and edges or not.
Returns:
output: the generated sequence.
"""
if pad:
graphs = gn.pad_graphs(graphs)
max_graph_size = gn.pad_size(graphs.n_node.max())
else:
max_graph_size = graphs.n_node.max()
if self._init_state is None:
self._rng, rng = jax.random.split(self._rng)
self._init_params, self._init_state = self._init_fn(
rng, graphs, max_graph_size, x[:, :1])
if params is None:
params = self._init_params
self._rng, rng = jax.random.split(self._rng)
sample = self._sample_fn(
params, self._init_state, rng, graphs, max_graph_size, x)
return sample
| deepmind-research-master | wikigraphs/wikigraphs/model/sampler.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# WikiGraphs is licensed under the terms of the Creative Commons
# Attribution-ShareAlike 4.0 International (CC BY-SA 4.0) license.
#
# WikiText-103 data (unchanged) is licensed by Salesforce.com, Inc. under the
# terms of the Creative Commons Attribution-ShareAlike 4.0 International
# (CC BY-SA 4.0) license. You can find details about CC BY-SA 4.0 at:
#
# https://creativecommons.org/licenses/by-sa/4.0/legalcode
#
# Freebase data is licensed by Google LLC under the terms of the Creative
# Commons CC BY 4.0 license. You may obtain a copy of the License at:
#
# https://creativecommons.org/licenses/by/4.0/legalcode
#
# ==============================================================================
"""Tests for wikigraphs.data.wikitext."""
from absl.testing import absltest
from wikigraphs.data import tokenizers
from wikigraphs.data import wikitext
WIKITEXT_ROOT = '/tmp/data/wikitext-103'
WIKITEXT_VOCAB_FILE = '/tmp/data/wikitext-vocab.csv'
class WikitextTest(absltest.TestCase):
def test_wikitext_size(self):
valid_set = wikitext.RawDataset(
subset='valid', shuffle_data=False, data_dir=WIKITEXT_ROOT)
n_tokens = 0
n_articles = 0
for article in valid_set:
n_tokens += len([t for t in article.text.split(' ') if t])
n_articles += 1
# Dataset size must match published values.
self.assertEqual(n_tokens, 217646)
self.assertEqual(n_articles, 60)
def test_wikitext_dataset_size(self):
tokenizer = tokenizers.WordTokenizer(vocab_file=WIKITEXT_VOCAB_FILE)
batch_size = 4
timesteps = 256
valid_set = wikitext.WikitextDataset(
tokenizer=tokenizer, batch_size=batch_size, timesteps=timesteps,
subset='valid', shuffle_data=False, repeat=False,
data_dir=WIKITEXT_ROOT)
n_tokens = 0
n_bos = 0
for batch in valid_set:
n_tokens += (batch['obs'] != tokenizer.pad_token()).sum()
n_bos += (batch['obs'] == tokenizer.bos_token()).sum()
self.assertEqual(
batch['obs'].shape, (batch_size, timesteps))
self.assertEqual(
batch['target'].shape, (batch_size, timesteps))
self.assertEqual(
batch['should_reset'].shape, (batch_size, timesteps))
self.assertEqual(
batch['mask'].shape, (batch_size, timesteps))
n_tokens -= n_bos
self.assertEqual(n_tokens, 217646)
self.assertEqual(n_bos, 60)
if __name__ == '__main__':
absltest.main()
| deepmind-research-master | wikigraphs/wikigraphs/data/wikitext_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# WikiGraphs is licensed under the terms of the Creative Commons
# Attribution-ShareAlike 4.0 International (CC BY-SA 4.0) license.
#
# WikiText-103 data (unchanged) is licensed by Salesforce.com, Inc. under the
# terms of the Creative Commons Attribution-ShareAlike 4.0 International
# (CC BY-SA 4.0) license. You can find details about CC BY-SA 4.0 at:
#
# https://creativecommons.org/licenses/by-sa/4.0/legalcode
#
# Freebase data is licensed by Google LLC under the terms of the Creative
# Commons CC BY 4.0 license. You may obtain a copy of the License at:
#
# https://creativecommons.org/licenses/by/4.0/legalcode
#
# ==============================================================================
"""Tests for wikigraphs.data.tokenizers."""
from absl.testing import absltest
from wikigraphs.data import tokenizers
WIKITEXT_VOCAB_FILE = '/tmp/data/wikitext-vocab.csv'
GRAPH_VOCAB_FILE = '/tmp/data/graph-vocab.csv'
class TokenizerTest(absltest.TestCase):
def test_tokenizer(self):
tokenizer = tokenizers.WordTokenizer(vocab_file=WIKITEXT_VOCAB_FILE)
# Vocab size must match published number.
self.assertEqual(tokenizer.vocab_size, 267735 + 2)
s = 'Hello world ! \n How are you ?'
encoded = tokenizer.encode(s, prepend_bos=True)
self.assertEqual(encoded.shape, (9,))
decoded = tokenizer.decode(encoded)
self.assertEqual(s, decoded)
def test_graph_tokenizer_tokenize_nodes_edges(self):
self.assertEqual(
tokenizers.GraphTokenizer.split_node(
'"Hello, how are you?"'),
['hello', ',', 'how', 'are', 'you', '?'])
self.assertEqual(
tokenizers.GraphTokenizer.split_node(
'"This building was built in 1998."'),
['this', 'building', 'was', 'built', 'in', '<number>', '.'])
self.assertEqual(
tokenizers.GraphTokenizer.split_node('ns/m.030ssw'),
['<entity>'])
self.assertEqual(
tokenizers.GraphTokenizer.split_edge('ns/common.topic.description'),
['common', 'topic', 'description'])
self.assertEqual(
tokenizers.GraphTokenizer.split_edge('ns/type.object.name'),
['type', 'object', 'name'])
def test_graph_tokenizer_vocab(self):
tokenizer = tokenizers.GraphTokenizer(vocab_file=GRAPH_VOCAB_FILE)
self.assertEqual(tokenizer.vocab_size, 31087 + 3)
if __name__ == '__main__':
absltest.main()
| deepmind-research-master | wikigraphs/wikigraphs/data/tokenizers_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# WikiGraphs is licensed under the terms of the Creative Commons
# Attribution-ShareAlike 4.0 International (CC BY-SA 4.0) license.
#
# WikiText-103 data (unchanged) is licensed by Salesforce.com, Inc. under the
# terms of the Creative Commons Attribution-ShareAlike 4.0 International
# (CC BY-SA 4.0) license. You can find details about CC BY-SA 4.0 at:
#
# https://creativecommons.org/licenses/by-sa/4.0/legalcode
#
# Freebase data is licensed by Google LLC under the terms of the Creative
# Commons CC BY 4.0 license. You may obtain a copy of the License at:
#
# https://creativecommons.org/licenses/by/4.0/legalcode
#
# ==============================================================================
"""Some tools for processing data."""
from typing import Any, Iterator
from absl import logging
import numpy as np
def pad_to(x: np.ndarray, size: int, axis: int = -1, pad_value: float = 0.):
"""Pad an array to the specified size along a specified axis."""
if x.shape[axis] > size:
raise ValueError(f'Data item has size {x.shape[axis]} larger than {size}'
f' in axis {axis} already.')
elif x.shape[axis] == size:
return x
else:
pad_amount = [(0, 0)] * x.ndim
pad_amount[axis] = (0, size - x.shape[axis])
return np.pad(x, pad_amount, mode='constant', constant_values=pad_value)
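# Example (a minimal sketch with assumed values):
#   pad_to(np.array([1, 2, 3]), size=5, axis=0, pad_value=0)
#   # -> array([1, 2, 3, 0, 0])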
def dynamic_batch(
iterable: Iterator[Any],
batch_size: int,
timesteps: int,
return_incomplete_batch: bool = False,
pad: bool = False,
pad_value: float = 0.) -> Iterator[Any]:
"""Batches up values in iterable to [batch_size, timesteps].
  This function takes items from the iterable and packs them into batches.
  Sequence #i in a batch is a continuation of sequence #i in the previous
  batch, i.e. it starts from where the previous batch left off.
When an item is finished, a new item is taken from the iterable to append to
the sequence and fill the batch.
This function is designed for language modeling, where the input and the
target sequences are offset by one. We take that into account by making sure
neighboring batches have one token overlap.
Example:
If the iterable contains [[0, 1, 2], [10, 11, 12, 13, 14], [20, 21, 22]] and
batch size is 2, timesteps is 3, then the first batch would be:
[[0, 1, 2],
[10, 11, 12]]
then the second batch:
[[2, 20, 21], # seq 0 finished, continuing from seq 2
[12, 13, 14]]
Note the overlap of 1 token between these two batches, and the continuation
of sequences across batches.
Args:
iterable: the iterable that yields sequences of integer token IDs.
batch_size: number of examples in a batch.
timesteps: length of each sequence in a batch.
return_incomplete_batch: if True return the incomplete batches, which
typically appears at the end of the dataset.
pad: set to True to pad the incomplete batches.
pad_value: the value to use for padding.
Yields:
batches: where batches['obs'] are the observations of size
[batch_size, timesteps], and batches['should_reset'] is a 0/1 mask of
the same size that marks sequence boundaries, e.g. the entries in this
mask are all 0 except at locations where a new sequence is starting.
"""
if return_incomplete_batch and not pad:
raise ValueError(
f'If return_incomplete_batch, then pad must be True, currently {pad}.')
iterator = iter(iterable)
elems = []
for _ in range(batch_size):
item = next(iterator)
elems.append(item)
start_batch = [True] * batch_size
iter_finished = False
loaded_finished = False
while not (iter_finished and loaded_finished):
batch = []
for i in range(batch_size):
# should_reset value is 1 when a new sequence begins.
# [old[-3], old[-2], old[-1], new[0], new[1], new[2]]
# [0, 0, 0, 1, 0, 0]
should_reset = np.zeros(timesteps, np.float32)
if start_batch[i]:
should_reset[0] = 1
# Pack new examples in the sequence until they go beyond the required
# timesteps.
while len(elems[i]) < timesteps:
should_reset[len(elems[i])] = 1
try:
item = next(iterator)
except StopIteration:
iter_finished = True
break
elems[i] = np.concatenate([elems[i], item])
batch.append(dict(obs=elems[i][:timesteps], should_reset=should_reset))
# Shift and make sure we have a 1 token overlap.
elems[i] = elems[i][timesteps - 1:]
# Since the last token is shifted to be the first token of the next batch,
# We need to make sure reset is handled properly as well.
start_batch[i] = (should_reset[-1] == 1)
# If any loaded data is not yet consumed in the output we should keep
# generating.
loaded_finished = all(e.size == 0 for e in elems)
if not return_incomplete_batch:
elem_len = len(batch[0]['obs'])
if (elem_len != timesteps or
not all(len(x['obs']) == elem_len for x in batch[1:])):
logging.info('Dropping the (last?) incomplete batch.')
break
if pad:
for x in batch:
x['obs'] = pad_to(x['obs'], timesteps, axis=0, pad_value=pad_value)
yield dict(
obs=np.stack([x['obs'] for x in batch], axis=0),
should_reset=np.stack([x['should_reset'] for x in batch], axis=0))
def batch_graph_text_pairs(
iterable: Iterator[Any],
batch_size: int,
timesteps: int,
pad_value: float = 0.,
seq_and_graph_id: bool = False) -> Iterator[Any]:
"""Batch graph and text pairs.
  This method pairs text with graphs. Each text sequence is split into chunks
  (with an overlap of 1) of size `timesteps`, and the graph associated with the
  text is paired with each of its chunks. The last incomplete chunk of each
  text sequence is padded with `pad_value`.
Args:
iterable: Iterable that returns (graph, sequence) pairs, graph can be
anything, and sequence is a list of tokenized token IDs.
batch_size: Number of examples in a batch.
timesteps: Window size for the sequences.
pad_value: Value to use for padding.
seq_and_graph_id: whether the `iterable` contains `seq_id` and `graph_id`.
Yields:
batch: a batch of text sequence paired with graphs.
"""
iterator = iter(iterable)
seqs = [None] * batch_size
graphs = [None] * batch_size
graph_ids = [None] * batch_size
seq_ids = [None] * batch_size
iter_finished = False
loaded_finished = False
while not (iter_finished and loaded_finished):
batch = []
for idx in range(batch_size):
should_reset = np.zeros(timesteps, np.float32)
# pylint: disable=g-explicit-length-test
if seqs[idx] is None or len(seqs[idx]) == 0:
should_reset[0] = 1
# One sequence exhausted, get the next example.
try:
if seq_and_graph_id:
(graph, seq), (graph_id, seq_id) = next(iterator)
graph_ids[idx] = graph_id
seq_ids[idx] = seq_id
else:
graph, seq = next(iterator)
seqs[idx] = seq
graphs[idx] = graph
except StopIteration:
iter_finished = True
seqs[idx] = np.array([pad_value], dtype=np.int32)
graphs[idx] = None
example = dict(obs=seqs[idx][:timesteps], graph=graphs[idx],
should_reset=should_reset)
if seq_and_graph_id:
example['seq_id'] = seq_ids[idx]
example['graph_id'] = graph_ids[idx]
batch.append(example)
# Make sure that there is an overlap, as we generate targets by shifting
# the tensor by 1 timestep. So the next element should be shifted by
      # `timesteps - 1` timesteps.
seqs[idx] = seqs[idx][timesteps - 1:]
# Make sure all loaded data are consumed in the output
loaded_finished = all(s.size == 0 for s in seqs)
# Also check for the last batch to avoid returning a fully empty batch
if iter_finished and all([np.all(b['obs'] == pad_value) for b in batch]):
break
# pad sequences to specified length
for e in batch:
e['obs'] = pad_to(e['obs'], timesteps, axis=0, pad_value=pad_value)
stacked_batch = dict(
obs=np.stack([e['obs'] for e in batch], axis=0),
graphs=[e['graph'] for e in batch],
should_reset=np.stack([e['should_reset'] for e in batch], axis=0))
if seq_and_graph_id:
stacked_batch['seq_id'] = np.stack(
[e['seq_id'] for e in batch], axis=0)
stacked_batch['graph_id'] = np.stack(
[e['graph_id'] for e in batch], axis=0)
yield stacked_batch
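# A minimal sketch of `batch_graph_text_pairs` (illustrative only; the graph
# here is just a placeholder string). It demonstrates the 1-token overlap
# between consecutive chunks and padding of the final chunk.
def _example_batch_graph_text_pairs():
  def source():
    yield ('graph-a', np.array([1, 2, 3, 4], dtype=np.int32))
  batches = list(batch_graph_text_pairs(
      source(), batch_size=1, timesteps=3, pad_value=0))
  # Chunks overlap by one token: [1, 2, 3] is followed by [3, 4, <pad>].
  assert [b['obs'].tolist() for b in batches] == [[[1, 2, 3]], [[3, 4, 0]]]
  assert batches[0]['graphs'] == ['graph-a']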
| deepmind-research-master | wikigraphs/wikigraphs/data/tools.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# WikiGraphs is licensed under the terms of the Creative Commons
# Attribution-ShareAlike 4.0 International (CC BY-SA 4.0) license.
#
# WikiText-103 data (unchanged) is licensed by Salesforce.com, Inc. under the
# terms of the Creative Commons Attribution-ShareAlike 4.0 International
# (CC BY-SA 4.0) license. You can find details about CC BY-SA 4.0 at:
#
# https://creativecommons.org/licenses/by-sa/4.0/legalcode
#
# Freebase data is licensed by Google LLC under the terms of the Creative
# Commons CC BY 4.0 license. You may obtain a copy of the License at:
#
# https://creativecommons.org/licenses/by/4.0/legalcode
#
# ==============================================================================
"""WikiGraphs data modules."""
from . import dataset
from . import io_tools
from . import paired_dataset
from . import tokenizers
from . import tools
from . import wikitext
| deepmind-research-master | wikigraphs/wikigraphs/data/__init__.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# WikiGraphs is licensed under the terms of the Creative Commons
# Attribution-ShareAlike 4.0 International (CC BY-SA 4.0) license.
#
# WikiText-103 data (unchanged) is licensed by Salesforce.com, Inc. under the
# terms of the Creative Commons Attribution-ShareAlike 4.0 International
# (CC BY-SA 4.0) license. You can find details about CC BY-SA 4.0 at:
#
# https://creativecommons.org/licenses/by-sa/4.0/legalcode
#
# Freebase data is licensed by Google LLC under the terms of the Creative
# Commons CC BY 4.0 license. You may obtain a copy of the License at:
#
# https://creativecommons.org/licenses/by/4.0/legalcode
#
# ==============================================================================
"""Some tools for I/O."""
import gzip
import io
import os
import re
from typing import NamedTuple, List, Iterator
from absl import logging
def read_txt_file(file_path: str, encoding: str = 'utf-8') -> str:
"""Read a plain txt file."""
with open(file_path, 'rb') as f:
content = f.read()
return content.decode(encoding)
def write_txt_file(file_path: str, txt: str, encoding: str = 'utf-8'):
"""Write the given txt string to file."""
make_dir_if_necessary(file_path)
with open(file_path, 'wb') as f:
f.write(txt.encode(encoding, 'surrogatepass'))
def read_gzip_txt_file(file_path: str, encoding: str = 'utf-8') -> str:
"""Read gzipped txt file."""
with open(file_path, 'rb') as f:
content = f.read()
with gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb') as f:
content = f.read()
return content.decode(encoding)
def make_dir_if_necessary(output_path):
output_dir = os.path.dirname(output_path)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
def write_lines_to_gzipped_file(file_path, lines):
make_dir_if_necessary(file_path)
with open(file_path, 'wb') as f_zip:
with gzip.GzipFile(fileobj=f_zip, mode='wb') as f:
f.write('\n'.join(lines).encode('utf-8'))
class Graph(NamedTuple):
title: str
center: str
edges: List[str]
def graphs_from_file(file_path: str) -> Iterator[Graph]:
"""Read freebase graphs from file.
Args:
file_path: path to the input `.gz` file that contains a list of graphs.
Yields:
    graphs: `Graph` tuples read from the file, one at a time.
"""
content = read_gzip_txt_file(file_path)
graph_header_sep_re = re.compile(
r'(<graph center=[^ ]+ title="[^"]+">\n)')
graph_header_re = re.compile(
r'<graph center=([^ ]+) title="([^"]+)">\n')
parts = graph_header_sep_re.split(content)
# Skip the first part which is empty
for i in range(1, len(parts), 2):
header, body = parts[i], parts[i + 1]
m = graph_header_re.match(header)
yield Graph(title=m.group(2),
center=m.group(1),
edges=body.strip().split('\n'))
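# A small round-trip sketch for `graphs_from_file`. The file format below is
# inferred from the regexes above (a header line followed by tab-separated
# edges); the path and triple contents are hypothetical.
def _example_graphs_from_file(tmp_path: str = '/tmp/example_graphs.gz'):
  lines = ['<graph center=ns/m.0abc title="Example_Title">',
           'ns/m.0abc\tns/common.topic.description\t"An example"']
  write_lines_to_gzipped_file(tmp_path, lines)
  graph = next(graphs_from_file(tmp_path))
  assert graph.center == 'ns/m.0abc'
  assert graph.title == 'Example_Title'
  assert graph.edges == lines[1:]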
_UNICODE_RE = re.compile(r'(\$[0-9A-Fa-f]{4})')
def normalize_freebase_string(s: str) -> str:
"""Expand the `$xxxx` escaped unicode characters in the input string."""
# '"' is escaped as '``', convert it back.
  s = s.replace('``', '"')
parts = _UNICODE_RE.split(s)
parts = [p if not _UNICODE_RE.match(p) else chr(int(p[1:], base=16))
for p in parts]
return ''.join(parts).replace('_', ' ')
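# A deterministic sketch of the normalization above: `$0041` is the escaped
# form of the unicode code point 0x41 ('A'), and underscores become spaces.
def _example_normalize_freebase_string():
  assert normalize_freebase_string('$0041pple_pie') == 'Apple pie'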
class GraphTextPair(NamedTuple):
"""Text paired with raw graph represented as in `edges`."""
center_node: str
title: str
edges: List[str]
text: str
def pair2lines(pair):
lines = [f'<graph center={pair.center_node} title="{pair.title}">']
lines.append('<section id="text">')
lines.append(pair.text)
lines.append('<section id="edges">')
lines.extend(pair.edges)
return lines
def write_pairs_to_gzip_txt_file(file_path, pairs):
logging.info('Writing %d pairs to %s.', len(pairs), file_path)
lines = []
for p in pairs:
lines.extend(pair2lines(p))
write_lines_to_gzipped_file(file_path, lines)
def read_pairs_from_gzip_txt_file(file_path: str) -> Iterator[GraphTextPair]:
"""Read graph-text pairs from gzip txt files.
Args:
file_path: a `.gz` file of graph-text pairs written in the same format as
using the `write_pairs_to_gzip_txt_file` function.
Yields:
Graph-text pairs from this file.
"""
content = read_gzip_txt_file(file_path)
graph_header_sep_re = re.compile(
r'(<graph center=[^ ]+ title="[^"]+">)')
graph_header_re = re.compile(
r'<graph center=([^ ]+) title="([^"]+)">$')
section_sep_re = re.compile(r'\n(<section id="[^"]+">\n)')
parts = graph_header_sep_re.split(content)
# Skip the first part which is empty
for i in range(1, len(parts), 2):
header, body = parts[i], parts[i + 1]
m = graph_header_re.match(header)
    # The split yields 5 parts in total: an empty first part, the "text"
    # section header, the text section, the "edges" section header, and the
    # edges section.
section_parts = section_sep_re.split(body)
yield GraphTextPair(center_node=m.group(1),
title=m.group(2),
text=section_parts[2],
edges=section_parts[-1].strip().split('\n'))
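# A minimal write/read round-trip sketch for the graph-text pair format
# defined above; the path and field values are hypothetical.
def _example_pair_round_trip(tmp_path: str = '/tmp/example_pairs.gz'):
  pair = GraphTextPair(
      center_node='ns/m.0abc',
      title='Example_Title',
      edges=['ns/m.0abc\tns/common.topic.description\t"An example"'],
      text='An example article .')
  write_pairs_to_gzip_txt_file(tmp_path, [pair])
  restored = next(read_pairs_from_gzip_txt_file(tmp_path))
  assert restored == pair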
| deepmind-research-master | wikigraphs/wikigraphs/data/io_tools.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# WikiGraphs is licensed under the terms of the Creative Commons
# Attribution-ShareAlike 4.0 International (CC BY-SA 4.0) license.
#
# WikiText-103 data (unchanged) is licensed by Salesforce.com, Inc. under the
# terms of the Creative Commons Attribution-ShareAlike 4.0 International
# (CC BY-SA 4.0) license. You can find details about CC BY-SA 4.0 at:
#
# https://creativecommons.org/licenses/by-sa/4.0/legalcode
#
# Freebase data is licensed by Google LLC under the terms of the Creative
# Commons CC BY 4.0 license. You may obtain a copy of the License at:
#
# https://creativecommons.org/licenses/by/4.0/legalcode
#
# ==============================================================================
"""Tests for wikigraphs.data.tools."""
from absl.testing import absltest
import numpy as np
from wikigraphs.data import tools
class ToolsTest(absltest.TestCase):
def test_padding(self):
np.testing.assert_array_equal(
tools.pad_to(np.arange(3), 5),
[0, 1, 2, 0, 0])
np.testing.assert_array_equal(
tools.pad_to(np.arange(3), 5, pad_value=-1),
[0, 1, 2, -1, -1])
np.testing.assert_array_equal(
tools.pad_to(np.arange(6).reshape(2, 3), 4, axis=0, pad_value=-1),
[[0, 1, 2],
[3, 4, 5],
[-1, -1, -1],
[-1, -1, -1]])
np.testing.assert_array_equal(
tools.pad_to(np.arange(6).reshape(2, 3), 4, axis=-1, pad_value=-1),
[[0, 1, 2, -1],
[3, 4, 5, -1]])
def test_dynamic_batch(self):
def dataset():
data = [[1, 2, 2, 2],
[1, 3, 3],
[1, 4]]
for d in data:
yield np.array(d, dtype=np.int32)
batches = list(tools.dynamic_batch(
dataset(), batch_size=2, timesteps=3, return_incomplete_batch=False))
self.assertLen(batches, 1)
np.testing.assert_array_equal(
batches[0]['obs'],
[[1, 2, 2], [1, 3, 3]])
np.testing.assert_array_equal(
batches[0]['should_reset'],
[[1, 0, 0], [1, 0, 0]])
batches = list(tools.dynamic_batch(
dataset(), batch_size=2, timesteps=3, return_incomplete_batch=True,
pad=True, pad_value=0))
    # Note: with `return_incomplete_batch=False` (the previous case) all
    # incomplete batches are dropped, and this can be more than just the
    # last batch.
self.assertLen(batches, 3)
np.testing.assert_array_equal(
batches[0]['obs'],
[[1, 2, 2], [1, 3, 3]])
np.testing.assert_array_equal(
batches[0]['should_reset'],
[[1, 0, 0], [1, 0, 0]])
np.testing.assert_array_equal(
batches[1]['obs'],
[[2, 2, 1], [3, 0, 0]])
np.testing.assert_array_equal(
batches[1]['should_reset'],
[[0, 0, 1], [0, 1, 0]])
np.testing.assert_array_equal(
batches[2]['obs'],
[[1, 4, 0], [0, 0, 0]])
np.testing.assert_array_equal(
batches[2]['should_reset'],
[[1, 0, 1], [1, 0, 0]])
with self.assertRaises(ValueError):
batches = list(tools.dynamic_batch(
dataset(), batch_size=2, timesteps=3, return_incomplete_batch=True,
pad=False))
def test_batch_graph_text_pairs(self):
def source():
yield (1, np.array([1, 1, 1, 1, 1], dtype=np.int32))
yield (2, np.array([2, 2], dtype=np.int32))
yield (3, np.array([3, 3, 3, 3, 3, 3], dtype=np.int32))
data_iter = tools.batch_graph_text_pairs(
source(), batch_size=2, timesteps=3, pad_value=0)
batches = list(data_iter)
self.assertLen(batches, 4)
batch = batches[0]
np.testing.assert_array_equal(
batch['obs'],
[[1, 1, 1],
[2, 2, 0]])
self.assertEqual(batch['graphs'], [1, 2])
np.testing.assert_array_equal(
batch['should_reset'],
[[1, 0, 0],
[1, 0, 0]])
batch = batches[1]
np.testing.assert_array_equal(
batch['obs'],
[[1, 1, 1],
[3, 3, 3]])
self.assertEqual(batch['graphs'], [1, 3])
np.testing.assert_array_equal(
batch['should_reset'],
[[0, 0, 0],
[1, 0, 0]])
batch = batches[2]
np.testing.assert_array_equal(
batch['obs'],
[[1, 0, 0],
[3, 3, 3]])
self.assertEqual(batch['graphs'], [1, 3])
np.testing.assert_array_equal(
batch['should_reset'],
[[0, 0, 0],
[0, 0, 0]])
batch = batches[3]
np.testing.assert_array_equal(
batch['obs'],
[[0, 0, 0],
[3, 3, 0]])
self.assertEqual(batch['graphs'], [None, 3])
np.testing.assert_array_equal(
batch['should_reset'],
[[1, 0, 0],
[0, 0, 0]])
def test_batch_graph_text_pairs_batch_size1(self):
def source():
yield (0, np.array([1, 2], dtype=np.int32))
yield (1, np.array([1, 2, 3, 4, 5, 6], dtype=np.int32))
data_iter = tools.batch_graph_text_pairs(
source(), batch_size=1, timesteps=3, pad_value=0)
batches = list(data_iter)
batch = batches[0]
np.testing.assert_array_equal(batch['obs'], [[1, 2, 0]])
self.assertEqual(batch['graphs'], [0])
np.testing.assert_array_equal(batch['should_reset'], [[1, 0, 0]])
batch = batches[1]
np.testing.assert_array_equal(batch['obs'], [[1, 2, 3]])
self.assertEqual(batch['graphs'], [1])
np.testing.assert_array_equal(batch['should_reset'], [[1, 0, 0]])
batch = batches[2]
np.testing.assert_array_equal(batch['obs'], [[3, 4, 5]])
self.assertEqual(batch['graphs'], [1])
np.testing.assert_array_equal(batch['should_reset'], [[0, 0, 0]])
batch = batches[3]
np.testing.assert_array_equal(batch['obs'], [[5, 6, 0]])
self.assertEqual(batch['graphs'], [1])
np.testing.assert_array_equal(batch['should_reset'], [[0, 0, 0]])
self.assertLen(batches, 4)
if __name__ == '__main__':
absltest.main()
| deepmind-research-master | wikigraphs/wikigraphs/data/tools_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# WikiGraphs is licensed under the terms of the Creative Commons
# Attribution-ShareAlike 4.0 International (CC BY-SA 4.0) license.
#
# WikiText-103 data (unchanged) is licensed by Salesforce.com, Inc. under the
# terms of the Creative Commons Attribution-ShareAlike 4.0 International
# (CC BY-SA 4.0) license. You can find details about CC BY-SA 4.0 at:
#
# https://creativecommons.org/licenses/by-sa/4.0/legalcode
#
# Freebase data is licensed by Google LLC under the terms of the Creative
# Commons CC BY 4.0 license. You may obtain a copy of the License at:
#
# https://creativecommons.org/licenses/by/4.0/legalcode
#
# ==============================================================================
"""Tools for accessing the graph-text paired datasets."""
import abc
import collections
from typing import List, Tuple, NamedTuple, Any, Dict, Optional, Union
from absl import logging
import jax.numpy as jnp
import jraph
import numpy as np
from wikigraphs.data import dataset
from wikigraphs.data import io_tools
from wikigraphs.data import tokenizers
from wikigraphs.data import tools
ArrayType = Union[np.ndarray, jnp.ndarray]
DATA_ROOT = '/tmp/data/wikigraphs'
class RawDataset(dataset.Dataset):
"""The untokenized raw dataset."""
def __init__(self,
subset: str = 'train',
shuffle_data: bool = False,
data_dir: str = None,
version: str = 'max256'):
"""Constructor.
Args:
subset: which subset to load.
shuffle_data: set to True to randomly shuffle the data.
      data_dir: if provided, this will be used instead of the default location
        to look for data; it must contain files named `train.gz`, `valid.gz`
        and `test.gz`.
version: which version of the data to load, this must be the name of a
directory in `DATA_ROOT`.
"""
super().__init__()
self._subset = subset
self._shuffle_data = shuffle_data
self._data_dir = data_dir or DATA_ROOT
self._dataset = None
allowed_versions = ('max256', 'max512', 'max1024')
if version not in allowed_versions:
raise ValueError(f'Version {version} not one of the allowed versions:'
f' {allowed_versions}.')
self._version = version
def _load_data(self):
"""Load and prepare the data iterator."""
if self._dataset is None:
self._dataset = list(io_tools.read_pairs_from_gzip_txt_file(
f'{self._data_dir}/{self._version}/{self._subset}.gz'))
def source():
n_pairs = len(self._dataset)
if self._shuffle_data:
idx = np.random.permutation(n_pairs)
else:
idx = np.arange(n_pairs)
for i in range(n_pairs):
yield self._dataset[idx[i]]
return source()
class Graph:
"""A convenience class for representing graphs."""
def __init__(self, nodes: List[str], edges: List[Tuple[int, int, str]]):
"""Construct a graph from a list of nodes and edges.
Args:
nodes: a list of node attributes, one for each node.
      edges: a list of (source_node_id, target_node_id, edge_attribute)
        triples, one for each edge.
"""
self._nodes = nodes
self._edges = edges
self._node2id = {n: i for i, n in enumerate(nodes)}
def nodes(self) -> List[str]:
return self._nodes
def edges(self) -> List[Tuple[int, int, str]]:
return self._edges
def node2id(self, node: str) -> int:
return self._node2id[node]
@classmethod
def from_edges(cls, edges: List[str]) -> 'Graph':
"""Build a graph instance from a list of edges."""
node2id = dict()
parsed_edges = []
next_node_id = 0
for e in edges:
src, edge, tgt = e.split('\t')[:3]
src_id = node2id.get(src, next_node_id)
if src_id == next_node_id:
node2id[src] = src_id
next_node_id += 1
tgt_id = node2id.get(tgt, next_node_id)
if tgt_id == next_node_id:
node2id[tgt] = tgt_id
next_node_id += 1
parsed_edges.append((src_id, tgt_id, edge))
id2node = {i: n for n, i in node2id.items()}
return Graph(nodes=[id2node[i] for i in range(next_node_id)],
edges=parsed_edges)
def to_edges(self) -> List[str]:
r"""Convert graph to a list of edges.
The converted list of edges should be compatible with the format specified
in io_tools and compatible with the `from_edges` method above.
Returns:
edges: one edge per line, with the (source, target, edge_type) separated
by `\t`.
"""
edges = []
for s, t, e in self._edges:
edges.append(f'{self._nodes[s]}\t{e}\t{self._nodes[t]}')
return edges
@classmethod
def subsample_nodes(
cls, graph: 'Graph', subsample_rate: float = 1.0, center_node: str = None
) -> 'Graph':
"""Subsample the nodes of a graph."""
graph_size = len(graph.nodes())
if subsample_rate == 1.0 or graph_size <= 1:
return graph
subsampled_nodes_id = np.arange(graph_size)
if subsample_rate < 1.0:
subsample_graph_size = int(subsample_rate * graph_size)
if center_node is not None:
# We need to keep the center node during subsampling
center_node_id = graph.node2id(center_node)
subsampled_nodes_id = subsampled_nodes_id[
subsampled_nodes_id != center_node_id]
subsample_graph_size = max(1, subsample_graph_size - 1)
subsampled_nodes_id = np.random.choice(
subsampled_nodes_id, subsample_graph_size, replace=False)
subsampled_nodes_id = np.append(subsampled_nodes_id, center_node_id)
else:
subsampled_nodes_id = np.random.choice(
subsampled_nodes_id, subsample_graph_size, replace=False)
subsampled_nodes_id = np.sort(subsampled_nodes_id)
map_subsampled_nodes_id = {
old_id: new_id for new_id, old_id in enumerate(subsampled_nodes_id)}
nodes = []
edges = []
for node_id, n in enumerate(graph.nodes()):
if node_id in subsampled_nodes_id:
nodes.append(n)
for out_node, in_node, e in graph.edges():
if out_node in subsampled_nodes_id and in_node in subsampled_nodes_id:
edges.append((map_subsampled_nodes_id[out_node],
map_subsampled_nodes_id[in_node], e))
return Graph(nodes=nodes, edges=edges)
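# An illustrative round trip through the `Graph` class above. Edges are
# tab-separated (source, edge_type, target) triples, matching `from_edges`.
def _example_graph_round_trip():
  edges = ['A\tE1\tB', 'B\tE2\tC']
  graph = Graph.from_edges(edges)
  assert graph.nodes() == ['A', 'B', 'C']
  assert graph.edges() == [(0, 1, 'E1'), (1, 2, 'E2')]
  assert graph.to_edges() == edges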
class ParsedGraphTextPair(NamedTuple):
"""Graph-text pair with graph parsed into a `Graph` instance."""
center_node: str
title: str
text: str
graph: Graph
class ParsedDataset(dataset.Dataset):
"""Raw dataset + parsing graphs into Graph instances."""
def __init__(self,
subset: str = 'train',
shuffle_data: bool = False,
data_dir: str = None,
version: str = 'max256'):
"""Constructor.
Args:
subset: which subset to load.
shuffle_data: set to True to randomly shuffle the data.
      data_dir: if provided, this will be used instead of the default location
        to look for data; it must contain files named `train.gz`, `valid.gz`
        and `test.gz`.
version: which version of the data to load, this must be the name of a
directory in `DATA_ROOT`.
"""
super().__init__()
self._raw_data = RawDataset(subset=subset, shuffle_data=False,
data_dir=data_dir, version=version)
self._shuffle_data = shuffle_data
self._dataset = None
def _load_data(self):
if self._dataset is None:
# pylint: disable=g-complex-comprehension
self._dataset = [ParsedGraphTextPair(center_node=pair.center_node,
title=pair.title,
text=pair.text,
graph=Graph.from_edges(pair.edges))
for pair in self._raw_data]
def source():
n_pairs = len(self._dataset)
if self._shuffle_data:
idx = np.random.permutation(n_pairs)
else:
idx = np.arange(n_pairs)
for i in range(n_pairs):
yield self._dataset[idx[i]]
return source()
class BaseGraph2TextDataset(dataset.Dataset):
"""Base dataset class for graph-to-text tasks."""
def __init__(self,
tokenizer: tokenizers.Tokenizer,
graph_tokenizer: Optional[tokenizers.GraphTokenizer] = None,
batch_size: int = 1,
timesteps: int = 128,
subset: str = 'train',
shuffle_data: bool = False,
repeat: bool = False,
version: str = 'max256',
data_dir: str = None,
subsample_nodes: float = 1.0,
graph_retrieval_dataset: bool = False,
debug: bool = False):
"""Constructor.
Args:
tokenizer: the tokenizer for text data.
graph_tokenizer: the tokenizer for graph data.
batch_size: number of sequences to put in a batch.
timesteps: number of tokens to put in a sequence in a batch.
subset: which subset to load.
shuffle_data: whether to shuffle data.
repeat: set to True to repeat the dataset infinitely, otherwise do only
one pass through the dataset.
version: which version of the data to load.
      data_dir: if set, load data from this directory instead of the default
        location (the `version` subdirectory under it is still used).
subsample_nodes: the proportion of the nodes in a graph to keep.
graph_retrieval_dataset: whether to construct the dataset for graph
retrieval tasks.
debug: set to True to use debug mode and only load a small number of
examples.
"""
super().__init__()
self._parsed_data = ParsedDataset(subset=subset,
shuffle_data=False,
data_dir=data_dir,
version=version)
self._tokenizer = tokenizer
self._graph_tokenizer = graph_tokenizer
self._batch_size = batch_size
self._timesteps = timesteps
self._subset = subset
self._shuffle_data = shuffle_data
self._repeat = repeat
self._subsample_nodes = subsample_nodes
self._graph_retrieval_dataset = graph_retrieval_dataset
self._debug = debug
self._dataset = None
@property
def num_articles(self):
return self._num_articles
@abc.abstractmethod
def _process_graph(self, center_node: str, graph: Graph):
"""Process the graph part of a `ParsedGraphTextPair` instance."""
def _process_graph_text_pair(
self, pair: ParsedGraphTextPair) -> Tuple[Any, np.ndarray]:
"""Process the given graph-text pair and prepare one example.
Args:
pair: the input `ParsedGraphTextPair` instance.
Returns:
graph: the processed graph content.
text: the tokenized text, a sequence of token IDs.
"""
return (self._process_graph(pair.center_node, pair.graph),
self._tokenizer.encode(
pair.text, prepend_bos=True, append_eos=True))
def _load_data(self):
"""Prepare the data."""
if self._dataset is None:
if self._debug:
data = [next(self._parsed_data) for _ in range(10)]
else:
data = list(self._parsed_data)
self._dataset = [self._process_graph_text_pair(p) for p in data]
self._num_articles = len(self._dataset)
logging.info('Loaded a total of %d examples from %s set.',
self._num_articles, self._subset)
if self._graph_retrieval_dataset:
      # For graph retrieval tasks we pair every text with every graph in the
      # dataset, and record their (graph_id, text_id) indices.
retrieval_data = []
for i, (g1, _) in enumerate(self._dataset):
for j, (_, t2) in enumerate(self._dataset):
retrieval_data.append(((g1, t2), (i, j)))
self._dataset = retrieval_data
logging.info('Constructed %d pairs.', len(self._dataset))
def source():
n_examples = len(self._dataset)
if self._shuffle_data:
idx = np.random.permutation(n_examples)
else:
idx = np.arange(n_examples)
for i in range(n_examples):
yield self._dataset[idx[i]]
def maybe_repeated_source():
if self._repeat:
while True:
yield from source()
else:
yield from source()
data_iter = tools.batch_graph_text_pairs(
maybe_repeated_source(),
self._batch_size,
self._timesteps + 1,
pad_value=self._tokenizer.pad_token(),
seq_and_graph_id=self._graph_retrieval_dataset)
if self._graph_retrieval_dataset:
data_iter = map(lambda x: dict( # pylint: disable=g-long-lambda
obs=x['obs'][:, :-1],
target=x['obs'][:, 1:],
should_reset=x['should_reset'][:, :-1],
# If target is a <pad> token then that target should not be predicted.
mask=(x['obs'][:, 1:] != self._tokenizer.pad_token()).astype(
np.float32),
seq_id=x['seq_id'],
graph_id=x['graph_id'],
graphs=self._process_graph_batch(x['graphs']),
), data_iter)
else:
data_iter = map(lambda x: dict( # pylint: disable=g-long-lambda
obs=x['obs'][:, :-1],
target=x['obs'][:, 1:],
should_reset=x['should_reset'][:, :-1],
# If target is a <pad> token then that target should not be predicted.
mask=(x['obs'][:, 1:] != self._tokenizer.pad_token()).astype(
np.float32),
graphs=self._process_graph_batch(x['graphs']),
), data_iter)
    # Filter out batches that do not have targets.
# This may happen when an observation contains a single last token of the
# sequence, which was predicted as target in the previous batch, and only
# used as observation in this batch, without a matching target. In this
# case all the masks are 0, therefore this batch provides no training signal
# and we can safely remove this batch. This also avoids some potential
# downstream issues.
data_iter = filter(lambda x: x['mask'].sum() > 0, data_iter)
return data_iter
@abc.abstractmethod
def _process_graph_batch(self, graphs: List[Any]):
"""Process a batch of graph data.
Args:
graphs: a list of graph data, each as returned by `_process_graph`.
Returns:
processed_graphs: processed tensor(s) that can be directly fed into a
model.
"""
@abc.abstractmethod
def return_faux_batch(self) -> Dict[str, np.ndarray]:
"""Return a fake batch with the right shapes and dtypes."""
class TextOnlyDataset(BaseGraph2TextDataset):
"""Text-only version of the paired dataset."""
def __init__(self,
tokenizer: tokenizers.Tokenizer,
graph_tokenizer: Optional[tokenizers.GraphTokenizer] = None,
batch_size: int = 1,
timesteps: int = 128,
subset: str = 'train',
shuffle_data: bool = False,
repeat: bool = False,
version: str = 'max256',
data_dir: str = None,
debug: bool = False,
**kwargs):
"""Constructor.
Args:
tokenizer: the tokenizer for text data.
graph_tokenizer: not used, keeping it here for compatibility with other
graph2text datasets.
batch_size: number of sequences to put in a batch.
timesteps: number of tokens to put in a sequence in a batch.
subset: which subset to load.
shuffle_data: whether to shuffle data.
repeat: set to True to repeat the dataset infinitely, otherwise do only
one pass through the dataset.
version: which version of the data to load.
      data_dir: if set, load data from this directory instead of the default
        location (the `version` subdirectory under it is still used).
debug: set to True to use debug mode and only load a small number of
examples.
**kwargs: other arguments (for interface compatibility).
"""
del graph_tokenizer
super().__init__(tokenizer=tokenizer,
graph_tokenizer=None,
batch_size=batch_size,
timesteps=timesteps,
subset=subset,
shuffle_data=shuffle_data,
repeat=repeat,
version=version,
data_dir=data_dir,
debug=debug)
def _process_graph_batch(self, graphs: List[Any]):
del graphs
return None
def _process_graph(self, center_node: str, graph: Graph):
del center_node
del graph
return None
def __next__(self):
batch = super().__next__()
# Data should be text-only.
del batch['graphs']
return batch
def return_faux_batch(self):
"""Return a fake batch with the right shapes and types."""
obs = np.zeros((self._batch_size, self._timesteps), dtype=np.int32)
target = np.zeros_like(obs)
should_reset = np.zeros_like(obs, dtype=np.float32)
mask = np.zeros_like(obs, dtype=np.float32)
return dict(obs=obs, target=target, should_reset=should_reset, mask=mask)
class Bow2TextDataset(BaseGraph2TextDataset):
"""Dataset for bag-of-words to text."""
def _process_graph(self, center_node: str, graph: Graph):
"""Process the graph part of a `ParsedGraphTextPair` instance."""
# We don't use center node in a bag-of-words representation
del center_node
if self._subsample_nodes < 1.0:
graph = Graph.subsample_nodes(graph, self._subsample_nodes)
bow = np.zeros(self._graph_tokenizer.vocab_size, dtype=np.int32)
for n in graph.nodes():
for t in self._graph_tokenizer.encode_node(n):
bow[t] += 1
for _, _, e in graph.edges():
for t in self._graph_tokenizer.encode_edge(e):
bow[t] += 1
return bow
def _process_graph_batch(self, graphs: List[Any]):
"""Process a batch of graph data.
Args:
graphs: a list of graph data, each as returned by `_process_graph`.
Returns:
processed_graphs: processed tensor(s) that can be directly fed into a
model.
"""
empty_graph_bow = np.zeros(self._graph_tokenizer.vocab_size, dtype=np.int32)
graphs = [g if g is not None else empty_graph_bow for g in graphs]
# B x [V] -> [B, V]
return np.stack(graphs, axis=0)
def return_faux_batch(self):
obs = np.zeros((self._batch_size, self._timesteps), dtype=np.int32)
target = np.zeros_like(obs)
should_reset = np.zeros_like(obs, dtype=np.float32)
mask = np.zeros_like(obs, dtype=np.float32)
graphs = np.zeros((self._batch_size, self._graph_tokenizer.vocab_size),
dtype=np.float32)
return dict(obs=obs, target=target, should_reset=should_reset, mask=mask,
graphs=graphs)
class Graph2TextDataset(BaseGraph2TextDataset):
"""Graph-to-text dataset.
This dataset encodes the graph nodes and edges using a bag-of-words
representation.
"""
def __init__(self,
tokenizer: tokenizers.Tokenizer,
graph_tokenizer: tokenizers.GraphTokenizer,
batch_size: int = 1,
timesteps: int = 128,
subset: str = 'train',
shuffle_data: bool = False,
repeat: bool = False,
version: str = 'max256',
data_dir: str = None,
subsample_nodes: float = 1.0,
graph_retrieval_dataset: bool = False,
debug: bool = False):
"""Constructor.
Args:
tokenizer: the tokenizer for text data.
graph_tokenizer: the tokenizer for graph data.
batch_size: number of sequences to put in a batch.
timesteps: number of tokens to put in a sequence in a batch.
subset: which subset to load.
shuffle_data: whether to shuffle data.
repeat: set to True to repeat the dataset infinitely, otherwise do only
one pass through the dataset.
version: which version of the data to load.
      data_dir: if set, load data from this directory instead of the default
        location (the `version` subdirectory under it is still used).
subsample_nodes: the proportion of the nodes in a graph to keep.
graph_retrieval_dataset: whether to construct the dataset for graph
retrieval tasks.
debug: set to True to use debug mode and only load a small number of
examples.
"""
self._graph_feature_dim = graph_tokenizer.vocab_size
super().__init__(tokenizer=tokenizer,
graph_tokenizer=graph_tokenizer,
batch_size=batch_size,
timesteps=timesteps,
subset=subset,
shuffle_data=shuffle_data,
repeat=repeat,
version=version,
data_dir=data_dir,
subsample_nodes=subsample_nodes,
graph_retrieval_dataset=graph_retrieval_dataset,
debug=debug)
self._placeholder_graph = self._process_graph(
center_node='<pad>',
graph=Graph(nodes=['<pad>'], edges=[]))
def _process_graph(self, center_node: str, graph: Graph):
"""Process the graph part of a `ParsedGraphTextPair` instance."""
if self._subsample_nodes < 1.0:
graph = Graph.subsample_nodes(graph, self._subsample_nodes, center_node)
nodes = graph.nodes()
edges = graph.edges()
n_edges = len(edges)
sender = np.zeros(n_edges, dtype=np.int32)
receiver = np.zeros(n_edges, dtype=np.int32)
nodes_bow = []
edges_bow = []
for n in nodes:
bow = collections.defaultdict(int)
for t in self._graph_tokenizer.encode_node(n):
bow[t] += 1
nodes_bow.append(bow)
for i, (s, r, e) in enumerate(edges):
bow = collections.defaultdict(int)
for t in self._graph_tokenizer.encode_edge(e):
bow[t] += 1
edges_bow.append(bow)
sender[i] = s
receiver[i] = r
return (nodes_bow, edges_bow, sender, receiver, graph.node2id(center_node))
def _to_graph_with_features(
self, nodes_bow, edges_bow, sender, receiver, center_node_id):
"""Convert the input to a `jraph.GraphsTuple` instance."""
n_nodes = len(nodes_bow)
n_edges = len(edges_bow)
# +1 for the center node indicator
nodes = np.zeros((n_nodes, self._graph_feature_dim + 1), dtype=np.float32)
edges = np.zeros((n_edges, self._graph_feature_dim), dtype=np.float32)
nodes[center_node_id][-1] = 1
for i, bow in enumerate(nodes_bow):
for t, c in bow.items():
nodes[i][t] = c
for i, bow in enumerate(edges_bow):
for t, c in bow.items():
edges[i][t] = c
return jraph.GraphsTuple(
nodes=nodes, edges=edges, senders=sender, receivers=receiver,
globals=None, n_node=np.array([n_nodes], dtype=np.int32),
n_edge=np.array([n_edges], dtype=np.int32))
def _process_graph_batch(self, graphs: List[Any]):
"""Process a batch of graph data.
Args:
graphs: a list of graph data, each as returned by `_process_graph`.
Returns:
processed_graphs: a list of processed tensor(s).
"""
graphs = [g if g is not None else self._placeholder_graph for g in graphs]
return [self._to_graph_with_features(*g) for g in graphs]
def return_faux_batch(self) -> Dict[str, np.ndarray]:
"""Return a fake batch with the right shapes and dimensions."""
obs = np.zeros([self._batch_size, self._timesteps], dtype=np.int32)
target = np.zeros([self._batch_size, self._timesteps], dtype=np.int32)
should_reset = np.zeros_like(obs, np.float32)
mask = np.zeros_like(obs, np.float32)
# A batch should contain `batch_size` graphs. Here we make sure each graph
# has one node and one edge.
graphs = self._batch_size * [jraph.GraphsTuple(
nodes=np.zeros([1, self._graph_feature_dim + 1], dtype=np.float32),
edges=np.zeros([1, self._graph_feature_dim], dtype=np.float32),
senders=np.zeros([1], dtype=np.int32),
receivers=np.zeros([1], dtype=np.int32),
n_node=np.ones(1, dtype=np.int32),
n_edge=np.ones(1, dtype=np.int32),
globals=None)]
return dict(obs=obs, target=target, mask=mask, should_reset=should_reset,
graphs=graphs)
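# A minimal usage sketch for `Graph2TextDataset`. The vocab paths below match
# the defaults used in the tests, and the WikiGraphs data is assumed to have
# been downloaded to the default `DATA_ROOT` location.
def _example_graph2text_dataset():
  tokenizer = tokenizers.WordTokenizer(
      vocab_file='/tmp/data/wikitext-vocab.csv')
  graph_tokenizer = tokenizers.GraphTokenizer(
      vocab_file='/tmp/data/graph-vocab.csv')
  data = Graph2TextDataset(tokenizer, graph_tokenizer, batch_size=4,
                           timesteps=128, subset='valid')
  batch = next(iter(data))
  # batch['obs'], batch['target'] and batch['mask'] are [4, 128] arrays;
  # batch['graphs'] is a list of 4 `jraph.GraphsTuple`s.
  assert batch['obs'].shape == (4, 128)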
| deepmind-research-master | wikigraphs/wikigraphs/data/paired_dataset.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# WikiGraphs is licensed under the terms of the Creative Commons
# Attribution-ShareAlike 4.0 International (CC BY-SA 4.0) license.
#
# WikiText-103 data (unchanged) is licensed by Salesforce.com, Inc. under the
# terms of the Creative Commons Attribution-ShareAlike 4.0 International
# (CC BY-SA 4.0) license. You can find details about CC BY-SA 4.0 at:
#
# https://creativecommons.org/licenses/by-sa/4.0/legalcode
#
# Freebase data is licensed by Google LLC under the terms of the Creative
# Commons CC BY 4.0 license. You may obtain a copy of the License at:
#
# https://creativecommons.org/licenses/by/4.0/legalcode
#
# ==============================================================================
"""Base class of the datasets."""
import abc
from typing import Any, Iterator
class Dataset(abc.ABC):
"""Base class for all datasets.
  All sub-classes should define `_load_data()`, which returns an iterator
  over the dataset; `__iter__` calls it and stores the result as
  `self._data_iter`.
"""
def __init__(self):
"""Constructor."""
self._data_iter = None # An iterator produced by `self._load_data`.
@abc.abstractmethod
def _load_data(self) -> Iterator[Any]:
"""Prepare data for another pass through the dataset.
This method should return a generator in a child class.
"""
def __next__(self):
return next(self._data_iter)
def __iter__(self):
self._data_iter = self._load_data()
return self
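# A minimal subclass sketch (illustrative only) showing the `_load_data`
# contract described above: return a fresh iterator on each call, and the
# base class handles the iteration protocol.
class _RangeDataset(Dataset):
  """Example dataset that yields the integers 0..n-1 on each pass."""

  def __init__(self, n: int):
    super().__init__()
    self._n = n

  def _load_data(self) -> Iterator[Any]:
    return iter(range(self._n))
# Usage: `list(_RangeDataset(3))` yields [0, 1, 2].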
| deepmind-research-master | wikigraphs/wikigraphs/data/dataset.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# WikiGraphs is licensed under the terms of the Creative Commons
# Attribution-ShareAlike 4.0 International (CC BY-SA 4.0) license.
#
# WikiText-103 data (unchanged) is licensed by Salesforce.com, Inc. under the
# terms of the Creative Commons Attribution-ShareAlike 4.0 International
# (CC BY-SA 4.0) license. You can find details about CC BY-SA 4.0 at:
#
# https://creativecommons.org/licenses/by-sa/4.0/legalcode
#
# Freebase data is licensed by Google LLC under the terms of the Creative
# Commons CC BY 4.0 license. You may obtain a copy of the License at:
#
# https://creativecommons.org/licenses/by/4.0/legalcode
#
# ==============================================================================
"""Wikitext-103 datasets."""
import re
from typing import NamedTuple, List
from absl import logging
import numpy as np
from wikigraphs.data import dataset
from wikigraphs.data import tokenizers
from wikigraphs.data import tools
# The data directory that contains subdirectories `wikitext-103` and
# `wikitext-103-raw`.
DATA_ROOT = '/tmp/data/wikitext-103'
class WikitextArticle(NamedTuple):
title: str
text: str
def articles_from_file(file_path: str) -> List[WikitextArticle]:
"""Read wikitext articles from file.
Args:
file_path: path to the input `.tokens` file.
Returns:
A list of `WikitextArticle` tuples.
"""
with open(file_path, mode='rb') as f:
content = f.read()
content = content.decode('utf-8')
title_re = re.compile(r'(\n = ([^=].*) = \n \n)')
parts = title_re.split(content)
# Skip the first part which is empty
return [WikitextArticle(title=parts[i+1], text=parts[i] + parts[i+2])
for i in range(1, len(parts), 3)]
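# A minimal sketch of the title-based split above; the path and article
# content are hypothetical but follow the wikitext ` = Title = ` convention.
def _example_articles_from_file(tmp_path: str = '/tmp/example.tokens'):
  content = '\n = Title A = \n \n Body a .\n = Title B = \n \n Body b .'
  with open(tmp_path, 'wb') as f:
    f.write(content.encode('utf-8'))
  articles = articles_from_file(tmp_path)
  assert [a.title for a in articles] == ['Title A', 'Title B']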
class RawDataset(dataset.Dataset):
"""Raw text dataset for wikitext-103."""
def __init__(self,
subset: str = 'train',
shuffle_data: bool = False,
data_dir: str = None,
version: str = 'tokens'):
"""Constructor.
Args:
subset: which subset to load, one of {"train", "valid", "test"}.
shuffle_data: if set to True the data will be randomly shuffled.
data_dir: if provided will be used instead of the default `DATA_ROOT` as
the directory that contains the data.
version: one of {'tokens', 'raw'}
"""
super().__init__()
self._subset = subset
self._shuffle_data = shuffle_data
self._data_dir = data_dir or DATA_ROOT
self._dataset = None
allowed_versions = ('tokens', 'raw')
if version not in allowed_versions:
raise ValueError(f'Version must be one of {allowed_versions}.')
self._version = version
def _load_data(self):
"""Prepare data for another pass through the dataset."""
if self._dataset is None:
data_root = self._data_dir + ('-raw' if self._version == 'raw' else '')
self._dataset = articles_from_file(
f'{data_root}/wiki.{self._subset}.{self._version}')
def source():
n_articles = len(self._dataset)
if self._shuffle_data:
idx = np.random.permutation(n_articles)
else:
idx = np.arange(n_articles)
for i in range(n_articles):
yield self._dataset[idx[i]]
return source()
def normalize_title(title: str) -> str:
"""Normalize the wikitext article title by handling special characters."""
return title.replace(
'@-@', '-').replace('@,@', ',').replace('@.@', '.').replace(' ', '')
class WikitextDataset(dataset.Dataset):
"""Tokenized dataset for wikitext-103."""
def __init__(self,
tokenizer: tokenizers.Tokenizer,
batch_size: int = 1,
timesteps: int = 128,
subset: str = 'train',
shuffle_data: bool = True,
data_dir: str = None,
repeat: bool = False,
debug: bool = False,
**kwargs):
"""Constructor.
Args:
tokenizer: a tokenizer for text data.
batch_size: number of sequences to put into a batch.
timesteps: length of the sequences.
subset: which subset to load, one of {"train", "valid", "test"}.
shuffle_data: if set to True the data will be randomly shuffled.
data_dir: if provided will be used instead of the default `DATA_ROOT` as
the directory that contains the data.
repeat: set to False to go through the data only once, otherwise go
through the data indefinitely.
debug: set to True to only load a small amount of data for fast debugging.
**kwargs: other arguments (for interface compatibility).
"""
super().__init__()
self._tokenizer = tokenizer
self._batch_size = batch_size
self._timesteps = timesteps
self._subset = subset
self._shuffle_data = shuffle_data
self._data_dir = data_dir
self._repeat = repeat
self._debug = debug
self._dataset = None
def _load_data(self):
"""Prepare data for one pass through the dataset."""
# Pre-tokenize everything in our dataset so we don't have to when going
# through the data more than once.
if not self._dataset:
raw_dataset = RawDataset(
subset=self._subset, shuffle_data=False, data_dir=self._data_dir)
if self._debug:
# Load a small number of examples for debugging.
self._dataset = [
self._tokenizer.encode(next(raw_dataset).text, prepend_bos=True)
for _ in range(5)]
else:
self._dataset = [self._tokenizer.encode(item.text, prepend_bos=True)
for item in raw_dataset]
logging.info('%s set loaded, total %d examples.',
self._subset, len(self._dataset))
    def source():
      n_examples = len(self._dataset)
      if self._shuffle_data:
        idx = np.random.permutation(n_examples)
      else:
        idx = np.arange(n_examples)
      for i in range(n_examples):
        yield self._dataset[idx[i]]
def repeated_source():
if self._repeat:
while True:
yield from source()
else:
yield from source()
data_iter = tools.dynamic_batch(
repeated_source(),
self._batch_size,
self._timesteps + 1, # Extra token to count for the overlap.
return_incomplete_batch=True,
pad=True,
pad_value=self._tokenizer.pad_token())
data_iter = map(lambda x: dict( # pylint: disable=g-long-lambda
obs=x['obs'][:, :-1],
target=x['obs'][:, 1:],
should_reset=x['should_reset'][:, :-1],
mask=(x['obs'][:, 1:] != self._tokenizer.pad_token()).astype(
np.float32),
), data_iter)
return data_iter
def return_faux_batch(self):
"""Return a fake batch with the right shapes and dtypes."""
obs = np.zeros((self._batch_size, self._timesteps), dtype=np.int32)
target = np.zeros_like(obs, dtype=np.int32)
should_reset = np.zeros_like(obs, dtype=np.float32)
mask = np.zeros_like(obs, dtype=np.float32)
return dict(obs=obs, target=target, should_reset=should_reset, mask=mask)
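# A minimal usage sketch for `WikitextDataset`; the vocab path matches the
# default used in the tests, and the wikitext-103 data is assumed to be
# downloaded to the default location.
def _example_wikitext_dataset():
  tokenizer = tokenizers.WordTokenizer(
      vocab_file='/tmp/data/wikitext-vocab.csv')
  data = WikitextDataset(tokenizer, batch_size=2, timesteps=64,
                         subset='valid', repeat=False)
  batch = next(iter(data))
  # 'target' is 'obs' shifted by one token; 'mask' zeroes out pad targets.
  assert batch['obs'].shape == (2, 64)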
| deepmind-research-master | wikigraphs/wikigraphs/data/wikitext.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# WikiGraphs is licensed under the terms of the Creative Commons
# Attribution-ShareAlike 4.0 International (CC BY-SA 4.0) license.
#
# WikiText-103 data (unchanged) is licensed by Salesforce.com, Inc. under the
# terms of the Creative Commons Attribution-ShareAlike 4.0 International
# (CC BY-SA 4.0) license. You can find details about CC BY-SA 4.0 at:
#
# https://creativecommons.org/licenses/by-sa/4.0/legalcode
#
# Freebase data is licensed by Google LLC under the terms of the Creative
# Commons CC BY 4.0 license. You may obtain a copy of the License at:
#
# https://creativecommons.org/licenses/by/4.0/legalcode
#
# ==============================================================================
"""Tests for wikigraphs.data.paired_dataset."""
from absl.testing import absltest
import jraph
from wikigraphs.data import io_tools
from wikigraphs.data import paired_dataset
from wikigraphs.data import tokenizers
from wikigraphs.data import wikitext
WIKITEXT_ROOT = '/tmp/data/wikitext-103'
WIKIGRAPHS_ROOT = '/tmp/data/wikigraphs'
WIKITEXT_VOCAB_FILE = '/tmp/data/wikitext-vocab.csv'
GRAPH_VOCAB_FILE = '/tmp/data/graph-vocab.csv'
class PairedDatasetTest(absltest.TestCase):
def test_raw_paired_dataset_size(self):
dataset = paired_dataset.RawDataset(
subset='valid', shuffle_data=False, data_dir=WIKIGRAPHS_ROOT)
pairs = list(dataset)
self.assertLen(pairs, 48)
self.assertEqual(pairs[0].title, 'Homarus_gammarus')
self.assertEqual(pairs[-1].title, 'Rakie_Ayola')
# Make sure the content of the articles match the original
wikitext_set = wikitext.RawDataset(
subset='valid', shuffle_data=False, version='raw',
data_dir=WIKITEXT_ROOT)
title2article = {wikitext.normalize_title(a.title).replace(' ', ''): a.text
for a in wikitext_set}
for p in pairs:
title = io_tools.normalize_freebase_string(p.title).replace(' ', '')
article = title2article.get(title, None)
self.assertIsNotNone(article)
self.assertEqual(article, p.text)
def test_graph_from_edges(self):
edges = ['A\tE1\tB',
'A\tE2\tC',
'B\tE1\tC',
'C\tE3\tD',
'C\tE2\tE']
graph = paired_dataset.Graph.from_edges(edges)
self.assertEqual(graph.nodes(), ['A', 'B', 'C', 'D', 'E'])
self.assertEqual(graph.edges(), [(0, 1, 'E1'),
(0, 2, 'E2'),
(1, 2, 'E1'),
(2, 3, 'E3'),
(2, 4, 'E2')])
def test_graph_to_edges(self):
edges = ['A\tE1\tB',
'A\tE2\tC',
'B\tE1\tC',
'C\tE3\tD',
'C\tE2\tE']
graph = paired_dataset.Graph.from_edges(edges)
self.assertEqual(graph.to_edges(), edges)
def test_bow2text_dataset(self):
tokenizer = tokenizers.WordTokenizer(vocab_file=WIKITEXT_VOCAB_FILE)
graph_tokenizer = tokenizers.GraphTokenizer(vocab_file=GRAPH_VOCAB_FILE)
batch_size = 4
seq_len = 256
dataset = paired_dataset.Bow2TextDataset(
tokenizer,
graph_tokenizer,
batch_size=batch_size,
timesteps=seq_len,
subset='valid',
subsample_nodes=0.7,
repeat=False,
data_dir=WIKIGRAPHS_ROOT)
num_tokens = 0
for batch in dataset:
num_tokens += batch['mask'].sum()
self.assertEqual(batch['graphs'].shape,
(batch_size, graph_tokenizer.vocab_size))
raw_dataset = paired_dataset.RawDataset(subset='valid', shuffle_data=False)
raw_num_tokens = 0
n_pairs = 0
for pair in raw_dataset:
raw_num_tokens += len(tokenizer.encode(
pair.text, prepend_bos=True, append_eos=True))
n_pairs += 1
# The first token of each example is not counted by `mask` as it masks the
# targets, and the first token of each example never appears in the targets.
self.assertEqual(raw_num_tokens, num_tokens + n_pairs)
def test_graph2text_dataset(self):
tokenizer = tokenizers.WordTokenizer(vocab_file=WIKITEXT_VOCAB_FILE)
graph_tokenizer = tokenizers.GraphTokenizer(vocab_file=GRAPH_VOCAB_FILE)
batch_size = 4
seq_len = 256
dataset = paired_dataset.Graph2TextDataset(
tokenizer,
graph_tokenizer,
batch_size=batch_size,
timesteps=seq_len,
subsample_nodes=0.8,
subset='valid',
data_dir=WIKIGRAPHS_ROOT)
data_iter = iter(dataset)
batch = next(data_iter)
self.assertEqual(batch['obs'].shape, (batch_size, seq_len))
self.assertEqual(batch['target'].shape, (batch_size, seq_len))
self.assertEqual(batch['should_reset'].shape, (batch_size, seq_len))
self.assertEqual(batch['mask'].shape, (batch_size, seq_len))
self.assertIsInstance(batch['graphs'], list)
self.assertLen(batch['graphs'], batch_size)
for i in range(batch_size):
self.assertIsInstance(batch['graphs'][i], jraph.GraphsTuple)
# +1 for the center_node mask
self.assertEqual(
batch['graphs'][i].nodes.shape[-1], graph_tokenizer.vocab_size + 1)
self.assertEqual(
batch['graphs'][i].edges.shape[-1], graph_tokenizer.vocab_size)
n_edges = batch['graphs'][i].n_edge
self.assertEqual(batch['graphs'][i].senders.shape, (n_edges,))
self.assertEqual(batch['graphs'][i].receivers.shape, (n_edges,))
# Make sure the token count matches across the tokenized data and the raw
# data set.
num_tokens = 0
for batch in dataset:
num_tokens += batch['mask'].sum()
raw_dataset = paired_dataset.RawDataset(subset='valid', shuffle_data=False)
raw_num_tokens = 0
n_pairs = 0
for pair in raw_dataset:
raw_num_tokens += len(tokenizer.encode(
pair.text, prepend_bos=True, append_eos=True))
n_pairs += 1
# The first token of each example is not counted by `mask` as it masks the
# targets, and the first token of each example never appears in the targets.
self.assertEqual(raw_num_tokens, num_tokens + n_pairs)
def test_text_only_dataset(self):
tokenizer = tokenizers.WordTokenizer(vocab_file=WIKITEXT_VOCAB_FILE)
batch_size = 4
seq_len = 256
dataset = paired_dataset.TextOnlyDataset(
tokenizer,
batch_size=batch_size,
timesteps=seq_len,
subset='valid',
data_dir=WIKIGRAPHS_ROOT)
data_iter = iter(dataset)
batch = next(data_iter)
faux_batch = dataset.return_faux_batch()
self.assertCountEqual(list(batch.keys()),
['obs', 'target', 'should_reset', 'mask'])
self.assertCountEqual(list(faux_batch.keys()),
['obs', 'target', 'should_reset', 'mask'])
for k, v in batch.items():
faux_v = faux_batch[k]
self.assertEqual(v.shape, faux_v.shape)
self.assertEqual(v.dtype, faux_v.dtype)
self.assertEqual(batch['obs'].shape, (batch_size, seq_len))
self.assertEqual(batch['target'].shape, (batch_size, seq_len))
self.assertEqual(batch['should_reset'].shape, (batch_size, seq_len))
self.assertEqual(batch['mask'].shape, (batch_size, seq_len))
num_tokens = 0
for batch in dataset:
num_tokens += batch['mask'].sum()
raw_dataset = paired_dataset.RawDataset(subset='valid', shuffle_data=False)
raw_num_tokens = 0
n_pairs = 0
for pair in raw_dataset:
raw_num_tokens += len(tokenizer.encode(
pair.text, prepend_bos=True, append_eos=True))
n_pairs += 1
self.assertEqual(num_tokens + n_pairs, raw_num_tokens)
def test_bow_retrieval_dataset(self):
tokenizer = tokenizers.WordTokenizer(vocab_file=WIKITEXT_VOCAB_FILE)
graph_tokenizer = tokenizers.GraphTokenizer(vocab_file=GRAPH_VOCAB_FILE)
batch_size = 4
seq_len = 256
dataset = paired_dataset.Bow2TextDataset(
tokenizer,
graph_tokenizer,
batch_size=batch_size,
timesteps=seq_len,
subsample_nodes=0.8,
graph_retrieval_dataset=True,
subset='valid',
data_dir=WIKIGRAPHS_ROOT)
data_iter = iter(dataset)
batch = next(data_iter)
self.assertEqual(batch['obs'].shape, (batch_size, seq_len))
self.assertEqual(batch['target'].shape, (batch_size, seq_len))
self.assertEqual(batch['should_reset'].shape, (batch_size, seq_len))
self.assertEqual(batch['mask'].shape, (batch_size, seq_len))
self.assertEqual(batch['graph_id'].shape, (batch_size,))
self.assertEqual(batch['seq_id'].shape, (batch_size,))
def test_graph_retrieval_dataset(self):
tokenizer = tokenizers.WordTokenizer(vocab_file=WIKITEXT_VOCAB_FILE)
graph_tokenizer = tokenizers.GraphTokenizer(vocab_file=GRAPH_VOCAB_FILE)
batch_size = 4
seq_len = 256
dataset = paired_dataset.Graph2TextDataset(
tokenizer,
graph_tokenizer,
batch_size=batch_size,
timesteps=seq_len,
subsample_nodes=0.8,
graph_retrieval_dataset=True,
subset='valid',
data_dir=WIKIGRAPHS_ROOT)
data_iter = iter(dataset)
batch = next(data_iter)
self.assertEqual(batch['obs'].shape, (batch_size, seq_len))
self.assertEqual(batch['target'].shape, (batch_size, seq_len))
self.assertEqual(batch['should_reset'].shape, (batch_size, seq_len))
self.assertEqual(batch['mask'].shape, (batch_size, seq_len))
self.assertEqual(batch['graph_id'].shape, (batch_size,))
self.assertEqual(batch['seq_id'].shape, (batch_size,))
if __name__ == '__main__':
absltest.main()
| deepmind-research-master | wikigraphs/wikigraphs/data/paired_dataset_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# WikiGraphs is licensed under the terms of the Creative Commons
# Attribution-ShareAlike 4.0 International (CC BY-SA 4.0) license.
#
# WikiText-103 data (unchanged) is licensed by Salesforce.com, Inc. under the
# terms of the Creative Commons Attribution-ShareAlike 4.0 International
# (CC BY-SA 4.0) license. You can find details about CC BY-SA 4.0 at:
#
# https://creativecommons.org/licenses/by-sa/4.0/legalcode
#
# Freebase data is licensed by Google LLC under the terms of the Creative
# Commons CC BY 4.0 license. You may obtain a copy of the License at:
#
# https://creativecommons.org/licenses/by/4.0/legalcode
#
# ==============================================================================
"""Tokenizers for text data."""
import abc
import csv
import io
import re
from typing import List
import nltk
import numpy as np
from wikigraphs.data import io_tools
class Tokenizer(abc.ABC):
"""Base class for tokenizers."""
@abc.abstractmethod
def encode(self,
inputs: str,
prepend_bos: bool = False,
append_eos: bool = False) -> np.ndarray:
"""Encode input string into an array of token IDs.
Args:
inputs: a string.
prepend_bos: set to True to add <bos> token at the beginning of the token
sequence.
append_eos: set to True to add <eos> token at the end of the token
sequence.
Returns:
tokens: [n_tokens] int array.
"""
@abc.abstractmethod
def decode(self, inputs) -> str:
"""Decode a sequence of tokens back into a string.
Args:
inputs: array or list of ints.
Returns:
s: the decoded string using this tokenizer.
"""
@property
@abc.abstractmethod
def vocab_size(self) -> int:
"""Size of the vocabulary."""
@abc.abstractmethod
def pad_token(self) -> int:
"""ID of the <pad> token."""
@abc.abstractmethod
def bos_token(self) -> int:
"""ID of the <bos> token."""
class WordTokenizer(Tokenizer):
"""Word-level tokenizer for white-space separated text data."""
def __init__(self, vocab_file: str):
"""Constructor.
Args:
vocab_file: a csv vocab file.
"""
content = io_tools.read_txt_file(vocab_file, encoding='utf-8')
with io.StringIO(content) as f:
r = csv.reader(f)
vocab = [w for w, _ in r]
# Add pad and bos tokens to the vocab
to_add = ['<pad>', '<bos>']
if '<unk>' not in vocab:
to_add.append('<unk>')
vocab = to_add + vocab
# token-index mappings
self._t2i = {t: i for i, t in enumerate(vocab)}
self._i2t = {i: t for t, i in self._t2i.items()}
self._unk_token = self._t2i['<unk>']
self._bos_token = self._t2i['<bos>']
self._pad_token = self._t2i['<pad>']
@property
def vocab_size(self):
return len(self._t2i)
def encode(self, inputs, prepend_bos=False, append_eos=False):
tokens = [self._t2i.get(t, self._unk_token) for t in inputs.split(' ') if t]
if prepend_bos:
tokens = [self._bos_token] + tokens
if append_eos:
# Reuse <bos> as <eos>.
tokens.append(self._bos_token)
return np.array(tokens, dtype=np.int32)
def decode(self, inputs):
"""Decode a sequence of token IDs back into a string."""
# Remove the first <bos> token if there is any.
if inputs[0] == self._bos_token:
inputs = inputs[1:]
tokens = []
for i in inputs:
# Use <bos> also as <eos> and stop there.
if i == self._bos_token:
break
tokens.append(self._i2t[i])
return ' '.join(tokens)
def pad_token(self):
return self._pad_token
def bos_token(self):
return self._bos_token
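# A small usage sketch for `WordTokenizer`; the vocab path is hypothetical.
# The decode round trip is exact only for in-vocabulary tokens, since
# out-of-vocabulary words are mapped to <unk>.
def _example_word_tokenizer(vocab_file: str = '/tmp/data/wikitext-vocab.csv'):
  tokenizer = WordTokenizer(vocab_file=vocab_file)
  ids = tokenizer.encode('the game', prepend_bos=True, append_eos=True)
  assert tokenizer.decode(ids) == 'the game'  # Assuming both are in vocab.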
class GraphTokenizer:
"""Tokenizer for the content on the graphs."""
def __init__(self, vocab_file: str):
"""Constructor.
Args:
vocab_file: path to a vocab file.
"""
content = io_tools.read_txt_file(vocab_file, encoding='utf-16')
vocab = content.split('\n')
vocab = ['<pad>', '<bos>', '<unk>'] + vocab
# token-index mappings
self._t2i = {t: i for i, t in enumerate(vocab)}
self._i2t = {i: t for t, i in self._t2i.items()}
self._unk_token = self._t2i['<unk>']
self._bos_token = self._t2i['<bos>']
self._pad_token = self._t2i['<pad>']
@property
def vocab_size(self):
return len(self._t2i)
def encode_node(self, txt: str) -> np.ndarray:
return np.array([self._t2i.get(t, self._unk_token)
for t in self.split_node(txt)])
def encode_edge(self, txt: str) -> np.ndarray:
return np.array([self._t2i.get(t, self._unk_token)
for t in self.split_edge(txt)])
def encode(self, inputs, prepend_bos=False, append_eos=False):
tokens = [self._t2i.get(t, self._unk_token) for t in inputs.split(' ') if t]
if prepend_bos:
tokens = [self._bos_token] + tokens
if append_eos:
# Reuse <bos> as <eos>.
tokens.append(self._bos_token)
return np.array(tokens, dtype=np.int32)
def decode(self, inputs):
"""Decode a sequence of token IDs back into a string."""
# Remove the first <bos> token if there is any.
if inputs[0] == self._bos_token:
inputs = inputs[1:]
tokens = []
for i in inputs:
# Use <bos> also as <eos> and stop there.
if i == self._bos_token:
break
tokens.append(self._i2t[i])
return ' '.join(tokens)
@classmethod
def split_node(cls, txt: str) -> List[str]:
"""Split a node string into a sequence of tokens."""
if txt[0] == '"' and txt[-1] == '"': # Node is a string literal.
tokens = nltk.wordpunct_tokenize(io_tools.normalize_freebase_string(
txt[1:-1].lower()))
for i, t in enumerate(tokens):
if t.isnumeric():
tokens[i] = '<number>'
return tokens
else: # If node is not a string literal it is always an entity.
return ['<entity>']
@classmethod
def split_edge(cls, txt: str) -> List[str]:
"""Split an edge string into a sequence of tokens."""
return re.split('[._ ]+', txt.lower().split('/')[1])
def pad_token(self):
return self._pad_token
def bos_token(self):
return self._bos_token
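# Illustrative usage sketch (not part of the original module). Assumes a
# vocab CSV at a hypothetical path "vocab.csv" with one "token,count" row per
# line, as the WordTokenizer constructor above expects:
#
#   tokenizer = WordTokenizer('vocab.csv')
#   ids = tokenizer.encode('hello world', prepend_bos=True, append_eos=True)
#   text = tokenizer.decode(ids)  # -> 'hello world' (OOV words become <unk>)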
| deepmind-research-master | wikigraphs/wikigraphs/data/tokenizers.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the Geometric Manifold Component Estimator (GEOMANCER)."""
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from geomancer import geomancer
class GeomancerTest(parameterized.TestCase):
@parameterized.parameters(
{'zero_trace': False},
{'zero_trace': True})
def test_sym_op(self, zero_trace):
"""sym_op on tril(X) gives same result as QXQ' for symmetric X?"""
n = 5
x = np.random.randn(n, n)
x += x.T
if zero_trace:
np.fill_diagonal(x, np.diag(x)-np.trace(x)/n)
q, _ = np.linalg.qr(np.random.randn(n, n))
sym_q = geomancer.sym_op(q, zero_trace=zero_trace)
tril_x = x[np.tril_indices(n)]
if zero_trace:
tril_x = tril_x[:-1]
vec_y = sym_q @ tril_x
y = q @ x @ q.T
y_ = geomancer.vec_to_sym(vec_y, n, zero_trace=zero_trace)
np.testing.assert_allclose(y_, y)
def test_ffdiag(self):
k = 2
n = 5
w, _ = np.linalg.qr(np.random.randn(n, n))
psi = np.random.randn(k, n)
a = np.zeros((k, n, n))
for i in range(k):
a[i] = w @ np.diag(psi[i]) @ w.T
w_ = geomancer.ffdiag(a)
for i in range(k):
x = w_ @ a[i] @ w_.T
diag = np.diag(x).copy()
np.fill_diagonal(x, 1.0)
# check that x is diagonal
np.testing.assert_allclose(x, np.eye(n), rtol=1e-10, atol=1e-10)
self.assertTrue(np.all(np.min(
np.abs(diag[None, :] - psi[i][:, None]), axis=0) < 1e-10))
def test_make_nearest_neighbor_graph(self):
n = 100
# make points on a circle
data = np.zeros((n, 2))
for i in range(n):
data[i, 0] = np.sin(i*2*np.pi/n)
data[i, 1] = np.cos(i*2*np.pi/n)
graph = geomancer.make_nearest_neighbors_graph(data, 4, n=10)
for i in range(n):
self.assertLen(graph.rows[i], 4)
self.assertIn((i+1) % n, graph.rows[i])
self.assertIn((i+2) % n, graph.rows[i])
self.assertIn((i-1) % n, graph.rows[i])
self.assertIn((i-2) % n, graph.rows[i])
if __name__ == '__main__':
absltest.main()
| deepmind-research-master | geomancer/geomancer_test.py |
# Copyright 2020 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup for pip package."""
from setuptools import find_packages
from setuptools import setup
REQUIRED_PACKAGES = ['numpy', 'scipy', 'matplotlib', 'absl-py', 'tqdm']
setup(
name='geomancer',
version='0.1',
description='A library for the Geometric Manifold Component Estimator.',
url='https://github.com/deepmind/deepmind-research/geomancer',
author='DeepMind',
author_email='[email protected]',
# Contained modules and scripts.
packages=find_packages(),
install_requires=REQUIRED_PACKAGES,
platforms=['any'],
license='Apache 2.0',
)
| deepmind-research-master | geomancer/setup.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run GEOMANCER on products of synthetic manifolds."""
import re
from absl import app
from absl import flags
from absl import logging
import geomancer
from matplotlib import gridspec
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import special_ortho_group
from tqdm import tqdm
SPECIFICATION = flags.DEFINE_list(
name='specification', default=['S^2', 'S^2'], help='List of submanifolds')
NPTS = flags.DEFINE_integer(
name='npts', default=1000, help='Number of data points')
ROTATE = flags.DEFINE_boolean(
name='rotate', default=False, help='Apply random rotation to the data')
PLOT = flags.DEFINE_boolean(
name='plot', default=True, help='Whether to enable plotting')
def make_so_tangent(q):
"""Given an n x n orthonormal matrix, return a basis for its tangent space."""
n = q.shape[0]
assert np.allclose(q.T @ q, np.eye(n), atol=1e-4, rtol=1e-4)
a = np.zeros((n, n))
ii = 0
dq = np.zeros((n, n, n*(n-1)//2))
for i in range(n):
for j in range(i+1, n):
a[i, j] = 1
a[j, i] = -1
dq[..., ii] = a @ q # tangent vectors are skew-symmetric matrix times Q
a[i, j] = 0
a[j, i] = 0
ii += 1
# reshape and orthonormalize the result
return np.linalg.qr(np.reshape(dq, (n**2, n*(n-1)//2)))[0]
def make_sphere_tangent(x):
_, _, v = np.linalg.svd(x[None, :])
return v[:, 1:]
def make_true_tangents(spec, data):
"""Return a set of orthonormal bases, one for each submanifold."""
for i in range(spec.shape[1]):
assert spec[0, i] == 0 or spec[1, i] == 0
so_dim = sum(dim ** 2 for dim in spec[0])
sphere_dim = sum(dim+1 if dim > 0 else 0 for dim in spec[1])
assert so_dim + sphere_dim == data.shape[0]
ii = 0
tangents = []
for i in range(spec.shape[1]):
if spec[0, i] != 0:
dim = spec[0, i]
tangents.append(make_so_tangent(np.reshape(data[ii:ii+dim**2],
(dim, dim))))
ii += dim ** 2
else:
dim = spec[1, i]
tangents.append(make_sphere_tangent(data[ii:ii+dim+1]))
ii += dim + 1
tangents2 = []
for i in range(len(tangents)):
size1 = sum(x.shape[0] for x in tangents[:i])
size2 = sum(x.shape[0] for x in tangents[i+1:])
tangents2.append(np.concatenate(
(np.zeros((size1, tangents[i].shape[1])),
tangents[i],
np.zeros((size2, tangents[i].shape[1]))), axis=0))
return tangents2
def make_product_manifold(specification, npts):
"""Generate data from a product of manifolds with the given specification."""
data = []
tangents = []
latent_dim = 0
spec_array = np.zeros((2, len(specification)), dtype=np.int32)
for i, spec in enumerate(specification):
so_spec = re.search(r'SO\(([0-9]+)\)', spec) # matches "SO(<numbers>)"
sphere_spec = re.search(r'S\^([0-9]+)', spec) # matches "S^<numbers>"
if sphere_spec is not None:
dim = int(sphere_spec.group(1))
spec_array[1, i] = dim
latent_dim += dim
dat = np.random.randn(npts, dim+1)
dat /= np.tile(np.sqrt(np.sum(dat**2, axis=1)[..., None]),
[1, dim+1])
elif so_spec is not None:
dim = int(so_spec.group(1))
spec_array[0, i] = dim
latent_dim += dim * (dim - 1) // 2
dat = [np.ndarray.flatten(special_ortho_group.rvs(dim), order='C')
for _ in range(npts)]
dat = np.stack(dat)
else:
raise ValueError(f'Unrecognized manifold: {spec}')
data.append(dat)
data = np.concatenate(data, axis=1)
for i in range(spec_array.shape[1]):
if spec_array[0, i] != 0:
dim = spec_array[0, i]
tangents.append(np.zeros((npts, data.shape[1], dim * (dim - 1) // 2)))
elif spec_array[1, i] != 0:
dim = spec_array[1, i]
tangents.append(np.zeros((npts, data.shape[1], dim)))
for i in tqdm(range(npts)):
true_tangent = make_true_tangents(spec_array, data[i])
for j in range(len(specification)):
tangents[j][i] = true_tangent[j]
logging.info('Constructed data and true tangents for %s',
' x '.join(specification))
return data, latent_dim, tangents
def main(_):
# Generate data and run GEOMANCER
data, dim, tangents = make_product_manifold(SPECIFICATION.value, NPTS.value)
if ROTATE.value:
rot, _ = np.linalg.qr(np.random.randn(data.shape[1], data.shape[1]))
data_rot = data @ rot.T
components, spectrum = geomancer.fit(data_rot, dim)
errors = geomancer.eval_unaligned(data_rot, components, data, tangents)
else:
components, spectrum = geomancer.fit(data, dim)
errors = geomancer.eval_aligned(components, tangents)
logging.info('Error between subspaces: %.2f +/- %.2f radians',
np.mean(errors),
np.std(errors))
if PLOT.value:
# Plot spectrum
plt.figure(figsize=(8, 6))
plt.scatter(np.arange(len(spectrum)), spectrum, s=100)
largest_gap = np.argmax(spectrum[1:]-spectrum[:-1]) + 1
plt.axvline(largest_gap, linewidth=2, c='r')
plt.xticks([])
plt.yticks(fontsize=18)
plt.xlabel('Index', fontsize=24)
plt.ylabel('Eigenvalue', fontsize=24)
plt.title('GeoManCEr Eigenvalue Spectrum', fontsize=24)
# Plot subspace bases
fig = plt.figure(figsize=(8, 6))
bases = components[0]
gs = gridspec.GridSpec(1, len(bases),
width_ratios=[b.shape[1] for b in bases])
for i in range(len(bases)):
ax = plt.subplot(gs[i])
ax.imshow(bases[i])
ax.set_xticks([])
ax.set_yticks([])
ax.set_title(r'$T_{\mathbf{x}_1}\mathcal{M}_%d$' % (i+1), fontsize=18)
fig.canvas.set_window_title('GeoManCEr Results')
# Plot ground truth
fig = plt.figure(figsize=(8, 6))
gs = gridspec.GridSpec(1, len(tangents),
width_ratios=[b.shape[2] for b in tangents])
for i, spec in enumerate(SPECIFICATION.value):
ax = plt.subplot(gs[i])
ax.imshow(tangents[i][0])
ax.set_xticks([])
ax.set_yticks([])
ax.set_title(r'$T_{\mathbf{x}_1}%s$' % spec, fontsize=18)
fig.canvas.set_window_title('Ground Truth')
plt.show()
if __name__ == '__main__':
app.run(main)
| deepmind-research-master | geomancer/train.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code for the Geometric Manifold Component Estimator (GEOMANCER)."""
import itertools
from absl import logging
import numpy as np
import scipy
import scipy.sparse
import scipy.sparse.linalg
from tqdm import tqdm
def sym_op(x, zero_trace=False):
"""Given X, makes L(A) = X @ A @ X' for symmetric matrices A.
If A is not symmetric, L(A) will return X @ (A_L + A_L') @ X' where A_L is
the lower triangular of A (with the diagonal divided by 2).
Args:
x: The matrix from which to construct the operator
zero_trace (optional): If true, restrict the operator to only act on
matrices with zero trace, effectively reducing the dimensionality by one.
Returns:
A matrix Y such that vec(L(A)) = Y @ vec(A).
"""
n = x.shape[0]
# Remember to subtract off the diagonal once
xx = (np.einsum('ik,jl->ijkl', x, x) +
np.einsum('il,jk->ijkl', x, x) -
np.einsum('ik,jl,kl->ijkl', x, x, np.eye(n)))
xx = xx[np.tril_indices(n)]
xx = xx.transpose(1, 2, 0)
xx = xx[np.tril_indices(n)]
xx = xx.T
if zero_trace:
diag_idx = np.cumsum([0]+list(range(2, n)))
proj_op = np.eye(n*(n+1)//2)[:, :-1]
proj_op[-1, diag_idx] = -1
# multiply by operator that completes last element of diagonal
# for a zero-trace matrix
xx = xx @ proj_op
xx = xx[:-1]
return xx
def vec_to_sym(x, n, zero_trace=False):
y = np.zeros((n, n))
if zero_trace:
x = np.append(x, 0.0)
y[np.tril_indices(n)] = x
y += y.T
y[np.diag_indices(n)] /= 2.0
if zero_trace:
y[-1, -1] = -np.trace(y)
return y
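# Illustrative sanity check (not part of the original module): sym_op(Q)
# acting on the lower-triangular vectorization of a symmetric X should match
# Q @ X @ Q.T, mirroring GeomancerTest.test_sym_op:
#
#   n = 4
#   x = np.random.randn(n, n); x += x.T
#   q, _ = np.linalg.qr(np.random.randn(n, n))
#   vec_y = sym_op(q) @ x[np.tril_indices(n)]
#   assert np.allclose(vec_to_sym(vec_y, n), q @ x @ q.T)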
def ffdiag(data, lr=1.0, tol=1e-10, verbose=False, eig_init=False):
"""Orthogonal FFDiag algorithm of Ziehe et al 2004."""
n = data.shape[1]
k = data.shape[0]
c = data.copy()
if eig_init:
_, v = np.linalg.eig(data[0])
v = v.T
for i in range(k):
c[i] = v @ c[i] @ v.T
else:
v = np.eye(n)
err_ = np.inf
for t in range(10000):
w = np.zeros((n, n))
for i in range(n):
for j in range(i+1, n):
diag = c[:, i, i] - c[:, j, j]
w[i, j] = np.sum(c[:, i, j] * diag) / np.sum(diag ** 2)
w -= w.T
norm = np.linalg.svd(w, compute_uv=False).max()
if norm > lr:
w *= lr / norm
ew = scipy.linalg.expm(w)
v = ew @ v
for i in range(k):
c[i] = ew @ c[i] @ ew.T
cdiag = c.copy()
for i in range(n):
for j in range(k):
cdiag[j, i, i] = 0
err = np.linalg.norm(cdiag)
if verbose:
logging.info('Iter %d: %f', t, err)
if err_ - err < tol and err_ - err >= 0:
return v
err_ = err
return v
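# Illustrative check (not part of the original module): matrices that share
# an orthonormal eigenbasis W should be jointly diagonalized by the W that
# ffdiag recovers, as exercised in GeomancerTest.test_ffdiag:
#
#   n, k = 5, 2
#   w, _ = np.linalg.qr(np.random.randn(n, n))
#   a = np.stack([w @ np.diag(np.random.randn(n)) @ w.T for _ in range(k)])
#   w_ = ffdiag(a)
#   # w_ @ a[i] @ w_.T is now (approximately) diagonal for every i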
def avg_angle_between_subspaces(xs, ys):
"""Compute the error between two sets of subspaces."""
if len(xs) != len(ys):
return np.pi / 2 # largest possible angle
angles = []
for ys_perm in itertools.permutations(ys):
angles.append([])
for i in range(len(xs)):
if xs[i].shape[1] == ys_perm[i].shape[1]:
sigma = np.linalg.svd(xs[i].T @ ys_perm[i], compute_uv=False)
angles[-1].append(np.arccos(np.min(sigma)))
else:
angles[-1].append(np.pi / 2)
angles = np.array(angles)
return np.min(np.mean(angles, axis=1))
def make_nearest_neighbors_graph(data, k, n=1000):
"""Build exact k-nearest neighbors graph from numpy data.
Args:
data: Data to compute nearest neighbors of, each column is one point
k: number of nearest neighbors to compute
n (optional): number of neighbors to compute simultaneously
Returns:
A scipy sparse matrix in LIL format giving the symmetric nn graph.
"""
shape = data.shape
assert shape[0] % n == 0
nbr_graph = scipy.sparse.lil_matrix((shape[0], shape[0]))
norm = np.sum(data**2, axis=1)
cols = np.meshgrid(np.arange(n), np.ones(k+1))[0]
for i in tqdm(range(0, shape[0], n)):
dot = data @ data[i:i+n].T
dists = np.sqrt(np.abs(norm[:, None] - 2*dot + norm[i:i+n][None, :]))
idx = np.argpartition(dists, k, axis=0)[:k+1]
nbrs = idx[np.argsort(dists[idx, cols], axis=0), cols][1:]
for j in range(n):
nbr_graph[i+j, nbrs[:, j]] = 1
# Symmetrize graph
for i in tqdm(range(shape[0])):
for j in nbr_graph.rows[i]:
if nbr_graph[j, i] == 0:
nbr_graph[j, i] = nbr_graph[i, j]
logging.info('Symmetrized neighbor graph')
return nbr_graph
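# Illustrative usage sketch (not part of the original module): 100 points on
# a unit circle with 4 neighbors each. Note that data.shape[0] must be
# divisible by the shard size n:
#
#   theta = 2 * np.pi * np.arange(100) / 100
#   data = np.stack([np.sin(theta), np.cos(theta)], axis=1)
#   graph = make_nearest_neighbors_graph(data, k=4, n=10)
#   # graph.rows[i] lists the indices of point i's nearest neighbors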
def make_tangents(data, neighbor_graph, k):
"""Construct all tangent vectors for the dataset."""
tangents = np.zeros((data.shape[0], k, data.shape[1]), dtype=np.float32)
for i in tqdm(range(data.shape[0])):
diff = data[neighbor_graph.rows[i]] - data[i]
_, _, u = np.linalg.svd(diff, full_matrices=False)
tangents[i] = u[:k]
logging.info('Computed all tangents')
return tangents
def make_connection(tangents, neighbor_graph):
"""Make connection matrices for all edges of the neighbor graph."""
connection = {}
for i in tqdm(range(tangents.shape[0])):
for j in neighbor_graph.rows[i]:
if j > i:
uy, _, ux = np.linalg.svd(tangents[j] @ tangents[i].T,
full_matrices=False)
conn = uy @ ux
connection[(i, j)] = conn
connection[(j, i)] = conn.T
logging.info('Constructed all connection matrices')
return connection
def make_laplacian(connection, neighbor_graph, sym=True, zero_trace=True):
"""Make symmetric zero-trace second-order graph connection Laplacian."""
n = neighbor_graph.shape[0]
k = list(connection.values())[0].shape[0]
bsz = (k*(k+1)//2 - 1 if zero_trace else k*(k+1)//2) if sym else k**2
data = np.zeros((neighbor_graph.nnz + n, bsz, bsz), dtype=np.float32)
indptr = []
indices = np.zeros(neighbor_graph.nnz + n)
index = 0
for i in tqdm(range(n)):
indptr.append(index)
data[index] = len(neighbor_graph.rows[i]) * np.eye(bsz)
indices[index] = i
index += 1
for j in neighbor_graph.rows[i]:
if sym:
kron = sym_op(connection[(j, i)], zero_trace=zero_trace)
else:
kron = np.kron(connection[(j, i)], connection[(j, i)])
data[index] = -kron
indices[index] = j
index += 1
indptr.append(index)
indptr = np.array(indptr)
laplacian = scipy.sparse.bsr_matrix((data, indices, indptr),
shape=(n*bsz, n*bsz))
logging.info('Built 2nd-order graph connection Laplacian.')
return laplacian
def cluster_subspaces(omega):
"""Cluster different dimensions from the eigenvectors of the Laplacian."""
w = ffdiag(omega) # simultaneous diagonalization
psi = np.zeros(omega.shape[:2])
for i in range(omega.shape[0]):
psi[i] = np.diag(w @ omega[i] @ w.T) # compute diagonals
# Compute cosine similarity of diagonal vectors
psi_outer = psi.T @ psi
psi_diag = np.diag(psi_outer)
cos_similarity = psi_outer / np.sqrt(np.outer(psi_diag, psi_diag))
adj = cos_similarity > 0.5 # adjacency matrix for graph of clusters
# Use graph Laplacian to find cliques
# (though a greedy algorithm could work too)
lapl = np.diag(np.sum(adj, axis=0)) - adj # graph Laplacian
d, v = np.linalg.eig(lapl)
# connected components of graph
cliques = np.abs(v[:, np.abs(d) < 1e-6]) > 1e-6
tangents = [w[cliques[:, i]] for i in range(sum(np.abs(d) < 1e-6))]
return tangents
def fit(data, k, gamma=None, nnbrs=None, neig=10, shard_size=1000):
"""The Geometric Manifold Component Estimator.
Args:
data: the dataset, a set of points sample from a product manifold.
k: the dimensionality of the manifold.
gamma (optional): the threshold in the spectrum at which to cut off the
number of submanifolds.
nnbrs (optional): number of neighbors to use for each point.
neig (optional): the total number of eigenvectors to compute.
shard_size (optional): the size of shard to use in knn computation.
Returns:
A list of lists of subspace bases, one list for each element of the dataset,
and the spectrum of the 2nd-order graph Laplacian.
"""
if not nnbrs:
nnbrs = 2*k
neighbor_graph = make_nearest_neighbors_graph(data, nnbrs, n=shard_size)
tangents = make_tangents(data, neighbor_graph, k)
connection = make_connection(tangents, neighbor_graph)
laplacian = make_laplacian(connection, neighbor_graph)
eigvals, eigvecs = scipy.sparse.linalg.eigsh(laplacian, k=neig, which='SM')
logging.info('Computed bottom eigenvectors of 2nd-order Laplacian')
bsz = k*(k+1)//2 - 1 # Block size for the projected 2nd-order Laplacian
if gamma:
nm = np.argwhere(eigvals < gamma)[-1, 0] + 1
else: # If no threshold is provided, just use the largest gap in the spectrum
nm = np.argmax(eigvals[1:] - eigvals[:-1]) + 1
eigvecs = eigvecs.reshape(data.shape[0], bsz, neig)
omega = np.zeros((nm, k, k), dtype=np.float32)
components = []
for i in tqdm(range(data.shape[0])):
for j in range(nm):
omega[j] = vec_to_sym(eigvecs[i, :, j], k, zero_trace=True)
components.append([tangents[i].T @ x.T for x in cluster_subspaces(omega)])
logging.info('GEOMANCER completed')
return components, eigvals
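# Illustrative end-to-end sketch (not part of the original module), following
# the pipeline in train.py (make_product_manifold lives there):
#
#   data, dim, true_tangents = make_product_manifold(['S^2', 'S^2'], 1000)
#   components, spectrum = fit(data, dim)
#   errors = eval_aligned(components, true_tangents)  # angles in radians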
def eval_aligned(tangents, true_tangents):
"""Evaluation for aligned data."""
errors = np.zeros(len(tangents))
for i in tqdm(range(len(tangents))):
errors[i] = avg_angle_between_subspaces([gt[i] for gt in true_tangents],
tangents[i])
logging.info('Computed angles between ground truth and GEOMANCER results')
return errors
def eval_unaligned(data, tangents, true_data, true_tangents, k=10, n=1000):
"""Evaluation for unaligned data."""
logging.info('Evaluating unaligned data')
errors = np.zeros(data.shape[0])
nbrs = make_nearest_neighbors_graph(true_data, k=k, n=n)
for i in tqdm(range(data.shape[0])):
tangent = np.concatenate(tangents[i], axis=1)
true_tangent = np.concatenate([t[i] for t in true_tangents], axis=1)
dx_true = (true_data[nbrs.rows[i]] - true_data[i]) @ true_tangent
dx_result = (data[nbrs.rows[i]] - data[i]) @ tangent
# compute canonical correlations between the two dxs
xx = dx_true.T @ dx_true
yy = dx_result.T @ dx_result
xy = dx_true.T @ dx_result
xx_ = np.linalg.inv(xx)
yy_ = np.linalg.inv(yy)
foo = scipy.linalg.sqrtm(xx_) @ xy @ scipy.linalg.sqrtm(yy_)
u, _, v = np.linalg.svd(foo)
# project subspaces for results and ground truth into aligned space
proj = [v @ tangent.T @ s for s in tangents[i]]
true_proj = [u.T @ true_tangent.T @ s[i] for s in true_tangents]
errors[i] = avg_angle_between_subspaces(proj, true_proj)
return errors
| deepmind-research-master | geomancer/geomancer.py |
import torch
from palme.model import PalmE
# usage
img = torch.randn(1, 3, 256, 256)
caption = torch.randint(0, 20000, (1, 1024))
model = PalmE()
output = model(img, caption)
print(output.shape) # (1, 1024, 20000)
| PALM-E-main | example.py |
import math
import multiprocessing
import os
from datetime import timedelta
from functools import partial
from itertools import chain
import torch
# constants
from accelerate import Accelerator
from accelerate.utils import InitProcessGroupKwargs
from datasets import load_dataset
from lion_pytorch import Lion
from palm_rlhf_pytorch.palm import LayerNorm, ParallelTransformerBlock
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
CheckpointImpl,
apply_activation_checkpointing,
checkpoint_wrapper,
)
from torch.distributed.fsdp import (
BackwardPrefetch,
FullyShardedDataParallel,
MixedPrecision,
ShardingStrategy,
)
from torch.distributed.fsdp.wrap import (
transformer_auto_wrap_policy,
)
from torch.optim import AdamW
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import (
AutoTokenizer,
default_data_collator,
get_cosine_schedule_with_warmup,
get_linear_schedule_with_warmup,
set_seed,
)
from palme.stable_adamw import StableAdamWUnfused
from palme.model import PalmE
# setup
import torch.distributed as dist
from accelerate.logging import get_logger
logger = get_logger(__name__, log_level="INFO")
class CFG:
BATCH_SIZE = 1
GRADIENT_ACCUMULATE_EVERY: int = 1
SEED: int = 42
LEARNING_RATE: float = 1e-4 #3e-4 # 1e-4 for lion
WEIGHT_DECAY: float = 0.1
SEQ_LEN: int = 8192
NUM_CPU: int = multiprocessing.cpu_count()
USE_DEEPSPEED: bool = True
USE_FSDP: bool = True
USE_PRETOKENIZED: bool = True
USE_ACTIVATION_CHECKPOINTING: bool = True
    RESUME_FROM_CHECKPOINT: str = ""  # path to a checkpoint dir; empty disables resuming
CHECKPOINTING_STEPS: int = 1000
OUTPUT_DIR: str = 'checkpoints/' # Folder
ENTITY_NAME: str = "PALME"
LOGGING_STEPS: int = 100
# helpers
def print_num_params(model, accelerator: Accelerator):
n_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
accelerator.print(f"Number of parameters in model: {n_params}")
# activation checkpointing
def activation_checkpointing(
model: torch.nn.Module,
offload_to_cpu: bool = False,
accelerator: Accelerator = None,
):
"""
Apply activation checkpointing to a model.
Args:
model (Module): The model to which to apply activation checkpointing.
offload_to_cpu (bool, optional): Whether to offload the activations to CPU. Defaults to False.
accelerator (Accelerator, optional): The Accelerate library accelerator. Defaults to None.
"""
if accelerator is not None:
accelerator.print("Using activation checkpointing")
def check_fn(submodule):
return isinstance(submodule, ParallelTransformerBlock)
non_reentrant_wrapper = partial(
checkpoint_wrapper,
offload_to_cpu=offload_to_cpu,
checkpoint_impl=CheckpointImpl.NO_REENTRANT,
)
apply_activation_checkpointing(
model, checkpoint_wrapper_fn=non_reentrant_wrapper, check_fn=check_fn
)
# FSDP
def fsdp(
model: torch.nn.Module,
auto_wrap: bool = False,
mp: str = "fp32",
shard_strat: str = "NO_SHARD",
):
"""
This function wraps a given PyTorch model with the FullyShardedDataParallel (FSDP) wrapper to enable efficient data parallelism and model sharding.
Args:
model (torch.nn.Module): The original PyTorch model to be wrapped with FSDP.
auto_wrap (bool, optional): If True, it enables automatic wrapping of the model's layers according to the transformer_auto_wrap_policy. Default is False.
mp (str, optional): The mixed precision mode to be used. Can be 'bf16' for BFloat16, 'fp16' for Float16 or 'fp32' for Float32 precision. Default is 'fp32'.
shard_strat (str, optional): The sharding strategy to be used. Can be 'SHARD_GRAD' for sharding at gradient computation, 'FULL_SHARD' for full model sharding or 'NO_SHARD' for no sharding. Default is 'NO_SHARD'.
Raises:
ValueError: If the provided mp (mixed precision mode) is not 'bf16', 'fp16' or 'fp32'.
ValueError: If the provided shard_strat (sharding strategy) is not 'SHARD_GRAD', 'FULL_SHARD' or 'NO_SHARD'.
Returns:
torch.nn.Module: The input model wrapped with FSDP.
"""
if auto_wrap:
PALME_auto_wrap_policy = partial(
transformer_auto_wrap_policy,
transformer_layer_cls={
ParallelTransformerBlock,
},
)
else:
PALME_auto_wrap_policy = None
if mp == "bf16":
mp_fsdp = MixedPrecision(
param_dtype=torch.bfloat16,
# Gradient communication precision.
reduce_dtype=torch.bfloat16,
# Buffer precision.
buffer_dtype=torch.bfloat16,
)
elif mp == "fp16":
mp_fsdp = MixedPrecision(
param_dtype=torch.float16,
# Gradient communication precision.
reduce_dtype=torch.float16,
# Buffer precision.
buffer_dtype=torch.float16,
)
elif mp == "fp32":
mp_fsdp = MixedPrecision(
param_dtype=torch.float32,
# Gradient communication precision.
reduce_dtype=torch.float32,
# Buffer precision.
buffer_dtype=torch.float32,
)
else:
raise ValueError(
"Invalid scheduler_type. Expected 'bf16', 'fp16' or 'fp32', got: {}".format(
mp
)
)
if shard_strat == "SHARD_GRAD":
sharding_strat_fsdp = ShardingStrategy.SHARD_GRAD_OP
elif shard_strat == "FULL_SHARD":
sharding_strat_fsdp = ShardingStrategy.FULL_SHARD
elif shard_strat == "NO_SHARD":
sharding_strat_fsdp = ShardingStrategy.NO_SHARD
else:
raise ValueError(
"Invalid scheduler_type. Expected 'SHARD_GRAD', 'FULL_SHARD' or 'NO_SHARD', got: {}".format(
shard_strat
)
)
model = FullyShardedDataParallel(
model,
auto_wrap_policy=PALME_auto_wrap_policy,
mixed_precision=mp_fsdp,
backward_prefetch=BackwardPrefetch.BACKWARD_PRE,
sharding_strategy=sharding_strat_fsdp,
forward_prefetch=True,
use_orig_params=True,
)
return model
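# Illustrative usage sketch (not part of the original script), mirroring the
# call in Train() below; FSDP requires torch.distributed to be initialized:
#
#   model = fsdp(model, mp="fp16", shard_strat="SHARD_GRAD")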
# learning rate scheduler
def get_lr_scheduler_with_warmup(
optimizer: torch.optim.Optimizer,
scheduler_type: str,
num_warmup_steps: int,
max_train_steps: int,
grad_accumulate_every: int = 1,
accelerator: Accelerator = None,
):
"""
Get a learning rate scheduler with warmup.
Args:
optimizer (Optimizer): The optimizer for which to create the learning rate scheduler.
scheduler_type (str): The type of learning rate scheduler to create, either "linear" or "cosine".
num_warmup_steps (int): The number of warmup steps for the learning rate scheduler.
max_train_steps (int): The maximum number of training steps.
grad_accumulate_every (int, optional): The gradient accumulation factor. Defaults to 1.
accelerator (Accelerator, optional): The Accelerate library accelerator. Defaults to None.
Returns:
The learning rate scheduler with warmup.
Raises:
ValueError: If scheduler_type is not "linear" or "cosine".
"""
NUM_WARMUP_STEPS = num_warmup_steps
GRADIENT_ACCUMULATE_EVERY = grad_accumulate_every
if accelerator is not None:
accelerator.print(f"Using {scheduler_type} lr scheduler")
if scheduler_type == "linear":
return get_linear_schedule_with_warmup(
optimizer=optimizer,
num_warmup_steps=NUM_WARMUP_STEPS * GRADIENT_ACCUMULATE_EVERY,
num_training_steps=max_train_steps * GRADIENT_ACCUMULATE_EVERY,
)
elif scheduler_type == "cosine":
return get_cosine_schedule_with_warmup(
optimizer=optimizer,
num_warmup_steps=NUM_WARMUP_STEPS * GRADIENT_ACCUMULATE_EVERY,
num_training_steps=max_train_steps * GRADIENT_ACCUMULATE_EVERY,
)
else:
raise ValueError(
"Invalid scheduler_type. Expected 'linear' or 'cosine', got: {}".format(
scheduler_type
)
)
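# Illustrative usage sketch (not part of the original script), mirroring the
# call in Train() below:
#
#   lr_scheduler = get_lr_scheduler_with_warmup(
#       optimizer=optim, scheduler_type="cosine",
#       num_warmup_steps=int(max_train_steps * 0.01),
#       max_train_steps=max_train_steps,
#       grad_accumulate_every=CFG.GRADIENT_ACCUMULATE_EVERY)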
# optimizers
def decoupled_optimizer(
model: torch.nn.Module,
learning_rate: float,
weight_decay: float,
beta_1: float,
beta_2: float,
optimizer_type: str,
use_fsdp: bool = True,
accelerator: Accelerator = None,
):
"""
Decouples the optimizer from the training process.
This function sets up the optimizer for the model by creating two groups of parameters:
one for weight decay and one without weight decay. Then, it initializes the optimizer
with these two groups of parameters.
Args:
model (Module): The model whose parameters are optimized.
learning_rate (float): The learning rate for the optimizer.
weight_decay (float): The weight decay for the optimizer.
beta_1 (float): The exponential decay rate for the 1st moment estimates.
beta_2 (float): The exponential decay rate for the 2nd moment estimates.
optimizer_type (str): The type of the optimizer. Can be 'lion', 'adamw', or 'stable_adamw'.
use_fsdp (bool, optional): If True, the optimizer will work with fully sharded data parallelism. Defaults to True.
accelerator (Accelerator, optional): The accelerator from HuggingFace's Accelerate library. Defaults to None.
Returns:
Optimizer: The initialized optimizer.
Raises:
ValueError: If the optimizer type is not 'lion', 'adamw' or 'stable_adamw'.
"""
    if accelerator is not None:
        accelerator.print(f"Using {optimizer_type} optimizer")
# Create an empty dictionary called param_dict to store the model's named parameters.
param_dict = {}
# Iterate over the model's named parameters and populate the param_dict with key-value pairs.
for param_name, param in model.named_parameters():
param_dict[param_name] = param
# Separate the model's named modules into two groups: decay and no_decay.
# Create an empty list to store the names of the LayerNorm and Embedding layer weights with no weight decay.
no_decay = []
if use_fsdp:
exclude_module = "_fsdp_wrapped_module.token_emb"
else:
exclude_module = "token_emb"
# Iterate through the named modules of the model.
for module_name, module in model.named_modules():
# Check if the current module is an instance of any of the desired types (LayerNorm or torch.nn.Embedding).
for ndim in [LayerNorm, torch.nn.Embedding]:
if isinstance(module, ndim):
# If torch.nn.Embedding, append its name with a ".weight" suffix to the no_decay list.
if module_name == exclude_module:
no_decay.append(f"{module_name}.weight")
else:
# If the module is an instance of LayerNorm
no_decay.append(f"{module_name}.gamma")
# Exit the inner loop since the desired module has been found.
break
# Create an empty list to store the names of the Linear layer weights with weight decay.
decay = []
# Iterate through the named modules of the model.
for module_name, module in model.named_modules():
# Check if the current module is an instance of the desired type (torch.nn.Linear).
for ndim in [torch.nn.Linear]:
if isinstance(module, ndim):
# If the module is an instance of torch.nn.Linear, append its name with a ".weight" suffix to the decay list.
decay.append(f"{module_name}.weight")
# Exit the inner loop since the desired module has been found.
break
# Create two separate lists of model parameters: decay_param and no_decay_param.
# The decay_param list contains the parameters that should have weight decay applied.
# The no_decay_param list contains the parameters that should not have weight decay applied, excluding the 'to_logits.weight' parameter.
# Create an empty list called decay_param to store the parameters with weight decay.
decay_param = []
if use_fsdp:
exclude_param = "_fsdp_wrapped_module.to_logits.weight"
else:
exclude_param = "to_logits.weight"
# Iterate over the decay list, which contains the names of the parameters with weight decay.
for param in decay:
# Check if the current parameter is not 'to_logits.weight'.
# Append the corresponding parameter from param_dict to the decay_param list.
if param != exclude_param:
decay_param.append(param_dict[param])
# Create an empty list called no_decay_param to store the parameters without weight decay.
no_decay_param = []
# Iterate over the no_decay list, which contains the names of the parameters without weight decay.
for param in no_decay:
try:
# Append the corresponding parameter from param_dict to the no_decay_param list.
no_decay_param.append(param_dict[param])
except KeyError:
# print(f"Parameter {param_name} does not exist in the model")
pass
# Create a list called grouped_params that contains two dictionaries.
# The first dictionary has the decay_param list and the corresponding weight_decay value.
# The second dictionary has the no_decay_param list and a weight_decay value of 0.0.
grouped_params = [
{"params": decay_param, "weight_decay": weight_decay},
{"params": no_decay_param, "weight_decay": 0.0},
]
# Create a variable called optimizer that stores an instance of the optimizer.
if optimizer_type == "lion":
optimizer = Lion(grouped_params, lr=learning_rate, betas=(beta_1, beta_2),)
elif optimizer_type == "adamw":
optimizer = AdamW(grouped_params, lr=learning_rate, betas=(beta_1, beta_2),)
# elif optimizer_type == "deepspeed":
# optimizer = DummyOptim(grouped_params, lr=learning_rate, betas=(beta_1, beta_2),)
elif optimizer_type == "stable_adamw":
optimizer = StableAdamWUnfused(
grouped_params, lr=learning_rate, betas=(beta_1, beta_2),
)
# elif optimizer_type=="Adam8bit":
# optimizer = bnb.optim.Adam8bit(grouped_params, lr=learning_rate, betas=(beta_1, beta_2))
# elif optimizer_type=="Lion8Bit":
# optimizer = bnb.optim.Lion8bit(grouped_params, lr=learning_rate, betas=(beta_1, beta_2))
else:
raise ValueError(
"Invalid optimizer_type. Expected 'lion', 'adamw', 'deepspeed' or 'stable_adamw', got: {}".format(
optimizer_type
)
)
# Return the optimizer.
return optimizer
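# Illustrative usage sketch (not part of the original script), mirroring the
# call in Train() below; pass the Accelerate accelerator so the choice of
# optimizer is logged:
#
#   optim = decoupled_optimizer(
#       model=model, learning_rate=CFG.LEARNING_RATE,
#       weight_decay=CFG.WEIGHT_DECAY, beta_1=0.90, beta_2=0.95,
#       optimizer_type="lion", use_fsdp=CFG.USE_FSDP, accelerator=accelerator)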
# dataloaders
def build_dataloaders():
"""
Build data loaders for training.
This function performs the following steps:
1. Load the tokenizer from the pretrained "EleutherAI/gpt-neox-20b" model.
2. Load the "openwebtext" dataset.
3. Tokenize the dataset, adding the end-of-sentence token to each text.
4. Process the tokenized dataset into chunks of a specified block size.
Returns:
Dataset: The processed dataset ready for training.
"""
tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
dataset = load_dataset("openwebtext", split="train")
tokenized_dataset = dataset.map(
lambda example: tokenizer([t + tokenizer.eos_token for t in example["text"]]),
batched=True,
num_proc=CFG.NUM_CPU,
remove_columns=["text"],
)
block_size = CFG.SEQ_LEN
# Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size.
def group_texts(examples):
# Concatenate all texts.
concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()}
total_length = len(concatenated_examples[list(examples.keys())[0]])
# We drop the small remainder, we could add padding if the model supported it instead of this drop, you can
# customize this part to your needs.
if total_length >= block_size:
total_length = (total_length // block_size) * block_size
# Split by chunks of max_len.
result = {
k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
for k, t in concatenated_examples.items()
}
return result
train_dataset = tokenized_dataset.map(
group_texts, batched=True, num_proc=CFG.NUM_CPU,
)
return train_dataset
# TODO: switch to falcon web dataset
def build_pre_tokenized():
d0 = load_dataset("conceptofmind/c4_0-to-20_neox_with_eos_8k", split="train[:10]")
# d1 = load_dataset("conceptofmind/c4_21-to-40_neox_with_eos_8k", split="train")
# d2 = load_dataset("conceptofmind/c4_41-to-60_neox_with_eos_8k", split="train")
# d3 = load_dataset("conceptofmind/c4_61-to-80_neox_with_eos_8k", split="train")
# d4 = load_dataset("conceptofmind/c4_81-to-100_neox_with_eos_8k", split="train")
# train_dataset = concatenate_datasets([d0, d1, d2, d3, d4])
return d0
def Train():
# accelerator
timeout = InitProcessGroupKwargs(timeout=timedelta(seconds=1_000_000))
accelerator = Accelerator(
gradient_accumulation_steps=CFG.GRADIENT_ACCUMULATE_EVERY,
mixed_precision="fp16",
log_with="wandb",
kwargs_handlers=[timeout],
)
# state = AcceleratorState()
# state.deepspeed_plugin.deepspeed_config['train_micro_batch_size_per_gpu'] = CFG.BATCH_SIZE #??????
accelerator.init_trackers(
project_name="PALME",
config={
"batch_size": CFG.BATCH_SIZE,
"gradient_accumulate_every": CFG.GRADIENT_ACCUMULATE_EVERY,
"learning_rate": CFG.LEARNING_RATE,
"seq_len": CFG.SEQ_LEN,
},
# init_kwargs={"wandb": {"entity": CFG.ENTITY_NAME}},
)
accelerator.print(f"Total GPUS: {accelerator.num_processes}")
# set seed
set_seed(CFG.SEED)
    model = PalmE()
print_num_params(model, accelerator)
if CFG.USE_FSDP:
model = fsdp(
model,
mp="fp16",
shard_strat="SHARD_GRAD"
)
if CFG.USE_ACTIVATION_CHECKPOINTING:
        activation_checkpointing(model, accelerator=accelerator)
model = accelerator.prepare(model)
# dataloaders
if CFG.USE_PRETOKENIZED:
train_dataset = build_pre_tokenized()
else:
train_dataset = build_dataloaders()
train_loader = DataLoader(
train_dataset, batch_size=CFG.BATCH_SIZE, collate_fn=default_data_collator,
)
# optimizer
optim = decoupled_optimizer(
model=model,
learning_rate=CFG.LEARNING_RATE,
weight_decay=CFG.WEIGHT_DECAY,
beta_1=0.90,
beta_2=0.95,
optimizer_type='lion',
use_fsdp=True,
accelerator=accelerator
)
# Determine number of training steps
max_train_steps = math.ceil(len(train_loader) / CFG.GRADIENT_ACCUMULATE_EVERY)
accelerator.print(f"Max train steps: {max_train_steps}")
# lr scheduler
NUM_WARMUP_STEPS = int(max_train_steps * 0.01)
accelerator.print(f"Num warmup steps: {NUM_WARMUP_STEPS}")
lr_scheduler = get_lr_scheduler_with_warmup(
optimizer=optim,
scheduler_type="cosine",
num_warmup_steps=NUM_WARMUP_STEPS,
max_train_steps=max_train_steps,
grad_accumulate_every=CFG.GRADIENT_ACCUMULATE_EVERY,
)
# prepare
optim, train_loader, lr_scheduler = accelerator.prepare(
optim, train_loader, lr_scheduler
)
# checkpoint scheduler
accelerator.register_for_checkpointing(lr_scheduler)
# I do not know why Huggingface recommends recalculation of max_train_steps
max_train_steps = math.ceil(len(train_loader) / CFG.GRADIENT_ACCUMULATE_EVERY)
accelerator.print(f"Max train steps recalculated: {max_train_steps}")
# Total batch size for logging
total_batch_size = (
CFG.BATCH_SIZE * accelerator.num_processes * CFG.GRADIENT_ACCUMULATE_EVERY
)
accelerator.print(f"Total batch size: {total_batch_size}")
# resume training
progress_bar = tqdm(
range(max_train_steps), disable=not accelerator.is_local_main_process
)
completed_steps = 0
if CFG.RESUME_FROM_CHECKPOINT:
        if CFG.RESUME_FROM_CHECKPOINT is not None and CFG.RESUME_FROM_CHECKPOINT != "":
accelerator.print(f"Resuming from checkpoint {CFG.RESUME_FROM_CHECKPOINT}")
accelerator.load_state(CFG.RESUME_FROM_CHECKPOINT)
path = os.path.basename(CFG.RESUME_FROM_CHECKPOINT)
training_difference = os.path.splitext(path)[0]
# need to multiply `gradient_accumulation_steps` to reflect real steps
resume_step = (
int(training_difference.replace("step_", ""))
* CFG.GRADIENT_ACCUMULATE_EVERY
)
if CFG.RESUME_FROM_CHECKPOINT and resume_step is not None:
train_loader = accelerator.skip_first_batches(train_loader, resume_step)
completed_steps += resume_step
progress_bar.update(resume_step)
# training
model.train()
for step, batch in enumerate(train_loader):
with accelerator.accumulate(model):
inputs = batch["input_ids"].to(accelerator.device)
loss = model(inputs, return_loss=True)
accelerator.backward(loss)
accelerator.log({"loss": loss.item()}, step=step)
if accelerator.sync_gradients:
accelerator.clip_grad_norm_(model.parameters(), 1.0)
optim.step()
lr_scheduler.step()
optim.zero_grad()
if accelerator.sync_gradients:
progress_bar.update(1)
completed_steps += 1
if isinstance(CFG.CHECKPOINTING_STEPS, int):
if completed_steps % CFG.CHECKPOINTING_STEPS == 0:
output_dir = f"step_{completed_steps }"
if CFG.OUTPUT_DIR is not None:
output_dir = os.path.join(CFG.OUTPUT_DIR, output_dir)
accelerator.save_state(output_dir)
if completed_steps >= max_train_steps:
break
#logging every CFG.LOGGING STEPS
if CFG.LOGGING_STEPS > 0 and step % CFG.LOGGING_STEPS == 0:
logger.info(
f"Step: {completed_steps}/{max_train_steps}, Loss: {loss.item():.5f}"
)
# end training
# accelerator.print(f"Training Finished")
accelerator.end_training()
# save final model
# accelerator.print(f"Saving model to {CFG.OUTPUT_DIR}")
if CFG.OUTPUT_DIR is not None:
accelerator.wait_for_everyone()
unwrapped_model = accelerator.unwrap_model(model)
with accelerator.main_process_first():
accelerator.save(
unwrapped_model.state_dict(), f"{CFG.OUTPUT_DIR}/final/final_model.pt"
)
def main():
    # [CRITICAL] Pay attention to this when scaling to multiple GPUs and
    # clusters: set these via the environment or "accelerate config".
    # The defaults below assume a single-node run.
    os.environ.setdefault('MASTER_ADDR', 'localhost')
    os.environ.setdefault('MASTER_PORT', '9994')
    os.environ.setdefault('RANK', '0')  # rank of this process
    os.environ.setdefault('WORLD_SIZE', str(torch.cuda.device_count()))
    dist.init_process_group(backend='nccl')
Train()
if __name__ == '__main__':
main() | PALM-E-main | train.py |
from palme.model import PalmE
| PALM-E-main | palme/__init__.py |
import torch
import torch.nn as nn
from transformers import AutoTokenizer, CLIPProcessor
from palme.transformer import (
Decoder,
Encoder,
Transformer,
ViTransformerWrapper,
)
class PalmeETokenizer:
def __init__(self):
try:
self.processor = CLIPProcessor.from_pretrained("laion/CLIP-ViT-L-14-laion2B-s32B-b82K")
self.tokenizer = AutoTokenizer.from_pretrained(
"EleutherAI/gpt-neox-20b",
additional_special_tokens=["<image>", "</image>"],
eos_token ="<eos>",
pad_token="<pad>",
extra_ids=0,
model_max_length=8192
)
self.im_idx, self.im_end_idx = self.tokenizer.convert_tokens_to_ids(["<image>", "</image>"])
except Exception as e:
print(f"Error init tokenizer: {e}")
def tokenize_texts(self, texts):
try:
texts = self.tokenizer(texts, return_tensors="pt", padding=True, truncation=True).input_ids
image_tokens = torch.tensor([[self.im_idx, self.im_end_idx]] * texts.shape[0])
return torch.cat([texts[:, 0:1], image_tokens, texts[:, 1:]], dim=1), texts
except Exception as e:
print(f"Error tokenizing texts: {e}")
def tokenize_images(self, images):
try:
tokenized_images = self.processor(images=images, return_tensors="pt").pixel_values
print(f"Tokenized image: {tokenized_images.shape}")
return tokenized_images
except Exception as e:
print(f"Error tokenizing texts: {e}")
def tokenize(self, sample):
try:
text_tokens, only_text_tokens = self.tokenize_texts(sample["target_text"])
attention_mask = text_tokens != self.tokenizer.pad_token_id
dummy_image_features = torch.ones((text_tokens.shape[0], 64))
attention_mask = torch.cat([dummy_image_features, attention_mask], dim=1)
return {
"text_tokens": text_tokens,
"images": self.tokenize_images(sample["image"]),
"labels": only_text_tokens,
"attention_mask": attention_mask,
}
except Exception as e:
print(f"Error during tokenization {e}")
class PalmE(torch.nn.Module):
def __init__(self,
image_size=256,
patch_size=32,
encoder_dim=512,
encoder_depth=6,
encoder_heads=8,
num_tokens=20000,
max_seq_len=1024,
decoder_dim=512,
decoder_depth=6,
decoder_heads=8,
alibi_num_heads=4,
use_abs_pos_emb=False,
cross_attend=True,
alibi_pos_bias=True,
rotary_xpos=True,
attn_flash=True,
qk_norm=True):
super(PalmE, self).__init__()
self.encoder = ViTransformerWrapper(
image_size=image_size,
patch_size=patch_size,
attn_layers=Encoder(
dim=encoder_dim,
depth=encoder_depth,
heads=encoder_heads
)
)
self.decoder = Transformer(
num_tokens=num_tokens,
max_seq_len=max_seq_len,
use_abs_pos_emb=use_abs_pos_emb,
attn_layers=Decoder(
dim=decoder_dim,
depth=decoder_depth,
heads=decoder_heads,
cross_attend=cross_attend,
alibi_pos_bias=alibi_pos_bias,
alibi_num_heads=alibi_num_heads,
rotary_xpos=rotary_xpos,
attn_flash=attn_flash,
qk_norm=qk_norm,
)
)
def forward(self, img, text):
try:
encoded = self.encoder(img, return_embeddings=True)
return self.decoder(text, context=encoded)
except Exception as error:
print(f"Failed in forward method: {error}")
raise
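# Illustrative usage sketch (not part of the original module): the sample
# dict keys follow tokenize() above, and `img` is assumed to be a PIL image
# or array that CLIPProcessor accepts:
#
#   tok = PalmeETokenizer()
#   batch = tok.tokenize({"target_text": ["a photo of a cat"], "image": img})
#   # batch holds "text_tokens", "images", "labels" and "attention_mask"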
| PALM-E-main | palme/model.py |
from functools import partial
from typing import Optional
import torch
from torch import nn, einsum, Tensor
import torch.nn.functional as F
from collections import namedtuple
from functools import wraps
from packaging import version
from dataclasses import dataclass
from einops import rearrange
# constants
EfficientAttentionConfig = namedtuple('EfficientAttentionConfig', ['enable_flash', 'enable_math', 'enable_mem_efficient'])
@dataclass
class Intermediates:
qk_similarities: Optional[Tensor] = None
pre_softmax_attn: Optional[Tensor] = None
post_softmax_attn: Optional[Tensor] = None
def to_tuple(self):
return (self.qk_similarities, self.pre_softmax_attn, self.post_softmax_attn)
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def compact(arr):
return [*filter(exists, arr)]
def once(fn):
called = False
@wraps(fn)
def inner(x):
nonlocal called
if called:
return
called = True
return fn(x)
return inner
print_once = once(print)
# functions for creating causal mask
# need a special one for onnx cpu (no support for .triu)
def create_causal_mask(i, j, device):
return torch.ones((i, j), device = device, dtype = torch.bool).triu(j - i + 1)
def onnx_create_causal_mask(i, j, device):
r = torch.arange(i, device = device)
causal_mask = rearrange(r, 'i -> i 1') < rearrange(r, 'j -> 1 j')
causal_mask = F.pad(causal_mask, (j - i, 0), value = False)
return causal_mask
# main class
class Attend(nn.Module):
def __init__(
self,
*,
dropout = 0.,
causal = False,
heads = None,
talking_heads = False,
sparse_topk = None,
scale = None,
qk_norm = False,
flash = False,
add_zero_kv = False,
onnxable = False
):
super().__init__()
self.scale = scale
self.qk_norm = qk_norm
self.causal = causal
self.create_causal_mask = onnx_create_causal_mask if onnxable else create_causal_mask
self.attn_fn = partial(F.softmax, dtype = torch.float32) if not qk_norm else F.softmax
self.dropout = dropout
self.attn_dropout = nn.Dropout(dropout)
# talking heads
assert not (flash and talking_heads), 'talking heads not compatible with flash attention'
self.talking_heads = talking_heads
if talking_heads:
self.pre_softmax_talking_heads = nn.Conv2d(heads, heads, 1, bias = False)
self.post_softmax_talking_heads = nn.Conv2d(heads, heads, 1, bias = False)
# sparse topk
assert not (flash and sparse_topk), 'sparse topk not compatible with flash attention'
self.sparse_topk = sparse_topk
# add a key / value token composed of zeros
        # in case this helps control outliers, as proposed by https://www.evanmiller.org/attention-is-off-by-one.html
self.add_zero_kv = add_zero_kv
# flash attention
self.flash = flash
assert not (flash and version.parse(torch.__version__) < version.parse('2.0.0')), 'in order to use flash attention, you must be using pytorch 2.0 or above'
# determine efficient attention configs for cuda and cpu
self.cpu_config = EfficientAttentionConfig(True, True, True)
self.cuda_config = None
if not torch.cuda.is_available() or not flash:
return
device_properties = torch.cuda.get_device_properties(torch.device('cuda'))
if device_properties.major == 8 and device_properties.minor == 0:
print_once('A100 GPU detected, using flash attention if input tensor is on cuda')
self.cuda_config = EfficientAttentionConfig(True, False, False)
else:
print_once('Non-A100 GPU detected, using math or mem efficient attention if input tensor is on cuda')
self.cuda_config = EfficientAttentionConfig(False, True, True)
def flash_attn(
self,
q, k, v,
mask = None,
attn_bias = None
):
batch, heads, q_len, _, k_len, is_cuda, device = *q.shape, k.shape[-2], q.is_cuda, q.device
# Recommended for multi-query single-key-value attention by Tri Dao
# kv shape torch.Size([1, 512, 64]) -> torch.Size([1, 8, 512, 64])
if k.ndim == 3:
k = rearrange(k, 'b ... -> b 1 ...').expand_as(q)
if v.ndim == 3:
v = rearrange(v, 'b ... -> b 1 ...').expand_as(q)
# handle scale - by default they scale by dim_head ** -0.5, but need to take care if using cosine sim attention
if self.qk_norm:
default_scale = q.shape[-1] ** -0.5
q = q * (default_scale / self.scale)
# Check if mask exists and expand to compatible shape
# The mask is B L, so it would have to be expanded to B H N L
causal = self.causal
if exists(mask):
assert mask.ndim == 4
mask = mask.expand(batch, heads, q_len, k_len)
# manually handle causal mask, if another mask was given
if causal:
causal_mask = self.create_causal_mask(q_len, k_len, device = device)
mask = mask & ~causal_mask
causal = False
# handle alibi positional bias
# convert from bool to float
if exists(attn_bias):
attn_bias = rearrange(attn_bias, 'h i j -> 1 h i j').expand(batch, heads, -1, -1)
# if mask given, the mask would already contain the causal mask from above logic
# otherwise, if no mask given but still causal, mask out alibi positional bias to a large negative number
mask_value = -torch.finfo(q.dtype).max
if exists(mask):
attn_bias = attn_bias.masked_fill(~mask, mask_value // 2)
elif causal:
causal_mask = self.create_causal_mask(q_len, k_len, device = device)
attn_bias = attn_bias.masked_fill(causal_mask, mask_value // 2)
causal = False
# scaled_dot_product_attention handles attn_mask either as bool or additive bias
# make it an additive bias here
mask = attn_bias
# Check if there is a compatible device for flash attention
config = self.cuda_config if is_cuda else self.cpu_config
# pytorch 2.0 flash attn: q, k, v, mask, dropout, causal, softmax_scale
with torch.backends.cuda.sdp_kernel(**config._asdict()):
out = F.scaled_dot_product_attention(
q, k, v,
attn_mask = mask,
dropout_p = self.dropout if self.training else 0.,
is_causal = causal
)
return out, Intermediates()
def forward(
self,
q, k, v,
mask = None,
attn_bias = None,
prev_attn = None
):
"""
einstein notation
b - batch
h - heads
n, i, j - sequence length (base sequence length, source, target)
d - feature dimension
"""
n, device = q.shape[-2], q.device
scale = default(self.scale, q.shape[-1] ** -0.5)
if self.add_zero_kv:
k, v = map(lambda t: F.pad(t, (0, 0, 1, 0), value = 0.), (k, v))
if exists(mask):
mask = F.pad(mask, (1, 0), value = True)
if exists(attn_bias):
attn_bias = F.pad(attn_bias, (1, 0), value = 0.)
if self.flash:
assert not exists(prev_attn), 'residual attention not compatible with flash attention'
return self.flash_attn(q, k, v, mask = mask, attn_bias = attn_bias)
kv_einsum_eq = 'b j d' if k.ndim == 3 else 'b h j d'
dots = einsum(f'b h i d, {kv_einsum_eq} -> b h i j', q, k) * scale
if exists(prev_attn):
dots = dots + prev_attn
qk_similarities = dots.clone()
if self.talking_heads:
dots = self.pre_softmax_talking_heads(dots)
if exists(attn_bias):
dots = dots + attn_bias
i, j, dtype = *dots.shape[-2:], dots.dtype
mask_value = -torch.finfo(dots.dtype).max
if exists(self.sparse_topk) and self.sparse_topk < j:
top_values, _ = dots.topk(self.sparse_topk, dim = -1)
sparse_topk_mask = dots < top_values[..., -1:]
mask = (mask & sparse_topk_mask) if exists(mask) else sparse_topk_mask
if exists(mask):
dots = dots.masked_fill(~mask, mask_value)
if self.causal:
causal_mask = self.create_causal_mask(i, j, device = device)
dots = dots.masked_fill(causal_mask, mask_value)
pre_softmax_attn = dots.clone()
attn = self.attn_fn(dots, dim = -1)
attn = attn.type(dtype)
post_softmax_attn = attn.clone()
attn = self.attn_dropout(attn)
if self.talking_heads:
attn = self.post_softmax_talking_heads(attn)
out = einsum(f'b h i j, {kv_einsum_eq} -> b h i d', attn, v)
intermediates = Intermediates(
qk_similarities = qk_similarities,
pre_softmax_attn = pre_softmax_attn,
post_softmax_attn = post_softmax_attn
)
return out, intermediates
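# Illustrative usage sketch (not part of the original module): Attend consumes
# pre-projected multi-head tensors of shape (batch, heads, seq, dim_head) and
# returns the attended values plus an Intermediates record:
#
#   attend = Attend(causal=True, flash=False)
#   q = k = v = torch.randn(2, 8, 128, 64)
#   out, intermediates = attend(q, k, v)  # out: (2, 8, 128, 64)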
# cascading heads logic
def to_single_heads(t, dim = 1):
heads = t.unbind(dim = dim)
return tuple(head.unsqueeze(dim) for head in heads)
class CascadingHeads(nn.Module):
def __init__(self, attend: Attend):
super().__init__()
self.attend = attend
def forward(
self,
q, k, v,
mask = None,
attn_bias = None,
prev_attn = None
):
assert q.shape[-1] == v.shape[-1], 'cascading heads can only be done if query / key and value head dimensions are the same'
# split inputs into per-head inputs
heads = q.shape[1]
queries = to_single_heads(q)
keys = to_single_heads(k) if k.ndim == 4 else ((k,) * heads)
values = to_single_heads(v) if v.ndim == 4 else ((v,) * heads)
mask = (mask,) * heads
attn_bias = to_single_heads(attn_bias, dim = 0) if exists(attn_bias) else ((None,) * heads)
prev_attn = to_single_heads(prev_attn) if exists(prev_attn) else ((None,) * heads)
# now loop through each head, without output of previous head summed with the next head
# thus cascading
all_outs = []
all_intermediates = []
prev_head_out = None
for h_q, h_k, h_v, h_mask, h_attn_bias, h_prev_attn in zip(queries, keys, values, mask, attn_bias, prev_attn):
if exists(prev_head_out):
h_q = h_q + prev_head_out
out, intermediates = self.attend(
h_q, h_k, h_v,
mask = h_mask,
attn_bias = h_attn_bias,
prev_attn = h_prev_attn
)
prev_head_out = out
all_outs.append(out)
all_intermediates.append(intermediates)
# cat all output heads
all_outs = torch.cat(all_outs, dim = 1)
# cat all intermediates, if they exist
qk_similarities, pre_softmax_attn, post_softmax_attn = zip(*map(lambda i: i.to_tuple(), all_intermediates))
qk_similarities, pre_softmax_attn, post_softmax_attn = map(compact, (qk_similarities, pre_softmax_attn, post_softmax_attn))
aggregated_intermediates = Intermediates(
qk_similarities = torch.cat(qk_similarities, dim = 1) if len(qk_similarities) > 0 else None,
pre_softmax_attn = torch.cat(pre_softmax_attn, dim = 1) if len(pre_softmax_attn) > 0 else None,
post_softmax_attn = torch.cat(post_softmax_attn, dim = 1) if len(post_softmax_attn) > 0 else None
)
return all_outs, aggregated_intermediates | PALM-E-main | palme/attend.py |
import math
from dataclasses import dataclass
from functools import partial, wraps
from inspect import isfunction
# constants
from math import ceil
from random import random
from typing import Callable, List, Optional
import torch
import torch.nn.functional as F
from einops import pack, rearrange, reduce, repeat, unpack
from torch import Tensor, einsum, nn
from palme.attend import Attend, Intermediates
def exists(val):
return val is not None
def eval_decorator(fn):
def inner(self, *args, **kwargs):
was_training = self.training
self.eval()
out = fn(self, *args, **kwargs)
self.train(was_training)
return out
return inner
# nucleus
def top_p(logits, thres = 0.9):
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cum_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
sorted_indices_to_remove = cum_probs > (1 - thres)
sorted_indices_to_remove[:, 1:] = sorted_indices_to_remove[:, :-1].clone()
sorted_indices_to_remove[:, 0] = 0
sorted_logits[sorted_indices_to_remove] = float('-inf')
return sorted_logits.scatter(1, sorted_indices, sorted_logits)
# topk
def top_k(logits, thres = 0.9):
k = ceil((1 - thres) * logits.shape[-1])
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
# top_a
def top_a(logits, min_p_pow=2.0, min_p_ratio=0.02):
probs = F.softmax(logits, dim=-1)
limit = torch.pow(torch.max(probs), min_p_pow) * min_p_ratio
logits[probs < limit] = float('-inf')
logits[probs >= limit] = 1
return logits
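# Illustrative sketch (not part of the original module): with thres=0.9,
# top_k keeps roughly the top 10% of logits and sets the rest to -inf
# before sampling:
#
#   logits = torch.randn(1, 20000)
#   probs = F.softmax(top_k(logits, thres=0.9) / 1.0, dim=-1)  # temperature 1.0
#   sample = torch.multinomial(probs, 1)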
# autoregressive wrapper class
class AutoregressiveWrapper(nn.Module):
def __init__(
self,
net,
ignore_index = -100,
pad_value = 0,
mask_prob = 0.
):
super().__init__()
self.pad_value = pad_value
self.ignore_index = ignore_index
self.net = net
self.max_seq_len = net.max_seq_len
# paper shows masking (MLM) in conjunction with autoregressive decoder-only training leads to big improvements https://arxiv.org/abs/2210.13432
assert mask_prob < 1.
self.mask_prob = mask_prob
@torch.no_grad()
@eval_decorator
def generate(
self,
start_tokens,
seq_len,
eos_token = None,
temperature = 1.,
filter_logits_fn = top_k,
filter_thres = 0.9,
min_p_pow = 2.0,
min_p_ratio = 0.02,
**kwargs
):
start_tokens, ps = pack([start_tokens], '* n')
b, t = start_tokens.shape
out = start_tokens
for _ in range(seq_len):
x = out[:, -self.max_seq_len:]
logits = self.net(x, **kwargs)[:, -1]
if filter_logits_fn in {top_k, top_p}:
filtered_logits = filter_logits_fn(logits, thres = filter_thres)
probs = F.softmax(filtered_logits / temperature, dim=-1)
            elif filter_logits_fn is top_a:
                filtered_logits = filter_logits_fn(logits, min_p_pow = min_p_pow, min_p_ratio = min_p_ratio)
                probs = F.softmax(filtered_logits / temperature, dim = -1)
            else:
                raise ValueError(f'unknown filter_logits_fn {filter_logits_fn}')
sample = torch.multinomial(probs, 1)
out = torch.cat((out, sample), dim=-1)
if exists(eos_token):
is_eos_tokens = (out == eos_token)
if is_eos_tokens.any(dim = -1).all():
# mask out everything after the eos tokens
shifted_is_eos_tokens = F.pad(is_eos_tokens, (1, -1))
mask = shifted_is_eos_tokens.float().cumsum(dim = -1) >= 1
out = out.masked_fill(mask, self.pad_value)
break
out = out[:, t:]
out, = unpack(out, ps, '* n')
return out
def forward(self, x, return_loss=True, **kwargs):
seq, ignore_index = x.shape[1], self.ignore_index
inp, target = x[:, :-1], x[:, 1:]
if self.mask_prob > 0.:
rand = torch.randn(inp.shape, device = x.device)
rand[:, 0] = -torch.finfo(rand.dtype).max # first token should not be masked out
num_mask = min(int(seq * self.mask_prob), seq - 1)
indices = rand.topk(num_mask, dim = -1).indices
mask = ~torch.zeros_like(inp).scatter(1, indices, 1.).bool()
kwargs.update(self_attn_context_mask = mask)
        logits = self.net(inp, **kwargs)
        if not return_loss:
            return logits
        loss = F.cross_entropy(
            rearrange(logits, 'b n c -> b c n'),
            target,
            ignore_index = ignore_index
        )
        return logits, loss
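# Illustrative usage sketch (added for exposition, not part of the original
# file): wrapping a small decoder-only model and sampling from it. `Transformer`
# and `Decoder` are defined further down in this file; all sizes are assumptions.
def _demo_autoregressive_generate():
    net = Transformer(
        num_tokens = 256,
        max_seq_len = 64,
        attn_layers = Decoder(dim = 128, depth = 2, heads = 4)
    )
    model = AutoregressiveWrapper(net)
    prompt = torch.randint(0, 256, (1, 4))
    return model.generate(prompt, seq_len = 16)    # (1, 16) generated token ids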
DEFAULT_DIM_HEAD = 64
@dataclass
class LayerIntermediates:
hiddens: Optional[List[Tensor]] = None
attn_intermediates: Optional[List[Intermediates]] = None
layer_hiddens: Optional[List[Tensor]] = None
attn_z_loss: Optional[Tensor] = None
# helpers
def exists(val):
return val is not None
def default(val, d):
if exists(val):
return val
return d() if isfunction(d) else d
def cast_tuple(val, depth):
return val if isinstance(val, tuple) else (val,) * depth
def maybe(fn):
@wraps(fn)
def inner(x, *args, **kwargs):
if not exists(x):
return x
return fn(x, *args, **kwargs)
return inner
class always():
def __init__(self, val):
self.val = val
def __call__(self, *args, **kwargs):
return self.val
class not_equals():
def __init__(self, val):
self.val = val
def __call__(self, x, *args, **kwargs):
return x != self.val
class equals():
def __init__(self, val):
self.val = val
def __call__(self, x, *args, **kwargs):
return x == self.val
def Sequential(*modules):
return nn.Sequential(*filter(exists, modules))
# tensor helpers
def max_neg_value(tensor):
return -torch.finfo(tensor.dtype).max
def l2norm(t, groups = 1):
t = rearrange(t, '... (g d) -> ... g d', g = groups)
t = F.normalize(t, p = 2, dim = -1)
return rearrange(t, '... g d -> ... (g d)')
def pad_at_dim(t, pad, dim = -1, value = 0.):
dims_from_right = (- dim - 1) if dim < 0 else (t.ndim - dim - 1)
zeros = ((0, 0) * dims_from_right)
return F.pad(t, (*zeros, *pad), value = value)
def or_reduce(masks):
head, *body = masks
for rest in body:
head = head | rest
return head
# auxiliary loss helpers
def calc_z_loss(
pre_softmax_attns: List[Tensor],
mask = None,
weight = 1.
):
# the same loss applied to the mixture of experts router logits in https://arxiv.org/abs/2202.08906
# in the paper, in a tiny footnote, they mention using it on attention logits with stabilizing effects
# also used in PaLM as one of the measures
lse = 0.
for attn in pre_softmax_attns:
lse = lse + attn.logsumexp(dim = -1)
loss = torch.square(lse)
loss = reduce(loss, 'b h n -> b n', 'sum')
if not exists(mask):
return loss.mean() * weight
loss = loss[mask].sum() / mask.sum().clamp(min = 1e-5)
return loss * weight
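# Illustrative sketch (added for exposition, not part of the original file):
# the z-loss consumes pre-softmax attention logits of shape (batch, heads, q, k).
def _demo_z_loss():
    attns = [torch.randn(2, 8, 16, 16) for _ in range(2)]
    return calc_z_loss(attns, weight = 1e-4)       # scalar auxiliary loss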
# init helpers
def init_zero_(layer):
nn.init.constant_(layer.weight, 0.)
if exists(layer.bias):
nn.init.constant_(layer.bias, 0.)
# keyword argument helpers
def pick_and_pop(keys, d):
values = list(map(lambda key: d.pop(key), keys))
return dict(zip(keys, values))
def group_dict_by_key(cond, d):
return_val = [dict(),dict()]
for key in d.keys():
match = bool(cond(key))
ind = int(not match)
return_val[ind][key] = d[key]
return (*return_val,)
def string_begins_with(prefix, str):
return str.startswith(prefix)
def group_by_key_prefix(prefix, d):
return group_dict_by_key(partial(string_begins_with, prefix), d)
def groupby_prefix_and_trim(prefix, d):
kwargs_with_prefix, kwargs = group_dict_by_key(partial(string_begins_with, prefix), d)
kwargs_without_prefix = dict(map(lambda x: (x[0][len(prefix):], x[1]), tuple(kwargs_with_prefix.items())))
return kwargs_without_prefix, kwargs
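# Illustrative sketch (added for exposition, not part of the original file):
# how AttentionLayers routes prefixed keyword arguments with the helper above.
def _demo_prefix_routing():
    kwargs = dict(attn_dropout = 0.1, ff_mult = 4, depth = 6)
    ff_kwargs, kwargs = groupby_prefix_and_trim('ff_', kwargs)
    attn_kwargs, kwargs = groupby_prefix_and_trim('attn_', kwargs)
    # ff_kwargs == {'mult': 4}, attn_kwargs == {'dropout': 0.1}, kwargs == {'depth': 6}
    return ff_kwargs, attn_kwargs, kwargs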
# initializations
def deepnorm_init(
transformer,
beta,
module_name_match_list = ['.ff.', '.to_v', '.to_out']
):
for name, module in transformer.named_modules():
if type(module) != nn.Linear:
continue
needs_beta_gain = any(map(lambda substr: substr in name, module_name_match_list))
gain = beta if needs_beta_gain else 1
nn.init.xavier_normal_(module.weight.data, gain = gain)
if exists(module.bias):
nn.init.constant_(module.bias.data, 0)
# structured dropout, more effective than traditional attention dropouts
def dropout_seq(seq, mask, dropout):
b, n, *_, device = *seq.shape, seq.device
logits = torch.randn(b, n, device = device)
if exists(mask):
mask_value = max_neg_value(logits)
logits = logits.masked_fill(~mask, mask_value)
keep_prob = 1. - dropout
num_keep = max(1, int(keep_prob * n))
keep_indices = logits.topk(num_keep, dim = 1).indices
batch_indices = torch.arange(b, device = device)
batch_indices = rearrange(batch_indices, 'b -> b 1')
seq = seq[batch_indices, keep_indices]
if exists(mask):
seq_counts = mask.sum(dim = -1)
seq_keep_counts = torch.ceil(seq_counts * keep_prob).int()
keep_mask = torch.arange(num_keep, device = device) < rearrange(seq_keep_counts, 'b -> b 1')
mask = mask[batch_indices, keep_indices] & keep_mask
return seq, mask
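# Illustrative sketch (added for exposition, not part of the original file):
# structured token dropout on a batch with an all-True mask keeps 75% of the
# positions here, so both outputs shrink along the sequence dimension.
def _demo_dropout_seq():
    seq = torch.randn(2, 16, 32)
    mask = torch.ones(2, 16, dtype = torch.bool)
    seq, mask = dropout_seq(seq, mask, dropout = 0.25)
    return seq.shape, mask.shape                   # (2, 12, 32), (2, 12)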
# activations
class ReluSquared(nn.Module):
def forward(self, x):
return F.relu(x) ** 2
# embedding
class TokenEmbedding(nn.Module):
def __init__(self, dim, num_tokens, l2norm_embed = False):
super().__init__()
self.l2norm_embed = l2norm_embed
self.emb = nn.Embedding(num_tokens, dim)
def forward(self, x):
token_emb = self.emb(x)
return l2norm(token_emb) if self.l2norm_embed else token_emb
# positional embeddings
class AbsolutePositionalEmbedding(nn.Module):
def __init__(self, dim, max_seq_len, l2norm_embed = False):
super().__init__()
self.scale = dim ** -0.5 if not l2norm_embed else 1.
self.max_seq_len = max_seq_len
self.l2norm_embed = l2norm_embed
self.emb = nn.Embedding(max_seq_len, dim)
def forward(self, x, pos = None):
seq_len, device = x.shape[1], x.device
assert seq_len <= self.max_seq_len, f'you are passing in a sequence length of {seq_len} but your absolute positional embedding has a max sequence length of {self.max_seq_len}'
if not exists(pos):
pos = torch.arange(seq_len, device = device)
pos_emb = self.emb(pos)
pos_emb = pos_emb * self.scale
return l2norm(pos_emb) if self.l2norm_embed else pos_emb
class ScaledSinusoidalEmbedding(nn.Module):
def __init__(self, dim, theta = 10000):
super().__init__()
assert (dim % 2) == 0
self.scale = nn.Parameter(torch.ones(1) * dim ** -0.5)
half_dim = dim // 2
freq_seq = torch.arange(half_dim).float() / half_dim
inv_freq = theta ** -freq_seq
self.register_buffer('inv_freq', inv_freq, persistent = False)
def forward(self, x, pos = None):
seq_len, device = x.shape[1], x.device
if not exists(pos):
pos = torch.arange(seq_len, device = device)
emb = einsum('i, j -> i j', pos, self.inv_freq)
emb = torch.cat((emb.sin(), emb.cos()), dim = -1)
return emb * self.scale
class RelativePositionBias(nn.Module):
def __init__(self, scale, causal = False, num_buckets = 32, max_distance = 128, heads = 8):
super().__init__()
self.scale = scale
self.causal = causal
self.num_buckets = num_buckets
self.max_distance = max_distance
self.relative_attention_bias = nn.Embedding(num_buckets, heads)
@staticmethod
def _relative_position_bucket(relative_position, causal = True, num_buckets = 32, max_distance = 128):
ret = 0
n = -relative_position
if not causal:
num_buckets //= 2
ret += (n < 0).long() * num_buckets
n = torch.abs(n)
else:
n = torch.max(n, torch.zeros_like(n))
max_exact = num_buckets // 2
is_small = n < max_exact
val_if_large = max_exact + (
torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)
).long()
val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1))
ret += torch.where(is_small, n, val_if_large)
return ret
@property
def device(self):
return next(self.parameters()).device
def forward(self, i, j):
device = self.device
q_pos = torch.arange(j - i, j, dtype = torch.long, device = device)
k_pos = torch.arange(j, dtype = torch.long, device = device)
rel_pos = k_pos[None, :] - q_pos[:, None]
rp_bucket = self._relative_position_bucket(rel_pos, causal = self.causal, num_buckets = self.num_buckets, max_distance = self.max_distance)
values = self.relative_attention_bias(rp_bucket)
bias = rearrange(values, 'i j h -> h i j')
return bias * self.scale
class DynamicPositionBias(nn.Module):
def __init__(self, dim, *, heads, depth, log_distance = False, norm = False):
super().__init__()
assert depth >= 1, 'depth for dynamic position bias MLP must be greater or equal to 1'
self.log_distance = log_distance
self.mlp = nn.ModuleList([])
self.mlp.append(Sequential(
nn.Linear(1, dim),
nn.LayerNorm(dim) if norm else None,
nn.SiLU()
))
for _ in range(depth - 1):
self.mlp.append(Sequential(
nn.Linear(dim, dim),
nn.LayerNorm(dim) if norm else None,
nn.SiLU()
))
self.mlp.append(nn.Linear(dim, heads))
@property
def device(self):
return next(self.parameters()).device
def forward(self, i, j):
assert i == j
n, device = j, self.device
# get the (n x n) matrix of distances
seq_arange = torch.arange(n, device = device)
context_arange = torch.arange(n, device = device)
indices = rearrange(seq_arange, 'i -> i 1') - rearrange(context_arange, 'j -> 1 j')
indices += (n - 1)
# input to continuous positions MLP
pos = torch.arange(-n + 1, n, device = device).float()
pos = rearrange(pos, '... -> ... 1')
if self.log_distance:
pos = torch.sign(pos) * torch.log(pos.abs() + 1) # log of distance is sign(rel_pos) * log(abs(rel_pos) + 1)
for layer in self.mlp:
pos = layer(pos)
# get position biases
bias = pos[indices]
bias = rearrange(bias, 'i j h -> h i j')
return bias
class AlibiPositionalBias(nn.Module):
def __init__(self, heads, total_heads, **kwargs):
super().__init__()
self.heads = heads
self.total_heads = total_heads
slopes = Tensor(self._get_slopes(heads))
slopes = rearrange(slopes, 'h -> h 1 1')
self.register_buffer('slopes', slopes, persistent = False)
self.register_buffer('bias', None, persistent = False)
def get_bias(self, i, j, device):
i_arange = torch.arange(j - i, j, device = device)
j_arange = torch.arange(j, device = device)
bias = -torch.abs(rearrange(j_arange, 'j -> 1 1 j') - rearrange(i_arange, 'i -> 1 i 1'))
return bias
@staticmethod
def _get_slopes(heads):
def get_slopes_power_of_2(n):
start = (2**(-2**-(math.log2(n)-3)))
ratio = start
return [start*ratio**i for i in range(n)]
if math.log2(heads).is_integer():
return get_slopes_power_of_2(heads)
closest_power_of_2 = 2 ** math.floor(math.log2(heads))
return get_slopes_power_of_2(closest_power_of_2) + get_slopes_power_of_2(2 * closest_power_of_2)[0::2][:heads-closest_power_of_2]
@property
def device(self):
return next(self.buffers()).device
def forward(self, i, j):
h, device = self.total_heads, self.device
if exists(self.bias) and self.bias.shape[-1] >= j and self.bias.shape[-2] >= i:
return self.bias[..., :i, :j]
bias = self.get_bias(i, j, device)
bias = bias * self.slopes
num_heads_unalibied = h - bias.shape[0]
bias = pad_at_dim(bias, (0, num_heads_unalibied), dim = 0)
self.register_buffer('bias', bias, persistent = False)
return self.bias
class RotaryEmbedding(nn.Module):
def __init__(
self,
dim,
use_xpos = False,
scale_base = 512,
interpolation_factor = 1.,
base = 10000,
base_rescale_factor = 1.
):
super().__init__()
# proposed by reddit user bloc97, to rescale rotary embeddings to longer sequence length without fine-tuning
# has some connection to NTK literature
# https://www.reddit.com/r/LocalLLaMA/comments/14lz7j5/ntkaware_scaled_rope_allows_llama_models_to_have/
base *= base_rescale_factor ** (dim / (dim - 2))
inv_freq = 1. / (base ** (torch.arange(0, dim, 2).float() / dim))
self.register_buffer('inv_freq', inv_freq)
assert interpolation_factor >= 1.
self.interpolation_factor = interpolation_factor
if not use_xpos:
self.register_buffer('scale', None)
return
scale = (torch.arange(0, dim, 2) + 0.4 * dim) / (1.4 * dim)
self.scale_base = scale_base
self.register_buffer('scale', scale)
def forward(self, seq_len, device):
t = torch.arange(seq_len, device = device).type_as(self.inv_freq)
t = t / self.interpolation_factor
freqs = torch.einsum('i , j -> i j', t, self.inv_freq)
freqs = torch.cat((freqs, freqs), dim = -1)
if not exists(self.scale):
return freqs, 1.
power = (torch.arange(seq_len, device = device) - (seq_len // 2)) / self.scale_base
scale = self.scale ** rearrange(power, 'n -> n 1')
scale = torch.cat((scale, scale), dim = -1)
return freqs, scale
def rotate_half(x):
x = rearrange(x, '... (j d) -> ... j d', j = 2)
x1, x2 = x.unbind(dim = -2)
return torch.cat((-x2, x1), dim = -1)
def apply_rotary_pos_emb(t, freqs, scale = 1):
seq_len = t.shape[-2]
freqs = freqs[-seq_len:, :]
return (t * freqs.cos() * scale) + (rotate_half(t) * freqs.sin() * scale)
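# Illustrative sketch (added for exposition, not part of the original file):
# generating rotary frequencies and applying them to query / key tensors of
# shape (batch, heads, seq, dim_head); all sizes are arbitrary assumptions.
def _demo_rotary():
    rotary = RotaryEmbedding(dim = 32)
    q = torch.randn(1, 8, 128, 32)
    k = torch.randn(1, 8, 128, 32)
    freqs, _ = rotary(128, q.device)
    q, k = apply_rotary_pos_emb(q, freqs), apply_rotary_pos_emb(k, freqs)
    return q.shape, k.shape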
# norms
class Scale(nn.Module):
def __init__(self, value, fn):
super().__init__()
self.value = value
self.fn = fn
def forward(self, x, **kwargs):
out = self.fn(x, **kwargs)
scale_fn = lambda t: t * self.value
if not isinstance(out, tuple):
return scale_fn(out)
return (scale_fn(out[0]), *out[1:])
class ScaleNorm(nn.Module):
def __init__(self, dim, eps = 1e-5):
super().__init__()
self.eps = eps
self.g = nn.Parameter(torch.ones(1) * (dim ** -0.5))
def forward(self, x):
norm = torch.norm(x, dim = -1, keepdim = True)
return x / norm.clamp(min = self.eps) * self.g
class RMSNorm(nn.Module):
def __init__(self, dim):
super().__init__()
self.scale = dim ** 0.5
self.g = nn.Parameter(torch.ones(dim))
def forward(self, x):
return F.normalize(x, dim = -1) * self.scale * self.g
class SimpleRMSNorm(nn.Module):
def __init__(self, dim):
super().__init__()
self.scale = dim ** 0.5
def forward(self, x):
return F.normalize(x, dim = -1) * self.scale
# residual and residual gates
class Residual(nn.Module):
def __init__(self, dim, scale_residual = False, scale_residual_constant = 1.):
super().__init__()
self.residual_scale = nn.Parameter(torch.ones(dim)) if scale_residual else None
self.scale_residual_constant = scale_residual_constant
def forward(self, x, residual):
if exists(self.residual_scale):
residual = residual * self.residual_scale
if self.scale_residual_constant != 1:
residual = residual * self.scale_residual_constant
return x + residual
class GRUGating(nn.Module):
def __init__(self, dim, scale_residual = False, **kwargs):
super().__init__()
self.gru = nn.GRUCell(dim, dim)
self.residual_scale = nn.Parameter(torch.ones(dim)) if scale_residual else None
def forward(self, x, residual):
if exists(self.residual_scale):
residual = residual * self.residual_scale
gated_output = self.gru(
rearrange(x, 'b n d -> (b n) d'),
rearrange(residual, 'b n d -> (b n) d')
)
return gated_output.reshape_as(x)
# token shifting
def shift(t, amount, mask = None):
if amount == 0:
return t
else:
amount = min(amount, t.shape[1])
if exists(mask):
t = t.masked_fill(~mask[..., None], 0.)
return pad_at_dim(t, (amount, -amount), dim = - 2, value = 0.)
class ShiftTokens(nn.Module):
def __init__(self, shifts, fn):
super().__init__()
self.fn = fn
self.shifts = tuple(shifts)
def forward(self, x, **kwargs):
mask = kwargs.get('mask', None)
shifts = self.shifts
segments = len(shifts)
feats_per_shift = x.shape[-1] // segments
splitted = x.split(feats_per_shift, dim = -1)
segments_to_shift, rest = splitted[:segments], splitted[segments:]
segments_to_shift = list(map(lambda args: shift(*args, mask = mask), zip(segments_to_shift, shifts)))
x = torch.cat((*segments_to_shift, *rest), dim = -1)
return self.fn(x, **kwargs)
# feedforward
class GLU(nn.Module):
def __init__(
self,
dim_in,
dim_out,
activation: Callable,
mult_bias = False
):
super().__init__()
self.act = activation
self.proj = nn.Linear(dim_in, dim_out * 2)
self.mult_bias = nn.Parameter(torch.ones(dim_out)) if mult_bias else 1.
def forward(self, x):
x, gate = self.proj(x).chunk(2, dim = -1)
return x * self.act(gate) * self.mult_bias
class FeedForward(nn.Module):
def __init__(
self,
dim,
dim_out = None,
mult = 4,
glu = False,
glu_mult_bias = False,
swish = False,
relu_squared = False,
post_act_ln = False,
dropout = 0.,
no_bias = False,
zero_init_output = False
):
super().__init__()
inner_dim = int(dim * mult)
dim_out = default(dim_out, dim)
if relu_squared:
activation = ReluSquared()
elif swish:
activation = nn.SiLU()
else:
activation = nn.GELU()
if glu:
project_in = GLU(dim, inner_dim, activation, mult_bias = glu_mult_bias)
else:
project_in = nn.Sequential(
nn.Linear(dim, inner_dim, bias = not no_bias),
activation
)
self.ff = Sequential(
project_in,
nn.LayerNorm(inner_dim) if post_act_ln else None,
nn.Dropout(dropout),
nn.Linear(inner_dim, dim_out, bias = not no_bias)
)
# init last linear layer to 0
if zero_init_output:
init_zero_(self.ff[-1])
def forward(self, x):
return self.ff(x)
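# Illustrative sketch (added for exposition, not part of the original file):
# a GLU feedforward block with squared-ReLU activation preserves the model
# dimension of its input.
def _demo_feedforward():
    ff = FeedForward(dim = 64, mult = 4, glu = True, relu_squared = True)
    x = torch.randn(2, 16, 64)
    return ff(x).shape                             # (2, 16, 64)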
# attention. it is all we need
class Attention(nn.Module):
def __init__(
self,
dim,
dim_head = DEFAULT_DIM_HEAD,
heads = 8,
causal = False,
flash = False,
talking_heads = False,
head_scale = False,
sparse_topk = None,
num_mem_kv = 0,
dropout = 0.,
on_attn = False,
gate_values = False,
zero_init_output = False,
max_attend_past = None,
qk_norm = False,
qk_norm_groups = 1,
qk_norm_scale = 10,
qk_norm_dim_scale = False,
one_kv_head = False,
shared_kv = False,
value_dim_head = None,
tensor_product = False, # https://arxiv.org/abs/2208.06061
cascading_heads = False,
add_zero_kv = False, # same as add_zero_attn in pytorch
onnxable = False
):
super().__init__()
self.scale = dim_head ** -0.5
self.heads = heads
self.causal = causal
self.max_attend_past = max_attend_past
value_dim_head = default(value_dim_head, dim_head)
q_dim = k_dim = dim_head * heads
v_dim = out_dim = value_dim_head * heads
self.one_kv_head = one_kv_head
if one_kv_head:
k_dim = dim_head
v_dim = value_dim_head
out_dim = v_dim * heads
self.to_q = nn.Linear(dim, q_dim, bias = False)
self.to_k = nn.Linear(dim, k_dim, bias = False)
# shared key / values, for further memory savings during inference
assert not (shared_kv and value_dim_head != dim_head), 'key and value head dimensions must be equal for shared key / values'
self.to_v = nn.Linear(dim, v_dim, bias = False) if not shared_kv else None
# relations projection from tp-attention
self.to_r = nn.Linear(dim, v_dim, bias = False) if tensor_product else None
# add GLU gating for aggregated values, from alphafold2
self.to_v_gate = None
if gate_values:
self.to_v_gate = nn.Linear(dim, out_dim)
nn.init.constant_(self.to_v_gate.weight, 0)
nn.init.constant_(self.to_v_gate.bias, 1)
# cosine sim attention
self.qk_norm = qk_norm
self.qk_norm_groups = qk_norm_groups
self.qk_norm_scale = qk_norm_scale
# whether to use the rmsnorm (equivalent to cosine sim attention when scale is equal to 1) - https://arxiv.org/abs/2302.05442
self.qk_norm_dim_scale = qk_norm_dim_scale
self.qk_norm_q_scale = self.qk_norm_k_scale = 1
if qk_norm and qk_norm_dim_scale:
self.qk_norm_q_scale = nn.Parameter(torch.ones(dim_head))
self.qk_norm_k_scale = nn.Parameter(torch.ones(dim_head))
assert (not qk_norm) or (dim_head % qk_norm_groups) == 0, 'dimension per attention head must be divisible by the qk norm groups'
assert not (qk_norm and (dim_head // qk_norm_groups) <= 2), 'the group dimension may be too small (2 was too small in my tests, but 4 still works, surprisingly)'
# attend class - includes core attention algorithm + talking heads
self.attend = Attend(
heads = heads,
causal = causal,
talking_heads = talking_heads,
dropout = dropout,
sparse_topk = sparse_topk,
qk_norm = qk_norm,
scale = qk_norm_scale if qk_norm else self.scale,
add_zero_kv = add_zero_kv,
flash = flash,
onnxable = onnxable
)
# head scaling
self.head_scale = head_scale
if head_scale:
self.head_scale_params = nn.Parameter(torch.ones(1, heads, 1, 1))
# explicit topk sparse attention
self.sparse_topk = sparse_topk
# add memory key / values
self.num_mem_kv = num_mem_kv
if num_mem_kv > 0:
self.mem_k = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head))
self.mem_v = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head))
# attention on attention
self.attn_on_attn = on_attn
self.to_out = nn.Sequential(nn.Linear(out_dim, dim * 2, bias = False), nn.GLU()) if on_attn else nn.Linear(out_dim, dim, bias = False)
# init output projection 0
if zero_init_output:
init_zero_(self.to_out)
def forward(
self,
x,
context = None,
mask = None,
context_mask = None,
attn_mask = None,
rel_pos = None,
rotary_pos_emb = None,
prev_attn = None,
mem = None
):
b, n, _, h, head_scale, device, has_context = *x.shape, self.heads, self.head_scale, x.device, exists(context)
kv_input = default(context, x)
q_input = x
k_input = kv_input
v_input = kv_input
r_input = x
if exists(mem):
k_input = torch.cat((mem, k_input), dim = -2)
v_input = torch.cat((mem, v_input), dim = -2)
q = self.to_q(q_input)
k = self.to_k(k_input)
v = self.to_v(v_input) if exists(self.to_v) else k
r = self.to_r(r_input) if exists(self.to_r) else None
q = rearrange(q, 'b n (h d) -> b h n d', h = h)
if not self.one_kv_head:
k, v, r = map(lambda t: maybe(rearrange)(t, 'b n (h d) -> b h n d', h = h), (k, v, r))
if self.qk_norm:
qk_l2norm = partial(l2norm, groups = self.qk_norm_groups)
q, k = map(qk_l2norm, (q, k))
scale = self.qk_norm_scale
q = q * self.qk_norm_q_scale
k = k * self.qk_norm_k_scale
if exists(rotary_pos_emb) and not has_context:
freqs, xpos_scale = rotary_pos_emb
l = freqs.shape[-1]
q_xpos_scale, k_xpos_scale = (xpos_scale, xpos_scale ** -1.) if exists(xpos_scale) else (1., 1.)
(ql, qr), (kl, kr), (vl, vr) = map(lambda t: (t[..., :l], t[..., l:]), (q, k, v))
ql, kl, vl = map(lambda arg: apply_rotary_pos_emb(arg[0], freqs, arg[1]), ((ql, q_xpos_scale), (kl, k_xpos_scale), (vl, k_xpos_scale)))
q, k, v = map(lambda t: torch.cat(t, dim = -1), ((ql, qr), (kl, kr), (vl, vr)))
input_mask = context_mask if has_context else mask
if self.num_mem_kv > 0:
mem_k, mem_v = map(lambda t: repeat(t, 'h n d -> b h n d', b = b), (self.mem_k, self.mem_v))
if self.qk_norm:
mem_k = l2norm(mem_k)
mem_k = mem_k * self.qk_norm_k_scale
k = torch.cat((mem_k, k), dim = -2)
v = torch.cat((mem_v, v), dim = -2)
if exists(input_mask):
input_mask = pad_at_dim(input_mask, (self.num_mem_kv, 0), dim = -1, value = True)
i, j = map(lambda t: t.shape[-2], (q, k))
# determine masking
mask_value = max_neg_value(q)
masks = []
final_attn_mask = None
if exists(input_mask):
input_mask = rearrange(input_mask, 'b j -> b 1 1 j')
masks.append(~input_mask)
if exists(attn_mask):
assert 2 <= attn_mask.ndim <= 4, 'attention mask must have greater than 2 dimensions but less than or equal to 4'
if attn_mask.ndim == 2:
attn_mask = rearrange(attn_mask, 'i j -> 1 1 i j')
elif attn_mask.ndim == 3:
attn_mask = rearrange(attn_mask, 'h i j -> 1 h i j')
masks.append(~attn_mask)
if exists(self.max_attend_past):
range_q = torch.arange(j - i, j, device = device)
range_k = torch.arange(j, device = device)
dist = rearrange(range_q, 'i -> 1 1 i 1') - rearrange(range_k, 'j -> 1 1 1 j')
max_attend_past_mask = dist > self.max_attend_past
masks.append(max_attend_past_mask)
if len(masks) > 0:
final_attn_mask = ~or_reduce(masks)
# prepare relative positional bias, if needed
attn_bias = None
if exists(rel_pos):
attn_bias = rel_pos(i, j)
# attention is all we need
out, intermediates = self.attend(
q, k, v,
mask = final_attn_mask,
attn_bias = attn_bias,
prev_attn = prev_attn
)
# https://arxiv.org/abs/2208.06061 proposes to add a residual for better gradients
if exists(r):
out = out * r + out
# normformer scaling of heads
if head_scale:
out = out * self.head_scale_params
# merge heads
out = rearrange(out, 'b h n d -> b n (h d)')
# alphafold2 styled gating of the values
if exists(self.to_v_gate):
gates = self.to_v_gate(x)
out = out * gates.sigmoid()
# combine the heads
out = self.to_out(out)
if exists(mask):
mask = rearrange(mask, 'b n -> b n 1')
out = out.masked_fill(~mask, 0.)
return out, intermediates
class AttentionLayers(nn.Module):
def __init__(
self,
dim,
depth,
heads = 8,
causal = False,
cross_attend = False,
only_cross = False,
use_scalenorm = False,
use_rmsnorm = False,
use_simple_rmsnorm = False,
alibi_pos_bias = False,
alibi_num_heads = None,
rel_pos_bias = False,
rel_pos_num_buckets = 32,
rel_pos_max_distance = 128,
dynamic_pos_bias = False,
dynamic_pos_bias_log_distance = False,
dynamic_pos_bias_mlp_depth = 2,
dynamic_pos_bias_norm = False,
rotary_pos_emb = False,
rotary_emb_dim = None,
rotary_xpos = False,
rotary_interpolation_factor = 1.,
rotary_xpos_scale_base = 512,
rotary_base_rescale_factor = 1.,
custom_layers = None,
sandwich_coef = None,
par_ratio = None,
residual_attn = False,
cross_residual_attn = False,
macaron = False,
pre_norm = True,
pre_norm_has_final_norm = True,
gate_residual = False,
scale_residual = False,
scale_residual_constant = 1.,
deepnorm = False,
shift_tokens = 0,
sandwich_norm = False,
resi_dual = False,
resi_dual_scale = 1.,
zero_init_branch_output = False,
layer_dropout = 0.,
cross_attn_tokens_dropout = 0.,
**kwargs
):
super().__init__()
rotary_pos_emb = rotary_pos_emb or rotary_xpos
ff_kwargs, kwargs = groupby_prefix_and_trim('ff_', kwargs)
attn_kwargs, kwargs = groupby_prefix_and_trim('attn_', kwargs)
dim_head = attn_kwargs.get('dim_head', DEFAULT_DIM_HEAD)
self.dim = dim
self.depth = depth
self.layers = nn.ModuleList([])
self.has_pos_emb = rel_pos_bias or rotary_pos_emb
rotary_emb_dim = max(default(rotary_emb_dim, dim_head // 2), 32)
assert not (rotary_xpos and not causal), 'rotary xpos is not compatible with bidirectional attention'
self.rotary_pos_emb = RotaryEmbedding(rotary_emb_dim, use_xpos = rotary_xpos, scale_base = rotary_xpos_scale_base, interpolation_factor = rotary_interpolation_factor, base_rescale_factor = rotary_base_rescale_factor) if rotary_pos_emb else None
assert not (alibi_pos_bias and rel_pos_bias), 'you can only choose Alibi positional bias or T5 relative positional bias, not both'
assert rel_pos_num_buckets <= rel_pos_max_distance, 'number of relative position buckets must be less than the relative position max distance'
# relative positional bias
flash_attn = attn_kwargs.get('flash', False)
assert (int(rel_pos_bias) + int(dynamic_pos_bias) + int(alibi_pos_bias)) <= 1, 'you can only choose up to one of t5, alibi, or dynamic positional bias'
self.rel_pos = None
if rel_pos_bias:
assert not flash_attn, 'flash attention not compatible with t5 relative positional bias'
self.rel_pos = RelativePositionBias(scale = dim_head ** 0.5, causal = causal, heads = heads, num_buckets = rel_pos_num_buckets, max_distance = rel_pos_max_distance)
elif dynamic_pos_bias:
assert not flash_attn, 'flash attention not compatible with dynamic positional bias'
self.rel_pos = DynamicPositionBias(dim = dim // 4, heads = heads, log_distance = dynamic_pos_bias_log_distance, depth = dynamic_pos_bias_mlp_depth, norm = dynamic_pos_bias_norm)
elif alibi_pos_bias:
alibi_num_heads = default(alibi_num_heads, heads)
assert alibi_num_heads <= heads, 'number of ALiBi heads must be less than the total number of heads'
self.rel_pos = AlibiPositionalBias(heads = alibi_num_heads, total_heads = heads)
# determine deepnorm and residual scale
if deepnorm:
assert scale_residual_constant == 1, 'scale residual constant is being overridden by deep norm settings'
pre_norm = sandwich_norm = resi_dual = False
scale_residual = True
scale_residual_constant = (2 * depth) ** 0.25
assert (int(sandwich_norm) + int(resi_dual)) <= 1, 'either sandwich norm or resiDual is selected, but not both'
assert not (not pre_norm and sandwich_norm), 'sandwich norm cannot be used when not using prenorm'
if resi_dual:
pre_norm = False
self.pre_norm = pre_norm
self.sandwich_norm = sandwich_norm
self.resi_dual = resi_dual
assert 0 < resi_dual_scale <= 1., 'resiDual prenorm residual must be scaled by a factor greater than 0 and less than or equal to 1.'
self.resi_dual_scale = resi_dual_scale
self.residual_attn = residual_attn
self.cross_residual_attn = cross_residual_attn
assert not (flash_attn and (residual_attn or cross_residual_attn)), 'flash attention is not compatible with residual attention'
self.cross_attend = cross_attend
assert (int(use_scalenorm) + int(use_rmsnorm) + int(use_simple_rmsnorm)) <= 1, 'you can only use either scalenorm, rmsnorm, or simple rmsnorm'
if use_scalenorm:
norm_class = ScaleNorm
elif use_rmsnorm:
norm_class = RMSNorm
elif use_simple_rmsnorm:
norm_class = SimpleRMSNorm
else:
norm_class = nn.LayerNorm
norm_fn = partial(norm_class, dim)
if cross_attend and not only_cross:
default_block = ('a', 'c', 'f')
elif cross_attend and only_cross:
default_block = ('c', 'f')
else:
default_block = ('a', 'f')
if macaron:
default_block = ('f',) + default_block
# zero init
if zero_init_branch_output:
attn_kwargs = {**attn_kwargs, 'zero_init_output': True}
ff_kwargs = {**ff_kwargs, 'zero_init_output': True}
# calculate layer block order
if exists(custom_layers):
layer_types = custom_layers
elif exists(par_ratio):
par_depth = depth * len(default_block)
assert 1 < par_ratio <= par_depth, 'par ratio out of range'
default_block = tuple(filter(not_equals('f'), default_block))
par_attn = par_depth // par_ratio
depth_cut = par_depth * 2 // 3 # 2 / 3 attention layer cutoff suggested by PAR paper
par_width = (depth_cut + depth_cut // par_attn) // par_attn
assert len(default_block) <= par_width, 'default block is too large for par_ratio'
par_block = default_block + ('f',) * (par_width - len(default_block))
par_head = par_block * par_attn
layer_types = par_head + ('f',) * (par_depth - len(par_head))
elif exists(sandwich_coef):
assert sandwich_coef > 0 and sandwich_coef <= depth, 'sandwich coefficient should be less than the depth'
layer_types = ('a',) * sandwich_coef + default_block * (depth - sandwich_coef) + ('f',) * sandwich_coef
else:
layer_types = default_block * depth
self.layer_types = layer_types
self.num_attn_layers = len(list(filter(equals('a'), layer_types)))
# stochastic depth
self.layer_dropouts = cast_tuple(layer_dropout, len(layer_types))
# structured dropout for cross attending
self.cross_attn_tokens_dropout = cross_attn_tokens_dropout
# calculate token shifting
shift_tokens = cast_tuple(shift_tokens, len(layer_types))
# whether it has post norm
self.final_norm = norm_fn() if pre_norm or resi_dual else nn.Identity()
# iterate and construct layers
for ind, (layer_type, layer_shift_tokens) in enumerate(zip(self.layer_types, shift_tokens)):
is_last_layer = ind == (len(self.layer_types) - 1)
if layer_type == 'a':
layer = Attention(dim, heads = heads, causal = causal, **attn_kwargs)
elif layer_type == 'c':
layer = Attention(dim, heads = heads, **attn_kwargs)
elif layer_type == 'f':
layer = FeedForward(dim, **ff_kwargs)
layer = layer if not macaron else Scale(0.5, layer)
else:
raise Exception(f'invalid layer type {layer_type}')
if layer_shift_tokens > 0:
shift_range_upper = layer_shift_tokens + 1
shift_range_lower = -layer_shift_tokens if not causal else 0
layer = ShiftTokens(range(shift_range_lower, shift_range_upper), layer)
residual_fn = GRUGating if gate_residual else Residual
residual = residual_fn(dim, scale_residual = scale_residual, scale_residual_constant = scale_residual_constant)
pre_branch_norm = norm_fn() if pre_norm else None
post_branch_norm = norm_fn() if sandwich_norm else None
post_main_norm = norm_fn() if not pre_norm else None
norms = nn.ModuleList([
pre_branch_norm,
post_branch_norm,
post_main_norm
])
self.layers.append(nn.ModuleList([
norms,
layer,
residual
]))
if deepnorm:
init_gain = (8 * depth) ** -0.25
deepnorm_init(self, init_gain)
def forward(
self,
x,
context = None,
mask = None,
context_mask = None,
attn_mask = None,
self_attn_context_mask = None,
mems = None,
return_hiddens = False
):
assert not (self.cross_attend ^ exists(context)), 'context must be passed in if cross_attend is set to True'
hiddens = []
layer_hiddens = []
intermediates = []
prev_attn = None
prev_cross_attn = None
mems = mems.copy() if exists(mems) else [None] * self.num_attn_layers
rotary_pos_emb = None
if exists(self.rotary_pos_emb):
max_rotary_emb_length = max(list(map(lambda m: (m.shape[1] if exists(m) else 0) + x.shape[1], mems)))
rotary_pos_emb = self.rotary_pos_emb(max_rotary_emb_length, x.device)
outer_residual = x * self.resi_dual_scale
for ind, (layer_type, (norm, block, residual_fn), layer_dropout) in enumerate(zip(self.layer_types, self.layers, self.layer_dropouts)):
is_last = ind == (len(self.layers) - 1)
if self.training and layer_dropout > 0. and random() < layer_dropout:
continue
if layer_type == 'a':
if return_hiddens:
hiddens.append(x)
layer_mem = mems.pop(0) if mems else None
if layer_type == 'c':
if self.training and self.cross_attn_tokens_dropout > 0.:
context, context_mask = dropout_seq(context, context_mask, self.cross_attn_tokens_dropout)
inner_residual = x
if return_hiddens:
layer_hiddens.append(x)
pre_norm, post_branch_norm, post_main_norm = norm
if exists(pre_norm):
x = pre_norm(x)
if layer_type == 'a':
out, inter = block(x, mask = mask, context_mask = self_attn_context_mask, attn_mask = attn_mask, rel_pos = self.rel_pos, rotary_pos_emb = rotary_pos_emb, prev_attn = prev_attn, mem = layer_mem)
elif layer_type == 'c':
out, inter = block(x, context = context, mask = mask, context_mask = context_mask, prev_attn = prev_cross_attn)
elif layer_type == 'f':
out = block(x)
if self.resi_dual:
outer_residual = outer_residual + out * self.resi_dual_scale
if exists(post_branch_norm):
out = post_branch_norm(out)
x = residual_fn(out, inner_residual)
if layer_type in ('a', 'c') and return_hiddens:
intermediates.append(inter)
if layer_type == 'a' and self.residual_attn:
prev_attn = inter.pre_softmax_attn
elif layer_type == 'c' and self.cross_residual_attn:
prev_cross_attn = inter.pre_softmax_attn
if exists(post_main_norm):
x = post_main_norm(x)
if return_hiddens:
layer_hiddens.append(x)
if self.resi_dual:
x = x + self.final_norm(outer_residual)
else:
x = self.final_norm(x)
if return_hiddens:
intermediates = LayerIntermediates(
hiddens = hiddens,
attn_intermediates = intermediates,
layer_hiddens = layer_hiddens
)
return x, intermediates
return x
class Encoder(AttentionLayers):
def __init__(self, **kwargs):
assert 'causal' not in kwargs, 'cannot set causality on encoder'
super().__init__(causal = False, **kwargs)
class Decoder(AttentionLayers):
def __init__(self, **kwargs):
assert 'causal' not in kwargs, 'cannot set causality on decoder'
super().__init__(causal = True, **kwargs)
class CrossAttender(AttentionLayers):
def __init__(self, **kwargs):
super().__init__(cross_attend = True, only_cross = True, **kwargs)
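# Illustrative sketch (added for exposition, not part of the original file):
# the AttentionLayers subclasses above operate directly on embeddings of shape
# (batch, seq, dim); all sizes below are arbitrary assumptions.
def _demo_decoder_layers():
    decoder = Decoder(dim = 128, depth = 2, heads = 4)
    x = torch.randn(1, 32, 128)
    return decoder(x).shape                        # (1, 32, 128)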
class ViTransformerWrapper(nn.Module):
def __init__(
self,
*,
image_size,
patch_size,
attn_layers,
channels = 3,
num_classes = None,
post_emb_norm = False,
emb_dropout = 0.
):
super().__init__()
assert isinstance(attn_layers, Encoder), 'attention layers must be an Encoder'
assert image_size % patch_size == 0, 'image dimensions must be divisible by the patch size'
dim = attn_layers.dim
num_patches = (image_size // patch_size) ** 2
patch_dim = channels * patch_size ** 2
self.patch_size = patch_size
self.pos_embedding = nn.Parameter(torch.randn(1, num_patches, dim))
self.patch_to_embedding = nn.Sequential(
nn.LayerNorm(patch_dim),
nn.Linear(patch_dim, dim),
nn.LayerNorm(dim)
)
self.post_emb_norm = nn.LayerNorm(dim) if post_emb_norm else nn.Identity()
self.dropout = nn.Dropout(emb_dropout)
self.attn_layers = attn_layers
self.mlp_head = nn.Linear(dim, num_classes) if exists(num_classes) else nn.Identity()
def forward(
self,
img,
return_embeddings = False
):
p = self.patch_size
x = rearrange(img, 'b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = p, p2 = p)
x = self.patch_to_embedding(x)
n = x.shape[1]
x = x + self.pos_embedding[:, :n]
x = self.post_emb_norm(x)
x = self.dropout(x)
x = self.attn_layers(x)
        # no classification head configured, or embeddings explicitly requested
        if isinstance(self.mlp_head, nn.Identity) or return_embeddings:
            return x
x = x.mean(dim = -2)
return self.mlp_head(x)
class Transformer(nn.Module):
def __init__(
self,
*,
num_tokens,
max_seq_len,
attn_layers,
emb_dim = None,
max_mem_len = 0,
shift_mem_down = 0,
emb_dropout = 0.,
post_emb_norm = False,
num_memory_tokens = None,
tie_embedding = False,
logits_dim = None,
use_abs_pos_emb = True,
scaled_sinu_pos_emb = False,
l2norm_embed = False,
emb_frac_gradient = 1., # GLM-130B and Cogview successfully used this, set at 0.1
attn_z_loss_weight = 1e-4
):
super().__init__()
assert isinstance(attn_layers, AttentionLayers), 'attention layers must be one of Encoder or Decoder'
dim = attn_layers.dim
emb_dim = default(emb_dim, dim)
self.emb_dim = emb_dim
self.num_tokens = num_tokens
self.max_seq_len = max_seq_len
self.max_mem_len = max_mem_len
self.shift_mem_down = shift_mem_down
self.l2norm_embed = l2norm_embed
self.token_emb = TokenEmbedding(emb_dim, num_tokens, l2norm_embed = l2norm_embed)
if not (use_abs_pos_emb and not attn_layers.has_pos_emb):
self.pos_emb = always(0)
elif scaled_sinu_pos_emb:
self.pos_emb = ScaledSinusoidalEmbedding(emb_dim)
else:
self.pos_emb = AbsolutePositionalEmbedding(emb_dim, max_seq_len, l2norm_embed = l2norm_embed)
self.emb_frac_gradient = emb_frac_gradient # fraction of the gradient that should go to the embedding, https://arxiv.org/abs/2105.13290
self.post_emb_norm = nn.LayerNorm(emb_dim) if post_emb_norm else nn.Identity()
self.emb_dropout = nn.Dropout(emb_dropout)
self.project_emb = nn.Linear(emb_dim, dim) if emb_dim != dim else nn.Identity()
self.attn_layers = attn_layers
self.init_()
logits_dim = default(logits_dim, num_tokens)
self.to_logits = nn.Linear(dim, logits_dim) if not tie_embedding else lambda t: t @ self.token_emb.emb.weight.t()
# memory tokens (like [cls]) from Memory Transformers paper
num_memory_tokens = default(num_memory_tokens, 0)
self.num_memory_tokens = num_memory_tokens
if num_memory_tokens > 0:
self.memory_tokens = nn.Parameter(torch.randn(num_memory_tokens, dim))
def init_(self):
if self.l2norm_embed:
nn.init.normal_(self.token_emb.emb.weight, std = 1e-5)
if not isinstance(self.pos_emb, always):
nn.init.normal_(self.pos_emb.emb.weight, std = 1e-5)
return
nn.init.kaiming_normal_(self.token_emb.emb.weight)
def forward(
self,
x,
return_embeddings = False,
return_logits_and_embeddings = False,
return_intermediates = False,
mask = None,
return_mems = False,
return_attn = False,
mems = None,
pos = None,
prepend_embeds = None,
sum_embeds = None,
return_attn_z_loss = False,
attn_z_loss_weight = 1e-4,
**kwargs
):
b, n, device, num_mem, emb_frac_gradient = *x.shape, x.device, self.num_memory_tokens, self.emb_frac_gradient
return_hiddens = return_mems | return_attn | return_intermediates | return_attn_z_loss
# absolute positional embedding
external_pos_emb = exists(pos) and pos.dtype != torch.long
pos_emb = self.pos_emb(x, pos = pos) if not external_pos_emb else pos
x = self.token_emb(x) + pos_emb
# for summing embeddings passed externally - needs this for self-conditioning in non-autoregressive training
if exists(sum_embeds):
x = x + sum_embeds
# post embedding norm, purportedly leads to greater stabilization
x = self.post_emb_norm(x)
# whether to append embeds, as in PaLI, for image embeddings
if exists(prepend_embeds):
prepend_seq, prepend_dim = prepend_embeds.shape[1:]
assert prepend_dim == x.shape[-1], 'prepended embeddings need to have same dimensions as text model dimensions'
x = torch.cat((prepend_embeds, x), dim = -2)
# whether to reduce the gradient going to the embedding, from cogview paper, corroborated by GLM-130B model
if emb_frac_gradient < 1:
assert emb_frac_gradient > 0
x = x * emb_frac_gradient + x.detach() * (1 - emb_frac_gradient)
# embedding dropout
x = self.emb_dropout(x)
x = self.project_emb(x)
if num_mem > 0:
mem = repeat(self.memory_tokens, 'n d -> b n d', b = b)
x = torch.cat((mem, x), dim = 1)
# auto-handle masking after appending memory tokens
if exists(mask):
mask = pad_at_dim(mask, (num_mem, 0), dim = -1, value = True)
if self.shift_mem_down and exists(mems):
mems_l, mems_r = mems[:self.shift_mem_down], mems[self.shift_mem_down:]
mems = [*mems_r, *mems_l]
if return_hiddens:
x, intermediates = self.attn_layers(x, mask = mask, mems = mems, return_hiddens = True, **kwargs)
else:
x = self.attn_layers(x, mask = mask, mems = mems, **kwargs)
mem, x = x[:, :num_mem], x[:, num_mem:]
if return_logits_and_embeddings:
out = (self.to_logits(x), x)
elif return_embeddings:
out = x
else:
out = self.to_logits(x)
if return_attn_z_loss:
pre_softmax_attns = list(map(lambda t: t.pre_softmax_attn, intermediates.attn_intermediates))
intermediates.attn_z_loss = calc_z_loss(pre_softmax_attns, weight = attn_z_loss_weight)
return_intermediates = True
if return_intermediates:
return out, intermediates
if return_mems:
hiddens = intermediates.hiddens
new_mems = list(map(lambda pair: torch.cat(pair, dim = -2), zip(mems, hiddens))) if exists(mems) else hiddens
new_mems = list(map(lambda t: t[..., -self.max_mem_len:, :].detach(), new_mems))
return out, new_mems
if return_attn:
attn_maps = list(map(lambda t: t.post_softmax_attn, intermediates.attn_intermediates))
return out, attn_maps
return out | PALM-E-main | palme/transformer.py |
Python-Package-Template-main | example.py |
|
Python-Package-Template-main | package/__init__.py |
|
Python-Package-Template-main | package/main.py |
|
Python-Package-Template-main | package/subfolder/__init__.py |
|
Python-Package-Template-main | package/subfolder/main.py |
|
import torch
import colossalai
from colossalai.core import global_context as gpc
from colossalai.trainer import Trainer, hooks
from colossalai.utils import MultiTimer
from colossalai.logging import disable_existing_loggers, get_dist_logger
import wandb
from lamda_pytorch.config.config import CFG
from lamda_pytorch.build_dataloader import build_dataloaders
from lamda_pytorch.lamda_pytorch import lamda_model
from lamda_pytorch.utils.utils import LaMDA_Loss, AutoregressiveWrapper
from transformers import AutoTokenizer
def LaMDA_Trainer(cfg: CFG):
assert torch.cuda.is_available()
disable_existing_loggers()
parser = colossalai.get_default_parser()
parser.add_argument(
'--use_trainer',
action='store_true',
help='whether to use trainer'
)
args = parser.parse_args()
    if cfg.use_zero:
        # ZeRO launch path is left unimplemented in the original script; see
        # lamda_pytorch/config/zero_config.py for the intended configuration
        pass
    else:
        colossalai.launch_from_torch(
            config='./lamda_pytorch/config/colossal_config.py',
            seed = cfg.seed
        )
    assert hasattr(gpc.config, "EPOCHS"), "Please provide EPOCHS in your configuration"
# Colossal logger
logger = get_dist_logger()
logger.info("Initialized environment", ranks=[0])
# LaMDA model
model = lamda_model()
model = AutoregressiveWrapper(model)
# setup dataloaders
    if cfg.use_huggingface:
tokenizer = AutoTokenizer.from_pretrained(cfg.tokenizer_name)
train_dataloader, eval_dataloader = build_dataloaders(cfg, tokenizer)
# loss function
loss_fn = LaMDA_Loss()
# optimizer function
optimizer = torch.optim.AdamW(
model.parameters(),
lr = gpc.config.LEARNING_RATE,
weight_decay=gpc.config.WEIGHT_DECAY
)
# initialze model, optimizer, criterion, and data loaders
engine, train_dataloader, _, _ = colossalai.initialize(
model,
optimizer,
loss_fn,
train_dataloader = train_dataloader
)
def batch_data_process_func(batch_data):
data = batch_data["input_ids"]
labels = batch_data["labels"]
return data, labels
engine.schedule.batch_data_process_func = batch_data_process_func
    if cfg.use_wandb:
# initialize Weights and Biases Logging
wandb.init(project = cfg.project_name)
engine.train()
for step, batch in enumerate(train_dataloader):
            inputs, labels = batch['input_ids'].cuda(), batch['labels'].cuda()
engine.zero_grad()
outputs = engine(inputs)
train_loss = engine.loss_fn(outputs, labels)
wandb.log({"train_loss": train_loss})
engine.backward(train_loss)
engine.step()
wandb.log({"step": step})
engine.eval()
        for step, batch in enumerate(eval_dataloader):
            inputs, labels = batch['input_ids'].cuda(), batch['labels'].cuda()
            with torch.no_grad():
                outputs = engine(inputs)
                test_loss = engine.loss_fn(outputs, labels)
            # evaluation only: no backward pass or optimizer step here
            wandb.log({"test_loss": test_loss})
wandb.alert(
title = 'Training Complete',
text = "Training complete."
)
else:
# Time session with ColossalAI
timer = MultiTimer()
# trainer
trainer = Trainer(
engine = engine,
timer = timer,
logger = logger
)
hook_list = [
hooks.LogMetricByStepHook(),
hooks.LossHook(),
hooks.LogMetricByEpochHook(logger)
]
trainer.fit(
train_dataloader = train_dataloader,
epochs = gpc.config.EPOCHS,
hooks = hook_list,
display_progress = True
)
if __name__ == "__main__":
cfg = CFG()
LaMDA_Trainer(cfg) | LaMDA-pytorch-main | train.py |
import copy
from itertools import chain
from datasets import load_dataset
from torch.utils.data import DataLoader, DistributedSampler
from torch.distributed import get_world_size, is_initialized
from .config.config import CFG
from transformers import AutoTokenizer, default_data_collator
def build_dataloaders(args: CFG, tokenizer: AutoTokenizer):
"""
Build dataloaders for the model.
"""
# Load training dataset
load_train_data = load_dataset(args.train_dataset_name, split = args.choose_train_split)
# Remove unused columns from the training dataset
load_train_data = load_train_data.remove_columns(args.remove_train_columns)
# Load validation dataset
load_eval_data = load_dataset(args.eval_dataset_name, split = args.choose_eval_split)
# Remove unused columns from the validation dataset
load_eval_data = load_eval_data.remove_columns(args.remove_eval_columns)
# Shuffle the training input files.
shuffled_train_files = load_train_data.shuffle(seed = args.seed)
# Shuffle the validation input files.
shuffled_eval_files = load_eval_data.shuffle(seed = args.seed)
"""
A sequence length of x is used for the model. Input examples are concatenated
together and then split into sequences of exactly x tokens, so that there are
no padding tokens, but examples may be split in the middle.
Tokenize function reference:
https://github.com/hpcaitech/PaLM-colossalai/blob/main/data/wikitext.py
"""
def tokenize(examples):
seq_length = args.tokenizer_seq_length
examples = tokenizer(examples[args.select_input_string])
concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()}
total_length = len(concatenated_examples[list(examples.keys())[0]])
if total_length >= seq_length:
total_length = (total_length // seq_length) * seq_length
result = {
k: [t[i : i + seq_length] for i in range(0, total_length, seq_length)]
for k, t in concatenated_examples.items()
}
result["labels"] = copy.deepcopy(result["input_ids"])
return result
"""
Map the tokenization function to the shuffled training files to create an
Iterable training dataset of batched input sequences of x tokens.
    Remove columns from the shuffled training files so that you are left with
only the input_ids, attention_mask, and labels columns.
"""
tokenized_train_dataset = shuffled_train_files.map(tokenize, batched = True, remove_columns = [args.select_input_string])
"""
Map the tokenization function to the shuffled validation files to create an
Iterable validation dataset of batched input sequences of x tokens.
    Remove columns from the shuffled validation files so that you are left with
only the input_ids, attention_mask, and labels columns.
"""
tokenized_eval_dataset = shuffled_eval_files.map(tokenize, batched = True, remove_columns = [args.select_input_string])
    # Convert the tokenized datasets to PyTorch Tensors (set_format works in-place and returns None)
    tokenized_train_dataset.set_format(type = "torch")
    tokenized_eval_dataset.set_format(type = "torch")
    # Train dataset sampler, used only for distributed training.
    sample_train_dataset = DistributedSampler(tokenized_train_dataset, shuffle = True) if is_initialized() and get_world_size() > 1 else None
    # Validation dataset sampler, used only for distributed training.
    sample_eval_dataset = DistributedSampler(tokenized_eval_dataset, shuffle = False) if is_initialized() and get_world_size() > 1 else None
    # Create the train dataloader; drop_last discards the final incomplete batch.
    train_dataloader = DataLoader(tokenized_train_dataset, shuffle = sample_train_dataset is None, sampler = sample_train_dataset, drop_last = True, collate_fn = default_data_collator, batch_size = args.batch_size)
    # Create the validation dataloader; drop_last discards the final incomplete batch.
    eval_dataloader = DataLoader(tokenized_eval_dataset, sampler = sample_eval_dataset, drop_last = True, collate_fn = default_data_collator, batch_size = args.batch_size)
# Return the training and validation dataloaders to be used in the model
print('Done building dataloaders')
return train_dataloader, eval_dataloader
if __name__ == '__main__':
# Get Dataloader Configuration Arguments
data_loader_args = CFG()
# Get Tokenizer Configuration Arguments
tokenizer_args = 'gpt2'
# Load the pretrained tokenizer of your choosing
tokenizer = AutoTokenizer.from_pretrained(tokenizer_args)
# Test Build Dataloaders
train_loader, eval_loader = build_dataloaders(args = data_loader_args, tokenizer = tokenizer)
print(next(iter(train_loader))['input_ids'])
print(next(iter(train_loader))['input_ids'].shape) | LaMDA-pytorch-main | lamda_pytorch/build_dataloader.py |
import torch
from torch import nn, einsum
import torch.nn.functional as F
import math
from einops import rearrange
from lamda_pytorch.config.config import CFG
# residual wrapper
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(x, **kwargs) + x
# pre-normalization wrapper
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.norm = nn.LayerNorm(dim)
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(self.norm(x), **kwargs)
# gated-GELU activation function
class GEGLU(nn.Module):
def forward(self, x):
x, gates = x.chunk(2, dim = -1)
return x * F.gelu(gates)
# feedforward layer with gated-GELU activation function
class FeedForward(nn.Module):
def __init__(self, dim, mult = 4, dropout = 0.):
super().__init__()
inner_dim = int(dim * mult)
self.net = nn.Sequential(
nn.Linear(dim, inner_dim * 2),
GEGLU(),
nn.Dropout(dropout), # optional dropout
nn.Linear(inner_dim, dim)
)
def forward(self, x):
return self.net(x)
# T5 relative positional bias
class T5RelativePositionBias(nn.Module):
def __init__(
self,
scale,
num_buckets = 32,
max_distance = 128,
heads = 8
):
super().__init__()
self.scale = scale
self.num_buckets = num_buckets
self.max_distance = max_distance
self.relative_attention_bias = nn.Embedding(num_buckets, heads)
@staticmethod
def _relative_position_bucket(
relative_position,
num_buckets = 32,
max_distance = 128
):
n = -relative_position
n = torch.max(n, torch.zeros_like(n))
max_exact = num_buckets // 2
is_small = n < max_exact
val_if_large = max_exact + (torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)).long()
val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1))
return torch.where(is_small, n, val_if_large)
def forward(self, qk_dots):
i, j, device = *qk_dots.shape[-2:], qk_dots.device
q_pos = torch.arange(i, dtype = torch.long, device = device)
k_pos = torch.arange(j, dtype = torch.long, device = device)
rel_pos = rearrange(k_pos, 'j -> 1 j') - rearrange(q_pos, 'i -> i 1')
rp_bucket = self._relative_position_bucket(rel_pos, num_buckets = self.num_buckets, max_distance = self.max_distance)
values = self.relative_attention_bias(rp_bucket)
bias = rearrange(values, 'i j h -> () h i j')
return qk_dots + (bias * self.scale)
# attention
class Attention(nn.Module):
def __init__(
self,
*,
dim,
heads = 8,
dim_head = 64,
dropout = 0.
):
super().__init__()
self.heads = heads
self.scale = dim_head ** -0.5
inner_dim = heads * dim_head
self.dropout = nn.Dropout(dropout)
self.to_q = nn.Linear(dim, inner_dim, bias = False)
self.to_kv = nn.Linear(dim, dim_head * 2, bias = False)
self.to_out = nn.Linear(inner_dim, dim)
self.rel_pos_bias = T5RelativePositionBias(scale = dim_head ** 0.5, heads = heads)
def forward(self, x):
h, device = self.heads, x.device
q, k, v = (self.to_q(x), *self.to_kv(x).chunk(2, dim = -1))
q = rearrange(q, 'b n (h d) -> b h n d', h = h)
q = q * self.scale
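        # multi-query attention: a single key / value head is shared across all
        # query heads, so k keeps shape (b, n, dim_head) while q is split per head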
sim = einsum('b h i d, b j d -> b h i j', q, k)
i, j = sim.shape[-2:]
# T5 Relative Positional Bias
sim = self.rel_pos_bias(sim)
# Causal Mask
causal_mask = torch.ones((i, j), dtype = torch.bool, device = device).triu(j - i + 1)
sim = sim.masked_fill(causal_mask, -torch.finfo(sim.dtype).max)
attn = sim.softmax(dim = -1)
attn = self.dropout(attn) # Optional dropout
out = einsum('b h i j, b j d -> b h i d', attn, v)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
# Transformer
class Transformer(nn.Module):
def __init__(self, dim, depth, heads, dim_head, dropout = 0.):
super().__init__()
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
Residual(PreNorm(dim, Attention(dim = dim, heads = heads, dim_head = dim_head, dropout = dropout))),
Residual(PreNorm(dim, FeedForward(dim = dim, dropout = dropout)))
]))
def forward(self, x):
for attn, ff in self.layers:
x = attn(x)
x = ff(x)
return x
# LaMDA Model
class LaMDA(nn.Module):
def __init__(self, *, num_tokens, dim, depth, dim_head, heads):
super().__init__()
self.token_emb = nn.Embedding(num_tokens, dim)
        # Transformer's signature is (dim, depth, heads, dim_head); pass by keyword
        # so heads and dim_head cannot be swapped positionally
        self.transformer = Transformer(dim, depth, heads = heads, dim_head = dim_head)
self.to_logits = nn.Sequential(
nn.LayerNorm(dim),
nn.Linear(dim, num_tokens)
)
def forward(self, x):
x = self.token_emb(x)
x = self.transformer(x)
logits = self.to_logits(x)
return logits
def lamda_model():
model = LaMDA(
num_tokens = CFG.num_tokens,
dim = CFG.dim,
depth = CFG.depth,
dim_head = CFG.dim_head,
heads = CFG.heads
)
return model
if __name__ == "__main__":
lamda_base = lamda_model()
#lamda = AutoregressiveWrapper(lamda_base, max_seq_len = 2048)
tokens = torch.randint(0, 20000, (1, 2048)) # mock token data
logits = lamda_base(tokens)
print(logits.shape)
n_params_torch = sum(
p.numel() for p in lamda_base.parameters() if p.requires_grad
)
print(f"Number of parameters in torch model: {n_params_torch}") | LaMDA-pytorch-main | lamda_pytorch/lamda_pytorch.py |
from lamda_pytorch.lamda_pytorch import LaMDA | LaMDA-pytorch-main | lamda_pytorch/__init__.py |
from typing import Optional
from dataclasses import dataclass, field
@dataclass
class CFG:
"""
Configuration for ZeRO
"""
use_zero: bool = field(
default = False,
metadata = {'help': 'whether to use zero'}
)
"""
Configuration for optimizer
"""
lr: float = field(
default = 0.0001,
metadata = {'help': 'learning rate'}
)
"""
Configuration class for LaMDA model.
"""
num_tokens: int = field(
default = 50257,
metadata = {'help': 'number of tokens'}
)
dim: int = field(
default = 512,
metadata = {'help': 'dimension of the embedding'}
)
depth: int = field(
default = 6,
metadata = {'help': 'depth of the transformer'}
)
heads: int = field(
default = 4,
metadata = {'help': 'number of heads in the transformer'}
)
dim_head: int = field(
default = 64,
metadata = {'help': 'dimension of the head'}
)
"""
Configuration for data loader.
"""
use_huggingface: bool = field(
default = True,
metadata = {'help': 'Whether to use huggingface datasets'}
)
train_dataset_name: Optional[str] = field(
default="the_pile",
metadata={"help": "Path to Hugging Face training dataset."}
)
eval_dataset_name: Optional[str] = field(
default="the_pile",
metadata={"help": "Path to Hugging Face validation dataset."}
)
choose_train_split: Optional[str] = field(
default="train",
metadata={"help": "Choose Hugging Face training dataset split."}
)
choose_eval_split: Optional[str] = field(
default="train",
metadata={"help": "Choose Hugging Face validation dataset split."}
)
    # mutable defaults need default_factory; a ClassVar annotation makes the
    # dataclass skip the field and leave a raw Field object on the class
    remove_train_columns: list[str] = field(
        default_factory = lambda: ['meta'],
        metadata={"help": "Train dataset columns to remove."}
    )
    remove_eval_columns: list[str] = field(
        default_factory = lambda: ['meta'],
        metadata={"help": "Validation dataset columns to remove."}
    )
seed: Optional[int] = field(
default=42,
metadata={"help": "Random seed used for reproducibility."}
)
tokenizer_name: Optional[str] = field(
default="gpt2",
metadata={"help": "Tokenizer name."}
)
tokenizer_seq_length: Optional[int] = field(
default=512,
metadata={"help": "Sequence lengths used for tokenizing examples."}
)
select_input_string: Optional[str] = field(
default="text",
metadata={"help": "Select the key to used as the input string column."}
)
batch_size: Optional[int] = field(
default=16,
metadata={"help": "Batch size for training and validation."}
)
    save_to_path: Optional[str] = field(
        default="",
        metadata={"help": "Save the dataset to local disk."}
    )
"""
Configuration for Weights and Biases
"""
use_wandb: bool = field(
default = False,
metadata = {'help': 'Whether to use Weights and Biases for logging'}
)
project_name: Optional[str] = field(
default="LaMDA pre-training",
metadata = {'help': 'Name of the project'}
) | LaMDA-pytorch-main | lamda_pytorch/config/config.py |
EPOCHS = 1
LEARNING_RATE = 0.0001
WEIGHT_DECAY = 1e-2
gradient_accumulation = 1
clip_grad_norm = 0.0 | LaMDA-pytorch-main | lamda_pytorch/config/colossal_config.py |
from colossalai.zero.shard_utils import TensorShardStrategy
zero = dict(
model_config = dict(
shard_strategy = TensorShardStrategy(),
tensor_placement_policy = 'cpu',
reuse_fp16_shard = False
)
)
gradient_accumulation = 4
clip_grad_norm = 1.0 | LaMDA-pytorch-main | lamda_pytorch/config/zero_config.py |
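This module follows ColossalAI's config-file convention. A plausible consumption pattern (an assumption based on ColossalAI's documented API of that era, not code from this repository):

import colossalai
from colossalai.core import global_context as gpc

# Hypothetical launch: ColossalAI reads the config module by path and exposes
# its top-level names (zero, gradient_accumulation, ...) on gpc.config.
colossalai.launch_from_torch(config='lamda_pytorch/config/zero_config.py')
print(gpc.config.gradient_accumulation, gpc.config.clip_grad_norm)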
from datasets import load_dataset
#from tokenizers import SentencePieceBPETokenizer
import io
import sentencepiece as spm
dataset = load_dataset('conceptofmind/pile_wikipedia_en', split='train', streaming=True)
# tokenizer = SentencePieceBPETokenizer()
def batch_iterator(dataset):
for i in dataset:
yield i["text"]
model = io.BytesIO()
spm.SentencePieceTrainer.train(
sentence_iterator = batch_iterator(dataset),
model_writer=model,
vocab_size=32000,
model_type='bpe',
)
# tokenizer.train_from_iterator(
# text='',
# vocab_size=32_000,
# min_frequency=2,
# show_progress=True,
# )
# tokenizer.save()
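# A possible follow-up (an assumption, not in the original script): load the
# freshly trained model from the in-memory buffer and tokenize a sample.
# `model_proto=` is accepted by SentencePieceProcessor in recent sentencepiece
# releases.
sp = spm.SentencePieceProcessor(model_proto=model.getvalue())
print(sp.encode('LaMDA is a decoder-only language model.', out_type=str))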
| LaMDA-pytorch-main | lamda_pytorch/utils/train_sentencepiece_tokenizer.py |
import torch
from torch import nn
import torch.nn.functional as F
from einops import rearrange
# CrossEntropyLoss
class LaMDA_Loss(nn.Module):
def __init__(self):
super(LaMDA_Loss, self).__init__()
def forward(self, x_inp, x_labels):
x_inp, x_labels = x_inp[:, :-1], x_labels[:, 1:]
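        # logits arrive as (batch, seq, vocab); F.cross_entropy expects the
        # class dim second, so the pattern "b c n -> b n c" (where c = seq and
        # n = vocab here) moves the vocab dim into position 1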
loss = F.cross_entropy(rearrange(x_inp, "b c n -> b n c"), x_labels)
return loss
# autoregressive wrapper
def log(t, eps=1e-9):
return torch.log(t + eps)
def top_k(logits, thres = 0.9):
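    # keep only the top (1 - thres) fraction of the vocabulary: with the
    # default thres = 0.9, the 10% highest-scoring logits survive and the
    # rest are masked to -inf before sampling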
k = int((1 - thres) * logits.shape[-1])
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
class AutoregressiveWrapper(nn.Module):
def __init__(self, net, max_seq_len = 512, pad_value = 0):
super().__init__()
self.pad_value = pad_value
self.net = net
self.max_seq_len = max_seq_len
@torch.no_grad()
def generate(
self,
start_tokens,
seq_len,
eos_token = None,
temperature = 1.0,
filter_logits_fn = top_k,
filter_thres = 0.9,
**kwargs
):
was_training = self.net.training
_, t = start_tokens.shape
self.net.eval()
out = start_tokens
for _ in range(seq_len):
x = out[:, -self.max_seq_len:]
logits = self.net(x, **kwargs)[:, -1, :]
filtered_logits = filter_logits_fn(logits, thres = filter_thres)
gumbel_noise = -log(-log(torch.zeros_like(filtered_logits).uniform_(0, 1)))
sample = ((filtered_logits / temperature) + gumbel_noise).argmax(dim=-1)
out = torch.cat((out, sample[:, None]), dim=-1)
if eos_token is not None and (sample == eos_token).all():
break
out = out[:, t:]
self.net.train(was_training)
return out
def forward(self, x, **kwargs):
return self.net(x, **kwargs) | LaMDA-pytorch-main | lamda_pytorch/utils/utils.py |
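A minimal generation sketch (illustrative, not from the repository) using the wrapper above:

import torch

from lamda_pytorch.lamda_pytorch import lamda_model
from lamda_pytorch.utils.utils import AutoregressiveWrapper

wrapper = AutoregressiveWrapper(lamda_model(), max_seq_len=512)
prompt = torch.randint(0, 20000, (1, 8))    # mock prompt tokens
out = wrapper.generate(prompt, seq_len=16)  # top-k + Gumbel-noise sampling
print(out.shape)                            # torch.Size([1, 16])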
from odin.model import Odin
model = Odin(
source_weights_path="https://drive.google.com/file/d/10U_gu5Wm3xc8tbGNJ-HDNphttUzTIQX6/view?usp=drive_link",
source_video_path="input_video.mp4",
target_video_path="output_video.mp4",
confidence_threshold=0.5,
iou_threshold=0.6
)
model.run() | Odin-main | example.py |
from odin.model import Odin
| Odin-main | odin/__init__.py |
import argparse
import supervision as sv
from ultralytics import YOLO
from tqdm import tqdm
class Odin:
def __init__(
self,
source_weights_path: str = None,
source_video_path: str = None,
target_video_path: str = None,
confidence_threshold: float = 0.3,
iou_threshold: float = 0.7
):
super(Odin, self).__init__()
self.source_weights_path = source_weights_path
        self.source_video_path = source_video_path
self.target_video_path = target_video_path
self.confidence_threshold = confidence_threshold
self.iou_threshold = iou_threshold
def run(self):
model = YOLO(self.source_weights_path)
tracker = sv.ByteTrack()
box_annotator = sv.BoxAnnotator()
frame_generator = sv.get_video_frames_generator(source_path=self.source_video_path)
video_info = sv.VideoInfo.from_video_path(video_path=self.source_video_path)
with sv.VideoSink(target_path=self.target_video_path, video_info=video_info) as sink:
for frame in tqdm(frame_generator, total=video_info.total_frames):
results = model(
frame,
verbose=True,
conf=self.confidence_threshold,
iou=self.iou_threshold,
)[0]
                detections = sv.Detections.from_ultralytics(results)
detections = tracker.update_with_detections(detections)
labels = [
f"#{tracker_id} {model.model.names[class_id]}"
for _, _, _, class_id, tracker_id
in detections
]
annotated_frame = box_annotator.annotate(
scene=frame.copy(),
detections=detections,
labels=labels
)
                sink.write_frame(frame=annotated_frame)
| Odin-main | odin/model.py |
| ReST-main | rest/__init__.py |
| ReST-main | rest/main.py |
# Copyright 2023 The jax_triton Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from unittest import mock
from absl.testing import absltest
from absl.testing import parameterized
import jax
from jax import random
from jax.config import config
import jax.numpy as jnp
import jax_triton as jt
import numpy as np
import triton
from triton.compiler import code_generator as code_gen
import triton.language as tl
config.parse_flags_with_absl()
def setUpModule():
config.update("jax_enable_x64", True)
def tearDownModule():
config.update("jax_enable_x64", False)
@triton.jit
def add_kernel(x_ptr, y_ptr, n_elements, output_ptr, BLOCK_SIZE: tl.constexpr):
pid = tl.program_id(axis=0)
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
x = tl.load(x_ptr + offsets, mask=mask)
y = tl.load(y_ptr + offsets, mask=mask)
output = x + y
tl.store(output_ptr + offsets, output, mask=mask)
def add(x, y, *, kernel=add_kernel, **kwargs):
if kernel is add_kernel:
kwargs.setdefault("BLOCK_SIZE", 8)
default_grid = lambda meta: triton.cdiv(x.size, meta["BLOCK_SIZE"])
return jt.triton_call(
x,
y,
x.size,
kernel=kernel,
out_shape=jax.ShapeDtypeStruct(x.shape, x.dtype),
grid=kwargs.pop("grid", default_grid),
**kwargs,
)
@triton.jit
def matmul_kernel(
a_ptr,
b_ptr,
M,
N,
K,
stride_am,
stride_ak,
stride_bk,
stride_bn,
stride_cm,
stride_cn,
c_ptr,
BLOCK_SIZE_M: tl.constexpr,
BLOCK_SIZE_N: tl.constexpr,
BLOCK_SIZE_K: tl.constexpr,
GROUP_SIZE_M: tl.constexpr,
K_EXACTLY_DIVISIBLE_BY_BLOCK: tl.constexpr,
):
pid = tl.program_id(axis=0)
num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)
num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
num_pid_in_group = GROUP_SIZE_M * num_pid_n
group_id = pid // num_pid_in_group
first_pid_m = group_id * GROUP_SIZE_M
group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
pid_m = first_pid_m + (pid % group_size_m)
pid_n = (pid % num_pid_in_group) // group_size_m
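  # Worked example of the grouped ordering (comment only): with
  # M = N = 256, BLOCK_SIZE_M = BLOCK_SIZE_N = 64 and GROUP_SIZE_M = 2,
  # num_pid_m = num_pid_n = 4 and num_pid_in_group = 8, so pids 0..7 all
  # land on block-rows {0, 1} before any pid touches rows {2, 3}; adjacent
  # programs therefore reuse the same A tiles from L2.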
offs_am = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_bn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
offs_k = tl.arange(0, BLOCK_SIZE_K)
a_ptrs = a_ptr + (offs_am[:, None] * stride_am + offs_k[None, :] * stride_ak)
b_ptrs = b_ptr + (offs_k[:, None] * stride_bk + offs_bn[None, :] * stride_bn)
accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
for k_remaining in range(K, 0, -BLOCK_SIZE_K):
if K_EXACTLY_DIVISIBLE_BY_BLOCK:
a = tl.load(a_ptrs)
b = tl.load(b_ptrs)
else:
mask = tl.arange(0, BLOCK_SIZE_K) < k_remaining
a = tl.load(a_ptrs, mask=mask[None, :], other=0.0)
b = tl.load(b_ptrs, mask=mask[:, None], other=0.0)
accumulator += tl.dot(a, b)
a_ptrs += BLOCK_SIZE_K * stride_ak
b_ptrs += BLOCK_SIZE_K * stride_bk
c = accumulator.to(tl.float16)
offs_cm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
c_ptrs = c_ptr + stride_cm * offs_cm[:, None] + stride_cn * offs_cn[None, :]
c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < N)
tl.store(c_ptrs, c, mask=c_mask)
def matmul(x, y, *, kernel=matmul_kernel, **kwargs):
m, k = x.shape
_, n = y.shape
def grid(meta):
cdiv = triton.cdiv
return cdiv(m, meta["BLOCK_SIZE_M"]) * cdiv(n, meta["BLOCK_SIZE_N"])
return jt.triton_call(
x,
y,
m,
n,
k,
k, # stride_am
1, # stride_ak
n, # stride_bk
1, # stride_bn
n, # stride_cm
1, # stride_cn
kernel=kernel,
out_shape=jax.ShapeDtypeStruct((m, n), dtype=x.dtype),
grid=grid,
GROUP_SIZE_M=8,
**kwargs,
)
def create_random_inputs(shape1, shape2=None, *, dtype="float32"):
if shape2 is None:
shape2 = shape1
k1, k2 = random.split(random.PRNGKey(0), 2)
if dtype in {"float32", "float16", "float64"}:
x = random.normal(k1, shape1, dtype=dtype)
y = random.normal(k2, shape2, dtype=dtype)
elif dtype in {"int32", "int64"}:
x = random.randint(k1, shape1, -100, 100, dtype=dtype)
y = random.randint(k2, shape2, -100, 100, dtype=dtype)
return x, y
class TritonKernelCallTest(parameterized.TestCase):
@parameterized.product(
size=[1, 5, 100, 1024],
dtype=["int32", "float32", "float16", "int64", "float64"],
block_size=[1, 32, 256],
)
def test_add(self, size, dtype, block_size):
x, y = create_random_inputs([size], dtype=dtype)
out = jax.jit(lambda x, y: add(x, y, BLOCK_SIZE=block_size))(x, y)
expected = x + y
np.testing.assert_allclose(out, expected)
@parameterized.product(
m=[512, 1024],
k=[512],
n=[512],
dtype=["float32", "float16"],
block_size_m=[64, 128],
block_size_n=[128, 256],
block_size_k=[32],
)
def test_matmul(
self,
m,
n,
k,
dtype,
block_size_m,
block_size_n,
block_size_k,
):
if jt.get_compute_capability(0) < 70:
self.skipTest("Matmul only works on GPUs with capability >= sm70")
x, y = create_random_inputs([m, k], [k, n], dtype=dtype)
out = matmul(
x,
y,
BLOCK_SIZE_M=block_size_m,
BLOCK_SIZE_N=block_size_n,
BLOCK_SIZE_K=block_size_k,
K_EXACTLY_DIVISIBLE_BY_BLOCK=k % block_size_k == 0,
)
expected = jnp.matmul(x, y)
np.testing.assert_allclose(out, expected, atol=0.05, rtol=0.05)
@parameterized.product(
size=[1, 5, 100, 1024],
dtype=["int32", "float32", "float16", "int64", "float64"],
block_size=[1, 32, 256],
)
def test_pmap(self, size, dtype, block_size):
n_devices = jax.local_device_count()
if n_devices < 2:
self.skipTest("Not enough devices")
x, y = create_random_inputs([n_devices, size], dtype=dtype)
out = jax.pmap(lambda x, y: add(x, y, BLOCK_SIZE=block_size))(x, y)
expected = x + y
np.testing.assert_allclose(out, expected)
@parameterized.parameters("int", "tuple", "function_int", "function_tuple")
def test_grid_types(self, grid_type):
size = 8
block_size = 1
x, y = create_random_inputs([size])
if grid_type == "int":
grid = triton.cdiv(size, block_size)
elif grid_type == "tuple":
grid = (triton.cdiv(size, block_size),)
elif grid_type == "function_int":
grid = lambda meta: triton.cdiv(size, meta["BLOCK_SIZE"])
elif grid_type == "function_tuple":
grid = lambda meta: (triton.cdiv(size, meta["BLOCK_SIZE"]),)
out = add(x, y, BLOCK_SIZE=block_size, grid=grid)
expected = x + y
np.testing.assert_allclose(out, expected)
def test_input_output_aliasing(self):
@triton.jit
def add_inplace_kernel(_, n_elements, output_ptr, BLOCK_SIZE: tl.constexpr):
pid = tl.program_id(axis=0)
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
x = tl.load(output_ptr + offsets, mask=mask)
output = x + 1
tl.store(output_ptr + offsets, output, mask=mask)
size = 8
x = random.normal(random.PRNGKey(0), [size])
expected = x + 1
out = jt.triton_call(
x,
size,
kernel=add_inplace_kernel,
out_shape=x,
grid=(8,),
BLOCK_SIZE=1,
input_output_aliases={0: 0},
)
np.testing.assert_allclose(out, expected)
@parameterized.parameters(False, True)
def test_zeroed_outputs(self, use_function):
x, y = create_random_inputs([1000000])
# We alias `y` with the output, so are performing the add in-place.
# If we zero the output before the kernel, the result is `x + 0`.
out = add(
x,
y,
input_output_aliases={1: 0},
zeroed_outputs=(lambda _: (0,)) if use_function else (0,),
)
np.testing.assert_allclose(out, x)
def test_multiple_outputs(self):
@triton.jit
def copy_twice_kernel(a_ptr, x_ptr, y_ptr):
a = tl.load(a_ptr)
tl.store(x_ptr, a)
tl.store(y_ptr, a)
a = jnp.array([42])
x, y = jt.triton_call(
a,
kernel=copy_twice_kernel,
out_shape=[a, a],
grid=(1,),
)
np.testing.assert_array_equal(a, x)
np.testing.assert_array_equal(a, y)
def test_kernel_cache_equivalent_kernels(self):
# Create unique JITFunction to avoid conflicts with other tests.
my_add_kernel = triton.jit(add_kernel.fn)
fn1 = jax.jit(lambda x, y: add(x, y, BLOCK_SIZE=32, kernel=my_add_kernel))
fn2 = jax.jit(lambda x, y: add(x, y, BLOCK_SIZE=32, kernel=my_add_kernel))
fn3 = jax.jit(lambda x, y: add(x, y, BLOCK_SIZE=64, kernel=my_add_kernel))
x1, y1 = create_random_inputs([42])
x2, y2 = create_random_inputs([43])
compile_ttir_to_ptx_inplace = jt.triton_lib.compile_ttir_to_ptx_inplace
call_count = [0]
def my_compile(*args, **kwargs):
call_count[0] += 1
return compile_ttir_to_ptx_inplace(*args, **kwargs)
with mock.patch.object(
jt.triton_lib, "compile_ttir_to_ptx_inplace", new=my_compile
):
_ = fn1(x1, y1)
self.assertEqual(call_count[0], 1)
_ = fn2(x2, y2)
self.assertEqual(call_count[0], 1) # Second call hits the cache.
_ = fn3(x1, y1)
self.assertEqual(call_count[0], 2) # Third call misses (block size).
def test_kernel_cache_same_kernel_different_params(self):
@triton.jit
def silly_add_kernel(x_ptr, y_ptr, output_ptr):
pid = tl.program_id(axis=0)
tl.store(output_ptr + pid, tl.load(x_ptr + pid) + tl.load(y_ptr + pid))
def silly_add(n):
x, y = create_random_inputs([n])
return jt.triton_call(
x,
y,
kernel=silly_add_kernel,
out_shape=x,
grid=x.size,
)
get_or_create_triton_kernel = jt.triton_lib.get_or_create_triton_kernel
call_count = [0]
def my_get_or_create_triton_kernel(*args, **kwargs):
call_count[0] += 1
return get_or_create_triton_kernel(*args, **kwargs)
with mock.patch.object(
jt.triton_lib,
"get_or_create_triton_kernel",
new=my_get_or_create_triton_kernel,
):
_ = silly_add(42)
self.assertEqual(call_count[0], 1)
_ = silly_add(42)
self.assertEqual(call_count[0], 1) # Second call hits the cache.
_ = silly_add(43)
self.assertEqual(call_count[0], 2) # Third call misses (grid size).
def test_autotune(self):
autotune_configs = [
triton.Config({"BLOCK_SIZE": 32}, num_warps=1),
triton.Config({"BLOCK_SIZE": 64}, num_warps=1),
triton.Config({"BLOCK_SIZE": 64}, num_warps=2),
]
kernel = triton.autotune(autotune_configs, key=("n_elements",))(add_kernel)
x, y = create_random_inputs([1024])
expected = x + y
out = add(x, y, kernel=kernel)
np.testing.assert_allclose(out, expected)
def test_regression_issue_128(self):
autotune_configs = [
triton.Config({"BLOCK_SIZE": 1024}, num_warps=1),
triton.Config({"BLOCK_SIZE": 32}, num_warps=1),
]
kernel = triton.autotune(autotune_configs, key=("n_elements",))(add_kernel)
x, y = create_random_inputs([1024])
expected = x + y
# Keep alive so each iteration is written to an uninitialized buffer.
outs = []
for _ in range(10):
outs.append(add(x, y, kernel=kernel))
np.testing.assert_allclose(outs[-1], expected)
def test_autotune_pre_hook_error(self):
autotune_configs = [
triton.Config({"BLOCK_SIZE": 32}, num_warps=1, pre_hook=lambda _: None),
]
kernel = triton.autotune(autotune_configs, key=("n_elements",))(add_kernel)
x, y = create_random_inputs([1024])
with self.assertRaises(NotImplementedError):
_ = add(x, y, kernel=kernel)
def test_heuristics(self):
heuristic_returned_values = []
def heuristic_fn(args):
heuristic_returned_values.append(args["K"] % args["BLOCK_SIZE_K"] == 0)
return heuristic_returned_values[-1]
heuristics = {"K_EXACTLY_DIVISIBLE_BY_BLOCK": heuristic_fn}
kernel = triton.heuristics(heuristics)(matmul_kernel)
def do_matmul(m, n, k):
x, y = create_random_inputs([m, k], [k, n])
return matmul(
x,
y,
kernel=kernel,
BLOCK_SIZE_M=32,
BLOCK_SIZE_N=32,
BLOCK_SIZE_K=32,
)
_ = do_matmul(m=128, n=128, k=128)
_ = do_matmul(m=128, n=128, k=144)
self.assertEqual(heuristic_returned_values, [True, False])
def test_autotune_with_heuristics(self):
heuristic_returned_values = []
def heuristic_fn(args):
heuristic_returned_values.append(args["K"] % args["BLOCK_SIZE_K"] == 0)
return heuristic_returned_values[-1]
heuristics = {"K_EXACTLY_DIVISIBLE_BY_BLOCK": heuristic_fn}
autotune_configs = [
triton.Config({"BLOCK_SIZE_K": 32}, num_warps=1),
triton.Config({"BLOCK_SIZE_K": 64}, num_warps=1),
]
kernel = triton.autotune(autotune_configs, key=("M", "N", "K"))(
triton.heuristics(heuristics)(matmul_kernel)
)
def do_matmul(m, n, k):
x, y = create_random_inputs([m, k], [k, n])
return matmul(
x,
y,
kernel=kernel,
BLOCK_SIZE_M=32,
BLOCK_SIZE_N=32,
)
_ = do_matmul(m=128, n=128, k=128)
_ = do_matmul(m=128, n=128, k=160)
self.assertEqual(heuristic_returned_values, [True, True, True, False])
def test_heuristics_does_not_modify_autotune_configs(self):
def heuristic_fn(args):
return args["K"] % args["BLOCK_SIZE_K"] == 0
heuristics = {"K_EXACTLY_DIVISIBLE_BY_BLOCK": heuristic_fn}
autotune_config = triton.Config({"BLOCK_SIZE_K": 32}, num_warps=1)
kernel = triton.autotune([autotune_config], key=("M", "N", "K"))(
triton.heuristics(heuristics)(matmul_kernel)
)
def do_matmul(m, n, k):
x, y = create_random_inputs([m, k], [k, n])
return matmul(
x,
y,
kernel=kernel,
BLOCK_SIZE_M=32,
BLOCK_SIZE_N=32,
)
_ = do_matmul(m=128, n=128, k=128)
self.assertEqual(autotune_config.kwargs, {"BLOCK_SIZE_K": 32})
def test_autotune_with_input_output_aliasing(self):
autotune_configs = [
triton.Config({"BLOCK_SIZE": 32}, num_warps=1),
triton.Config({"BLOCK_SIZE": 64}, num_warps=1),
]
kernel = triton.autotune(autotune_configs, key=("n_elements",))(add_kernel)
x, y = create_random_inputs([1024])
expected = x + y
out = add(x, y, kernel=kernel, input_output_aliases={0: 0})
np.testing.assert_allclose(out, expected)
def test_specialization(self):
do_not_specialize = (
0, # a_ptr
2, # M
6, # stride_ak
7, # stride_bk
11, # c_ptr
)
kernel = triton.jit(do_not_specialize=do_not_specialize)(matmul_kernel.fn)
m, n, k = 128, 128, 99
x, y = create_random_inputs([m, k], [k, n])
with mock.patch.object(code_gen, "ast_to_ttir") as mock_compile:
try:
_ = matmul(
x,
y,
kernel=kernel,
BLOCK_SIZE_M=32,
BLOCK_SIZE_N=32,
BLOCK_SIZE_K=32,
# K_EXACTLY_DIVISIBLE_BY_BLOCK=False,
)
except TypeError:
pass # Error thrown as the mocked method's return value is invalid.
mock_compile.assert_called_once()
specialization = mock_compile.call_args.args[2]
# Pointers are assumed to divide by 16, as do `M`, `N`, `stride_{bk,cm}`.
# However, we've marked `a_ptr`, `M`, `stride_bk`, and `c_ptr` as "do not
# specialize", leaving `b_ptr`, `N`, and `stride_cm`.
self.assertEqual(specialization.divisible_by_16, (1, 3, 9))
# `stride_{ak,bn,cn}` equal 1, but we've marked `stride_ak` as "do not
# specialize" leaving `stride_{bn,cn}`.
self.assertEqual(specialization.equal_to_1, (8, 10))
if __name__ == "__main__":
os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.5"
absltest.main()
| jax-triton-main | tests/triton_call_test.py |
# Copyright 2023 The jax_triton Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import absltest
import jax
import jax.numpy as jnp
import jax_triton as jt
import numpy as np
import triton
import triton.language as tl
@triton.jit
def add_kernel(
x_ptr, # *Pointer* to first input vector
y_ptr, # *Pointer* to second input vector
length, # Length of input and output vectors.
output_ptr, # *Pointer* to output vector
BLOCK_SIZE: tl.constexpr,
):
# There are multiple 'program's processing different data. We identify which program
# we are here
pid = tl.program_id(axis=0) # We use a 1D launch grid so axis is 0
# This program will process inputs that are offset from the initial data.
# for instance, if you had a vector of length 256 and block_size of 64, the programs
# would each access the elements [0:64, 64:128, 128:192, 192:256].
# Note that offsets is a list of pointers
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
# Create a mask to guard memory operations against out-of-bounds accesses
mask = offsets < length
# Load x and y from DRAM, masking out any extra elements in case the input is not a
# multiple of the block size
x = tl.load(x_ptr + offsets, mask=mask)
y = tl.load(y_ptr + offsets, mask=mask)
output = x + y
# Write x + y back to DRAM
tl.store(output_ptr + offsets, output, mask=mask)
@triton.jit
def tanh_kernel(
x_ptr, # *Pointer* to first input vector
length, # Length of input and output vectors.
output_ptr, # *Pointer* to output vector
BLOCK_SIZE: tl.constexpr,
):
# There are multiple 'program's processing different data. We identify which program
# we are here
pid = tl.program_id(axis=0) # We use a 1D launch grid so axis is 0
# This program will process inputs that are offset from the initial data.
# for instance, if you had a vector of length 256 and block_size of 64, the programs
# would each access the elements [0:64, 64:128, 128:192, 192:256].
# Note that offsets is a list of pointers
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
# Create a mask to guard memory operations against out-of-bounds accesses
mask = offsets < length
  # Load x from DRAM, masking out any extra elements in case the input is not
  # a multiple of the block size
  x = tl.load(x_ptr + offsets, mask=mask)
  output = tl.math.tanh(x)
  # Write tanh(x) back to DRAM
tl.store(output_ptr + offsets, output, mask=mask)
class TritonTest(absltest.TestCase):
def test_add_kernel(self):
def add(x: jnp.ndarray, y: jnp.ndarray) -> jnp.ndarray:
out_shape = jax.ShapeDtypeStruct(shape=x.shape, dtype=x.dtype)
grid = lambda meta: (triton.cdiv(x.size, meta['BLOCK_SIZE']),)
return jt.triton_call(
x,
y,
x.size,
kernel=add_kernel,
out_shape=out_shape,
grid=grid,
BLOCK_SIZE=8,
)
x = jnp.arange(8, dtype=jnp.float32)
y = jnp.arange(8, dtype=jnp.float32)
np.testing.assert_allclose(add(x, y), x + y)
def test_tanh_kernel(self):
def tanh(x: jnp.ndarray) -> jnp.ndarray:
out_shape = jax.ShapeDtypeStruct(shape=x.shape, dtype=x.dtype)
grid = lambda meta: (triton.cdiv(x.size, meta['BLOCK_SIZE']),)
return jt.triton_call(
x,
x.size,
kernel=tanh_kernel,
out_shape=out_shape,
grid=grid,
BLOCK_SIZE=8,
)
x = jnp.arange(8, dtype=jnp.float32)
np.testing.assert_allclose(tanh(x), np.tanh(x))
if __name__ == '__main__':
absltest.main()
| jax-triton-main | tests/triton_test.py |
# Copyright 2023 The jax_triton Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import itertools
import os
import unittest
os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.5"
from absl.testing import absltest
from absl.testing import parameterized
import jax
from jax import lax
from jax import linear_util as lu
from jax import random
from jax._src import test_util as jtu
from jax._src import state
from jax._src.lax.control_flow.for_loop import for_loop
from jax.config import config
from jax.interpreters import partial_eval as pe
import jax.numpy as jnp
import jax_triton as jt
from jax_triton import pallas as pl
from jax_triton.pallas.pallas_call import _initial_style_open_jaxpr
from jax_triton.pallas.ops import attention
from jax_triton.pallas.ops import layer_norm
from jax_triton.pallas.ops import rms_norm
from jax_triton.pallas.ops import softmax
try:
from jax_triton.pallas.triton_ir_lowering import compile_jaxpr
except ModuleNotFoundError:
compile_jaxpr = None
import numpy as np
# TODO(sharadmv): Update signatures of pallas_call to correct inputs/outputs.
# pylint: disable=no-value-for-parameter
config.update("jax_traceback_filtering", "off")
config.parse_flags_with_absl()
@functools.partial(jax.jit, static_argnames=["bm", "bn", "gm", "bk",
"interpret", "debug"])
def matmul(x, y, *, bm, bn, gm, bk, interpret, debug=False):
m, n, k = x.shape[0], y.shape[1], x.shape[1]
@functools.partial(
pl.pallas_call, out_shape=jax.ShapeDtypeStruct((m, n), jnp.float32),
interpret=interpret,
debug=debug,
grid=jt.cdiv(m, bm) * jt.cdiv(n, bn))
def matmul_kernel(x_ref, y_ref, o_ref):
pid = pl.program_id(axis=0)
num_pid_m = m // bm
num_pid_n = n // bn
num_pid_in_group = gm * num_pid_n
group_id = lax.div(pid, num_pid_in_group)
first_pid_m = group_id * gm
group_size_m = jnp.minimum(num_pid_m - first_pid_m, gm)
pid_m = first_pid_m + lax.rem(pid, group_size_m)
pid_n = lax.div(lax.rem(pid, num_pid_in_group), group_size_m)
idx_m = pid_m * bm + jnp.arange(bm)
idx_n = pid_n * bn + jnp.arange(bn)
idx_m = pl.max_contiguous(pl.multiple_of(idx_m, bm), bm)
idx_n = pl.max_contiguous(pl.multiple_of(idx_n, bn), bn)
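    # compiler hints only, semantics unchanged: the indices are declared to be
    # multiples of the block size and contiguous within a block, which lets
    # the compiler emit vectorized global memory accesses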
acc = jnp.zeros((bm, bn), dtype=jnp.float32)
def body(i, acc_ref):
idx_k = i * bk + jnp.arange(bk)
x_idx = (
jax.lax.broadcast_in_dim(idx_m, (bm, bk), (0,)),
jax.lax.broadcast_in_dim(idx_k, (bm, bk), (1,)))
y_idx = (
jax.lax.broadcast_in_dim(idx_k, (bk, bn), (0,)),
jax.lax.broadcast_in_dim(idx_n, (bk, bn), (1,)))
x_block, y_block = x_ref[x_idx], y_ref[y_idx]
out = jnp.dot(x_block, y_block)
acc_ref[:, :] += out
acc = for_loop(k // bk, body, acc).astype(o_ref.dtype)
o_idx = (
jax.lax.broadcast_in_dim(idx_m, (bm, bn), (0,)),
jax.lax.broadcast_in_dim(idx_n, (bm, bn), (1,)),
)
o_ref[o_idx] = acc
return matmul_kernel(x, y)
@functools.partial(jax.jit, static_argnames=["bm", "bn", "bk",
"interpret", "debug"])
def matmul_block_spec(x, y, *, bm, bn, bk, interpret, debug=False):
m, n, k = x.shape[0], y.shape[1], x.shape[1]
@functools.partial(
pl.pallas_call, out_shape=jax.ShapeDtypeStruct((m, n), jnp.float32),
interpret=interpret,
debug=debug,
in_specs=[
pl.BlockSpec(lambda i, _: (i, 0), (bm, x.shape[1])),
pl.BlockSpec(lambda _, j: (0, j), (y.shape[0], bn))
],
out_specs=pl.BlockSpec(lambda i, j: (i, j), (bm, bn)),
grid=(jt.cdiv(m, bm), jt.cdiv(n, bn)))
def matmul_kernel(x_ref, y_ref, o_ref):
acc = jnp.zeros(o_ref.shape, dtype=jnp.float32)
def body(i, acc_ref):
x_block = pl.load(x_ref, (slice(None), pl.ds(i * bk, bk)))
y_block = pl.load(y_ref, (pl.ds(i * bk, bk), slice(None)))
acc_ref[:, :] += jnp.dot(x_block, y_block)
acc = for_loop(k // bk, body, acc).astype(o_ref.dtype)
o_ref[:, :] = acc
return matmul_kernel(x, y)
class PallasTest(parameterized.TestCase):
INTERPRET = False
def setUp(self):
super().setUp()
if compile_jaxpr:
compile_jaxpr.cache_clear()
_initial_style_open_jaxpr.cache_clear()
def pallas_call(self, *args, **kwargs):
return pl.pallas_call(*args, **kwargs, interpret=self.INTERPRET)
class PallasCallTest(PallasTest):
def test_add_one(self):
@functools.partial(
self.pallas_call, out_shape=jax.ShapeDtypeStruct((), jnp.float32))
def add_one(x_ref, o_ref):
o_ref[()] = x_ref[()] + 1.
x = 0.
self.assertEqual(add_one(x), 1.)
def test_add_singleton_vector(self):
@functools.partial(
self.pallas_call, out_shape=jax.ShapeDtypeStruct((1,), jnp.float32),
grid=1)
def add_one(x_ref, o_ref):
o_ref[0] = x_ref[0] + 1.
x = jnp.array([0.], jnp.float32)
np.testing.assert_allclose(add_one(x), jnp.array([1.], jnp.float32))
def test_add_vector_block_spec(self):
@functools.partial(
self.pallas_call, out_shape=jax.ShapeDtypeStruct((8,), jnp.int32),
in_specs=(pl.BlockSpec(lambda i: i, (1,)),),
out_specs=(pl.BlockSpec(lambda i: i, (1,)),),
grid=8, debug=False)
def add_one(x_ref, o_ref):
o_ref[0] = x_ref[0] + 1
np.testing.assert_allclose(add_one(jnp.arange(8)), jnp.arange(8) + 1)
def test_add_matrix_block_spec(self):
@functools.partial(
self.pallas_call, out_shape=jax.ShapeDtypeStruct((8, 8), jnp.int32),
in_specs=(pl.BlockSpec(lambda i, j: (i, j), (2, 2)),),
out_specs=(pl.BlockSpec(lambda i, j: (i, j), (2, 2)),),
grid=(4, 4))
def add_one(x_ref, o_ref):
o_ref[:, :] = x_ref[:, :] + 1
x = jnp.arange(64).reshape((8, 8))
np.testing.assert_allclose(add_one(x), x + 1)
def test_vector_indexing(self):
@functools.partial(
self.pallas_call, out_shape=jax.ShapeDtypeStruct((), jnp.float32),
grid=1)
def index(x_ref, i_ref, o_ref):
o_ref[()] = x_ref[i_ref[()]]
x = jnp.arange(5.)
for i in range(5):
np.testing.assert_allclose(index(x, i), x[i])
def test_vector_slicing(self):
@functools.partial(
self.pallas_call, out_shape=jax.ShapeDtypeStruct((2,), jnp.float32),
grid=1)
def index(x_ref, idx_ref, o_ref):
idx = idx_ref[()]
o_ref[:] = x_ref[idx]
x = jnp.arange(5.)
for i in range(4):
idx = jnp.arange(i, i + 2)
np.testing.assert_allclose(index(x, idx), x[idx])
def test_where_broadcasting(self):
@functools.partial(
self.pallas_call,
out_shape=jax.ShapeDtypeStruct((4, 2, 2), jnp.float32),
grid=1)
def copyitem(x_ref, in_idx_ref, out_idx_ref, o_ref):
mask = (jnp.arange(o_ref.shape[0]) == out_idx_ref[()])[:, None, None]
o_ref[...] = jnp.where(mask, x_ref[in_idx_ref[()]], 0)
x = jnp.arange(7 * 2 * 2.).reshape(7, 2, 2)
for ii in range(7):
for oi in range(4):
out = copyitem(x, ii, oi)
self.assertEqual((4, 2, 2), out.shape)
np.testing.assert_allclose(out[:oi], jnp.zeros_like(out[:oi]))
np.testing.assert_allclose(out[oi], x[ii])
np.testing.assert_allclose(out[oi + 1:], jnp.zeros_like(out[oi + 1:]))
@parameterized.parameters(*[
((), (2,), ()),
((1,), (2,), (0,)),
((1, 1), (2, 2), (0, 1)),
((), (2, 2), ()),
])
def test_broadcast_in_dim(self, in_shape, out_shape, dims):
@functools.partial(
self.pallas_call, out_shape=jax.ShapeDtypeStruct(out_shape, jnp.float32),
grid=1)
def f(x_ref, o_ref):
x = x_ref[...]
o_ref[...] = jax.lax.broadcast_in_dim(x, out_shape, dims)
x = jnp.arange(int(np.prod(in_shape)), dtype=jnp.float32).reshape(in_shape)
expected = jax.lax.broadcast_in_dim(x, out_shape, dims)
np.testing.assert_allclose(f(x), expected)
@parameterized.parameters(*[
((2, 4), (8,)),
((2, 4), (8, 1)),
((2, 4), (1, 8)),
((64,), (32, 2)),
])
def test_reshape(self, in_shape, out_shape):
# TODO(sharadmv): re-enable when `reshape` works again
self.skipTest("Reshape not yet supported in Triton-MLIR")
@functools.partial(
self.pallas_call, out_shape=jax.ShapeDtypeStruct(out_shape, jnp.float32),
grid=1)
def f(x_ref, o_ref):
o_ref[...] = x_ref[...].reshape(out_shape)
x = jnp.arange(int(np.prod(in_shape)), dtype=jnp.float32).reshape(in_shape)
expected = x.reshape(out_shape)
np.testing.assert_allclose(f(x), expected)
@parameterized.parameters(*[
((), (1,)),
((), (1, 1)),
((2, 4), (2, 4)),
((2, 4), (2, 4, 1)),
((2, 4, 1), (2, 4)),
((2, 4), (1, 2, 4)),
((1, 2, 4), (2, 4)),
((2, 4), (2, 1, 4)),
((1, 2, 1, 4, 1), (2, 4)),
((2, 4,), (1, 2, 1, 4)),
((2, 4,), (1, 2, 4, 1)),
((1, 2, 4, 1), (1, 2, 1, 4, 1)),
])
def test_reshape_noop_or_singleton_dims(self, in_shape, out_shape):
@functools.partial(
self.pallas_call, out_shape=jax.ShapeDtypeStruct(out_shape, jnp.float32),
grid=1)
def f(x_ref, o_ref):
o_ref[...] = x_ref[...].reshape(out_shape)
x = jnp.arange(int(np.prod(in_shape)), dtype=jnp.float32).reshape(in_shape)
expected = x.reshape(out_shape)
np.testing.assert_allclose(f(x), expected)
@parameterized.named_parameters(*[
(f"m_{m}_n_{n}_k_{k}_dtype_{dtype}_bm_{block_size_m}_"
f"bn_{block_size_n}_bk_{block_size_k}_gm_{group_size_m}", m, n, k, dtype,
block_size_m, block_size_n, block_size_k, group_size_m)
for m in [512, 1024]
for k in [512]
for n in [512, 1024]
for dtype in ["float32", "float16"]
for block_size_m in [64, 128]
for block_size_n in [128, 256]
for block_size_k in [32]
for group_size_m in [8]
if block_size_m <= m and block_size_n <= n and block_size_k <= k
])
def test_matmul(self, m, n, k, dtype, bm, bn, bk, gm):
if jt.get_compute_capability(0) < 70:
raise unittest.SkipTest(
"Matmul only works on GPUs with capability >= sm70")
if (jt.get_compute_capability(0) <= 75
and (bm > 128 or bn > 128 or bk > 32)):
raise unittest.SkipTest("Block sizes too big for sm70.")
k1, k2 = random.split(random.PRNGKey(0))
x = random.normal(k1, (m, k), dtype=dtype)
y = random.normal(k2, (k, n), dtype=dtype)
out, expected = matmul(x, y, bm=bm, bn=bn, bk=bk, gm=gm,
interpret=self.INTERPRET), jnp.matmul(x, y)
np.testing.assert_allclose(out, expected, atol=0.05, rtol=0.05)
@parameterized.named_parameters(*[
(f"m_{m}_n_{n}_k_{k}_dtype_{dtype}_bm_{block_size_m}_"
f"bn_{block_size_n}_bk_{block_size_k}", m, n, k, dtype,
block_size_m, block_size_n, block_size_k)
for m in [512, 1024]
for k in [512]
for n in [512, 1024]
for dtype in ["float32", "float16"]
for block_size_m in [64, 128]
for block_size_n in [128, 256]
for block_size_k in [32]
if block_size_m <= m and block_size_n <= n and block_size_k <= k
])
def test_matmul_block_spec(self, m, n, k, dtype, bm, bn, bk):
if jt.get_compute_capability(0) < 70:
raise unittest.SkipTest(
"Matmul only works on GPUs with capability >= sm70")
if (jt.get_compute_capability(0) <= 75
and (bm > 128 or bn > 128 or bk > 32)):
raise unittest.SkipTest("Block sizes too big for sm70.")
k1, k2 = random.split(random.PRNGKey(0))
x = random.normal(k1, (m, k), dtype=dtype)
y = random.normal(k2, (k, n), dtype=dtype)
out, expected = matmul_block_spec(x, y, bm=bm, bn=bn, bk=bk,
interpret=self.INTERPRET), jnp.matmul(x, y)
np.testing.assert_allclose(out, expected, atol=0.05, rtol=0.05)
@parameterized.named_parameters(*(
dict(testcase_name=f"{size}_{dtype}", size=size, dtype=dtype)
for size in [16, 32, 64]
for dtype in ["float32", "float16"]
))
def test_dot(self, size, dtype):
if jt.get_compute_capability(0) < 70:
raise unittest.SkipTest(
"Matmul only works on GPUs with capability >= sm70")
@functools.partial(
self.pallas_call,
out_shape=jax.ShapeDtypeStruct((size, size), dtype),
grid=1)
def dot(x_ref, y_ref, o_ref):
x = x_ref[:, :]
y = y_ref[:, :]
o_ref[:, :] = pl.dot(x, y).astype(o_ref.dtype)
k1, k2 = random.split(random.PRNGKey(0))
x = random.normal(k1, (size, size), dtype=dtype)
y = random.normal(k2, (size, size), dtype=dtype)
out, expected = dot(x, y), jnp.dot(x, y)
np.testing.assert_allclose(out, expected, atol=0.05, rtol=0.05)
@parameterized.named_parameters(*(
dict(testcase_name=f"{batch_size}_{size}_{block_size}_{dtype}",
batch_size=batch_size, size=size, block_size=block_size, dtype=dtype)
for batch_size in [1, 2, 4, 23]
for size in [1, 2, 129, 255, 256]
for block_size in [1, 2, 32, 64, 128, 256]
for dtype in ["float32"]
if size < block_size
))
def test_softmax(self, batch_size, size, block_size, dtype):
@functools.partial(self.pallas_call,
out_shape=jax.ShapeDtypeStruct((batch_size, size), dtype),
grid=batch_size)
def softmax(x_ref, o_ref):
row_idx = pl.program_id(0)
x_idx = jnp.arange(block_size)
row_idxs = (row_idx, x_idx)
mask = x_idx < x_ref.shape[1]
row = pl.load(x_ref, row_idxs, mask=mask, other=-float("inf"))
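      # subtract the row max before exponentiating for numerical stability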
row_minus_max = row - jnp.max(row, axis=0)
numerator = jnp.exp(row_minus_max)
denominator = jnp.sum(numerator, axis=0)
softmax_output = numerator / denominator
pl.store(o_ref, row_idxs, softmax_output, mask=mask)
key = random.PRNGKey(0)
x = random.normal(key, [batch_size, size], dtype=dtype)
np.testing.assert_allclose(softmax(x), jax.nn.softmax(x, axis=-1),
atol=1e-5, rtol=1e-5)
@parameterized.parameters(*(
(size, block_size)
for size in [1, 2, 64, 129, 1021]
for block_size in [1, 2, 32, 64, 128]
))
def test_masked_load_store(self, size, block_size):
@functools.partial(self.pallas_call,
out_shape=(
jax.ShapeDtypeStruct((size,), jnp.float32)
),
grid=jt.cdiv(size, block_size))
def add_one(x_ref, o_ref):
idx = pl.program_id(0) * block_size + jnp.arange(block_size)
mask = idx < x_ref.shape[0]
x = pl.load(x_ref, (idx,), mask=mask)
pl.store(o_ref, (idx,), x + 1., mask=mask)
key = random.PRNGKey(0)
x = random.normal(key, (size,))
np.testing.assert_allclose(add_one(x), x + 1., atol=1e-5, rtol=1e-5)
def test_broadcasted_load_store(self):
m, n = 16, 32
@functools.partial(
self.pallas_call,
out_shape=(
jax.ShapeDtypeStruct((m, n), jnp.float32)
), grid=1)
def load(x_ref, o_ref):
x = pl.load(x_ref, (jnp.arange(m), jnp.arange(n)))
pl.store(o_ref, (jnp.arange(m), jnp.arange(n)), x + 1.)
key = random.PRNGKey(0)
x = random.normal(key, (m, n))
np.testing.assert_allclose(load(x), x + 1., atol=1e-5, rtol=1e-5)
def test_unused_ref(self):
m, n = 16, 32
@functools.partial(
self.pallas_call,
out_shape=(
jax.ShapeDtypeStruct((m, n), jnp.float32)
), grid=1)
def dummy(_, o_ref):
pl.store(o_ref, (jnp.arange(m), jnp.arange(n)), jnp.ones_like(o_ref))
key = random.PRNGKey(0)
x = random.normal(key, (m, n))
np.testing.assert_allclose(dummy(x), jnp.ones_like(x), atol=1e-5, rtol=1e-5)
def test_pallas_call_with_input_output_aliasing(self):
def add_inplace_kernel(_, o_ref, *, block_size):
pid = pl.program_id(axis=0) # we use a 1d launch grid so axis is 0
block_start = pid * block_size
offsets = block_start + jnp.arange(block_size)
mask = offsets < o_ref.shape[0]
x = pl.load(o_ref, (offsets,), mask=mask)
output = x + 1
pl.store(o_ref, (offsets,), output, mask=mask)
grid = (8,)
size = 8
dtype = "float32"
k1 = random.PRNGKey(0)
block_size = 1
x = random.normal(k1, [size], dtype=dtype)
kernel = functools.partial(add_inplace_kernel, block_size=block_size)
out = self.pallas_call(
kernel,
out_shape=jax.ShapeDtypeStruct(x.shape, x.dtype),
grid=grid, input_output_aliases={0: 0})(x)
expected = x + 1
np.testing.assert_allclose(out, expected)
@parameterized.named_parameters(*[
("add_i32", pl.atomic_add, np.array([1, 2, 3, 4], np.int32), np.sum),
("max_i32", pl.atomic_max, np.array([1, 2, 3, 4], np.int32), np.max),
("min_i32", pl.atomic_min, np.array([1, 2, 3, 4], np.int32), np.min),
("add_f16", pl.atomic_add, np.array([1, 2, 3, 4], np.float16), np.sum),
("add_f32", pl.atomic_add, np.array([1, 2, 3, 4], np.float32), np.sum),
("max_f32", pl.atomic_max, np.array([1, 2, 3, 4], np.float32), np.max),
("min_f32", pl.atomic_min, np.array([1, 2, 3, 4], np.float32), np.min),
])
def test_scalar_atomic(self, op, value, numpy_op):
if jt.get_compute_capability(0) < 70:
raise unittest.SkipTest(
"Atomic ops onl works on GPUs with capability >= sm70")
@functools.partial(
self.pallas_call,
out_shape=jax.ShapeDtypeStruct((), value.dtype),
grid=value.shape[0],
input_output_aliases={1: 0})
def atomic_kernel(x_ref, _, o_ref):
pid = pl.program_id(axis=0)
op(o_ref, (), x_ref[pid])
if op == pl.atomic_add:
neutral = np.array(0, dtype=value.dtype)
elif op == pl.atomic_max:
if np.issubdtype(value.dtype, np.integer):
neutral = np.array(np.iinfo(value.dtype).min, value.dtype)
else:
neutral = np.array(-float('inf'), value.dtype)
elif op == pl.atomic_min:
if np.issubdtype(value.dtype, np.integer):
neutral = np.array(np.iinfo(value.dtype).max, value.dtype)
else:
neutral = np.array(float('inf'), value.dtype)
elif op == pl.atomic_or:
neutral = np.array(False, value.dtype)
else:
raise NotImplementedError()
out = atomic_kernel(value, neutral)
np.testing.assert_allclose(out, numpy_op(value))
@parameterized.parameters(*[(0,), (1,)])
def test_array_atomic_add(self, axis):
if jt.get_compute_capability(0) < 70:
raise unittest.SkipTest(
"Atomic ops onl works on GPUs with capability >= sm70")
m, n = 32, 8
out_shape = jax.ShapeDtypeStruct((n if axis == 0 else m,), jnp.float32)
@functools.partial(
self.pallas_call,
out_shape=out_shape,
grid=1,
input_output_aliases={1: 0})
def reduce(x_ref, _, y_ref):
x = pl.load(x_ref, (jnp.arange(m), jnp.arange(n)))
y = jnp.sum(x, axis=axis)
pl.atomic_add(y_ref, (jnp.arange(y.shape[0]),), y)
x = random.normal(random.PRNGKey(0), (m, n))
y = jnp.zeros(out_shape.shape, out_shape.dtype)
y = reduce(x, y)
y_ref = np.sum(x, axis=axis)
np.testing.assert_allclose(y, y_ref, atol=1e-2, rtol=1e-2)
@parameterized.parameters(False, True)
def test_reduce_only_dim(self, use_store):
m = 32
x = random.normal(random.PRNGKey(0), (m,), dtype=jnp.float32)
out_shape = jax.ShapeDtypeStruct((), x.dtype)
@functools.partial(
self.pallas_call,
out_shape=out_shape,
grid=1, debug=False)
def reduce(x_ref, y_ref):
x = pl.load(x_ref, (jnp.arange(m),))
y = jnp.sum(x, axis=-1)
if use_store:
pl.store(y_ref, (), y)
else:
y_ref[...] = y
y = reduce(x)
y_ref = jnp.sum(x, axis=-1)
np.testing.assert_allclose(y, y_ref, atol=1e-2, rtol=1e-2)
@parameterized.named_parameters(*[
(f"{op_name}_{dtype}_{axis}", op, dtype, axis)
for op_name, op in [
("add", jnp.sum),
("max", jnp.max),
("min", jnp.min),
("argmax", jnp.argmax),
("argmin", jnp.argmin),
]
for axis in [0, 1, (1,), (0, 1)]
for dtype in ["float16", "float32", "int32", "uint32"]
if isinstance(axis, int) or "arg" not in op_name
])
def test_array_reduce(self, op, dtype, axis):
m, n = 32, 8
out_dtype = dtype
if op in {jnp.argmin, jnp.argmax}:
out_dtype = jnp.int32
def make_x(key):
if jnp.issubdtype(dtype, jnp.integer):
return random.shuffle(key, jnp.arange(m * n, dtype=dtype)).reshape(m, n)
else:
return random.normal(key, (m, n), dtype=dtype)
out_shape = jax.ShapeDtypeStruct(
op(make_x(random.PRNGKey(0)), axis=axis).shape, out_dtype)
@functools.partial(
self.pallas_call,
out_shape=out_shape,
grid=1, debug=not isinstance(axis, int))
def reduce(x_ref, y_ref):
x = pl.load(x_ref, (jnp.arange(m), jnp.arange(n)))
y = op(x, axis=axis)
pl.store(y_ref, tuple(jnp.arange(d) for d in y.shape), y)
for i, key in enumerate(random.split(random.PRNGKey(0), 20)):
x = make_x(key)
y = reduce(x)
y_ref = op(x, axis=axis)
np.testing.assert_allclose(y, y_ref, atol=1e-2, rtol=1e-2, err_msg=i)
def test_using_pallas_slice(self):
m, n = 32, 4
out_shape = jax.ShapeDtypeStruct((4, n), jnp.float32)
@functools.partial(
self.pallas_call,
out_shape=out_shape,
grid=1)
def slice_kernel(x_ref, y_ref):
x = pl.load(x_ref, (pl.dslice(0, 4), pl.dslice(0, 4)))
pl.store(y_ref, (pl.dslice(4), pl.dslice(4)), x)
x = random.normal(random.PRNGKey(0), (m, n))
y = slice_kernel(x)
y_ref = x[:4]
np.testing.assert_allclose(y, y_ref, atol=1e-2, rtol=1e-2)
def test_pallas_trace_cache(self):
trace_count = 0
@functools.partial(
self.pallas_call, out_shape=jax.ShapeDtypeStruct((), jnp.float32),
grid=1)
def add_one(x_ref, o_ref):
nonlocal trace_count
o_ref[()] = x_ref[()] + 1.
trace_count += 1
@jax.jit
def f(x):
return add_one(add_one(x))
self.assertEqual(f(0.), 2.)
self.assertEqual(trace_count, 1)
def test_pallas_compilation_cache(self):
if not compile_jaxpr:
self.skipTest("No Triton GPU.")
if self.INTERPRET:
raise unittest.SkipTest("No Triton compilation in interpreter mode.")
@functools.partial(
self.pallas_call, out_shape=jax.ShapeDtypeStruct((), jnp.float32),
grid=1)
def add_one(x_ref, o_ref):
o_ref[()] = x_ref[()] + 1.
@jax.jit
def f(x):
return add_one(add_one(x))
self.assertEqual(f(0.), 2.)
num_misses = compile_jaxpr.cache_info().misses
self.assertEqual(num_misses, 1)
@parameterized.parameters(*[
(0, 0, 1),
(0, 1, 1),
(1, 0, 1),
(1, 1, 1),
(2, 1, 1),
(2, 1, 1),
])
def test_atomic_cas(self, init_value, cmp, new_value):
@functools.partial(
self.pallas_call, out_shape=(
jax.ShapeDtypeStruct((), jnp.int32),
jax.ShapeDtypeStruct((), jnp.int32)),
input_output_aliases={0: 0})
def swap(_, lock_ref, out_ref):
out_ref[()] = pl.atomic_cas(lock_ref, cmp, new_value)
lock, out = swap(init_value)
np.testing.assert_allclose(lock, new_value if cmp == init_value else
init_value)
np.testing.assert_allclose(out, init_value)
@parameterized.parameters(*[
1, 2, 3, 4, 8
])
def test_atomic_counter(self, num_threads):
if self.INTERPRET:
self.skipTest("While loop not supported in interpret mode yet.")
@functools.partial(
self.pallas_call, out_shape=(
jax.ShapeDtypeStruct((), jnp.int32),
jax.ShapeDtypeStruct((), jnp.int32)),
input_output_aliases={0: 0, 1: 1},
grid=(num_threads,))
def increment(_, __, lock_ref, counter_ref):
def _cond(_):
return pl.atomic_cas(lock_ref, 0, 1) == 1
lax.while_loop(_cond, lambda a: a, 0)
counter_ref[...] += 1
pl.atomic_xchg(lock_ref, (), 0)
lock, count = increment(0, 0)
np.testing.assert_allclose(lock, 0)
np.testing.assert_allclose(count, num_threads)
class PallasCallInterpreterTest(PallasCallTest):
INTERPRET = True
class PallasControlFlowTest(PallasTest):
def setUp(self):
super().setUp()
if self.INTERPRET:
self.skipTest("Control flow not supported in interpreter mode yet.")
def test_loop_with_float64_carry(self):
# Test that the jnp.zeros(f64) loop init_val is actually f64, and that
# fori_loop handles i64 index variables, i.e. error: 'scf.for' op along
# control flow edge from Region #0 to Region #0: source type #0
# 'tensor<4xf64>' should match input type #0 'tensor<4xf32>'
orig_val = jax.config.jax_enable_x64
jax.config.update("jax_enable_x64", True)
try:
@functools.partial(self.pallas_call,
out_shape=jax.ShapeDtypeStruct((4,), jnp.float64),
grid=1,
debug=False)
def f(x_ref, y_ref):
def body(i, acc):
# TODO(sharadmv): DCE loop index but retain carry breaks scan pattern.
# return acc + x_ref[...]
return acc + x_ref[...] + i * 0
y_ref[...] = lax.fori_loop(
0, 3, body, jnp.zeros((4,), jnp.float64))
np.testing.assert_allclose(np.arange(1, 5.) * 3,
f(jnp.arange(1, 5., dtype=jnp.float64)))
finally:
jax.config.update("jax_enable_x64", orig_val)
def test_cond_simple(self):
arg = jnp.float32(0.)
@functools.partial(self.pallas_call,
out_shape=jax.ShapeDtypeStruct(arg.shape, jnp.float32),
debug=False)
def f(branch_ref, x_ref, y_ref):
y_ref[...] = lax.switch(
branch_ref[...],
(lambda x: x**2, lambda x: -x),
x_ref[...])
y = f(jnp.int32(0), arg + 3.)
self.assertEqual(y, 9.)
y = f(jnp.int32(1), arg + 2.)
self.assertEqual(y, -2.)
def test_cond_threebranch(self):
arg = jnp.float32(0.)
@functools.partial(self.pallas_call,
out_shape=jax.ShapeDtypeStruct(arg.shape, jnp.float32),
grid=1,
debug=False)
def f(branch_ref, x_ref, y_ref):
y_ref[...] = lax.switch(
branch_ref[...],
(lambda x: x**2, lambda x: -x, lambda x: -x**2),
x_ref[...])
y = f(jnp.int32(0), arg + 3.)
self.assertEqual(y, 9.)
y = f(jnp.int32(1), arg + 2.)
self.assertEqual(y, -2.)
y = f(jnp.int32(2), arg + 4.)
self.assertEqual(y, -16.)
@parameterized.parameters(1, 2, 4, 8)
def test_cond_vectors(self, block_size):
arg = jnp.float32([0.] * 8)
@functools.partial(self.pallas_call,
out_shape=jax.ShapeDtypeStruct(arg.shape, jnp.float32),
in_specs=[pl.BlockSpec(lambda _: (), ()),
pl.BlockSpec(lambda i: i, (block_size,))],
out_specs=pl.BlockSpec(lambda i: i, (block_size,)),
grid=jt.cdiv(arg.shape[0], block_size),
debug=False)
def f(branch_ref, x_ref, y_ref):
y_ref[...] = lax.switch(
branch_ref[...],
(lambda x: x**2, lambda x: -x),
x_ref[...])
y = f(jnp.int32(0), arg + 3.)
np.testing.assert_allclose(y, arg + 9.)
y = f(jnp.int32(1), arg + 2.)
np.testing.assert_allclose(y, arg - 2.)
@parameterized.parameters(1, 2, 4, 8)
def test_cond_threebranch_vectors(self, block_size):
arg = jnp.float32([0.] * 8)
@functools.partial(self.pallas_call,
out_shape=jax.ShapeDtypeStruct(arg.shape, jnp.float32),
in_specs=[pl.BlockSpec(lambda _: (), ()),
pl.BlockSpec(lambda i: i, (block_size,))],
out_specs=pl.BlockSpec(lambda i: i, (block_size,)),
grid=jt.cdiv(arg.shape[0], block_size),
debug=False)
def f(branch_ref, x_ref, y_ref):
y_ref[...] = lax.switch(
branch_ref[...],
(lambda x: x**2, lambda x: -x, lambda x: -x**2),
x_ref[...])
y = f(jnp.int32(0), arg + 3.)
np.testing.assert_allclose(y, arg + 9.)
y = f(jnp.int32(1), arg + 2.)
np.testing.assert_allclose(y, arg - 2.)
y = f(jnp.int32(2), arg + 4.)
np.testing.assert_allclose(y, arg - 16.)
@parameterized.parameters(*itertools.product([1, 8], [1, 2, 4]))
def test_cond_threebranch_matrix_out(self, bx, by):
x = jnp.arange(64.)[:, None]
y = jnp.arange(128.)[None, :]
# TODO(sharadmv): Renaming in_specs->in_spec silently breaks.
@functools.partial(
self.pallas_call,
out_shape=jax.ShapeDtypeStruct((x.shape[0], y.shape[1]), jnp.float32),
in_specs=[
pl.BlockSpec(lambda _, __: (), ()),
pl.BlockSpec(lambda i, _: (i, 0), (bx, 1)),
pl.BlockSpec(lambda _, j: (0, j), (1, by))],
out_specs=pl.BlockSpec(lambda i, j: (i, j), (bx, by)),
grid=(jt.cdiv(x.shape[0], bx), jt.cdiv(y.shape[1], by)),
debug=False)
def f(branch_ref, x_ref, y_ref, o_ref):
o_ref[...] = lax.switch(
branch_ref[...],
(lambda x, y: (x - y)**2,
lambda x, y: -jnp.abs(x - y),
lambda x, y: jnp.sqrt(jnp.abs(x - y))),
x_ref[...],
y_ref[...])
np.testing.assert_allclose(f(jnp.int32(0), x, y), (x - y)**2)
np.testing.assert_allclose(f(jnp.int32(1), x, y), -jnp.abs(x - y))
np.testing.assert_allclose(f(jnp.int32(2), x, y), jnp.sqrt(jnp.abs(x - y)))
def test_conditional_write(self):
arg = jnp.arange(8, dtype=jnp.float32)
@functools.partial(self.pallas_call,
out_shape=jax.ShapeDtypeStruct(arg.shape, jnp.float32),
debug=False)
def f(branch_ref, x_ref, out_ref):
out_ref[...] = -x_ref[...]
def if_true(z):
out_ref[4] = z
return ()
jax.lax.cond(branch_ref[...], if_true, lambda z: (), x_ref[6])
np.testing.assert_allclose(f(jnp.bool_(True), arg),
jnp.float32([0., -1, -2, -3, 6, -5, -6, -7]))
np.testing.assert_allclose(f(jnp.bool_(False), arg),
-arg)
# We actually expect the assertion failure in linearize, but this also covers another case where an effect was causing an earlier assertion failure.
with self.assertRaises(AssertionError):
# Notably, we should not have a ValueError for mismatched Read<N> effect.
dx = jax.grad(lambda x: jnp.sum(f(jnp.bool_(True), x)**2))(arg)
# np.testing.assert_allclose(
# dx, jnp.float32([0., 2, 4, 6, 0, 10, 12 + 12, 14]))
def test_scan_cond_vm_explicit_ref_arg(self):
program = jnp.int32([0, 1, 2, 3, 2])
params = jnp.arange(len(program) * 3.).reshape(len(program), 3)
x = jnp.arange(7.)
bx = 4
@jax.jit
@functools.partial(
self.pallas_call,
out_shape=jax.ShapeDtypeStruct((x.shape[0],), jnp.float32),
in_specs=[
pl.BlockSpec(lambda _: (0,), program.shape), # program
pl.BlockSpec(lambda _: (0, 0), params.shape), # params
pl.BlockSpec(lambda i: (i,), (bx,))], # x
out_specs=pl.BlockSpec(lambda i: (i,), (bx,)),
grid=jt.cdiv(x.shape[0], bx),
debug=False)
def f(program_ref, params_ref, x_ref, out_ref):
x = x_ref[...]
def body_fn(i, args):
state, program_ref, params_ref = args
opcode = program_ref[i]
state = jax.lax.switch(
opcode,
(lambda state, params, i: state + params[i, 0] * 2.**i * x,
lambda state, params, i: state + params[i, 1] * 2.**i * x,
lambda state, params, i: state + params[i, 2] * 2.**i * x,
lambda state, params, i: state + params[i, 1] * 2.**i * x,
),
state, params_ref, i)
return state, program_ref, params_ref
out_ref[...] = jax.lax.fori_loop(
0, len(program), body_fn,
(jnp.zeros(x.shape), program_ref, params_ref))[0]
expected = (x * params[0, 0] +
2 * x * params[1, 1] +
4 * x * params[2, 2] +
8 * x * params[3, 1] +
16 * x * params[4, 2])
np.testing.assert_allclose(f(program, params, x), expected)
with self.assertRaises(AssertionError):
jax.value_and_grad(lambda params, x: f(program, params, x).sum())(
params, x)
def test_scan_cond_vm_closing_over_ref(self):
# ** Difference is the closure over params_ref in the switch branches. **
program = jnp.int32([0, 1, 2, 3, 2, -1])
params = jnp.arange(len(program) * 3.).reshape(len(program), 3)
x = jnp.arange(7.)
bx = 4
@jax.jit
@functools.partial(
self.pallas_call,
out_shape=jax.ShapeDtypeStruct((x.shape[0],), jnp.float32),
in_specs=[
pl.BlockSpec(lambda _: (0,), program.shape), # program
pl.BlockSpec(lambda _: (0, 0), params.shape), # params
pl.BlockSpec(lambda i: (i,), (bx,))], # x
out_specs=pl.BlockSpec(lambda i: (i,), (bx,)),
grid=jt.cdiv(x.shape[0], bx),
debug=False)
def f(program_ref, params_ref, x_ref, out_ref):
x = x_ref[...]
def body_fn(i, args):
state, program_ref, params_ref = args
opcode = program_ref[i] + 1
state = jax.lax.switch(
opcode,
(lambda state, *_: state,
lambda state, i: state + params_ref[i, 0] * 2.**i * x,
lambda state, i: state + params_ref[i, 1] * 2.**i * x,
lambda state, i: state + params_ref[i, 2] * 2.**i * x,
lambda state, i: state + params_ref[i, 1] * 2.**i * x,
),
state, i)
return state, program_ref, params_ref
out_ref[...] = jax.lax.fori_loop(
0, len(program), body_fn,
(jnp.zeros(x.shape), program_ref, params_ref))[0]
expected = (x * params[0, 0] +
2 * x * params[1, 1] +
4 * x * params[2, 2] +
8 * x * params[3, 1] +
16 * x * params[4, 2])
np.testing.assert_allclose(f(program, params, x), expected)
with self.assertRaises(AssertionError):
jax.value_and_grad(lambda params, x: f(program, params, x).sum())(
params, x)
def test_fori_loop_simple(self):
@functools.partial(self.pallas_call,
out_shape=jax.ShapeDtypeStruct((), jnp.int32))
def f(x_ref, y_ref):
y_ref[...] = x_ref[...]
def body(i, _):
y_ref[...] += 1
lax.fori_loop(0, 5, body, None)
y = f(0)
self.assertEqual(y, 5)
def test_fori_loop_with_nonzero_lower_bound(self):
@functools.partial(self.pallas_call,
out_shape=jax.ShapeDtypeStruct((), jnp.int32))
def f(x_ref, y_ref):
y_ref[...] = x_ref[...]
def body(i, _):
y_ref[...] += i
lax.fori_loop(2, 5, body, None)
y = f(6)
self.assertEqual(y, 6 + 2 + 3 + 4)
def test_fori_loop_accumulates(self):
@functools.partial(self.pallas_call,
out_shape=jax.ShapeDtypeStruct((), jnp.int32))
def f(x_ref, y_ref):
def body(i, acc):
return acc + 1
acc = lax.fori_loop(0, 5, body, 0)
y_ref[...] = acc
y = f(0)
self.assertEqual(y, 5)
def test_fori_loop_accumulates_with_index(self):
@functools.partial(self.pallas_call,
out_shape=jax.ShapeDtypeStruct((), jnp.int32))
def f(x_ref, y_ref):
def body(i, acc):
return acc + i
acc = lax.fori_loop(0, 5, body, 0)
y_ref[...] = acc
y = f(0)
self.assertEqual(y, 10)
def test_fori_loop_with_writing_to_index(self):
@functools.partial(self.pallas_call,
out_shape=jax.ShapeDtypeStruct((8,), jnp.int32))
def f(y_ref):
def body(i, _):
y_ref[i] = i
lax.fori_loop(0, y_ref.shape[0], body, None)
y = f()
np.testing.assert_allclose(y, jnp.arange(8))
def test_fori_loop_with_dynamic_indices(self):
@functools.partial(self.pallas_call,
out_shape=jax.ShapeDtypeStruct((), jnp.int32))
def f(lb_ref, ub_ref, y_ref):
y_ref[...] = 0
def body(i, a):
y_ref[...] += i
return a
lax.fori_loop(lb_ref[...], ub_ref[...], body, 1)
y = f(2, 5)
np.testing.assert_allclose(y, 2 + 3 + 4)
y = f(1, 8)
np.testing.assert_allclose(y, sum(range(1, 8)))
def test_simple_while(self):
@functools.partial(self.pallas_call,
out_shape=jax.ShapeDtypeStruct((), jnp.int32))
def f(x_ref, y_ref):
x = x_ref[...]
y_ref[...] = 0
def cond(x):
return x < 5
def body(x):
y_ref[...] += 1
return x + 1
lax.while_loop(cond, body, x)
y = f(0)
self.assertEqual(y, 5)
def test_simple_while_with_only_values(self):
@functools.partial(self.pallas_call,
out_shape=jax.ShapeDtypeStruct((), jnp.int32))
def f(y_ref):
def cond(acc):
return acc < 5
def body(acc):
acc += 1
return acc
acc = lax.while_loop(cond, body, 0)
y_ref[...] = acc
y = f()
self.assertEqual(y, 5)
def test_while_with_dynamic_condition(self):
@functools.partial(self.pallas_call,
out_shape=jax.ShapeDtypeStruct((), jnp.int32))
def f(i_ref, y_ref):
y_ref[...] = 0
n_iter = i_ref[...]
def cond(i):
return i < n_iter
def body(i):
y_ref[...] += 1
return i + 1
_ = lax.while_loop(cond, body, 0)
self.assertEqual(f(1), 1)
self.assertEqual(f(4), 4)
self.assertEqual(f(100), 100)
def test_vmap_of_while_with_dynamic_condition(self):
@functools.partial(self.pallas_call,
out_shape=jax.ShapeDtypeStruct((), jnp.int32))
def f(i_ref, y_ref):
y_ref[...] = 0
n_iter = i_ref[...]
def cond(i):
return i < n_iter
def body(i):
y_ref[...] += 1
return i + 1
_ = lax.while_loop(cond, body, 0)
x = jnp.array([1, 4, 100])
np.testing.assert_array_equal(jax.vmap(f)(x), x)
class PallasControlFlowInterpreterTest(PallasControlFlowTest):
INTERPRET = True
AD_TEST_CASES = [
("square", lambda x: x * x),
("square_pow", lambda x: x ** 2),
("square_fn", jnp.square),
("add_one", lambda x: x + 1.),
("exp", jnp.exp),
("reciprocal", jnp.reciprocal),
("one_over_x", lambda x: 1. / x),
("recip_exp_sq", lambda x: jnp.reciprocal(jnp.exp(x) ** 2)),
("exp_neg_sq", lambda x: jnp.exp(-x) ** 2),
("sin", jnp.sin),
("tanh", jnp.tanh),
]
class PallasCallAutodifferentiationTest(PallasTest):
@parameterized.named_parameters(*AD_TEST_CASES)
def test_jvp(self, impl):
@functools.partial(
self.pallas_call, out_shape=jax.ShapeDtypeStruct((), jnp.float32),
debug=False,
grid=1)
def pallas_impl(x_ref, o_ref):
x = x_ref[()]
o_ref[()] = impl(x)
k1, k2 = random.split(random.PRNGKey(0))
x = random.normal(k1)
t = random.normal(k2)
out_primal, out_tangent = jax.jvp(pallas_impl, (x,), (t,))
out_primal_ref, out_tangent_ref = jax.jvp(impl, (x,), (t,))
np.testing.assert_allclose(out_primal, out_primal_ref, atol=1e-5, rtol=1e-5)
np.testing.assert_allclose(out_tangent, out_tangent_ref, atol=1e-5,
rtol=1e-5)
jtu.check_grads(pallas_impl, (x,), modes=["fwd"], order=2)
@parameterized.named_parameters(*AD_TEST_CASES)
def test_pallas_around_grad(self, impl):
@functools.partial(
self.pallas_call,
out_shape=jax.ShapeDtypeStruct((), jnp.float32),
name=self.id().split(".")[-1],
debug=True,
grid=1)
def pallas_impl(x_ref, o_ref):
x = x_ref[()]
o_ref[()] = jax.grad(impl)(x)
x = random.normal(random.PRNGKey(0))
out_grad = pallas_impl(x)
out_grad_ref = jax.grad(impl)(x)
np.testing.assert_allclose(out_grad, out_grad_ref, atol=1e-5, rtol=1e-5)
@parameterized.named_parameters(*AD_TEST_CASES)
def test_jvp_slice(self, impl):
@functools.partial(
self.pallas_call, out_shape=jax.ShapeDtypeStruct((4,), jnp.float32),
debug=False,
grid=1)
def pallas_impl(x_ref, o_ref):
x = x_ref[jnp.arange(2)]
o_ref[jnp.arange(2)] = jnp.zeros(2)
o_ref[2 + jnp.arange(2)] = impl(x)
k1, k2 = random.split(random.PRNGKey(0))
x = random.normal(k1, (8,))
t = random.normal(k2, (8,))
out_primal, out_tangent = jax.jvp(pallas_impl, (x,), (t,))
out_primal_ref, out_tangent_ref = jax.jvp(
lambda x: jnp.concatenate([jnp.zeros(2), impl(x[:2])]), (x,), (t,))
np.testing.assert_allclose(out_primal, out_primal_ref, atol=1e-5, rtol=1e-5)
np.testing.assert_allclose(out_tangent, out_tangent_ref, atol=1e-5,
rtol=1e-5)
jtu.check_grads(pallas_impl, (x,), modes=["fwd"], order=2)
# TODO(sharadmv): enable this when we update Triton
# def test_jvp_matmul(self):
# k1, k2 = random.split(random.PRNGKey(0))
# x = random.normal(k1, (256, 128))
# y = random.normal(k2, (128, 64))
# bm, bn, bk, gm = 64, 128, 32, 8
# mm = functools.partial(matmul, bm=bm, bn=bn, bk=bk, gm=gm,
# interpret=self.INTERPRET)
# jtu.check_grads(mm, (x, y), modes=["fwd"], order=1)
def test_slicing_block_spec(self):
@functools.partial(
self.pallas_call, out_shape=jax.ShapeDtypeStruct((4,), jnp.float32),
in_specs=[
pl.BlockSpec(lambda _: (0, 0), (None, 4)),
pl.BlockSpec(lambda _: (1, 0), (None, 4)),
],
out_specs=None,
debug=False, grid=1)
def add_vectors(x_ref, y_ref, o_ref):
o_ref[:] = x_ref[:] + y_ref[:]
xy = jnp.arange(8.).reshape((2, 4))
out = add_vectors(xy, xy)
out_ref = xy[0] + xy[1]
np.testing.assert_allclose(out, out_ref)
class PallasCallVmapTest(PallasTest):
def test_vmap_of_simple_kernel(self):
@functools.partial(
self.pallas_call, out_shape=jax.ShapeDtypeStruct((), jnp.int32),
debug=False)
def add_one(x_ref, o_ref):
o_ref[()] = x_ref[()] + 1
out = jax.vmap(add_one)(jnp.arange(8))
out_ref = jnp.arange(1, 9)
np.testing.assert_allclose(out, out_ref)
def test_vmap_of_simple_kernel_with_in_axes_None(self):
@functools.partial(
self.pallas_call, out_shape=jax.ShapeDtypeStruct((), jnp.int32),
debug=False)
def add(x_ref, y_ref, o_ref):
o_ref[()] = x_ref[()] + y_ref[()]
out = jax.vmap(add, in_axes=(0, None))(jnp.arange(8), 1)
out_ref = jnp.arange(1, 9)
np.testing.assert_allclose(out, out_ref)
def test_double_vmap_of_simple_kernel(self):
@functools.partial(
self.pallas_call, out_shape=jax.ShapeDtypeStruct((), jnp.int32),
debug=False)
def add_one(x_ref, o_ref):
o_ref[()] = x_ref[()] + 1
out = jax.vmap(jax.vmap(add_one))(jnp.arange(8).reshape((4, 2)))
out_ref = jnp.arange(1, 9).reshape((4, 2))
np.testing.assert_allclose(out, out_ref)
def test_quadruple_vmap_of_simple_kernel(self):
@functools.partial(
self.pallas_call, out_shape=jax.ShapeDtypeStruct((), jnp.int32),
debug=False)
def add_one(x_ref, o_ref):
o_ref[()] = x_ref[()] + 1
out = jax.vmap(jax.vmap(jax.vmap(jax.vmap(add_one))))(
jnp.arange(15 * 8).reshape((5, 3, 4, 2)))
out_ref = jnp.arange(1, 15 * 8 + 1).reshape((5, 3, 4, 2))
np.testing.assert_allclose(out, out_ref)
def test_quadruple_vmap_of_batched_kernel(self):
@functools.partial(
self.pallas_call, out_shape=jax.ShapeDtypeStruct((7,), jnp.int32),
debug=False,
grid=(7,))
def add_one(x_ref, o_ref):
i = pl.program_id(0)
o_ref[i] = x_ref[i] + 1
out = jax.vmap(jax.vmap(jax.vmap(jax.vmap(add_one))))(
jnp.arange(15 * 8 * 7).reshape((5, 3, 4, 2, 7)))
out_ref = jnp.arange(1, 15 * 8 * 7 + 1).reshape((5, 3, 4, 2, 7))
np.testing.assert_allclose(out, out_ref)
def test_vmap_of_slicing_kernel(self):
@functools.partial(
self.pallas_call, out_shape=jax.ShapeDtypeStruct((2,), jnp.int32),
debug=False,
grid=(2,))
def add_one(x_ref, o_ref):
i = pl.program_id(0)
o_ref[i] = x_ref[i] + 1
out = jax.vmap(add_one)(jnp.arange(8).reshape((4, 2)))
out_ref = jnp.arange(1, 9).reshape((4, 2))
np.testing.assert_allclose(out, out_ref)
def test_vmap_of_kernel_with_input_output_aliases(self):
@functools.partial(
self.pallas_call, out_shape=jax.ShapeDtypeStruct((), jnp.int32),
debug=False,
input_output_aliases={1: 0},
grid=())
def add(x_ref, _, o_ref):
o_ref[()] = x_ref[()] + o_ref[()] + 1
out = jax.vmap(add, in_axes=(0, None))(jnp.arange(8), 1)
out_ref = jnp.arange(2, 10)
np.testing.assert_allclose(out, out_ref)
def test_vmap_of_slicing_kernel_different_axes(self):
@functools.partial(
self.pallas_call, out_shape=jax.ShapeDtypeStruct((2,), jnp.int32),
debug=False,
grid=(2,))
def add_one(x_ref, o_ref):
i = pl.program_id(0)
o_ref[i] = x_ref[i] + 1
add_one_ref = lambda x: x + 1
x = jnp.arange(8).reshape((2, 4))
out = jax.vmap(add_one, in_axes=1, out_axes=1)(x)
out_ref = jax.vmap(add_one_ref, in_axes=1, out_axes=1)(x)
np.testing.assert_allclose(out, out_ref)
out = jax.vmap(add_one, in_axes=1, out_axes=0)(x)
out_ref = jax.vmap(add_one_ref, in_axes=1, out_axes=0)(x)
np.testing.assert_allclose(out, out_ref)
def test_double_vmap_of_slicing_kernel_different_axes(self):
@functools.partial(
self.pallas_call, out_shape=jax.ShapeDtypeStruct((4,), jnp.float32),
debug=False,
grid=(4,))
def sin(x_ref, o_ref):
i = pl.program_id(0)
o_ref[i] = jnp.sin(x_ref[i])
sin_ref = jnp.sin
x = jnp.arange(64.).reshape((8, 4, 2))
out = jax.vmap(jax.vmap(sin, in_axes=1), in_axes=0)(x)
out_ref = jax.vmap(jax.vmap(sin_ref, in_axes=1), in_axes=0)(x)
np.testing.assert_allclose(out, out_ref, atol=1e-3, rtol=1e-3)
class PallasCallInterpreterVmapTest(PallasCallVmapTest):
INTERPRET = True
class PallasOpsTest(PallasTest):
def test_ne(self):
@functools.partial(
self.pallas_call, out_shape=jax.ShapeDtypeStruct((8,), jnp.bool_),
grid=1)
def ne(x_ref, y_ref, o_ref):
o_ref[:] = x_ref[...] != y_ref[...]
x = jnp.ones(8)
y = jnp.arange(8)
not_equal = ne(x, y)
np.testing.assert_allclose(not_equal, x != y)
def test_isnan(self):
@functools.partial(
self.pallas_call, out_shape=jax.ShapeDtypeStruct((8,), jnp.bool_),
grid=1)
def isnan(x_ref, o_ref):
o_ref[:] = jnp.isnan(x_ref[...])
x = jnp.arange(8.)
x = x.at[3].set(jnp.nan)
np.testing.assert_allclose(isnan(x), jnp.isnan(x))
class PallasOpsInterpretTest(PallasOpsTest):
INTERPRET = True
class PallasPrimitivesTest(parameterized.TestCase):
@parameterized.parameters(*[
(lambda: (pl.dslice(0, 4), slice(None), slice(None)), "<- a[:,:,:]"),
(lambda: (pl.dslice(0, 3), slice(None), slice(None)), "<- a[:3,:,:]"),
(lambda: (pl.dslice(1, 3), slice(None), pl.dslice(0, 4)), "<- a[1:4,:,:4]"),
(lambda: (jnp.arange(5), slice(None), pl.dslice(0, 4)), "<- a[b,:,:4]"),
(lambda: (jnp.arange(5), jnp.arange(3), jnp.arange(4)), "<- a[e,f,g]"),
])
def test_load_pretty_print(self, expr, expected):
def body(x_ref):
x = pl.load(x_ref, expr())
return [x]
jaxpr, _, _ = pe.trace_to_jaxpr_dynamic(
lu.wrap_init(body), [state.shaped_array_ref((4, 3, 2), jnp.int32)])
self.assertIn(expected, jaxpr.pretty_print(use_color=False))
@parameterized.parameters(*[
(lambda: (pl.dslice(0, 4), slice(None), slice(None)), "a[:,:,:] <-"),
(lambda: (pl.dslice(0, 3), slice(None), slice(None)), "a[:3,:,:] <-"),
(lambda: (pl.dslice(1, 3), slice(None), pl.dslice(0, 4)), "a[1:4,:,:4] <-"),
(lambda: (jnp.arange(5), slice(None), pl.dslice(0, 4)), "a[b,:,:4] <-"),
(lambda: (jnp.arange(5), jnp.arange(3), jnp.arange(4)), "a[l,m,n] <-"),
])
def test_store_pretty_print(self, expr, expected):
def body(x_ref):
pl.store(x_ref, expr(), pl.load(x_ref, expr()))
return []
jaxpr, _, _ = pe.trace_to_jaxpr_dynamic(
lu.wrap_init(body), [state.shaped_array_ref((4, 3, 2), jnp.int32)])
self.assertIn(expected, jaxpr.pretty_print(use_color=False))
@parameterized.parameters(*[
(lambda: (pl.dslice(0, 4), slice(None), slice(None)),
"c:i32[4,3,2], a[:,:,:] <-"),
(lambda: (pl.dslice(0, 3), slice(None), slice(None)),
"c:i32[3,3,2], a[:3,:,:] <-"),
(lambda: (pl.dslice(1, 3), slice(None), pl.dslice(0, 4)),
"c:i32[3,3,4], a[1:4,:,:4] <-"),
(lambda: (jnp.arange(5), slice(None), pl.dslice(0, 4)),
"e:i32[5,3,4], a[b,:,:4] <-"),
(lambda: (jnp.arange(5), jnp.arange(3), jnp.arange(4)),
"o:i32[5,3,4], a[l,m,n] <-"),
])
def test_swap_pretty_print(self, expr, expected):
def body(x_ref):
x = pl.swap(x_ref, expr(), pl.load(x_ref, expr()))
return [x]
jaxpr, _, _ = pe.trace_to_jaxpr_dynamic(
lu.wrap_init(body), [state.shaped_array_ref((4, 3, 2), jnp.int32)])
self.assertIn(expected, jaxpr.pretty_print(use_color=False))
class FusedAttentionTest(parameterized.TestCase):
@parameterized.named_parameters(*[
(f"{batch_size=}_{seq_len=}_{num_heads=}_{head_dim=}_{causal=}_{use_fwd=}",
batch_size, seq_len, num_heads, head_dim, causal, use_fwd)
for batch_size, seq_len, num_heads, head_dim, causal, use_fwd in [
(1, 384, 1, 64, False, False),
(2, 384, 2, 64, False, False),
(1, 384, 1, 64, True, False),
(2, 384, 2, 64, True, False),
(1, 384, 8, 64, True, True),
(2, 384, 8, 64, True, True),
]
])
def test_fused_attention_fwd(self, batch_size, seq_len, num_heads, head_dim,
causal, use_fwd):
if jt.get_compute_capability(0) < 80:
raise unittest.SkipTest(
"Fused attention only works on GPUs with capability >= sm80")
k1, k2, k3 = random.split(random.PRNGKey(0), 3)
q = random.normal(k1, (batch_size, seq_len, num_heads, head_dim),
dtype=jnp.float16)
k = random.normal(k2, (batch_size, seq_len, num_heads, head_dim),
dtype=jnp.float16)
v = random.normal(k3, (batch_size, seq_len, num_heads, head_dim),
dtype=jnp.float16)
if use_fwd:
@jax.jit
def impl(q, k, v):
v, _ = jax.vjp(functools.partial(attention.mha, causal=causal), q, k, v)
return v
else:
impl = functools.partial(attention.mha, causal=causal)
o = impl(q, k, v)
o_ref = attention.mha_reference(q, k, v, causal=causal)
np.testing.assert_allclose(o, o_ref, atol=0.05)
@parameterized.named_parameters(*[
(f"{batch_size=}_{seq_len=}_{num_heads=}_{head_dim=}_{causal=}",
batch_size, seq_len, num_heads, head_dim, causal)
for batch_size, seq_len, num_heads, head_dim, causal in [
(1, 384, 1, 32, False),
(2, 384, 2, 32, False),
# TODO(b/283035396): (1, 384, 1, 32, True),
# TODO(b/283035396): (2, 384, 2, 32, True),
]
])
def test_fused_attention_bwd(self, batch_size, seq_len, num_heads, head_dim,
causal):
if jt.get_compute_capability(0) < 80:
raise unittest.SkipTest(
"Fused attention only works on GPUs with capability >= sm80")
k1, k2, k3 = random.split(random.PRNGKey(0), 3)
q = random.normal(k1, (batch_size, seq_len, num_heads, head_dim),
dtype=jnp.float16)
k = random.normal(k2, (batch_size, seq_len, num_heads, head_dim),
dtype=jnp.float16)
v = random.normal(k3, (batch_size, seq_len, num_heads, head_dim),
dtype=jnp.float16)
def f(q, k, v):
return attention.mha(q, k, v, causal=causal).sum()
def f_ref(q, k, v):
return attention.mha_reference(q, k, v, causal=causal).sum()
dq, dk, dv = jax.grad(f, argnums=(0, 1, 2))(q, k, v)
dq_ref, dk_ref, dv_ref = jax.grad(f_ref, argnums=(0, 1, 2))(q, k, v)
np.testing.assert_allclose(dq, dq_ref, atol=0.1)
np.testing.assert_allclose(dk, dk_ref, atol=0.08)
np.testing.assert_allclose(dv, dv_ref, atol=0.05)
class FusedLayerNormTest(parameterized.TestCase):
@parameterized.parameters(*[
(1, 384, 192),
(2, 384, 192),
])
def test_fused_layernorm_fwd(self, batch_size, seq_len, embed_dim):
if jt.get_compute_capability(0) < 70:
raise unittest.SkipTest(
"Fused layernorm only works on GPUs with capability >= sm70")
k1, k2, k3 = random.split(random.PRNGKey(0), 3)
x = random.normal(k1, (batch_size, seq_len, embed_dim), dtype=jnp.float32)
w = jax.random.normal(k2, (embed_dim,), dtype=jnp.float32)
b = jax.random.normal(k3, (embed_dim,), dtype=jnp.float32)
o = layer_norm.layer_norm(x, w, b)
o_ref = layer_norm.layer_norm_reference(x, w, b)
np.testing.assert_allclose(o, o_ref, atol=1e-5)
@parameterized.parameters(*[
(1, 384, 192),
(2, 384, 192),
])
def test_fused_layernorm_bwd(self, batch_size, seq_len, embed_dim):
if jt.get_compute_capability(0) < 70:
raise unittest.SkipTest(
"Fused layernorm only works on GPUs with capability >= sm70")
k1, k2, k3 = random.split(random.PRNGKey(0), 3)
x = random.normal(k1, (batch_size, seq_len, embed_dim), dtype=jnp.float32)
w = jax.random.normal(k2, (embed_dim,), dtype=jnp.float32)
b = jax.random.normal(k3, (embed_dim,), dtype=jnp.float32)
def f(x, w, b):
return layer_norm.layer_norm(x, w, b).sum()
def f_ref(x, w, b):
return layer_norm.layer_norm_reference(x, w, b).sum()
dx, dw, db = jax.grad(f, argnums=(0, 1, 2))(x, w, b)
dx_ref, dw_ref, db_ref = jax.grad(f_ref, argnums=(0, 1, 2))(x, w, b)
np.testing.assert_allclose(dx, dx_ref, rtol=1e-6, atol=1e-6)
np.testing.assert_allclose(dw, dw_ref, rtol=1e-2, atol=1e-2)
np.testing.assert_allclose(db, db_ref, rtol=1e-2, atol=1e-2)
class RmsNormTest(parameterized.TestCase):
@parameterized.parameters(*[
(1, 384, 192),
(2, 384, 192),
])
def test_rms_fwd(self, batch_size, seq_len, embed_dim):
if jt.get_compute_capability(0) < 70:
raise unittest.SkipTest(
"Rms norm only works on GPUs with capability >= sm70")
k1, k2, k3 = random.split(random.PRNGKey(0), 3)
x = random.normal(k1, (batch_size, seq_len, embed_dim), dtype=jnp.float32)
w = jax.random.normal(k2, (embed_dim,), dtype=jnp.float32)
b = jax.random.normal(k3, (embed_dim,), dtype=jnp.float32)
o = rms_norm.rms_norm(x, w, b)
o_ref = rms_norm.rms_norm_reference(x, w, b)
np.testing.assert_allclose(o, o_ref, atol=1e-5)
@parameterized.parameters(*[
(1, 384, 192),
(2, 384, 192),
])
def test_rms_norm_bwd(self, batch_size, seq_len, embed_dim):
if jt.get_compute_capability(0) < 70:
raise unittest.SkipTest(
"Rms norm only works on GPUs with capability >= sm70")
k1, k2, k3 = random.split(random.PRNGKey(0), 3)
x = random.normal(k1, (batch_size, seq_len, embed_dim), dtype=jnp.float32)
w = jax.random.normal(k2, (embed_dim,), dtype=jnp.float32)
b = jax.random.normal(k3, (embed_dim,), dtype=jnp.float32)
def f(x, w, b):
return rms_norm.rms_norm(x, w, b).sum()
def f_ref(x, w, b):
return rms_norm.rms_norm_reference(x, w, b).sum()
dx, dw, db = jax.grad(f, argnums=(0, 1, 2))(x, w, b)
dx_ref, dw_ref, db_ref = jax.grad(f_ref, argnums=(0, 1, 2))(x, w, b)
np.testing.assert_allclose(dx, dx_ref, rtol=1e-6, atol=1e-6)
np.testing.assert_allclose(dw, dw_ref, rtol=1e-2, atol=1e-2)
np.testing.assert_allclose(db, db_ref, rtol=1e-2, atol=1e-2)
class SoftmaxTest(parameterized.TestCase):
@parameterized.parameters(
(shape, dtype)
for shape in [(1024, 125), (4, 1024, 125)]
for dtype in (jnp.bfloat16, jnp.float16, jnp.float32)
)
def test_softmax(self, shape, dtype):
# TODO(bchetioui): add Triton bug reference when filed
if dtype == jnp.bfloat16:
raise absltest.SkipTest("Disabled due to Triton lowering bug")
x = jax.random.normal(random.PRNGKey(0), shape, dtype=dtype)
atol, rtol = {
jnp.bfloat16: (1e-2, 1e-4),
jnp.float16: (1e-2, 1e-4),
jnp.float32: (1e-7, 1e-6),
}[dtype]
np.testing.assert_allclose(
softmax.softmax(x, axis=-1),
jax.nn.softmax(x, axis=-1),
atol=atol,
rtol=rtol,
)
if __name__ == "__main__":
absltest.main()
| jax-triton-main | tests/pallas_test.py |
# Copyright 2023 The jax_triton Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for calling Triton kernels from JAX."""
import functools
import os
import types
from typing import Any, Callable, Dict, Optional, Protocol, Sequence, Tuple, Union
import zlib
from absl import logging
import jax
import jaxlib
from jax import tree_util
from jax._src import core
from jax._src import state
from jax._src import util
from jax._src.lib.mlir import ir
import jax.dlpack
from jax.interpreters import mlir
from jax.interpreters import xla
from jax.lib import xla_client as xc
import jax.numpy as jnp
from jax_triton import utils
import numpy as np
CAN_USE_TRITON = False
try:
import triton
from triton.compiler import code_generator as code_gen
from triton.compiler import compiler as tc
import triton.language as tl
from triton.runtime import autotuner
import triton._C.libtriton.triton as _triton
CAN_USE_TRITON = True
except ModuleNotFoundError:
pass
try:
from jax._src.lib import gpu_triton as triton_kernel_call_lib
except ImportError:
raise ValueError(
"Cannot import jaxlib triton library. You may need a newer version of jaxlib. Try installing a nightly wheel from: https://storage.googleapis.com/jax-releases/jaxlib_nightly_cuda_releases.html or https://storage.googleapis.com/jax-releases/jaxlib_nightly_cuda12_releases.html"
)
os.environ["TRITON_CACHE_DIR"] = ""
map, unsafe_map = util.safe_map, map
zip, unsafe_zip = util.safe_zip, zip
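# `util.safe_map`/`util.safe_zip` raise if their arguments have mismatched
# lengths; the original builtins remain available as `unsafe_map`/`unsafe_zip`.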
_JAX_TO_TRITON_TYPE_MAP = {
jnp.dtype("bfloat16"): "bf16",
jnp.dtype("float64"): "fp64",
jnp.dtype("float32"): "fp32",
jnp.dtype("float16"): "fp16",
# Triton has 'fp8' as well which Jax doesn't support yet.
jnp.dtype("int64"): "i64",
jnp.dtype("int32"): "i32",
jnp.dtype("int16"): "i16",
jnp.dtype("int8"): "i8",
jnp.dtype("uint64"): "u64",
jnp.dtype("uint32"): "u32",
jnp.dtype("uint16"): "u16",
jnp.dtype("uint8"): "u8",
# Triton defines a 'B' type, which is an alias for both i1 and bool.
jnp.dtype("bool"): "B",
}
def get_triton_type(obj: Any) -> str:
if isinstance(obj, (jax.core.ShapedArray, state.AbstractRef)):
return f"*{_JAX_TO_TRITON_TYPE_MAP[obj.dtype]}"
if isinstance(obj, tl.constexpr):
obj = obj.value
# Check `bool` before `int`: Python bools are instances of `int`, so the
# integer branch below would otherwise match first and type them as "i32"
# instead of Triton's boolean type "B".
if isinstance(obj, bool):
return "B"
if isinstance(obj, int):
if -2**31 <= obj < 2**31:
return "i32"
elif 2**31 <= obj < 2**32:
return "u32"
elif -2**63 <= obj < 2**63:
return "i64"
elif 2**63 <= obj < 2**64:
return "u64"
else:
raise ValueError(f"integer overflow representing {obj}")
if isinstance(obj, float):
return "f"
if isinstance(obj, str):
return "str"
raise NotImplementedError(
f"could not compute type name for {obj}: {type(obj)}"
)
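# Illustrative examples: a float32 ShapedArray maps to the pointer type
# "*fp32", the Python int 7 to "i32", and 2**40 to "i64", per the branches
# above.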
Grid = Union[int, Tuple[int], Tuple[int, int], Tuple[int, int, int]]
GridOrLambda = Union[Grid, Callable[[Dict[str, Any]], Grid]]
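# A callable grid is resolved against the metaparams at lowering time via
# `utils.normalize_grid`, e.g. (illustrative, `size` assumed in scope):
#   grid=lambda meta: (triton.cdiv(size, meta["block_size"]),)
# for a 1D launch over `size` elements.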
triton_kernel_call_p = jax.core.Primitive("triton_kernel_call")
triton_kernel_call_p.multiple_results = True
triton_kernel_call_p.def_impl(
functools.partial(xla.apply_primitive, triton_kernel_call_p))
@triton_kernel_call_p.def_abstract_eval
def triton_kernel_call_abstract_eval(*_, out_shapes, **__):
return [
core.ShapedArray(out_shape.shape, out_shape.dtype)
for out_shape in out_shapes
]
def aval_size_bytes(aval):
return np.dtype(aval.dtype).itemsize * aval.size
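# E.g. a (128, 64) float32 aval occupies 4 * 128 * 64 = 32768 bytes.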
def ptx_get_kernel_name(ptx: str) -> str:
# The kernel name follows the `// .globl` marker in the generated PTX text
# (this helper is called with PTX, not an MLIR module).
return tc.get_kernel_name(ptx, pattern="// .globl")
def compile_ttir_to_ptx_inplace(
ttir,
device: int = 0,
num_warps: int = 4,
num_stages: Optional[int] = None,
dump: bool = False,
) -> Tuple[str, str, int, int]:
compute_capability = triton_kernel_call_lib.get_compute_capability(device)
if num_stages is None:
num_stages = 3 if compute_capability >= 75 else 2
if dump:
print(ttir)
try:
ttir = tc.optimize_ttir(ttir, compute_capability)
ttgir = tc.ttir_to_ttgir(ttir, num_warps)
ttgir = tc.optimize_ttgir(ttgir, num_stages, compute_capability)
except RuntimeError as e:
ttir.dump()
raise ValueError("TTIR->TTGIR pass failed!") from e
if dump:
print(ttgir)
extern_libs = {}
try:
llir = tc.ttgir_to_llir(ttgir, extern_libs, compute_capability)
except RuntimeError as e:
ttgir.dump()
raise ValueError("TTGIR->LLIR pass failed!") from e
shared_mem_bytes = _triton.get_shared_memory_size(ttgir)
if dump:
print(llir)
ptx = tc.llir_to_ptx(llir, compute_capability)
if dump:
print(ptx)
name = ptx_get_kernel_name(ptx)
return ptx, name, shared_mem_bytes, compute_capability
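# The function above lowers Triton IR stage by stage (TTIR -> TTGIR -> LLIR ->
# PTX), extracting the kernel name and shared-memory usage that the kernel-call
# object below needs.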
_COMPILED_KERNEL_CACHE = {} # TODO(cjfj): Convert to LRU cache?
def get_or_create_triton_kernel(
fn,
arg_dtypes,
scalar_args,
*,
num_warps,
num_stages,
metaparams,
dump: bool,
) -> Tuple[triton_kernel_call_lib.TritonKernel, Any]:
signature = dict(enumerate(arg_dtypes))
# TODO(sharadmv,zhangqiaorjc): handle differently aligned pointers
# We assume that all arrays are aligned to 16 bytes, and Triton may use this
# assumption, unless array args are included in the `do_not_specialize` list.
# We replace array arguments with mock Torch tensors, to allow us to use
# `JITFunction._get_config` to get the specialization.
mock_torch_tensor = types.SimpleNamespace(data_ptr=lambda: 16)
args_for_specialization = [mock_torch_tensor] * len(arg_dtypes)
for i, _, v in scalar_args:
args_for_specialization[i] = v
specialization = fn._get_config(*args_for_specialization) # pylint: disable=protected-access
constants = {fn.arg_names.index(k): v for k, v in metaparams.items()}
constants.update({i: None for i, _, v in scalar_args if v is None})
constants.update({i: 1 for i in specialization.equal_to_1})
# Cache key should contain any parameter that can affect the compiler output.
cache_key = (
fn,
tuple(signature.items()),
specialization,
tuple(constants.items()),
num_warps,
num_stages,
)
kernel = _COMPILED_KERNEL_CACHE.get(cache_key)
if kernel is None:
# TODO(sharadmv): handle multiple devices, right now we assume device 0
# which is fine when we have multiple of the same GPU but this won't work in
# general.
device = 0
arch = triton_kernel_call_lib.get_compute_capability(device)
module = code_gen.ast_to_ttir(
fn, signature, specialization, constants, debug=dump, arch=arch
)
ttir = str(module)  # `module` is compiled in-place, so copy the TTIR here.
ptx, kernel_name, shared_mem_bytes, compute_capability = (
compile_ttir_to_ptx_inplace(
module,
device=device,
num_warps=num_warps,
num_stages=num_stages,
dump=dump,
)
)
kernel = triton_kernel_call_lib.TritonKernel(
kernel_name, num_warps, shared_mem_bytes, ptx, ttir, compute_capability
)
_COMPILED_KERNEL_CACHE[cache_key] = kernel
return kernel, specialization
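# Note: `num_warps` and `num_stages` are part of the cache key, so two calls
# that differ only in those launch parameters compile (and cache) separate
# kernels.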
def triton_kernel_call_lowering(
ctx,
*array_args,
fn,
scalar_args,
call_name,
out_shapes,
grid,
num_warps,
num_stages,
input_output_aliases,
zeroed_outputs,
debug,
serialized_metadata,
**metaparams,
):
if jaxlib.version.__version_info__ < (0, 3, 22) and input_output_aliases:
raise NotImplementedError(
"`input_output_aliases` only supported on `jaxlib>=0.3.22")
args = list(ctx.avals_in)
arg_dtypes = list(map(get_triton_type, ctx.avals_in))
for idx, dtype, v in scalar_args:
args.insert(idx, v)
arg_dtypes.insert(idx, dtype)
args.extend(ctx.avals_out)
arg_dtypes.extend(map(get_triton_type, ctx.avals_out))
named_args = dict(unsafe_zip(fn.arg_names, args))
if isinstance(fn, autotuner.Autotuner):
if any(idx not in fn.key_idx for idx, _, _ in scalar_args):
logging.warning(
"Auto-tuning key does not include all scalar arguments. "
"We may perform redundant auto-tuning."
)
# If any metaparams have been specified explicitly, we prune any configs
# that conflict. Note that this is more permissive than Triton's autotuner
# implementation, which will throw an error if any keys match.
# TODO(cjfj): Prune explicit `num_warps` / `num_stages`.
prev_early_config_prune_fn = fn.early_config_prune
def prune_configs(configs, named_args):
pruned_configs = []
for config in configs:
if config.pre_hook is not None:
raise NotImplementedError("`pre_hook` is not supported")
if all(config.kwargs.get(k, v) == v for k, v in metaparams.items()):
pruned_configs.append(config)
if prev_early_config_prune_fn is not None:
pruned_configs = prev_early_config_prune_fn(pruned_configs, named_args)
return pruned_configs
fn.early_config_prune = prune_configs
fn.nargs = named_args
configs = fn.prune_configs(metaparams)
fn = fn.fn
else:
configs = [triton.Config({}, num_warps=num_warps, num_stages=num_stages)]
if isinstance(fn, autotuner.Heuristics):
updated_configs = []
for config in configs:
kwargs = config.kwargs.copy()
for name, heuristic in fn.values.items():
kwargs[name] = heuristic({**named_args, **metaparams, **kwargs})
updated_configs.append(
triton.Config(
kwargs, num_warps=config.num_warps, num_stages=config.num_stages
)
)
configs = updated_configs
fn = fn.fn
if not isinstance(fn, triton.JITFunction):
raise ValueError(
"`kernel` must be a Triton `JITFunction`, `Heuristics` or `Autotuner`."
)
outputs_offset = len(ctx.avals_in) + len(scalar_args)
config_params = []
for config in configs:
config_metaparams = {**metaparams, **config.kwargs}
config_grid = utils.normalize_grid(grid, config_metaparams)
config_zeroed_outputs = zeroed_outputs
if callable(zeroed_outputs):
config_zeroed_outputs = config_zeroed_outputs(config_metaparams)
zeroed_params_with_sizes = {
i + outputs_offset: aval_size_bytes(ctx.avals_out[i])
for i in sorted(config_zeroed_outputs)
}
config_params.append(
dict(
metaparams=tuple(sorted(config_metaparams.items())),
num_warps=config.num_warps,
num_stages=config.num_stages,
grid=config_grid,
zeroed_params_with_sizes=tuple(zeroed_params_with_sizes.items()),
)
)
kernel_calls = []
for params in config_params:
kernel, specialization = get_or_create_triton_kernel(
fn,
arg_dtypes,
scalar_args,
num_warps=params["num_warps"],
num_stages=params["num_stages"],
metaparams=dict(params["metaparams"]),
dump=debug,
)
kernel_params = []
zeroed_params_with_sizes = dict(params["zeroed_params_with_sizes"])
for i, (arg, dtype) in enumerate(zip(args, arg_dtypes)):
if isinstance(arg, core.ShapedArray):
kernel_params.append(
triton_kernel_call_lib.create_array_parameter(
zeroed_params_with_sizes.get(i, 0),
16 if (i in specialization.divisible_by_16) else 0,
)
)
elif i not in specialization.equal_to_1:
kernel_params.append(
triton_kernel_call_lib.create_scalar_parameter(arg, dtype)
)
kernel_calls.append(
triton_kernel_call_lib.TritonKernelCall(
kernel,
params["grid"][0],
params["grid"][1],
params["grid"][2],
kernel_params,
)
)
if len(kernel_calls) > 1:
named_scalar_args = {fn.arg_names[i]: v for i, _, v in scalar_args}
input_output_aliases_with_sizes = tuple(
(input_idx, output_idx, aval_size_bytes(ctx.avals_in[input_idx]))
for input_idx, output_idx in input_output_aliases
)
kernel_call = triton_kernel_call_lib.TritonAutotunedKernelCall(
f"{fn.fn.__name__} ({call_name=}) {named_scalar_args}",
[(call, str(config)) for call, config in zip(kernel_calls, configs)],
input_output_aliases_with_sizes,
)
else:
kernel_call = kernel_calls[0]
out_types = [
ir.RankedTensorType.get(shape.shape, mlir.dtype_to_ir_type(shape.dtype))
for shape in out_shapes
]
return jaxlib.hlo_helpers.custom_call(
call_target_name=call_name,
out_types=out_types,
operands=array_args,
backend_config=zlib.compress(kernel_call.to_proto(serialized_metadata)),
operand_layouts=utils.avals_to_layouts(ctx.avals_in),
result_layouts=utils.avals_to_layouts(ctx.avals_out),
operand_output_aliases=dict(input_output_aliases),
)
mlir.register_lowering(triton_kernel_call_p, triton_kernel_call_lowering)
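# Registering the MLIR lowering is what lets `triton_kernel_call_p` trace and
# compile under `jax.jit`.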
class ShapeDtype(Protocol):
@property
def shape(self) -> Tuple[int, ...]:
...
@property
def dtype(self) -> np.dtype:
...
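# Any object exposing `.shape` and `.dtype` (e.g. `jax.ShapeDtypeStruct`)
# satisfies this protocol.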
def triton_call(
*args: Union[jax.Array, bool, int, float],
kernel: triton.JITFunction,
out_shape: Union[ShapeDtype, Sequence[ShapeDtype]],
grid: GridOrLambda,
call_name: str = "triton_kernel_call",
num_warps: int = 4,
num_stages: int = 2,
input_output_aliases: Optional[Dict[int, int]] = None,
zeroed_outputs: Union[
Sequence[int], Callable[[Dict[str, Any]], Sequence[int]]
] = (),
debug: bool = False,
serialized_metadata: bytes = b"",
**metaparams: Any,
) -> Any:
"""Calls a Triton kernel with `jax.Array` arguments.
Example usage:
First we define a simple kernel that adds two vectors.
```python
import triton
import triton.language as tl
@triton.jit
def add_kernel(
x_ptr,
y_ptr,
output_ptr,
block_size: tl.constexpr,
):
pid = tl.program_id(axis=0)
block_start = pid * block_size
offsets = block_start + tl.arange(0, block_size)
mask = offsets < 8
x = tl.load(x_ptr + offsets, mask=mask)
y = tl.load(y_ptr + offsets, mask=mask)
output = x + y
tl.store(output_ptr + offsets, output, mask=mask)
```
Then we use `triton_call` to call it from JAX.
```python
import jax
import jax.numpy as jnp
import jax_triton as jt
def add(x: jnp.ndarray, y: jnp.ndarray) -> jnp.ndarray:
out_shape = jax.ShapeDtypeStruct(shape=x.shape, dtype=x.dtype)
block_size = 8
return jt.triton_call(
x,
y,
kernel=add_kernel,
out_shape=out_shape,
grid=(x.size // block_size,),
block_size=block_size)
x_val = jnp.arange(8)
y_val = jnp.arange(8, 16)
print(add(x_val, y_val))
print(jax.jit(add)(x_val, y_val))
```
Args:
*args: Inputs for the Triton kernel.
kernel: A Triton kernel (e.g. a function decorated with `triton.jit`). All
static values should be annotated with `triton.language.constexpr`.
out_shape: A `jax.ShapeDtypeStruct` (or something that has `.shape` and
`.dtype` attributes) or a sequence thereof that specify the output(s) of
the kernel. Pointers for each of the `jax.ShapeDtypeStruct`s in
`out_shape` will be passed into `kernel` following the input parameters.
grid: An integer, tuple of up to 3 integers, or a function that returns a
tuple of up to 3 integers. When `grid` is an integer, `kernel` is
invoked in `grid`-many parallel executions. When `grid` is a sequence of
integers, `kernel` is launched with `prod(grid)`-many parallel executions.
When `grid` is a function, it is passed `**metaparams` and should return a
tuple of up to 3 integers.
input_output_aliases: A dictionary mapping input argument indices to output
indices. Providing a mapping will alias the corresponding buffers.
zeroed_outputs: A sequence of indices, or a function returning a sequence of
indices, for outputs that should be zeroed before the kernel is launched.
num_warps: The number of warps used to execute the Triton kernel.
num_stages: The number of stages emitted by the Triton compiler.
debug: Prints out intermediate IRs if True for debugging purposes.
serialized_metadata: Arbitrary metadata that will be added into the
serialized kernel call.
**metaparams: Additional keyword arguments that will be provided to a `grid`
(if it is a function) and to the Triton kernel as `constexpr` arguments.
Returns:
Outputs from the Triton kernel.
"""
if not CAN_USE_TRITON:
raise ValueError(
"`triton_call` is only available when `triton` is installed."
)
xc.register_custom_call_target(
call_name, triton_kernel_call_lib.get_custom_call(), platform="CUDA"
)
out_shape = tree_util.tree_map(
lambda a: jax.ShapeDtypeStruct(a.shape, a.dtype), out_shape)
flat_args, _ = tree_util.tree_flatten(args)
# TODO(sharadmv): check in_tree is flat (no Pytrees allowed in triton_call)
flat_out_shapes, out_tree = tree_util.tree_flatten(out_shape)
array_args = []
scalar_args = []
for i, arg in enumerate(flat_args):
if isinstance(arg, (bool, int, float)):
scalar_args.append((i, get_triton_type(arg), arg))
else:
array_args.append(arg)
if input_output_aliases is None:
input_output_aliases = {}
out_flat = triton_kernel_call_p.bind(
*array_args,
fn=kernel,
scalar_args=tuple(scalar_args),
call_name=call_name,
out_shapes=tuple(flat_out_shapes),
grid=grid,
num_warps=num_warps,
num_stages=num_stages,
input_output_aliases=tuple(input_output_aliases.items()),
zeroed_outputs=zeroed_outputs,
debug=debug,
serialized_metadata=serialized_metadata,
**metaparams,
)
return tree_util.tree_unflatten(out_tree, out_flat)
| jax-triton-main | jax_triton/triton_lib.py |