python_code (stringlengths 0–780k) | repo_name (stringlengths 7–38) | file_path (stringlengths 5–103)
---|---|---|
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tensorflow op that clips gradient for backwards pass."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
@tf.custom_gradient
def _clip_gradient(x, clip_value_min, clip_value_max):
def grad(dy):
return tf.clip_by_value(dy, clip_value_min, clip_value_max), None, None
return x, grad
def clip_gradient(net, clip_value_min, clip_value_max, name=None):
"""Clips respective gradients of a given tensor.
Acts as identity for the forward pass, but clips gradient tensor element-wise
by value during the backward pass. Any gradient values less than
`clip_value_min` or greater than `clip_value_max` are set to the respective
limit values.
Args:
net: A `tf.Tensor`.
clip_value_min: A 0-D Tensor or scalar. The minimum value to clip by.
clip_value_max: A 0-D Tensor or scalar. The maximum value to clip by.
name: A name for the operation (optional, default 'clip_gradient').
Returns:
A `tf.Tensor` with the same type as the input tensor.
Raises:
ValueError: If `net` dtype is non-float.
"""
if not net.dtype.is_floating:
raise ValueError("clip_gradient does not support non-float `net` inputs.")
with tf.name_scope(name, "clip_gradient", values=[net]):
dtype = net.dtype.base_dtype # Convert ref dtypes to regular dtypes.
min_tensor = tf.convert_to_tensor(clip_value_min, dtype=dtype)
max_tensor = tf.convert_to_tensor(clip_value_max, dtype=dtype)
output = _clip_gradient(net, min_tensor, max_tensor)
return output
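# Illustrative usage sketch, not part of the library; the helper name below is
# hypothetical. It shows clip_gradient acting as an identity on the forward
# pass while clipping the incoming gradient element-wise on the backward pass.
def _example_clip_gradient_usage():
  x = tf.placeholder(tf.float32, shape=[None, 4])
  y = clip_gradient(x, clip_value_min=-0.1, clip_value_max=0.1)
  loss = tf.reduce_sum(tf.square(y))
  # The gradient flowing back into `x` is clipped to [-0.1, 0.1] element-wise,
  # while `y` itself equals `x` on the forward pass.
  grad_x = tf.gradients(loss, x)[0]
  return y, grad_x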
| sonnet-1 | sonnet/python/modules/clip_gradient.py |
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Basic Modules for TensorFlow snt.
Modules defining the simplest building blocks for Neural Networks.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numbers
# Dependency imports
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from sonnet.python.modules import base
from sonnet.python.modules import util
import tensorflow.compat.v1 as tf
from tensorflow.contrib import framework as contrib_framework
nest = contrib_framework.nest
def merge_leading_dims(array_or_tensor, n_dims=2):
"""Merge the first dimensions of a tensor.
Args:
array_or_tensor: Tensor to have its first dimensions merged. Can also
be an array or numerical value, which will be converted to a tensor
for batch application, if needed.
n_dims: Number of dimensions to merge.
Returns:
Either the input value converted to a Tensor, with the requested dimensions
merged, or the unmodified input value if the input has fewer than `n_dims`
dimensions.
Raises:
ValueError: If the rank of `array_or_tensor` is not well-defined.
"""
tensor = tf.convert_to_tensor(array_or_tensor)
tensor_shape_static = tensor.get_shape()
# Check if the rank of the input tensor is well-defined.
if tensor_shape_static.dims is None:
raise ValueError("Can't merge leading dimensions of tensor of unknown "
"rank.")
tensor_shape_list = tensor_shape_static.as_list()
# We can only merge the n_dims leading dimensions if the rank of the given
# tensor is sufficiently large.
if n_dims > len(tensor_shape_list):
return array_or_tensor
if tensor_shape_static.is_fully_defined():
new_shape = (
[np.prod(tensor_shape_list[:n_dims])] + tensor_shape_list[n_dims:])
return tf.reshape(tensor, new_shape)
# Shape can't be inferred statically.
tensor_shape = tf.shape(tensor)
new_first_dim = tf.reduce_prod(tensor_shape[:n_dims], keepdims=True)
other_dims = tensor_shape[n_dims:]
new_size = tf.concat([new_first_dim, other_dims], 0)
result = tf.reshape(tensor, new_size)
if all(value is not None for value in tensor_shape_list[:n_dims]):
merged_leading_size = np.prod(tensor_shape_list[:n_dims])
else:
merged_leading_size = None
# We need to set the result size of this, as otherwise we won't be able to
# pass to e.g. a Linear. Here we need to know at least the rank of the tensor.
result.set_shape([merged_leading_size] + tensor_shape_list[n_dims:])
return result
def split_leading_dim(tensor, inputs, n_dims=2):
"""Split the first dimension of a tensor.
Args:
tensor: Tensor to have its first dimension split.
inputs: Original reference input to look up the dimensions of.
n_dims: Number of dimensions to split.
Returns:
The input tensor, with its first dimension split.
"""
input_shape_static = inputs.get_shape()
input_shape_list = input_shape_static.as_list()
tensor_shape_static = tensor.get_shape()
tensor_shape_list = tensor_shape_static.as_list()
if (input_shape_static.is_fully_defined()
and tensor_shape_static.is_fully_defined()):
new_shape = input_shape_list[:n_dims] + tensor_shape_list[1:]
return tf.reshape(tensor, new_shape)
# Shape can't be inferred statically.
dims_after_first = tf.shape(tensor)[1:]
split_sizes = tf.shape(inputs)[:n_dims]
known_split_sizes = input_shape_list[:n_dims]
known_dims_after_first = tensor_shape_list[1:]
output_size = tf.concat([split_sizes, dims_after_first], 0)
result = tf.reshape(tensor, output_size)
result.set_shape(known_split_sizes + known_dims_after_first)
return result
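# Illustrative sketch, not part of the library; the helper name is
# hypothetical. merge_leading_dims and split_leading_dim are designed to be
# used as a pair: collapse [T, B, N] into [T*B, N], apply a rank-2 op, then
# restore the original leading dimensions from the reference input.
def _example_merge_then_split():
  sequence = tf.placeholder(tf.float32, shape=[7, 3, 5])      # [T, B, N]
  merged = merge_leading_dims(sequence, n_dims=2)              # [21, 5]
  processed = merged * 2.0                                     # any rank-2 op
  restored = split_leading_dim(processed, sequence, n_dims=2)  # [7, 3, 5]
  return restored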
def create_linear_initializer(input_size, dtype=tf.float32):
"""Returns a default initializer for weights of a linear module."""
stddev = 1 / math.sqrt(input_size)
return tf.truncated_normal_initializer(stddev=stddev, dtype=dtype)
def create_bias_initializer(unused_bias_shape, dtype=tf.float32):
"""Returns a default initializer for the biases of a linear/AddBias module."""
return tf.zeros_initializer(dtype=dtype)
class Linear(base.AbstractModule, base.Transposable):
"""Linear module, optionally including bias."""
def __init__(self,
output_size,
use_bias=True,
initializers=None,
partitioners=None,
regularizers=None,
custom_getter=None,
allow_many_batch_dims=False,
name="linear"):
"""Constructs a linear module.
Linear map from `[batch_size, input_size]` -> `[batch_size, output_size]`.
One can also use this for inputs with multiple batch dimensions, by setting
`allow_many_batch_dims=True`. Then this maps
`[batch_dim_1, ..., batch_dim_{n-1}, input_size]` ->
`[batch_dim_1, ..., batch_dim_{n-1}, output_size]`.
In `allow_many_batch_dims` mode, this operation is equivalent to flattening
the n-1 leading dimensions of the input and applying a 2-D linear. However,
it avoids the explicit flatten and reshape operations.
Args:
output_size: Output dimensionality. `output_size` can be either an integer
or a callable. In the latter case, since the function invocation is
deferred to graph construction time, the user must only ensure that
output_size can be called, returning an integer, when build is called.
use_bias: Whether to include bias parameters. Default `True`.
initializers: Optional dict containing initializers to initialize the
weights (with key 'w') or biases (with key 'b'). The default
initializer for the weights is a truncated normal initializer, which
is commonly used when the inputs are zero centered (see
https://arxiv.org/pdf/1502.03167v3.pdf). The default initializer for
the bias is a zero initializer.
partitioners: Optional dict containing partitioners to partition
weights (with key 'w') or biases (with key 'b'). As a default, no
partitioners are used.
regularizers: Optional dict containing regularizers for the weights
(with key 'w') and the biases (with key 'b'). As a default, no
regularizers are used. A regularizer should be a function that takes
a single `Tensor` as an input and returns a scalar `Tensor` output, e.g.
the L1 and L2 regularizers in `tf.contrib.layers`.
custom_getter: Callable or dictionary of callables to use as
custom getters inside the module. If a dictionary, the keys
correspond to regexes to match variable names. See the `tf.get_variable`
documentation for information about the custom_getter API.
allow_many_batch_dims: If true, enables the use of a batched matmul over
higher-dimensional inputs. The linear is applied to the final dimension.
Namely, instead of a `[batch_size, dim]` 2-D input, one can have a
`[batch_dim_1, batch_dim_2, ..., batch_dim_{N-1}, input_size]` N-D
input which will be mapped to an N-D
`[batch_dim_1, batch_dim_2, ..., batch_dim_{N-1}, output_size]` output.
name: Name of the module.
Raises:
KeyError: If `initializers`, `partitioners` or `regularizers` contains any
keys other than 'w' or 'b'.
TypeError: If any of the given initializers, partitioners or regularizers
are not callable.
"""
super(Linear, self).__init__(custom_getter=custom_getter, name=name)
self._output_size = output_size
self._use_bias = use_bias
self._input_shape = None
self._w = None
self._b = None
self._allow_many_batch_dims = allow_many_batch_dims
self.possible_keys = self.get_possible_initializer_keys(use_bias=use_bias)
self._initializers = util.check_initializers(
initializers, self.possible_keys)
self._partitioners = util.check_partitioners(
partitioners, self.possible_keys)
self._regularizers = util.check_regularizers(
regularizers, self.possible_keys)
@classmethod
def get_possible_initializer_keys(cls, use_bias=True):
return {"w", "b"} if use_bias else {"w"}
def _build(self, inputs):
"""Connects the Linear module into the graph, with input Tensor `inputs`.
Linear maps the final dimension.
If this is not the first time the module has been connected to the graph,
the Tensor provided here must have the same final dimension, in order for
the existing variables to be the correct size for the multiplication. The
batch size may differ for each connection.
Args:
inputs: An N-D Tensor of size [dim_1, ..., dim_{N-1}, input_size].
Returns:
An N-D Tensor of size [dim_1, ..., dim_{N-1}, output_size].
Raises:
base.IncompatibleShapeError: If the input is not a 2-D `Tensor` with
the size of the second dimension specified and `allow_many_batch_dims`
is set to False.
base.IncompatibleShapeError: If reconnecting an already connected module
into the graph, and the shape of the input is not compatible with
previous inputs.
"""
input_shape = tuple(inputs.get_shape().as_list())
if len(input_shape) != 2 and not self._allow_many_batch_dims:
raise base.IncompatibleShapeError(
"{}: rank of shape must be 2 not: {}. To apply a batched linear "
"over higher-dimensional inputs, set `allow_many_batch_dims=True`"
.format(self.scope_name, len(input_shape)))
input_size = input_shape[-1]
if input_size is None:
raise base.IncompatibleShapeError(
"{}: Input size must be specified at module build time".format(
self.scope_name))
if self._input_shape is not None and (input_size != self._input_shape[-1]):
if len(self._input_shape) > 2:
raise base.IncompatibleShapeError(
"{}: Input shape must be [..., {}] not: [..., {}]"
.format(self.scope_name, self._input_shape[-1], input_size))
else:
raise base.IncompatibleShapeError(
"{}: Input shape must be [B, {}] not [B, {}]"
.format(self.scope_name, self._input_shape[-1], input_size))
self._input_shape = input_shape
dtype = inputs.dtype
if "w" not in self._initializers:
self._initializers["w"] = create_linear_initializer(input_size,
dtype)
if "b" not in self._initializers and self._use_bias:
self._initializers["b"] = create_bias_initializer(input_size,
dtype)
weight_shape = (input_size, self.output_size)
self._w = tf.get_variable("w",
shape=weight_shape,
dtype=dtype,
initializer=self._initializers["w"],
partitioner=self._partitioners.get("w", None),
regularizer=self._regularizers.get("w", None))
outputs = tf.matmul(inputs, self._w)
if self._use_bias:
bias_shape = (self.output_size,)
self._b = tf.get_variable("b",
shape=bias_shape,
dtype=dtype,
initializer=self._initializers["b"],
partitioner=self._partitioners.get("b", None),
regularizer=self._regularizers.get("b", None))
outputs += self._b
return outputs
@property
def w(self):
"""Returns the Variable containing the weight matrix.
Returns:
Variable object containing the weights, from the most recent __call__.
Raises:
base.NotConnectedError: If the module has not been connected to the
graph yet, meaning the variables do not exist.
"""
self._ensure_is_connected()
return self._w
@property
def b(self):
"""Returns the Variable containing the bias.
Returns:
Variable object containing the bias, from the most recent __call__.
Raises:
base.NotConnectedError: If the module has not been connected to the
graph yet, meaning the variables do not exist.
AttributeError: If the module does not use bias.
"""
self._ensure_is_connected()
if not self._use_bias:
raise AttributeError(
"No bias Variable in Linear Module when `use_bias=False`.")
return self._b
@property
def output_size(self):
"""Returns the module output size."""
if callable(self._output_size):
self._output_size = self._output_size()
return self._output_size
@property
def has_bias(self):
"""Returns `True` if bias Variable is present in the module."""
return self._use_bias
@property
def initializers(self):
"""Returns the initializers dictionary."""
return self._initializers
@property
def partitioners(self):
"""Returns the partitioners dictionary."""
return self._partitioners
@property
def regularizers(self):
"""Returns the regularizers dictionary."""
return self._regularizers
def clone(self, name=None):
"""Returns a cloned `Linear` module.
Args:
name: Optional string assigning name of cloned module. The default name
is constructed by appending "_clone" to `self.module_name`.
Returns:
Cloned `Linear` module.
"""
if name is None:
name = self.module_name + "_clone"
return Linear(output_size=self.output_size,
use_bias=self._use_bias,
initializers=self._initializers,
partitioners=self._partitioners,
regularizers=self._regularizers,
name=name)
# Implements Transposable interface.
@property
def input_shape(self):
"""Returns shape of input `Tensor` passed at last call to `build`."""
self._ensure_is_connected()
return self._input_shape
# Implements Transposable interface
def transpose(self, name=None):
"""Returns transposed `Linear` module.
Args:
name: Optional string assigning name of transpose module. The default name
is constructed by appending "_transpose" to `self.module_name`.
Returns:
Transposed `Linear` module.
"""
if name is None:
name = self.module_name + "_transpose"
return Linear(output_size=lambda: self.input_shape[-1],
use_bias=self._use_bias,
initializers=self._initializers,
partitioners=self._partitioners,
regularizers=self._regularizers,
name=name)
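# Illustrative usage sketch, not part of the library; the helper name is
# hypothetical. A Linear creates its variables on first connection, reuses
# them on later connections, and its transpose maps back to the input size.
def _example_linear_usage():
  linear = Linear(output_size=16)
  inputs = tf.placeholder(tf.float32, shape=[None, 8])
  outputs = linear(inputs)        # [batch, 16]; creates `w` (and `b`)
  outputs_again = linear(inputs)  # reuses the same variables
  linear_t = linear.transpose()   # output_size deferred to self.input_shape[-1]
  reconstructed = linear_t(outputs)  # [batch, 8]
  return outputs, outputs_again, reconstructed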
class ConcatLinear(base.AbstractModule):
"""Linear transformation of a number of concatenated inputs.
This class ensures that, at initialisation, the relative importance of all
inputs is similar even if they have very different sizes. This assumes
that all inputs have roughly the same range of values.
For example, the following code also concatenates a list of inputs and applies
a linear transform:
```
inp = tf.concat(input_list, axis=-1)
return snt.Linear(output_size)(inp)
```
The issue with the above code is that if `input_list` is made of two Tensors
of very different shapes such as `[batch_size, 1]` and `[batch_size, 128]`,
then almost no signal will be received from the first Tensor. This class works
around this problem by using a weight matrix with relatively larger
coefficients for the first Tensor than for the second one.
"""
def __init__(self,
output_size,
use_bias=True,
initializers=None,
partitioners=None,
regularizers=None,
custom_getter=None,
name="concat_linear"):
"""Constructs a ConcatLinear module.
Args:
output_size: Output dimensionality. `output_size` can be either an integer
or a callable. In the latter case, since the function invocation is
deferred to graph construction time, the user must only ensure that
output_size can be called, returning an integer, when build is called.
use_bias: Whether to include bias parameters. Default `True`.
initializers: Optional dict containing initializers to initialize the
weights (with key 'w') or biases (with key 'b'). The default
initializer for the weights is a truncated normal initializer, which
is commonly used when the inputs are zero centered (see
https://arxiv.org/pdf/1502.03167v3.pdf). The default initializer for
the bias is a zero initializer.
partitioners: Optional dict containing partitioners to partition
weights (with key 'w') or biases (with key 'b'). As a default, no
partitioners are used.
regularizers: Optional dict containing regularizers for the weights
(with key 'w') and the biases (with key 'b'). As a default, no
regularizers are used. A regularizer should be a function that takes
a single `Tensor` as an input and returns a scalar `Tensor` output, e.g.
the L1 and L2 regularizers in `tf.contrib.layers`.
custom_getter: Callable or dictionary of callables to use as
custom getters inside the module. If a dictionary, the keys
correspond to regexes to match variable names. See the `tf.get_variable`
documentation for information about the custom_getter API.
name: Name of the module.
"""
super(ConcatLinear, self).__init__(name=name, custom_getter=custom_getter)
self._output_size = output_size
self._use_bias = use_bias
self._initializers = initializers
self._partitioners = partitioners
self._regularizers = regularizers
def _build(self, inputs_list):
"""Connects the module into the graph.
If this is not the first time the module has been connected to the graph,
the Tensors provided here must have the same final dimensions as when called
the first time, in order for the existing variables to be the correct size
for the multiplication. The batch size may differ for each connection.
Args:
inputs_list: A list of rank-2 Tensors, each with a leading batch dimension.
Returns:
A 2D Tensor of size [batch_size, output_size].
"""
outputs = []
for idx, tensor in enumerate(inputs_list):
outputs.append(
Linear(
self._output_size,
initializers=self._initializers,
partitioners=self._partitioners,
regularizers=self._regularizers,
# Since we are interpreting this as 'one big linear', we only need
# one bias.
use_bias=(idx == 0 and self._use_bias))(tensor))
return tf.add_n(outputs)
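# Illustrative usage sketch, not part of the library; the helper name is
# hypothetical. Each input gets its own Linear (initialised according to its
# own fan-in) and the results are summed, so at initialisation the 1-wide
# input is not drowned out by the 128-wide one as it would be with a plain
# concat followed by a single Linear.
def _example_concat_linear_usage():
  small = tf.placeholder(tf.float32, shape=[None, 1])
  large = tf.placeholder(tf.float32, shape=[None, 128])
  output = ConcatLinear(output_size=32)([small, large])  # [batch, 32]
  return output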
def calculate_bias_shape(input_shape, bias_dims):
"""Calculate `bias_shape` based on the `input_shape` and `bias_dims`.
Args:
input_shape: Shape of the input being passed into the module. The leading
dimension is the minibatch size.
bias_dims: The dimensions that bias should be applied over. The remaining
dimensions will get broadcasted over.
Returns:
bias_shape: Tuple corresponding to the shape of bias Variable to create.
Raises:
ValueError: If the user attempts to add bias over the minibatch dimension,
e.g. `bias_dims=[0]`.
"""
input_rank = len(input_shape)
# If None, default is to use all dimensions.
if bias_dims is None:
return input_shape[1:]
# If empty list, use a scalar bias.
elif not bias_dims:
return ()
# Otherwise, calculate bias_shape from bias_dims.
else:
bias_shape = [1] * input_rank
# Populate bias dimensions.
for dim in bias_dims:
dim %= input_rank
if dim == 0:
raise ValueError("Cannot apply bias across the minibatch dimension.")
bias_shape[dim] = input_shape[dim]
# Strip leading unit dimensions.
start = input_rank
for dim in xrange(1, input_rank):
if bias_shape[dim] != 1:
start = dim
break
return tuple(bias_shape[start:]) # Do not apply across minibatch dimension.
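# Worked example (illustrative only; the helper name is hypothetical): with a
# 4-D input and bias_dims=[1, 3], dimension 2 is broadcast over and the
# leading minibatch dimension is stripped, matching the AddBias docstring.
def _example_calculate_bias_shape():
  shape = calculate_bias_shape(input_shape=(32, 5, 7, 9), bias_dims=[1, 3])
  assert shape == (5, 1, 9)
  return shape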
class AddBias(base.AbstractModule, base.Transposable):
"""AddBias module."""
POSSIBLE_INITIALIZER_KEYS = {"b"}
def __init__(self,
output_shape=None,
bias_dims=None,
initializers=None,
partitioners=None,
regularizers=None,
name="add"):
"""Constructs an AddBias module that supports broadcasting.
Args:
output_shape: Output dimensionality. `output_shape` can be either `None`,
a `tuple`, or a `callable`. In the latter case, since the function
invocation is deferred to graph construction time, the user must only
ensure that `output_shape` can be called, returning a tuple, when
build is called. If `output_shape` is left as `None`, the size will be
directly inferred from the input.
bias_dims: List of which dimensions to retain from the input shape when
constructing the bias. The remaining dimensions will get broadcasted
over (given size of 1), and leading dimensions will be removed
completely. For example, for an input of [batch_size, dim1_size,
dim2_size, dim3_size] and `bias_dims=[1, 3]`, the resulting
bias will have shape [dim1_size, 1, dim3_size]. The default is to
retain all dimensions apart from the minibatch dimension. Trying to
retain the bias shape over the minibatch dimension, e.g.
`bias_dims=[0]`, will result in an error at build time. See the
'Example Usage' section below for more information.
initializers: Optional dict containing ops to initialize the biases
(with key 'b'). The default initializer for the bias is a zero
initializer.
partitioners: Optional dict containing a partitioner to partition
the bias (with key 'b'). As a default, no partitioner is used.
regularizers: Optional dict containing regularizers of the biases
(with key 'b'). As a default, no regularizers are used. A regularizer
should be a function that takes a single `Tensor` as an input and
returns a scalar `Tensor` output, e.g. the L1 and L2 regularizers in
`tf.contrib.layers`.
name: Name of the module.
Example Usage:
```python
# Create a 4D input Tensor.
input = tf.random_normal(
shape=(batch_size, dim1_size, dim2_size, dim3_size))
# Create a scalar bias:
scalar_bias = snt.AddBias(bias_dims=[])
scalar_bias_output = scalar_bias(input)
scalar_bias.b.get_shape() # ()
# Create a bias over all non-minibatch dimensions:
all_bias = snt.AddBias() # or snt.AddBias(bias_dims=None)
all_bias_output = all_bias(input)
all_bias.b.get_shape() # (dim1_size, dim2_size, dim3_size)
# Create a bias over the last non-minibatch dimension:
last_bias = snt.AddBias(bias_dims=[-1])
last_bias_output = last_bias(input)
last_bias.b.get_shape() # (dim3_size)
# Create a bias over the first non-minibatch dimension:
first_bias = snt.AddBias(bias_dims=[1])
first_bias_output = first_bias(input)
first_bias.b.get_shape() # (dim1_size, 1, 1)
# Subtract and later add the same learned bias:
bias = snt.AddBias()
hidden1 = bias(input, multiplier=-1)
# ...
reconstructed_input = bias(hidden4)
```
Raises:
KeyError: If `initializers` contains any keys other than 'b'.
KeyError: If `partitioners` contains any keys other than 'b'.
KeyError: If `regularizers` contains any keys other than 'b'.
TypeError: If any of the given initializers are not callable.
TypeError: If any of the given partitioners are not callable.
TypeError: If any of the given regularizers are not callable.
"""
super(AddBias, self).__init__(name=name)
self._output_shape = output_shape
self._input_shape = None
self._bias_dims = bias_dims
self._b = None
self._initializers = util.check_initializers(
initializers, self.POSSIBLE_INITIALIZER_KEYS)
self._partitioners = util.check_partitioners(
partitioners, self.POSSIBLE_INITIALIZER_KEYS)
self._regularizers = util.check_regularizers(
regularizers, self.POSSIBLE_INITIALIZER_KEYS)
def _build(self, inputs, multiplier=1):
"""Connects the Add module into the graph, with input Tensor `inputs`.
Args:
inputs: A Tensor of size `[batch_size, input_size1, ...]`.
multiplier: A scalar or Tensor which the bias term is multiplied by
before adding it to `inputs`. Anything which works in the expression
`bias * multiplier` is acceptable here. This may be useful if you want
to add a bias in one place and subtract the same bias in another place
via `multiplier=-1`.
Returns:
A Tensor of size `[batch_size, input_size1, ...]`.
Raises:
base.IncompatibleShapeError: If the input is not a >= 2D `Tensor`.
base.IncompatibleShapeError: If connecting the module into the graph
any time after the first time, and the inferred size of the input does
not match previous invocations.
base.IncompatibleShapeError: If the `output_shape` has been specified
but it does not match the `input_shape`.
base.ParentNotBuiltError: If the module is a transposed module and the original
untransposed module has not been built.
"""
input_shape = tuple(inputs.get_shape().as_list())
bias_shape = calculate_bias_shape(input_shape, self._bias_dims)
# Check always contains minibatched input.
if len(input_shape) < 2:
raise base.IncompatibleShapeError(
"Rank of input shape must be >=2 not: {}.".format(len(input_shape)))
# Check previous input size is same as new input size.
if (self._input_shape is not None and
input_shape[1:] != self._input_shape[1:]):
raise base.IncompatibleShapeError("Input shape has changed.")
# If transposed, make sure that the original Module is built.
if callable(self._output_shape):
self._output_shape = self._output_shape()
if self._output_shape is None:
raise base.ParentNotBuiltError(
"Build the original untransposed module before building this one.")
# If output_shape specified, check that it matches input_shape.
if (self._output_shape is not None and
self._output_shape[1:] != input_shape[1:]):
raise base.IncompatibleShapeError(
"Input shape must be {} not: {}.".format(self._output_shape,
input_shape))
self._input_shape = input_shape
dtype = inputs.dtype
if "b" not in self._initializers:
self._initializers["b"] = create_bias_initializer(bias_shape, dtype)
self._b = tf.get_variable(
"b",
shape=bias_shape,
dtype=dtype,
initializer=self._initializers["b"],
partitioner=self._partitioners.get("b", None),
regularizer=self._regularizers.get("b", None))
bias = self._b
if multiplier != 1:
bias = bias * multiplier # pylint: disable=g-no-augmented-assignment
outputs = inputs + bias
return outputs
@property
def b(self):
"""Returns the Variable containing the bias.
Returns:
Variable object containing the bias, from the most recent __call__.
Raises:
base.NotConnectedError: If the module has not been connected to the
graph yet, meaning the variables do not exist.
"""
self._ensure_is_connected()
return self._b
# Implements Transposable interface.
@property
def input_shape(self):
"""Returns shape of input `Tensor` passed at last call to `build`."""
self._ensure_is_connected()
return self._input_shape
# Implements Transposable interface
def transpose(self, name=None):
"""Returns transposed `AddBias` module.
Args:
name: Optional string assigning name of transpose module. The default name
is constructed by appending "_transpose" to `self.module_name`.
Returns:
Transposed `AddBias` module.
"""
if name is None:
name = self.module_name + "_transpose"
return AddBias(output_shape=lambda: self._input_shape,
bias_dims=self._bias_dims,
initializers=self._initializers,
regularizers=self._regularizers,
name=name)
class BatchReshape(base.AbstractModule, base.Transposable):
"""Reshapes input Tensor, preserving the batch dimension."""
def __init__(self, shape, preserve_dims=1, name="batch_reshape"):
"""Constructs a BatchReshape module.
Args:
shape: Shape to reshape the input Tensor to while preserving its
first `preserve_dims` dimensions; `shape` can be either a tuple/list,
or a callable that returns the actual shape. The callable does not
need to be ready to return something meaningful at construction time,
but it will be required to be able to do so when the module is
connected to the graph. When the special value -1 appears in `shape`
the corresponding size is automatically inferred. Note that -1 can
only appear once in `shape`. To flatten all non-batch dimensions,
the snt.BatchFlatten module can also be used.
preserve_dims: Number of leading dimensions that will not be reshaped.
For example, given an input Tensor with shape `[B, H, W, C, D]`,
and argument `shape` equal to `(-1, D)`:
* `preserve_dims=1` will return a Tensor with shape `[B, H*W*C, D]`.
* `preserve_dims=2` will return a Tensor with
shape `[B, H, W*C, D]`.
* `preserve_dims=3` will return a Tensor with
shape `[B, H, W, C, D]`.
* `preserve_dims=4` will return a Tensor with
shape `[B, H, W, C, 1, D]`.
* `preserve_dims>=5` will throw an error on build unless D=1.
The preserved dimensions can be unknown at building time.
name: Name of the module.
Raises:
ValueError: If `preserve_dims <= 0`.
"""
super(BatchReshape, self).__init__(name=name)
self._input_shape = None
self._shape = shape
self._preserve_dims = preserve_dims
if preserve_dims <= 0:
raise ValueError("Argument preserve_dims should be >= 1.")
if not callable(self._shape):
self._shape = tuple(self._shape)
def _infer_shape(self, dimensions):
"""Replaces the -1 wildcard in the output shape vector.
This function infers the correct output shape given the input dimensions.
Args:
dimensions: List of input non-batch dimensions.
Returns:
Tuple of non-batch output dimensions.
"""
# Size of input
n = np.prod(dimensions)
# Size of output where defined
m = np.prod(abs(np.array(self._shape)))
# Replace wildcard
v = np.array(self._shape)
v[v == -1] = n // m
return tuple(v)
def _build(self, inputs):
"""Connects the module into the graph, with input Tensor `inputs`.
Args:
inputs: A Tensor of shape [b_1, b_2, ..., b_preserve_dims,
b_preserve_dims+1, ...].
Returns:
A Tensor of shape [b_1, b_2, ..., b_preserve_dims,
b_reshape_1, b_reshape_2, ...],
with reshaping defined by the constructor `shape` parameter.
Raises:
ValueError: If output shape is incompatible with input shape; or if
shape array contains non numeric entries; or if shape array contains
more than 1 wildcard -1; or if the input array contains unknown,
non-preserved dimensions (except when the unknown dimension is the
only non-preserved dimension and doesn't actually need reshaping).
"""
full_input_shape = inputs.get_shape().as_list()
if len(full_input_shape) < self._preserve_dims:
raise ValueError("Input tensor has {} dimensions, should have at least "
"as many as preserve_dims={}".format(
len(full_input_shape),
self._preserve_dims))
self._input_shape = full_input_shape[self._preserve_dims:]
if callable(self._shape):
self._shape = tuple(self._shape())
# Special-case of 1 non-preserved dimension, where no reshape is necessary.
# This is useful if the non-preserved dimension of `inputs` is unknown
# at build time.
if len(self._input_shape) == 1 and len(self._shape) == 1:
if self._shape[0] == -1 or self._shape[0] == self._input_shape[0]:
return inputs
else:
if self._input_shape[0] is None:
raise ValueError("Unknown non-preserved dimensions are not allowed "
"in the input to BatchReshape unless it is only one "
"and the desired shape is (-1,).")
else:
raise ValueError("Output shape is incompatible with input shape")
if not all([isinstance(x, numbers.Integral) and (x > 0 or x == -1)
for x in self._shape]):
raise ValueError(
"Desired shape can only contain positive integral numbers "
"and the wildcard -1. Given shape {}".format(self._shape))
if self._shape.count(-1) > 1:
raise ValueError(
"Wildcard -1 can appear only once in desired output shape. "
"Given shape {}".format(self._shape))
preserved_shape = tf.shape(inputs)[:self._preserve_dims]
# Slicing the shape tensor loses information, we keep it in a list.
preserved_shape_list = inputs.get_shape()[:self._preserve_dims]
# Except in the case above where no reshape is needed, we do not allow
# unknown non-preserved dimensions in the input.
if None in self._input_shape:
raise ValueError("Unknown non-preserved dimensions are not allowed in "
"the input to BatchReshape unless it is only one and the"
" desired shape is (-1,). The offending non-preserved "
"input shape is {}".format(self._input_shape))
if self._shape.count(-1) > 0:
trailing_shape = self._infer_shape(self._input_shape)
else:
trailing_shape = self._shape
if np.prod(self._input_shape) != np.prod(trailing_shape):
raise ValueError("Output shape is incompatible with input shape")
shape = tf.concat([preserved_shape, trailing_shape], 0)
output = tf.reshape(inputs, shape)
# Include shape information that was lost when we sliced the shape tensor.
shape_list = preserved_shape_list.concatenate(trailing_shape)
output.set_shape(shape_list)
return output
@property
def input_shape(self):
self._ensure_is_connected()
return self._input_shape
# Implements Transposable interface.
def transpose(self, name=None):
"""Returns transpose batch reshape."""
if name is None:
name = self.module_name + "_transpose"
return BatchReshape(shape=lambda: self.input_shape,
preserve_dims=self._preserve_dims,
name=name)
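# Illustrative usage sketch, not part of the library; the helper name is
# hypothetical. With preserve_dims=2 only the trailing dimensions are
# reshaped, and the -1 wildcard is inferred from the remaining sizes.
def _example_batch_reshape_usage():
  inputs = tf.placeholder(tf.float32, shape=[None, 10, 4, 6])  # [B, T, 4, 6]
  reshaped = BatchReshape(shape=(3, -1), preserve_dims=2)(inputs)
  # The wildcard resolves to 8, giving a [B, T, 3, 8] Tensor.
  return reshaped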
class BatchFlatten(BatchReshape):
"""Flattens the input Tensor, preserving the batch dimension(s)."""
def __init__(self, preserve_dims=1, name="batch_flatten"):
"""Constructs a BatchFlatten module.
Args:
preserve_dims: Number of leading dimensions that will not be reshaped.
For example, given an input Tensor with shape `[B, H, W, C]`:
* `preserve_dims=1` will return a Tensor with shape `[B, H*W*C]`.
* `preserve_dims=2` will return a Tensor with
shape `[B, H, W*C]`.
* `preserve_dims=3` will return the input itself,
shape `[B, H, W, C]`.
* `preserve_dims=4` will return a Tensor with
shape `[B, H, W, C, 1]`.
* `preserve_dims>=5` will throw an error on build.
The preserved dimensions can be unknown at building time.
name: Name of the module.
"""
super(BatchFlatten, self).__init__(
shape=(-1,), preserve_dims=preserve_dims, name=name)
class FlattenTrailingDimensions(BatchReshape):
"""Flattens trailing dimensions of a Tensor."""
def __init__(self, dim_from, name="batch_dim_from"):
"""Constructs a FlattenTrailingDimensions module.
For example, given an input Tensor with shape `[B, H, W, C]`:
* `dim_from=1` will return a Tensor with shape `[B, H*W*C]`.
* `dim_from=2` will return a Tensor with shape `[B, H, W*C]`.
* `dim_from=3` will return the input itself.
* `dim_from=4` will return a Tensor with shape `[B, H, W, C, 1]`.
* `dim_from>=5` will generate a ValueError when building the module.
The preserved dimensions can be unknown at building time.
Equivalent to BatchFlatten(preserve_dims=dim_from, name=name).
Args:
dim_from: All dimensions after and including `dim_from` will
be flattened into a single dimension.
name: Name of the module.
Raises:
ValueError: If `dim_from <= 0`.
"""
if dim_from <= 0:
raise ValueError("Argument dim_from should be >= 1.")
super(FlattenTrailingDimensions, self).__init__(
shape=(-1,), preserve_dims=dim_from, name=name)
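# Minimal sketch, not part of the library; the helper name is hypothetical.
# FlattenTrailingDimensions(dim_from=k) behaves like
# BatchFlatten(preserve_dims=k): everything from dimension k onwards is
# collapsed into a single axis.
def _example_flatten_usage():
  images = tf.placeholder(tf.float32, shape=[None, 28, 28, 3])
  flat = BatchFlatten()(images)                             # [B, 2352]
  per_row = FlattenTrailingDimensions(dim_from=2)(images)   # [B, 28, 84]
  return flat, per_row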
class TrainableVariable(base.AbstractModule):
"""Provides learnable parameter Tensor."""
POSSIBLE_INITIALIZER_KEYS = {"w"}
def __init__(self,
shape,
dtype=tf.float32,
initializers=None,
partitioners=None,
regularizers=None,
custom_getter=None,
name="trainable_variable"):
"""Constructs a TrainableVariable module.
Args:
shape: Tensor shape.
dtype: Tensor data type.
initializers: Optional dictionary containing ops to initialize the weight
Tensor, with key 'w'.
partitioners: Optional dict containing a partitioner to partition
the weight (with key 'w'). As a default, no partitioner is used.
regularizers: Optional dict containing regularizers for the weights
(with key 'w'). As a default, no regularizers are used. A regularizer
should be a function that takes a single `Tensor` as an input and
returns a scalar `Tensor` output, e.g. the L1 and L2 regularizers in
`tf.contrib.layers`.
custom_getter: Optional callable or dictionary of callables to use as
custom_getter for the module.
name: Name of the module.
Raises:
KeyError: If `initializers` contains any keys other than 'w'.
KeyError: If `partitioners` contains any keys other than 'w'.
KeyError: If `regularizers` contains any keys other than 'w'.
TypeError: If any of the given initializers are not callable.
TypeError: If any of the given partitioners are not callable.
TypeError: If any of the given regularizers are not callable.
"""
super(TrainableVariable, self).__init__(custom_getter=custom_getter,
name=name)
self._shape = tuple(shape)
self._dtype = dtype
self._initializers = util.check_initializers(
initializers, self.POSSIBLE_INITIALIZER_KEYS)
self._partitioners = util.check_partitioners(
partitioners, self.POSSIBLE_INITIALIZER_KEYS)
self._regularizers = util.check_regularizers(
regularizers, self.POSSIBLE_INITIALIZER_KEYS)
def _build(self):
"""Connects the TrainableTensor module into the graph.
Returns:
A Tensor of shape as determined in the constructor.
"""
if "w" not in self._initializers:
stddev = 1 / math.sqrt(np.prod(self._shape))
self._initializers["w"] = tf.truncated_normal_initializer(stddev=stddev)
self._w = tf.get_variable("w",
shape=self._shape,
dtype=self._dtype,
initializer=self._initializers["w"],
partitioner=self._partitioners.get("w", None),
regularizer=self._regularizers.get("w", None))
return self._w
@property
def w(self):
"""Returns the Variable containing the weights Tensor.
Returns:
Variable object containing the weights, from the most recent __call__.
Raises:
base.Error: If the module has not been connected to the graph yet,
meaning the variables do not exist.
"""
self._ensure_is_connected()
return self._w
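# Illustrative usage sketch, not part of the library; the helper name is
# hypothetical. TrainableVariable exposes a learnable Tensor of fixed shape,
# for example a trainable initial state tiled across the batch.
def _example_trainable_variable_usage():
  init_state = TrainableVariable(shape=[1, 32])()  # learnable [1, 32] Tensor
  tiled = tf.tile(init_state, [16, 1])             # [16, 32] for a batch of 16
  return tiled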
class BatchApply(base.AbstractModule):
"""Merges a number of leading dimensions of an input tensor to manipulate it.
Merges a number of leading dimensions of a tensor into a single dimension,
connects the provided module, then splits the leading dimension of the
result to match the input.
Input tensors whose rank is smaller than the number of dimensions to collapse
(e.g. all scalar values, which are tensors of rank 0), are passed unaltered to
the provided module.
This is useful for applying some module to each timestep of a Time x Batch x N
tensor. If a module is hard coded to only support 2D (Batch x N) then the
full 3D Tensor cannot be provided. BatchApply will 'merge' the first two
dimensions of the sequence tensor by reshaping to a (Time * Batch) x N Tensor,
and then the internal module can be applied. The result of that operation is
reshaped such that its first dimensions are split to match the leading
dimensions of the input.
"""
def __init__(self, module_or_op, n_dims=2, input_example_index=0,
name="batch_apply"):
"""Constructor of the module.
Args:
module_or_op: Module or tensorflow op to apply to an input tensor.
n_dims: Number of dimensions to merge before using module on the input
of BatchApply.
input_example_index: Index of input that has same shape for the first
`n_dims` dimensions as `module_or_op` output(s). This is used for
unflattening the output(s) if static shape inference is not possible.
name: Name of the module.
Raises:
TypeError: If n_dims is not an integer.
ValueError: If n_dims is not greater than zero.
"""
super(BatchApply, self).__init__(name=name)
if not isinstance(n_dims, int):
raise TypeError("n_dims should be an integer, it is a %s instead." %
type(n_dims))
if n_dims <= 0:
raise ValueError("n_dims should be greater than zero.")
self._module = module_or_op
self._n_dims = n_dims
self._input_example_index = input_example_index
def _build(self, *args, **kwargs):
"""Connects the BatchApply module into the graph.
Args:
*args: a Tensor or a nested list or dictionary of Tensors. The input
tensors will have their first dimensions merged, then an op or a
module will be called on the input. The first dimension of the output
tensor(s) will be split again based on the leading dimensions of the
first input tensor.
**kwargs: Dictionary of named arguments; used in the same way as `*args`.
Returns:
A Tensor or nested list or dictionary of Tensors as a result of applying
the process above. ("None" return values are also supported.)
"""
flattened = nest.flatten([args, kwargs])
merged_flattened = [
merge_leading_dims(inp, self._n_dims) if inp is not None else None
for inp in flattened]
merged_args, merged_kwargs = nest.pack_sequence_as([args, kwargs],
merged_flattened)
results = self._module(*merged_args, **merged_kwargs)
# Unmerging takes the sizes of the leading dimensions from an input example
# with equal shape for the leading `n_dims` dimensions. Typically this is
# the first input.
example_input = tf.convert_to_tensor(flattened[self._input_example_index])
def _split_to_original_leading_dims(result):
if result is None:
return None
else:
return split_leading_dim(result, example_input, self._n_dims)
flat_results = nest.flatten(results)
flat_unmerged_results = [_split_to_original_leading_dims(result)
for result in flat_results]
return nest.pack_sequence_as(results, flat_unmerged_results)
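# Illustrative usage sketch, not part of the library; the helper name is
# hypothetical. BatchApply merges the time and batch dimensions, connects a
# rank-2 Linear on [T*B, N], then splits the leading dimension back out.
def _example_batch_apply_usage():
  sequence = tf.placeholder(tf.float32, shape=[20, None, 8])  # [T, B, N]
  outputs = BatchApply(Linear(output_size=16), n_dims=2)(sequence)
  # outputs has shape [20, B, 16].
  return outputs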
class SliceByDim(base.AbstractModule):
"""Slices a tensor along specific dimensions.
The user can slice a tensor by specifying only the list of dimensions that
they want to slice, together with the lists of integers containing the
beginning indices of the slicing, and the size of the slices. Hence, with
`SliceByDim` slicing can be performed without knowing in advance the rank of
the input tensor.
Tensorflow also offers a built-in op performing slicing, `tf.slice`. However,
`tf.slice` requires all the slicing dimensions to be specified, using
wildcards when no slicing is required. For example, with `tf.slice`, slicing
half a 5D tensor along dimension `1` would be:
```python
output = tf.slice(inputs,
begin=[0, 0, 0, 0, 0],
size=[-1, inputs.get_shape()[1].value//2, -1, -1, -1])
```
The same operation using `SliceByDim` would be:
```python
output = SliceByDim(dims=[1], begin=[0], size=[x.get_shape()[1].value//2])(x)
```
`SliceByDim` can be used to specify multiple slicing dimensions, for example:
```python
output = SliceByDim(dims=[1, 3], begin=[0, 0], size=[12, 24])(x)
```
"""
def __init__(self, dims, begin, size, name="slice_by_dim"):
"""Constructs the `SliceByDim` module.
Args:
dims: The dimensions to slice along, as a list of unique integers.
Negative integers index from the final dimension backwards, as in
python arrays.
begin: The beginning indices of the slicing, as a list of integers. Must
be the same length as the `dims` list.
size: The size of the slices, as a list of integers. Must be the same
length as the `dims` list.
name: The name of the module.
Raises:
ValueError: If `dims` has non-unique integers, or if the size of `begin`
is different from the size of `dims`, or if the size of `size` is
different from the size of `dims`.
"""
super(SliceByDim, self).__init__(name=name)
self._dims = dims
self._begin = begin
self._size = size
if np.unique(dims).size != len(dims):
raise ValueError("dims must not have any repeated integers.")
if len(begin) != len(dims):
raise ValueError(
"begin must have the same length as dims: {}.".format(len(dims)))
if len(size) != len(dims):
raise ValueError(
"size must have the same length as dims: {}.".format(len(dims)))
def _build(self, inputs):
"""Connects the SliceByDim module into the graph.
Args:
inputs: `Tensor` to slice. Its rank must be greater than the maximum
dimension specified in `dims` (plus one as python is 0 indexed).
Returns:
The sliced tensor.
Raises:
ValueError: If `inputs` tensor has insufficient rank.
"""
shape_inputs = inputs.get_shape().as_list()
rank = len(shape_inputs)
# Check that the input tensor has sufficient rank.
max_dim = np.max(self._dims) + 1
if rank < max_dim:
raise ValueError("Rank of inputs must be at least {}.".format(max_dim))
# Builds default lists for begin and size to pass to `tf.slice`.
full_begin = [0] * rank
full_size = [-1] * rank
# Updates lists with what the user provided.
for dim, begin, size in zip(self._dims, self._begin, self._size):
full_begin[dim] = begin
full_size[dim] = size
return tf.slice(inputs, begin=full_begin, size=full_size)
class TileByDim(base.AbstractModule):
"""Tile a tensor along specific dimensions.
The user can tile a tensor by specifying only the list of dimensions that
they want to tile, together with the lists of integers containing the
multiples of the tiling. Hence, with `TileByDim` tiling can be performed
without knowing in advance the rank of the input tensor.
Tensorflow also offers a built-in op performing tiling, `tf.tile`. However,
`tf.tile` requires all the tiling dimensions to be specified, using `1`
when no tiling is required. For example, with `tf.tile`, tiling a 5D
tensor along dimension `1`, by `2` would be:
```python
output = tf.tile(inputs, multiples=[1, 2, 1, 1, 1])
```
The same operation using `TileByDim` would be:
```python
output = TileByDim(dims=[1], multiples=[2])(x)
```
`TileByDim` can be used to specify multiple tiling dimensions, for example:
```python
output = TileByDim(dims=[1, 3], multiples=[2, 4])(x)
```
"""
def __init__(self, dims, multiples, name="tile_by_dim"):
"""Constructs the `TileByDim` module.
Args:
dims: The dimensions to tile along, as a list of unique integers.
multiples: The multiple of the tiling, as a list of integers. Must
be the same length as the `dims` list.
name: The name of the module.
Raises:
ValueError: If `dims` has non-unique integers, or if the size of
`multiples` is different from the size of `dims`.
"""
super(TileByDim, self).__init__(name=name)
self._dims = dims
self._multiples = multiples
if np.unique(dims).size != len(dims):
raise ValueError("dims must not have any repeated integers.")
if len(multiples) != len(dims):
raise ValueError(
"multiples must have the same length as dims: {}.".format(len(dims)))
def _build(self, inputs):
"""Connects the `TileByDim` module into the graph.
Args:
inputs: `Tensor` to tile.
Returns:
The tiled tensor.
"""
shape_inputs = inputs.get_shape().as_list()
rank = len(shape_inputs)
# Builds default lists for multiples to pass to `tf.tile`.
full_multiples = [1] * rank
# Updates lists with what the user provided.
for dim, multiple in zip(self._dims, self._multiples):
full_multiples[dim] = multiple
return tf.tile(inputs, multiples=full_multiples)
class MergeDims(base.AbstractModule):
"""Merges a tensor or nested list of tensors along a range of dimensions.
Tensors are reshaped by specifying the range of dimensions to merge.
Hence, the reshape can be performed without knowing in advance the rank of
the input tensor.
For example, merging dimensions 1, 2 and 3 together can be performed by
calling:
```python
output = MergeDims(start=1, size=3)(x)
```
A nested list of tensors can be merged:
```python
x = [tf.random_uniform(shape=[5, 5]), [tf.random_uniform(shape=[3, 3, 3])]]
output = MergeDims(start=0, size=2)(x)
```
"""
def __init__(self, start, size, name="merge_dims"):
"""Constructs the MergeDims module.
Args:
start: Start of the range of dimensions to merge.
size: Size of the range of dimensions to merge.
name: The name of the module.
Raises:
ValueError: If `size` is not strictly greater than 1.
"""
super(MergeDims, self).__init__(name=name)
self._start = start
self._size = size
# Merging requires a range of at least two dimensions.
if size <= 1:
raise ValueError("`size` should be strictly greater than 1.")
def _merge(self, tensor):
static_input_shape = tensor.get_shape().as_list()
rank = len(static_input_shape)
start = self._start
if start < 0:
start += rank # uses negative indexing from right
if rank < start + self._size:
raise ValueError("Rank of inputs must be at least {}."
.format(self._start + self._size))
initial = static_input_shape[:start]
middle = static_input_shape[start:start + self._size]
final = static_input_shape[start + self._size:]
if None in middle:
middle = [None]
else:
middle = [np.prod(middle)]
static_shape = initial + middle + final
if static_shape.count(None) + static_shape.count(0) <= 1:
# At most one undefined (or zero) dimension, so tf.reshape can handle this
# case.
static_shape = [-1 if i is None else i for i in static_shape]
return tf.reshape(tensor, static_shape)
else:
# Need to compute output shape dynamically.
dynamic_input_shape = tf.shape(tensor)
dynamic_initial = dynamic_input_shape[:start]
dynamic_middle = tf.reduce_prod(
dynamic_input_shape[start:start + self._size], keep_dims=True)
dynamic_final = dynamic_input_shape[start + self._size:]
dynamic_shape = tf.concat(
[dynamic_initial, dynamic_middle, dynamic_final], axis=0)
tensor = tf.reshape(tensor, dynamic_shape)
tensor.set_shape(static_shape) # give it some static shape information
return tensor
def _build(self, inputs):
"""Connects the MergeDims module into the graph.
Args:
inputs: Tensor or a nested list of Tensors to merge. Its rank must be
greater than or equal to `start` + `size`.
Returns:
The merged Tensor or a nested list of merged Tensors.
Raises:
ValueError: If any of the `inputs` tensors has insufficient rank.
"""
if nest.is_sequence(inputs):
merged_tensors = [self._merge(tensor) for tensor in nest.flatten(inputs)]
return nest.pack_sequence_as(inputs, merged_tensors)
# inputs is a single tf.Tensor
return self._merge(inputs)
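# Illustrative usage sketch, not part of the library; the helper name is
# hypothetical. Merging the two spatial dimensions of a [B, H, W, C] Tensor
# yields [B, H*W, C] without needing to know the rank in advance.
def _example_merge_dims_usage():
  images = tf.placeholder(tf.float32, shape=[None, 16, 16, 3])
  merged = MergeDims(start=1, size=2)(images)  # [B, 256, 3]
  return merged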
class SelectInput(base.AbstractModule):
"""Returns a subset of its inputs in an arbitrarily nested configuration.
This module can be used for multiple purposes.
The basic usage is to select a tensor or a subset of tensors:
```
output = snt.SelectInput(idx=0, name='select')(input0, input1)
==> input0
output = snt.SelectInput(idx=[0, 2], name='select')(input0, input1, input2)
==> (input0, input2)
```
Another usage is to change the orders of the input tensors:
```
output = snt.SelectInput(idx=[1, 0], name='select')(input0, input1)
==> (input1, input0)
```
Another usage is to duplicate an input:
```
output = snt.SelectInput(idx=[0, 0], name='select')(input0)
==> (input0, input0)
```
Another usage is to add arbitrary nesting:
```
output = snt.SelectInput(
idx=[0, [1, [2]]], name='select')(input0, input1, input2)
==> (input0, (input1, (input2,)))
```
"""
def __init__(self, idx, name="select_input"):
"""Module constructor.
Args:
idx: Indexes of the tensors to select. If `idx` is an integer, then
a `Tensor` is returned. If `idx` is a (nested) list/tuple, then a
(nested) tuple of `Tensor` is returned.
name: Name of the module.
Raises:
TypeError: If `idx` is not a list, tuple or integer.
"""
super(SelectInput, self).__init__(name=name)
self._check_type(idx)
self._idx = idx
def _check_type(self, idx):
if isinstance(idx, (list, tuple)):
for value in idx:
self._check_type(value)
elif not isinstance(idx, int):
raise TypeError("`idx` should be a (nested) list/tuple, or an integer.")
def _select(self, inputs, idx):
if isinstance(idx, (list, tuple)):
return tuple(self._select(inputs, i) for i in idx)
else:
if idx < 0 or idx >= len(inputs):
raise ValueError("`idx` contains out of bound entries (they should be "
"in the range [0, {}))".format(len(inputs)))
# Identity is called otherwise we might get 'placeholder is both fed and
# fetched' errors in some cases when using a feed_dict.
return tf.identity(inputs[idx])
def _build(self, *inputs):
"""Connects the module into the graph.
Args:
*inputs: `Tensor` variables to select.
Returns:
Subset of `inputs` in an arbitrarily nested configuration.
Raises:
ValueError: If any entry of `idx` is out of bounds with respect to the
size of `inputs`.
"""
return self._select(inputs, self._idx)
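# Illustrative usage sketch, not part of the library; the helper name is
# hypothetical. SelectInput can reorder its inputs or regroup them into an
# arbitrarily nested tuple.
def _example_select_input_usage():
  a = tf.constant([1.0])
  b = tf.constant([2.0])
  c = tf.constant([3.0])
  swapped = SelectInput(idx=[1, 0])(a, b)      # (b, a)
  nested = SelectInput(idx=[0, [2]])(a, b, c)  # (a, (c,))
  return swapped, nested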
| sonnet-1 | sonnet/python/modules/basic.py |
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Sonnet module information, stored in the graph collections."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
# Dependency imports
import six
from sonnet.protos import module_pb2
from sonnet.python.modules import base_errors
import tensorflow.compat.v1 as tf
from tensorflow.python.framework import ops
logging = tf.logging
SONNET_COLLECTION_NAME = "sonnet"
ModuleInfo = collections.namedtuple(
"ModuleInfo",
("module_name", "scope_name", "class_name", "connected_subgraphs"))
ConnectedSubGraph = collections.namedtuple(
"ConnectedSubGraph", ("module", "name_scope", "inputs", "outputs"))
_SPARSE_TENSOR_NAME = "SparseTensor"
_SPARSE_TENSOR_FIELD = ("indices", "values", "dense_shape")
class _UnserializableObject(object):
"""Placeholder for object which cannot be serialized."""
# Placeholder for tensor which cannot be found.
_MissingTensor = collections.namedtuple("_MissingTensor", ("name",))
def _is_namedtuple(obj):
"""Returns `True` if `obj` is a `collections.namedtuple`."""
return isinstance(obj, tuple) and hasattr(obj, "_fields")
def _is_iterable(obj):
"""Returns `True` if the object is a supported iterable."""
return isinstance(obj, (list, tuple, dict))
def _graph_element_to_path(graph_element):
"""Returns the path of the given graph element.
Args:
graph_element: A graph element. Currently only `tf.Tensor` is supported.
Returns:
The graph path corresponding to `graph_element` or the empty string if no
path could be found.
"""
if isinstance(graph_element, tf.Tensor):
return graph_element.name
# Returns an empty string when no name is defined. This will be deserialized
# as a `_UnserializableObject`.
return ""
def _path_to_graph_element(path, graph):
"""Returns the graph element of the given path.
Args:
path: The path of the graph element.
graph: The graph to look into.
Returns:
The graph element or an instance of `_MissingTensor`.
"""
try:
return graph.get_tensor_by_name(path)
except KeyError:
return _MissingTensor(path)
def _to_proto_sparse_tensor(sparse_tensor, nested_proto,
process_leafs, already_processed):
"""Serializes a `tf.SparseTensor` into `nested_proto`.
Args:
sparse_tensor: An instance of `tf.SparseTensor`.
nested_proto: A `module_pb2.NestedData` instance to be filled from
`sparse_tensor`.
process_leafs: A function to be applied to the leaf values of the nested
structure.
already_processed: Set of already processed objects (used to avoid
infinite recursion).
"""
already_processed.add(id(sparse_tensor))
nested_proto.named_tuple.name = _SPARSE_TENSOR_NAME
for str_key in _SPARSE_TENSOR_FIELD:
tensor = getattr(sparse_tensor, str_key)
nested_proto.named_tuple.map[str_key].value = process_leafs(tensor)
def _from_proto_sparse_tensor(sparse_tensor_proto, process_leafs):
"""Deserializes a `tf.SparseTensor` from `sparse_tensor_proto`.
Args:
sparse_tensor_proto: A proto representing a `tf.SparseTensor`.
process_leafs: A function to be applied to the leaf values of the nested
structure.
Returns:
An instance of `tf.SparseTensor`.
"""
if not sparse_tensor_proto.HasField("named_tuple"):
raise base_errors.ModuleInfoError(
"Error while deserializing a SparseTensor: expected proto tuple.")
if sparse_tensor_proto.named_tuple.name != _SPARSE_TENSOR_NAME:
raise base_errors.ModuleInfoError(
"Error while deserializing a SparseTensor: The name of the tuple "
"should have been {} but was {}.".format(
_SPARSE_TENSOR_NAME, sparse_tensor_proto.named_tuple.name))
named_tuple_map = sparse_tensor_proto.named_tuple.map
return tf.SparseTensor(
indices=process_leafs(named_tuple_map["indices"].value),
values=process_leafs(named_tuple_map["values"].value),
dense_shape=process_leafs(named_tuple_map["dense_shape"].value))
# This named tuple contains the necessary information to handle a Python
# object which should be handled in a specific way. The "check" field should
# contain a callable returning `True` if the Python object is indeed special
# and the "to_proto" field should contain a custom serializer.
_SpecialTypeInfo = collections.namedtuple("_SpecialTypeInfo",
("check", "to_proto", "from_proto"))
_TO_PROTO_SPECIAL_TYPES = collections.OrderedDict()
_TO_PROTO_SPECIAL_TYPES[_SPARSE_TENSOR_NAME] = _SpecialTypeInfo(
check=lambda obj: isinstance(obj, tf.SparseTensor),
to_proto=_to_proto_sparse_tensor,
from_proto=_from_proto_sparse_tensor)
def _nested_to_proto(nested_value, nested_proto, process_leafs,
already_processed):
"""Serializes `nested_value` into `nested_proto`.
Args:
nested_value: A nested Python value.
nested_proto: A `module_pb2.NestedData` instance to be filled from the value
in `nested_value`.
process_leafs: A function to be applied to the leaf values of the nested
structure.
already_processed: Set of already processed objects (used to avoid
infinite recursion).
Raises:
ModuleInfoError: If `nested_proto` is not an instance of
`module_pb2.NestedData`.
"""
if not isinstance(nested_proto, module_pb2.NestedData):
raise base_errors.ModuleInfoError("Expected module_pb2.NestedData.")
# If this object was already processed, mark as "unserializable"
# to avoid infinite recursion.
if id(nested_value) in already_processed:
nested_proto.value = ""
return
# Check special types.
for type_name, type_info in six.iteritems(_TO_PROTO_SPECIAL_TYPES):
if type_info.check(nested_value):
nested_proto.special_type.name = type_name
type_info.to_proto(
nested_value, nested_proto.special_type.object,
process_leafs, already_processed)
return
# Check standard types.
if _is_iterable(nested_value):
# Mark this container as "already processed" to avoid infinite recursion.
already_processed.add(id(nested_value))
if isinstance(nested_value, dict):
nested_proto.dict.SetInParent()
for key, child in six.iteritems(nested_value):
str_key = str(key)
child_proto = nested_proto.dict.map[str_key]
_nested_to_proto(child, child_proto, process_leafs, already_processed)
elif isinstance(nested_value, tuple):
# NamedTuple?
if _is_namedtuple(nested_value):
nested_proto.named_tuple.name = type(nested_value).__name__
for str_key in nested_value._fields:
child = getattr(nested_value, str_key)
child_proto = nested_proto.named_tuple.map[str_key]
_nested_to_proto(child, child_proto, process_leafs, already_processed)
else:
nested_proto.tuple.SetInParent()
for child in nested_value:
child_proto = nested_proto.tuple.list.add()
_nested_to_proto(child, child_proto, process_leafs, already_processed)
else:
nested_proto.list.SetInParent()
for child in nested_value:
child_proto = nested_proto.list.list.add()
_nested_to_proto(child, child_proto, process_leafs, already_processed)
else:
nested_proto.value = process_leafs(nested_value)
def _module_info_to_proto(module_info, export_scope=None):
"""Serializes `module_into`.
Args:
module_info: An instance of `ModuleInfo`.
export_scope: Optional `string`. Name scope to remove.
Returns:
An instance of `module_pb2.SonnetModule`.
"""
def strip_name_scope(name_scope):
return ops.strip_name_scope(name_scope, export_scope)
def process_leafs(value):
return strip_name_scope(_graph_element_to_path(value))
module_info_def = module_pb2.SonnetModule(
module_name=module_info.module_name,
scope_name=strip_name_scope(module_info.scope_name),
class_name=module_info.class_name)
for connected_subgraph in module_info.connected_subgraphs:
connected_subgraph_info_def = module_info_def.connected_subgraphs.add()
connected_subgraph_info_def.name_scope = strip_name_scope(
connected_subgraph.name_scope)
_nested_to_proto(
connected_subgraph.inputs,
connected_subgraph_info_def.inputs,
process_leafs, set())
_nested_to_proto(
connected_subgraph.outputs,
connected_subgraph_info_def.outputs,
process_leafs, set())
return module_info_def
def _nested_from_proto(nested_proto, process_leafs):
"""Deserializes `nested_proto`.
Args:
nested_proto: An instance of `module_pb2.NestedData`.
process_leafs: A function to be applied to the leaf values of the nested
structure.
Returns:
    An instance of `string`, `list`, `tuple`, `dict` or `namedtuple`.
Raises:
    base_errors.ModuleInfoError: If the protobuf is of the wrong type or
if some of its fields are missing.
"""
if not isinstance(nested_proto, module_pb2.NestedData):
raise base_errors.ModuleInfoError("Expected module_pb2.NestedData.")
if nested_proto.HasField("value"):
value = nested_proto.value
if not value:
value = _UnserializableObject()
else:
value = process_leafs(value)
return value
elif nested_proto.HasField("list"):
return [_nested_from_proto(child, process_leafs)
for child in nested_proto.list.list]
elif nested_proto.HasField("tuple"):
return tuple(_nested_from_proto(child, process_leafs)
for child in nested_proto.tuple.list)
elif nested_proto.HasField("dict"):
return {name: _nested_from_proto(child, process_leafs)
for name, child in six.iteritems(nested_proto.dict.map)}
elif nested_proto.HasField("named_tuple"):
tmp_dict = {name: _nested_from_proto(child, process_leafs)
for name, child in six.iteritems(nested_proto.named_tuple.map)}
# Note that this needs to be a named tuple to work with existing usage.
NamedTuple = collections.namedtuple( # pylint: disable=invalid-name
nested_proto.named_tuple.name, tmp_dict.keys())
return NamedTuple(**tmp_dict)
elif nested_proto.HasField("special_type"):
if nested_proto.special_type.name not in _TO_PROTO_SPECIAL_TYPES:
return _UnserializableObject()
type_info = _TO_PROTO_SPECIAL_TYPES[nested_proto.special_type.name]
return type_info.from_proto(nested_proto.special_type.object, process_leafs)
else:
raise base_errors.ModuleInfoError(
"Cannot deserialize a `ModuleInfo` protobuf with no fields.")
def _module_info_from_proto(module_info_def, import_scope=None):
"""Deserializes `module_info_def` proto.
Args:
module_info_def: An instance of `module_pb2.SonnetModule`.
import_scope: Optional `string`. Name scope to use.
Returns:
An instance of `ModuleInfo`.
Raises:
    base_errors.ModuleInfoError: If the protobuf is of the wrong type or
if some of its fields are missing.
"""
graph = tf.get_default_graph()
def prepend_name_scope(name_scope):
return ops.prepend_name_scope(name_scope, import_scope)
def process_leafs(name):
return _path_to_graph_element(prepend_name_scope(name), graph)
connected_subgraphs = []
module_info = ModuleInfo(
module_name=module_info_def.module_name,
scope_name=prepend_name_scope(module_info_def.scope_name),
class_name=module_info_def.class_name,
connected_subgraphs=connected_subgraphs)
for connected_subgraph_def in module_info_def.connected_subgraphs:
connected_subgraph = ConnectedSubGraph(
module=module_info,
name_scope=prepend_name_scope(connected_subgraph_def.name_scope),
inputs=_nested_from_proto(
connected_subgraph_def.inputs, process_leafs),
outputs=_nested_from_proto(
connected_subgraph_def.outputs, process_leafs))
connected_subgraphs.append(connected_subgraph)
return module_info
def _module_info_from_proto_safe(module_info_def, import_scope=None):
"""Deserializes the `module_info_def` proto without raising exceptions.
Args:
module_info_def: An instance of `module_pb2.SonnetModule`.
import_scope: Optional `string`. Name scope to use.
Returns:
An instance of `ModuleInfo`.
"""
try:
return _module_info_from_proto(module_info_def, import_scope)
except Exception as e: # pylint: disable=broad-except
logging.warning(
"Error encountered when deserializing sonnet ModuleInfo:\n%s", str(e))
return None
# `to_proto` is already wrapped into a try...except externally but
# `from_proto` isn't. In order to minimize disruption, catch all the exceptions
# happening during `from_proto` and just log them.
ops.register_proto_function(SONNET_COLLECTION_NAME,
module_pb2.SonnetModule,
to_proto=_module_info_to_proto,
from_proto=_module_info_from_proto_safe)
| sonnet-1 | sonnet/python/modules/base_info.py |
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Basic RNN Cores for TensorFlow snt.
This file contains the definitions of the simplest building blocks for Recurrent
Neural Networks.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
# Dependency imports
import six
from sonnet.python.modules import base
from sonnet.python.modules import basic
from sonnet.python.modules import rnn_core
from sonnet.python.modules import util
import tensorflow.compat.v1 as tf
from tensorflow.contrib import framework as contrib_framework
nest = contrib_framework.nest
def _get_flat_core_sizes(cores):
"""Obtains the list flattened output sizes of a list of cores.
Args:
cores: list of cores to get the shapes from.
Returns:
List of lists that, for each core, contains the list of its output
dimensions.
"""
core_sizes_lists = []
for core in cores:
flat_output_size = nest.flatten(core.output_size)
core_sizes_lists.append(
[tf.TensorShape(size).as_list() for size in flat_output_size])
return core_sizes_lists
def _get_shape_without_batch_dimension(tensor_nest):
"""Converts Tensor nest to a TensorShape nest, removing batch dimension."""
def _strip_batch_and_convert_to_shape(tensor):
return tensor.get_shape()[1:]
return nest.map_structure(_strip_batch_and_convert_to_shape, tensor_nest)
class VanillaRNN(rnn_core.RNNCore):
"""Basic fully connected vanilla RNN core."""
IN_TO_HIDDEN = "in_to_hidden"
HIDDEN_TO_HIDDEN = "hidden_to_hidden"
POSSIBLE_INITIALIZER_KEYS = {IN_TO_HIDDEN, HIDDEN_TO_HIDDEN}
def __init__(self, hidden_size, activation=tf.tanh, initializers=None,
partitioners=None, regularizers=None, name="vanilla_rnn"):
"""Construct a Basic RNN core.
Args:
hidden_size: hidden size dimensionality.
activation: activation function to use.
initializers: optional dict containing ops to initialize the weights. This
dictionary may contain the keys 'in_to_hidden' and/or
'hidden_to_hidden'.
partitioners: optional dict containing ops to partition the weights. This
dictionary may contain the keys 'in_to_hidden' and/or
'hidden_to_hidden'.
regularizers: optional dict containing ops to regularize the weights. This
dictionary may contain the keys 'in_to_hidden' and/or
'hidden_to_hidden'.
name: name of the module.
Raises:
KeyError: if `initializers` contains any keys other than 'in_to_hidden' or
'hidden_to_hidden'.
KeyError: if `partitioners` contains any keys other than 'in_to_hidden' or
'hidden_to_hidden'.
KeyError: if `regularizers` contains any keys other than 'in_to_hidden' or
'hidden_to_hidden'.
TypeError: If any of the given initializers are not callable.
TypeError: If any of the given partitioners are not callable.
TypeError: If any of the given regularizers are not callable.
"""
super(VanillaRNN, self).__init__(name=name)
self._hidden_size = hidden_size
self._activation = activation
self._initializers = util.check_initializers(
initializers, self.POSSIBLE_INITIALIZER_KEYS)
self._partitioners = util.check_partitioners(
partitioners, self.POSSIBLE_INITIALIZER_KEYS)
self._regularizers = util.check_regularizers(
regularizers, self.POSSIBLE_INITIALIZER_KEYS)
def _build(self, input_, prev_state):
"""Connects the VanillaRNN module into the graph.
If this is not the first time the module has been connected to the graph,
    the Tensors provided as input_ and prev_state must have the same final
dimension, in order for the existing variables to be the correct size for
their corresponding multiplications. The batch size may differ for each
connection.
Args:
input_: a 2D Tensor of size [batch_size, input_size].
prev_state: a 2D Tensor of size [batch_size, hidden_size].
Returns:
output: a 2D Tensor of size [batch_size, hidden_size].
next_state: a Tensor of size [batch_size, hidden_size].
Raises:
ValueError: if connecting the module into the graph any time after the
first time, and the inferred size of the inputs does not match previous
invocations.
"""
self._in_to_hidden_linear = basic.Linear(
self._hidden_size, name="in_to_hidden",
initializers=self._initializers.get("in_to_hidden"),
partitioners=self._partitioners.get("in_to_hidden"),
regularizers=self._regularizers.get("in_to_hidden"))
self._hidden_to_hidden_linear = basic.Linear(
self._hidden_size, name="hidden_to_hidden",
initializers=self._initializers.get("hidden_to_hidden"),
partitioners=self._partitioners.get("hidden_to_hidden"),
regularizers=self._regularizers.get("hidden_to_hidden"))
in_to_hidden = self._in_to_hidden_linear(input_)
hidden_to_hidden = self._hidden_to_hidden_linear(prev_state)
output = self._activation(in_to_hidden + hidden_to_hidden)
# For VanillaRNN, the next state of the RNN is the same as the output
return output, output
@property
def in_to_hidden_linear(self):
self._ensure_is_connected()
return self._in_to_hidden_linear
@property
def hidden_to_hidden_linear(self):
self._ensure_is_connected()
return self._hidden_to_hidden_linear
@property
def in_to_hidden_variables(self):
self._ensure_is_connected()
return self._in_to_hidden_linear.get_variables()
@property
def hidden_to_hidden_variables(self):
self._ensure_is_connected()
return self._hidden_to_hidden_linear.get_variables()
@property
def state_size(self):
return tf.TensorShape([self._hidden_size])
@property
def output_size(self):
return tf.TensorShape([self._hidden_size])
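# A brief usage sketch (illustrative only, not called anywhere): unrolling a
# VanillaRNN over a time-major sequence with `tf.nn.dynamic_rnn`. The shapes
# below are arbitrary examples.
def _example_vanilla_rnn_usage():
  vanilla = VanillaRNN(hidden_size=32)
  # Inputs are [time, batch, features].
  inputs = tf.placeholder(tf.float32, shape=[10, 8, 16])
  initial_state = vanilla.initial_state(batch_size=8)
  # `outputs` is [time, batch, hidden_size]; `final_state` is
  # [batch, hidden_size] since the state of a VanillaRNN equals its output.
  outputs, final_state = tf.nn.dynamic_rnn(
      vanilla, inputs, initial_state=initial_state, time_major=True)
  return outputs, final_state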
class DeepRNN(rnn_core.RNNCore):
"""RNN core that passes data through a number of internal modules or ops.
This module is constructed by passing an iterable of externally constructed
modules or ops. The DeepRNN takes `(input, prev_state)` as input and passes
the input through each internal module in the order they were presented,
using elements from `prev_state` as necessary for internal recurrent cores.
The output is `(output, next_state)` in common with other RNN cores.
By default, skip connections from the input to all internal modules and from
each intermediate output to the final output are used.
E.g.:
```python
lstm1 = snt.LSTM(hidden_size=256)
lstm2 = snt.LSTM(hidden_size=256)
deep_rnn = snt.DeepRNN([lstm1, lstm2])
output, next_state = deep_rnn(input, prev_state)
```
The computation set up inside the DeepRNN has the same effect as:
```python
prev_state1, prev_state2 = prev_state
lstm1_output, next_state1 = lstm1(input, prev_state1)
lstm2_output, next_state2 = lstm2(
tf.concat([input, lstm1_output], 1), prev_state2)
next_state = (next_state1, next_state2)
output = tf.concat([lstm1_output, lstm2_output], 1)
```
Every internal module receives the preceding module's output and the entire
core's input. The output is created by concatenating each internal module's
output. In the case of internal recurrent elements, corresponding elements
of the state are used such that `state[i]` is passed to the `i`'th internal
recurrent element. Note that the state of a `DeepRNN` is always a tuple, which
will contain the same number of elements as there are internal recurrent
cores. If no internal modules are recurrent, the state of the DeepRNN as a
whole is the empty tuple. Wrapping non-recurrent modules into a DeepRNN can
be useful to produce something API compatible with a "real" recurrent module,
simplifying code that handles the cores.
Without skip connections the previous example would become the following
(note the only difference is the addition of `skip_connections=False`):
```python
  # ... declare lin, tanh and lstm modules
deep_rnn = snt.DeepRNN([lin, tanh, lstm], skip_connections=False)
output, next_state = deep_rnn(input, prev_state)
```
which is equivalent to:
```python
lin_output = lin(input)
tanh_output = tanh(lin_output)
lstm_output, lstm_next_state = lstm(tanh_output, prev_state[0])
next_state = (lstm_next_state,)
output = lstm_output
```
Note: when using skip connections, all the cores should be recurrent.
"""
def __init__(self, cores, skip_connections=True,
concat_final_output_if_skip=True, name="deep_rnn"):
"""Construct a Deep RNN core.
Args:
cores: iterable of modules or ops.
skip_connections: a boolean that indicates whether to use skip
connections. This means that the input is fed to all the layers, after
being concatenated on the last dimension with the output of the previous
layer. The output of the module will be the concatenation of all the
outputs of the internal modules.
concat_final_output_if_skip: A boolean that indicates whether the outputs
of intermediate layers should be concatenated into the timestep-wise
output of the core. By default this is True. If this is set to False,
then the core output is that of the final layer, i.e. that of
`cores[-1]`.
name: name of the module.
Raises:
ValueError: if `cores` is not an iterable, or if `skip_connections` is
True and not all the modules are recurrent.
"""
super(DeepRNN, self).__init__(name=name)
if not isinstance(cores, collections.Iterable):
raise ValueError("Cores should be an iterable object.")
self._cores = tuple(cores)
self._skip_connections = skip_connections
self._concat_final_output_if_skip = concat_final_output_if_skip
self._is_recurrent_list = [isinstance(core, rnn_core.RNNCore)
for core in self._cores]
if self._skip_connections:
tf.logging.log_first_n(
tf.logging.WARN,
"The `skip_connections` argument will be deprecated.",
1
)
if not all(self._is_recurrent_list):
raise ValueError("skip_connections are enabled but not all cores are "
"`snt.RNNCore`s, which is not supported. The following"
" cores were specified: {}.".format(self._cores))
self._check_cores_output_sizes()
self._num_recurrent = sum(self._is_recurrent_list)
self._last_output_size = None
def _check_cores_output_sizes(self):
"""Checks the output_sizes of the cores of the DeepRNN module.
Raises:
ValueError: if the outputs of the cores cannot be concatenated along their
first dimension.
"""
for core_sizes in zip(*tuple(_get_flat_core_sizes(self._cores))):
first_core_list = core_sizes[0][1:]
for i, core_list in enumerate(core_sizes[1:]):
if core_list[1:] != first_core_list:
raise ValueError("The outputs of the provided cores are not able "
"to be concatenated along the first feature "
"dimension. Core 0 has shape %s, whereas Core %d "
"has shape %s - these must only differ in the first "
"dimension" % (core_sizes[0], i + 1, core_list))
def _build(self, inputs, prev_state, **kwargs):
"""Connects the DeepRNN module into the graph.
If this is not the first time the module has been connected to the graph,
    the Tensors provided as inputs and prev_state must have the same final
dimension, in order for the existing variables to be the correct size for
their corresponding multiplications. The batch size may differ for each
connection.
Args:
inputs: a nested tuple of Tensors of arbitrary dimensionality, with at
least an initial batch dimension.
prev_state: a tuple of `prev_state`s that corresponds to the state
      of each one of the cores of the `DeepRNN`.
**kwargs: optional kwargs to be passed to the `_build` of all sub-modules.
E.g. is_training=True. Note all sub-modules must accept the given kwarg.
Returns:
output: a nested tuple of Tensors of arbitrary dimensionality, with at
least an initial batch dimension.
next_state: a tuple of `next_state`s that corresponds to the updated state
      of each one of the cores of the `DeepRNN`.
Raises:
ValueError: if connecting the module into the graph any time after the
first time, and the inferred size of the inputs does not match previous
invocations. This may happen if one connects a module any time after the
first time that does not have the configuration of skip connections as
the first time.
"""
current_input = inputs
next_states = []
outputs = []
recurrent_idx = 0
concatenate = lambda *args: tf.concat(args, axis=-1)
for i, core in enumerate(self._cores):
if self._skip_connections and i > 0:
current_input = nest.map_structure(concatenate, inputs, current_input)
# Determine if this core in the stack is recurrent or not and call
# accordingly.
if self._is_recurrent_list[i]:
current_input, next_state = core(current_input,
prev_state[recurrent_idx],
**kwargs)
next_states.append(next_state)
recurrent_idx += 1
else:
current_input = core(current_input, **kwargs)
if self._skip_connections:
outputs.append(current_input)
if self._skip_connections and self._concat_final_output_if_skip:
output = nest.map_structure(concatenate, *outputs)
else:
output = current_input
self._last_output_size = _get_shape_without_batch_dimension(output)
return output, tuple(next_states)
def initial_state(self, batch_size, dtype=tf.float32, trainable=False,
trainable_initializers=None, trainable_regularizers=None,
name=None):
"""Builds the default start state for a DeepRNN.
Args:
batch_size: An int, float or scalar Tensor representing the batch size.
dtype: The data type to use for the state.
trainable: Boolean that indicates whether to learn the initial state.
trainable_initializers: An initializer function or nested structure of
functions with same structure as the `state_size` property of the
core, to be used as initializers of the initial state variable.
trainable_regularizers: Optional regularizer function or nested structure
of functions with the same structure as the `state_size` property of the
core, to be used as regularizers of the initial state variable. A
regularizer should be a function that takes a single `Tensor` as an
input and returns a scalar `Tensor` output, e.g. the L1 and L2
regularizers in `tf.contrib.layers`.
name: Optional string used to prefix the initial state variable names, in
the case of a trainable initial state. If not provided, defaults to
the name of the module.
Returns:
A tensor or nested tuple of tensors with same structure and shape as the
`state_size` property of the core.
Raises:
ValueError: if the number of passed initializers is not the same as the
number of recurrent cores.
"""
initial_state = []
if trainable_initializers is None:
trainable_initializers = [None] * self._num_recurrent
if trainable_regularizers is None:
trainable_regularizers = [None] * self._num_recurrent
num_initializers = len(trainable_initializers)
if num_initializers != self._num_recurrent:
raise ValueError("The number of initializers and recurrent cores should "
"be the same. Received %d initializers for %d specified "
"recurrent cores."
% (num_initializers, self._num_recurrent))
with tf.name_scope(self._initial_state_scope(name)):
recurrent_idx = 0
for is_recurrent, core in zip(self._is_recurrent_list, self._cores):
if is_recurrent:
core_initial_state = core.initial_state(
batch_size, dtype=dtype, trainable=trainable,
trainable_initializers=trainable_initializers[recurrent_idx],
trainable_regularizers=trainable_regularizers[recurrent_idx])
initial_state.append(core_initial_state)
recurrent_idx += 1
return tuple(initial_state)
@property
def state_size(self):
sizes = []
for is_recurrent, core in zip(self._is_recurrent_list, self._cores):
if is_recurrent:
sizes.append(core.state_size)
return tuple(sizes)
@property
def output_size(self):
if self._skip_connections and self._concat_final_output_if_skip:
output_size = []
for core_sizes in zip(*tuple(_get_flat_core_sizes(self._cores))):
added_core_size = core_sizes[0]
added_core_size[-1] = sum([size[-1] for size in core_sizes])
output_size.append(tf.TensorShape(added_core_size))
return nest.pack_sequence_as(structure=self._cores[0].output_size,
flat_sequence=output_size)
else:
      # Assumes that an element of cores which does not have the output_size
      # property does not affect the output shape. Then the last core in the
      # sequence with output_size information determines the output_size of
      # the DeepRNN. This heuristic is error prone, but enforcing that the
      # final core must have an output_size field would cost a lot of
      # flexibility (e.g. it would be impossible to add a TF nonlinearity as
      # the final "core"), so we only print a warning when the heuristic has
      # to be used.
final_core = self._cores[-1]
if hasattr(final_core, "output_size"):
# This is definitely the correct value, so no warning needed.
return final_core.output_size
# If we have connected the module at least once, we can get the output
# size of whatever was actually produced.
if self._last_output_size is not None:
tf.logging.warning(
"Final core does not contain .output_size, but the "
"DeepRNN has been connected into the graph, so inferred output "
"size as %s", self._last_output_size)
return self._last_output_size
# If all else fails, iterate backwards through cores and return the
# first one which has an output_size field. This can be incorrect in
# various ways, so warn loudly.
try:
guessed_output_size = next(core.output_size
for core in reversed(self._cores)
if hasattr(core, "output_size"))
except StopIteration:
raise ValueError("None of the 'cores' have output_size information.")
tf.logging.warning(
"Trying to infer output_size of DeepRNN, but the final core %s does "
"not have the .output_size field. The guessed output_size is %s "
"but this may not be correct. If you see shape errors following this "
"warning, you must change the cores used in the DeepRNN so that "
"the final core used has a correct .output_size property.",
final_core, guessed_output_size)
return guessed_output_size
class ModelRNN(rnn_core.RNNCore):
"""RNNCore that ignores input and uses a model to compute its next state."""
def __init__(self, model, name="model_rnn"):
"""Construct a Basic RNN core.
Args:
model: callable that computes the next state.
name: name of the module.
Raises:
TypeError: if model is not a callable object or if it is an RNNCore.
AttributeError: if model does not have an output_size attribute.
"""
super(ModelRNN, self).__init__(name=name)
if not callable(model):
raise TypeError("Model must be callable.")
if isinstance(model, rnn_core.RNNCore):
raise TypeError("Model should not be an RNNCore.")
try:
self._output_size = model.output_size
except AttributeError:
raise AttributeError("Model should have an output_size attribute.")
self._model = model
def _build(self, inputs, prev_state):
"""Connects the ModelRNN module into the graph.
If this is not the first time the module has been connected to the graph,
    the Tensor provided as prev_state must have the same final
dimension, in order for the existing variables to be the correct size for
their corresponding multiplications. The batch size may differ for each
connection.
Args:
inputs: Tensor input to the ModelRNN (ignored).
prev_state: Tensor of size `model.output_size`.
Returns:
output: Tensor of size `model.output_size`.
next_state: Tensor of size `model.output_size`.
"""
next_state = self._model(prev_state)
# For ModelRNN, the next state of the RNN is the same as the output
return next_state, next_state
@property
def state_size(self):
return self._output_size
@property
def output_size(self):
return self._output_size
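# A brief usage sketch (illustrative only, not called anywhere): wrapping a
# feed-forward module that exposes `output_size` (here `basic.Linear`) so it
# can be driven like a recurrent core. ModelRNN ignores its inputs entirely.
def _example_model_rnn_usage():
  model_rnn = ModelRNN(basic.Linear(output_size=16))
  prev_state = tf.zeros([8, 16])
  dummy_inputs = tf.zeros([8, 3])  # Ignored by ModelRNN.
  output, next_state = model_rnn(dummy_inputs, prev_state)
  # The output and the next state are the same tensor of shape [8, 16].
  return output, next_state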
class BidirectionalRNN(base.AbstractModule):
"""Bidirectional RNNCore that processes the sequence forwards and backwards.
Based upon the encoder implementation in: https://arxiv.org/abs/1409.0473
  The interface of this module is different from the typical ones found in
  the RNNCore family. The primary difference is that it is conditioned on
  the full input sequence in order to produce the full sequences of outputs
  and states of both the forward and backward cores.
"""
def __init__(self, forward_core, backward_core, name="bidir_rnn"):
"""Construct a Bidirectional RNN core.
Args:
forward_core: callable RNNCore module that computes forward states.
backward_core: callable RNNCore module that computes backward states.
name: name of the module.
Raises:
ValueError: if not all the modules are recurrent.
"""
super(BidirectionalRNN, self).__init__(name=name)
self._forward_core = forward_core
self._backward_core = backward_core
def _is_recurrent(core):
has_rnn_core_interface = (hasattr(core, "initial_state") and
hasattr(core, "output_size") and
hasattr(core, "state_size"))
return isinstance(core, rnn_core.RNNCore) or has_rnn_core_interface
if not(_is_recurrent(forward_core) and _is_recurrent(backward_core)):
raise ValueError("Forward and backward cores must both be instances of"
"RNNCore.")
def _build(self, input_sequence, state):
"""Connects the BidirectionalRNN module into the graph.
Args:
input_sequence: tensor (time, batch, [feature_1, ..]). It must be
time_major.
state: tuple of states for the forward and backward cores.
Returns:
      A dict with forward/backward states and output sequences:
"outputs":{
"forward": ...,
"backward": ...},
"state": {
"forward": ...,
"backward": ...}
Raises:
ValueError: in case time dimension is not statically known.
"""
input_shape = input_sequence.get_shape()
if input_shape[0] is None:
raise ValueError("Time dimension of input (dim 0) must be statically"
"known.")
seq_length = int(input_shape[0])
forward_state, backward_state = state
# Lists for the forward backward output and state.
output_sequence_f = []
output_sequence_b = []
# Forward pass over the sequence.
with tf.name_scope("forward_rnn"):
core_state = forward_state
for i in six.moves.range(seq_length):
core_output, core_state = self._forward_core(
input_sequence[i, :,], core_state)
output_sequence_f.append((core_output, core_state))
output_sequence_f = nest.map_structure(
lambda *vals: tf.stack(vals), *output_sequence_f)
# Backward pass over the sequence.
with tf.name_scope("backward_rnn"):
core_state = backward_state
for i in six.moves.range(seq_length - 1, -1, -1):
core_output, core_state = self._backward_core(
input_sequence[i, :,], core_state)
output_sequence_b.append((core_output, core_state))
output_sequence_b = nest.map_structure(
lambda *vals: tf.stack(vals), *output_sequence_b)
    # Compose the full output and state sequences.
return {
"outputs": {
"forward": output_sequence_f[0],
"backward": output_sequence_b[0]
},
"state": {
"forward": output_sequence_f[1],
"backward": output_sequence_b[1]
}
}
def initial_state(self, batch_size, dtype=tf.float32, trainable=False,
trainable_initializers=None, trainable_regularizers=None,
name=None):
"""Builds the default start state for a BidirectionalRNN.
    The initial state is a tuple containing the initial states of the forward
    and backward cores.
Args:
batch_size: An int, float or scalar Tensor representing the batch size.
dtype: The data type to use for the state.
trainable: Boolean that indicates whether to learn the initial state.
trainable_initializers: An initializer function or nested structure of
functions with same structure as the `state_size` property of the
core, to be used as initializers of the initial state variable.
trainable_regularizers: Optional regularizer function or nested structure
of functions with the same structure as the `state_size` property of the
core, to be used as regularizers of the initial state variable. A
regularizer should be a function that takes a single `Tensor` as an
input and returns a scalar `Tensor` output, e.g. the L1 and L2
regularizers in `tf.contrib.layers`.
name: Optional string used to prefix the initial state variable names, in
the case of a trainable initial state. If not provided, defaults to
the name of the module.
Returns:
Tuple of initial states from forward and backward RNNs.
"""
name = "state" if name is None else name
forward_initial_state = self._forward_core.initial_state(
batch_size, dtype, trainable, trainable_initializers,
trainable_regularizers, name=name+"_forward")
backward_initial_state = self._backward_core.initial_state(
batch_size, dtype, trainable, trainable_initializers,
trainable_regularizers, name=name+"_backward")
return forward_initial_state, backward_initial_state
@property
def state_size(self):
"""Flattened state size of cores."""
return self._forward_core.state_size, self._backward_core.state_size
@property
def output_size(self):
"""Flattened output size of cores."""
return self._forward_core.output_size, self._backward_core.output_size
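# A brief usage sketch (illustrative only, not called anywhere): running a
# BidirectionalRNN over a time-major sequence whose time dimension is
# statically known. The LSTM sizes and shapes are arbitrary examples.
def _example_bidirectional_rnn_usage():
  from sonnet.python.modules import gated_rnn  # Deferred import, sketch only.
  bidir = BidirectionalRNN(forward_core=gated_rnn.LSTM(hidden_size=32),
                           backward_core=gated_rnn.LSTM(hidden_size=32))
  # Inputs are [time, batch, features]; the time dimension must be static.
  input_sequence = tf.placeholder(tf.float32, shape=[10, 8, 16])
  state = bidir.initial_state(batch_size=8)
  result = bidir(input_sequence, state)
  # result["outputs"]["forward"] and result["outputs"]["backward"] each have
  # shape [time, batch, hidden_size]; result["state"] holds per-step states.
  return result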
| sonnet-1 | sonnet/python/modules/basic_rnn.py |
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Sonnet exception classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class Error(Exception):
"""Base class for all errors from snt.
This is thrown to indicate a Neural Network specific problem, e.g. wrong
module arity, module is not connected to the graph when it should be,
tried to wire together incompatible modules, etc.
"""
class NotConnectedError(Error):
"""Error raised when operating on a module that has not yet been connected.
Some module properties / methods are valid to access before the module has
been connected into the graph, but some are not. This Error is raised when
the user attempts to do anything not valid before connection.
"""
class ParentNotBuiltError(Error):
"""Error raised when the parent of a module has not been built yet.
For example, when making a transpose of modules that inherit from
`module.Transposable`, the parent has to be connected to the graph before the
child transpose to ensure that shape inference has already occurred.
"""
class IncompatibleShapeError(Error):
"""Error raised when the shape of the input at build time is incompatible."""
class UnderspecifiedError(Error):
"""Error raised when too little information is available.
This does not typically mean the user is trying to do something that doesn't
work (in which case `IncompatibleShapeError` should be used), just that
some more information needs to be provided in order to build the Graph.
"""
class NotSupportedError(Error):
"""Error raised when something that cannot be supported is requested.
For example a Dilated Convolution module cannot be transposed.
"""
class NotInitializedError(Error):
"""Error raised when connecting an uninitialized Sonnet module.
Before they can be connected, all Sonnet modules must call
`AbstractModule.__init__` (e.g. via a `super` call).
"""
class DifferentGraphError(Error):
"""Error raised when trying to connect a Sonnet module to multiple Graphs."""
class ModuleInfoError(Error):
"""Error raised when Sonnet `ModuleInfo` cannot be serialized."""
| sonnet-1 | sonnet/python/modules/base_errors.py |
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for Pondering Recurrent cores in sonnet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
from absl.testing import parameterized
import numpy as np
from sonnet.python.modules import basic_rnn
from sonnet.python.modules import gated_rnn
from sonnet.python.modules import pondering_rnn
from sonnet.python.modules import rnn_core
import tensorflow.compat.v1 as tf
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib.eager.python import tfe as contrib_eager
nest = contrib_framework.nest
_VALUES_A = [1., np.array([2, 3.5]), np.array([[-1., -1.], [0., 2.]])]
_VALUES_B = [-0.5, np.array([2.25, 3.]), np.array([[1., -1.], [1., -2.]])]
def _build_nested_tensor(values):
tensor = (tf.constant(values[0], dtype=tf.float32),
(tf.constant(values[1], dtype=tf.float32),
tf.constant(values[2], dtype=tf.float32)))
return tensor
class OutputTupleCore(rnn_core.RNNCore):
"""Dummy core with multiple outputs."""
@property
def output_size(self):
return tf.TensorShape([1]), tf.TensorShape([1])
@property
def state_size(self):
return tf.TensorShape([1])
def _build(self):
pass
class Output2DCore(rnn_core.RNNCore):
"""Dummy core with 2D output."""
@property
def output_size(self):
return tf.TensorShape([1, 1])
@property
def state_size(self):
return tf.TensorShape([1])
def _build(self):
pass
@contrib_eager.run_all_tests_in_graph_and_eager_modes
class ACTCoreTest(tf.test.TestCase, parameterized.TestCase):
def _test_nested(self, tensor, values_expected):
values_out = self.evaluate(tensor)
self.assertLen(values_out, 2)
self.assertLen(values_out[1], 2)
self.assertEqual(values_expected[0], values_out[0])
self.assertTrue(np.all(np.equal(values_expected[1], values_out[1][0])))
self.assertTrue(np.all(np.equal(values_expected[2], values_out[1][1])))
def testNestedAdd(self):
values_c = [a + b for a, b in zip(_VALUES_A, _VALUES_B)]
tf_a = _build_nested_tensor(_VALUES_A)
tf_b = _build_nested_tensor(_VALUES_B)
tf_add = pondering_rnn._nested_add(tf_a, tf_b)
self._test_nested(tf_add, values_c)
def testNestedUnaryMul(self):
mul_constant = 0.5
values_mul = [a * mul_constant for a in _VALUES_A]
tf_a = _build_nested_tensor(_VALUES_A)
tf_mul = pondering_rnn._nested_unary_mul(
tf_a, tf.constant(mul_constant, dtype=tf.float32))
self._test_nested(tf_mul, values_mul)
def testNestedUnaryMul_multiDim(self):
"""Tests _nested_unary_mul broadcasts dimensions correctly."""
nested_a = tf.ones([2, 3, 4])
p = tf.ones([2, 1])
output = pondering_rnn._nested_unary_mul(nested_a, p)
self.assertEqual(output.shape.as_list(), [2, 3, 4])
def testNestedZerosLike(self):
zeros = [0., np.array([0., 0.]), np.array([[0., 0.], [0., 0.]])]
tf_a = _build_nested_tensor(_VALUES_A)
tf_zeros = pondering_rnn._nested_zeros_like(tf_a)
self._test_nested(tf_zeros, zeros)
def _testACT(self, input_size, hidden_size, output_size, seq_len, batch_size,
core, get_state_for_halting, max_steps=0):
threshold = 0.99
act = pondering_rnn.ACTCore(
core, output_size, threshold, get_state_for_halting,
max_steps=max_steps)
seq_input = tf.random_uniform(shape=(seq_len, batch_size, input_size))
initial_state = core.initial_state(batch_size)
seq_output = tf.nn.dynamic_rnn(
act, seq_input, time_major=True, initial_state=initial_state)
for tensor in nest.flatten(seq_output):
self.assertEqual(seq_input.dtype, tensor.dtype)
self.evaluate(tf.global_variables_initializer())
output = self.evaluate(seq_output)
(final_out, (iteration, r_t)), final_cumul_state = output
self.assertEqual((seq_len, batch_size, output_size),
final_out.shape)
self.assertEqual((seq_len, batch_size, 1),
iteration.shape)
self.assertTrue(np.all(iteration == np.floor(iteration)))
state_shape = get_state_for_halting(initial_state).get_shape().as_list()
self.assertEqual(tuple(state_shape),
get_state_for_halting(final_cumul_state).shape)
self.assertEqual((seq_len, batch_size, 1), r_t.shape)
self.assertTrue(np.all(r_t >= 0))
self.assertTrue(np.all(r_t <= threshold))
@parameterized.parameters((13, 11, 7, 3, 5),
(3, 3, 3, 1, 5),
(1, 1, 1, 1, 1))
def testACTLSTM(
self, input_size, hidden_size, output_size, seq_len, batch_size):
"""Tests ACT using an LSTM for the core."""
lstm = gated_rnn.LSTM(hidden_size)
def get_hidden_state(state):
hidden, unused_cell = state
return hidden
self._testACT(input_size, hidden_size, output_size, seq_len, batch_size,
lstm, get_hidden_state)
@parameterized.parameters((13, 11, 7, 3, 5, 0),
(3, 3, 3, 1, 5, 0),
(1, 1, 1, 1, 1, 0),
(1, 1, 1, 1, 1, 10))
def testACTVanilla(
self, input_size, hidden_size, output_size, seq_len, batch_size,
max_steps):
"""Tests ACT using an LSTM for the core."""
vanilla = basic_rnn.VanillaRNN(hidden_size)
def get_state(state):
return state
self._testACT(input_size, hidden_size, output_size, seq_len, batch_size,
vanilla, get_state, max_steps)
def testOutputTuple(self):
core = OutputTupleCore(name="output_tuple_core")
err = "Output of core should be single Tensor."
with self.assertRaisesRegexp(ValueError, err):
pondering_rnn.ACTCore(core, 1, 0.99, lambda state: state)
def testOutput2D(self):
core = Output2DCore(name="output_2d_core")
err = "Output of core should be 1D."
with self.assertRaisesRegexp(ValueError, err):
pondering_rnn.ACTCore(core, 1, 0.99, lambda state: state)
if __name__ == "__main__":
tf.test.main()
| sonnet-1 | sonnet/python/modules/pondering_rnn_test.py |
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Implementation of convolutional Sonnet modules.
Classes defining convolutional operations, inheriting from `snt.Module`, with
easy weight sharing.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import numbers
# Dependency imports
import numpy as np
import six
from sonnet.python.modules import base
from sonnet.python.modules import util
import tensorflow.compat.v1 as tf
# Strings for TensorFlow convolution padding modes. See the following
# documentation for an explanation of VALID versus SAME:
# https://www.tensorflow.org/api_docs/python/tf/nn/convolution
SAME = "SAME"
VALID = "VALID"
FULL = "FULL"
CAUSAL = "CAUSAL"
REVERSE_CAUSAL = "REVERSE_CAUSAL"
CONV_OP_ALLOWED_PADDINGS = {SAME, VALID}
ALLOWED_PADDINGS = {
SAME, VALID, FULL, CAUSAL, REVERSE_CAUSAL
}
CONSTANT_PADDING = "CONSTANT"
REFLECT_PADDING = "REFLECT"
SYMMETRIC_PADDING = "SYMMETRIC"
ALLOWED_PADDING_VALUES = {CONSTANT_PADDING, REFLECT_PADDING, SYMMETRIC_PADDING}
DATA_FORMAT_NCW = "NCW"
DATA_FORMAT_NWC = "NWC"
SUPPORTED_1D_DATA_FORMATS = {DATA_FORMAT_NCW, DATA_FORMAT_NWC}
DATA_FORMAT_NCHW = "NCHW"
DATA_FORMAT_NHWC = "NHWC"
SUPPORTED_2D_DATA_FORMATS = {DATA_FORMAT_NCHW, DATA_FORMAT_NHWC}
DATA_FORMAT_NDHWC = "NDHWC"
DATA_FORMAT_NCDHW = "NCDHW"
SUPPORTED_3D_DATA_FORMATS = {DATA_FORMAT_NDHWC, DATA_FORMAT_NCDHW}
def _default_transpose_size(input_shape, stride, kernel_shape=None,
padding=SAME):
"""Returns default (maximal) output shape for a transpose convolution.
In general, there are multiple possible output shapes that a transpose
  convolution with a given `input_shape` can map to. This function returns the
  output shape from which a forward convolution with the given stride and
  padding would produce `input_shape`, i.e. the maximal valid output shape
  with the given configuration:
if the padding type is SAME then: output_shape = input_shape * stride
if the padding type is VALID then: output_shape = input_shape * stride +
kernel_shape - 1
See the following documentation for an explanation of VALID versus SAME
padding modes:
https://www.tensorflow.org/versions/r0.8/api_docs/python/nn.html#convolution
Args:
input_shape: Sequence of sizes of each dimension of the input, excluding
batch and channel dimensions.
stride: Sequence or integer of kernel strides, excluding batch and channel
dimension strides.
kernel_shape: Sequence or integer of kernel sizes.
padding: Padding algorithm, either `snt.SAME` or `snt.VALID`.
Returns:
output_shape: A tuple of sizes for a transposed convolution that divide
evenly with the given strides, kernel shapes, and padding algorithm.
"""
if not input_shape:
raise TypeError("input_shape is None; if using Sonnet, are you sure you "
"have connected the module to inputs?")
input_length = len(input_shape)
stride = _fill_and_verify_parameter_shape(stride, input_length, "stride")
padding = _verify_conv_op_supported_padding(padding)
output_shape = tuple(x * y for x, y in zip(input_shape, stride))
if padding == VALID:
kernel_shape = _fill_and_verify_parameter_shape(kernel_shape, input_length,
"kernel")
output_shape = tuple(x + y - 1 for x, y in zip(output_shape, kernel_shape))
return output_shape
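# Worked example (illustrative only) of the formulas above: for a 4x4 input
# with stride 2, SAME padding gives output_shape = input_shape * stride, and
# VALID padding additionally adds kernel_shape - 1 along each dimension.
def _example_default_transpose_size():
  assert _default_transpose_size((4, 4), 2, padding=SAME) == (8, 8)
  assert _default_transpose_size(
      (4, 4), 2, kernel_shape=3, padding=VALID) == (10, 10)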
def _fill_shape(x, n):
"""Converts a dimension to a tuple of dimensions of a given size.
This is used to allow shorthand notation for various configuration parameters.
A user can provide either, for example, `2` or `[2, 2]` as a kernel shape, and
this function returns `(2, 2)` in both cases. Passing `[1, 2]` will return
`(1, 2)`.
Args:
x: An integer, tf.Dimension, or an iterable of them.
n: An integer, the size of the desired output list
Returns:
If `x` is an integer, a tuple of size `n` containing `n` copies of `x`.
If `x` is an iterable of integers or tf.Dimension of size `n`, it returns
`tuple(x)`.
Raises:
TypeError: If n is not a positive integer;
or if x is neither integer nor an iterable of size n.
"""
if not isinstance(n, numbers.Integral) or n < 1:
raise TypeError("n must be a positive integer")
if (isinstance(x, numbers.Integral) or isinstance(x, tf.Dimension)) and x > 0:
return (x,) * n
try:
if len(x) == n and all(v > 0 for v in x):
return tuple(x)
except TypeError:
pass
raise TypeError("x is {}, must be either a positive integer "
"or an iterable of positive integers of size {}"
.format(x, n))
def _fill_and_verify_parameter_shape(x, n, parameter_label):
"""Expands x if necessary into a `n`-D kernel shape and reports errors."""
try:
return _fill_shape(x, n)
except TypeError as e:
raise base.IncompatibleShapeError("Invalid " + parameter_label + " shape: "
"{}".format(e))
def _verify_conv_op_supported_padding(padding):
"""Verifies that the given padding type is supported for conv ops.
Args:
padding: One of CONV_OP_ALLOWED_PADDINGS.
Returns:
padding.
Raises:
ValueError: If padding is not one of CONV_OP_ALLOWED_PADDINGS.
"""
if padding not in CONV_OP_ALLOWED_PADDINGS:
raise ValueError(
"Padding must be member of '{}', not {}".format(
CONV_OP_ALLOWED_PADDINGS, padding))
return padding
def _verify_padding_value(padding_value):
"""Verifies that the given padding mode is supported.
Args:
padding_value: One of ALLOWED_PADDING_VALUES.
Returns:
padding_value.
Raises:
ValueError: If padding_value is not one of ALLOWED_PADDING_VALUES.
"""
if padding_value not in ALLOWED_PADDING_VALUES:
raise ValueError(
"Padding must be member of '{}', not {}".format(
ALLOWED_PADDING_VALUES, padding_value))
return padding_value
def _fill_and_verify_padding(padding, n):
"""Verifies that the provided padding is supported and expands to size n.
Args:
padding: One of ALLOWED_PADDINGS, or an iterable of them.
n: An integer, the size of the desired output list.
Returns:
If `padding` is one of ALLOWED_PADDINGS, a tuple of size `n` containing `n`
copies of `padding`.
If `padding` is an iterable of ALLOWED_PADDINGS of size `n`, it returns
    `tuple(padding)`.
Raises:
TypeError: If n is not a positive integer; if padding is neither one of
ALLOWED_PADDINGS nor an iterable of ALLOWED_PADDINGS of size n.
"""
if not isinstance(n, numbers.Integral) or n < 1:
raise TypeError("n must be a positive integer")
if isinstance(padding, six.string_types) and padding in ALLOWED_PADDINGS:
return (padding,) * n
try:
if len(padding) == n and all(p in ALLOWED_PADDINGS for p in padding):
return tuple(padding)
except TypeError:
pass
raise TypeError("padding is {}, must be member of '{}' or an iterable of "
"these of size {}".format(padding, ALLOWED_PADDINGS, n))
def _padding_to_conv_op_padding(padding, padding_value):
"""Whether to use SAME or VALID for the underlying convolution op.
Args:
padding: A tuple of members of ALLOWED_PADDINGS, e.g. as returned from
`_fill_and_verify_padding`.
padding_value: A string of ALLOWED_PADDING_VALUES.
Returns:
One of CONV_OP_ALLOWED_PADDINGS, the padding method to use for the
underlying convolution op.
Raises:
ValueError: If padding is not a tuple.
"""
if not isinstance(padding, tuple):
raise ValueError("padding should be a tuple.")
if all(p == SAME for p in padding) and padding_value == CONSTANT_PADDING:
# If we want SAME padding for all dimensions then we can use SAME for the
# conv and avoid doing any extra padding.
return SAME
else:
# Otherwise we prefer to use VALID, since we can implement all the other
# padding types just by adding some extra padding before doing a VALID conv.
# (We could use SAME but then we'd also have to crop outputs in some cases).
return VALID
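# Small illustrative check of the rule above (not called anywhere): uniform
# SAME padding with constant padding values maps directly onto the SAME conv
# op; anything else falls back to VALID plus explicit pre-padding.
def _example_padding_to_conv_op_padding():
  assert _padding_to_conv_op_padding((SAME, SAME), CONSTANT_PADDING) == SAME
  assert _padding_to_conv_op_padding((CAUSAL, SAME), CONSTANT_PADDING) == VALID
  assert _padding_to_conv_op_padding((SAME, SAME), REFLECT_PADDING) == VALID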
def _fill_and_one_pad_stride(stride, n, data_format=DATA_FORMAT_NHWC):
"""Expands the provided stride to size n and pads it with 1s."""
if isinstance(stride, numbers.Integral) or (
isinstance(stride, collections.Iterable) and len(stride) <= n):
if data_format.startswith("NC"):
return (1, 1,) + _fill_shape(stride, n)
elif data_format.startswith("N") and data_format.endswith("C"):
return (1,) + _fill_shape(stride, n) + (1,)
else:
raise ValueError(
"Invalid data_format {:s}. Must start with N and have a channel dim "
"either follow the N dim or come at the end".format(data_format))
elif isinstance(stride, collections.Iterable) and len(stride) == n + 2:
return stride
else:
raise base.IncompatibleShapeError(
"stride is {} ({}), must be either a positive integer or an iterable of"
" positive integers of size {}".format(stride, type(stride), n))
def _verify_inputs(inputs, channel_index, data_format):
"""Verifies `inputs` is semantically correct.
Args:
inputs: An input tensor provided by the user.
channel_index: The index of the channel dimension.
data_format: The format of the data in `inputs`.
Raises:
base.IncompatibleShapeError: If the shape of `inputs` doesn't match
`data_format`.
base.UnderspecifiedError: If the channel dimension of `inputs` isn't
defined.
TypeError: If input Tensor dtype is not compatible with either
`tf.float16`, `tf.bfloat16`, `tf.float32` or `tf.float64`.
"""
# Check shape.
input_shape = tuple(inputs.get_shape().as_list())
if len(input_shape) != len(data_format):
raise base.IncompatibleShapeError((
"Input Tensor must have rank {} corresponding to "
"data_format {}, but instead was {} of rank {}.").format(
len(data_format), data_format, input_shape, len(input_shape)))
# Check type.
if not (tf.float16.is_compatible_with(inputs.dtype) or
tf.bfloat16.is_compatible_with(inputs.dtype) or
tf.float32.is_compatible_with(inputs.dtype) or
tf.float64.is_compatible_with(inputs.dtype)):
raise TypeError(
"Input must have dtype tf.float16, tf.bfloat16, tf.float32 or "
"tf.float64, but dtype was {}".format(inputs.dtype))
# Check channel dim.
input_channels = input_shape[channel_index]
if input_channels is None:
raise base.UnderspecifiedError(
"Number of input channels must be known at module build time")
def create_weight_initializer(fan_in_shape, dtype=tf.float32):
"""Returns a default initializer for the weights of a convolutional module."""
stddev = 1 / math.sqrt(np.prod(fan_in_shape))
return tf.truncated_normal_initializer(stddev=stddev, dtype=dtype)
def create_bias_initializer(unused_bias_shape, dtype=tf.float32):
"""Returns a default initializer for the biases of a convolutional module."""
return tf.zeros_initializer(dtype=dtype)
def _find_channel_index(data_format):
"""Returns the index of the channel dimension.
Args:
data_format: A string of characters corresponding to Tensor dimensionality.
Returns:
channel_index: An integer indicating the channel dimension.
Raises:
ValueError: If no channel dimension was found.
"""
for i, c in enumerate(data_format):
if c == "C":
return i
raise ValueError("data_format requires a channel dimension. Got: {}"
.format(data_format))
def _apply_bias(inputs, outputs, channel_index, data_format, output_channels,
initializers, partitioners, regularizers):
"""Initialize and apply a bias to the outputs.
  Figures out the shape of the bias vector, initializes it, and applies it.
Args:
    inputs: A Tensor laid out according to `data_format`.
    outputs: A Tensor laid out according to `data_format`.
channel_index: The index of the channel dimension in `inputs`.
data_format: Format of `inputs`.
output_channels: Channel dimensionality for `outputs`.
initializers: Optional dict containing ops to initialize the biases
(with key 'b').
partitioners: Optional dict containing partitioners to partition the
biases (with key 'b').
regularizers: Optional dict containing regularizers for the biases
(with key 'b').
Returns:
b: The constructed bias variable.
outputs: The `outputs` argument that has had a bias applied.
"""
bias_shape = (output_channels,)
if "b" not in initializers:
initializers["b"] = create_bias_initializer(bias_shape,
dtype=inputs.dtype)
b = tf.get_variable("b",
shape=bias_shape,
dtype=inputs.dtype,
initializer=initializers["b"],
partitioner=partitioners.get("b", None),
regularizer=regularizers.get("b", None))
# tf.nn.bias_add only supports 2 data formats.
if data_format in (DATA_FORMAT_NHWC, DATA_FORMAT_NCHW):
# Supported as-is.
outputs = tf.nn.bias_add(outputs, b, data_format=data_format)
else:
# Create our own bias vector.
bias_correct_dim = [1] * len(data_format)
bias_correct_dim[channel_index] = output_channels
outputs += tf.reshape(b, bias_correct_dim)
return b, outputs
class _ConvND(base.AbstractModule):
"""N-dimensional convolution and dilated convolution module, including bias.
This acts as a light wrapper around the TensorFlow ops `tf.nn.convolution`
abstracting away variable creation and sharing.
"""
def __init__(self, output_channels, kernel_shape, stride=1, rate=1,
padding=SAME, use_bias=True, initializers=None,
partitioners=None, regularizers=None,
mask=None, data_format=DATA_FORMAT_NHWC,
padding_value=CONSTANT_PADDING, custom_getter=None,
name="conv_nd"):
"""Constructs a _ConvND module.
Args:
output_channels: Number of output channels. `output_channels` can be
either a number or a callable. In the latter case, since the function
invocation is deferred to graph construction time, the user must only
ensure that output_channels can be called, returning an integer,
when `build` is called.
kernel_shape: Sequence of kernel sizes (up to size N), or an integer.
`kernel_shape` will be expanded to define a kernel size in all
dimensions.
stride: Sequence of strides (up to size N), or an integer.
`stride` will be expanded to define stride in all dimensions.
rate: Sequence of dilation rates (of size N), or integer that is used to
define dilation rate in all dimensions. 1 corresponds to standard ND
convolution, `rate > 1` corresponds to dilated convolution. Cannot be
> 1 if any of `stride` is also > 1.
padding: Padding algorithm. Either `snt.SAME`, `snt.VALID`, `snt.FULL`,
`snt.CAUSAL`, `snt.REVERSE_CAUSAL`, or a sequence of these paddings
(up to size N).
* snt.SAME and snt.VALID are explained in the Tensorflow docs at
https://www.tensorflow.org/api_docs/python/tf/nn/convolution.
* snt.FULL pre- and post-pads with the maximum padding which does not
result in a convolution over just padded elements.
* snt.CAUSAL pre-pads to ensure that each output value only depends on
input values at the same or preceding indices ("no dependence on the
future").
* snt.REVERSE_CAUSAL post-pads to ensure that each output value only
depends on input values at the same or *greater* indices ("no
dependence on the past").
If you use the same padding for all dimensions, and it is one of SAME
or VALID, then this is supported directly by the underlying
convolution op. In all other cases, the input data will be padded
using tf.pad before calling the convolution op.
use_bias: Whether to include bias parameters. Default `True`.
initializers: Optional dict containing ops to initialize the filters (with
key 'w') or biases (with key 'b'). The default initializer for the
weights is a truncated normal initializer, which is commonly used
when the inputs are zero centered (see
https://arxiv.org/pdf/1502.03167v3.pdf). The default initializer for
the bias is a zero initializer.
partitioners: Optional dict containing partitioners to partition
weights (with key 'w') or biases (with key 'b'). As a default, no
partitioners are used.
regularizers: Optional dict containing regularizers for the filters
(with key 'w') and the biases (with key 'b'). As a default, no
regularizers are used. A regularizer should be a function that takes
a single `Tensor` as an input and returns a scalar `Tensor` output,
e.g. the L1 and L2 regularizers in `tf.contrib.layers`.
      mask: An object convertible to an N-D tensor which is multiplied
        component-wise with the weights (optional).
data_format: The data format of the input.
padding_value: The type of padding to use, either "CONSTANT", "SYMMETRIC"
or "REFLECT", as supported by the underlying tf.pad
(https://www.tensorflow.org/api_docs/python/tf/pad). Can only be set
globally for all dimensions. Defaults to "CONSTANT" which will pad
with zeros, potentially directly via the underlying convolution op if
the padding is SAME or VALID for all dimensions.
custom_getter: Callable or dictionary of callables to use as
custom getters inside the module. If a dictionary, the keys
correspond to regexes to match variable names. See the
`tf.get_variable` documentation for information about the
custom_getter API.
name: Name of the module.
Raises:
      base.IncompatibleShapeError: If the given kernel shape is not an integer;
        or if the given kernel shape is not a sequence of N integers.
      base.IncompatibleShapeError: If the given stride is not an integer; or if
        the given stride is not a sequence of N integers.
      base.IncompatibleShapeError: If the given rate is not an integer; or if
        the given rate is not a sequence of N integers.
base.IncompatibleShapeError: If a mask is a TensorFlow Tensor with
a not fully defined shape.
base.NotSupportedError: If rate in any dimension and the stride in any
dimension are simultaneously > 1.
ValueError: If the given padding is not `snt.VALID`, `snt.SAME`,
`snt.FULL`, `snt.CAUSAL`, `snt.REVERSE_CAUSAL` or a sequence of these.
KeyError: If `initializers`, `partitioners` or `regularizers` contain any
keys other than 'w' or 'b'.
TypeError: If any of the given initializers, partitioners or regularizers
are not callable.
TypeError: If mask is given and it is not convertible to a Tensor.
ValueError: If the passed-in data_format doesn't have a channel dimension.
"""
super(_ConvND, self).__init__(custom_getter=custom_getter, name=name)
self._n = len(data_format) - 2
self._input_channels = None
self._output_channels = output_channels
self._kernel_shape = _fill_and_verify_parameter_shape(kernel_shape, self._n,
"kernel")
self._data_format = data_format
# The following is for backwards-compatibility from when we used to accept
# N-strides of the form [1, ..., 1].
if (isinstance(stride, collections.Sequence) and
len(stride) == len(data_format)):
self._stride = tuple(stride)[1:-1]
else:
self._stride = _fill_and_verify_parameter_shape(stride, self._n, "stride")
self._rate = _fill_and_verify_parameter_shape(rate, self._n, "rate")
if any(x > 1 for x in self._stride) and any(x > 1 for x in self._rate):
raise base.NotSupportedError("Cannot have stride > 1 with rate > 1")
self._padding = _fill_and_verify_padding(padding, self._n)
self._padding_value = _verify_padding_value(padding_value)
self._conv_op_padding = _padding_to_conv_op_padding(
self._padding, self._padding_value)
self._use_bias = use_bias
self.possible_keys = self.get_possible_initializer_keys(use_bias=use_bias)
self._initializers = util.check_initializers(
initializers, self.possible_keys)
self._partitioners = util.check_partitioners(
partitioners, self.possible_keys)
self._regularizers = util.check_regularizers(
regularizers, self.possible_keys)
if mask is not None:
if isinstance(mask, (tf.Tensor, list, tuple, np.ndarray)):
self._mask = tf.convert_to_tensor(mask)
if not (tf.float16.is_compatible_with(self._mask.dtype) or
tf.bfloat16.is_compatible_with(self._mask.dtype) or
tf.float32.is_compatible_with(self._mask.dtype) or
tf.float64.is_compatible_with(self._mask.dtype)):
raise TypeError(
"Mask needs to have dtype float16, bfloat16, float32 or float64")
        if not self._mask.get_shape().is_fully_defined():
          raise base.IncompatibleShapeError(
              "Mask needs to have a statically defined shape")
else:
raise TypeError("Invalid type for mask: {}".format(type(mask)))
else:
self._mask = None
self._channel_index = _find_channel_index(self._data_format)
@classmethod
def get_possible_initializer_keys(cls, use_bias=True):
return {"w", "b"} if use_bias else {"w"}
def _build(self, inputs):
"""Connects the _ConvND module into the graph, with input Tensor `inputs`.
If this is not the first time the module has been connected to the graph,
the input Tensor provided here must have the same number of channels, in
order for the existing variables to be the correct size for the
multiplication; the batch size and input spatial dimensions may differ for
each connection.
Args:
      inputs: An N-D Tensor of the same rank as `data_format`, and either of
        types `tf.float16`, `tf.bfloat16`, `tf.float32` or `tf.float64`.
    Returns:
      An N-D Tensor of shape [batch_size, output_dim_1, output_dim_2, ...,
        output_channels].
Raises:
ValueError: If connecting the module into the graph any time after the
first time and the inferred size of the input does not match previous
invocations.
base.IncompatibleShapeError: If the input tensor has the wrong number
of dimensions.
base.UnderspecifiedError: If the channel dimension of `inputs` isn't
defined.
base.IncompatibleShapeError: If a mask is present and its shape is
incompatible with the shape of the weights.
TypeError: If input Tensor dtype is not compatible with either
`tf.float16`, `tf.bfloat16`, `tf.float32` or `tf.float64`.
"""
_verify_inputs(inputs, self._channel_index, self._data_format)
self._input_shape = tuple(inputs.get_shape().as_list())
self._input_channels = self._input_shape[self._channel_index]
self._w = self._construct_w(inputs)
if self._mask is not None:
w = self._apply_mask()
else:
w = self._w
inputs = self._pad_input(inputs)
outputs = self._apply_conv(inputs, w)
if self._use_bias:
self._b, outputs = _apply_bias(
inputs, outputs, self._channel_index, self._data_format,
self.output_channels, self._initializers, self._partitioners,
self._regularizers)
return outputs
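  # Connection sketch (illustrative only; sizes are made up): connecting the
  # same module instance twice reuses the 'w' (and 'b') variables created on
  # the first call, provided the channel count matches:
  #
  #   conv = Conv2D(output_channels=8, kernel_shape=3)
  #   y_train = conv(tf.placeholder(tf.float32, [32, 28, 28, 1]))
  #   y_eval = conv(tf.placeholder(tf.float32, [1, 56, 56, 1]))  # same weights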
def _pad_input(self, inputs):
"""Pad input in case the desired padding type requires it.
VALID and SAME padding types are directly supported by tensorflow
convolution ops, so don't require us to pad input ourselves, at least
in cases where the same method is used for all dimensions.
Other padding types (FULL, CAUSAL, REVERSE_CAUSAL) aren't directly supported
by conv ops but can be implemented by using VALID and padding the input
appropriately ourselves.
If different padding types are used for different dimensions, we use VALID
but pad the input ourselves along any dimensions that require other padding
types.
Args:
inputs: A Tensor of shape `data_format` and of type `tf.float16`,
`tf.bfloat16`, `tf.float32` or `tf.float64`.
Returns:
inputs: The `inputs` argument that has had any required padding added.
"""
if all(p == self._conv_op_padding for p in self._padding):
# All axes require the same padding type that we're going to use for the
# underlying convolution op and we use the padding mode that is used by
# the convolution op, so nothing needs to be done:
return inputs
# In all other cases we use VALID as the underlying padding type, and for
# the axes which require something other than VALID, we pad inputs ourselves
# before the convolution.
assert self._conv_op_padding == VALID
def pad_amount(kernel_size, rate, padding):
"""Pre- and post-padding required for a particular axis before conv op."""
# The effective kernel size includes any holes/gaps introduced by the
# dilation rate. It's equal to kernel_size when rate == 1.
effective_kernel_size = int((kernel_size - 1) * rate + 1)
if padding == FULL:
return [effective_kernel_size - 1, effective_kernel_size - 1]
if padding == CAUSAL:
return [effective_kernel_size - 1, 0]
if padding == REVERSE_CAUSAL:
return [0, effective_kernel_size - 1]
if padding == SAME:
return [(effective_kernel_size - 1) // 2, effective_kernel_size // 2]
# padding == VALID
return [0, 0]
paddings = map(pad_amount, self._kernel_shape, self._rate, self._padding)
if self._data_format.startswith("NC"): # N, C, ...
paddings = [[0, 0], [0, 0]] + list(paddings)
else: # N, ..., C
paddings = [[0, 0]] + list(paddings) + [[0, 0]]
return tf.pad(inputs, paddings, mode=self._padding_value)
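  # Worked example for `pad_amount` above (illustrative numbers): with
  # kernel_size=3 and rate=2 the effective kernel size is (3 - 1) * 2 + 1 = 5,
  # so the [pre, post] padding applied per axis is:
  #   FULL           -> [4, 4]
  #   CAUSAL         -> [4, 0]
  #   REVERSE_CAUSAL -> [0, 4]
  #   SAME           -> [2, 2]
  #   VALID          -> [0, 0]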
def _apply_conv(self, inputs, w):
"""Apply a convolution operation on `inputs` using variable `w`.
Args:
inputs: A Tensor of shape `data_format` and of type `tf.float16`,
`tf.bfloat16`, `tf.float32` or `tf.float64`.
w: A weight matrix of the same type as `inputs`.
Returns:
outputs: The result of the convolution operation on `inputs`.
"""
outputs = tf.nn.convolution(inputs, w, strides=self._stride,
padding=self._conv_op_padding,
dilation_rate=self._rate,
data_format=self._data_format)
return outputs
def _construct_w(self, inputs):
"""Construct the convolution weight matrix.
    Figures out the shape of the weight matrix, initializes it, and returns it.
Args:
inputs: A Tensor of shape `data_format` and of type `tf.float16`,
`tf.bfloat16`, `tf.float32` or `tf.float64`.
Returns:
w: A weight matrix of the same type as `inputs`.
"""
weight_shape = self._kernel_shape + (self._input_channels,
self.output_channels)
if "w" not in self._initializers:
self._initializers["w"] = create_weight_initializer(weight_shape[:-1],
dtype=inputs.dtype)
w = tf.get_variable("w",
shape=weight_shape,
dtype=inputs.dtype,
initializer=self._initializers["w"],
partitioner=self._partitioners.get("w", None),
regularizer=self._regularizers.get("w", None))
return w
def _apply_mask(self):
"""Applies the passed-in mask to the convolution matrix.
Returns:
w: A copy of the convolution matrix that has had the mask applied.
Raises:
base.IncompatibleShapeError: If the mask shape has more dimensions than
the weight matrix.
base.IncompatibleShapeError: If the mask and the weight matrix don't
match on shape.
"""
w = self._w
w_shape = w.get_shape()
mask_shape = self._mask.get_shape()
    if mask_shape.ndims > w_shape.ndims:
      raise base.IncompatibleShapeError(
          "Invalid mask rank: {}. Must not exceed the rank of the "
          "weights: {}".format(mask_shape.ndims, w_shape.ndims))
if mask_shape != w_shape[:mask_shape.ndims]:
raise base.IncompatibleShapeError(
"Invalid mask shape: {}. Weight shape: {}".format(
mask_shape, w_shape
)
)
# TF broadcasting is a bit fragile.
# Expand the shape of self._mask by one dim at a time to the right
# until the rank matches `weight_shape`.
while self._mask.get_shape().ndims < w_shape.ndims:
self._mask = tf.expand_dims(self._mask, -1)
# tf.Variable & tf.ResourceVariable don't support *=.
w = w * self._mask # pylint: disable=g-no-augmented-assignment
return w
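  # Broadcasting sketch for `_apply_mask` (illustrative; shapes and names are
  # made up): for a Conv2D with kernel_shape=(3, 3), 4 input channels and
  # output_channels=8 the weight has shape (3, 3, 4, 8). A mask of shape (3, 3)
  # passes the prefix check, is expanded to (3, 3, 1, 1) and broadcasts against
  # the weight:
  #
  #   mask = np.tril(np.ones((3, 3), dtype=np.float32))
  #   conv = Conv2D(output_channels=8, kernel_shape=3, mask=mask)
  #   y = conv(tf.placeholder(tf.float32, [None, 32, 32, 4]))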
@property
def output_channels(self):
"""Returns the number of output channels."""
if callable(self._output_channels):
self._output_channels = self._output_channels()
# Channel must be integer.
self._output_channels = int(self._output_channels)
return self._output_channels
@property
def kernel_shape(self):
"""Returns the kernel shape."""
return self._kernel_shape
@property
def stride(self):
"""Returns the stride."""
# Backwards compatibility with old stride format.
return _fill_and_one_pad_stride(self._stride, self._n, self._data_format)
@property
def rate(self):
"""Returns the dilation rate."""
return self._rate
@property
def padding(self):
"""Returns the padding algorithm used, if this is the same for all dims.
Use `.paddings` if you want a tuple with the padding algorithm used for each
dimension.
Returns:
The padding algorithm used, if this is the same for all dimensions.
Raises:
ValueError: If different padding algorithms are used for different
dimensions.
"""
# This is for backwards compatibility -- previously only a single
# padding setting was supported across all dimensions.
if all(p == self._padding[0] for p in self._padding):
return self._padding[0]
else:
raise ValueError("This layer uses different paddings for different "
"dimensions. Use .paddings if you want a tuple of "
"per-dimension padding settings.")
@property
def paddings(self):
"""Returns a tuple with the padding algorithm used for each dimension."""
return self._padding
@property
def conv_op_padding(self):
"""Returns the padding algorithm used for the underlying convolution op."""
return self._conv_op_padding
@property
def w(self):
"""Returns the Variable containing the weight matrix."""
self._ensure_is_connected()
return self._w
@property
def b(self):
"""Returns the Variable containing the bias.
Returns:
Variable object containing the bias, from the most recent __call__.
Raises:
base.NotConnectedError: If the module has not been connected to the graph
yet, meaning the variables do not exist.
AttributeError: If the module does not use bias.
"""
self._ensure_is_connected()
if not self._use_bias:
raise AttributeError(
"No bias Variable in Conv2D Module when `use_bias=False`.")
return self._b
@property
def has_bias(self):
"""Returns `True` if bias Variable is present in the module."""
return self._use_bias
@property
def initializers(self):
"""Returns the initializers dictionary."""
return self._initializers
@property
def partitioners(self):
"""Returns the partitioners dictionary."""
return self._partitioners
@property
def regularizers(self):
"""Returns the regularizers dictionary."""
return self._regularizers
@property
def mask(self):
"""Returns the mask."""
return self._mask
@property
def data_format(self):
"""Returns the data format."""
return self._data_format
# Implements Transposable interface.
@property
def input_shape(self):
"""Returns the input shape."""
self._ensure_is_connected()
return self._input_shape
@property
def input_channels(self):
"""Returns the number of input channels."""
if self._input_channels is None:
self._ensure_is_connected()
return self._input_channels
def clone(self, name=None):
"""Returns a cloned `_ConvND` module.
Args:
name: Optional string assigning name of cloned module. The default name
is constructed by appending "_clone" to `self.module_name`.
Returns:
A copy of the current class.
"""
if name is None:
name = self.module_name + "_clone"
return type(self)(output_channels=self.output_channels,
kernel_shape=self._kernel_shape,
stride=self._stride,
rate=self._rate,
padding=self._padding,
use_bias=self._use_bias,
initializers=self._initializers,
partitioners=self._partitioners,
regularizers=self._regularizers,
mask=self._mask,
data_format=self._data_format,
custom_getter=self._custom_getter,
name=name)
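# Illustrative sketch (hypothetical sizes): mixed per-dimension paddings are
# handled by `_ConvND._pad_input` on top of a VALID underlying op. For example,
# a Conv2D that is causal along the height axis but SAME along the width axis:
#
#   conv = Conv2D(output_channels=16, kernel_shape=(3, 3),
#                 padding=(CAUSAL, SAME))
#   y = conv(tf.placeholder(tf.float32, [None, 64, 64, 3]))
#
# Internally the input is padded with tf.pad ([2, 0] on height and [1, 1] on
# width for the 3x3 kernel) and the convolution op itself runs with VALID.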
class _ConvNDTranspose(base.AbstractModule):
"""Spatial transposed / reverse / up ND convolution module, including bias.
This acts as a light wrapper around the TensorFlow `conv_nd_transpose` ops,
abstracting away variable creation and sharing.
"""
def __init__(self, output_channels, output_shape=None, kernel_shape=None,
stride=1, padding=SAME, use_bias=True, initializers=None,
partitioners=None, regularizers=None,
data_format=DATA_FORMAT_NHWC, custom_getter=None,
name="conv_nd_transpose"):
"""Constructs a `ConvNDTranspose module`. Support for N = (1, 2, 3).
See the following documentation for an explanation of VALID versus SAME
padding modes:
https://www.tensorflow.org/api_docs/python/tf/nn/convolution
Args:
output_channels: Number of output channels.
Can be either a number or a callable. In the latter case, since the
function invocation is deferred to graph construction time, the user
must only ensure `output_channels` can be called, returning an
integer, when build is called.
output_shape: Output shape of transpose convolution.
Can be either an iterable of integers or `Dimension`s, a
`TensorShape`, or a callable. In the latter case, since the function
invocation is deferred to graph construction time, the user must only
ensure that `output_shape` can be called, returning an iterable of
output shapes when `build` is called. Note that `output_shape` defines
the size of output signal domain, as opposed to the shape of the
output `Tensor`. If a None value is given, a default shape is
automatically calculated (see docstring of
`_default_transpose_size` function for more details).
kernel_shape: Sequence of kernel sizes (of size N), or integer that is
used to define kernel size in all dimensions.
stride: Sequence of kernel strides (of size N), or integer that is used
to define stride in all dimensions.
padding: Padding algorithm, either `snt.SAME` or `snt.VALID`.
use_bias: Whether to include bias parameters. Default `True`.
initializers: Optional dict containing ops to initialize the filters (with
key 'w') or biases (with key 'b').
partitioners: Optional dict containing partitioners to partition
weights (with key 'w') or biases (with key 'b'). As a default, no
partitioners are used.
regularizers: Optional dict containing regularizers for the filters
(with key 'w') and the biases (with key 'b'). As a default, no
regularizers are used. A regularizer should be a function that takes
a single `Tensor` as an input and returns a scalar `Tensor` output,
e.g. the L1 and L2 regularizers in `tf.contrib.layers`.
data_format: The data format of the input.
custom_getter: Callable or dictionary of callables to use as
custom getters inside the module. If a dictionary, the keys
correspond to regexes to match variable names. See the
`tf.get_variable` documentation for information about the
custom_getter API.
name: Name of the module.
Raises:
      base.IncompatibleShapeError: If the given kernel shape is neither an
        integer nor a sequence of N integers.
      base.IncompatibleShapeError: If the given stride is neither an integer nor
        a sequence of N or N + 2 integers.
ValueError: If the given padding is not `snt.VALID` or `snt.SAME`.
ValueError: If the given kernel_shape is `None`.
KeyError: If `initializers`, `partitioners` or `regularizers` contain any
keys other than 'w' or 'b'.
TypeError: If any of the given initializers, partitioners or regularizers
are not callable.
ValueError: If the passed-in data_format doesn't have a channel dimension.
"""
super(_ConvNDTranspose, self).__init__(custom_getter=custom_getter,
name=name)
self._data_format = data_format
self._n = len(self._data_format) - 2
if self._n > 3:
raise base.NotSupportedError(
"We only support (1, 2, 3) convolution transpose operations. "
"Received data format of: {}".format(self._data_format))
self._output_channels = output_channels
if output_shape is None:
self._output_shape = None
self._use_default_output_shape = True
else:
self._use_default_output_shape = False
if callable(output_shape):
self._output_shape = output_shape
else:
self._output_shape = _fill_and_verify_parameter_shape(output_shape,
self._n,
"output_shape")
if kernel_shape is None:
raise ValueError("`kernel_shape` cannot be None.")
self._kernel_shape = _fill_and_verify_parameter_shape(kernel_shape, self._n,
"kernel")
if (isinstance(stride, collections.Sequence) and
len(stride) == len(data_format)):
if self._data_format.startswith("N") and self._data_format.endswith("C"):
if not stride[0] == stride[-1] == 1:
raise base.IncompatibleShapeError(
"Invalid stride: First and last element must be 1.")
elif self._data_format.startswith("NC"):
if not stride[0] == stride[1] == 1:
raise base.IncompatibleShapeError(
"Invalid stride: First and second element must be 1.")
self._stride = tuple(stride)
else:
self._stride = _fill_and_one_pad_stride(stride, self._n,
self._data_format)
self._padding = _verify_conv_op_supported_padding(padding)
self._use_bias = use_bias
self.possible_keys = self.get_possible_initializer_keys(use_bias=use_bias)
self._initializers = util.check_initializers(
initializers, self.possible_keys)
self._partitioners = util.check_partitioners(
partitioners, self.possible_keys)
self._regularizers = util.check_regularizers(
regularizers, self.possible_keys)
self._channel_index = _find_channel_index(self._data_format)
@classmethod
def get_possible_initializer_keys(cls, use_bias=True):
return {"w", "b"} if use_bias else {"w"}
def _build(self, inputs):
"""Connects the _ConvNDTranspose module into the graph.
If this is not the first time the module has been connected to the graph,
the input Tensor provided here must have the same final N dimensions, in
order for the existing variables to be the correct size for the
multiplication. The batch size may differ for each connection.
Args:
inputs: A Tensor of shape `data_format` and of type
`tf.float16`, `tf.bfloat16`, `tf.float32` or `tf.float64`.
Returns:
A Tensor of shape `data_format` and of type `tf.float16`, `tf.bfloat16`,
`tf.float32` or `tf.float64`.
Raises:
ValueError: If connecting the module into the graph any time after the
first time and the inferred size of the input does not match previous
invocations.
base.IncompatibleShapeError: If the input tensor has the wrong number
of dimensions.
base.UnderspecifiedError: If the channel dimension of `inputs` isn't
defined.
      base.IncompatibleShapeError: If `output_shape` is an iterable and does
        not have exactly N elements.
TypeError: If input Tensor dtype is not compatible with either
`tf.float16`, `tf.bfloat16`, `tf.float32` or `tf.float64`.
"""
_verify_inputs(inputs, self._channel_index, self._data_format)
self._input_shape = tuple(inputs.get_shape().as_list())
self._input_channels = self._input_shape[self._channel_index]
# First, figure out what the non-(N,C) dims will be.
if self._use_default_output_shape:
def _default_transpose_size_wrapper():
if self._data_format.startswith("NC"):
input_size = self._input_shape[2:]
stride = self.stride[2:]
else: # self._data_format == N*WC
input_size = self._input_shape[1:-1]
stride = self.stride[1:-1]
return _default_transpose_size(input_size,
stride,
kernel_shape=self._kernel_shape,
padding=self._padding)
self._output_shape = _default_transpose_size_wrapper
if len(self.output_shape) != self._n:
raise base.IncompatibleShapeError(
"Output shape must have rank {}, but instead was {}".format(
self._n, len(self.output_shape)))
# Now, construct the size of the output, including the N + C dims.
output_shape = self._infer_all_output_dims(inputs)
self._w = self._construct_w(inputs)
if self._n == 1:
# Add a dimension for the height.
if self._data_format == DATA_FORMAT_NWC:
h_dim = 1
two_dim_conv_data_format = DATA_FORMAT_NHWC
else: # self._data_format == DATA_FORMAT_NCW
h_dim = 2
two_dim_conv_data_format = DATA_FORMAT_NCHW
inputs = tf.expand_dims(inputs, h_dim)
two_dim_conv_stride = self.stride[:h_dim] + (1,) + self.stride[h_dim:]
outputs = tf.nn.conv2d_transpose(inputs,
self._w,
output_shape,
strides=two_dim_conv_stride,
padding=self._padding,
data_format=two_dim_conv_data_format)
# Remove the height dimension to return a 3D tensor.
outputs = tf.squeeze(outputs, [h_dim])
elif self._n == 2:
outputs = tf.nn.conv2d_transpose(inputs,
self._w,
output_shape,
strides=self._stride,
padding=self._padding,
data_format=self._data_format)
else:
outputs = tf.nn.conv3d_transpose(inputs,
self._w,
output_shape,
strides=self._stride,
padding=self._padding,
data_format=self._data_format)
if self._use_bias:
self._b, outputs = _apply_bias(
inputs, outputs, self._channel_index, self._data_format,
self._output_channels, self._initializers, self._partitioners,
self._regularizers)
outputs = self._recover_shape_information(inputs, outputs)
return outputs
def _construct_w(self, inputs):
"""Construct the convolution weight matrix.
    Figures out the shape of the weight matrix, initializes it, and returns it.
Args:
inputs: A Tensor of shape `data_format` and of type `tf.float16`,
`tf.bfloat16`, `tf.float32` or `tf.float64`.
Returns:
w: A weight matrix of the same type as `inputs`.
"""
# Height dim needs to be added to everything for 1D Conv
# as we'll be using the 2D Conv Transpose op.
if self._n == 1:
weight_shape = (1,) + self._kernel_shape + (self.output_channels,
self._input_channels)
else:
weight_shape = self._kernel_shape + (self.output_channels,
self._input_channels)
if "w" not in self._initializers:
fan_in_shape = self._kernel_shape + (self._input_channels,)
self._initializers["w"] = create_weight_initializer(fan_in_shape,
dtype=inputs.dtype)
w = tf.get_variable("w",
shape=weight_shape,
dtype=inputs.dtype,
initializer=self._initializers["w"],
partitioner=self._partitioners.get("w", None),
regularizer=self._regularizers.get("w", None))
return w
def _infer_all_output_dims(self, inputs):
"""Calculate the output shape for `inputs` after a deconvolution.
Args:
inputs: A Tensor of shape `data_format` and of type `tf.float16`,
`tf.bfloat16`, `tf.float32` or `tf.float64`.
Returns:
output_shape: A tensor of shape (`batch_size`, `conv_output_shape`).
"""
# Use tensorflow shape op to manipulate inputs shape, so that unknown batch
# size - which can happen when using input placeholders - is handled
    # correctly.
batch_size = tf.expand_dims(tf.shape(inputs)[0], 0)
out_channels = (self.output_channels,)
# Height dim needs to be added to everything for 1D Conv
# as we'll be using the 2D Conv Transpose op.
if self._n == 1:
out_shape = (1,) + self.output_shape
else:
out_shape = self.output_shape
if self._data_format.startswith("NC"):
out_shape_tuple = out_channels + out_shape
elif self._data_format.startswith("N") and self._data_format.endswith("C"):
out_shape_tuple = out_shape + out_channels
else:
raise ValueError("Unsupported data format: {}".format(self._data_format))
output_shape = tf.concat([batch_size, out_shape_tuple], 0)
return output_shape
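  # Shape-assembly sketch for `_infer_all_output_dims` (illustrative numbers):
  # for a 2D transpose with output_shape=(28, 28), output_channels=16 and
  # data_format=NHWC the op receives the dynamic shape
  #   concat([[batch_size], (28, 28, 16)]) == [batch_size, 28, 28, 16],
  # whereas NCHW yields [batch_size, 16, 28, 28]. In the 1D case a singleton
  # height dimension is prepended to the output shape before concatenation.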
def _recover_shape_information(self, inputs, outputs):
"""Recover output tensor shape value to enable shape inference.
The batch size of `inputs` isn't preserved by the convolution op. Calculate
what the proper output shape will be for `outputs`.
Args:
inputs: A Tensor of shape `data_format` and of type `tf.float16`,
`tf.bfloat16`, `tf.float32` or `tf.float64`.
outputs: A Tensor of shape `data_format` and of type `tf.float16`,
`tf.bfloat16`, `tf.float32` or `tf.float64`. The output of `inputs`
from a transpose convolution op.
Returns:
outputs: The passed-in `outputs` with all shape information filled in.
"""
batch_size_value = inputs.get_shape()[0]
if self._data_format.startswith("NC"):
output_shape_value = ((batch_size_value, self.output_channels) +
self.output_shape)
elif self._data_format.startswith("N") and self._data_format.endswith("C"):
output_shape_value = ((batch_size_value,) + self.output_shape +
(self.output_channels,))
else:
raise ValueError("Unsupported data format: {}".format(self._data_format))
outputs.set_shape(output_shape_value)
return outputs
@property
def output_channels(self):
"""Returns the number of output channels."""
if callable(self._output_channels):
self._output_channels = self._output_channels()
# Channel must be integer.
self._output_channels = int(self._output_channels)
return self._output_channels
@property
def kernel_shape(self):
"""Returns the kernel shape."""
return self._kernel_shape
@property
def stride(self):
"""Returns the stride."""
return self._stride
@property
def output_shape(self):
"""Returns the output shape."""
if self._output_shape is None:
self._ensure_is_connected()
if callable(self._output_shape):
self._output_shape = tuple(self._output_shape())
return self._output_shape
@property
def padding(self):
"""Returns the padding algorithm."""
return self._padding
@property
def conv_op_padding(self):
"""Returns the padding algorithm used for the underlying convolution op."""
return self._padding
@property
def w(self):
"""Returns the Variable containing the weight matrix."""
self._ensure_is_connected()
return self._w
@property
def b(self):
"""Returns the Variable containing the bias.
Returns:
Variable object containing the bias, from the most recent __call__.
Raises:
base.NotConnectedError: If the module has not been connected to the graph
yet, meaning the variables do not exist.
AttributeError: If the module does not use bias.
"""
self._ensure_is_connected()
if not self._use_bias:
raise AttributeError(
"No bias Variable in Conv2DTranspose Module when `use_bias=False`.")
return self._b
@property
def has_bias(self):
"""Returns `True` if bias Variable is present in the module."""
return self._use_bias
@property
def initializers(self):
"""Returns the initializers dictionary."""
return self._initializers
@property
def partitioners(self):
"""Returns the partitioners dictionary."""
return self._partitioners
@property
def regularizers(self):
"""Returns the regularizers dictionary."""
return self._regularizers
@property
def input_shape(self):
"""Returns the input shape."""
self._ensure_is_connected()
return self._input_shape
@property
def input_channels(self):
"""Returns the number of input channels."""
self._ensure_is_connected()
return self._input_channels
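# Illustrative sketch (hypothetical sizes): when `output_shape` is omitted, a
# default spatial size is derived from the input at build time via
# `_default_transpose_size`. With the default SAME padding and stride 2, a
# 14x14 input typically maps back to a 28x28 output:
#
#   deconv = Conv2DTranspose(output_channels=8, kernel_shape=3, stride=2)
#   y = deconv(tf.placeholder(tf.float32, [None, 14, 14, 16]))
#   # y.get_shape() -> [None, 28, 28, 8]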
class Conv1D(_ConvND, base.Transposable):
"""1D convolution module, including optional bias.
This acts as a light wrapper around the class `_ConvND`.
"""
def __init__(self, output_channels, kernel_shape, stride=1, rate=1,
padding=SAME, use_bias=True, initializers=None,
partitioners=None, regularizers=None, mask=None,
data_format=DATA_FORMAT_NWC, padding_value=CONSTANT_PADDING,
custom_getter=None, name="conv_1d"):
"""Constructs a Conv1D module.
See the following documentation for an explanation of VALID versus SAME
padding modes:
https://www.tensorflow.org/api_docs/python/tf/nn/convolution
Args:
output_channels: Number of output channels. `output_channels` can be
either a number or a callable. In the latter case, since the function
invocation is deferred to graph construction time, the user must only
ensure that output_channels can be called, returning an integer,
when `build` is called.
kernel_shape: Sequence of kernel sizes (of size 1), or integer that is
used to define kernel size in all dimensions.
stride: Sequence of kernel strides (of size 1), or integer that is used to
define stride in all dimensions.
rate: Sequence of dilation rates (of size 1), or integer that is used to
define dilation rate in all dimensions. 1 corresponds to standard
convolution, `rate > 1` corresponds to dilated convolution. Cannot be
> 1 if any of `stride` is also > 1.
padding: Padding algorithm. Either `snt.SAME`, `snt.VALID`, `snt.FULL`,
`snt.CAUSAL`, `snt.REVERSE_CAUSAL`, or a sequence of these paddings
of length 1.
* snt.SAME and snt.VALID are explained in the Tensorflow docs at
https://www.tensorflow.org/api_docs/python/tf/nn/convolution.
* snt.FULL pre- and post-pads with the maximum padding which does not
result in a convolution over just padded elements.
* snt.CAUSAL pre-pads to ensure that each output value only depends on
input values at the same or preceding indices ("no dependence on the
future").
* snt.REVERSE_CAUSAL post-pads to ensure that each output value only
depends on input values at the same or *greater* indices ("no
dependence on the past").
If you use the same padding for all dimensions, and it is one of SAME
or VALID, then this is supported directly by the underlying
convolution op. In all other cases, the input data will be padded
using tf.pad before calling the convolution op.
use_bias: Whether to include bias parameters. Default `True`.
initializers: Optional dict containing ops to initialize the filters (with
key 'w') or biases (with key 'b'). The default initializer for the
weights is a truncated normal initializer, which is commonly used
when the inputs are zero centered (see
https://arxiv.org/pdf/1502.03167v3.pdf). The default initializer for
the bias is a zero initializer.
partitioners: Optional dict containing partitioners to partition
weights (with key 'w') or biases (with key 'b'). As a default, no
partitioners are used.
regularizers: Optional dict containing regularizers for the filters
(with key 'w') and the biases (with key 'b'). As a default, no
regularizers are used. A regularizer should be a function that takes
a single `Tensor` as an input and returns a scalar `Tensor` output,
e.g. the L1 and L2 regularizers in `tf.contrib.layers`.
      mask: An object convertible to a 3D tensor which is multiplied
        component-wise with the weights (Optional).
data_format: A string. Specifies whether the channel dimension
of the input and output is the last dimension (default, NWC), or the
second dimension (NCW).
padding_value: The type of padding to use, either "CONSTANT", "SYMMETRIC"
or "REFLECT", as supported by the underlying tf.pad
(https://www.tensorflow.org/api_docs/python/tf/pad). Defaults to
"CONSTANT" which will pad with zeros, potentially directly via the
underlying convolution op if the padding is SAME.
custom_getter: Callable or dictionary of callables to use as
custom getters inside the module. If a dictionary, the keys
correspond to regexes to match variable names. See the
`tf.get_variable` documentation for information about the
custom_getter API.
name: Name of the module.
Raises:
      base.IncompatibleShapeError: If the given kernel shape is not an integer
        or a sequence of length 1.
      base.IncompatibleShapeError: If the given stride is not an integer or a
        sequence of length 1.
      base.IncompatibleShapeError: If the given rate is not an integer or a
        sequence of length 1.
base.IncompatibleShapeError: If a mask is a TensorFlow Tensor with
a not fully defined shape.
base.NotSupportedError: If rate in any dimension and the stride in any
dimension are simultaneously > 1.
ValueError: If the given padding is not `snt.VALID` or `snt.SAME`.
KeyError: If `initializers`, `partitioners` or `regularizers` contain any
keys other than 'w' or 'b'.
TypeError: If any of the given initializers, partitioners or regularizers
are not callable.
TypeError: If mask is given and it is not convertible to a Tensor.
ValueError: If the passed-in data_format doesn't have a channel dimension.
ValueError: If the given data_format is not a supported format (see
`SUPPORTED_1D_DATA_FORMATS`).
"""
if data_format not in SUPPORTED_1D_DATA_FORMATS:
raise ValueError("Invalid data_format {:s}. Allowed formats "
"{}".format(data_format, SUPPORTED_1D_DATA_FORMATS))
super(Conv1D, self).__init__(
output_channels=output_channels, kernel_shape=kernel_shape,
stride=stride, rate=rate, padding=padding, padding_value=padding_value,
use_bias=use_bias, initializers=initializers, partitioners=partitioners,
regularizers=regularizers, mask=mask, data_format=data_format,
custom_getter=custom_getter, name=name)
# Implement Transposable interface
def transpose(self, name=None):
"""Returns matching `Conv1DTranspose` module.
Args:
name: Optional string assigning name of transpose module. The default name
is constructed by appending "_transpose" to `self.name`.
Returns:
`Conv1DTranspose` module.
Raises:
base.NotSupportedError: If `rate` in any dimension > 1.
"""
if any(x > 1 for x in self._rate):
raise base.NotSupportedError(
"Cannot transpose a dilated convolution module.")
if any(p != self._conv_op_padding for p in self._padding):
raise base.NotSupportedError(
"Cannot tranpose a convolution using mixed paddings or paddings "
"other than SAME or VALID.")
def output_shape():
if self._data_format == DATA_FORMAT_NCW:
return (self._input_shape[2],)
else: # data_format = DATA_FORMAT_NWC
return (self._input_shape[1],)
if name is None:
name = self.module_name + "_transpose"
return Conv1DTranspose(output_channels=lambda: self._input_channels,
output_shape=output_shape,
kernel_shape=self._kernel_shape,
stride=self._stride,
padding=self._conv_op_padding,
use_bias=self._use_bias,
initializers=self._initializers,
partitioners=self._partitioners,
regularizers=self._regularizers,
data_format=self._data_format,
custom_getter=self._custom_getter,
name=name)
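# Illustrative sketch (hypothetical sizes): a causal, dilated 1D convolution
# built directly with Conv1D rather than the deprecated CausalConv1D:
#
#   conv = Conv1D(output_channels=32, kernel_shape=4, rate=2, padding=CAUSAL)
#   y = conv(tf.placeholder(tf.float32, [None, 100, 8]))  # NWC input
#
# The input is pre-padded with (4 - 1) * 2 = 6 zeros along the time axis, so
# each output step depends only on current and earlier inputs and the output
# length stays 100.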
class Conv1DTranspose(_ConvNDTranspose, base.Transposable):
"""1D transposed / reverse / up 1D convolution module, including bias.
This performs a 1D transpose convolution by lightly wrapping the TensorFlow op
`tf.nn.conv2d_transpose`, setting the size of the height dimension of the
image to 1.
"""
def __init__(self, output_channels, output_shape=None, kernel_shape=None,
stride=1, padding=SAME, use_bias=True, initializers=None,
partitioners=None, regularizers=None,
data_format=DATA_FORMAT_NWC, custom_getter=None,
name="conv_1d_transpose"):
"""Constructs a Conv1DTranspose module.
See the following documentation for an explanation of VALID versus SAME
padding modes:
https://www.tensorflow.org/api_docs/python/tf/nn/convolution
Args:
output_channels: Number of output channels. Can be either a number or a
callable. In the latter case, since the function invocation is
deferred to graph construction time, the user must only ensure
`output_channels` can be called, returning an integer, when build is
called.
output_shape: Output shape of transpose convolution. Can be either a
number or a callable. In the latter case, since the function
invocation is deferred to graph construction time, the user must only
ensure that `output_shape` can be called, returning an iterable of
        format `(out_length,)` when build is called. If a None
value is given, a default shape is automatically calculated (see
docstring of _default_transpose_size function for more details).
kernel_shape: Sequence of kernel sizes (of size 1), or integer that is
used to define kernel size in all dimensions.
stride: Sequence of kernel strides (of size 1), or integer that is used to
define stride in all dimensions.
padding: Padding algorithm, either `snt.SAME` or `snt.VALID`.
use_bias: Whether to include bias parameters. Default `True`.
initializers: Optional dict containing ops to initialize the filters (with
key 'w') or biases (with key 'b').
partitioners: Optional dict containing partitioners to partition
weights (with key 'w') or biases (with key 'b'). As a default, no
partitioners are used.
regularizers: Optional dict containing regularizers for the filters
(with key 'w') and the biases (with key 'b'). As a default, no
regularizers are used. A regularizer should be a function that takes
a single `Tensor` as an input and returns a scalar `Tensor` output,
e.g. the L1 and L2 regularizers in `tf.contrib.layers`.
data_format: A string. Specifies whether the channel dimension
of the input and output is the last dimension (default, NWC), or the
second dimension (NCW).
custom_getter: Callable or dictionary of callables to use as
custom getters inside the module. If a dictionary, the keys
correspond to regexes to match variable names. See the
`tf.get_variable` documentation for information about the
custom_getter API.
name: Name of the module.
Raises:
      base.IncompatibleShapeError: If the given kernel shape is not an integer
        or a sequence of length 1.
      base.IncompatibleShapeError: If the given stride is not an integer or a
        sequence of one or three integers.
ValueError: If the given padding is not `snt.VALID` or `snt.SAME`.
ValueError: If the given kernel_shape is `None`.
KeyError: If `initializers`, `partitioners` or `regularizers` contain any
keys other than 'w' or 'b'.
TypeError: If any of the given initializers, partitioners or regularizers
are not callable.
ValueError: If the passed-in data_format doesn't have a channel dimension.
ValueError: If the given data_format is not a supported format (see
`SUPPORTED_1D_DATA_FORMATS`).
"""
if data_format not in SUPPORTED_1D_DATA_FORMATS:
raise ValueError("Invalid data_format {:s}. Allowed formats "
"{}".format(data_format, SUPPORTED_1D_DATA_FORMATS))
super(Conv1DTranspose, self).__init__(
output_channels=output_channels, output_shape=output_shape,
kernel_shape=kernel_shape, stride=stride, padding=padding,
use_bias=use_bias, initializers=initializers,
partitioners=partitioners, regularizers=regularizers,
data_format=data_format, custom_getter=custom_getter, name=name
)
# Implement Transposable interface.
def transpose(self, name=None):
"""Returns matching `Conv1D` module.
Args:
name: Optional string assigning name of transpose module. The default name
is constructed by appending "_transpose" to `self.name`.
Returns:
`Conv1D` module.
"""
if name is None:
name = self.module_name + "_transpose"
if self._data_format == DATA_FORMAT_NWC:
stride = self._stride[1:-1]
else: # self._data_format == DATA_FORMAT_NCW
stride = self._stride[2:]
return Conv1D(output_channels=lambda: self.input_channels,
kernel_shape=self.kernel_shape,
stride=stride,
padding=self.padding,
use_bias=self._use_bias,
initializers=self.initializers,
partitioners=self.partitioners,
regularizers=self.regularizers,
data_format=self._data_format,
custom_getter=self._custom_getter,
name=name)
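# Illustrative sketch (hypothetical sizes): Conv1DTranspose wraps
# tf.nn.conv2d_transpose by inserting a singleton height dimension. With an
# explicit output length:
#
#   deconv = Conv1DTranspose(output_channels=4, output_shape=(200,),
#                            kernel_shape=5, stride=2)
#   y = deconv(tf.placeholder(tf.float32, [None, 100, 16]))  # NWC input
#   # y.get_shape() -> [None, 200, 4]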
class CausalConv1D(_ConvND):
"""1D convolution module, including optional bias.
This is deprecated, please use the padding=CAUSAL argument to Conv1D.
This acts as a light wrapper around _ConvND ensuring that the outputs at index
`i` only depend on indices smaller than `i` (also known as a causal
convolution). For further details on the theoretical background, refer to:
https://arxiv.org/abs/1610.10099
"""
def __init__(self, output_channels, kernel_shape,
stride=1, rate=1, use_bias=True, initializers=None,
partitioners=None, regularizers=None, mask=None,
padding=CAUSAL, data_format=DATA_FORMAT_NWC,
padding_value=CONSTANT_PADDING, custom_getter=None,
name="causal_conv_1d"):
"""Constructs a CausalConv1D module.
This is deprecated, please use the padding=CAUSAL argument to Conv1D.
Args:
output_channels: Number of output channels. `output_channels` can be
either a number or a callable. In the latter case, since the function
invocation is deferred to graph construction time, the user must only
ensure that output_channels can be called, returning an integer,
when `build` is called.
kernel_shape: Sequence of kernel sizes (of size 1), or integer that is
used to define kernel size in all dimensions.
stride: Sequence of kernel strides (of size 1), or integer that is used to
define stride in all dimensions.
rate: Sequence of dilation rates (of size 1), or integer that is used to
define dilation rate in all dimensions. 1 corresponds to standard
convolution, `rate > 1` corresponds to dilated convolution. Cannot be
> 1 if any of `stride` is also > 1.
use_bias: Whether to include bias parameters. Default `True`.
initializers: Optional dict containing ops to initialize the filters (with
key 'w') or biases (with key 'b'). The default initializer for the
weights is a truncated normal initializer, which is commonly used
when the inputs are zero centered (see
https://arxiv.org/pdf/1502.03167v3.pdf). The default initializer for
the bias is a zero initializer.
partitioners: Optional dict containing partitioners to partition
weights (with key 'w') or biases (with key 'b'). As a default, no
partitioners are used.
regularizers: Optional dict containing regularizers for the filters
(with key 'w') and the biases (with key 'b'). As a default, no
regularizers are used. A regularizer should be a function that takes
a single `Tensor` as an input and returns a scalar `Tensor` output,
e.g. the L1 and L2 regularizers in `tf.contrib.layers`.
      mask: An object convertible to a 3D tensor which is multiplied
        component-wise with the weights (Optional).
padding: Padding algorithm. Should be `snt.CAUSAL`.
data_format: A string. Specifies whether the channel dimension
of the input and output is the last dimension (default, NWC), or the
second dimension (NCW).
padding_value: The type of padding to use, either "CONSTANT", "SYMMETRIC"
or "REFLECT", as supported by the underlying tf.pad
(https://www.tensorflow.org/api_docs/python/tf/pad). Defaults to
"CONSTANT" which will pad with zeros.
custom_getter: Callable or dictionary of callables to use as
custom getters inside the module. If a dictionary, the keys
correspond to regexes to match variable names. See the
`tf.get_variable` documentation for information about the
custom_getter API.
name: Name of the module.
Raises:
      base.IncompatibleShapeError: If the given kernel shape is not an integer
        or a sequence of length 1.
      base.IncompatibleShapeError: If the given stride is not an integer or a
        sequence of length 1.
      base.IncompatibleShapeError: If the given rate is not an integer or a
        sequence of length 1.
base.IncompatibleShapeError: If a mask is a TensorFlow Tensor with
a not fully defined shape.
base.NotSupportedError: If rate in any dimension and the stride in any
dimension are simultaneously > 1.
KeyError: If `initializers`, `partitioners` or `regularizers` contain any
keys other than 'w' or 'b'.
TypeError: If any of the given initializers, partitioners or regularizers
are not callable.
TypeError: If mask is given and it is not convertible to a Tensor.
ValueError: If the passed-in data_format doesn't have a channel dimension.
ValueError: If the given data_format is not a supported format (see
`SUPPORTED_1D_DATA_FORMATS`).
"""
util.deprecation_warning(
"CausalConv1D is deprecated, please use Conv1D with padding=CAUSAL.")
if data_format not in SUPPORTED_1D_DATA_FORMATS:
raise ValueError("Invalid data_format {:s}. Allowed formats "
"{}".format(data_format, SUPPORTED_1D_DATA_FORMATS))
if padding != CAUSAL:
# This used to be required to be VALID, which is now rather ambiguous.
# Supporting VALID for now but with a warning:
util.deprecation_warning(
"You specified a non-casual padding type for CausalConv1D, this has "
"been ignored and you will get CAUSAL padding. Note CausalConv1D is "
"deprecated, please switch to Conv1D with padding=CAUSAL.")
super(CausalConv1D, self).__init__(
output_channels=output_channels, kernel_shape=kernel_shape,
stride=stride, rate=rate, padding=CAUSAL, padding_value=padding_value,
use_bias=use_bias, initializers=initializers, partitioners=partitioners,
regularizers=regularizers, mask=mask, data_format=data_format,
custom_getter=custom_getter, name=name)
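# Illustrative equivalence for the deprecation note above (names are made up):
# the two modules below are configured to compute the same causal convolution:
#
#   legacy = CausalConv1D(output_channels=16, kernel_shape=3)
#   modern = Conv1D(output_channels=16, kernel_shape=3, padding=CAUSAL)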
class Conv2D(_ConvND, base.Transposable):
"""Spatial convolution and dilated convolution module, including bias.
This acts as a light wrapper around the class `_ConvND`.
"""
def __init__(self, output_channels, kernel_shape, stride=1, rate=1,
padding=SAME, use_bias=True, initializers=None,
partitioners=None, regularizers=None, mask=None,
data_format=DATA_FORMAT_NHWC, padding_value=CONSTANT_PADDING,
custom_getter=None, name="conv_2d"):
"""Constructs a Conv2D module.
See the following documentation for an explanation of VALID versus SAME
padding modes:
https://www.tensorflow.org/api_docs/python/tf/nn/convolution
Args:
output_channels: Number of output channels. `output_channels` can be
either a number or a callable. In the latter case, since the function
invocation is deferred to graph construction time, the user must only
ensure that output_channels can be called, returning an integer,
when `build` is called.
kernel_shape: Sequence of kernel sizes (of size 2), or integer that is
used to define kernel size in all dimensions.
stride: Sequence of kernel strides (of size 2), or integer that is used to
define stride in all dimensions.
rate: Sequence of dilation rates (of size 2), or integer that is used to
define dilation rate in all dimensions. 1 corresponds to standard 2D
convolution, `rate > 1` corresponds to dilated convolution. Cannot be
> 1 if any of `stride` is also > 1.
padding: Padding algorithm. Either `snt.SAME`, `snt.VALID`, `snt.FULL`,
`snt.CAUSAL`, `snt.REVERSE_CAUSAL`, or a sequence of these paddings
of length 2.
* snt.SAME and snt.VALID are explained in the Tensorflow docs at
https://www.tensorflow.org/api_docs/python/tf/nn/convolution.
* snt.FULL pre- and post-pads with the maximum padding which does not
result in a convolution over just padded elements.
* snt.CAUSAL pre-pads to ensure that each output value only depends on
input values at the same or preceding indices ("no dependence on the
future").
* snt.REVERSE_CAUSAL post-pads to ensure that each output value only
depends on input values at the same or *greater* indices ("no
dependence on the past").
If you use the same padding for all dimensions, and it is one of SAME
or VALID, then this is supported directly by the underlying
convolution op. In all other cases, the input data will be padded
using tf.pad before calling the convolution op.
use_bias: Whether to include bias parameters. Default `True`.
initializers: Optional dict containing ops to initialize the filters (with
key 'w') or biases (with key 'b'). The default initializer for the
weights is a truncated normal initializer, which is commonly used
when the inputs are zero centered (see
https://arxiv.org/pdf/1502.03167v3.pdf). The default initializer for
the bias is a zero initializer.
partitioners: Optional dict containing partitioners to partition
weights (with key 'w') or biases (with key 'b'). As a default, no
partitioners are used.
regularizers: Optional dict containing regularizers for the filters
(with key 'w') and the biases (with key 'b'). As a default, no
regularizers are used. A regularizer should be a function that takes
a single `Tensor` as an input and returns a scalar `Tensor` output,
e.g. the L1 and L2 regularizers in `tf.contrib.layers`.
      mask: An object convertible to a 4D tensor which is multiplied
        component-wise with the weights (Optional).
data_format: A string. Specifies whether the channel dimension
of the input and output is the last dimension (default, NHWC), or the
second dimension (NCHW).
padding_value: The type of padding to use, either "CONSTANT", "SYMMETRIC"
or "REFLECT", as supported by the underlying tf.pad
(https://www.tensorflow.org/api_docs/python/tf/pad). Can only be set
globally for all dimensions. Defaults to "CONSTANT" which will pad
with zeros, potentially directly via the underlying convolution op if
the padding is SAME for all dimensions.
custom_getter: Callable or dictionary of callables to use as
custom getters inside the module. If a dictionary, the keys
correspond to regexes to match variable names. See the
`tf.get_variable` documentation for information about the
custom_getter API.
name: Name of the module.
Raises:
base.IncompatibleShapeError: If the given kernel shape is not an integer;
or if the given kernel shape is not a sequence of two integers.
base.IncompatibleShapeError: If the given stride is not an integer; or if
the given stride is not a sequence of two integers.
base.IncompatibleShapeError: If the given rate is not an integer; or if
the given rate is not a sequence of two integers.
base.IncompatibleShapeError: If a mask is given and its rank is neither 2
nor 4, or if it is a TensorFlow Tensor with a not fully defined shape.
base.NotSupportedError: If rate in any dimension and the stride in any
dimension are simultaneously > 1.
ValueError: If the given padding is not `snt.VALID`, `snt.SAME`,
`snt.FULL`, `snt.CAUSAL`, `snt.REVERSE_CAUSAL` or a sequence of these.
KeyError: If `initializers`, `partitioners` or `regularizers` contain any
keys other than 'w' or 'b'.
TypeError: If any of the given initializers, partitioners or regularizers
are not callable.
TypeError: If mask is given and it is not convertible to a Tensor.
ValueError: If the passed-in data_format doesn't have a channel dimension.
ValueError: If the given data_format is not a supported format (see
`SUPPORTED_2D_DATA_FORMATS`).
"""
if data_format not in SUPPORTED_2D_DATA_FORMATS:
raise ValueError("Invalid data_format {:s}. Allowed formats "
"{}".format(data_format, SUPPORTED_2D_DATA_FORMATS))
super(Conv2D, self).__init__(
output_channels=output_channels, kernel_shape=kernel_shape,
stride=stride, rate=rate, padding=padding, padding_value=padding_value,
use_bias=use_bias, initializers=initializers, partitioners=partitioners,
regularizers=regularizers, mask=mask, data_format=data_format,
custom_getter=custom_getter, name=name)
# Implements Transposable interface.
def transpose(self, name=None):
"""Returns matching `Conv2DTranspose` module.
Args:
name: Optional string assigning name of transpose module. The default name
is constructed by appending "_transpose" to `self.name`.
Returns:
`Conv2DTranspose` module.
Raises:
base.NotSupportedError: If `rate` in any dimension > 1.
"""
if any(x > 1 for x in self._rate):
raise base.NotSupportedError(
"Cannot transpose a dilated convolution module.")
if any(p != self._conv_op_padding for p in self._padding):
raise base.NotSupportedError(
"Cannot tranpose a convolution using mixed paddings or paddings "
"other than SAME or VALID.")
if name is None:
name = self.module_name + "_transpose"
def output_shape():
if self._data_format == DATA_FORMAT_NCHW:
return self.input_shape[2:4]
else: # data_format == DATA_FORMAT_NHWC
return self.input_shape[1:3]
return Conv2DTranspose(output_channels=lambda: self._input_channels,
output_shape=output_shape,
kernel_shape=self._kernel_shape,
stride=self._stride,
padding=self._conv_op_padding,
use_bias=self._use_bias,
initializers=self._initializers,
partitioners=self._partitioners,
regularizers=self._regularizers,
data_format=self._data_format,
custom_getter=self._custom_getter,
name=name)
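# Illustrative sketch (hypothetical sizes): a dilated 2D convolution. Note that
# Conv2D.transpose() refuses rate > 1, so a dilated module cannot be mirrored
# into a Conv2DTranspose:
#
#   conv = Conv2D(output_channels=64, kernel_shape=3, rate=2, padding=SAME)
#   y = conv(tf.placeholder(tf.float32, [None, 32, 32, 3]))
#   # conv.transpose()  -> raises base.NotSupportedError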
class Conv2DTranspose(_ConvNDTranspose, base.Transposable):
"""Spatial transposed / reverse / up 2D convolution module, including bias.
This acts as a light wrapper around the TensorFlow op `tf.nn.conv2d_transpose`
abstracting away variable creation and sharing.
"""
def __init__(self, output_channels, output_shape=None, kernel_shape=None,
stride=1, padding=SAME, use_bias=True, initializers=None,
partitioners=None, regularizers=None,
data_format=DATA_FORMAT_NHWC, custom_getter=None,
name="conv_2d_transpose"):
"""Constructs a `Conv2DTranspose module`.
See the following documentation for an explanation of VALID versus SAME
padding modes:
https://www.tensorflow.org/api_docs/python/tf/nn/convolution
Args:
output_channels: Number of output channels.
Can be either a number or a callable. In the latter case, since the
function invocation is deferred to graph construction time, the user
must only ensure `output_channels` can be called, returning an
integer, when build is called.
output_shape: Output shape of transpose convolution.
Can be either an iterable of integers or a callable. In the latter
case, since the function invocation is deferred to graph construction
time, the user must only ensure that `output_shape` can be called,
returning an iterable of format `(out_height, out_width)` when `build`
is called. Note that `output_shape` defines the size of output signal
domain, as opposed to the shape of the output `Tensor`. If a None
value is given, a default shape is automatically calculated (see
docstring of _default_transpose_size function for more details).
kernel_shape: Sequence of kernel sizes (of size 2), or integer that is
used to define kernel size in all dimensions.
stride: Sequence of kernel strides (of size 2), or integer that is used to
define stride in all dimensions.
padding: Padding algorithm, either `snt.SAME` or `snt.VALID`.
use_bias: Whether to include bias parameters. Default `True`.
initializers: Optional dict containing ops to initialize the filters (with
key 'w') or biases (with key 'b').
partitioners: Optional dict containing partitioners to partition
weights (with key 'w') or biases (with key 'b'). As a default, no
partitioners are used.
regularizers: Optional dict containing regularizers for the filters
(with key 'w') and the biases (with key 'b'). As a default, no
regularizers are used. A regularizer should be a function that takes
a single `Tensor` as an input and returns a scalar `Tensor` output,
e.g. the L1 and L2 regularizers in `tf.contrib.layers`.
data_format: A string. Specifies whether the channel dimension
of the input and output is the last dimension (default, NHWC), or the
second dimension ("NCHW").
custom_getter: Callable or dictionary of callables to use as
custom getters inside the module. If a dictionary, the keys
        correspond to regexes to match variable names. See the
        `tf.get_variable` documentation for information about the
custom_getter API.
name: Name of the module.
Raises:
base.IncompatibleShapeError: If the given kernel shape is neither an
integer nor a sequence of two integers.
base.IncompatibleShapeError: If the given stride is neither an integer nor
a sequence of two or four integers.
ValueError: If the given padding is not `snt.VALID` or `snt.SAME`.
ValueError: If the given kernel_shape is `None`.
KeyError: If `initializers`, `partitioners` or `regularizers` contain any
keys other than 'w' or 'b'.
TypeError: If any of the given initializers, partitioners or regularizers
are not callable.
ValueError: If the passed-in data_format doesn't have a channel dimension.
ValueError: If the given data_format is not a supported format (see
`SUPPORTED_2D_DATA_FORMATS`).
"""
if data_format not in SUPPORTED_2D_DATA_FORMATS:
raise ValueError("Invalid data_format {:s}. Allowed formats "
"{}".format(data_format, SUPPORTED_2D_DATA_FORMATS))
super(Conv2DTranspose, self).__init__(
output_channels=output_channels, output_shape=output_shape,
kernel_shape=kernel_shape, stride=stride, padding=padding,
use_bias=use_bias, initializers=initializers,
partitioners=partitioners, regularizers=regularizers,
data_format=data_format, custom_getter=custom_getter, name=name
)
# Implements Transposable interface.
def transpose(self, name=None):
"""Returns matching `Conv2D` module.
Args:
name: Optional string assigning name of transpose module. The default name
is constructed by appending "_transpose" to `self.name`.
Returns:
`Conv2D` module.
"""
if name is None:
name = self.module_name + "_transpose"
if self._data_format == DATA_FORMAT_NHWC:
stride = self._stride[1:-1]
else: # self._data_format == DATA_FORMAT_NCHW
stride = self._stride[2:]
return Conv2D(output_channels=lambda: self.input_channels,
kernel_shape=self._kernel_shape,
stride=stride,
padding=self._padding,
use_bias=self._use_bias,
initializers=self._initializers,
partitioners=self._partitioners,
regularizers=self._regularizers,
data_format=self._data_format,
custom_getter=self._custom_getter,
name=name)
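# Illustrative sketch (hypothetical sizes): a Conv2D / Conv2DTranspose round
# trip via the Transposable interface. The transpose reuses the original
# configuration (but creates its own variables) and restores the input spatial
# size:
#
#   conv = Conv2D(output_channels=32, kernel_shape=5, stride=2)
#   y = conv(tf.placeholder(tf.float32, [None, 64, 64, 3]))  # -> 32x32 spatial
#   deconv = conv.transpose()
#   x_hat = deconv(y)  # -> [None, 64, 64, 3]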
class Conv3D(_ConvND, base.Transposable):
"""Volumetric convolution module, including optional bias.
This acts as a light wrapper around the class `_ConvND`.
"""
def __init__(self, output_channels, kernel_shape, stride=1, rate=1,
padding=SAME, use_bias=True, initializers=None,
partitioners=None, regularizers=None, mask=None,
data_format=DATA_FORMAT_NDHWC, padding_value=CONSTANT_PADDING,
custom_getter=None, name="conv_3d"):
"""Constructs a Conv3D module.
See the following documentation for an explanation of VALID versus SAME
padding modes:
https://www.tensorflow.org/api_docs/python/tf/nn/convolution
Args:
output_channels: Number of output channels. `output_channels` can be
either a number or a callable. In the latter case, since the function
invocation is deferred to graph construction time, the user must only
ensure that output_channels can be called, returning an integer,
when `build` is called.
kernel_shape: Sequence of kernel sizes (of size 3), or integer that is
used to define kernel size in all dimensions.
stride: Sequence of kernel strides (of size 3), or integer that is used to
define stride in all dimensions.
rate: Sequence of dilation rates (of size 3), or integer that is used to
define dilation rate in all dimensions. 1 corresponds to standard 3D
convolution, `rate > 1` corresponds to dilated convolution. Cannot be
> 1 if any of `stride` is also > 1.
padding: Padding algorithm. Either `snt.SAME`, `snt.VALID`, `snt.FULL`,
`snt.CAUSAL`, `snt.REVERSE_CAUSAL`, or a sequence of these paddings
of length 3.
* snt.SAME and snt.VALID are explained in the Tensorflow docs at
https://www.tensorflow.org/api_docs/python/tf/nn/convolution.
* snt.FULL pre- and post-pads with the maximum padding which does not
result in a convolution over just padded elements.
* snt.CAUSAL pre-pads to ensure that each output value only depends on
input values at the same or preceding indices ("no dependence on the
future").
* snt.REVERSE_CAUSAL post-pads to ensure that each output value only
depends on input values at the same or *greater* indices ("no
dependence on the past").
If you use the same padding for all dimensions, and it is one of SAME
or VALID, then this is supported directly by the underlying
convolution op. In all other cases, the input data will be padded
using tf.pad before calling the convolution op.
use_bias: Whether to include bias parameters. Default `True`.
initializers: Optional dict containing ops to initialize the filters (with
key 'w') or biases (with key 'b'). The default initializer for the
weights is a truncated normal initializer, which is commonly used
when the inputs are zero centered (see
https://arxiv.org/pdf/1502.03167v3.pdf). The default initializer for
the bias is a zero initializer.
partitioners: Optional dict containing partitioners to partition
weights (with key 'w') or biases (with key 'b'). As a default, no
partitioners are used.
regularizers: Optional dict containing regularizers for the filters
(with key 'w') and the biases (with key 'b'). As a default, no
regularizers are used. A regularizer should be a function that takes
a single `Tensor` as an input and returns a scalar `Tensor` output,
e.g. the L1 and L2 regularizers in `tf.contrib.layers`.
mask: An object convertible to a 5D tensor which is multiplied
component-wise with the weights (Optional).
data_format: A string. Specifies whether the channel dimension
of the input and output is the last dimension (default, NDHWC), or
the second dimension (NCDHW).
padding_value: The type of padding to use, either "CONSTANT", "SYMMETRIC"
or "REFLECT", as supported by the underlying tf.pad
(https://www.tensorflow.org/api_docs/python/tf/pad). Can only be set
globally for all dimensions. Defaults to "CONSTANT" which will pad
with zeros, potentially directly via the underlying convolution op if
the padding is SAME for all dimensions.
custom_getter: Callable or dictionary of callables to use as
custom getters inside the module. If a dictionary, the keys
correspond to regexes to match variable names. See the
`tf.get_variable` documentation for information about the
custom_getter API.
name: Name of the module.
Raises:
      base.IncompatibleShapeError: If the given kernel shape is not an integer;
        or if the given kernel shape is not a sequence of three integers.
      base.IncompatibleShapeError: If the given stride is not an integer; or if
        the given stride is not a sequence of three or five integers.
      base.IncompatibleShapeError: If the given rate is not an integer; or if
        the given rate is not a sequence of three integers.
base.NotSupportedError: If rate in any dimension and the stride in any
dimension are simultaneously > 1.
ValueError: If the given padding is not `snt.VALID`, `snt.SAME`,
`snt.FULL`, `snt.CAUSAL`, `snt.REVERSE_CAUSAL` or a sequence of these.
KeyError: If `initializers`, `partitioners` or `regularizers` contain any
keys other than 'w' or 'b'.
TypeError: If any of the given initializers, partitioners or regularizers
are not callable.
ValueError: If the passed-in data_format doesn't have a channel dimension.
ValueError: If the given data_format is not a supported format (see
`SUPPORTED_3D_DATA_FORMATS`).
"""
if data_format not in SUPPORTED_3D_DATA_FORMATS:
raise ValueError("Invalid data_format {:s}. Allowed formats "
"{}".format(data_format, SUPPORTED_3D_DATA_FORMATS))
super(Conv3D, self).__init__(
output_channels=output_channels, kernel_shape=kernel_shape,
stride=stride, rate=rate, padding=padding, padding_value=padding_value,
use_bias=use_bias, initializers=initializers, partitioners=partitioners,
regularizers=regularizers, mask=mask, data_format=data_format,
custom_getter=custom_getter, name=name)
# Implements Transposable interface.
def transpose(self, name=None):
"""Returns matching `Conv3DTranspose` module.
Args:
name: Optional string assigning name of transpose module. The default name
is constructed by appending "_transpose" to `self.name`.
Returns:
`Conv3DTranspose` module.
Raises:
base.NotSupportedError: If `rate` in any dimension > 1.
"""
if any(x > 1 for x in self._rate):
raise base.NotSupportedError(
"Cannot transpose a dilated convolution module.")
if any(p != self._conv_op_padding for p in self._padding):
raise base.NotSupportedError(
"Cannot tranpose a convolution using mixed paddings or paddings "
"other than SAME or VALID.")
def output_shape():
if self._data_format == DATA_FORMAT_NCDHW:
return self.input_shape[2:]
else: # data_format == DATA_FORMAT_NDHWC
return self.input_shape[1:4]
if name is None:
name = self.module_name + "_transpose"
return Conv3DTranspose(output_channels=lambda: self._input_channels,
output_shape=output_shape,
kernel_shape=self._kernel_shape,
stride=self._stride,
padding=self._conv_op_padding,
use_bias=self._use_bias,
initializers=self._initializers,
partitioners=self._partitioners,
regularizers=self._regularizers,
data_format=self._data_format,
custom_getter=self._custom_getter,
name=name)
class Conv3DTranspose(_ConvNDTranspose, base.Transposable):
"""Volumetric transposed / reverse / up 3D convolution module, including bias.
This acts as a light wrapper around the TensorFlow op `tf.nn.conv3d_transpose`
abstracting away variable creation and sharing.
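  A minimal usage sketch (shapes are illustrative; `output_shape` is left as
  `None` so a default output shape is derived from the input):
  ```
  import sonnet as snt
  import tensorflow.compat.v1 as tf
  deconv = snt.Conv3DTranspose(output_channels=4, kernel_shape=3, stride=2)
  codes = tf.placeholder(tf.float32, shape=[2, 4, 16, 16, 32])  # NDHWC
  upsampled = deconv(codes)  # spatial dimensions roughly doubled
  ```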
"""
def __init__(self, output_channels, output_shape=None, kernel_shape=None,
stride=1, padding=SAME, use_bias=True, initializers=None,
partitioners=None, regularizers=None,
data_format=DATA_FORMAT_NDHWC, custom_getter=None,
name="conv_3d_transpose"):
"""Constructs a `Conv3DTranspose` module.
See the following documentation for an explanation of VALID versus SAME
padding modes:
https://www.tensorflow.org/api_docs/python/tf/nn/convolution
Args:
output_channels: Number of output channels. `output_channels` can be
either a number or a callable. In the latter case, since the function
invocation is deferred to graph construction time, the user must only
ensure `output_channels` can be called, returning an integer, when
`build` is called.
output_shape: Output shape of transpose convolution.
Can be either an iterable of integers or a callable. In the latter
case, since the function invocation is deferred to graph construction
time, the user must only ensure that `output_shape` can be called,
returning an iterable of format `(out_depth, out_height, out_width)`
when `build` is called. Note that `output_shape` defines the size of
output signal domain, as opposed to the shape of the output `Tensor`.
If a None value is given, a default shape is automatically calculated
(see docstring of _default_transpose_size function for more details).
kernel_shape: Sequence of kernel sizes (of size 3), or integer that is
used to define kernel size in all dimensions.
stride: Sequence of kernel strides (of size 3), or integer that is used to
define stride in all dimensions.
padding: Padding algorithm, either `snt.SAME` or `snt.VALID`.
use_bias: Whether to include bias parameters. Default `True`.
initializers: Optional dict containing ops to initialize the filters (with
key 'w') or biases (with key 'b').
partitioners: Optional dict containing partitioners to partition
weights (with key 'w') or biases (with key 'b'). As a default, no
partitioners are used.
regularizers: Optional dict containing regularizers for the filters
(with key 'w') and the biases (with key 'b'). As a default, no
regularizers are used. A regularizer should be a function that takes
a single `Tensor` as an input and returns a scalar `Tensor` output,
e.g. the L1 and L2 regularizers in `tf.contrib.layers`.
data_format: A string. Specifies whether the channel dimension
of the input and output is the last dimension (default, NDHWC), or the
second dimension (NCDHW).
custom_getter: Callable or dictionary of callables to use as
custom getters inside the module. If a dictionary, the keys
correspond to regexes to match variable names. See the
`tf.get_variable` documentation for information about the
custom_getter API.
name: Name of the module.
Raises:
      base.IncompatibleShapeError: If the given kernel shape is neither an
        integer nor a sequence of three integers.
      base.IncompatibleShapeError: If the given stride is neither an integer
        nor a sequence of three or five integers.
ValueError: If the given padding is not `snt.VALID` or `snt.SAME`.
ValueError: If the given kernel_shape is `None`.
KeyError: If `initializers`, `partitioners` or `regularizers` contain any
keys other than 'w' or 'b'.
TypeError: If any of the given initializers, partitioners or regularizers
are not callable.
ValueError: If the passed-in data_format doesn't have a channel dimension.
ValueError: If the given data_format is not a supported format (see
`SUPPORTED_3D_DATA_FORMATS`).
"""
if data_format not in SUPPORTED_3D_DATA_FORMATS:
raise ValueError("Invalid data_format {:s}. Allowed formats "
"{}".format(data_format, SUPPORTED_3D_DATA_FORMATS))
super(Conv3DTranspose, self).__init__(
output_channels=output_channels, output_shape=output_shape,
kernel_shape=kernel_shape, stride=stride, padding=padding,
use_bias=use_bias, initializers=initializers,
partitioners=partitioners, regularizers=regularizers,
data_format=data_format, custom_getter=custom_getter, name=name
)
# Implement Transposable interface
def transpose(self, name=None):
"""Returns transposed Conv3DTranspose module, i.e. a Conv3D module."""
if name is None:
name = self.module_name + "_transpose"
if self._data_format == DATA_FORMAT_NDHWC:
stride = self._stride[1:-1]
else: # self._data_format == DATA_FORMAT_NCDHW
stride = self._stride[2:]
return Conv3D(output_channels=lambda: self.input_channels,
kernel_shape=self._kernel_shape,
stride=stride,
padding=self._padding,
use_bias=self._use_bias,
initializers=self._initializers,
partitioners=self._partitioners,
regularizers=self._regularizers,
data_format=self._data_format,
custom_getter=self._custom_getter,
name=name)
class InPlaneConv2D(_ConvND):
"""Applies an in-plane convolution to each channel with tied filter weights.
This acts as a light wrapper around the TensorFlow op
  `tf.nn.depthwise_conv2d`; it differs from the `DepthwiseConv2D` module in that
  it has tied weights (i.e. the same filter) for all the in-out channel pairs.
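  A minimal usage sketch (the input shape below is illustrative); note that
  the number of output channels always equals the number of input channels:
  ```
  import sonnet as snt
  import tensorflow.compat.v1 as tf
  smooth = snt.InPlaneConv2D(kernel_shape=3)
  images = tf.placeholder(tf.float32, shape=[2, 64, 64, 3])  # NHWC
  filtered = smooth(images)  # shape [2, 64, 64, 3] with SAME padding
  ```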
"""
def __init__(self, kernel_shape, stride=1, padding=SAME, use_bias=True,
initializers=None, partitioners=None, regularizers=None,
data_format=DATA_FORMAT_NHWC, padding_value=CONSTANT_PADDING,
custom_getter=None, name="in_plane_conv2d"):
"""Constructs an InPlaneConv2D module.
See the following documentation for an explanation of VALID versus SAME
padding modes:
https://www.tensorflow.org/api_docs/python/tf/nn/convolution
Args:
kernel_shape: Iterable with 2 elements in the layout [filter_height,
filter_width]; or integer that is used to define the list in all
dimensions.
stride: Iterable with 2 or 4 elements of kernel strides, or integer that
is used to define stride in all dimensions.
padding: Padding algorithm. Either `snt.SAME`, `snt.VALID`, `snt.FULL`,
`snt.CAUSAL`, `snt.REVERSE_CAUSAL`, or a sequence of these paddings
of length 2.
* snt.SAME and snt.VALID are explained in the Tensorflow docs at
https://www.tensorflow.org/api_docs/python/tf/nn/convolution.
* snt.FULL pre- and post-pads with the maximum padding which does not
result in a convolution over just padded elements.
* snt.CAUSAL pre-pads to ensure that each output value only depends on
input values at the same or preceding indices ("no dependence on the
future").
* snt.REVERSE_CAUSAL post-pads to ensure that each output value only
depends on input values at the same or *greater* indices ("no
dependence on the past").
If you use the same padding for all dimensions, and it is one of SAME
or VALID, then this is supported directly by the underlying
convolution op. In all other cases, the input data will be padded
using tf.pad before calling the convolution op.
use_bias: Whether to include bias parameters. Default `True`.
initializers: Optional dict containing ops to initialize the filters (with
key 'w') or biases (with key 'b').
partitioners: Optional dict containing partitioners to partition the
filters (with key 'w') or biases (with key 'b'). As a default, no
partitioners are used.
regularizers: Optional dict containing regularizers for the filters
(with key 'w') and the biases (with key 'b'). As a default, no
regularizers are used. A regularizer should be a function that takes
a single `Tensor` as an input and returns a scalar `Tensor` output,
e.g. the L1 and L2 regularizers in `tf.contrib.layers`.
data_format: A string. Specifies whether the channel dimension
of the input and output is the last dimension (default, NHWC), or the
second dimension (NCHW).
padding_value: The type of padding to use, either "CONSTANT", "SYMMETRIC"
or "REFLECT", as supported by the underlying tf.pad
(https://www.tensorflow.org/api_docs/python/tf/pad). Can only be set
globally for all dimensions. Defaults to "CONSTANT" which will pad
with zeros, potentially directly via the underlying convolution op if
the padding is SAME for all dimensions.
custom_getter: Callable or dictionary of callables to use as
custom getters inside the module. If a dictionary, the keys
correspond to regexes to match variable names. See the
`tf.get_variable` documentation for information about the
custom_getter API.
name: Name of the module.
Raises:
ValueError: If the given data_format is not a supported format (see
`SUPPORTED_2D_DATA_FORMATS`).
base.IncompatibleShapeError: If the given kernel shape is not an integer;
or if the given kernel shape is not a sequence of two integers.
base.IncompatibleShapeError: If the given stride is not an integer; or if
the given stride is not a sequence of two integers.
ValueError: If the given padding is not `snt.VALID`, `snt.SAME`,
`snt.FULL`, `snt.CAUSAL`, `snt.REVERSE_CAUSAL` or a sequence of these.
KeyError: If `initializers`, `partitioners` or `regularizers` contain any
keys other than 'w' or 'b'.
TypeError: If any of the given initializers, partitioners or regularizers
are not callable.
ValueError: If the passed-in data_format doesn't have a channel dimension.
"""
if data_format not in SUPPORTED_2D_DATA_FORMATS:
raise ValueError("Invalid data_format {:s}. Allowed formats "
"{}".format(data_format, SUPPORTED_2D_DATA_FORMATS))
super(InPlaneConv2D, self).__init__(
output_channels=lambda: self.input_channels,
kernel_shape=kernel_shape, stride=stride, padding=padding,
padding_value=padding_value, use_bias=use_bias,
initializers=initializers, partitioners=partitioners,
regularizers=regularizers, data_format=data_format,
custom_getter=custom_getter, name=name)
def _construct_w(self, inputs):
"""Construct the convolution weight matrix.
Figures out the shape of the weight matrix, initialize it, and return it.
Args:
inputs: A Tensor of shape `data_format` and of type `tf.float16`,
`tf.bfloat16`, `tf.float32` or `tf.float64`.
Returns:
w: A weight matrix of the same type as `inputs` and of shape
[kernel_shape, 1, 1].
"""
weight_shape = self._kernel_shape + (1, 1)
if "w" not in self._initializers:
self._initializers["w"] = create_weight_initializer(weight_shape[:2],
dtype=inputs.dtype)
w = tf.get_variable("w",
shape=weight_shape,
dtype=inputs.dtype,
initializer=self._initializers["w"],
partitioner=self._partitioners.get("w", None),
regularizer=self._regularizers.get("w", None))
return w
def _apply_conv(self, inputs, w):
"""Apply a depthwise_conv2d operation on `inputs` using variable `w`.
Args:
inputs: A Tensor of shape `data_format` and of type `tf.float16`,
`tf.bfloat16`, `tf.float32` or `tf.float64`.
w: A weight matrix of the same type as `inputs`.
Returns:
outputs: The result of the convolution operation on `inputs`.
"""
tiled_weights = tf.tile(w, [1, 1, self._input_channels, 1])
outputs = tf.nn.depthwise_conv2d(inputs,
tiled_weights,
strides=self.stride,
padding=self._conv_op_padding,
data_format=self._data_format)
return outputs
class DepthwiseConv2D(_ConvND):
"""Spatial depthwise 2D convolution module, including bias.
  This acts as a light wrapper around the TensorFlow op
`tf.nn.depthwise_conv2d`, abstracting away variable creation and sharing.
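  A minimal usage sketch (shapes are illustrative); the module produces
  `input_channels * channel_multiplier` output channels:
  ```
  import sonnet as snt
  import tensorflow.compat.v1 as tf
  depthwise = snt.DepthwiseConv2D(channel_multiplier=2, kernel_shape=3)
  images = tf.placeholder(tf.float32, shape=[2, 64, 64, 3])  # NHWC
  features = depthwise(images)  # shape [2, 64, 64, 6] with SAME padding
  ```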
"""
def __init__(self,
channel_multiplier,
kernel_shape,
stride=1,
padding=SAME,
use_bias=True,
initializers=None,
partitioners=None,
regularizers=None,
data_format=DATA_FORMAT_NHWC,
padding_value=CONSTANT_PADDING,
custom_getter=None,
name="conv_2d_depthwise"):
"""Constructs a DepthwiseConv2D module.
See the following documentation for an explanation of VALID versus SAME
padding modes:
https://www.tensorflow.org/api_docs/python/tf/nn/convolution
Args:
channel_multiplier: Number of channels to expand convolution to. Must be
an integer. Must be > 0. When `channel_multiplier` is set to 1, apply
a different filter to each input channel producing one output channel
per input channel. Numbers larger than 1 cause multiple different
filters to be applied to each input channel, with their outputs being
concatenated together, producing `channel_multiplier` *
`input_channels` output channels.
kernel_shape: Iterable with 2 elements in the following layout:
[filter_height, filter_width] or integer that is
used to define the list in all dimensions.
stride: Iterable with 2 or 4 elements of kernel strides, or integer that
is used to define stride in all dimensions. Layout of list:
        In case of 4 elements: `[1, stride_height, stride_width, 1]`
In case of 2 elements: `[stride_height, stride_width]`.
padding: Padding algorithm. Either `snt.SAME`, `snt.VALID`, `snt.FULL`,
`snt.CAUSAL`, `snt.REVERSE_CAUSAL`, or a sequence of these paddings
of length 2.
* snt.SAME and snt.VALID are explained in the Tensorflow docs at
https://www.tensorflow.org/api_docs/python/tf/nn/convolution.
* snt.FULL pre- and post-pads with the maximum padding which does not
result in a convolution over just padded elements.
* snt.CAUSAL pre-pads to ensure that each output value only depends on
input values at the same or preceding indices ("no dependence on the
future").
* snt.REVERSE_CAUSAL post-pads to ensure that each output value only
depends on input values at the same or *greater* indices ("no
dependence on the past").
If you use the same padding for all dimensions, and it is one of SAME
or VALID, then this is supported directly by the underlying
convolution op. In all other cases, the input data will be padded
using tf.pad before calling the convolution op.
use_bias: Whether to include bias parameters. Default `True`.
initializers: Optional dict containing ops to initialize the filters (with
key 'w') or biases (with key 'b').
partitioners: Optional dict containing partitioners for the filters
(with key 'w') and the biases (with key 'b'). As a default, no
partitioners are used.
regularizers: Optional dict containing regularizers for the filters
(with key 'w') and the biases (with key 'b'). As a default, no
regularizers are used. A regularizer should be a function that takes
a single `Tensor` as an input and returns a scalar `Tensor` output,
e.g. the L1 and L2 regularizers in `tf.contrib.layers`.
data_format: A string. Specifies whether the channel dimension
of the input and output is the last dimension (default, NHWC), or the
second dimension ("NCHW").
padding_value: The type of padding to use, either "CONSTANT", "SYMMETRIC"
or "REFLECT", as supported by the underlying tf.pad
(https://www.tensorflow.org/api_docs/python/tf/pad). Can only be set
globally for all dimensions. Defaults to "CONSTANT" which will pad
with zeros, potentially directly via the underlying convolution op if
the padding is SAME for all dimensions.
custom_getter: Callable or dictionary of callables to use as
custom getters inside the module. If a dictionary, the keys
correspond to regexes to match variable names. See the
`tf.get_variable` documentation for information about the
custom_getter API.
name: Name of the module.
Raises:
ValueError: If `channel_multiplier` isn't of type (`numbers.Integral` or
`tf.Dimension`).
ValueError: If `channel_multiplier` is less than 1.
ValueError: If the given data_format is not a supported format (see
`SUPPORTED_2D_DATA_FORMATS`).
base.IncompatibleShapeError: If the given kernel shape is not an integer;
or if the given kernel shape is not a sequence of two integers.
base.IncompatibleShapeError: If the given stride is not an integer; or if
the given stride is not a sequence of two integers.
ValueError: If the given padding is not `snt.VALID`, `snt.SAME`,
`snt.FULL`, `snt.CAUSAL`, `snt.REVERSE_CAUSAL` or a sequence of these.
KeyError: If `initializers`, `partitioners` or `regularizers` contain any
keys other than 'w' or 'b'.
TypeError: If any of the given initializers, partitioners or regularizers
are not callable.
ValueError: If the passed-in data_format doesn't have a channel dimension.
"""
if (not isinstance(channel_multiplier, numbers.Integral) and
not isinstance(channel_multiplier, tf.Dimension)):
raise ValueError(("channel_multiplier ({}), must be of type "
"(`tf.Dimension`, `numbers.Integral`).").format(
channel_multiplier))
if channel_multiplier < 1:
raise ValueError("channel_multiplier ({}), must be >= 1".format(
channel_multiplier))
self._channel_multiplier = channel_multiplier
if data_format not in SUPPORTED_2D_DATA_FORMATS:
raise ValueError("Invalid data_format {:s}. Allowed formats "
"{}".format(data_format, SUPPORTED_2D_DATA_FORMATS))
super(DepthwiseConv2D, self).__init__(
output_channels=lambda: self._input_channels * self._channel_multiplier,
kernel_shape=kernel_shape,
stride=stride, padding=padding,
padding_value=padding_value, use_bias=use_bias,
initializers=initializers, partitioners=partitioners,
regularizers=regularizers, data_format=data_format,
custom_getter=custom_getter, name=name)
def _construct_w(self, inputs):
"""Construct the convolution weight matrix.
Figures out the shape of the weight matrix, initializes it, and returns it.
Args:
inputs: A Tensor of shape `data_format` and of type `tf.float16`,
`tf.bfloat16`, `tf.float32` or `tf.float64`.
Returns:
w: A weight matrix of the same type as `inputs` and of shape
[kernel_sizes, input_channels, channel_multiplier].
"""
# For depthwise conv, output_channels = in_channels * channel_multiplier.
# By default, depthwise conv applies a different filter to every input
# channel. If channel_multiplier > 1, one input channel is used to produce
# `channel_multiplier` outputs, which are then concatenated together.
# This results in:
weight_shape = self._kernel_shape + (self._input_channels,
self._channel_multiplier)
if "w" not in self._initializers:
self._initializers["w"] = create_weight_initializer(weight_shape[:2],
dtype=inputs.dtype)
w = tf.get_variable("w",
shape=weight_shape,
dtype=inputs.dtype,
initializer=self._initializers["w"],
partitioner=self._partitioners.get("w", None),
regularizer=self._regularizers.get("w", None))
return w
def _apply_conv(self, inputs, w):
"""Apply a depthwise_conv2d operation on `inputs` using variable `w`.
Args:
inputs: A Tensor of shape `data_format` and of type `tf.float16`,
`tf.bfloat16`, `tf.float32` or `tf.float64`.
w: A weight matrix of the same type as `inputs`.
Returns:
outputs: The result of the convolution operation on `inputs`.
"""
outputs = tf.nn.depthwise_conv2d(inputs,
w,
strides=self.stride,
padding=self._conv_op_padding,
data_format=self._data_format)
return outputs
@property
def channel_multiplier(self):
"""Returns the channel multiplier argument."""
return self._channel_multiplier
class SeparableConv2D(_ConvND):
"""Performs an in-plane convolution to each channel independently.
This acts as a light wrapper around the TensorFlow op
`tf.nn.separable_conv2d`, abstracting away variable creation and sharing.
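  A minimal usage sketch (shapes are illustrative):
  ```
  import sonnet as snt
  import tensorflow.compat.v1 as tf
  separable = snt.SeparableConv2D(
      output_channels=32, channel_multiplier=1, kernel_shape=3)
  images = tf.placeholder(tf.float32, shape=[2, 64, 64, 3])  # NHWC
  features = separable(images)  # shape [2, 64, 64, 32] with SAME padding
  ```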
"""
def __init__(self,
output_channels,
channel_multiplier,
kernel_shape,
stride=1,
rate=1,
padding=SAME,
use_bias=True,
initializers=None,
partitioners=None,
regularizers=None,
data_format=DATA_FORMAT_NHWC,
padding_value=CONSTANT_PADDING,
custom_getter=None,
name="separable_conv2d"):
"""Constructs a SeparableConv2D module.
See the following documentation for an explanation of VALID versus SAME
padding modes:
https://www.tensorflow.org/api_docs/python/tf/nn/convolution
Args:
output_channels: Number of output channels. Must be an integer.
      channel_multiplier: Number of channels to expand the depthwise
        convolution to. Must be an integer. Must be > 0.
        When `channel_multiplier` is set to 1, applies a different filter to
        each input channel. Numbers larger than 1 cause `channel_multiplier`
        different filters to be applied to each input channel; their outputs
        are concatenated before the pointwise (1x1) convolution.
kernel_shape: List with 2 elements in the following layout:
[filter_height, filter_width] or integer that is
used to define the list in all dimensions.
stride: List with 4 elements of kernel strides, or integer that is used to
define stride in all dimensions. Layout of list:
[1, stride_y, stride_x, 1].
rate: Sequence of dilation rates (of size 2), or integer that is used to
define dilation rate in all dimensions. 1 corresponds to standard 2D
convolution, `rate > 1` corresponds to dilated convolution. Cannot be
> 1 if any of `stride` is also > 1.
padding: Padding algorithm. Either `snt.SAME`, `snt.VALID`, `snt.FULL`,
`snt.CAUSAL`, `snt.REVERSE_CAUSAL`, or a sequence of these paddings
of length 2.
* snt.SAME and snt.VALID are explained in the Tensorflow docs at
https://www.tensorflow.org/api_docs/python/tf/nn/convolution.
* snt.FULL pre- and post-pads with the maximum padding which does not
result in a convolution over just padded elements.
* snt.CAUSAL pre-pads to ensure that each output value only depends on
input values at the same or preceding indices ("no dependence on the
future").
* snt.REVERSE_CAUSAL post-pads to ensure that each output value only
depends on input values at the same or *greater* indices ("no
dependence on the past").
If you use the same padding for all dimensions, and it is one of SAME
or VALID, then this is supported directly by the underlying
convolution op. In all other cases, the input data will be padded
using tf.pad before calling the convolution op.
use_bias: Whether to include bias parameters. Default `True`.
initializers: Optional dict containing ops to initialize the filters (with
keys 'w_dw' for depthwise and 'w_pw' for pointwise) or biases
(with key 'b').
      partitioners: Optional dict containing partitioners to partition the
        filters (with keys 'w_dw' for depthwise and 'w_pw' for pointwise) or
        biases (with key 'b'). As a default, no partitioners are used.
regularizers: Optional dict containing regularizers for the filters
(with keys 'w_dw' for depthwise and 'w_pw' for pointwise) and the
biases (with key 'b'). As a default, no regularizers are used.
A regularizer should be a function that takes a single `Tensor` as an
input and returns a scalar `Tensor` output, e.g. the L1 and L2
regularizers in `tf.contrib.layers`.
data_format: A string. Specifies whether the channel dimension
of the input and output is the last dimension (default, NHWC), or the
second dimension ("NCHW").
padding_value: The type of padding to use, either "CONSTANT", "SYMMETRIC"
or "REFLECT", as supported by the underlying tf.pad
(https://www.tensorflow.org/api_docs/python/tf/pad). Can only be set
globally for all dimensions. Defaults to "CONSTANT" which will pad
with zeros, potentially directly via the underlying convolution op if
the padding is SAME for all dimensions.
custom_getter: Callable or dictionary of callables to use as
custom getters inside the module. If a dictionary, the keys
correspond to regexes to match variable names. See the
`tf.get_variable` documentation for information about the
custom_getter API.
name: Name of the module.
Raises:
ValueError: If `channel_multiplier` isn't of type (`numbers.Integral` or
`tf.Dimension`).
ValueError: If `channel_multiplier` is less than 1.
ValueError: If the given data_format is not a supported format (see
`SUPPORTED_2D_DATA_FORMATS`).
base.IncompatibleShapeError: If the given kernel shape is not an integer;
or if the given kernel shape is not a sequence of two integers.
base.IncompatibleShapeError: If the given stride is not an integer; or if
the given stride is not a sequence of two integers.
base.IncompatibleShapeError: If the given rate is not an integer; or if
the given rate is not a sequence of two integers.
base.IncompatibleShapeError: If a mask is a TensorFlow Tensor with
a not fully defined shape.
base.NotSupportedError: If rate in any dimension and the stride in any
dimension are simultaneously > 1.
ValueError: If the given padding is not `snt.VALID`, `snt.SAME`,
`snt.FULL`, `snt.CAUSAL`, `snt.REVERSE_CAUSAL` or a sequence of these.
KeyError: If `initializers`, `partitioners` or `regularizers` contain any
keys other than 'w_dw', 'w_pw' or 'b'.
TypeError: If any of the given initializers, partitioners or regularizers
are not callable.
TypeError: If mask is given and it is not convertible to a Tensor.
ValueError: If the passed-in data_format doesn't have a channel dimension.
"""
if (not isinstance(channel_multiplier, numbers.Integral) and
not isinstance(channel_multiplier, tf.Dimension)):
raise ValueError(("channel_multiplier ({}), must be of type "
"(`tf.Dimension`, `numbers.Integral`).").format(
channel_multiplier))
if channel_multiplier < 1:
raise ValueError("channel_multiplier ({}), must be >= 1".format(
channel_multiplier))
self._channel_multiplier = channel_multiplier
if data_format not in SUPPORTED_2D_DATA_FORMATS:
raise ValueError("Invalid data_format {:s}. Allowed formats "
"{}".format(data_format, SUPPORTED_2D_DATA_FORMATS))
super(SeparableConv2D, self).__init__(
output_channels=output_channels,
kernel_shape=kernel_shape,
stride=stride, padding=padding, padding_value=padding_value, rate=rate,
use_bias=use_bias, initializers=initializers, partitioners=partitioners,
regularizers=regularizers, data_format=data_format,
custom_getter=custom_getter, name=name)
@classmethod
def get_possible_initializer_keys(cls, use_bias=True):
return {"w_dw", "w_pw", "b"} if use_bias else {"w_dw", "w_pw"}
def _construct_w(self, inputs):
"""Connects the module into the graph, with input Tensor `inputs`.
Args:
inputs: A 4D Tensor of shape:
[batch_size, input_height, input_width, input_channels]
and of type `tf.float16`, `tf.bfloat16`, `tf.float32` or `tf.float64`.
Returns:
A tuple of two 4D Tensors, each with the same dtype as `inputs`:
1. w_dw, the depthwise weight matrix, of shape:
[kernel_size, input_channels, channel_multiplier]
2. w_pw, the pointwise weight matrix, of shape:
[1, 1, channel_multiplier * input_channels, output_channels].
"""
depthwise_weight_shape = self._kernel_shape + (self._input_channels,
self._channel_multiplier)
pointwise_input_size = self._channel_multiplier * self._input_channels
pointwise_weight_shape = (1, 1, pointwise_input_size, self._output_channels)
if "w_dw" not in self._initializers:
fan_in_shape = depthwise_weight_shape[:2]
self._initializers["w_dw"] = create_weight_initializer(fan_in_shape,
dtype=inputs.dtype)
if "w_pw" not in self._initializers:
fan_in_shape = pointwise_weight_shape[:3]
self._initializers["w_pw"] = create_weight_initializer(fan_in_shape,
dtype=inputs.dtype)
w_dw = tf.get_variable(
"w_dw",
shape=depthwise_weight_shape,
dtype=inputs.dtype,
initializer=self._initializers["w_dw"],
partitioner=self._partitioners.get("w_dw", None),
regularizer=self._regularizers.get("w_dw", None))
w_pw = tf.get_variable(
"w_pw",
shape=pointwise_weight_shape,
dtype=inputs.dtype,
initializer=self._initializers["w_pw"],
partitioner=self._partitioners.get("w_pw", None),
regularizer=self._regularizers.get("w_pw", None))
return w_dw, w_pw
def _apply_conv(self, inputs, w):
"""Apply a `separable_conv2d` operation on `inputs` using `w`.
Args:
inputs: A Tensor of shape `data_format` and of type `tf.float16`,
`tf.bfloat16`, `tf.float32` or `tf.float64`.
w: A tuple of weight matrices of the same type as `inputs`, the first
being the depthwise weight matrix, and the second being the pointwise
weight matrix.
Returns:
outputs: The result of the convolution operation on `inputs`.
"""
w_dw, w_pw = w
outputs = tf.nn.separable_conv2d(inputs,
w_dw,
w_pw,
rate=self._rate,
strides=self.stride,
padding=self._conv_op_padding,
data_format=self._data_format)
return outputs
@property
def channel_multiplier(self):
"""Returns the channel multiplier argument."""
return self._channel_multiplier
@property
def w_dw(self):
"""Returns the Variable containing the depthwise weight matrix."""
self._ensure_is_connected()
return self._w[0]
@property
def w_pw(self):
"""Returns the Variable containing the pointwise weight matrix."""
self._ensure_is_connected()
return self._w[1]
class SeparableConv1D(_ConvND):
"""Performs an in-plane convolution to each channel independently.
This acts as a light wrapper around the TensorFlow op
`tf.nn.separable_conv2d`, abstracting away variable creation and sharing.
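  A minimal usage sketch (shapes are illustrative; the default data format
  is NWC):
  ```
  import sonnet as snt
  import tensorflow.compat.v1 as tf
  separable = snt.SeparableConv1D(
      output_channels=32, channel_multiplier=1, kernel_shape=3)
  signals = tf.placeholder(tf.float32, shape=[2, 100, 8])  # NWC
  features = separable(signals)  # shape [2, 100, 32] with SAME padding
  ```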
"""
def __init__(self,
output_channels,
channel_multiplier,
kernel_shape,
stride=1,
rate=1,
padding=SAME,
use_bias=True,
initializers=None,
partitioners=None,
regularizers=None,
data_format=DATA_FORMAT_NWC,
padding_value=CONSTANT_PADDING,
custom_getter=None,
name="separable_conv1d"):
"""Constructs a SeparableConv1D module.
See the following documentation for an explanation of VALID versus SAME
padding modes:
https://www.tensorflow.org/api_docs/python/tf/nn/convolution
Args:
output_channels: Number of output channels. Must be an integer.
      channel_multiplier: Number of channels to expand the depthwise
        convolution to. Must be an integer. Must be > 0.
        When `channel_multiplier` is set to 1, applies a different filter to
        each input channel. Numbers larger than 1 cause `channel_multiplier`
        different filters to be applied to each input channel; their outputs
        are concatenated before the pointwise (1x1) convolution.
      kernel_shape: List with a single element, [filter_width], or an integer
        that is used to define the kernel size.
      stride: List with a single element, [stride_width], or an integer that
        is used to define the stride in all dimensions.
rate: Sequence of dilation rates (of size 1), or integer that is used to
define dilation rate in all dimensions. 1 corresponds to standard 1D
convolution, `rate > 1` corresponds to dilated convolution. Cannot be
> 1 if any of `stride` is also > 1.
padding: Padding algorithm. Either `snt.SAME`, `snt.VALID`, `snt.FULL`,
`snt.CAUSAL`, `snt.REVERSE_CAUSAL`, or a sequence of these paddings
of length 1.
* snt.SAME and snt.VALID are explained in the Tensorflow docs at
https://www.tensorflow.org/api_docs/python/tf/nn/convolution.
* snt.FULL pre- and post-pads with the maximum padding which does not
result in a convolution over just padded elements.
* snt.CAUSAL pre-pads to ensure that each output value only depends on
input values at the same or preceding indices ("no dependence on the
future").
* snt.REVERSE_CAUSAL post-pads to ensure that each output value only
depends on input values at the same or *greater* indices ("no
dependence on the past").
If you use the same padding for all dimensions, and it is one of SAME
or VALID, then this is supported directly by the underlying
convolution op. In all other cases, the input data will be padded
using tf.pad before calling the convolution op.
use_bias: Whether to include bias parameters. Default `True`.
initializers: Optional dict containing ops to initialize the filters (with
keys 'w_dw' for depthwise and 'w_pw' for pointwise) or biases
(with key 'b').
      partitioners: Optional dict containing partitioners to partition the
        filters (with keys 'w_dw' for depthwise and 'w_pw' for pointwise) or
        biases (with key 'b'). As a default, no partitioners are used.
regularizers: Optional dict containing regularizers for the filters
(with keys 'w_dw' for depthwise and 'w_pw' for pointwise) and the
biases (with key 'b'). As a default, no regularizers are used.
A regularizer should be a function that takes a single `Tensor` as an
input and returns a scalar `Tensor` output, e.g. the L1 and L2
regularizers in `tf.contrib.layers`.
data_format: A string. Specifies whether the channel dimension
of the input and output is the last dimension (default, NWC), or the
second dimension ("NCW").
padding_value: The type of padding to use, either "CONSTANT", "SYMMETRIC"
or "REFLECT", as supported by the underlying tf.pad
(https://www.tensorflow.org/api_docs/python/tf/pad). Can only be set
globally for all dimensions. Defaults to "CONSTANT" which will pad
with zeros, potentially directly via the underlying convolution op if
the padding is SAME for all dimensions.
custom_getter: Callable or dictionary of callables to use as
custom getters inside the module. If a dictionary, the keys
correspond to regexes to match variable names. See the
`tf.get_variable` documentation for information about the
custom_getter API.
name: Name of the module.
Raises:
ValueError: If `channel_multiplier` isn't of type (`numbers.Integral` or
`tf.Dimension`).
ValueError: If `channel_multiplier` is less than 1.
ValueError: If the given data_format is not a supported format (see
`SUPPORTED_1D_DATA_FORMATS`).
base.IncompatibleShapeError: If the given kernel shape is not an integer;
or if the given kernel shape is not a sequence of one integer.
      base.IncompatibleShapeError: If the given stride is not an integer; or if
        the given stride is not a sequence of one or three integers.
      base.IncompatibleShapeError: If the given rate is not an integer; or if
        the given rate is not a sequence of one integer.
base.IncompatibleShapeError: If a mask is a TensorFlow Tensor with
a not fully defined shape.
base.NotSupportedError: If rate in any dimension and the stride in any
dimension are simultaneously > 1.
ValueError: If the given padding is not `snt.VALID`, `snt.SAME`,
`snt.FULL`, `snt.CAUSAL`, `snt.REVERSE_CAUSAL` or a sequence of these.
KeyError: If `initializers`, `partitioners` or `regularizers` contain any
keys other than 'w_dw', 'w_pw' or 'b'.
TypeError: If any of the given initializers, partitioners or regularizers
are not callable.
TypeError: If mask is given and it is not convertible to a Tensor.
ValueError: If the passed-in data_format doesn't have a channel dimension.
"""
if (not isinstance(channel_multiplier, numbers.Integral) and
not isinstance(channel_multiplier, tf.Dimension)):
raise ValueError(("channel_multiplier ({}), must be of type "
"(`tf.Dimension`, `numbers.Integral`).").format(
channel_multiplier))
if channel_multiplier < 1:
raise ValueError("channel_multiplier ({}), must be >= 1".format(
channel_multiplier))
self._channel_multiplier = channel_multiplier
if data_format not in SUPPORTED_1D_DATA_FORMATS:
raise ValueError("Invalid data_format {:s}. Allowed formats "
"{}".format(data_format, SUPPORTED_1D_DATA_FORMATS))
super(SeparableConv1D, self).__init__(
output_channels=output_channels,
kernel_shape=kernel_shape,
stride=stride, rate=rate, padding=padding, padding_value=padding_value,
use_bias=use_bias, initializers=initializers, partitioners=partitioners,
regularizers=regularizers, data_format=data_format,
custom_getter=custom_getter, name=name)
@classmethod
def get_possible_initializer_keys(cls, use_bias=True):
return {"w_dw", "w_pw", "b"} if use_bias else {"w_dw", "w_pw"}
def _construct_w(self, inputs):
"""Connects the module into the graph, with input Tensor `inputs`.
Args:
      inputs: A 3D Tensor of shape:
        [batch_size, input_width, input_channels]
        and of type `tf.float16`, `tf.bfloat16`, `tf.float32` or `tf.float64`.
    Returns:
      A tuple of two 4D Tensors, each with the same dtype as `inputs`:
        1. w_dw, the depthwise weight matrix, of shape:
          [1, kernel_size, input_channels, channel_multiplier]
2. w_pw, the pointwise weight matrix, of shape:
[1, 1, channel_multiplier * input_channels, output_channels].
"""
depthwise_weight_shape = ((1,) + self._kernel_shape +
(self._input_channels, self._channel_multiplier))
pointwise_input_size = self._channel_multiplier * self._input_channels
pointwise_weight_shape = (1, 1, pointwise_input_size, self._output_channels)
if "w_dw" not in self._initializers:
fan_in_shape = depthwise_weight_shape[:2]
self._initializers["w_dw"] = create_weight_initializer(fan_in_shape,
dtype=inputs.dtype)
if "w_pw" not in self._initializers:
fan_in_shape = pointwise_weight_shape[:3]
self._initializers["w_pw"] = create_weight_initializer(fan_in_shape,
dtype=inputs.dtype)
w_dw = tf.get_variable(
"w_dw",
shape=depthwise_weight_shape,
dtype=inputs.dtype,
initializer=self._initializers["w_dw"],
partitioner=self._partitioners.get("w_dw", None),
regularizer=self._regularizers.get("w_dw", None))
w_pw = tf.get_variable(
"w_pw",
shape=pointwise_weight_shape,
dtype=inputs.dtype,
initializer=self._initializers["w_pw"],
partitioner=self._partitioners.get("w_pw", None),
regularizer=self._regularizers.get("w_pw", None))
return w_dw, w_pw
def _apply_conv(self, inputs, w):
"""Apply a `separable_conv2d` operation on `inputs` using `w`.
Args:
inputs: A Tensor of shape `data_format` and of type `tf.float16`,
`tf.bfloat16`, `tf.float32` or `tf.float64`.
w: A tuple of weight matrices of the same type as `inputs`, the first
being the depthwise weight matrix, and the second being the pointwise
weight matrix.
Returns:
outputs: The result of the convolution operation on `inputs`.
"""
if self._data_format == DATA_FORMAT_NWC:
h_dim = 1
two_dim_conv_data_format = DATA_FORMAT_NHWC
else:
h_dim = 2
two_dim_conv_data_format = DATA_FORMAT_NCHW
inputs = tf.expand_dims(inputs, axis=h_dim)
two_dim_conv_stride = self.stride[:h_dim] + (1,) + self.stride[h_dim:]
# Height always precedes width.
two_dim_conv_rate = (1,) + self._rate
w_dw, w_pw = w
outputs = tf.nn.separable_conv2d(inputs,
w_dw,
w_pw,
strides=two_dim_conv_stride,
rate=two_dim_conv_rate,
padding=self._conv_op_padding,
data_format=two_dim_conv_data_format)
outputs = tf.squeeze(outputs, [h_dim])
return outputs
@property
def channel_multiplier(self):
"""Returns the channel multiplier argument."""
return self._channel_multiplier
@property
def w_dw(self):
"""Returns the Variable containing the depthwise weight matrix."""
self._ensure_is_connected()
return self._w[0]
@property
def w_pw(self):
"""Returns the Variable containing the pointwise weight matrix."""
self._ensure_is_connected()
return self._w[1]
| sonnet-1 | sonnet/python/modules/conv.py |
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for sonnet.python.modules.base."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
# Dependency imports
from six.moves import xrange  # pylint: disable=redefined-builtin
from sonnet.python.modules import base
from sonnet.python.modules import base_info
from sonnet.python.modules import basic
import tensorflow.compat.v1 as tf
from tensorflow.contrib import framework as contrib_framework
nest = contrib_framework.nest
logging = tf.logging
THIS_MODULE = "__main__"
LINEAR_MODULE = "sonnet.python.modules.basic"
DumbNamedTuple = collections.namedtuple("DumbNamedTuple", ("arg1", "arg2"))
class NotATensor(object):
pass
class DumbModule(base.AbstractModule):
"""Dumb module to test ModuleInfo."""
def __init__(self, name, no_nest=False):
base.AbstractModule.__init__(self, name=name)
self.no_nest = no_nest
def _build(self, inputs):
if isinstance(inputs, (NotATensor, tf.SparseTensor)):
outputs = inputs
else:
if self.no_nest:
outputs = inputs
else:
outputs = nest.map_structure(tf.identity, inputs)
return outputs
def _copy_default_graph():
# Save default graph into `meta_graph_def`.
meta_graph_def = tf.train.export_meta_graph()
# Reset default graph.
tf.reset_default_graph()
# Load default graph from `meta_graph_def`.
tf.train.import_meta_graph(meta_graph_def)
class ModuleInfoTest(tf.test.TestCase):
def testIsNamedTuple(self):
self.assertTrue(base_info._is_namedtuple(DumbNamedTuple(1, 2)))
self.assertFalse(base_info._is_namedtuple((1, 2, 3)))
self.assertFalse(base_info._is_namedtuple([1, 2, 3]))
self.assertFalse(base_info._is_namedtuple(NotATensor()))
def testIsIterable(self):
self.assertTrue(base_info._is_iterable((1, 2, 3)))
self.assertTrue(base_info._is_iterable([1, 2, 3]))
self.assertTrue(base_info._is_iterable({1: 1, 2: 2, 3: 3}))
self.assertTrue(base_info._is_iterable(
collections.OrderedDict([(1, 1), (2, 2)])))
self.assertTrue(base_info._is_iterable(DumbNamedTuple(1, 2)))
tensor = tf.placeholder(dtype=tf.float32, shape=(1, 10,))
self.assertFalse(base_info._is_iterable(set([1, 2, 3])))
self.assertFalse(base_info._is_iterable(tensor))
sparse_tensor = tf.SparseTensor(
indices=tf.placeholder(dtype=tf.int64, shape=(10, 2,)),
values=tf.placeholder(dtype=tf.float32, shape=(10,)),
dense_shape=tf.placeholder(dtype=tf.int64, shape=(2,)))
self.assertFalse(base_info._is_iterable(sparse_tensor))
self.assertFalse(base_info._is_iterable(NotATensor()))
self.assertFalse(base_info._is_iterable("foo"))
def generator():
for count in xrange(3):
self.assertFalse(False)
yield count
self.assertFalse(base_info._is_iterable(generator))
def testModuleInfo_multiple_modules(self):
# pylint: disable=not-callable
tf.reset_default_graph()
dumb = DumbModule(name="dumb")
dumb_1 = DumbModule(name="dumb")
linear = basic.Linear(10, name="linear")
ph_0 = tf.placeholder(dtype=tf.float32, shape=(1, 10,))
dumb(ph_0)
with tf.name_scope("foo"):
dumb_1(ph_0)
linear(ph_0)
def check():
sonnet_collection = tf.get_default_graph().get_collection(
base_info.SONNET_COLLECTION_NAME)
self.assertEqual(len(sonnet_collection), 3)
# item 0.
self.assertEqual(sonnet_collection[0].module_name, "dumb")
self.assertEqual(sonnet_collection[0].class_name,
"{}.DumbModule".format(THIS_MODULE))
self.assertEqual(sonnet_collection[0].scope_name, "dumb")
self.assertEqual(len(sonnet_collection[0].connected_subgraphs), 1)
self.assertEqual(
sonnet_collection[0].connected_subgraphs[0].name_scope, "dumb")
# item 1.
self.assertEqual(sonnet_collection[1].module_name, "dumb_1")
self.assertEqual(sonnet_collection[1].scope_name, "dumb_1")
self.assertEqual(sonnet_collection[1].class_name,
"{}.DumbModule".format(THIS_MODULE))
self.assertEqual(sonnet_collection[1].scope_name, "dumb_1")
self.assertEqual(len(sonnet_collection[1].connected_subgraphs), 1)
self.assertEqual(
sonnet_collection[1].connected_subgraphs[0].name_scope, "foo/dumb_1")
# item 2.
self.assertEqual(sonnet_collection[2].module_name, "linear")
self.assertEqual(sonnet_collection[2].scope_name, "linear")
self.assertEqual(sonnet_collection[2].class_name,
"{}.Linear".format(LINEAR_MODULE))
self.assertEqual(sonnet_collection[2].scope_name, "linear")
self.assertEqual(len(sonnet_collection[2].connected_subgraphs), 1)
self.assertEqual(
sonnet_collection[2].connected_subgraphs[0].name_scope, "linear")
check()
_copy_default_graph()
check()
def testModuleInfo_multiple_subgraph(self):
# pylint: disable=not-callable
tf.reset_default_graph()
dumb = DumbModule(name="dumb_a")
ph_0 = tf.placeholder(dtype=tf.float32, shape=(1, 10,))
dumb(ph_0)
with tf.name_scope("foo"):
dumb(ph_0)
def check():
sonnet_collection = tf.get_default_graph().get_collection(
base_info.SONNET_COLLECTION_NAME)
self.assertEqual(len(sonnet_collection), 1)
self.assertEqual(len(sonnet_collection[0].connected_subgraphs), 2)
connected_subgraph_0 = sonnet_collection[0].connected_subgraphs[0]
connected_subgraph_1 = sonnet_collection[0].connected_subgraphs[1]
self.assertEqual(connected_subgraph_0.name_scope, "dumb_a")
self.assertEqual(connected_subgraph_1.name_scope, "foo/dumb_a")
check()
_copy_default_graph()
check()
def testModuleInfo_tensor(self):
# pylint: disable=not-callable
tf.reset_default_graph()
dumb = DumbModule(name="dumb_a")
ph_0 = tf.placeholder(dtype=tf.float32, shape=(1, 10,))
dumb(ph_0)
def check():
sonnet_collection = tf.get_default_graph().get_collection(
base_info.SONNET_COLLECTION_NAME)
connected_subgraph = sonnet_collection[0].connected_subgraphs[0]
self.assertIsInstance(connected_subgraph.inputs["inputs"], tf.Tensor)
self.assertIsInstance(connected_subgraph.outputs, tf.Tensor)
check()
_copy_default_graph()
check()
def testModuleInfo_sparsetensor(self):
# pylint: disable=not-callable
tf.reset_default_graph()
dumb = DumbModule(name="dumb_a")
sparse_tensor = tf.SparseTensor(
indices=tf.placeholder(dtype=tf.int64, shape=(10, 2,)),
values=tf.placeholder(dtype=tf.float32, shape=(10,)),
dense_shape=tf.placeholder(dtype=tf.int64, shape=(2,)))
dumb(sparse_tensor)
def check():
sonnet_collection = tf.get_default_graph().get_collection(
base_info.SONNET_COLLECTION_NAME)
connected_subgraph = sonnet_collection[0].connected_subgraphs[0]
self.assertIsInstance(
connected_subgraph.inputs["inputs"], tf.SparseTensor)
self.assertIsInstance(connected_subgraph.outputs, tf.SparseTensor)
check()
_copy_default_graph()
check()
def testModuleInfo_tuple(self):
# pylint: disable=not-callable
tf.reset_default_graph()
dumb = DumbModule(name="dumb_a")
ph_0 = tf.placeholder(dtype=tf.float32, shape=(1, 10,))
ph_1 = tf.placeholder(dtype=tf.float32, shape=(1, 10,))
dumb((ph_0, ph_1))
def check():
sonnet_collection = tf.get_default_graph().get_collection(
base_info.SONNET_COLLECTION_NAME)
connected_subgraph = sonnet_collection[0].connected_subgraphs[0]
self.assertIsInstance(connected_subgraph.inputs["inputs"], tuple)
self.assertIsInstance(connected_subgraph.outputs, tuple)
check()
_copy_default_graph()
check()
def testModuleInfo_namedtuple(self):
# pylint: disable=not-callable
tf.reset_default_graph()
dumb = DumbModule(name="dumb_a")
ph_0 = tf.placeholder(dtype=tf.float32, shape=(1, 10,))
ph_1 = tf.placeholder(dtype=tf.float32, shape=(1, 10,))
dumb(DumbNamedTuple(ph_0, ph_1))
def check():
sonnet_collection = tf.get_default_graph().get_collection(
base_info.SONNET_COLLECTION_NAME)
connected_subgraph = sonnet_collection[0].connected_subgraphs[0]
self.assertTrue(
base_info._is_namedtuple(connected_subgraph.inputs["inputs"]))
self.assertTrue(base_info._is_namedtuple(connected_subgraph.outputs))
check()
_copy_default_graph()
check()
def testModuleInfo_dict(self):
# pylint: disable=not-callable
tf.reset_default_graph()
dumb = DumbModule(name="dumb_a")
ph_0 = tf.placeholder(dtype=tf.float32, shape=(1, 10,))
ph_1 = tf.placeholder(dtype=tf.float32, shape=(1, 10,))
dumb({"ph_0": ph_0, "ph_1": ph_1})
def check():
sonnet_collection = tf.get_default_graph().get_collection(
base_info.SONNET_COLLECTION_NAME)
connected_subgraph = sonnet_collection[0].connected_subgraphs[0]
self.assertIsInstance(connected_subgraph.inputs["inputs"], dict)
self.assertIsInstance(connected_subgraph.outputs, dict)
check()
_copy_default_graph()
check()
def testModuleInfo_not_a_tensor(self):
# pylint: disable=not-callable
tf.reset_default_graph()
dumb = DumbModule(name="dumb_a")
dumb(NotATensor())
def check(check_type):
sonnet_collection = tf.get_default_graph().get_collection(
base_info.SONNET_COLLECTION_NAME)
connected_subgraph = sonnet_collection[0].connected_subgraphs[0]
self.assertIsInstance(connected_subgraph.inputs["inputs"], check_type)
self.assertIsInstance(connected_subgraph.outputs, check_type)
check(NotATensor)
_copy_default_graph()
check(base_info._UnserializableObject)
def testModuleInfo_recursion(self):
# pylint: disable=not-callable
tf.reset_default_graph()
dumb = DumbModule(name="dumb_a", no_nest=True)
ph_0 = tf.placeholder(dtype=tf.float32, shape=(1, 10,))
val = {"one": ph_0, "self": None}
val["self"] = val
dumb(val)
def check(check_type):
sonnet_collection = tf.get_default_graph().get_collection(
base_info.SONNET_COLLECTION_NAME)
connected_subgraph = sonnet_collection[0].connected_subgraphs[0]
self.assertIsInstance(connected_subgraph.inputs["inputs"]["one"],
tf.Tensor)
self.assertIsInstance(
connected_subgraph.inputs["inputs"]["self"], check_type)
self.assertIsInstance(connected_subgraph.outputs["one"], tf.Tensor)
self.assertIsInstance(connected_subgraph.outputs["self"], check_type)
check(dict)
_copy_default_graph()
check(base_info._UnserializableObject)
if __name__ == "__main__":
tf.test.main()
| sonnet-1 | sonnet/python/modules/base_info_test.py |
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""LSTM based modules for TensorFlow snt.
This python module contains LSTM-like cores that fall under the broader group
of RNN cores. In general, initializers for the gate weights and other
model parameters may be passed to the constructor.
Typical usage example of the standard LSTM without peephole connections:
```
import sonnet as snt
hidden_size = 10
batch_size = 2
# Simple LSTM op on some input
rnn = snt.LSTM(hidden_size)
input = tf.placeholder(tf.float32, shape=[batch_size, hidden_size])
out, next_state = rnn(input, rnn.initial_state(batch_size))
```
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
# Dependency imports
from six.moves import xrange # pylint: disable=redefined-builtin
from sonnet.python.modules import base
from sonnet.python.modules import basic
from sonnet.python.modules import batch_norm
from sonnet.python.modules import conv
from sonnet.python.modules import layer_norm
from sonnet.python.modules import rnn_core
from sonnet.python.modules import util
import tensorflow.compat.v1 as tf
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib import rnn as contrib_rnn
LSTMState = collections.namedtuple("LSTMState", ("hidden", "cell"))
class LSTM(rnn_core.RNNCore):
"""LSTM recurrent network cell with optional peepholes & layer normalization.
The implementation is based on: http://arxiv.org/abs/1409.2329. We add
forget_bias (default: 1) to the biases of the forget gate in order to
reduce the scale of forgetting in the beginning of the training.
#### Layer normalization
This is described in https://arxiv.org/pdf/1607.06450.pdf
#### Peep-hole connections
Peep-hole connections may optionally be used by specifying a flag in the
constructor. These connections can aid increasing the precision of output
timing, for more details see:
https://research.google.com/pubs/archive/43905.pdf
#### Recurrent projections
Projection of the recurrent state, to reduce model parameters and speed up
computation. For more details see:
https://arxiv.org/abs/1402.1128
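  A minimal sketch of constructing a core with layer normalization and a
  recurrent projection (sizes are illustrative):
  ```
  import sonnet as snt
  import tensorflow.compat.v1 as tf
  core = snt.LSTM(hidden_size=256, use_layer_norm=True, projection_size=64)
  inputs = tf.placeholder(tf.float32, shape=[8, 128])  # [batch, features]
  output, next_state = core(inputs, core.initial_state(batch_size=8))
  ```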
Attributes:
state_size: Tuple of `tf.TensorShape`s indicating the size of state tensors.
output_size: `tf.TensorShape` indicating the size of the core output.
use_peepholes: Boolean indicating whether peephole connections are used.
"""
# Keys that may be provided for parameter initializers.
W_GATES = "w_gates" # weight for gates
B_GATES = "b_gates" # bias of gates
W_F_DIAG = "w_f_diag" # weight for prev_cell -> forget gate peephole
W_I_DIAG = "w_i_diag" # weight for prev_cell -> input gate peephole
W_O_DIAG = "w_o_diag" # weight for prev_cell -> output gate peephole
W_H_PROJECTION = "w_h_projection" # weight for (opt) projection of h in state
POSSIBLE_INITIALIZER_KEYS = {
W_GATES, B_GATES, W_F_DIAG, W_I_DIAG, W_O_DIAG, W_H_PROJECTION}
def __init__(self,
hidden_size,
forget_bias=1.0,
initializers=None,
partitioners=None,
regularizers=None,
use_peepholes=False,
use_layer_norm=False,
hidden_clip_value=None,
projection_size=None,
cell_clip_value=None,
custom_getter=None,
name="lstm"):
"""Construct LSTM.
Args:
hidden_size: (int) Hidden size dimensionality.
forget_bias: (float) Bias for the forget activation.
initializers: Dict containing ops to initialize the weights.
This dictionary may contain any of the keys returned by
`LSTM.get_possible_initializer_keys`.
partitioners: Optional dict containing partitioners to partition
the weights and biases. As a default, no partitioners are used. This
dict may contain any of the keys returned by
`LSTM.get_possible_initializer_keys`.
regularizers: Optional dict containing regularizers for the weights and
biases. As a default, no regularizers are used. This dict may contain
any of the keys returned by
`LSTM.get_possible_initializer_keys`.
use_peepholes: Boolean that indicates whether peephole connections are
used.
use_layer_norm: Boolean that indicates whether to apply layer
normalization.
hidden_clip_value: Optional number; if set, then the LSTM hidden state
vector is clipped by this value.
projection_size: Optional number; if set, then the LSTM hidden state is
projected to this size via a learnable projection matrix.
cell_clip_value: Optional number; if set, then the LSTM cell vector is
clipped by this value.
custom_getter: Callable that takes as a first argument the true getter,
and allows overwriting the internal get_variable method. See the
`tf.get_variable` documentation for more details.
name: Name of the module.
Raises:
KeyError: if `initializers` contains any keys not returned by
`LSTM.get_possible_initializer_keys`.
KeyError: if `partitioners` contains any keys not returned by
`LSTM.get_possible_initializer_keys`.
KeyError: if `regularizers` contains any keys not returned by
`LSTM.get_possible_initializer_keys`.
ValueError: if a peephole initializer is passed in the initializer list,
but `use_peepholes` is False.
"""
super(LSTM, self).__init__(custom_getter=custom_getter, name=name)
self._hidden_size = hidden_size
self._forget_bias = forget_bias
self._use_peepholes = use_peepholes
self._use_layer_norm = use_layer_norm
self._hidden_clip_value = hidden_clip_value
self._cell_clip_value = cell_clip_value
self._use_projection = projection_size is not None
self._hidden_state_size = projection_size or hidden_size
self.possible_keys = self.get_possible_initializer_keys(
use_peepholes=use_peepholes, use_projection=self._use_projection)
self._initializers = util.check_initializers(initializers,
self.possible_keys)
    self._partitioners = util.check_partitioners(
        partitioners, self.possible_keys)
    self._regularizers = util.check_regularizers(
        regularizers, self.possible_keys)
if hidden_clip_value is not None and hidden_clip_value < 0:
raise ValueError("The value of hidden_clip_value should be nonnegative.")
if cell_clip_value is not None and cell_clip_value < 0:
raise ValueError("The value of cell_clip_value should be nonnegative.")
@classmethod
def get_possible_initializer_keys(cls, use_peepholes=False,
use_projection=False):
"""Returns the keys the dictionary of variable initializers may contain.
The set of all possible initializer keys are:
w_gates: weight for gates
b_gates: bias of gates
w_f_diag: weight for prev_cell -> forget gate peephole
w_i_diag: weight for prev_cell -> input gate peephole
w_o_diag: weight for prev_cell -> output gate peephole
Args:
      cls: The class.
use_peepholes: Boolean that indicates whether peephole connections are
used.
use_projection: Boolean that indicates whether a recurrent projection
layer is used.
Returns:
Set with strings corresponding to the strings that may be passed to the
constructor.
"""
possible_keys = cls.POSSIBLE_INITIALIZER_KEYS.copy()
if not use_peepholes:
possible_keys.difference_update(
{cls.W_F_DIAG, cls.W_I_DIAG, cls.W_O_DIAG})
if not use_projection:
possible_keys.difference_update({cls.W_H_PROJECTION})
return possible_keys
def _build(self, inputs, prev_state):
"""Connects the LSTM module into the graph.
If this is not the first time the module has been connected to the graph,
the Tensors provided as inputs and state must have the same final
dimension, in order for the existing variables to be the correct size for
their corresponding multiplications. The batch size may differ for each
connection.
Args:
inputs: Tensor of size `[batch_size, input_size]`.
prev_state: Tuple (prev_hidden, prev_cell).
Returns:
      A tuple (output, next_state) where 'output' is a Tensor of size
      `[batch_size, hidden_size]` and 'next_state' is an `LSTMState` namedtuple
      (next_hidden, next_cell) where `next_hidden` and `next_cell` have size
      `[batch_size, hidden_size]`. If `projection_size` is specified, then
      'output' and `next_hidden` instead have size
      `[batch_size, projection_size]`.
Raises:
ValueError: If connecting the module into the graph any time after the
first time, and the inferred size of the inputs does not match previous
invocations.
"""
prev_hidden, prev_cell = prev_state
# pylint: disable=invalid-unary-operand-type
if self._hidden_clip_value is not None:
prev_hidden = tf.clip_by_value(
prev_hidden, -self._hidden_clip_value, self._hidden_clip_value)
if self._cell_clip_value is not None:
prev_cell = tf.clip_by_value(
prev_cell, -self._cell_clip_value, self._cell_clip_value)
# pylint: enable=invalid-unary-operand-type
self._create_gate_variables(inputs.get_shape(), inputs.dtype)
# pylint false positive: calling module of same file;
# pylint: disable=not-callable
# Parameters of gates are concatenated into one multiply for efficiency.
inputs_and_hidden = tf.concat([inputs, prev_hidden], 1)
gates = tf.matmul(inputs_and_hidden, self._w_xh)
if self._use_layer_norm:
gates = layer_norm.LayerNorm()(gates)
gates += self._b
# i = input_gate, j = next_input, f = forget_gate, o = output_gate
i, j, f, o = tf.split(value=gates, num_or_size_splits=4, axis=1)
if self._use_peepholes: # diagonal connections
self._create_peephole_variables(inputs.dtype)
f += self._w_f_diag * prev_cell
i += self._w_i_diag * prev_cell
forget_mask = tf.sigmoid(f + self._forget_bias)
next_cell = forget_mask * prev_cell + tf.sigmoid(i) * tf.tanh(j)
cell_output = next_cell
if self._use_peepholes:
cell_output += self._w_o_diag * cell_output
next_hidden = tf.tanh(cell_output) * tf.sigmoid(o)
if self._use_projection:
next_hidden = tf.matmul(next_hidden, self._w_h_projection)
return next_hidden, LSTMState(hidden=next_hidden, cell=next_cell)
def _create_gate_variables(self, input_shape, dtype):
"""Initialize the variables used for the gates."""
if len(input_shape) != 2:
raise ValueError(
"Rank of shape must be {} not: {}".format(2, len(input_shape)))
equiv_input_size = self._hidden_state_size + input_shape.dims[1].value
initializer = basic.create_linear_initializer(equiv_input_size)
self._w_xh = tf.get_variable(
self.W_GATES,
shape=[equiv_input_size, 4 * self._hidden_size],
dtype=dtype,
initializer=self._initializers.get(self.W_GATES, initializer),
partitioner=self._partitioners.get(self.W_GATES),
regularizer=self._regularizers.get(self.W_GATES))
self._b = tf.get_variable(
self.B_GATES,
shape=[4 * self._hidden_size],
dtype=dtype,
initializer=self._initializers.get(self.B_GATES, initializer),
partitioner=self._partitioners.get(self.B_GATES),
regularizer=self._regularizers.get(self.B_GATES))
if self._use_projection:
w_h_initializer = basic.create_linear_initializer(self._hidden_size)
self._w_h_projection = tf.get_variable(
self.W_H_PROJECTION,
shape=[self._hidden_size, self._hidden_state_size],
dtype=dtype,
initializer=self._initializers.get(self.W_H_PROJECTION,
w_h_initializer),
partitioner=self._partitioners.get(self.W_H_PROJECTION),
regularizer=self._regularizers.get(self.W_H_PROJECTION))
def _create_peephole_variables(self, dtype):
"""Initialize the variables used for the peephole connections."""
self._w_f_diag = tf.get_variable(
self.W_F_DIAG,
shape=[self._hidden_size],
dtype=dtype,
initializer=self._initializers.get(self.W_F_DIAG),
partitioner=self._partitioners.get(self.W_F_DIAG),
regularizer=self._regularizers.get(self.W_F_DIAG))
self._w_i_diag = tf.get_variable(
self.W_I_DIAG,
shape=[self._hidden_size],
dtype=dtype,
initializer=self._initializers.get(self.W_I_DIAG),
partitioner=self._partitioners.get(self.W_I_DIAG),
regularizer=self._regularizers.get(self.W_I_DIAG))
self._w_o_diag = tf.get_variable(
self.W_O_DIAG,
shape=[self._hidden_size],
dtype=dtype,
initializer=self._initializers.get(self.W_O_DIAG),
partitioner=self._partitioners.get(self.W_O_DIAG),
regularizer=self._regularizers.get(self.W_O_DIAG))
@property
def state_size(self):
"""Tuple of `tf.TensorShape`s indicating the size of state tensors."""
return LSTMState(tf.TensorShape([self._hidden_state_size]),
tf.TensorShape([self._hidden_size]))
@property
def output_size(self):
"""`tf.TensorShape` indicating the size of the core output."""
return tf.TensorShape([self._hidden_state_size])
@property
def use_peepholes(self):
"""Boolean indicating whether peephole connections are used."""
return self._use_peepholes
@property
def use_layer_norm(self):
"""Boolean indicating whether layer norm is enabled."""
return self._use_layer_norm
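# The helper below is an illustrative usage sketch added for documentation
# purposes (it is not part of the library's API and is never called): it
# mirrors the single-step pattern from the module docstring and shows how the
# optional `use_layer_norm` and `projection_size` arguments of `LSTM` might be
# combined. The batch and feature sizes are arbitrary assumptions.
def _example_lstm_with_projection():
  """Connects a projected, layer-normalized LSTM to one input step (sketch)."""
  batch_size, input_size = 2, 8
  core = LSTM(hidden_size=16, use_layer_norm=True, projection_size=4)
  inputs = tf.placeholder(tf.float32, shape=[batch_size, input_size])
  # `initial_state` builds a zero LSTMState(hidden, cell); the hidden part has
  # size `projection_size` because the recurrent state is projected.
  output, next_state = core(inputs, core.initial_state(batch_size))
  # `output` has shape [batch_size, projection_size]; `next_state.cell` keeps
  # the full hidden size of 16.
  return output, next_state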
class RecurrentDropoutWrapper(rnn_core.RNNCore):
"""Wraps an RNNCore so that recurrent dropout can be applied."""
def __init__(self, core, keep_probs):
"""Builds a new wrapper around a given core.
Args:
core: the RNN core to be wrapped.
keep_probs: the recurrent dropout keep probabilities to apply.
        This should have the same structure as the core's initial state. No
        dropout is applied for leaves set to None.
"""
super(RecurrentDropoutWrapper, self).__init__(
custom_getter=None, name=core.module_name + "_recdropout")
self._core = core
self._keep_probs = keep_probs
# self._dropout_state_size is a list of shape for the state parts to which
# dropout is to be applied.
# self._dropout_index has the same shape as the core state. Leafs contain
# either None if no dropout is applied or an integer representing an index
# in self._dropout_state_size.
self._dropout_state_size = []
def set_dropout_state_size(keep_prob, state_size):
if keep_prob is not None:
self._dropout_state_size.append(state_size)
return len(self._dropout_state_size) - 1
return None
self._dropout_indexes = contrib_framework.nest.map_structure(
set_dropout_state_size, keep_probs, core.state_size)
def _build(self, inputs, prev_state):
core_state, dropout_masks = prev_state
output, next_core_state = self._core(inputs, core_state)
# Dropout masks are generated via tf.nn.dropout so they actually include
# rescaling: the mask value is 1/keep_prob if no dropout is applied.
next_core_state = contrib_framework.nest.map_structure(
lambda i, state: state if i is None else state * dropout_masks[i],
self._dropout_indexes, next_core_state)
return output, (next_core_state, dropout_masks)
def initial_state(self, batch_size, dtype=tf.float32, trainable=False,
trainable_initializers=None, trainable_regularizers=None,
name=None):
"""Builds the default start state tensor of zeros."""
core_initial_state = self._core.initial_state(
batch_size, dtype=dtype, trainable=trainable,
trainable_initializers=trainable_initializers,
trainable_regularizers=trainable_regularizers, name=name)
dropout_masks = [None] * len(self._dropout_state_size)
def set_dropout_mask(index, state, keep_prob):
if index is not None:
ones = tf.ones_like(state, dtype=dtype)
dropout_masks[index] = tf.nn.dropout(ones, keep_prob=keep_prob)
contrib_framework.nest.map_structure(set_dropout_mask,
self._dropout_indexes,
core_initial_state, self._keep_probs)
return core_initial_state, dropout_masks
@property
def state_size(self):
return self._core.state_size, self._dropout_state_size
@property
def output_size(self):
return self._core.output_size
def lstm_with_recurrent_dropout(hidden_size, keep_prob=0.5, **kwargs):
"""LSTM with recurrent dropout.
Args:
hidden_size: the LSTM hidden size.
keep_prob: the probability to keep an entry when applying dropout.
**kwargs: Extra keyword arguments to pass to the LSTM.
Returns:
A tuple (train_lstm, test_lstm) where train_lstm is an LSTM with
recurrent dropout enabled to be used for training and test_lstm
is the same LSTM without recurrent dropout.
"""
lstm = LSTM(hidden_size, **kwargs)
return RecurrentDropoutWrapper(lstm, LSTMState(keep_prob, None)), lstm
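# Illustrative usage sketch (assumed typical usage, not called by the library):
# `lstm_with_recurrent_dropout` returns two cores that share variables, so the
# dropout-free `test_lstm` can be connected for evaluation while `train_lstm`
# is connected for training. Shapes below are arbitrary assumptions.
def _example_recurrent_dropout_usage():
  """Connects the train/test cores returned by lstm_with_recurrent_dropout."""
  batch_size, input_size = 2, 8
  train_lstm, test_lstm = lstm_with_recurrent_dropout(hidden_size=16,
                                                      keep_prob=0.8)
  inputs = tf.placeholder(tf.float32, shape=[batch_size, input_size])
  # The wrapper's state is (core_state, dropout_masks); `initial_state` samples
  # the masks once, so the same mask is reused at every unrolled time step.
  train_out, _ = train_lstm(inputs, train_lstm.initial_state(batch_size))
  # The unwrapped LSTM shares variables with `train_lstm` and applies no
  # recurrent dropout.
  test_out, _ = test_lstm(inputs, test_lstm.initial_state(batch_size))
  return train_out, test_out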
class ZoneoutWrapper(rnn_core.RNNCore):
"""Wraps an RNNCore so that zoneout can be applied.
Zoneout was introduced in https://arxiv.org/abs/1606.01305
It consists of randomly freezing some RNN state in the same way recurrent
dropout would replace this state with zero.
"""
def __init__(self, core, keep_probs, is_training):
"""Builds a new wrapper around a given core.
Args:
core: the RNN core to be wrapped.
keep_probs: the probabilities to use the updated states rather than
keeping the old state values. This is one minus the probability
that zoneout gets applied.
        This should have the same structure as the core's initial state. No
        zoneout is applied for leaves set to None.
is_training: when set, apply some stochastic zoneout. Otherwise perform
a linear combination of the previous state and the current state based
on the zoneout probability.
"""
super(ZoneoutWrapper, self).__init__(
custom_getter=None, name=core.module_name + "_zoneout")
self._core = core
self._keep_probs = keep_probs
self._is_training = is_training
def _build(self, inputs, prev_state):
output, next_state = self._core(inputs, prev_state)
def apply_zoneout(keep_prob, next_s, prev_s): # pylint: disable=missing-docstring
if keep_prob is None:
return next_s
if self._is_training:
diff = next_s - prev_s
# The dropout returns 0 with probability 1 - keep_prob and in this case
# this function returns prev_s
# It returns diff / keep_prob otherwise and then this function returns
# prev_s + diff = next_s
return prev_s + tf.nn.dropout(diff, keep_prob) * keep_prob
else:
return prev_s * (1 - keep_prob) + next_s * keep_prob
next_state = contrib_framework.nest.map_structure(apply_zoneout,
self._keep_probs,
next_state, prev_state)
return output, next_state
def initial_state(self, batch_size, dtype=tf.float32, trainable=False,
trainable_initializers=None, trainable_regularizers=None,
name=None):
"""Builds the default start state tensor of zeros."""
return self._core.initial_state(
batch_size, dtype=dtype, trainable=trainable,
trainable_initializers=trainable_initializers,
trainable_regularizers=trainable_regularizers, name=name)
@property
def state_size(self):
return self._core.state_size
@property
def output_size(self):
return self._core.output_size
def lstm_with_zoneout(hidden_size, keep_prob_c=0.5, keep_prob_h=0.95, **kwargs):
"""LSTM with recurrent dropout.
Args:
hidden_size: the LSTM hidden size.
keep_prob_c: the probability to use the new value of the cell state rather
than freezing it.
keep_prob_h: the probability to use the new value of the hidden state
rather than freezing it.
**kwargs: Extra keyword arguments to pass to the LSTM.
Returns:
    A tuple (train_lstm, test_lstm) where train_lstm is an LSTM with
    stochastic zoneout enabled, to be used for training, and test_lstm
    is the same LSTM with zoneout applied deterministically, to be used
    at evaluation time.
"""
lstm = LSTM(hidden_size, **kwargs)
keep_probs = LSTMState(keep_prob_h, keep_prob_c)
train_lstm = ZoneoutWrapper(lstm, keep_probs, is_training=True)
test_lstm = ZoneoutWrapper(lstm, keep_probs, is_training=False)
return train_lstm, test_lstm
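# Illustrative usage sketch (assumed typical usage, not called by the library):
# the two cores returned by `lstm_with_zoneout` wrap the same LSTM, so they
# share variables; the training core freezes state entries stochastically while
# the test core blends previous and new state deterministically with the same
# keep probabilities. Shapes below are arbitrary assumptions.
def _example_zoneout_usage():
  """Connects the train/test cores returned by lstm_with_zoneout."""
  batch_size, input_size = 2, 8
  train_lstm, test_lstm = lstm_with_zoneout(hidden_size=16,
                                            keep_prob_c=0.5,
                                            keep_prob_h=0.95)
  inputs = tf.placeholder(tf.float32, shape=[batch_size, input_size])
  train_out, _ = train_lstm(inputs, train_lstm.initial_state(batch_size))
  test_out, _ = test_lstm(inputs, test_lstm.initial_state(batch_size))
  return train_out, test_out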
class BatchNormLSTM(rnn_core.RNNCore):
"""LSTM recurrent network cell with optional peepholes, batch normalization.
The base implementation is based on: http://arxiv.org/abs/1409.2329. We add
forget_bias (default: 1) to the biases of the forget gate in order to
reduce the scale of forgetting in the beginning of the training.
#### Peep-hole connections
Peep-hole connections may optionally be used by specifying a flag in the
  constructor. These connections can help increase the precision of output
  timing; for more details see:
https://research.google.com/pubs/archive/43905.pdf
#### Batch normalization
The batch norm transformation (in training mode) is
batchnorm(x) = gamma * (x - mean(x)) / stddev(x) + beta,
where gamma is a learnt scaling factor and beta is a learnt offset.
Batch normalization may optionally be used at different places in the LSTM by
specifying flag(s) in the constructor. These are applied when calculating
the gate activations and cell-to-hidden transformation. The set-up is based on
https://arxiv.org/pdf/1603.09025.pdf
##### Batch normalization: where to apply?
Batch norm can be applied in three different places in the LSTM:
(h) To the W_h h_{t-1} contribution to the gates from the previous hiddens.
(x) To the W_x x_t contribution to the gates from the current input.
(c) To the cell value c_t when calculating the output h_t from the cell.
(The notation here is consistent with the Recurrent Batch Normalization
paper). Each of these can be controlled individually, because batch norm is
expensive, and not all are necessary. The paper doesn't mention the relative
effects of these different batch norms; however, experimentation with a
shallow LSTM for the `permuted_mnist` sequence task suggests that (h) is the
most important and the other two can be left off. For other tasks or deeper
(stacked) LSTMs, other batch norm combinations may be more effective.
##### Batch normalization: collecting stats (training vs test)
When switching to testing (see `LSTM.with_batch_norm_control`), we can use a
mean and stddev learnt from the training data instead of using the statistics
from the test data. (This both increases test accuracy because the statistics
have less variance, and if the test data does not have the same distribution
as the training data then we must use the training statistics to ensure the
effective network does not change when switching to testing anyhow.)
  This does, however, introduce a slight subtlety. The first few time steps of
the RNN tend to have varying statistics (mean and variance) before settling
down to a steady value. Therefore in general, better performance is obtained
by using separate statistics for the first few time steps, and then using the
final set of statistics for all subsequent time steps. This is controlled by
the parameter `max_unique_stats`. (We can't have an unbounded number of
distinct statistics for both technical reasons and also for the case where
test sequences are longer than anything seen in training.)
You may be fine leaving it at its default value of 1. Small values (like 10)
may achieve better performance on some tasks when testing with cached
statistics.
Attributes:
state_size: Tuple of `tf.TensorShape`s indicating the size of state tensors.
output_size: `tf.TensorShape` indicating the size of the core output.
use_peepholes: Boolean indicating whether peephole connections are used.
use_batch_norm_h: Boolean indicating whether batch norm (h) is enabled.
use_batch_norm_x: Boolean indicating whether batch norm (x) is enabled.
use_batch_norm_c: Boolean indicating whether batch norm (c) is enabled.
"""
# Keys that may be provided for parameter initializers.
W_GATES = "w_gates" # weight for gates
B_GATES = "b_gates" # bias of gates
W_F_DIAG = "w_f_diag" # weight for prev_cell -> forget gate peephole
W_I_DIAG = "w_i_diag" # weight for prev_cell -> input gate peephole
W_O_DIAG = "w_o_diag" # weight for prev_cell -> output gate peephole
GAMMA_H = "gamma_h" # batch norm scaling for previous_hidden -> gates
GAMMA_X = "gamma_x" # batch norm scaling for input -> gates
GAMMA_C = "gamma_c" # batch norm scaling for cell -> output
BETA_C = "beta_c" # (batch norm) bias for cell -> output
POSSIBLE_INITIALIZER_KEYS = {W_GATES, B_GATES, W_F_DIAG, W_I_DIAG, W_O_DIAG,
GAMMA_H, GAMMA_X, GAMMA_C, BETA_C}
def __init__(self,
hidden_size,
forget_bias=1.0,
initializers=None,
partitioners=None,
regularizers=None,
use_peepholes=False,
use_batch_norm_h=True,
use_batch_norm_x=False,
use_batch_norm_c=False,
max_unique_stats=1,
hidden_clip_value=None,
cell_clip_value=None,
custom_getter=None,
name="batch_norm_lstm"):
"""Construct `BatchNormLSTM`.
Args:
hidden_size: (int) Hidden size dimensionality.
forget_bias: (float) Bias for the forget activation.
initializers: Dict containing ops to initialize the weights.
This dictionary may contain any of the keys returned by
`BatchNormLSTM.get_possible_initializer_keys`.
The gamma and beta variables control batch normalization values for
different batch norm transformations inside the cell; see the paper for
details.
partitioners: Optional dict containing partitioners to partition
the weights and biases. As a default, no partitioners are used. This
dict may contain any of the keys returned by
`BatchNormLSTM.get_possible_initializer_keys`.
regularizers: Optional dict containing regularizers for the weights and
biases. As a default, no regularizers are used. This dict may contain
any of the keys returned by
`BatchNormLSTM.get_possible_initializer_keys`.
use_peepholes: Boolean that indicates whether peephole connections are
used.
use_batch_norm_h: Boolean that indicates whether to apply batch
normalization at the previous_hidden -> gates contribution. If you are
experimenting with batch norm then this may be the most effective to
use, and is enabled by default.
use_batch_norm_x: Boolean that indicates whether to apply batch
normalization at the input -> gates contribution.
use_batch_norm_c: Boolean that indicates whether to apply batch
normalization at the cell -> output contribution.
max_unique_stats: The maximum number of steps to use unique batch norm
statistics for. (See module description above for more details.)
hidden_clip_value: Optional number; if set, then the LSTM hidden state
vector is clipped by this value.
cell_clip_value: Optional number; if set, then the LSTM cell vector is
clipped by this value.
custom_getter: Callable that takes as a first argument the true getter,
and allows overwriting the internal get_variable method. See the
`tf.get_variable` documentation for more details.
name: Name of the module.
Raises:
KeyError: if `initializers` contains any keys not returned by
`BatchNormLSTM.get_possible_initializer_keys`.
KeyError: if `partitioners` contains any keys not returned by
`BatchNormLSTM.get_possible_initializer_keys`.
KeyError: if `regularizers` contains any keys not returned by
`BatchNormLSTM.get_possible_initializer_keys`.
ValueError: if a peephole initializer is passed in the initializer list,
but `use_peepholes` is False.
ValueError: if a batch norm initializer is passed in the initializer list,
but batch norm is disabled.
ValueError: if none of the `use_batch_norm_*` options are True.
ValueError: if `max_unique_stats` is < 1.
"""
if not any([use_batch_norm_h, use_batch_norm_x, use_batch_norm_c]):
raise ValueError("At least one use_batch_norm_* option is required for "
"BatchNormLSTM")
super(BatchNormLSTM, self).__init__(custom_getter=custom_getter, name=name)
self._hidden_size = hidden_size
self._forget_bias = forget_bias
self._use_peepholes = use_peepholes
self._max_unique_stats = max_unique_stats
self._use_batch_norm_h = use_batch_norm_h
self._use_batch_norm_x = use_batch_norm_x
self._use_batch_norm_c = use_batch_norm_c
self._hidden_clip_value = hidden_clip_value
self._cell_clip_value = cell_clip_value
self.possible_keys = self.get_possible_initializer_keys(
use_peepholes=use_peepholes, use_batch_norm_h=use_batch_norm_h,
use_batch_norm_x=use_batch_norm_x, use_batch_norm_c=use_batch_norm_c)
self._initializers = util.check_initializers(initializers,
self.possible_keys)
    self._partitioners = util.check_partitioners(
        partitioners, self.possible_keys)
    self._regularizers = util.check_regularizers(
        regularizers, self.possible_keys)
if max_unique_stats < 1:
raise ValueError("max_unique_stats must be >= 1")
if max_unique_stats != 1 and not (
use_batch_norm_h or use_batch_norm_x or use_batch_norm_c):
raise ValueError("max_unique_stats specified but batch norm disabled")
if hidden_clip_value is not None and hidden_clip_value < 0:
raise ValueError("The value of hidden_clip_value should be nonnegative.")
if cell_clip_value is not None and cell_clip_value < 0:
raise ValueError("The value of cell_clip_value should be nonnegative.")
if use_batch_norm_h:
self._batch_norm_h = BatchNormLSTM.IndexedStatsBatchNorm(max_unique_stats,
"batch_norm_h")
if use_batch_norm_x:
self._batch_norm_x = BatchNormLSTM.IndexedStatsBatchNorm(max_unique_stats,
"batch_norm_x")
if use_batch_norm_c:
self._batch_norm_c = BatchNormLSTM.IndexedStatsBatchNorm(max_unique_stats,
"batch_norm_c")
def with_batch_norm_control(self, is_training, test_local_stats=True):
"""Wraps this RNNCore with the additional control input to the `BatchNorm`s.
Example usage:
lstm = snt.BatchNormLSTM(4)
is_training = tf.placeholder(tf.bool)
rnn_input = ...
my_rnn = rnn.rnn(lstm.with_batch_norm_control(is_training), rnn_input)
Args:
is_training: Boolean that indicates whether we are in
training mode or testing mode. When in training mode, the batch norm
statistics are taken from the given batch, and moving statistics are
updated. When in testing mode, the moving statistics are not updated,
and in addition if `test_local_stats` is False then the moving
statistics are used for the batch statistics. See the `BatchNorm` module
for more details.
      test_local_stats: Boolean scalar indicating whether to use local
batch statistics in test mode.
Returns:
snt.RNNCore wrapping this class with the extra input(s) added.
"""
return BatchNormLSTM.CoreWithExtraBuildArgs(
self, is_training=is_training, test_local_stats=test_local_stats)
@classmethod
def get_possible_initializer_keys(
cls, use_peepholes=False, use_batch_norm_h=True, use_batch_norm_x=False,
use_batch_norm_c=False):
"""Returns the keys the dictionary of variable initializers may contain.
The set of all possible initializer keys are:
w_gates: weight for gates
b_gates: bias of gates
w_f_diag: weight for prev_cell -> forget gate peephole
w_i_diag: weight for prev_cell -> input gate peephole
w_o_diag: weight for prev_cell -> output gate peephole
gamma_h: batch norm scaling for previous_hidden -> gates
gamma_x: batch norm scaling for input -> gates
gamma_c: batch norm scaling for cell -> output
beta_c: batch norm bias for cell -> output
Args:
      cls: The class.
use_peepholes: Boolean that indicates whether peephole connections are
used.
use_batch_norm_h: Boolean that indicates whether to apply batch
normalization at the previous_hidden -> gates contribution. If you are
experimenting with batch norm then this may be the most effective to
turn on.
use_batch_norm_x: Boolean that indicates whether to apply batch
normalization at the input -> gates contribution.
use_batch_norm_c: Boolean that indicates whether to apply batch
normalization at the cell -> output contribution.
Returns:
Set with strings corresponding to the strings that may be passed to the
constructor.
"""
possible_keys = cls.POSSIBLE_INITIALIZER_KEYS.copy()
if not use_peepholes:
possible_keys.difference_update(
{cls.W_F_DIAG, cls.W_I_DIAG, cls.W_O_DIAG})
if not use_batch_norm_h:
possible_keys.remove(cls.GAMMA_H)
if not use_batch_norm_x:
possible_keys.remove(cls.GAMMA_X)
if not use_batch_norm_c:
possible_keys.difference_update({cls.GAMMA_C, cls.BETA_C})
return possible_keys
def _build(self, inputs, prev_state, is_training=None, test_local_stats=True):
"""Connects the LSTM module into the graph.
If this is not the first time the module has been connected to the graph,
the Tensors provided as inputs and state must have the same final
dimension, in order for the existing variables to be the correct size for
their corresponding multiplications. The batch size may differ for each
connection.
Args:
inputs: Tensor of size `[batch_size, input_size]`.
prev_state: Tuple (prev_hidden, prev_cell), or if batch norm is enabled
and `max_unique_stats > 1`, then (prev_hidden, prev_cell, time_step).
Here, prev_hidden and prev_cell are tensors of size
`[batch_size, hidden_size]`, and time_step is used to indicate the
current RNN step.
is_training: Boolean indicating whether we are in training mode (as
opposed to testing mode), passed to the batch norm
        modules. Note that to use this you must wrap the cell via the
`with_batch_norm_control` function.
test_local_stats: Boolean indicating whether to use local batch statistics
in test mode. See the `BatchNorm` documentation for more on this.
Returns:
A tuple (output, next_state) where 'output' is a Tensor of size
`[batch_size, hidden_size]` and 'next_state' is a tuple
(next_hidden, next_cell) or (next_hidden, next_cell, time_step + 1),
where next_hidden and next_cell have size `[batch_size, hidden_size]`.
Raises:
ValueError: If connecting the module into the graph any time after the
first time, and the inferred size of the inputs does not match previous
invocations.
"""
if is_training is None:
raise ValueError("Boolean is_training flag must be explicitly specified "
"when using batch normalization.")
if self._max_unique_stats == 1:
prev_hidden, prev_cell = prev_state
time_step = None
else:
prev_hidden, prev_cell, time_step = prev_state
# pylint: disable=invalid-unary-operand-type
if self._hidden_clip_value is not None:
prev_hidden = tf.clip_by_value(
prev_hidden, -self._hidden_clip_value, self._hidden_clip_value)
if self._cell_clip_value is not None:
prev_cell = tf.clip_by_value(
prev_cell, -self._cell_clip_value, self._cell_clip_value)
# pylint: enable=invalid-unary-operand-type
self._create_gate_variables(inputs.get_shape(), inputs.dtype)
self._create_batch_norm_variables(inputs.dtype)
# pylint false positive: calling module of same file;
# pylint: disable=not-callable
if self._use_batch_norm_h or self._use_batch_norm_x:
gates_h = tf.matmul(prev_hidden, self._w_h)
gates_x = tf.matmul(inputs, self._w_x)
if self._use_batch_norm_h:
gates_h = self._gamma_h * self._batch_norm_h(gates_h,
time_step,
is_training,
test_local_stats)
if self._use_batch_norm_x:
gates_x = self._gamma_x * self._batch_norm_x(gates_x,
time_step,
is_training,
test_local_stats)
gates = gates_h + gates_x
else:
# Parameters of gates are concatenated into one multiply for efficiency.
inputs_and_hidden = tf.concat([inputs, prev_hidden], 1)
gates = tf.matmul(inputs_and_hidden, self._w_xh)
gates += self._b
# i = input_gate, j = next_input, f = forget_gate, o = output_gate
i, j, f, o = tf.split(value=gates, num_or_size_splits=4, axis=1)
if self._use_peepholes: # diagonal connections
self._create_peephole_variables(inputs.dtype)
f += self._w_f_diag * prev_cell
i += self._w_i_diag * prev_cell
forget_mask = tf.sigmoid(f + self._forget_bias)
next_cell = forget_mask * prev_cell + tf.sigmoid(i) * tf.tanh(j)
cell_output = next_cell
if self._use_batch_norm_c:
cell_output = (self._beta_c
+ self._gamma_c * self._batch_norm_c(cell_output,
time_step,
is_training,
test_local_stats))
if self._use_peepholes:
cell_output += self._w_o_diag * cell_output
next_hidden = tf.tanh(cell_output) * tf.sigmoid(o)
if self._max_unique_stats == 1:
return next_hidden, (next_hidden, next_cell)
else:
return next_hidden, (next_hidden, next_cell, time_step + 1)
def _create_batch_norm_variables(self, dtype):
"""Initialize the variables used for the `BatchNorm`s (if any)."""
# The paper recommends a value of 0.1 for good gradient flow through the
# tanh nonlinearity (although doesn't say whether this is for all gammas,
# or just some).
gamma_initializer = tf.constant_initializer(0.1)
if self._use_batch_norm_h:
self._gamma_h = tf.get_variable(
self.GAMMA_H,
shape=[4 * self._hidden_size],
dtype=dtype,
initializer=self._initializers.get(self.GAMMA_H, gamma_initializer),
partitioner=self._partitioners.get(self.GAMMA_H),
regularizer=self._regularizers.get(self.GAMMA_H))
if self._use_batch_norm_x:
self._gamma_x = tf.get_variable(
self.GAMMA_X,
shape=[4 * self._hidden_size],
dtype=dtype,
initializer=self._initializers.get(self.GAMMA_X, gamma_initializer),
partitioner=self._partitioners.get(self.GAMMA_X),
regularizer=self._regularizers.get(self.GAMMA_X))
if self._use_batch_norm_c:
self._gamma_c = tf.get_variable(
self.GAMMA_C,
shape=[self._hidden_size],
dtype=dtype,
initializer=self._initializers.get(self.GAMMA_C, gamma_initializer),
partitioner=self._partitioners.get(self.GAMMA_C),
regularizer=self._regularizers.get(self.GAMMA_C))
self._beta_c = tf.get_variable(
self.BETA_C,
shape=[self._hidden_size],
dtype=dtype,
initializer=self._initializers.get(self.BETA_C),
partitioner=self._partitioners.get(self.BETA_C),
regularizer=self._regularizers.get(self.BETA_C))
def _create_gate_variables(self, input_shape, dtype):
"""Initialize the variables used for the gates."""
if len(input_shape) != 2:
raise ValueError(
"Rank of shape must be {} not: {}".format(2, len(input_shape)))
input_size = input_shape.dims[1].value
b_shape = [4 * self._hidden_size]
equiv_input_size = self._hidden_size + input_size
initializer = basic.create_linear_initializer(equiv_input_size)
if self._use_batch_norm_h or self._use_batch_norm_x:
self._w_h = tf.get_variable(
self.W_GATES + "_H",
shape=[self._hidden_size, 4 * self._hidden_size],
dtype=dtype,
initializer=self._initializers.get(self.W_GATES, initializer),
partitioner=self._partitioners.get(self.W_GATES),
regularizer=self._regularizers.get(self.W_GATES))
self._w_x = tf.get_variable(
self.W_GATES + "_X",
shape=[input_size, 4 * self._hidden_size],
dtype=dtype,
initializer=self._initializers.get(self.W_GATES, initializer),
partitioner=self._partitioners.get(self.W_GATES),
regularizer=self._regularizers.get(self.W_GATES))
else:
self._w_xh = tf.get_variable(
self.W_GATES,
shape=[self._hidden_size + input_size, 4 * self._hidden_size],
dtype=dtype,
initializer=self._initializers.get(self.W_GATES, initializer),
partitioner=self._partitioners.get(self.W_GATES),
regularizer=self._regularizers.get(self.W_GATES))
self._b = tf.get_variable(
self.B_GATES,
shape=b_shape,
dtype=dtype,
initializer=self._initializers.get(self.B_GATES, initializer),
partitioner=self._partitioners.get(self.B_GATES),
regularizer=self._regularizers.get(self.B_GATES))
def _create_peephole_variables(self, dtype):
"""Initialize the variables used for the peephole connections."""
self._w_f_diag = tf.get_variable(
self.W_F_DIAG,
shape=[self._hidden_size],
dtype=dtype,
initializer=self._initializers.get(self.W_F_DIAG),
partitioner=self._partitioners.get(self.W_F_DIAG),
regularizer=self._regularizers.get(self.W_F_DIAG))
self._w_i_diag = tf.get_variable(
self.W_I_DIAG,
shape=[self._hidden_size],
dtype=dtype,
initializer=self._initializers.get(self.W_I_DIAG),
partitioner=self._partitioners.get(self.W_I_DIAG),
regularizer=self._regularizers.get(self.W_I_DIAG))
self._w_o_diag = tf.get_variable(
self.W_O_DIAG,
shape=[self._hidden_size],
dtype=dtype,
initializer=self._initializers.get(self.W_O_DIAG),
partitioner=self._partitioners.get(self.W_O_DIAG),
regularizer=self._regularizers.get(self.W_O_DIAG))
def initial_state(self, batch_size, dtype=tf.float32, trainable=False,
trainable_initializers=None, trainable_regularizers=None,
name=None):
"""Builds the default start state tensor of zeros.
Args:
batch_size: An int, float or scalar Tensor representing the batch size.
dtype: The data type to use for the state.
trainable: Boolean that indicates whether to learn the initial state.
trainable_initializers: An optional pair of initializers for the
initial hidden state and cell state.
trainable_regularizers: Optional regularizer function or nested structure
of functions with the same structure as the `state_size` property of the
core, to be used as regularizers of the initial state variable. A
regularizer should be a function that takes a single `Tensor` as an
input and returns a scalar `Tensor` output, e.g. the L1 and L2
regularizers in `tf.contrib.layers`.
name: Optional string used to prefix the initial state variable names, in
the case of a trainable initial state. If not provided, defaults to
the name of the module.
Returns:
A tensor tuple `([batch_size, state_size], [batch_size, state_size], ?)`
filled with zeros, with the third entry present when batch norm is enabled
      with `max_unique_stats > 1`, with value `0` (representing the time step).
"""
if self._max_unique_stats == 1:
return super(BatchNormLSTM, self).initial_state(
batch_size, dtype=dtype, trainable=trainable,
trainable_initializers=trainable_initializers,
trainable_regularizers=trainable_regularizers, name=name)
else:
with tf.name_scope(self._initial_state_scope(name)):
if not trainable:
state = self.zero_state(batch_size, dtype)
else:
# We have to manually create the state ourselves so we don't create a
# variable that never gets used for the third entry.
state = rnn_core.trainable_initial_state(
batch_size,
(tf.TensorShape([self._hidden_size]),
tf.TensorShape([self._hidden_size])),
dtype=dtype,
initializers=trainable_initializers,
regularizers=trainable_regularizers,
name=self._initial_state_scope(name))
return state[0], state[1], tf.constant(0, dtype=tf.int32)
@property
def state_size(self):
"""Tuple of `tf.TensorShape`s indicating the size of state tensors."""
if self._max_unique_stats == 1:
return (tf.TensorShape([self._hidden_size]),
tf.TensorShape([self._hidden_size]))
else:
return (tf.TensorShape([self._hidden_size]),
tf.TensorShape([self._hidden_size]),
tf.TensorShape(1))
@property
def output_size(self):
"""`tf.TensorShape` indicating the size of the core output."""
return tf.TensorShape([self._hidden_size])
@property
def use_peepholes(self):
"""Boolean indicating whether peephole connections are used."""
return self._use_peepholes
@property
def use_batch_norm_h(self):
"""Boolean indicating whether batch norm for hidden -> gates is enabled."""
return self._use_batch_norm_h
@property
def use_batch_norm_x(self):
"""Boolean indicating whether batch norm for input -> gates is enabled."""
return self._use_batch_norm_x
@property
def use_batch_norm_c(self):
"""Boolean indicating whether batch norm for cell -> output is enabled."""
return self._use_batch_norm_c
class IndexedStatsBatchNorm(base.AbstractModule):
"""BatchNorm module where batch statistics are selected by an input index.
This is used by LSTM+batchnorm, where we have distinct batch norm statistics
for the first `max_unique_stats` time steps, and then use the final set of
statistics for subsequent time steps.
The module has as input (x, index, is_training, test_local_stats). During
training or when test_local_stats=True, the output is simply batchnorm(x)
(where mean(x) and stddev(x) are used), and during training the
`BatchNorm` module accumulates statistics in mean_i, etc, where
i = min(index, max_unique_stats - 1).
During testing with test_local_stats=False, the output is batchnorm(x),
where mean_i and stddev_i are used instead of mean(x) and stddev(x).
See the `BatchNorm` module for more on is_training and test_local_stats.
No offset `beta` or scaling `gamma` are learnt.
"""
def __init__(self, max_unique_stats, name=None):
"""Create an IndexedStatsBatchNorm.
Args:
max_unique_stats: number of different indices to have statistics for;
indices beyond this will use the final statistics.
name: Name of the module.
"""
super(BatchNormLSTM.IndexedStatsBatchNorm, self).__init__(name=name)
self._max_unique_stats = max_unique_stats
def _build(self, inputs, index, is_training, test_local_stats):
"""Add the IndexedStatsBatchNorm module to the graph.
Args:
inputs: Tensor to apply batch norm to.
index: Scalar TensorFlow int32 value to select the batch norm index.
is_training: Boolean to indicate to `snt.BatchNorm` if we are
currently training.
test_local_stats: Boolean to indicate to `snt.BatchNorm` if batch
normalization should use local batch statistics at test time.
Returns:
Output of batch norm operation.
"""
def create_batch_norm():
return batch_norm.BatchNorm(offset=False, scale=False)(
inputs, is_training, test_local_stats)
if self._max_unique_stats > 1:
# NOTE: This could be quite a bit faster with `tf.switch_case` however
# that currently has bugs (bug: 136667318) when combined with
# tf.gradients and control flow (such as dynamic unroll).
pred_fn_pairs = [(tf.equal(i, index), create_batch_norm)
for i in xrange(self._max_unique_stats - 1)]
out = tf.case(pred_fn_pairs, create_batch_norm)
out.set_shape(inputs.get_shape()) # needed for tf.case shape inference
return out
else:
return create_batch_norm()
class CoreWithExtraBuildArgs(rnn_core.RNNCore):
"""Wraps an RNNCore so that the build method receives extra args and kwargs.
This will pass the additional input `args` and `kwargs` to the _build
function of the snt.RNNCore after the input and prev_state inputs.
"""
def __init__(self, core, *args, **kwargs):
"""Construct the CoreWithExtraBuildArgs.
Args:
core: The snt.RNNCore to wrap.
*args: Extra arguments to pass to _build.
**kwargs: Extra keyword arguments to pass to _build.
"""
super(BatchNormLSTM.CoreWithExtraBuildArgs, self).__init__(
name=core.module_name + "_extra_args")
self._core = core
self._args = args
self._kwargs = kwargs
def _build(self, inputs, state):
return self._core(inputs, state, *self._args, **self._kwargs)
@property
def state_size(self):
"""Tuple indicating the size of nested state tensors."""
return self._core.state_size
@property
def output_size(self):
"""`tf.TensorShape` indicating the size of the core output."""
return self._core.output_size
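# Illustrative usage sketch (assumed typical usage, not called by the library):
# `BatchNormLSTM` needs an explicit `is_training` flag for its internal
# `BatchNorm` modules, which is threaded through with `with_batch_norm_control`
# as in the class docstring example. Shapes below are arbitrary assumptions.
def _example_batch_norm_lstm_usage():
  """Connects a BatchNormLSTM via its batch-norm control wrapper (sketch)."""
  batch_size, input_size = 2, 8
  core = BatchNormLSTM(hidden_size=16, use_batch_norm_h=True)
  is_training = tf.placeholder(tf.bool)
  inputs = tf.placeholder(tf.float32, shape=[batch_size, input_size])
  # The wrapper forwards `is_training` (and `test_local_stats`) to every call
  # of the underlying core's _build method.
  wrapped = core.with_batch_norm_control(is_training=is_training)
  output, next_state = wrapped(inputs, core.initial_state(batch_size))
  return output, next_state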
class ConvLSTM(rnn_core.RNNCore):
"""Convolutional LSTM."""
@classmethod
def get_possible_initializer_keys(cls, conv_ndims, use_bias=True):
conv_class = cls._get_conv_class(conv_ndims)
return conv_class.get_possible_initializer_keys(use_bias)
@classmethod
def _get_conv_class(cls, conv_ndims):
if conv_ndims == 1:
return conv.Conv1D
elif conv_ndims == 2:
return conv.Conv2D
elif conv_ndims == 3:
return conv.Conv3D
else:
raise ValueError("Invalid convolution dimensionality.")
def __init__(self,
conv_ndims,
input_shape,
output_channels,
kernel_shape,
stride=1,
rate=1,
padding=conv.SAME,
use_bias=True,
legacy_bias_behaviour=True,
forget_bias=1.0,
initializers=None,
partitioners=None,
regularizers=None,
use_layer_norm=False,
custom_getter=None,
name="conv_lstm"):
"""Construct ConvLSTM.
Args:
conv_ndims: Convolution dimensionality (1, 2 or 3).
input_shape: Shape of the input as an iterable, excluding the batch size.
output_channels: Number of output channels of the conv LSTM.
kernel_shape: Sequence of kernel sizes (of size conv_ndims), or integer
that is used to define kernel size in all dimensions.
stride: Sequence of kernel strides (of size conv_ndims), or integer that
is used to define stride in all dimensions.
rate: Sequence of dilation rates (of size conv_ndims), or integer that is
used to define dilation rate in all dimensions. 1 corresponds to a
standard convolution, while rate > 1 corresponds to a dilated
        convolution. Cannot be > 1 if any element of stride is also > 1.
padding: Padding algorithm, either `snt.SAME` or `snt.VALID`.
use_bias: Use bias in convolutions.
legacy_bias_behaviour: If True, bias is applied to both input and hidden
convolutions, creating a redundant bias variable. If False, bias is only
applied to input convolution, removing the redundancy.
forget_bias: Forget bias.
initializers: Dict containing ops to initialize the convolutional weights.
partitioners: Optional dict containing partitioners to partition
the convolutional weights and biases. As a default, no partitioners are
used.
regularizers: Optional dict containing regularizers for the convolutional
weights and biases. As a default, no regularizers are used.
use_layer_norm: Boolean that indicates whether to apply layer
normalization. This is applied across the entire layer, normalizing
over all non-batch dimensions.
custom_getter: Callable that takes as a first argument the true getter,
and allows overwriting the internal get_variable method. See the
`tf.get_variable` documentation for more details.
name: Name of the module.
Raises:
      ValueError: If `conv_ndims` is not 1, 2 or 3, or if `input_shape` is
        incompatible with `conv_ndims`.
"""
super(ConvLSTM, self).__init__(custom_getter=custom_getter, name=name)
self._conv_class = self._get_conv_class(conv_ndims)
if conv_ndims != len(input_shape)-1:
raise ValueError("Invalid input_shape {} for conv_ndims={}.".format(
input_shape, conv_ndims))
self._conv_ndims = conv_ndims
self._input_shape = tuple(input_shape)
self._output_channels = output_channels
self._kernel_shape = kernel_shape
self._stride = stride
self._rate = rate
self._padding = padding
self._use_bias = use_bias
self._legacy_bias_behaviour = legacy_bias_behaviour
self._forget_bias = forget_bias
self._initializers = initializers
self._partitioners = partitioners
self._regularizers = regularizers
if use_layer_norm:
util.deprecation_warning(
"`use_layer_norm` kwarg is being deprecated as the implementation is "
"currently incorrect - scale and offset params are created for "
"spatial_dims * channels instead of just channels.")
self._use_layer_norm = use_layer_norm
self._total_output_channels = output_channels
if self._stride != 1:
self._total_output_channels //= self._stride * self._stride
self._convolutions = dict()
if self._use_bias and self._legacy_bias_behaviour:
tf.logging.warning(
"ConvLSTM will create redundant bias variables for input and hidden "
"convolutions. To avoid this, invoke the constructor with option "
"`legacy_bias_behaviour=False`. In future, this will be the default.")
def _new_convolution(self, use_bias):
"""Returns new convolution.
Args:
use_bias: Use bias in convolutions. If False, clean_dict removes bias
entries from initializers, partitioners and regularizers passed to
the constructor of the convolution.
"""
def clean_dict(input_dict):
if input_dict and not use_bias:
cleaned_dict = input_dict.copy()
cleaned_dict.pop("b", None)
return cleaned_dict
return input_dict
return self._conv_class(
output_channels=4*self._output_channels,
kernel_shape=self._kernel_shape,
stride=self._stride,
rate=self._rate,
padding=self._padding,
use_bias=use_bias,
initializers=clean_dict(self._initializers),
partitioners=clean_dict(self._partitioners),
regularizers=clean_dict(self._regularizers),
name="conv")
@property
def convolutions(self):
return self._convolutions
@property
def state_size(self):
"""Tuple of `tf.TensorShape`s indicating the size of state tensors."""
hidden_size = tf.TensorShape(
self._input_shape[:-1] + (self._output_channels,))
return hidden_size, hidden_size
@property
def output_size(self):
"""`tf.TensorShape` indicating the size of the core output."""
return tf.TensorShape(
self._input_shape[:-1] + (self._total_output_channels,))
def _build(self, inputs, state):
hidden, cell = state
if "input" not in self._convolutions:
self._convolutions["input"] = self._new_convolution(self._use_bias)
if "hidden" not in self._convolutions:
if self._legacy_bias_behaviour:
self._convolutions["hidden"] = self._new_convolution(self._use_bias)
else:
# Do not apply bias a second time
self._convolutions["hidden"] = self._new_convolution(use_bias=False)
input_conv = self._convolutions["input"]
hidden_conv = self._convolutions["hidden"]
next_hidden = input_conv(inputs) + hidden_conv(hidden)
if self._use_layer_norm:
# Normalize over all non-batch dimensions.
# Temporarily flatten the spatial and channel dimensions together.
flatten = basic.BatchFlatten()
unflatten = basic.BatchReshape(next_hidden.get_shape().as_list()[1:])
next_hidden = flatten(next_hidden)
next_hidden = layer_norm.LayerNorm()(next_hidden)
next_hidden = unflatten(next_hidden)
gates = tf.split(value=next_hidden, num_or_size_splits=4,
axis=self._conv_ndims+1)
input_gate, next_input, forget_gate, output_gate = gates
next_cell = tf.sigmoid(forget_gate + self._forget_bias) * cell
next_cell += tf.sigmoid(input_gate) * tf.tanh(next_input)
output = tf.tanh(next_cell) * tf.sigmoid(output_gate)
return output, (output, next_cell)
@property
def use_layer_norm(self):
"""Boolean indicating whether layer norm is enabled."""
return self._use_layer_norm
class Conv1DLSTM(ConvLSTM):
"""1D convolutional LSTM."""
@classmethod
def get_possible_initializer_keys(cls, use_bias=True):
return super(Conv1DLSTM, cls).get_possible_initializer_keys(1, use_bias)
def __init__(self, name="conv_1d_lstm", **kwargs):
"""Construct Conv1DLSTM. See `snt.ConvLSTM` for more details."""
super(Conv1DLSTM, self).__init__(conv_ndims=1, name=name, **kwargs)
class Conv2DLSTM(ConvLSTM):
"""2D convolutional LSTM."""
@classmethod
def get_possible_initializer_keys(cls, use_bias=True):
return super(Conv2DLSTM, cls).get_possible_initializer_keys(2, use_bias)
def __init__(self, name="conv_2d_lstm", **kwargs):
"""Construct Conv2DLSTM. See `snt.ConvLSTM` for more details."""
super(Conv2DLSTM, self).__init__(conv_ndims=2, name=name, **kwargs)
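# Illustrative usage sketch (assumed typical usage, not called by the library):
# `Conv2DLSTM` operates on image-shaped inputs; `input_shape` excludes the
# batch dimension and the recurrent state keeps the spatial dimensions of the
# input. The sizes below are arbitrary assumptions.
def _example_conv_2d_lstm_usage():
  """Connects a Conv2DLSTM to one step of image-shaped input (sketch)."""
  batch_size, height, width, channels = 2, 8, 8, 3
  core = Conv2DLSTM(input_shape=(height, width, channels),
                    output_channels=6,
                    kernel_shape=3)
  inputs = tf.placeholder(tf.float32,
                          shape=[batch_size, height, width, channels])
  output, next_state = core(inputs, core.initial_state(batch_size))
  # `output` and both state parts have shape
  # [batch_size, height, width, output_channels] with the default stride of 1.
  return output, next_state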
class GRU(rnn_core.RNNCore):
"""GRU recurrent network cell.
The implementation is based on: https://arxiv.org/pdf/1412.3555v1.pdf.
Attributes:
state_size: Integer indicating the size of state tensor.
output_size: Integer indicating the size of the core output.
"""
# Keys that may be provided for parameter initializers.
WZ = "wz" # weight for input -> update cell
UZ = "uz" # weight for prev_state -> update cell
BZ = "bz" # bias for update_cell
WR = "wr" # weight for input -> reset cell
UR = "ur" # weight for prev_state -> reset cell
BR = "br" # bias for reset cell
WH = "wh" # weight for input -> candidate activation
UH = "uh" # weight for prev_state -> candidate activation
BH = "bh" # bias for candidate activation
POSSIBLE_INITIALIZER_KEYS = {WZ, UZ, BZ, WR, UR, BR, WH, UH, BH}
def __init__(self, hidden_size, initializers=None, partitioners=None,
regularizers=None, custom_getter=None, name="gru"):
"""Construct GRU.
Args:
hidden_size: (int) Hidden size dimensionality.
initializers: Dict containing ops to initialize the weights. This
dict may contain any of the keys returned by
`GRU.get_possible_initializer_keys`.
partitioners: Optional dict containing partitioners to partition
the weights and biases. As a default, no partitioners are used. This
dict may contain any of the keys returned by
`GRU.get_possible_initializer_keys`
regularizers: Optional dict containing regularizers for the weights and
biases. As a default, no regularizers are used. This
dict may contain any of the keys returned by
`GRU.get_possible_initializer_keys`
custom_getter: Callable that takes as a first argument the true getter,
and allows overwriting the internal get_variable method. See the
`tf.get_variable` documentation for more details.
name: Name of the module.
Raises:
KeyError: if `initializers` contains any keys not returned by
`GRU.get_possible_initializer_keys`.
KeyError: if `partitioners` contains any keys not returned by
`GRU.get_possible_initializer_keys`.
KeyError: if `regularizers` contains any keys not returned by
`GRU.get_possible_initializer_keys`.
"""
super(GRU, self).__init__(custom_getter=custom_getter, name=name)
self._hidden_size = hidden_size
self._initializers = util.check_initializers(
initializers, self.POSSIBLE_INITIALIZER_KEYS)
self._partitioners = util.check_partitioners(
partitioners, self.POSSIBLE_INITIALIZER_KEYS)
self._regularizers = util.check_regularizers(
regularizers, self.POSSIBLE_INITIALIZER_KEYS)
@classmethod
def get_possible_initializer_keys(cls):
"""Returns the keys the dictionary of variable initializers may contain.
The set of all possible initializer keys are:
wz: weight for input -> update cell
uz: weight for prev_state -> update cell
bz: bias for update_cell
wr: weight for input -> reset cell
ur: weight for prev_state -> reset cell
br: bias for reset cell
wh: weight for input -> candidate activation
uh: weight for prev_state -> candidate activation
bh: bias for candidate activation
Returns:
Set with strings corresponding to the strings that may be passed to the
constructor.
"""
return super(GRU, cls).get_possible_initializer_keys(cls)
def _build(self, inputs, prev_state):
"""Connects the GRU module into the graph.
If this is not the first time the module has been connected to the graph,
the Tensors provided as inputs and state must have the same final
dimension, in order for the existing variables to be the correct size for
their corresponding multiplications. The batch size may differ for each
connection.
Args:
inputs: Tensor of size `[batch_size, input_size]`.
prev_state: Tensor of size `[batch_size, hidden_size]`.
Returns:
A tuple (output, next_state) where `output` is a Tensor of size
`[batch_size, hidden_size]` and `next_state` is a Tensor of size
`[batch_size, hidden_size]`.
Raises:
ValueError: If connecting the module into the graph any time after the
first time, and the inferred size of the inputs does not match previous
invocations.
"""
input_size = inputs.get_shape()[1]
weight_shape = (input_size, self._hidden_size)
u_shape = (self._hidden_size, self._hidden_size)
bias_shape = (self._hidden_size,)
self._wz = tf.get_variable(GRU.WZ, weight_shape, dtype=inputs.dtype,
initializer=self._initializers.get(GRU.WZ),
partitioner=self._partitioners.get(GRU.WZ),
regularizer=self._regularizers.get(GRU.WZ))
self._uz = tf.get_variable(GRU.UZ, u_shape, dtype=inputs.dtype,
initializer=self._initializers.get(GRU.UZ),
partitioner=self._partitioners.get(GRU.UZ),
regularizer=self._regularizers.get(GRU.UZ))
self._bz = tf.get_variable(GRU.BZ, bias_shape, dtype=inputs.dtype,
initializer=self._initializers.get(GRU.BZ),
partitioner=self._partitioners.get(GRU.BZ),
regularizer=self._regularizers.get(GRU.BZ))
z = tf.sigmoid(tf.matmul(inputs, self._wz) +
tf.matmul(prev_state, self._uz) + self._bz)
self._wr = tf.get_variable(GRU.WR, weight_shape, dtype=inputs.dtype,
initializer=self._initializers.get(GRU.WR),
partitioner=self._partitioners.get(GRU.WR),
regularizer=self._regularizers.get(GRU.WR))
self._ur = tf.get_variable(GRU.UR, u_shape, dtype=inputs.dtype,
initializer=self._initializers.get(GRU.UR),
partitioner=self._partitioners.get(GRU.UR),
regularizer=self._regularizers.get(GRU.UR))
self._br = tf.get_variable(GRU.BR, bias_shape, dtype=inputs.dtype,
initializer=self._initializers.get(GRU.BR),
partitioner=self._partitioners.get(GRU.BR),
regularizer=self._regularizers.get(GRU.BR))
r = tf.sigmoid(tf.matmul(inputs, self._wr) +
tf.matmul(prev_state, self._ur) + self._br)
self._wh = tf.get_variable(GRU.WH, weight_shape, dtype=inputs.dtype,
initializer=self._initializers.get(GRU.WH),
partitioner=self._partitioners.get(GRU.WH),
regularizer=self._regularizers.get(GRU.WH))
self._uh = tf.get_variable(GRU.UH, u_shape, dtype=inputs.dtype,
initializer=self._initializers.get(GRU.UH),
partitioner=self._partitioners.get(GRU.UH),
regularizer=self._regularizers.get(GRU.UH))
self._bh = tf.get_variable(GRU.BH, bias_shape, dtype=inputs.dtype,
initializer=self._initializers.get(GRU.BH),
partitioner=self._partitioners.get(GRU.BH),
regularizer=self._regularizers.get(GRU.BH))
h_twiddle = tf.tanh(tf.matmul(inputs, self._wh) +
tf.matmul(r * prev_state, self._uh) + self._bh)
state = (1 - z) * prev_state + z * h_twiddle
return state, state
@property
def state_size(self):
return tf.TensorShape([self._hidden_size])
@property
def output_size(self):
return tf.TensorShape([self._hidden_size])
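# Illustrative usage sketch (assumed typical usage, not called by the library):
# a GRU keeps a single hidden-state tensor, so `initial_state` returns one zero
# tensor rather than a (hidden, cell) pair. Shapes are arbitrary assumptions.
def _example_gru_usage():
  """Connects a GRU to one input step (sketch)."""
  batch_size, input_size = 2, 8
  core = GRU(hidden_size=16)
  inputs = tf.placeholder(tf.float32, shape=[batch_size, input_size])
  output, next_state = core(inputs, core.initial_state(batch_size))
  # For a GRU the output and the next state are the same [batch_size,
  # hidden_size] tensor.
  return output, next_state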
class HighwayCore(rnn_core.RNNCore):
"""Recurrent Highway Network cell.
The implementation is based on: https://arxiv.org/pdf/1607.03474v5.pdf
As per the first lines of section 5 of the reference paper, 1 - T is
used instead of a dedicated C gate.
Attributes:
state_size: Integer indicating the size of state tensor.
output_size: Integer indicating the size of the core output.
"""
# Keys that may be provided for parameter initializers.
WT = "wt" # weight for input or previous state -> T gate
BT = "bt" # bias for previous state -> T gate
WH = "wh" # weight for input or previous state -> H gate
BH = "bh" # bias for previous state -> H gate
def __init__(
self,
hidden_size,
num_layers,
initializers=None,
partitioners=None,
regularizers=None,
custom_getter=None,
name="highwaycore"):
"""Construct a new Recurrent Highway core.
Args:
hidden_size: (int) Hidden size dimensionality.
num_layers: (int) Number of highway layers.
initializers: Dict containing ops to initialize the weights. This
dict may contain any of the keys returned by
`HighwayCore.get_possible_initializer_keys`.
partitioners: Optional dict containing partitioners to partition
the weights and biases. As a default, no partitioners are used. This
dict may contain any of the keys returned by
`HighwayCore.get_possible_initializer_keys`.
regularizers: Optional dict containing regularizers for the weights and
biases. As a default, no regularizers are used. This
dict may contain any of the keys returned by
`HighwayCore.get_possible_initializer_keys`.
custom_getter: Callable that takes as a first argument the true getter,
and allows overwriting the internal get_variable method. See the
`tf.get_variable` documentation for more details.
name: Name of the module.
Raises:
KeyError: if `initializers` contains any keys not returned by
`HighwayCore.get_possible_initializer_keys`.
KeyError: if `partitioners` contains any keys not returned by
`HighwayCore.get_possible_initializer_keys`.
KeyError: if `regularizers` contains any keys not returned by
`HighwayCore.get_possible_initializer_keys`.
"""
super(HighwayCore, self).__init__(custom_getter=custom_getter, name=name)
self._hidden_size = hidden_size
self._num_layers = num_layers
self._initializers = util.check_initializers(
initializers, self.get_possible_initializer_keys(num_layers))
self._partitioners = util.check_partitioners(
partitioners, self.get_possible_initializer_keys(num_layers))
self._regularizers = util.check_regularizers(
regularizers, self.get_possible_initializer_keys(num_layers))
@classmethod
def get_possible_initializer_keys(cls, num_layers):
"""Returns the keys the dictionary of variable initializers may contain.
The set of all possible initializer keys are:
wt: weight for input -> T gate
wh: weight for input -> H gate
wtL: weight for prev state -> T gate for layer L (indexed from 0)
whL: weight for prev state -> H gate for layer L (indexed from 0)
btL: bias for prev state -> T gate for layer L (indexed from 0)
bhL: bias for prev state -> H gate for layer L (indexed from 0)
Args:
num_layers: (int) Number of highway layers.
Returns:
Set with strings corresponding to the strings that may be passed to the
constructor.
"""
keys = [cls.WT, cls.WH]
for layer_index in xrange(num_layers):
layer_str = str(layer_index)
keys += [
cls.WT + layer_str,
cls.BT + layer_str,
cls.WH + layer_str,
cls.BH + layer_str]
return set(keys)
def _build(self, inputs, prev_state):
"""Connects the highway core module into the graph.
Args:
inputs: Tensor of size `[batch_size, input_size]`.
prev_state: Tensor of size `[batch_size, hidden_size]`.
Returns:
A tuple (output, next_state) where `output` is a Tensor of size
`[batch_size, hidden_size]` and `next_state` is a Tensor of size
`[batch_size, hidden_size]`.
Raises:
ValueError: If connecting the module into the graph any time after the
first time, and the inferred size of the inputs does not match previous
invocations.
"""
input_size = inputs.get_shape()[1]
weight_shape = (input_size, self._hidden_size)
u_shape = (self._hidden_size, self._hidden_size)
bias_shape = (self._hidden_size,)
def _get_variable(name, shape):
return tf.get_variable(
name,
shape,
dtype=inputs.dtype,
initializer=self._initializers.get(name),
partitioner=self._partitioners.get(name),
regularizer=self._regularizers.get(name))
pre_highway_wt = _get_variable(self.WT, weight_shape)
pre_highway_wh = _get_variable(self.WH, weight_shape)
state = prev_state
for layer_index in xrange(self._num_layers):
layer_str = str(layer_index)
layer_wt = _get_variable(self.WT + layer_str, u_shape)
layer_bt = _get_variable(self.BT + layer_str, bias_shape)
layer_wh = _get_variable(self.WH + layer_str, u_shape)
layer_bh = _get_variable(self.BH + layer_str, bias_shape)
linear_t = tf.matmul(state, layer_wt) + layer_bt
linear_h = tf.matmul(state, layer_wh) + layer_bh
if layer_index == 0:
linear_t += tf.matmul(inputs, pre_highway_wt)
linear_h += tf.matmul(inputs, pre_highway_wh)
output_t = tf.sigmoid(linear_t)
output_h = tf.tanh(linear_h)
state = state * (1 - output_t) + output_h * output_t
return state, state
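  # For reference, each loop iteration above computes the recurrent highway
  # update:
  #   t_l = sigmoid(s_{l-1} W_t^l + b_t^l [+ x W_t for layer 0])
  #   h_l = tanh(s_{l-1} W_h^l + b_h^l [+ x W_h for layer 0])
  #   s_l = s_{l-1} * (1 - t_l) + h_l * t_l
  # so 1 - t_l plays the role of the carry gate, as noted in the class
  # docstring.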
@property
def state_size(self):
return tf.TensorShape([self._hidden_size])
@property
def output_size(self):
return tf.TensorShape([self._hidden_size])
def highway_core_with_recurrent_dropout(
hidden_size,
num_layers,
keep_prob=0.5,
**kwargs):
"""Highway core with recurrent dropout.
Args:
hidden_size: (int) Hidden size dimensionality.
num_layers: (int) Number of highway layers.
keep_prob: the probability to keep an entry when applying dropout.
**kwargs: Extra keyword arguments to pass to the highway core.
Returns:
    A tuple (train_core, test_core) where train_core is a highway core with
    recurrent dropout enabled, to be used for training, and test_core is the
    same highway core without recurrent dropout.
"""
core = HighwayCore(hidden_size, num_layers, **kwargs)
return RecurrentDropoutWrapper(core, keep_prob), core
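# Illustrative usage sketch for the helper above (names and shapes here are
# assumptions, not part of the original API): both returned cores share the
# same variables, so the wrapped core can be unrolled for training and the
# bare core for evaluation, e.g.
#
#   train_core, test_core = highway_core_with_recurrent_dropout(
#       hidden_size=128, num_layers=3, keep_prob=0.9)
#   train_out, _ = tf.nn.dynamic_rnn(
#       train_core, train_inputs,
#       initial_state=train_core.initial_state(batch_size, tf.float32))
#   test_out, _ = tf.nn.dynamic_rnn(
#       test_core, test_inputs,
#       initial_state=test_core.initial_state(batch_size, tf.float32))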
class LSTMBlockCell(rnn_core.RNNCellWrapper):
"""Wraps the TensorFlow LSTMBlockCell as a Sonnet RNNCore."""
@rnn_core.with_doc(contrib_rnn.LSTMBlockCell.__init__)
def __init__(self, *args, **kwargs):
super(LSTMBlockCell, self).__init__(contrib_rnn.LSTMBlockCell, *args,
**kwargs)
| sonnet-1 | sonnet/python/modules/gated_rnn.py |
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Batch normalization module for Sonnet.
This contains the module BatchNorm, which performs batch normalization on
its inputs. It has an optional post-normalization scale and offset, and it
maintains moving averages of the statistics for use at test time.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
from sonnet.python.modules import base
from sonnet.python.modules import util
import tensorflow.compat.v1 as tf
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.layers import utils
from tensorflow.python.training import moving_averages
# pylint: enable=g-direct-tensorflow-import
def create_beta_initializer():
"""Returns a default initializer for the `beta` in batch norm."""
return tf.zeros_initializer()
def create_gamma_initializer():
"""Returns a default initializer for the `gamma` in batch norm."""
return tf.ones_initializer()
def create_mean_initializer():
"""Returns a default initializer for the `moving_mean` in batch norm."""
return tf.zeros_initializer()
def create_variance_initializer():
"""Returns a default initializer for the `moving_variance` in batch norm."""
return tf.ones_initializer()
class BatchNorm(base.AbstractModule):
"""Batch normalization module, including optional affine transformation.
This module maintains exponential moving averages of the mean and
variance, which can be optionally used to normalize at test time.
At training time, batch statistics (mean, variance) are not shared between
separate connections. The moving averages are shared between separate
connections. At both training and test time, the optional affine
transformation (`* gamma + beta`) is shared between separate connections.
This is also the case for distributed replica training, where the batch
statistics are not aggregated across replicas, but the moving averages are
shared globally.
When connecting the module to the graph, `is_training=True` means that
- Update ops are created to update the moving averages with the current
batch's statistics.
- Features are normalized using the *current batch's statistics*. The
`test_local_stats` setting is ignored. The moving averages are
**not** used.
whereas `is_training=False` means that
- Update ops are not created.
- Features are normalized using either:
- The test batch statistics if `test_local_stats=True` (default).
- The moving averages if `test_local_stats=False`.
Local batch statistics are used by default at test time, but the moving
  averages can be used by specifying a flag when connecting. Using local batch
  statistics at test time is useful for tracking progress while the model is
  still training, since it ensures that moving average updates do not affect
  the training curves. Once training is finished, it is often advantageous
  to use moving average statistics, since this makes evaluation agnostic to
the batch size, and might even lead to small improvements over the local
batch statistics.
  You can update the moving averages either automatically, by setting
  `update_ops_collection=None`, or manually, by running the ops in the given
  collection, by default `tf.GraphKeys.UPDATE_OPS`.
For example, to run the updates automatically:
bn = BatchNorm(update_ops_collection=None)
train_net = bn(train_inputs, is_training=True)
  This does, however, have the effect of blocking the forward pass of the
  network until the update ops have been run, and may incur a small
  performance penalty.
For example, to run the updates manually:
bn = BatchNorm()
train_net = bn(train_inputs, is_training=True)
...
update_ops = tf.group(*tf.get_collection(
tf.GraphKeys.UPDATE_OPS))
train_op = tf.group(train_op, update_ops)
Then, whenever `train_op` is run so also are the moving average update ops.
Some batch normalization caveats:
- Batch normalization will remove the effect of adding a bias, so e.g.
`use_bias=False` should be used for an immediately preceding snt.Linear
module.
- If your data batches aren't i.i.d. then batch normalization can allow your
network to 'cheat' by using the batch statistics to peek at the rest of
the batch. This can exhibit itself as a higher test score with
`test_local_stats=True` than `test_local_stats=False`.
"""
GAMMA = "gamma"
BETA = "beta"
MOVING_MEAN = "moving_mean"
MOVING_VARIANCE = "moving_variance"
POSSIBLE_INITIALIZER_KEYS = {GAMMA, BETA, MOVING_MEAN, MOVING_VARIANCE}
POSSIBLE_PARTITIONER_KEYS = {GAMMA, BETA}
POSSIBLE_REGULARIZER_KEYS = {GAMMA, BETA}
def __init__(self, axis=None, offset=True, scale=False,
decay_rate=0.999, eps=1e-3, initializers=None,
partitioners=None, regularizers=None,
update_ops_collection="update_ops", fused=False,
name="batch_norm"):
"""Constructs a BatchNorm module.
By default reduces over all input tensor dimensions apart from the final
dimension. This has the effect of treating pixels in 1D/2D/3D images as
additional elements of the minibatch.
If this is not the desired behaviour, the user can specify the tensor
indices to reduce over with `axis`.
Args:
axis: Optional iterable of indices of dimensions to reduce over. By
        default `None`, in which case all dimensions except the last are
        reduced over.
offset: Optional boolean to specify whether or not to apply a trained
component-wise bias after the batch normalization and scaling.
scale: Optional boolean to specify whether or not to apply a trained
component-wise scale after the batch normalization.
decay_rate: Decay rate of the exponential moving averages of the mean
and variance.
      eps: Small number to avoid division by zero when dividing by the
        standard deviation.
initializers: Optional dict containing ops to initialize the weights of
the affine transform (`gamma` and `beta`).
partitioners: Optional dict containing partitioners to partition the
weights of the affine transform (`gamma` and `beta`).
regularizers: Optional dict containing regularizers for the weights of the
affine transform ('gamma' and 'beta'). As a default, no regularizers are
used. A regularizer should be a function that takes a single `Tensor` as
an input and returns a scalar `Tensor` output, e.g. the L1 and L2
regularizers in `tf.contrib.layers`.
update_ops_collection: Name of TensorFlow variable collection to add the
moving average update ops to. If `None`, we instead add the update ops
as control dependencies of the output of the module. This may result in
some slowdown, as the feed-forward of the network is now blocked. By
default, `tf.GraphKeys.UPDATE_OPS`.
fused: Use nn.fused_batch_norm if True, nn.batch_normalization otherwise.
name: Name of the module.
Raises:
KeyError: If `initializers` contains any keys other than `gamma`, `beta`,
`moving_mean` or `moving_variance`.
KeyError: If `partitioners` or `regularizers` contains any keys other
than `gamma` or `beta`.
TypeError: If any of the given initializers, partitioners or regularizers
are not callable.
"""
super(BatchNorm, self).__init__(name=name)
self._axis = axis
self._offset = offset
self._scale = scale
self._decay_rate = decay_rate
self._eps = eps
self._update_ops_collection = update_ops_collection
self._fused = fused
self._initializers = util.check_initializers(
initializers, self.POSSIBLE_INITIALIZER_KEYS)
self._partitioners = util.check_partitioners(
partitioners, self.POSSIBLE_PARTITIONER_KEYS)
self._regularizers = util.check_regularizers(
regularizers, self.POSSIBLE_REGULARIZER_KEYS)
def _build_statistics(self, input_batch, axis, use_batch_stats, stat_dtype):
"""Builds the statistics part of the graph when using moving variance.
Args:
input_batch: Input batch Tensor.
axis: Indices of `input_batch` to reduce over.
use_batch_stats: Boolean to indicate if batch statistics should be
calculated, otherwise moving averages are returned.
stat_dtype: TensorFlow datatype to use for the moving mean and variance.
Returns:
Tuple of (mean, variance), each of the same datatype as `input_batch`.
"""
# Set up our moving statistics. When connecting in parallel, this is shared.
if self.MOVING_MEAN not in self._initializers:
self._initializers[self.MOVING_MEAN] = create_mean_initializer()
self._moving_mean = tf.get_variable(
"moving_mean",
dtype=stat_dtype,
shape=self._mean_shape,
collections=[
tf.GraphKeys.MOVING_AVERAGE_VARIABLES,
tf.GraphKeys.GLOBAL_VARIABLES,
],
initializer=self._initializers[self.MOVING_MEAN],
trainable=False)
if self.MOVING_VARIANCE not in self._initializers:
self._initializers[self.MOVING_VARIANCE] = create_variance_initializer()
self._moving_variance = tf.get_variable(
"moving_variance",
dtype=stat_dtype,
shape=self._mean_shape,
collections=[
tf.GraphKeys.MOVING_AVERAGE_VARIABLES,
tf.GraphKeys.GLOBAL_VARIABLES,
],
initializer=self._initializers[self.MOVING_VARIANCE],
trainable=False)
def build_batch_stats():
"""Builds the batch statistics calculation ops."""
mean, variance = tf.nn.moments(input_batch, axis,
keep_dims=True, name="normalize_moments")
return mean, variance
def build_moving_stats():
"""Retrieves the moving statistics."""
# If necessary, cast the moving statistics to match the input type.
# This is required by tf.nn.batch_normalization.
input_dtype = input_batch.dtype.base_dtype
if stat_dtype == input_dtype:
return (
tf.identity(self._moving_mean),
tf.identity(self._moving_variance),
)
else:
return (
tf.cast(self._moving_mean, input_dtype),
tf.cast(self._moving_variance, input_dtype),
)
mean, variance = utils.smart_cond(
use_batch_stats,
build_batch_stats,
build_moving_stats,
)
return mean, variance
def _build_update_ops(self, mean, variance, is_training):
"""Builds the moving average update ops when using moving variance.
Args:
mean: The mean value to update with.
variance: The variance value to update with.
is_training: Boolean Tensor to indicate if we're currently in
training mode.
Returns:
Tuple of `(update_mean_op, update_variance_op)` when `is_training` is or
could be `True`. Returns `None` when `is_training=False`.
"""
def build_update_ops():
"""Builds the exponential moving average update ops."""
update_mean_op = moving_averages.assign_moving_average(
variable=self._moving_mean,
value=mean,
decay=self._decay_rate,
zero_debias=False,
name="update_moving_mean").op
update_variance_op = moving_averages.assign_moving_average(
variable=self._moving_variance,
value=variance,
decay=self._decay_rate,
zero_debias=False,
name="update_moving_variance").op
return update_mean_op, update_variance_op
def build_no_ops():
return tf.no_op(), tf.no_op()
# Only make the ops if we know that `is_training=True`, or the value of
# `is_training` is unknown.
is_training_const = utils.constant_value(is_training)
if is_training_const is None or is_training_const:
update_mean_op, update_variance_op = utils.smart_cond(
is_training,
build_update_ops,
build_no_ops,
)
return update_mean_op, update_variance_op
else:
return None
def _infer_fused_data_format(self, input_batch):
"""Infers the data format for the fused batch norm.
It uses the axis option to infer this information. Specifically, the
axis value (0, 1, 2) corresponds to data format NHWC and the
axis value (0, 2, 3) to data format NCHW.
Args:
input_batch: A Tensor of arbitrary dimension.
Returns:
A string description of the data format NHWC or NCHW.
Raises:
NotImplementedError: for input of dimensionality different from 4.
ValueError: for axis configuration different from (0, 1, 2) and (0, 2, 3).
"""
input_shape = input_batch.get_shape().as_list()
input_shape_len = len(input_shape)
if input_shape_len != 4:
raise NotImplementedError("fused batch norm supports only input with "
"4 dimensions, it received input of "
"dimensionality {:d}".format(input_shape_len))
axis = range(input_shape_len)[:-1] if self._axis is None else self._axis
axis = tuple(axis)
if axis == (0, 1, 2):
      # Reduce over all but the last (channel) dimension.
return "NHWC"
elif axis == (0, 2, 3):
      # Reduce over all but the second (channel) dimension.
return "NCHW"
else:
raise ValueError("Invalid axis option {}. This does not correspond to"
" either the NHWC format (0, 1, 2) or the NCHW "
"(0, 2, 3).".format(axis))
def _fused_batch_norm_op(self, input_batch, mean, variance, use_batch_stats):
"""Creates a fused batch normalization op."""
# Store the original shape of the mean and variance.
mean_shape = mean.get_shape()
variance_shape = variance.get_shape()
# The fused batch norm expects the mean, variance, gamma and beta
# tensors to have dimension 1, so we flatten them to remove the
# extra dimensions.
gamma_flatten = tf.reshape(self._gamma, shape=(-1,))
beta_flatten = tf.reshape(self._beta, shape=(-1,))
flatten_mean = tf.reshape(mean, shape=(-1,))
flatten_variance = tf.reshape(variance, shape=(-1,))
use_batch_stats = tf.convert_to_tensor(use_batch_stats)
common_args = {
"scale": gamma_flatten,
"offset": beta_flatten,
"epsilon": self._eps,
"data_format": self._infer_fused_data_format(input_batch),
"name": "batch_norm"
}
def use_batch_stats_fused_batch_norm():
return tf.nn.fused_batch_norm(input_batch, mean=None, variance=None,
is_training=True, **common_args)
def moving_average_fused_batch_norm():
return tf.nn.fused_batch_norm(input_batch, mean=flatten_mean,
variance=flatten_variance,
is_training=False, **common_args)
batch_norm_op, mean, variance = utils.smart_cond(
use_batch_stats, use_batch_stats_fused_batch_norm,
moving_average_fused_batch_norm)
mean = tf.reshape(mean, mean_shape)
variance = tf.reshape(variance, variance_shape)
return batch_norm_op, mean, variance
def _batch_norm_op(self, input_batch, mean, variance, use_batch_stats,
stat_dtype):
"""Creates a batch normalization op.
It uses the tf.nn.batch_normalization op by default and the
tf.nn.fused_batch_norm op to support fused batch normalization.
Args:
      input_batch: An input Tensor of arbitrary dimension.
mean: A mean tensor, of the same dtype as `input_batch`.
variance: A variance tensor, of the same dtype as `input_batch`.
use_batch_stats: A bool value that indicates whether the operation should
use the batch statistics.
stat_dtype: TensorFlow datatype used for the moving mean and variance.
Returns:
A batch normalization operation.
The current mean tensor, of datatype `stat_dtype`.
The current variance tensor, of datatype `stat_dtype`.
"""
if self._fused:
# For the non-training case where not using batch stats,
# pass in the moving statistic variables directly.
# These will already be in the correct dtype, even for float16 input.
batch_norm_op, mean, variance = self._fused_batch_norm_op(
input_batch,
self._moving_mean, self._moving_variance, use_batch_stats)
else:
batch_norm_op = tf.nn.batch_normalization(
input_batch,
mean,
variance,
self._beta,
self._gamma,
self._eps,
name="batch_norm")
# We'll echo the supplied mean and variance so that they can also be used
# to update the moving statistics. Cast to matching type if necessary.
if input_batch.dtype.base_dtype != stat_dtype:
mean = tf.cast(mean, stat_dtype)
variance = tf.cast(variance, stat_dtype)
return batch_norm_op, mean, variance
def _build_scale_offset(self, dtype):
"""Sets up optional scale and offset factors."""
# tf.nn.fused_batch_norm accepts float16 batch data, but not scale/offset.
if self._fused and dtype == tf.float16:
dtype = tf.float32
# The fused batch norm operation needs the beta, gamma variables,
# so in this case we build them and set the trainable option according
# to the values of _offset and _scale.
self._beta = None
if self._offset or self._fused:
if self.BETA not in self._initializers:
self._initializers[self.BETA] = create_beta_initializer()
self._beta = tf.get_variable(
self.BETA,
dtype=dtype,
shape=self._mean_shape,
initializer=self._initializers[self.BETA],
partitioner=self._partitioners.get(self.BETA, None),
regularizer=self._regularizers.get(self.BETA, None),
trainable=self._offset)
self._gamma = None
if self._scale or self._fused:
if self.GAMMA not in self._initializers:
self._initializers[self.GAMMA] = create_gamma_initializer()
self._gamma = tf.get_variable(
self.GAMMA,
dtype=dtype,
shape=self._mean_shape,
initializer=self._initializers[self.GAMMA],
partitioner=self._partitioners.get(self.GAMMA, None),
regularizer=self._regularizers.get(self.GAMMA, None),
trainable=self._scale)
def _build(self, input_batch, is_training, test_local_stats=True):
"""Connects the BatchNorm module into the graph.
Args:
input_batch: A Tensor of arbitrary dimension. By default, the final
dimension is not reduced over when computing the minibatch statistics.
is_training: A boolean to indicate if the module should be connected in
training mode, meaning the moving averages are updated. Can be a Tensor.
test_local_stats: A boolean to indicate if local batch statistics should
be used when `is_training=False`. If not, moving averages are used.
By default `True`. Can be a Tensor.
Returns:
A tensor with the same shape as `input_batch`.
Raises:
base.IncompatibleShapeError: If `axis` is not valid for the
input shape or has negative entries.
base.NotSupportedError: If `input_batch` has data type of `tf.bfloat16`.
"""
input_shape = input_batch.get_shape()
if self._axis is not None:
if len(self._axis) > len(input_shape):
raise base.IncompatibleShapeError(
"Too many indices specified in axis: len({}) > len({}).".format(
self._axis, input_shape))
if max(self._axis) >= len(input_shape):
raise base.IncompatibleShapeError(
"One or more index in axis is too large for "
"input shape: {} >= {:d}.".format(self._axis, len(input_shape)))
if min(self._axis) < 0:
raise base.IncompatibleShapeError(
"Indices in axis must be non-negative: {} < 0.".format(
self._axis))
axis = self._axis
else:
# Reduce over all dimensions except the last.
axis = tuple(range(len(input_shape))[:-1])
dtype = input_batch.dtype.base_dtype
if self._fused and dtype == tf.bfloat16:
raise base.NotSupportedError(
"Fused batch norm does not support tf.bfloat16.")
# Maintain moving averages at a minimum precision of tf.float32.
stat_dtype = tf.float32 if dtype in [tf.float16, tf.bfloat16] else dtype
self._mean_shape = input_batch.get_shape().as_list()
for index in axis:
self._mean_shape[index] = 1
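    # Batch statistics are used whenever we are training, or when local batch
    # statistics have been requested at test time (see the class docstring).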
use_batch_stats = is_training | test_local_stats
mean, variance = self._build_statistics(input_batch, axis,
use_batch_stats, stat_dtype)
# Sets up optional gamma and beta parameters
self._build_scale_offset(dtype)
# Sets up the batch normalization op.
out, mean, variance = self._batch_norm_op(input_batch, mean, variance,
use_batch_stats, stat_dtype)
# Sets up the update op.
update_ops = self._build_update_ops(mean, variance, is_training)
# Put update ops in the update ops collection if given, otherwise add as
# control dependencies of the output.
if update_ops:
if self._update_ops_collection:
for update_op in update_ops:
tf.add_to_collection(self._update_ops_collection, update_op)
else:
with tf.control_dependencies(update_ops):
out = tf.identity(out)
return out
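  # Illustrative usage sketch (assumed variable names, not part of the
  # module): the same instance is typically connected once for training and
  # once for evaluation, e.g.
  #
  #   bn = BatchNorm()
  #   train_out = bn(train_inputs, is_training=True)
  #   test_out = bn(test_inputs, is_training=False, test_local_stats=False)
  #
  # which normalizes the training graph with batch statistics and the test
  # graph with the moving averages.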
@property
def initializers(self):
return self._initializers
@property
def partitioners(self):
return self._partitioners
@property
def regularizers(self):
return self._regularizers
@property
def moving_mean(self):
self._ensure_is_connected()
return self._moving_mean
@property
def moving_variance(self):
self._ensure_is_connected()
return self._moving_variance
@property
def beta(self):
self._ensure_is_connected()
if self._beta is None:
raise base.Error(
"Batch normalization doesn't have an offset, so no beta")
else:
return self._beta
@property
def gamma(self):
self._ensure_is_connected()
if self._gamma is None:
raise base.Error(
"Batch normalization doesn't have a scale, so no gamma")
else:
return self._gamma
| sonnet-1 | sonnet/python/modules/batch_norm.py |
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
from absl.testing import parameterized
import numpy as np
import sonnet as snt
import tensorflow.compat.v1 as tf
class ConstantZero(snt.AbstractModule):
"""A module that always outputs zero for each example."""
def __init__(self, output_rank=2, name="constant_zero"):
"""Initialize ConstantZero module.
Args:
output_rank: int. Rank of value returned by build(). The default value (2)
imitates the output of the Linear module.
name: string. Name of module.
"""
super(ConstantZero, self).__init__(name=name)
self._output_rank = output_rank
def _build(self, inputs):
"""Attach ConstantZero module to graph.
Args:
inputs: [batch_size, input_size]-shaped Tensor of dtype float32.
Returns:
A Tensor with rank output_rank where the first dimension has length
batch_size and all others have length 1.
"""
# A module like Linear would require the final dimension to be known in
# order to construct weights.
assert inputs.get_shape().as_list()[-1] is not None
batch_size = tf.shape(inputs)[0]
result_shape = [batch_size] + [1] * (self._output_rank - 1)
return tf.zeros(result_shape, dtype=inputs.dtype)
class AttentiveReadTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super(AttentiveReadTest, self).setUp()
self._batch_size = 3
self._memory_size = 4
self._memory_word_size = 1
self._query_word_size = 2
self._memory = tf.reshape(
tf.cast(tf.range(0, 3 * 4 * 1), dtype=tf.float32), shape=[3, 4, 1])
self._query = tf.reshape(
tf.cast(tf.range(0, 3 * 2), dtype=tf.float32), shape=[3, 2])
self._memory_mask = tf.convert_to_tensor(
[
[True, True, True, True],
[True, True, True, False],
[True, True, False, False],
],
dtype=tf.bool)
self._attention_logit_mod = ConstantZero()
self._attention_mod = snt.AttentiveRead(self._attention_logit_mod)
def testShape(self):
# Shape should be inferred if it's known at graph construction time.
attention_output = self._attention_mod(self._memory, self._query)
x = attention_output.read
self.assertTrue(x.get_shape().is_compatible_with(
[self._batch_size, self._memory_word_size]))
self.assertEqual(x.dtype, tf.float32)
def testComputation(self):
# Since all attention weight logits are zero, all memory slots get an equal
# weight. Thus, the expected attentive read should return the average of
# all memory slots for each example.
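    # Here the memory rows are [0, 1, 2, 3], [4, 5, 6, 7] and [8, 9, 10, 11],
    # so the expected reads are their means: 1.5, 5.5 and 9.5.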
attention_output = self._attention_mod(self._memory, self._query)
x = attention_output.read
with self.test_session() as sess:
x_ = sess.run(x)
self.assertAllClose(x_, [[1.5], [5.5], [9.5]])
def testMemoryMask(self):
# Ignore some time steps.
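    # With the mask above, example 0 averages all four slots (1.5), example 1
    # averages [4, 5, 6] (5.0) and example 2 averages [8, 9] (8.5).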
attention_output = self._attention_mod(
self._memory, self._query, memory_mask=self._memory_mask)
x = attention_output.read
with self.test_session() as sess:
x_ = sess.run(x)
self.assertAllClose(x_, [[1.5], [5.0], [8.5]])
def testMemoryMaskWithNonuniformLogits(self):
memory = np.random.randn(2, 3, 10)
logits = np.array([[-1, 1, 0], [-1, 1, 0]])
mask = np.array([[True, True, True], [True, True, False]])
# Calculate expected output.
expected_weights = np.exp(logits)
expected_weights[1, 2] = 0
expected_weights /= np.sum(expected_weights, axis=1, keepdims=True)
expected_output = np.matmul(expected_weights[:, np.newaxis, :],
memory)[:, 0]
# Run attention model.
attention = snt.AttentiveRead(
lambda _: tf.constant(logits.reshape([6, 1]), dtype=tf.float32))
attention_output = attention(
memory=tf.constant(memory, dtype=tf.float32),
query=tf.constant(np.zeros([2, 5]), dtype=tf.float32),
memory_mask=tf.constant(mask))
with self.test_session() as sess:
actual = sess.run(attention_output)
# Check output.
self.assertAllClose(actual.read, expected_output)
self.assertAllClose(actual.weights, expected_weights)
    # The actual logit for the masked value should be tiny. First check that
    # the unmasked logits are unchanged, then check the masked one.
masked_actual_weight_logits = np.array(actual.weight_logits, copy=True)
masked_actual_weight_logits[1, 2] = logits[1, 2]
self.assertAllClose(masked_actual_weight_logits, logits)
self.assertLess(actual.weight_logits[1, 2], -1e35)
def testUndefinedWordSizes(self):
# memory_word_size must be defined.
memory = tf.placeholder(
dtype=tf.float32, shape=[self._batch_size, self._memory_size, None])
with self.assertRaises(snt.UnderspecifiedError):
self._attention_mod(memory, self._query)
# query_word_size must be defined.
query = tf.placeholder(dtype=tf.float32, shape=[self._batch_size, None])
with self.assertRaises(snt.UnderspecifiedError):
self._attention_mod(self._memory, query)
def testMemoryShape(self):
# memory must have rank 3.
memory = tf.placeholder(
dtype=tf.float32, shape=[self._batch_size, self._memory_size])
with self.assertRaises(snt.IncompatibleShapeError):
self._attention_mod(memory, self._query)
def testQueryShape(self):
# query must have rank 2.
query = tf.placeholder(
dtype=tf.float32, shape=[self._batch_size, self._query_word_size, 1])
with self.assertRaises(snt.IncompatibleShapeError):
self._attention_mod(self._memory, query)
def testMemoryMaskShape(self):
# memory_mask must have rank 2.
memory_mask = tf.placeholder(
dtype=tf.bool, shape=[self._batch_size, self._memory_size, 1])
with self.assertRaises(snt.IncompatibleShapeError):
self._attention_mod(self._memory, self._query, memory_mask=memory_mask)
@parameterized.parameters(1, 3)
def testAttentionLogitsModuleShape(self, output_rank):
# attention_logit_mod must produce a rank 2 Tensor.
attention_mod = snt.AttentiveRead(ConstantZero(output_rank=output_rank))
with self.assertRaises(snt.IncompatibleShapeError):
attention_mod(self._memory, self._query)
def testNoMemorySlotsLeft(self):
# Every example must have at least one unmasked memory slot for attention
# to work.
memory_mask = tf.convert_to_tensor(
[
[True, True, True, True],
[True, True, True, False],
[False, False, False, False],
],
dtype=tf.bool)
attention_output = self._attention_mod(
self._memory, self._query, memory_mask=memory_mask)
x = attention_output.read
with self.test_session() as sess:
with self.assertRaises(tf.errors.InvalidArgumentError):
sess.run(x)
def testInvalidBatchSize(self):
# Both memory and query need to agree on batch_size.
memory = tf.placeholder(shape=[None, 1, 1], dtype=tf.float32)
query = tf.placeholder(shape=[None, 1], dtype=tf.float32)
attention_output = self._attention_mod(memory, query)
x = attention_output.read
with self.test_session() as sess:
with self.assertRaises(tf.errors.InvalidArgumentError):
feed_dict = {
memory: np.zeros([1, 1, 1], dtype=np.float32),
query: np.zeros([2, 1], dtype=np.float32)
}
sess.run(x, feed_dict=feed_dict)
@parameterized.parameters({
"module_cstr": snt.Linear,
"module_kwargs": {
"output_size": 1
}
}, {"module_cstr": snt.nets.MLP,
"module_kwargs": {
"output_sizes": [1]
}})
def testWorksWithCommonModules(self, module_cstr, module_kwargs):
# In the academic literature, attentive reads are most commonly implemented
# with Linear or MLP modules. This integration test ensures that
# AttentiveRead works safely with these.
attention_logit_mod = module_cstr(**module_kwargs)
attention_mod = snt.AttentiveRead(attention_logit_mod)
x = attention_mod(self._memory, self._query)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(x)
def testAttentionWeightLogitsShape(self):
# Expected to be [batch_size, memory_size].
x = self._attention_mod(self._memory, self._query).weight_logits
self.assertTrue(x.get_shape().is_compatible_with(
[self._batch_size, self._memory_size]))
self.assertEqual(x.dtype, tf.float32)
def testWeightsIsSoftmaxOfLogits(self):
attention_output = self._attention_mod(self._memory, self._query)
softmax_of_weight_logits = tf.nn.softmax(attention_output.weight_logits)
with self.test_session() as sess:
expected, obtained = sess.run([attention_output.weights,
softmax_of_weight_logits])
self.assertAllClose(expected, obtained)
if __name__ == "__main__":
tf.test.main()
| sonnet-1 | sonnet/python/modules/attention_test.py |
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tools for constrained optimization.
These classes and methods implement the logic described in:
Danilo Rezende and Fabio Viola, 'Taming VAEs': https://arxiv.org/abs/1810.00597
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numbers
# Dependency imports
import numpy as np
from sonnet.python.modules import basic
from sonnet.python.modules import scale_gradient
import tensorflow.compat.v1 as tf
import tensorflow_probability as tfp
from tensorflow.python.framework import function # pylint: disable=g-direct-tensorflow-import
LAGRANGE_MULTIPLIERS = 'Lagrange Multipliers'
_GEQ_OPS = ('Greater', 'GreaterEqual')
_LEQ_OPS = ('Less', 'LessEqual')
class OptimizationConstraints(object):
"""Container for optimization constraints.
Users can add to an OptimizationConstraints instance multiple inequality
constraints, either implicitly passing inequality ops, such as
`optimization_constraints.add(x < y)`, or explicitly specifying the constraint
type, as in `optimization_constraints.add_geq(x, y)`.
Users can finally add the constraints to the TensorFlow graph calling
`optimization_constraints()`; when doing so, Lagrange multipliers are
automatically added to the graph, so that users can optimize them alongside
other variables in the graph, using the same optimizer and `minimize()`.
Example usage:
```
regularization_loss = model.regularization_loss(data)
reconstruction_error = model.reconstruction_error(data)
avg_reconstruction_error = snt.MovingAverage()(reconstruction_error)
constraints = snt.OptimizationConstraints()
constraints.add(avg_reconstruction_error < reconstruction_threshold)
loss = regularization_loss + constraints()
# The following call actually performs an update step for
# min_{theta} max_{lambda} (
# regularization_loss(theta) +
# lambda * (avg_reconstruction_error - reconstruction_threshold))
# where theta are the model parameters and lambda are the Lagrange
# multipliers.
update = optimizer.minimize(loss)
```
"""
def __init__(self, rate=1.0, valid_range=None):
"""Instantiates a container for optimization constraints.
Args:
rate: optional float, default 1.0. Default factor for Lagrange multiplier
        gradient scaling. Use the `rate` argument to scale the gradients of
the Lagrange multipliers - note that this parameter has no effect when
using optimisers such as Adam. This parameter can be overridden
when adding constraints to the container.
valid_range: optional tuple of length 2, default None. Default valid range
for Lagrange multipliers. This parameter can be overridden when adding
constraints to the container.
"""
self._constraints = []
self._lagrange_multipliers = []
self._valid_ranges = []
self._rate = rate
self._valid_range = valid_range
self._penalty = None
@property
def constraints(self):
return self._constraints
@property
def lagrange_multipliers(self):
return self._lagrange_multipliers
def add(self, expression, rate=None, valid_range=None, initializer=None):
"""Add inequality constraint whose type depends on analysis of input op.
Args:
expression: op of type `Greater`, `GreaterEqual`, `Less` or `LessEqual`.
Note that `GreaterEqual` and `LessEqual` are accepted only for
convenience, and will result in the same behavior as `Greater` and
`Less` respectively.
rate: optional float, default None. Factor for Lagrange multiplier
        gradient scaling. Use the `rate` argument to scale the gradients of
the Lagrange multipliers - note that this parameter has no effect when
using optimisers such as Adam. This parameter overrides the defaults
defined instantiating the container.
valid_range: optional tuple of length 2, default None. Default valid
range for Lagrange multipliers. This parameter overrides the defaults
defined instantiating the container.
initializer: optional tensorflow initializer, array or value to be used
for the Lagrange multiplier initialization. By default Lagrange
multiplier will be initialized to 1.0.
Returns:
Self.
Raises:
`TypeError`, when input expression op is not one of `Greater`,
`GreaterEqual`, `Less`, `LessEqual`.
"""
self._assert_is_not_connected()
lhs = expression.op.inputs[0]
rhs = expression.op.inputs[1]
op_type = expression.op.type
if op_type in _GEQ_OPS:
self.add_geq(
lhs, rhs, rate=rate, valid_range=valid_range, initializer=initializer)
elif op_type in _LEQ_OPS:
self.add_leq(
lhs, rhs, rate=rate, valid_range=valid_range, initializer=initializer)
else:
raise TypeError(
'add currently only supports parsing of the following ops: {}'.format(
_GEQ_OPS + _LEQ_OPS))
return self
def add_leq(self, lhs, rhs=0.0, rate=None, valid_range=None,
initializer=None):
"""Add a 'less than' inequality constraint.
Args:
lhs: left hand argument of inequality expression.
      rhs: right hand argument of inequality expression, defaults to 0.0.
rate: optional float, default None. Factor for Lagrange multiplier
        gradient scaling. Use the `rate` argument to scale the gradients of
the Lagrange multipliers - note that this parameter has no effect when
using optimisers such as Adam. This parameter overrides the defaults
defined instantiating the container.
valid_range: optional tuple of length 2, default None. Default valid
range for Lagrange multipliers. This parameter overrides the defaults
defined instantiating the container.
initializer: optional tensorflow initializer, array or value to be used
for the Lagrange multiplier initialization. By default Lagrange
multiplier will be initialized to 1.0.
Returns:
Self.
"""
self._assert_is_not_connected()
constraint_op = lhs - rhs
self._constraints.append(constraint_op)
valid_range = valid_range or self._valid_range
self._valid_ranges.append(valid_range)
if rate is None:
rate = self._rate
lag_mul = get_lagrange_multiplier(
shape=constraint_op.shape, rate=rate, initializer=initializer,
valid_range=valid_range)
self._lagrange_multipliers.append(lag_mul)
return self
def add_geq(self, lhs, rhs=0.0, rate=None, valid_range=None,
initializer=None):
"""Add a 'greater than' inequality constraint.
Args:
lhs: left hand argument of inequality expression.
      rhs: right hand argument of inequality expression, defaults to 0.0.
rate: optional float, default None. Factor for Lagrange multiplier
        gradient scaling. Use the `rate` argument to scale the gradients of
the Lagrange multipliers - note that this parameter has no effect when
using optimisers such as Adam. This parameter overrides the defaults
defined instantiating the container.
valid_range: optional tuple of length 2, default None. Default valid
range for Lagrange multipliers. This parameter overrides the defaults
defined instantiating the container.
initializer: optional tensorflow initializer, array or value to be used
for the Lagrange multiplier initialization. By default Lagrange
multiplier will be initialized to 1.0.
Returns:
Self.
"""
self._assert_is_not_connected()
constraint_op = rhs - lhs
return self.add_leq(
constraint_op, rate=rate, valid_range=valid_range,
initializer=initializer)
def __call__(self):
"""Adds constrains and Lagrange multipliers to graph."""
if self._is_connected:
return self._penalty
self._penalty = tf.zeros(())
for l, c in zip(self._lagrange_multipliers, self._constraints):
self._penalty += tf.reduce_sum(l * c)
return self._penalty
@property
def _is_connected(self):
return self._penalty is not None
def _assert_is_not_connected(self):
if self._is_connected:
raise ValueError(
'Cannot add further constraints once OptimizationConstraints has '
'been connected to the graph by calling it.')
def get_lagrange_multiplier(shape=(),
rate=1.0,
initializer=1.0,
maximize=True,
valid_range=None,
name='lagrange_multiplier'):
"""Lagrange multiplier factory.
  This factory returns ops that help set up constrained optimization
  problems in TensorFlow. Given a constraint function op (either scalar or
  vector-valued), use this function to instantiate a Lagrange multiplier op,
  then take the dot product of the two and add it to the loss that is being
  optimized. There is no need to instantiate a second optimizer to solve the
  min-max problem, as the Lagrange multiplier op is set up to manipulate its
  own gradients so that a single optimizer can be used to update all the
  variables correctly.
Args:
shape: Lagrange multipliers can be used with both scalar and vector
constraint functions; when using vector constraints use the shape kwarg
to pass in shape information and instantiate variables of the correct
shape.
    rate: Scalar used to scale the magnitude of gradients of the Lagrange
      multipliers, defaulting to 1.0. Using a rate smaller than 1.0 makes the
      Lagrange multiplier updates slower than those of the model's
      parameters.
initializer: Initializer for the Lagrange multipliers. Note that
when using inequality constraints the initial value of the multiplier
will be transformed via the parametrization function.
maximize: Boolean, True if we want to maximize the loss w.r.t. the Lagrange
multipliers, False otherwise.
    valid_range: tuple or list of values used to clip the value of the
(possibly reparametrized) Lagrange multipliers.
name: Name of the Lagrange multiplier op.
Returns:
    An op to be inserted in the graph by multiplying it with a constraint op
    and adding the resulting op to a loss. The Lagrange multiplier
    gradients are modified so that, by calling minimize on the loss, the
    optimizer will actually minimize w.r.t. the model's parameters and
    maximize w.r.t. the Lagrange multipliers, hence enforcing the
constraints.
Raises:
ValueError: If the Lagrange multiplier is set to enforce an equality
constraint and a parametrization function is also provided.
"""
initializer = initializer or np.ones(shape=shape)
if isinstance(initializer, (numbers.Number, np.ndarray, list, tuple)):
initializer = tf.constant_initializer(initializer)
initializer = _LagrangeMultiplierInitializer(initializer)
lambda_var = basic.TrainableVariable(
name=name, shape=shape, initializers={'w': initializer})()
tf.add_to_collection(LAGRANGE_MULTIPLIERS, lambda_var)
lag_multiplier = _parametrize(lambda_var, rate=rate)
lag_multiplier.set_shape(shape)
if valid_range:
lag_multiplier = _constrain_to_range(lag_multiplier, *valid_range)
return lag_multiplier if maximize else -lag_multiplier
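# Illustrative sketch of manual usage (assumed variable names): multiply the
# returned multiplier with a constraint op and add it to the loss, then train
# with a single optimizer, as described in the docstring above.
#
#   lag_mul = get_lagrange_multiplier(shape=(), rate=1.0)
#   constraint = avg_reconstruction_error - reconstruction_threshold
#   loss = regularization_loss + lag_mul * constraint
#   update = optimizer.minimize(loss)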
def _squared_softplus(x):
return tf.nn.softplus(x) ** 2
def _inv_squared_softplus(x):
# Support TFP 0.7 (which has `tfp.distributions.softplus_inverse`) and
# TFP 0.8.0+ (where `softplus_inverse` was moved to `tfp.math`).
softplus_inverse = (getattr(tfp.math, 'softplus_inverse', None) or
getattr(tfp.distributions, 'softplus_inverse', None))
return softplus_inverse(tf.sqrt(x))
def _parametrize(x, rate=1.0):
return scale_gradient.scale_gradient(_squared_softplus(x), -rate)
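# Note on the parametrization above: the multiplier is kept positive via
# lambda = softplus(w) ** 2, and scale_gradient with a negative rate flips
# (and scales) its gradient, so a single minimizing optimizer performs ascent
# on the multiplier. _LagrangeMultiplierInitializer below applies the inverse
# squared softplus so that the user-specified initial value is recovered after
# the parametrization.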
class _LagrangeMultiplierInitializer(object):
"""Initializer applying inv squared softplus to a user defined initializer."""
def __init__(self, initializer, dtype=tf.float32):
self.dtype = tf.as_dtype(dtype)
self._initializer = initializer
def __call__(self, shape, dtype=None, partition_info=None):
initial_values = self._initializer(shape, dtype, partition_info)
return _inv_squared_softplus(initial_values)
def get_config(self):
return {'dtype': self.dtype.name}
# Implement simple memoization mechanism using a global dict.
_op_ctors = dict()
def _get_constrain_to_range_op(dtype):
"""Creates an op that keep values within a given range using a Defun.
This method produces a new op the first time it is called with a given `dtype`
argument, and then uses the cached op each time it is called after that with
  the same `dtype`. The min and max values are given as arguments to the
  forward pass method so that they can be used in the backwards pass.
Args:
dtype: the dtype of the input whose values are clipped.
Returns:
The op that clips the values.
"""
def _instantiate_op(dtype):
"""Instantiate constrain to range op constructor for given dtype."""
def constrain_to_range_forward(x, clip_value_min, clip_value_max):
return tf.clip_by_value(x, clip_value_min, clip_value_max)
def constrain_to_range_backward(op, grad):
"""Forwards the gradients moving the inputs within the valid range."""
x = op.inputs[0]
clip_value_min = op.inputs[1]
clip_value_max = op.inputs[2]
zeros = tf.zeros_like(grad)
condition = tf.logical_and(x < clip_value_min, grad < 0)
grad = tf.where(condition, zeros, grad)
condition = tf.logical_and(x > clip_value_max, grad > 0)
grad = tf.where(condition, zeros, grad)
return grad, None, None
func_name = 'ConstrainToRange_{}'.format(dtype.name)
return function.Defun(
dtype, dtype, dtype, python_grad_func=constrain_to_range_backward,
func_name=func_name)(constrain_to_range_forward)
if dtype.name not in _op_ctors:
_op_ctors[dtype.name] = _instantiate_op(dtype)
return _op_ctors[dtype.name]
def _constrain_to_range(x, min_value, max_value, name='constrain_to_range'):
"""Clips the inputs to a given range, whilst forwarding gradients."""
if not x.dtype.is_floating:
    raise ValueError(
        '_constrain_to_range does not support non-float `x` inputs.')
with tf.name_scope(name, 'constrain_to_range', values=[x]):
dtype = x.dtype.base_dtype # Convert ref dtypes to regular dtypes.
min_tensor = tf.convert_to_tensor(min_value, dtype=dtype)
max_tensor = tf.convert_to_tensor(max_value, dtype=dtype)
constrain_to_range_op = _get_constrain_to_range_op(dtype)
output = constrain_to_range_op(x, min_tensor, max_tensor)
output.set_shape(x.get_shape())
return output
| sonnet-1 | sonnet/python/modules/optimization_constraints.py |
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for snt.clip_gradient."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import sonnet as snt
import tensorflow.compat.v1 as tf
class ClipGradientTest(tf.test.TestCase):
def testOpClip(self):
x = tf.placeholder(tf.float32, shape=[2, 1])
y = snt.clip_gradient(x, 2, 3)
z = tf.reduce_sum(y * y)
dzdy = tf.gradients(z, y)[0]
dzdx = tf.gradients(z, x)[0]
x_np = np.array([[0.5], [2]])
with self.test_session() as sess:
y_np, dzdy_np, dzdx_np = sess.run([y, dzdy, dzdx], feed_dict={x: x_np})
self.assertAllEqual(y_np, x_np)
# We do not expect the gradients with respect to the output to be clipped.
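    # Since z = sum(y * y), dz/dy = 2 * y = [[1], [4]] for x = [[0.5], [2]].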
self.assertAllEqual(dzdy_np, np.array([[1], [4]]))
    # We expect the gradients with respect to the input to be clipped to
    # the range [2, 3].
self.assertAllEqual(dzdx_np, np.array([[2], [3]]))
def testOpClipDifferentClipValues(self):
x = tf.placeholder(tf.float32, shape=[2, 1])
y_1 = snt.clip_gradient(x, 1, 2)
y_2 = snt.clip_gradient(x, 2, 3)
z_1 = tf.reduce_sum(y_1 * y_1)
z_2 = tf.reduce_sum(y_2 * y_2)
dzdy_1 = tf.gradients(z_1, y_1)[0]
dzdy_2 = tf.gradients(z_2, y_2)[0]
dzdx_1 = tf.gradients(z_1, x)[0]
dzdx_2 = tf.gradients(z_2, x)[0]
x_np = np.array([[0.5], [2]])
with self.test_session() as sess:
y_np_1, dzdy_np_1, dzdx_np_1, y_np_2, dzdy_np_2, dzdx_np_2 = sess.run(
[y_1, dzdy_1, dzdx_1, y_2, dzdy_2, dzdx_2], feed_dict={x: x_np})
self.assertAllEqual(y_np_1, x_np)
self.assertAllEqual(y_np_2, x_np)
# We do not expect the gradients with respect to the output to be clipped.
self.assertAllEqual(dzdy_np_1, np.array([[1], [4]]))
self.assertAllEqual(dzdy_np_2, np.array([[1], [4]]))
    # We expect the gradients w.r.t. the input to be clipped to [1, 2] or
    # [2, 3] respectively.
self.assertAllEqual(dzdx_np_1, np.array([[1], [2]]))
self.assertAllEqual(dzdx_np_2, np.array([[2], [3]]))
def testOpClipDifferentDtypes(self):
x_1 = tf.placeholder(tf.float16, shape=())
snt.clip_gradient(x_1, 0, 1)
# clip_gradient throws here if the Defun func_name does not use the dtype.
x_2 = tf.placeholder(tf.float32, shape=())
snt.clip_gradient(x_2, 0, 1)
def testShape(self):
x = tf.placeholder(tf.float32, [None, 10, 13])
y = snt.clip_gradient(x, 0, 1)
z = tf.reduce_sum(y * y)
dzdx = tf.gradients(z, x)[0]
self.assertAllEqual(y.get_shape().as_list(), [None, 10, 13])
self.assertAllEqual(dzdx.get_shape().as_list(), [None, 10, 13])
if __name__ == "__main__":
tf.test.main()
| sonnet-1 | sonnet/python/modules/clip_gradient_test.py |
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Base class for TensorFlow snt.
This file contains the Abstract Base Class for defining Modules in TensorFlow.
A Module is an object that can be connected into the Graph multiple times
using the __call__ method, sharing variables automatically with no need to
explicitly use scopes or specify reuse=True.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import contextlib
import inspect
import threading
import types
# Dependency imports
import contextlib2
import six
from sonnet.python.modules import base_info
from sonnet.python.modules import util
import tensorflow.compat.v1 as tf
import wrapt
# Import error class from base_errors for backward compatibility.
from sonnet.python.modules.base_errors import Error
from sonnet.python.modules.base_errors import NotConnectedError
from sonnet.python.modules.base_errors import ParentNotBuiltError
from sonnet.python.modules.base_errors import IncompatibleShapeError
from sonnet.python.modules.base_errors import UnderspecifiedError
from sonnet.python.modules.base_errors import NotSupportedError
from sonnet.python.modules.base_errors import NotInitializedError
from sonnet.python.modules.base_errors import DifferentGraphError
from sonnet.python.modules.base_errors import ModuleInfoError
from tensorflow.contrib.eager.python import tfe as contrib_eager
# pylint: enable=g-bad-import-order
# pylint: enable=unused-import
_LOCAL_STACKS = threading.local()
def _get_or_create_stack(name):
"""Returns a thread local stack uniquified by the given name."""
stack = getattr(_LOCAL_STACKS, name, None)
if stack is None:
stack = []
setattr(_LOCAL_STACKS, name, stack)
return stack
get_module_stack = lambda: _get_or_create_stack("modules")
get_connection_stack = lambda: _get_or_create_stack("connections")
@contextlib.contextmanager
def observe_connections(observer):
"""Notifies the observer whenever any Sonnet module is connected to the graph.
If a module contains nested modules, the observer is notified once for each
nested module, followed by the containing module.
For example:
```python
def logging_observer(connected_subgraph):
logging.info(connected_subgraph.module.module_name)
with snt.observe_connections(logging_observer):
output = imagenet_module(input_tensor)
```
Args:
observer: Callable accepting a single argument. Will be called with a
`ConnectedSubGraph` each time a module is connected to the graph.
Yields:
None: just yields control to the inner context.
"""
connection_stack = get_connection_stack()
connection_stack.append(observer)
try:
yield
finally:
connection_stack.pop()
@six.add_metaclass(abc.ABCMeta)
class AbstractModule(object):
"""Superclass for Sonnet Modules.
This class defines the functionality that every module should implement,
principally the `build` method which is wrapped using `tf.make_template`
and called from `__call__`. Every time the module is called it will
be connected into the graph but using the same shared set of variables, thanks
to the template.
For this to work correctly, the `build` implementation in the derived class
must access all variables using `tf.get_variable`, not `tf.Variable`. The same
set of variables must be created each time, if this is not the case an Error
will be raised.
Every subclass must call this class' `__init__` at the start of their
`__init__`, passing the relevant name. If this step is omitted variable
sharing will not work.
"""
def __init__(self, _sentinel=None, custom_getter=None,
name=None): # pylint: disable=invalid-name
"""Performs the initialisation necessary for all AbstractModule instances.
Every subclass of AbstractModule must begin their constructor with a call to
this constructor, i.e.
`super(MySubModule, self).__init__(custom_getter=custom_getter, name=name)`.
If you instantiate sub-modules in __init__ you must create them within the
`_enter_variable_scope` context manager to ensure they are in the module's
variable scope. Alternatively, instantiate sub-modules in `_build`.
Args:
_sentinel: Variable that only carries a non-None value if `__init__` was
called without named parameters. If this is the case, a deprecation
        warning is issued in the form of a `ValueError`.
custom_getter: Callable or dictionary of callables to use as
custom getters inside the module. If a dictionary, the keys
correspond to regexes to match variable names. See the `tf.get_variable`
documentation for information about the custom_getter API.
name: Name of this module. Used to construct the Templated build function.
If `None` the module's class name is used (converted to snake case).
Raises:
TypeError: If `name` is not a string.
TypeError: If a given `custom_getter` is not callable.
ValueError: If `__init__` was called without named arguments.
"""
if _sentinel is not None:
raise ValueError("Calling AbstractModule.__init__ without named "
"arguments is not supported.")
if name is None:
name = util.to_snake_case(self.__class__.__name__)
elif not isinstance(name, six.string_types):
raise TypeError("Name must be a string, not {} of type {}.".format(
name, type(name)))
self._is_connected = False
self._connected_subgraphs = []
# If the given custom getter is a dictionary with a per-variable custom
# getter, wrap it into a single custom getter.
if isinstance(custom_getter, collections.Mapping):
self._custom_getter = util.custom_getter_router(
custom_getter_map=custom_getter,
name_fn=lambda name: name[len(self.scope_name) + 1:])
elif custom_getter is not None and not callable(custom_getter):
raise TypeError("Given custom_getter is not callable.")
else:
self._custom_getter = custom_getter
self._template = tf.make_template(
name,
self._build_wrapper,
create_scope_now_=True,
custom_getter_=self._custom_getter)
self._original_name = name
self._unique_name = self._template.variable_scope.name.split("/")[-1]
# Copy signature of _build to __call__.
adapter_fn = getattr(self._build, "__func__", self._build)
@wrapt.decorator(adapter=adapter_fn)
def copy_signature(method, unused_instance, args, kwargs):
return method(*args, **kwargs)
@copy_signature
def __call__(instance, *args, **kwargs): # pylint: disable=invalid-name
return AbstractModule.__call__(instance, *args, **kwargs)
# use __dict__ instead of setting directly to avoid a Callable pytype error
self.__dict__["__call__"] = types.MethodType(__call__, self)
# Update __call__ and the object docstrings to enable better introspection.
self.__doc__ = self._build.__doc__
self.__call__.__func__.__doc__ = self._build.__doc__
# Keep track of which graph this module has been connected to. Sonnet
# modules cannot be connected to multiple graphs, as transparent variable
# sharing is impossible in that case.
self._graph = None
# Container for all variables created in this module and its sub-modules.
self._all_variables = set([])
# Calling `.defun()` causes the module's call method to become wrapped as
# a graph function.
self._defun_wrapped = False
def _build_wrapper(self, *args, **kwargs):
"""Function which will be wrapped in a Template to do variable sharing.
Passes through all arguments to the _build method, and returns the
corresponding outputs, plus the name_scope generated by this call of the
template.
Args:
*args: args list for self._build
**kwargs: kwargs dict for self._build
Returns:
A tuple containing (output from _build, scope_name).
"""
output = self._build(*args, **kwargs)
# Make a dummy subscope to check the name scope we are in. We could read
# the name scope from one of the outputs produced, except that the outputs
# could have been produced from a subscope instantiated by the build
# function, for example if inner modules are present. Calling name_scope
# here and creating a new subscope guarantees we get the right answer.
    # Because we don't create any ops inside this dummy scope, no extra memory
    # will be consumed.
with tf.name_scope("dummy") as scope_name:
this_scope_name = scope_name[:-len("/dummy/")]
return output, this_scope_name
def _check_init_called(self):
"""Checks that the base class's __init__ method has been called.
Raises:
NotInitializedError: `AbstractModule.__init__` has not been called.
"""
try:
self._template
except AttributeError:
raise NotInitializedError("You may have forgotten to call super at the "
"start of %s.__init__."
% self.__class__.__name__)
def _set_module_info(self):
"""Creates a `ModuleInfo` and adds it to the graph collections."""
self._module_info = base_info.ModuleInfo(
module_name=self.module_name,
scope_name=self.scope_name,
class_name="{}.{}".format(
self.__class__.__module__, self.__class__.__name__),
connected_subgraphs=self._connected_subgraphs)
self._graph.add_to_collection(base_info.SONNET_COLLECTION_NAME,
self._module_info)
def _check_same_graph(self):
"""Checks that the module is not being connect to multiple Graphs.
An instance of a Sonnet module 'owns' the variables it contains, and permits
seamless variable sharing. As such, connecting a single module instance to
multiple Graphs is not possible - this function will raise an error should
that occur.
Raises:
DifferentGraphError: if the module is connected to a different Graph than
it was previously used in.
"""
with tf.init_scope():
      # We need `init_scope` in case we're running inside a defun. In that case
      # what we want is information about where the function will be called,
      # not where the function is being built.
current_graph = tf.get_default_graph()
will_call_in_eager_context = tf.executing_eagerly()
if self._graph is None:
self._graph = current_graph
self._set_module_info()
if not will_call_in_eager_context:
# Same graph checks only make sense when calling from graph mode (in eager
# mode there is a single process level context where all modules are
# created).
if self._graph != current_graph:
raise DifferentGraphError("Cannot connect module to multiple Graphs.")
@abc.abstractmethod
def _build(self, *args, **kwargs):
"""Add elements to the Graph, computing output Tensors from input Tensors.
Subclasses must implement this method, which will be wrapped in a Template.
Args:
*args: Input Tensors.
**kwargs: Additional Python flags controlling connection.
Returns:
output Tensor(s).
"""
@contextlib.contextmanager
def _capture_variables(self):
"""Adds variables used by this module to self._all_variables.
Upon entering this context manager the module adds itself onto the top
of the module call stack. Any variables created with `tf.get_variable()`
inside `_build()` or `_enter_variable_scope()` while this module is on top
of the call stack will be added to `self._all_variables`.
    Before exiting the context the module removes itself from the top of the
    call stack, and adds all of the variables in `self._all_variables` to its
    parent module (the new top of the call stack).
Yields:
Nothing, the yield just transfers focus back to the inner context.
"""
module_stack = get_module_stack()
module_stack.append(self)
try:
with contextlib2.ExitStack() as stack:
# Ideally move re-entering store into Template.variable_scope.
template_store = getattr(self._template, "_template_store", None)
if template_store is not None:
# In eager mode, the template store keeps references to created
# variables such that they survive even if there are no references to
# them in Python code. Variables added to an eager template store are
# also added to TensorFlow global collections (unlike regular
# variables created in eager mode).
stack.enter_context(template_store.as_default())
stack.enter_context(
util.notify_about_new_variables(self._all_variables.add))
yield
if self._original_name:
self._all_variables.update(self._template.variables)
finally:
# Remove `self` from `module_stack`, this happens as part of cleanup
# even if an error is raised.
module_stack.pop()
if module_stack:
# Peek into the stack to add created variables to the parent
parent_module = module_stack[-1]
parent_module._all_variables.update(self._all_variables) # pylint: disable=protected-access
def _add_connected_subgraph(self, call_method, outputs, subgraph_name_scope,
inputs_args, inputs_kwargs):
"""Adds a newly connected subgraph.
Args:
call_method: the function used to connect this Sonnet module to the graph.
outputs: `call_method` outputs.
subgraph_name_scope: name scope of the newly connected subgraph.
inputs_args: `self._build` inputs `*args`.
      inputs_kwargs: `self._build` inputs `**kwargs`.
"""
build_inputs = inspect.getcallargs(call_method,
*inputs_args, **inputs_kwargs)
# "self" should normally be in `build_inputs` but some people are decorating
# their `_build` function with `memoize`, in which case the function
# signature doesn't contain `self` anymore.
if "self" in build_inputs:
del build_inputs["self"]
connected_subgraph = base_info.ConnectedSubGraph(
module=self, name_scope=subgraph_name_scope,
inputs=build_inputs,
outputs=outputs)
self._connected_subgraphs.append(connected_subgraph)
for observer in get_connection_stack():
observer(connected_subgraph)
@property
def defun_wrapped(self):
"""Returns boolean indicating whether this module is defun wrapped."""
return self._defun_wrapped
def defun(self):
"""Wraps this modules call method in a callable graph function."""
if not self._defun_wrapped:
self._defun_wrapped = True
self._call = contrib_eager.defun(self._call)
def __call__(self, *args, **kwargs):
return self._call(*args, **kwargs)
def _call(self, *args, **kwargs):
"""Entry point when a module is called to connect it to the graph.
This is the entry point when users connect a Module into the Graph. The
underlying _build method will have been wrapped in a Template by the
constructor, and we call this template with the provided inputs here.
Note we use `_call` instead of `__call__` to allow instance level monkey
patching (see `defun`).
Args:
*args: Arguments for underlying _build method.
**kwargs: Keyword arguments for underlying _build method.
Returns:
The result of the underlying _build method.
"""
self._check_init_called()
self._check_same_graph()
with self._capture_variables():
outputs, subgraph_name_scope = self._template(*args, **kwargs)
self._is_connected = True
if not tf.executing_eagerly():
# In eager mode the module is called a lot more frequently than in graph
# mode (for each training step) and so we don't keep track of connected
# subgraphs (since there will be orders of magnitude more of them).
self._add_connected_subgraph(self._build, outputs, subgraph_name_scope,
args, kwargs)
return outputs
@property
def name_scopes(self):
"""Returns a tuple of all name_scopes generated by this module."""
if tf.executing_eagerly():
raise NotSupportedError(
"The name_scopes property is not supported in eager mode.")
return tuple(subgraph.name_scope for subgraph in self._connected_subgraphs)
@property
def variable_scope(self):
"""Returns the variable_scope declared by the module.
It is valid for library users to access the internal templated
variable_scope, but only makes sense to do so after connection. Therefore we
raise an error here if the variable_scope is requested before connection.
The only case where it does make sense to access the variable_scope before
connection is to get the post-uniquification name, which we support using
the separate .scope_name property.
Returns:
variable_scope: `tf.VariableScope` instance of the internal `tf.Template`.
Raises:
NotConnectedError: If the module is not connected to the Graph.
"""
self._ensure_is_connected()
return self._template.variable_scope
@property
def scope_name(self):
"""Returns the full name of the Module's variable scope."""
return self._template.variable_scope.name
@property
def module_name(self):
"""Returns the name of the Module."""
return self._unique_name
@property
def is_connected(self):
"""Returns true iff the Module been connected to the Graph at least once."""
return self._is_connected
@property
def graph(self):
"""Returns the Graph instance which the module is connected to, or None."""
return self._graph
@property
def connected_subgraphs(self):
"""Returns the subgraphs created by this module so far."""
if tf.executing_eagerly():
raise NotSupportedError(
"Connected sub-graphs are not tracked in eager mode.")
return tuple(self._connected_subgraphs)
@property
def last_connected_subgraph(self):
"""Returns the last subgraph created by this module.
Returns:
The last connected subgraph.
Raises:
NotConnectedError: If the module is not connected to the Graph.
"""
if tf.executing_eagerly():
raise NotSupportedError(
"Connected sub-graphs are not tracked in eager mode.")
self._ensure_is_connected()
return self._connected_subgraphs[-1]
@classmethod
def get_possible_initializer_keys(cls):
"""Returns the keys the dictionary of variable initializers may contain.
This provides the user with a way of knowing the initializer keys that are
available without having to instantiate a sonnet module. Subclasses may
override this class method if they need additional arguments to determine
what initializer keys may be provided.
Returns:
Set with strings corresponding to the strings that may be passed to the
constructor.
"""
return getattr(cls, "POSSIBLE_INITIALIZER_KEYS", set())
def _ensure_is_connected(self):
"""Raise an Error if the module has not been connected yet.
Until the module is connected into the Graph, any variables created do
not exist yet and cannot be created in advance due to not knowing the size
of the input Tensor(s). This assertion ensures that any variables contained
in this module must now exist.
Raises:
NotConnectedError: If the module is not connected to the Graph.
"""
if not self.is_connected:
raise NotConnectedError(
"Variables in {} not instantiated yet, __call__ the module "
"first.".format(self.scope_name))
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def _enter_variable_scope(self, reuse=None, check_same_graph=True):
"""Returns a contextlib.contextmanager to enter the internal variable scope.
This is useful for situations where submodules must be declared in the
constructor, or somewhere else that is not called under the `_build` method.
If such a case arises, calling `with self._enter_variable_scope():` will
cause the variables in the submodule to be correctly scoped.
An example justification for this is to allow the `Transposable` interface
to be implemented - you might want to construct all the submodules at
construction time so that you can call `.transpose()` and connect the
result of that before connecting the non-transposed module.
```python
class SomeModule(snt.AbstractModule):
def __init__(self, name="some_module"):
super(SomeModule, self).__init__(name=name)
with self._enter_variable_scope():
# We need to construct this submodule before we get to the _build
# method, for some reason.
self._sub_mod = snt.SomeSubmodule(name="some_submodule")
def _build(self, input):
# Connect to the already constructed submodule.
return self._sub_mod(input)
```
If you omit this then the submodule and parent module will appear to
be "side by side" rather than nested when viewed in the Graph viewer, and
functions such as `snt.get_variables_in_module()` or the `get_variables()`
method will not know about variables defined in the submodule.
Args:
reuse: Boolean passed to `tf.variable_scope`.
check_same_graph: Boolean to determine if same graph check should run. If
you are only entering the scope to name other variable scopes (e.g. not
to create/reuse variables) then it is legitimate to set this to False.
Yields:
The variable_scope inside the template.
"""
self._check_init_called()
if check_same_graph:
self._check_same_graph()
with self._capture_variables():
with tf.variable_scope(self._template.variable_scope, reuse=reuse) as vs:
yield vs
# pylint: enable=g-doc-return-or-yield
@property
def variables(self):
"""**All** `tf.Variable`s used when the module is connected.
This property does not rely on global collections and should generally be
preferred vs. `get_variables` and `get_all_variables`.
See the documentation for `AbstractModule._capture_variables()` for more
information about what variables are captured.
Returns:
A sorted (by variable name) tuple of `tf.Variable` objects.
Raises:
NotConnectedError: If the module is not connected to the Graph.
"""
self._ensure_is_connected()
return util.sort_by_name(self._all_variables)
@property
def trainable_variables(self):
"""All **trainable** `tf.Variable`s used when the module is connected.
This property does not rely on global collections and should generally be
preferred vs. `get_variables` and `get_all_variables`.
See the documentation for `AbstractModule._capture_variables()` for more
information about what variables are captured.
Returns:
A sorted (by variable name) tuple of `tf.Variable` objects.
Raises:
NotConnectedError: If the module is not connected to the Graph.
"""
return tuple(v for v in self.variables if v.trainable)
@property
def non_trainable_variables(self):
"""All **non-trainable** `tf.Variable`s used when the module is connected.
This property does not rely on global collections and should generally be
preferred vs. `get_variables` and `get_all_variables`.
See the documentation for `AbstractModule._capture_variables()` for more
information about what variables are captured.
Returns:
A sorted (by variable name) tuple of `tf.Variable` objects.
Raises:
NotConnectedError: If the module is not connected to the Graph.
"""
return tuple(v for v in self.variables if not v.trainable)
def get_variables(self,
collection=tf.GraphKeys.TRAINABLE_VARIABLES):
"""Returns tuple of `tf.Variable`s declared inside this module.
Note that this operates by searching this module's variable scope,
and so does not know about any modules that were constructed elsewhere but
used inside this module.
This method explicitly re-enters the Graph which this module has been
connected to.
Args:
collection: Collection to restrict query to. By default this is
`tf.GraphKeys.TRAINABLE_VARIABLES`, which doesn't
include non-trainable variables such as moving averages.
Returns:
A tuple of `tf.Variable` objects.
Raises:
NotConnectedError: If the module is not connected to the Graph.
"""
self._ensure_is_connected()
if self._defun_wrapped and tf.executing_eagerly():
raise NotSupportedError(
"`module.get_variables()` relies on TensorFlow collections which are "
"not supported when your module is wrapped with defun. Instead use "
"`module.trainable_variables` or `module.variables`.")
# Explicitly re-enter Graph, in case the module is being queried with a
# different default Graph from the one it was connected to. If this was not
# here then querying the variables from a different graph scope would
# produce an empty tuple.
with self._graph.as_default():
return util.get_variables_in_scope(
self.variable_scope, collection=collection)
def get_all_variables(self,
collection=tf.GraphKeys.TRAINABLE_VARIABLES):
"""Returns all `tf.Variable`s used when the module is connected.
See the documentation for `AbstractModule._capture_variables()` for more
information.
Args:
collection: Collection to restrict query to. By default this is
`tf.GraphKeys.TRAINABLE_VARIABLES`, which doesn't
include non-trainable variables such as moving averages.
Returns:
A sorted (by variable name) tuple of `tf.Variable` objects.
Raises:
NotConnectedError: If the module is not connected to the Graph.
"""
self._ensure_is_connected()
collection_variables = set(tf.get_collection(collection))
# Return variables in self._all_variables that are in `collection`
return util.sort_by_name(self._all_variables & collection_variables)
def __getstate__(self):
raise NotSupportedError(
"Sonnet AbstractModule instances cannot be serialized. You should "
"instead serialize all necessary configuration which will allow "
"modules to be rebuilt.")
def _maybe_log(self, fstr, *args, **kwargs):
"""Logs a message to tf.logging.info, if the `verbose` kwarg is true."""
# If verbose is not set, we don't do any logging. This allows users to
# put logging throughout their code, and enable or disable it with one
# variable, rather than needing lots of if statements.
if "verbose" in kwargs and kwargs["verbose"]:
del kwargs["verbose"]
tf.logging.info("%s: " + fstr, self.scope_name, *args, **kwargs)
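# Minimal usage sketch (illustrative, not part of the original file): a custom
# module subclassing `AbstractModule`. The constructor calls the base
# `__init__`, and `_build` creates its variable with `tf.get_variable` so that
# repeated calls to the same instance share the same weights.
class _ExampleScaleModule(AbstractModule):
  """Multiplies its input by a single learned scalar."""
  def __init__(self, name="example_scale"):
    super(_ExampleScaleModule, self).__init__(name=name)
  def _build(self, inputs):
    scale = tf.get_variable("scale", shape=(), dtype=inputs.dtype,
                            initializer=tf.ones_initializer())
    return inputs * scale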
@six.add_metaclass(abc.ABCMeta)
class Transposable(object):
"""Transposable module interface.
The Transposable interface requires that transposable modules implement
a method called `transpose`, returning a module that is the transposed
version of the one the method is called on.
Calling the method twice should return a module with the same specifications
as the original module.
When implementing a transposable module, special care is required to make
sure that parameters needed to instantiate the module are provided as
functions whose invocation is deferred to graph construction time.
For example, in Linear we might want to call:
```python
linear = snt.Linear(name="linear", output_size=output_size)
linear_transpose = linear.transpose()
```
where the output_size for linear_transpose is not known yet, as linear is
not yet connected to the graph: output_size is passed to linear_transpose's
constructor as a lambda returning linear.input_size. The lambda will return
the correct value once linear is given an input.
Notice that linear_transpose's output_size value does not need to be defined
until the module is connected to the graph.
"""
@abc.abstractmethod
def transpose(self, name=None, **kwargs):
"""Builds and returns transposed version of module.
Args:
name: Name of the transposed module.
**kwargs: Additional Python flags controlling transposition.
Returns:
Transposed version of the module.
"""
@abc.abstractmethod
def input_shape(self):
"""Returns shape of input `Tensor` passed at last call to `build`."""
class Module(AbstractModule):
"""Module wrapping a function provided by the user."""
def __init__(self, build, custom_getter=None, name=None):
"""Constructs a module with a given build function.
The Module class can be used to wrap a function assembling a network into a
module.
For example, the following code implements a simple one-hidden-layer MLP
model by defining a function called make_model and using a Module instance
to wrap it.
```python
def make_model(inputs):
lin1 = snt.Linear(name="lin1", output_size=10)(inputs)
relu1 = tf.nn.relu(lin1, name="relu1")
lin2 = snt.Linear(name="lin2", output_size=20)(relu1)
return lin2
model = snt.Module(name='simple_mlp', build=make_model)
outputs = model(inputs)
```
The `partial` package from `functools` can be used to bake configuration
parameters into the function at construction time, as shown in the following
example.
```python
from functools import partial
def make_model(inputs, output_sizes):
lin1 = snt.Linear(name="lin1", output_size=output_sizes[0])(inputs)
relu1 = tf.nn.relu(lin1, name="relu1")
lin2 = snt.Linear(name="lin2", output_size=output_sizes[1])(relu1)
return lin2
model = snt.Module(name='simple_mlp',
                       build=partial(make_model, output_sizes=[10, 20]))
outputs = model(inputs)
```
Args:
build: Callable to be invoked when connecting the module to the graph.
The `build` function is invoked when the module is called, and its
role is to specify how to add elements to the Graph, and how to
compute output Tensors from input Tensors.
The `build` function signature can include the following parameters:
*args - Input Tensors.
**kwargs - Additional Python parameters controlling connection.
custom_getter: Callable or dictionary of callables to use as
custom getters inside the module. If a dictionary, the keys
correspond to regexes to match variable names. See the
`tf.get_variable` documentation for information about the
custom_getter API.
name: Module name. If set to `None` (the default), the name will be set to
that of the `build` callable converted to `snake_case`. If `build` has
no name, the name will be 'module'.
Raises:
TypeError: If build is not callable.
TypeError: If a given `custom_getter` is not callable.
"""
if not callable(build):
raise TypeError("Input 'build' must be callable.")
if name is None:
name = util.name_for_callable(build)
super(Module, self).__init__(custom_getter=custom_getter, name=name)
self._build_function = build
def _build(self, *args, **kwargs):
"""Forwards call to the passed-in build function."""
return self._build_function(*args, **kwargs)
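# Usage sketch (illustrative assumption, not part of the original file):
# wrapping a plain build function in `Module` and connecting it twice. Both
# calls reuse the same "w" variable, and in graph mode each call is recorded
# in `model.connected_subgraphs`.
def _example_shared_linear(inputs_a, inputs_b):
  """Applies the same learned projection to two rank-2 input Tensors."""
  def build_fn(inputs):
    input_size = inputs.get_shape().as_list()[-1]
    w = tf.get_variable("w", shape=[input_size, 4])
    return tf.matmul(inputs, w)
  model = Module(build=build_fn, name="shared_linear")
  out_a = model(inputs_a)  # Creates "shared_linear/w".
  out_b = model(inputs_b)  # Reuses the same variable.
  return out_a, out_b, model.variables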
| sonnet-1 | sonnet/python/modules/base.py |
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for sonnet.python.modules.embed."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
from absl.testing import parameterized
import numpy as np
import sonnet as snt
import tensorflow.compat.v1 as tf
from tensorflow.contrib import layers as contrib_layers
from tensorflow.python.ops import variables
class EmbedTest(parameterized.TestCase, tf.test.TestCase):
def setUp(self):
super(EmbedTest, self).setUp()
self._batch_size = 3
self._vocab_size = 7
self._embed_dim = 1
self._embed_mod = snt.Embed(
vocab_size=self._vocab_size, embed_dim=self._embed_dim)
self._embed_mod_dense = snt.Embed(
vocab_size=self._vocab_size, embed_dim=self._embed_dim,
densify_gradients=True)
self._ids = np.asarray([[0, 1, 2, 3, 4], [1, 2, 3, 4, 5], [2, 3, 4, 5, 6]])
def testOutputType(self):
# Output shape should be same as ids, except with a full embedding for each
# value.
embeddings = self._embed_mod(tf.convert_to_tensor(self._ids))
expected_shape = list(self._ids.shape) + [self._embed_dim]
self.assertTrue(embeddings.get_shape().is_compatible_with(expected_shape))
self.assertEqual(embeddings.dtype, tf.float32)
@parameterized.named_parameters(
("Index-Slices", False),
("Dense Tensor", True),
)
def testComputation(self, densify_gradients):
# Initialize each embedding to its index. Thus, the lookup ids are the same
# as the embeddings themselves.
    # Construct the module here (rather than in setUp) so that both the
    # constant initializer and the densify_gradients parametrization apply.
    initializers = {"embeddings": tf.constant_initializer(
        [[0], [1], [2], [3], [4], [5], [6]], dtype=tf.float32)}
    embed_mod = snt.Embed(
        vocab_size=self._vocab_size,
        embed_dim=self._embed_dim,
        initializers=initializers,
        densify_gradients=densify_gradients)
embeddings = embed_mod(tf.convert_to_tensor(self._ids))
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
embeddings_ = sess.run(embeddings)
expected_embeddings = np.reshape(
self._ids, newshape=list(self._ids.shape) + [self._embed_dim])
self.assertAllClose(embeddings_, expected_embeddings)
def testVocabTooSmall(self):
# If an index doesn't fit in the vocab, there will be no embedding for it
# and an exception should be raised.
ids = self._ids.copy()
ids[0, 0] = self._vocab_size
ids = tf.convert_to_tensor(ids)
embeddings = self._embed_mod(ids)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
with self.assertRaises(tf.errors.InvalidArgumentError):
sess.run(embeddings)
def testNegativeIds(self):
# Negative ids are not allowed.
ids = self._ids.copy()
ids[0, 0] = -1
ids = tf.convert_to_tensor(ids)
embeddings = self._embed_mod(ids)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
with self.assertRaises(tf.errors.InvalidArgumentError):
sess.run(embeddings)
def testDefaultVocabSize(self):
embed_mod = snt.Embed(vocab_size=100, embed_dim=None, name="embed_small")
self.assertEqual(embed_mod.embed_dim, 19)
embed_mod = snt.Embed(vocab_size=1000000, embed_dim=None, name="embed_big")
self.assertEqual(embed_mod.embed_dim, 190)
def testInitializers(self):
# Since all embeddings are initialized to zero, the extracted embeddings
# should be as well.
initializers = {"embeddings": tf.zeros_initializer()}
embed_mod = snt.Embed(
vocab_size=self._vocab_size,
embed_dim=self._embed_dim,
initializers=initializers)
embeddings = embed_mod(tf.convert_to_tensor(self._ids))
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
embeddings_ = sess.run(embeddings)
self.assertAllEqual(embeddings_, np.zeros_like(embeddings_))
def testPartitioners(self):
# Partition embeddings such that there's one variable per vocabulary entry.
partitioners = {"embeddings": tf.variable_axis_size_partitioner(
4 * self._embed_dim)}
embed_mod = snt.Embed(
vocab_size=self._vocab_size,
embed_dim=self._embed_dim,
partitioners=partitioners)
embeddings = embed_mod(tf.convert_to_tensor(self._ids))
self.assertEqual(type(embed_mod.embeddings), variables.PartitionedVariable)
self.assertEqual(len(embed_mod.embeddings), self._vocab_size)
# Ensure that tf.nn.embedding_lookup() plays nicely with embedding
# variables.
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(embeddings)
def testInvalidRegularizationParameters(self):
regularizer = contrib_layers.l1_regularizer(scale=0.5)
with self.assertRaisesRegexp(KeyError, "Invalid regularizer keys.*"):
snt.Embed(
vocab_size=self._vocab_size,
embed_dim=self._embed_dim,
regularizers={"not_embeddings": regularizer})
err = "Regularizer for 'embeddings' is not a callable function"
with self.assertRaisesRegexp(TypeError, err):
snt.Embed(vocab_size=self._vocab_size,
embed_dim=self._embed_dim,
regularizers={"embeddings": tf.zeros([1, 2, 3])})
def testRegularizersInRegularizationLosses(self):
regularizer = contrib_layers.l1_regularizer(scale=0.5)
embed = snt.Embed(
vocab_size=self._vocab_size,
embed_dim=self._embed_dim,
regularizers={"embeddings": regularizer})
embed(tf.convert_to_tensor(self._ids))
regularizers = tf.get_collection(
tf.GraphKeys.REGULARIZATION_LOSSES)
self.assertRegexpMatches(regularizers[0].name, ".*l1_regularizer.*")
def testProperties(self):
self.assertEqual(self._embed_mod.vocab_size, self._vocab_size)
self.assertEqual(self._embed_mod.embed_dim, self._embed_dim)
# Embeddings aren't accessible until module is connected to a graph.
with self.assertRaises(snt.NotConnectedError):
_ = self._embed_mod.embeddings
self._embed_mod(tf.convert_to_tensor(self._ids))
self.assertIsInstance(self._embed_mod.embeddings, tf.Variable)
def testExistingVocab(self):
# Check that the module can be initialised with an existing vocabulary.
existing = np.array(
[[1, 0, 0, 0], [1, 0, 1, 0], [0, 1, 0, 1]], dtype=np.int32)
expected = np.array(
[[1, 0, 0, 0], [0, 1, 0, 1], [1, 0, 1, 0]], dtype=np.int32)
true_vocab_size, true_embed_dim = existing.shape
inputs = tf.constant(np.array([0, 2, 1]), dtype=tf.int32)
embed_mod = snt.Embed(existing_vocab=existing)
embeddings = embed_mod(inputs)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
embeddings_ = sess.run(embeddings)
self.assertAllClose(embeddings_, expected)
self.assertEqual(embed_mod.vocab_size, true_vocab_size)
self.assertEqual(embed_mod.embed_dim, true_embed_dim)
if __name__ == "__main__":
tf.test.main()
| sonnet-1 | sonnet/python/modules/embed_test.py |
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Layer normalization module for Sonnet.
This contains the module LayerNorm, which performs layer normalization over
configurable axes of its inputs.
Original paper: https://arxiv.org/abs/1607.06450.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from sonnet.python.modules import base
from sonnet.python.modules import util
import tensorflow.compat.v1 as tf
class LayerNorm(base.AbstractModule):
"""Layer normalization module.
Implementation based on:
https://arxiv.org/abs/1607.06450
This module transforms input x into:
outputs = gamma * (x - mu) / sigma + beta
where mu and sigma are respectively the mean and standard deviation of x.
Gamma and beta are trainable parameters for scaling and shifting respectively.
  Since the axes over which normalization is performed are configurable, this
  also subsumes instance normalization.
"""
GAMMA = "gamma" # Layer norm scaling.
BETA = "beta" # Layer norm bias.
POSSIBLE_INITIALIZER_KEYS = {GAMMA, BETA}
def __init__(self, axis=None, offset=True, scale=True, eps=1e-5,
initializers=None, partitioners=None, regularizers=None,
name="layer_norm"):
"""Constructs a LayerNorm module.
Args:
      axis: Optional dimension or iterable of indices of dimensions to normalize
        and reduce over. By default `None`, in which case all dimensions except
        the first/batch dimension are reduced over. If the input tensor
        represents an image and normalization is performed over all dimensions
        except the batch and channel dimensions (e.g. axis=[1, 2] for the NHWC
        format), then this module corresponds to Instance Normalization
        (https://arxiv.org/abs/1607.08022).
offset: Optional boolean to specify whether or not to apply a trained
component-wise bias after the layer normalization and scaling.
scale: Optional boolean to specify whether or not to apply a trained
component-wise scale after the layer normalization.
eps: small epsilon to avoid division by zero variance. Defaults to
1e-5 as used in the paper.
initializers: Dict containing ops to initialize the scale
(with key 'gamma') and bias (with key 'beta').
partitioners: Optional dict containing partitioners to partition
the scale (with key 'gamma') and bias (with key 'beta'). As a default,
no partitioners are used.
regularizers: Optional dict containing regularizers for the scale (with
        key 'gamma') and bias (with key 'beta'). As a default, no regularizers
are used.
name: name of the module.
Raises:
KeyError: If `initializers`, `partitioners` or `regularizers` contain
any keys other than `gamma` or `beta`.
      TypeError: If any of the given initializers, partitioners or regularizers
        are not callable.
      ValueError: If `axis` is not an int or an iterable of ints.
"""
super(LayerNorm, self).__init__(name=name)
if axis is not None:
if isinstance(axis, int):
axis = [axis]
int_not_bool = lambda x: isinstance(x, int) and not isinstance(x, bool)
if (not isinstance(axis, collections.Iterable) or
not all(int_not_bool(ax) for ax in axis)):
raise ValueError("axis should be an int or an iterable of ints")
self._axis = axis
self._offset = offset
self._scale = scale
self._eps = eps
self._initializers = util.check_initializers(initializers,
self.POSSIBLE_INITIALIZER_KEYS)
self._partitioners = util.check_partitioners(partitioners,
self.POSSIBLE_INITIALIZER_KEYS)
self._regularizers = util.check_regularizers(regularizers,
self.POSSIBLE_INITIALIZER_KEYS)
def _build(self, inputs):
"""Connects the LayerNorm module into the graph.
Args:
inputs: a Tensor of dimensionality >= 2.
Returns:
normalized: layer normalized outputs with same shape as inputs.
Raises:
base.NotSupportedError: If `inputs` has less than 2 dimensions.
"""
if self._axis is None:
axis = list(range(1, inputs.shape.ndims))
else:
axis = self._axis
original_dtype = inputs.dtype
if original_dtype in [tf.float16, tf.bfloat16]:
inputs = tf.cast(inputs, tf.float32)
if inputs.get_shape().ndims < 2:
raise base.NotSupportedError(
"Layer normalization expects inputs of at least rank 2."
" Got inputs of rank {}.".format(inputs.get_shape().ndims))
# Shape for the learnable scale and offset is the number of channels. See
# https://arxiv.org/pdf/1803.08494.pdf around equation 6.
params_shape = inputs.get_shape()[-1:]
if self._scale:
if self.GAMMA not in self._initializers:
self._initializers[self.GAMMA] = create_gamma_initializer()
self._gamma = tf.get_variable(
self.GAMMA,
shape=params_shape,
dtype=inputs.dtype,
initializer=self._initializers[self.GAMMA],
partitioner=self._partitioners.get(self.GAMMA),
regularizer=self._regularizers.get(self.GAMMA))
else:
self._gamma = None
if self._offset:
if self.BETA not in self._initializers:
self._initializers[self.BETA] = create_beta_initializer()
self._beta = tf.get_variable(
self.BETA,
shape=params_shape,
dtype=inputs.dtype,
initializer=self._initializers[self.BETA],
partitioner=self._partitioners.get(self.BETA),
regularizer=self._regularizers.get(self.BETA))
else:
self._beta = None
mean, var = tf.nn.moments(inputs, axis, keep_dims=True)
normalized = tf.nn.batch_normalization(inputs, mean, var, self._beta,
self._gamma, self._eps)
if original_dtype in [tf.float16, tf.bfloat16]:
normalized = tf.cast(normalized, dtype=original_dtype)
return normalized
@property
def initializers(self):
return self._initializers
@property
def partitioners(self):
return self._partitioners
@property
def regularizers(self):
return self._regularizers
@property
def beta(self):
self._ensure_is_connected()
return self._beta
@property
def gamma(self):
self._ensure_is_connected()
return self._gamma
def create_beta_initializer():
"""Returns a default initializer for the `beta` in layer norm."""
return tf.zeros_initializer()
def create_gamma_initializer():
"""Returns a default initializer for the `gamma` in layer norm."""
return tf.ones_initializer()
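# Usage sketch (illustrative assumption, not part of the original file): the
# default reduces over every non-batch dimension, while restricting `axis` to
# the spatial dimensions of an NHWC image gives instance-normalization-style
# behaviour, as described in the constructor docstring.
def _example_layer_norm(images_nhwc):
  """Applies layer norm and instance-style norm to a batch of NHWC images."""
  layer_norm = LayerNorm()                # Reduces over axes [1, 2, 3].
  instance_norm = LayerNorm(axis=[1, 2])  # Reduces over height and width only.
  return layer_norm(images_nhwc), instance_norm(images_nhwc)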
| sonnet-1 | sonnet/python/modules/layer_norm.py |
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""""Implementation of Spatial Transformer networks core components."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import itertools
# Dependency imports
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from sonnet.python.modules import base
from sonnet.python.modules import basic
import tensorflow.compat.v1 as tf
class GridWarper(base.AbstractModule):
"""Grid warper interface class.
An object implementing the `GridWarper` interface generates a reference grid
of feature points at construction time, and warps it via a parametric
transformation model, specified at run time by an input parameter Tensor.
  Grid warpers must then implement a `_create_features` function used to
  generate the reference grid to be warped in the forward pass (according to a
  determined warping model).
"""
def __init__(self, source_shape, output_shape, num_coeff, name, **kwargs):
"""Constructs a GridWarper module and initializes the source grid params.
`source_shape` and `output_shape` are used to define the size of the source
and output signal domains, as opposed to the shape of the respective
Tensors. For example, for an image of size `width=W` and `height=H`,
`{source,output}_shape=[H, W]`; for a volume of size `width=W`, `height=H`
and `depth=D`, `{source,output}_shape=[H, W, D]`.
Args:
source_shape: Iterable of integers determining the size of the source
signal domain.
output_shape: Iterable of integers determining the size of the destination
resampled signal domain.
num_coeff: Number of coefficients parametrizing the grid warp.
For example, a 2D affine transformation will be defined by the 6
parameters populating the corresponding 2x3 affine matrix.
name: Name of Module.
**kwargs: Extra kwargs to be forwarded to the `create_features` function,
instantiating the source grid parameters.
Raises:
Error: If `len(output_shape) > len(source_shape)`.
TypeError: If `output_shape` and `source_shape` are not both iterable.
"""
super(GridWarper, self).__init__(name=name)
self._source_shape = tuple(source_shape)
self._output_shape = tuple(output_shape)
if len(self._output_shape) > len(self._source_shape):
      raise base.Error('Output domain dimensionality ({}) must be equal to or '
'smaller than source domain dimensionality ({})'
.format(len(self._output_shape),
len(self._source_shape)))
self._num_coeff = num_coeff
self._psi = self._create_features(**kwargs)
@abc.abstractmethod
def _create_features(self, **kwargs):
"""Generates matrix of features, of size `[num_coeff, num_points]`."""
pass
@property
def n_coeff(self):
"""Returns number of coefficients of warping function."""
    return self._num_coeff
@property
def psi(self):
"""Returns a list of features used to compute the grid warp."""
return self._psi
@property
def source_shape(self):
"""Returns a tuple containing the shape of the source signal."""
return self._source_shape
@property
def output_shape(self):
"""Returns a tuple containing the shape of the output grid."""
return self._output_shape
def _create_affine_features(output_shape, source_shape):
"""Generates n-dimensional homogenous coordinates for a given grid definition.
`source_shape` and `output_shape` are used to define the size of the source
and output signal domains, as opposed to the shape of the respective
Tensors. For example, for an image of size `width=W` and `height=H`,
`{source,output}_shape=[H, W]`; for a volume of size `width=W`, `height=H`
and `depth=D`, `{source,output}_shape=[H, W, D]`.
Args:
output_shape: Iterable of integers determining the shape of the grid to be
warped.
source_shape: Iterable of integers determining the domain of the signal to be
resampled.
Returns:
List of flattened numpy arrays of coordinates in range `[-1, 1]^N`, for
example:
```
[[x_0_0, .... , x_0_{n-1}],
....
[x_{M-1}_0, .... , x_{M-1}_{n-1}],
[x_{M}_0=0, .... , x_{M}_{n-1}=0],
...
[x_{N-1}_0=0, .... , x_{N-1}_{n-1}=0],
[1, ..., 1]]
```
where N is the dimensionality of the sampled space, M is the
dimensionality of the output space, i.e. 2 for images
and 3 for volumes, and n is the number of points in the output grid.
    When the dimensionality of `output_shape` is smaller than that of
    `source_shape` the last rows before [1, ..., 1] will be filled with 0.
"""
ranges = [np.linspace(-1, 1, x, dtype=np.float32)
for x in reversed(output_shape)]
psi = [x.reshape(-1) for x in np.meshgrid(*ranges, indexing='xy')]
dim_gap = len(source_shape) - len(output_shape)
for _ in xrange(dim_gap):
psi.append(np.zeros_like(psi[0], dtype=np.float32))
psi.append(np.ones_like(psi[0], dtype=np.float32))
return psi
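# Worked sketch (illustrative, not part of the original file): for a 2x3 output
# grid sampled from a 2D source, `_create_affine_features` returns three
# flattened arrays of length 6 - the x coordinates, the y coordinates (both in
# [-1, 1]) and the row of ones providing the homogeneous coordinate.
def _example_affine_features():
  """Returns the affine feature rows for a 2x3 output grid in 2D."""
  x, y, ones = _create_affine_features(output_shape=[2, 3], source_shape=[2, 3])
  # x    -> [-1., 0., 1., -1., 0., 1.]
  # y    -> [-1., -1., -1., 1., 1., 1.]
  # ones -> [1., 1., 1., 1., 1., 1.]
  return x, y, ones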
class AffineGridWarper(GridWarper):
"""Affine Grid Warper class.
The affine grid warper generates a reference grid of n-dimensional points
  and warps it via an affine transformation model determined by an input
parameter Tensor. Some of the transformation parameters can be fixed at
construction time via an `AffineWarpConstraints` object.
"""
def __init__(self,
source_shape,
output_shape,
constraints=None,
name='affine_grid_warper'):
"""Constructs an AffineGridWarper.
`source_shape` and `output_shape` are used to define the size of the source
and output signal domains, as opposed to the shape of the respective
Tensors. For example, for an image of size `width=W` and `height=H`,
`{source,output}_shape=[H, W]`; for a volume of size `width=W`, `height=H`
and `depth=D`, `{source,output}_shape=[H, W, D]`.
Args:
source_shape: Iterable of integers determining the size of the source
signal domain.
output_shape: Iterable of integers determining the size of the destination
resampled signal domain.
constraints: Either a double list of shape `[N, N+1]` defining constraints
on the entries of a matrix defining an affine transformation in N
dimensions, or an `AffineWarpConstraints` object. If the double list is
passed, a numeric value bakes in a constraint on the corresponding
entry in the transformation matrix, whereas `None` implies that the
corresponding entry will be specified at run time.
name: Name of module.
Raises:
      Error: If constraints fully define the affine transformation; or if
        input grid shape and constraints have different dimensionality.
TypeError: If output_shape and source_shape are not both iterable.
"""
self._source_shape = tuple(source_shape)
self._output_shape = tuple(output_shape)
num_dim = len(source_shape)
if isinstance(constraints, AffineWarpConstraints):
self._constraints = constraints
elif constraints is None:
self._constraints = AffineWarpConstraints.no_constraints(num_dim)
else:
self._constraints = AffineWarpConstraints(constraints=constraints)
if self._constraints.num_free_params == 0:
raise base.Error('Transformation is fully constrained.')
if self._constraints.num_dim != num_dim:
raise base.Error('Incompatible set of constraints provided: '
'input grid shape and constraints have different '
'dimensionality.')
super(AffineGridWarper, self).__init__(source_shape=source_shape,
output_shape=output_shape,
num_coeff=6,
name=name,
constraints=self._constraints)
def _create_features(self, constraints):
"""Creates all the matrices needed to compute the output warped grids."""
affine_warp_constraints = constraints
if not isinstance(affine_warp_constraints, AffineWarpConstraints):
affine_warp_constraints = AffineWarpConstraints(affine_warp_constraints)
mask = affine_warp_constraints.mask
psi = _create_affine_features(output_shape=self._output_shape,
source_shape=self._source_shape)
scales = [(x - 1.0) * .5 for x in reversed(self._source_shape)]
offsets = scales
# Transforming a point x's i-th coordinate via an affine transformation
# is performed via the following dot product:
#
# x_i' = s_i * (T_i * x) + t_i (1)
#
# where Ti is the i-th row of an affine matrix, and the scalars s_i and t_i
# define a decentering and global scaling into the source space.
# In the AffineGridWarper some of the entries of Ti are provided via the
# input, some others are instead fixed, according to the constraints
# assigned in the constructor.
# In create_features the internal dot product (1) is accordingly broken down
# into two parts:
#
# x_i' = Ti[uncon_i] * x[uncon_i, :] + offset(con_var) (2)
#
# i.e. the sum of the dot product of the free parameters (coming
# from the input) indexed by uncond_i and an offset obtained by
# precomputing the fixed part of (1) according to the constraints.
# This step is implemented by analyzing row by row the constraints matrix
# and saving into a list the x[uncon_i] and offset(con_var) data matrices
# for each output dimension.
features = []
for row, scale in zip(mask, scales):
x_i = np.array([x for x, is_active in zip(psi, row) if is_active])
features.append(x_i * scale if len(x_i) else None)
for row_i, row in enumerate(mask):
x_i = None
s = scales[row_i]
for i, is_active in enumerate(row):
if is_active:
continue
# In principle a whole row of the affine matrix can be fully
# constrained. In that case the corresponding dot product between input
# parameters and grid coordinates doesn't need to be implemented in the
# computation graph since it can be precomputed.
        # When a whole row is constrained, x_i - which is initialized to
        # None - will still be None at the end of the loop when it is appended
        # to the features list; this value is then used to detect this setup
        # in the build function where the graph is assembled.
if x_i is None:
x_i = np.array(psi[i]) * affine_warp_constraints[row_i][i] * s
else:
x_i += np.array(psi[i]) * affine_warp_constraints[row_i][i] * s
features.append(x_i)
features += offsets
return features
def _build(self, inputs):
"""Assembles the module network and adds it to the graph.
The internal computation graph is assembled according to the set of
constraints provided at construction time.
Args:
inputs: Tensor containing a batch of transformation parameters.
Returns:
A batch of warped grids.
Raises:
Error: If the input tensor size is not consistent with the constraints
passed at construction time.
"""
input_shape = tf.shape(inputs)
input_dtype = inputs.dtype.as_numpy_dtype
batch_size = tf.expand_dims(input_shape[0], 0)
number_of_params = inputs.get_shape()[1]
if number_of_params != self._constraints.num_free_params:
raise base.Error('Input size is not consistent with constraint '
'definition: {} parameters expected, {} provided.'
.format(self._constraints.num_free_params,
number_of_params))
num_output_dimensions = len(self._psi) // 3
def get_input_slice(start, size):
"""Extracts a subset of columns from the input 2D Tensor."""
return basic.SliceByDim([1], [start], [size])(inputs)
warped_grid = []
var_index_offset = 0
number_of_points = np.prod(self._output_shape)
for i in xrange(num_output_dimensions):
if self._psi[i] is not None:
# The i-th output dimension is not fully specified by the constraints,
# the graph is setup to perform matrix multiplication in batch mode.
grid_coord = self._psi[i].astype(input_dtype)
num_active_vars = self._psi[i].shape[0]
active_vars = get_input_slice(var_index_offset, num_active_vars)
warped_coord = tf.matmul(active_vars, grid_coord)
warped_coord = tf.expand_dims(warped_coord, 1)
var_index_offset += num_active_vars
offset = self._psi[num_output_dimensions + i]
if offset is not None:
offset = offset.astype(input_dtype)
# Some entries in the i-th row of the affine matrix were constrained
# and the corresponding matrix multiplications have been precomputed.
tiling_params = tf.concat(
[
batch_size, tf.constant(
1, shape=(1,)), tf.ones_like(offset.shape)
],
0)
offset = offset.reshape((1, 1) + offset.shape)
warped_coord += tf.tile(offset, tiling_params)
else:
# The i-th output dimension is fully specified by the constraints, and
# the corresponding matrix multiplications have been precomputed.
warped_coord = self._psi[num_output_dimensions + i].astype(input_dtype)
tiling_params = tf.concat(
[
batch_size, tf.constant(
1, shape=(1,)), tf.ones_like(warped_coord.shape)
],
0)
warped_coord = warped_coord.reshape((1, 1) + warped_coord.shape)
warped_coord = tf.tile(warped_coord, tiling_params)
warped_coord += self._psi[i + 2 * num_output_dimensions]
      # Need to help TF figure out shape inference since tiling information
# is held in Tensors which are not known until run time.
warped_coord.set_shape([None, 1, number_of_points])
warped_grid.append(warped_coord)
# Reshape all the warped coordinates tensors to match the specified output
# shape and concatenate into a single matrix.
grid_shape = self._output_shape + (1,)
warped_grid = [basic.BatchReshape(grid_shape)(grid) for grid in warped_grid]
return tf.concat(warped_grid, len(grid_shape))
@property
def constraints(self):
return self._constraints
def inverse(self, name=None):
"""Returns a `sonnet` module to compute inverse affine transforms.
The function first assembles a network that given the constraints of the
current AffineGridWarper and a set of input parameters, retrieves the
coefficients of the corresponding inverse affine transform, then feeds its
output into a new AffineGridWarper setup to correctly warp the `output`
space into the `source` space.
Args:
name: Name of module implementing the inverse grid transformation.
Returns:
A `sonnet` module performing the inverse affine transform of a reference
grid of points via an AffineGridWarper module.
Raises:
tf.errors.UnimplementedError: If the function is called on a non 2D
instance of AffineGridWarper.
"""
if self._num_coeff != 6:
      raise tf.errors.UnimplementedError('AffineGridWarper currently supports '
                                         'inversion only for the 2D case.')
def _affine_grid_warper_inverse(inputs):
"""Assembles network to compute inverse affine transformation.
Each `inputs` row potentially contains [a, b, tx, c, d, ty]
corresponding to an affine matrix:
A = [a, b, tx],
[c, d, ty]
We want to generate a tensor containing the coefficients of the
corresponding inverse affine transformation in a constraints-aware
fashion.
Calling M:
M = [a, b]
[c, d]
the affine matrix for the inverse transform is:
        A_inv = [M^(-1), M^(-1) * [-tx, -ty]^T]
where
M^(-1) = (ad - bc)^(-1) * [ d, -b]
[-c, a]
Args:
inputs: Tensor containing a batch of transformation parameters.
Returns:
A tensorflow graph performing the inverse affine transformation
parametrized by the input coefficients.
"""
batch_size = tf.expand_dims(tf.shape(inputs)[0], 0)
constant_shape = tf.concat([batch_size, tf.convert_to_tensor((1,))], 0)
index = iter(range(6))
def get_variable(constraint):
if constraint is None:
i = next(index)
return inputs[:, i:i+1]
else:
return tf.fill(constant_shape, tf.constant(constraint,
dtype=inputs.dtype))
constraints = itertools.chain.from_iterable(self.constraints)
a, b, tx, c, d, ty = (get_variable(constr) for constr in constraints)
det = a * d - b * c
a_inv = d / det
b_inv = -b / det
c_inv = -c / det
d_inv = a / det
m_inv = basic.BatchReshape(
[2, 2])(tf.concat([a_inv, b_inv, c_inv, d_inv], 1))
txy = tf.expand_dims(tf.concat([tx, ty], 1), 2)
txy_inv = basic.BatchFlatten()(tf.matmul(m_inv, txy))
tx_inv = txy_inv[:, 0:1]
ty_inv = txy_inv[:, 1:2]
inverse_gw_inputs = tf.concat(
[a_inv, b_inv, -tx_inv, c_inv, d_inv, -ty_inv], 1)
agw = AffineGridWarper(self.output_shape,
self.source_shape)
return agw(inverse_gw_inputs) # pylint: disable=not-callable
if name is None:
name = self.module_name + '_inverse'
return base.Module(_affine_grid_warper_inverse, name=name)
class AffineWarpConstraints(object):
"""Affine warp contraints class.
`AffineWarpConstraints` allow for very succinct definitions of constraints on
the values of entries in affine transform matrices.
"""
def __init__(self, constraints=((None,) * 3,) * 2):
"""Creates a constraint definition for an affine transformation.
Args:
constraints: A doubly-nested iterable of shape `[N, N+1]` defining
constraints on the entries of a matrix that represents an affine
transformation in `N` dimensions. A numeric value bakes in a constraint
on the corresponding entry in the transformation matrix, whereas `None`
implies that the corresponding entry will be specified at run time.
Raises:
TypeError: If `constraints` is not a nested iterable.
ValueError: If the double iterable `constraints` has inconsistent
dimensions.
"""
try:
self._constraints = tuple(tuple(x) for x in constraints)
except TypeError:
raise TypeError('constraints must be a nested iterable.')
# Number of rows
self._num_dim = len(self._constraints)
expected_num_cols = self._num_dim + 1
if any(len(x) != expected_num_cols for x in self._constraints):
      raise ValueError('The input list must define an Nx(N+1) matrix of '
                       'constraints.')
def _calc_mask(self):
"""Computes a boolean mask from the user defined constraints."""
mask = []
for row in self._constraints:
mask.append(tuple(x is None for x in row))
return tuple(mask)
def _calc_num_free_params(self):
"""Computes number of non constrained parameters."""
return sum(row.count(None) for row in self._constraints)
@property
def num_free_params(self):
return self._calc_num_free_params()
@property
def mask(self):
return self._calc_mask()
@property
def constraints(self):
return self._constraints
@property
def num_dim(self):
return self._num_dim
def __getitem__(self, i):
"""Returns the list of constraints for the i-th row of the affine matrix."""
return self._constraints[i]
def _combine(self, x, y):
"""Combines two constraints, raising an error if they are not compatible."""
    if x is None or y is None:
      # Explicitly check for `None` so that a constraint of 0 is preserved.
      return y if x is None else x
if x != y:
raise ValueError('Incompatible set of constraints provided.')
return x
def __and__(self, rhs):
"""Combines two sets of constraints into a coherent single set."""
return self.combine_with(rhs)
def combine_with(self, additional_constraints):
"""Combines two sets of constraints into a coherent single set."""
x = additional_constraints
if not isinstance(additional_constraints, AffineWarpConstraints):
x = AffineWarpConstraints(additional_constraints)
new_constraints = []
for left, right in zip(self._constraints, x.constraints):
new_constraints.append([self._combine(x, y) for x, y in zip(left, right)])
return AffineWarpConstraints(new_constraints)
  # Collection of utilities to initialize an AffineGridWarper in 2D and 3D.
@classmethod
def no_constraints(cls, num_dim=2):
"""Empty set of constraints for a num_dim-ensional affine transform."""
return cls(((None,) * (num_dim + 1),) * num_dim)
@classmethod
def translation_2d(cls, x=None, y=None):
"""Assign contraints on translation components of affine transform in 2d."""
return cls([[None, None, x],
[None, None, y]])
@classmethod
def translation_3d(cls, x=None, y=None, z=None):
"""Assign contraints on translation components of affine transform in 3d."""
return cls([[None, None, None, x],
[None, None, None, y],
[None, None, None, z]])
@classmethod
def scale_2d(cls, x=None, y=None):
"""Assigns contraints on scaling components of affine transform in 2d."""
return cls([[x, None, None],
[None, y, None]])
@classmethod
def scale_3d(cls, x=None, y=None, z=None):
"""Assigns contraints on scaling components of affine transform in 3d."""
return cls([[x, None, None, None],
[None, y, None, None],
[None, None, z, None]])
@classmethod
def shear_2d(cls, x=None, y=None):
"""Assigns contraints on shear components of affine transform in 2d."""
return cls([[None, x, None],
[y, None, None]])
@classmethod
def no_shear_2d(cls):
return cls.shear_2d(x=0, y=0)
@classmethod
def no_shear_3d(cls):
"""Assigns contraints on shear components of affine transform in 3d."""
return cls([[None, 0, 0, None],
[0, None, 0, None],
[0, 0, None, None]])
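# Usage sketch (illustrative assumption, not part of the original file):
# warping a [28, 28] reference grid with a batch of 2D affine parameters whose
# shear components are fixed to zero, leaving 4 free parameters per example
# (x scale, x translation, y scale, y translation).
def _example_affine_grid_warp(params):
  """Warps a grid with shear-free affine parameters of shape [batch, 4]."""
  constraints = AffineWarpConstraints.no_shear_2d()
  warper = AffineGridWarper(source_shape=[28, 28],
                            output_shape=[28, 28],
                            constraints=constraints)
  warped_grid = warper(params)       # Shape [batch_size, 28, 28, 2].
  inverse_warper = warper.inverse()  # Maps the output space back to the source.
  return warped_grid, inverse_warper(params)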
| sonnet-1 | sonnet/python/modules/spatial_transformer.py |
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for sonnet.python.modules.basic."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
# Dependency imports
from absl.testing import parameterized
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import sonnet as snt
from sonnet.python.modules import basic
from sonnet.python.ops import nest
import tensorflow.compat.v1 as tf
from tensorflow.contrib import layers as contrib_layers
from tensorflow.contrib import nn as contrib_nn
from tensorflow.contrib.eager.python import tfe as contrib_eager
from tensorflow.python.client import device_lib # pylint: disable=g-direct-tensorflow-import
from tensorflow.python.ops import variables # pylint: disable=g-direct-tensorflow-import
def _test_initializer(mu=0.0, sigma=1.0, dtype=tf.float32):
"""Custom initializer for Linear tests."""
def _initializer(shape,
dtype=dtype,
partition_info=None): # pylint: disable=unused-argument
random_normal_tensor = np.asarray(np.random.randn(*shape)) * sigma + mu
return random_normal_tensor.astype(dtype.as_numpy_dtype)
return _initializer
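# The helper above is typically wired into the modules under test through
# their `initializers` argument, e.g. (sketch):
#
#   lin = snt.Linear(output_size=17,
#                    initializers={"w": _test_initializer(),
#                                  "b": _test_initializer()})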
@contrib_eager.run_all_tests_in_graph_and_eager_modes
class ConcatLinearTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super(ConcatLinearTest, self).setUp()
self.batch_size = 11
self.in_sizes = [5, 19]
self.out_size = 17
self.seed = 42
@parameterized.named_parameters(
("WithBias", True),
("WithoutBias", False))
def testShape(self, use_bias):
inputs = [tf.ones(shape=[self.batch_size, size]) for size in self.in_sizes]
lin = snt.ConcatLinear(output_size=self.out_size, use_bias=use_bias)
output = lin(inputs)
self.assertTrue(
output.get_shape().is_compatible_with([self.batch_size, self.out_size]))
def testName(self):
mod_name = "unique_name"
with tf.variable_scope("scope"):
lin = snt.ConcatLinear(name=mod_name, output_size=self.out_size)
self.assertEqual(lin.scope_name, "scope/" + mod_name)
self.assertEqual(lin.module_name, mod_name)
@contrib_eager.run_all_tests_in_graph_and_eager_modes
class LinearTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super(LinearTest, self).setUp()
self.batch_size = 11
self.in_size = 13
self.out_size = 17
self.seed = 42
@parameterized.named_parameters(
("WithBias", True),
("WithoutBias", False))
def testShape(self, use_bias):
inputs = tf.ones(dtype=tf.float32, shape=[self.batch_size, self.in_size])
lin = snt.Linear(output_size=self.out_size, use_bias=use_bias)
output = lin(inputs)
self.assertTrue(
output.get_shape().is_compatible_with([self.batch_size, self.out_size]))
def testName(self):
mod_name = "unique_name"
with tf.variable_scope("scope"):
lin = snt.Linear(name=mod_name, output_size=self.out_size)
self.assertEqual(lin.scope_name, "scope/" + mod_name)
self.assertEqual(lin.module_name, mod_name)
@parameterized.named_parameters(
("WithBias", True),
("WithoutBias", False))
def testVariables(self, use_bias):
inputs = tf.ones(dtype=tf.float32, shape=[self.batch_size, self.in_size])
lin = snt.Linear(output_size=self.out_size, use_bias=use_bias)
err = r"Variables in {} not instantiated yet, __call__ the module first."
with self.assertRaisesRegexp(snt.NotConnectedError,
err.format(lin.scope_name)):
lin.get_variables()
err = "Variables in {} not instantiated yet, __call__ the module first."
with self.assertRaisesRegexp(snt.NotConnectedError,
err.format(lin.scope_name)):
_ = lin.w
err = "Variables in {} not instantiated yet, __call__ the module first."
with self.assertRaisesRegexp(snt.NotConnectedError,
err.format(lin.scope_name)):
_ = lin.b
lin(inputs) # Connect the module, but ignore the return value.
variables_ = lin.get_variables()
if use_bias:
self.assertLen(variables_, 2, "Linear should have 2 variables.")
else:
err = "No bias Variable in Linear Module when `use_bias=False`."
with self.assertRaisesRegexp(AttributeError, err):
_ = lin.b
self.assertLen(variables_, 1, "Linear should have 1 variable.")
for v in variables_:
self.assertRegexpMatches(v.name,
r"{}/[wb]:0".format(lin.scope_name))
if v.name.endswith("w:0"):
shape = np.ndarray((self.in_size, self.out_size))
else:
shape = np.ndarray(self.out_size)
self.assertShapeEqual(shape, tf.convert_to_tensor(v))
def testCustomGetter(self):
"""Check that custom getters work appropriately."""
def custom_getter(getter, *args, **kwargs):
kwargs["trainable"] = False
return getter(*args, **kwargs)
inputs = tf.ones(dtype=tf.float32, shape=[self.batch_size, self.in_size])
# Make w and b non-trainable.
lin1 = snt.Linear(output_size=self.out_size,
custom_getter=custom_getter)
lin1(inputs)
self.assertEmpty(tf.trainable_variables())
self.assertLen(tf.global_variables(), 2)
# Make w non-trainable.
lin2 = snt.Linear(output_size=self.out_size,
custom_getter={"w": custom_getter})
lin2(inputs)
self.assertLen(tf.trainable_variables(), 1)
self.assertLen(tf.global_variables(), 4)
@parameterized.named_parameters(
("WithBias", True),
("WithoutBias", False))
def testComputation(self, use_bias):
np.random.seed(self.seed)
types = (tf.float16, tf.float32, tf.float64)
tol = (1e-2, 1e-6, 1e-9)
tolerance_map = dict(zip(types, tol))
for dtype in types:
# With random data, check the TF calculation matches the Numpy version.
input_data = np.random.randn(self.batch_size,
self.in_size).astype(dtype.as_numpy_dtype)
inputs = tf.constant(input_data)
if use_bias:
initializers = {"w": _test_initializer(), "b": _test_initializer()}
else:
initializers = {"w": _test_initializer()}
lin = snt.Linear(output_size=self.out_size,
use_bias=use_bias,
initializers=initializers)
output = lin(inputs)
self.evaluate(tf.global_variables_initializer())
if use_bias:
output_data, w, b = self.evaluate([output, lin.w, lin.b])
else:
output_data, w = self.evaluate([output, lin.w])
if use_bias:
result = (np.dot(input_data, w.astype(dtype.as_numpy_dtype)) +
b.astype(dtype.as_numpy_dtype))
else:
result = np.dot(input_data, w.astype(dtype.as_numpy_dtype))
self.assertAllClose(
result,
output_data,
atol=tolerance_map[dtype],
rtol=tolerance_map[dtype])
@parameterized.named_parameters(
("WithBias", True),
("WithoutBias", False))
def testBatchedComputation(self, use_bias):
np.random.seed(self.seed)
types = (tf.float16, tf.float32, tf.float64)
tol = (1e-1, 1e-6, 1e-9)
tolerance_map = dict(zip(types, tol))
for dtype in types:
# With random data, check the TF calculation matches the Numpy version.
input_data = np.random.randn(
self.batch_size, self.batch_size, self.in_size)
flat_input_data = np.reshape(
input_data, [self.batch_size * self.batch_size, self.in_size])
input_data = input_data.astype(dtype.as_numpy_dtype)
inputs = tf.constant(input_data)
if use_bias:
initializers = {"w": _test_initializer(), "b": _test_initializer()}
else:
initializers = {"w": _test_initializer()}
lin = snt.Linear(output_size=self.out_size, use_bias=use_bias,
allow_many_batch_dims=True, initializers=initializers)
output = lin(inputs)
self.evaluate(tf.global_variables_initializer())
if use_bias:
output_data, w, b = self.evaluate([output, lin.w, lin.b])
else:
output_data, w = self.evaluate([output, lin.w])
if use_bias:
result = (np.dot(flat_input_data, w.astype(dtype.as_numpy_dtype)) +
b.astype(dtype.as_numpy_dtype))
else:
result = np.dot(flat_input_data, w.astype(dtype.as_numpy_dtype))
result = np.reshape(
result, [self.batch_size, self.batch_size, self.out_size])
self.assertAllClose(
output_data,
result,
atol=tolerance_map[dtype],
rtol=tolerance_map[dtype])
@parameterized.named_parameters(
("WithBias", True),
("WithoutBias", False))
def testSharing(self, use_bias):
np.random.seed(self.seed)
input_data = np.random.randn(self.batch_size,
self.in_size).astype(np.float32)
inp_1 = tf.constant(input_data)
inp_2 = tf.constant(input_data)
if use_bias:
initializers = {"w": _test_initializer(), "b": _test_initializer()}
else:
initializers = {"w": _test_initializer()}
lin = snt.Linear(output_size=self.out_size,
use_bias=use_bias,
initializers=initializers)
out_1 = lin(inp_1)
out_2 = lin(inp_2)
# With the same data into each input, outputs should be identical.
self.evaluate(tf.global_variables_initializer())
out_data_1, out_data_2 = self.evaluate([out_1, out_2])
self.assertAllEqual(out_data_1, out_data_2)
def testUniquifying(self):
# Create three modules in same scope with same name - make_template will
# uniquify them.
inp = tf.ones(dtype=tf.float32, shape=[self.batch_size, self.in_size])
mod_name = "another_linear_module"
lin1 = snt.Linear(name=mod_name, output_size=self.out_size)
lin2 = snt.Linear(name=mod_name, output_size=self.out_size)
lin3 = snt.Linear(name=mod_name, output_size=self.out_size)
# Connect all the modules to instantiate the variables.
lin1(inp)
lin2(inp)
lin3(inp)
# Ensure the module name property has been uniquified and is accessible.
self.assertEqual(lin1.scope_name, mod_name)
self.assertEqual(lin2.scope_name, mod_name + "_1")
self.assertEqual(lin3.scope_name, mod_name + "_2")
self.assertEqual(lin1.module_name, mod_name)
self.assertEqual(lin2.module_name, mod_name + "_1")
self.assertEqual(lin3.module_name, mod_name + "_2")
vars1 = lin1.get_variables()
vars2 = lin2.get_variables()
vars3 = lin3.get_variables()
# Ensure variable names have been made unique.
for v in vars1:
self.assertRegexpMatches(v.name, r"{}/[wb]:0".format(lin1.scope_name))
for v in vars2:
self.assertRegexpMatches(v.name, r"{}/[wb]:0".format(lin2.scope_name))
for v in vars3:
self.assertRegexpMatches(v.name, r"{}/[wb]:0".format(lin3.scope_name))
def testIsConnected(self):
bad_inputs = tf.ones(
dtype=tf.float32, shape=[self.batch_size, self.in_size, self.in_size])
lin = snt.Linear(output_size=self.out_size)
self.assertFalse(lin.is_connected)
# This will raise a snt.IncompatibleShapeError because bad_inputs has
# too many dimensions.
try:
lin(bad_inputs)
except snt.IncompatibleShapeError:
pass
self.assertFalse(lin.is_connected)
def testUnknownInputSize(self):
if tf.executing_eagerly():
self.skipTest("Inputs with unknown shape are not supported in eager.")
bad_inputs = tf.placeholder(tf.float32, shape=[self.batch_size, None])
lin = snt.Linear(output_size=self.out_size)
self.assertFalse(lin.is_connected)
err = "Input size must be specified at module build time"
with self.assertRaisesRegexp(snt.IncompatibleShapeError, err):
lin(bad_inputs)
self.assertFalse(lin.is_connected)
def testInvalidInitializationParameters(self):
with self.assertRaisesRegexp(KeyError, "Invalid initializer keys.*"):
snt.Linear(
output_size=self.out_size,
initializers={"not_w": tf.truncated_normal_initializer(stddev=1.0)})
err = "Initializer for 'w' is not a callable function"
with self.assertRaisesRegexp(TypeError, err):
snt.Linear(output_size=self.out_size,
initializers={"w": tf.zeros([1, 2, 3])})
def testInvalidPartitionerParameters(self):
with self.assertRaisesRegexp(KeyError, "Invalid partitioner keys.*"):
snt.Linear(
output_size=self.out_size,
partitioners={"not_w": tf.fixed_size_partitioner(num_shards=2)})
err = "Partitioner for 'w' is not a callable function"
with self.assertRaisesRegexp(TypeError, err):
snt.Linear(output_size=self.out_size,
partitioners={"w": tf.zeros([1, 2, 3])})
def testInvalidRegularizationParameters(self):
with self.assertRaisesRegexp(KeyError, "Invalid regularizer keys.*"):
snt.Linear(
output_size=self.out_size,
regularizers={"not_w": contrib_layers.l1_regularizer(scale=0.5)})
err = "Regularizer for 'w' is not a callable function"
with self.assertRaisesRegexp(TypeError, err):
snt.Linear(output_size=self.out_size,
regularizers={"w": tf.zeros([1, 2, 3])})
def testRegularizersInRegularizationLosses(self):
inputs = tf.zeros([1, 100])
w_regularizer = contrib_layers.l1_regularizer(scale=0.5)
b_regularizer = contrib_layers.l2_regularizer(scale=0.5)
lin = snt.Linear(output_size=100,
regularizers={"w": w_regularizer, "b": b_regularizer})
lin(inputs)
regularizers = tf.get_collection(
tf.GraphKeys.REGULARIZATION_LOSSES)
self.assertLen(regularizers, 2)
if not tf.executing_eagerly():
self.assertRegexpMatches(regularizers[0].name, ".*l1_regularizer.*")
self.assertRegexpMatches(regularizers[1].name, ".*l2_regularizer.*")
def testClone(self):
inputs = tf.zeros([1, 100])
linear = snt.Linear(output_size=self.out_size)
clone1 = linear.clone()
clone2 = linear.clone(name="clone2")
linear(inputs)
clone1(inputs)
clone2(inputs)
all_vars = tf.trainable_variables()
linear_vars = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES,
scope=linear.variable_scope.name + "/")
clone1_vars = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES,
scope=clone1.variable_scope.name + "/")
clone2_vars = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES,
scope=clone2.variable_scope.name + "/")
self.assertEqual(linear.output_size, clone1.output_size)
self.assertEqual(linear.module_name + "_clone", clone1.module_name)
self.assertEqual("clone2", clone2.module_name)
self.assertLen(all_vars, 3*len(linear_vars))
self.assertLen(linear_vars, len(clone1_vars))
self.assertLen(linear_vars, len(clone2_vars))
@parameterized.named_parameters(
("WithBias", True),
("WithoutBias", False))
def testTranspose(self, use_bias):
with tf.variable_scope("scope1"):
linear1 = snt.Linear(output_size=self.out_size,
use_bias=use_bias,
name="linear")
linear2 = snt.Linear(output_size=self.out_size,
use_bias=use_bias,
name="linear")
with tf.variable_scope("scope2"):
linear_transpose1 = linear1.transpose()
linear_transpose2 = linear1.transpose()
linear_transpose3 = linear2.transpose()
self.assertEqual("scope1/linear", linear1.scope_name)
self.assertEqual("linear", linear1.module_name)
self.assertEqual("scope1/linear_1", linear2.scope_name)
self.assertEqual("linear_1", linear2.module_name)
self.assertEqual("scope2/linear_transpose", linear_transpose1.scope_name)
self.assertEqual("linear_transpose", linear_transpose1.module_name)
self.assertEqual("scope2/linear_transpose_1", linear_transpose2.scope_name)
self.assertEqual("linear_transpose_1", linear_transpose2.module_name)
self.assertEqual("scope2/linear_1_transpose", linear_transpose3.scope_name)
self.assertEqual("linear_1_transpose", linear_transpose3.module_name)
input_to_linear = tf.ones(
dtype=tf.float32, shape=[self.batch_size, self.in_size])
err = ("Variables in {} not instantiated yet, __call__ the "
"module first.".format(linear1.scope_name))
with self.assertRaisesRegexp(snt.NotConnectedError, err):
linear_transpose1(input_to_linear)
linear_transpose1 = linear1.transpose()
self.assertEqual(linear1.has_bias, linear_transpose1.has_bias)
linear_out = linear1(input_to_linear)
linear_transposed_output = linear_transpose1(linear_out)
self.assertEqual(linear_transposed_output.get_shape(),
input_to_linear.get_shape())
def testGradientColocation(self):
"""Tests a particular device (e.g. gpu, cpu) placement.
This test ensures that the following device placement is possible:
* The Linear module is on the gpu,
* the optimizer is declared to be on the cpu,
* but when calling minimize on the optimizer, we pass True to
colocate_gradients_with_ops.
    The test exists because, while one may expect tf.matmul(X, w) + b to be
    equivalent to tf.nn.xw_plus_b(X, w, b), the latter raises an
    InvalidArgumentError under this placement.
    Warning: if there is no GPU available to TensorFlow, this test is skipped
    with just a warning, since it requires access to a GPU.
"""
if not any(x.device_type == "GPU" for x in device_lib.list_local_devices()):
tf.logging.warn("Skipping the gradient colocation test as there is no "
"gpu available to tensorflow.")
return
n_outputs = 5
n_inputs = 3
batch_size = 7
linear = snt.Linear(n_outputs)
with tf.device("/cpu:*"):
# Set up data.
inputs = tf.ones(dtype=tf.float32, shape=[batch_size, n_inputs])
labels = tf.to_int64(np.ones((batch_size)))
# Predictions.
with tf.device("/gpu:*"):
outputs = linear(inputs)
# Calculate the loss.
cross_entropy = contrib_nn.deprecated_flipped_sparse_softmax_cross_entropy_with_logits( # pylint: disable=line-too-long
outputs, labels, name="xentropy")
loss = tf.reduce_mean(cross_entropy, name="xentropy_mean")
# Optimizer.
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
optimizer.minimize(loss, colocate_gradients_with_ops=True)
init = tf.global_variables_initializer()
try:
if tf.executing_eagerly():
# Unify on evaluate once force_gpu supports eager.
self.evaluate(init)
else:
with self.test_session(force_gpu=True) as sess:
sess.run(init)
except tf.errors.InvalidArgumentError as e:
self.fail("Cannot start the session. Details:\n" + e.message)
def testPartitioners(self):
if tf.executing_eagerly():
self.skipTest("Partitioned variables are not supported in eager mode.")
inputs = tf.zeros([1, 100])
partitioners = {
"w": tf.variable_axis_size_partitioner(10000),
"b": tf.variable_axis_size_partitioner(100),
}
linear = snt.Linear(100, partitioners=partitioners)
linear(inputs)
self.assertEqual(type(linear.w), variables.PartitionedVariable)
self.assertEqual(type(linear.b), variables.PartitionedVariable)
@parameterized.named_parameters(
("float16", tf.float16),
("bfloat16", tf.bfloat16),
("float32", tf.float32),
("float64", tf.float64))
def testFloatDataTypeConsistent(self, dtype):
inputs = tf.ones(dtype=dtype, shape=[3, 7])
linear = snt.Linear(11)
outputs = linear(inputs)
self.assertEqual(linear.w.dtype.base_dtype, dtype)
self.assertEqual(linear.b.dtype.base_dtype, dtype)
self.assertEqual(outputs.dtype.base_dtype, dtype)
def testIntegerDataTypeFailsWithDefaultInitializers(self):
dtype = tf.int32
inputs = tf.ones(dtype=dtype, shape=[3, 7])
linear = snt.Linear(11)
with self.assertRaisesRegexp(ValueError, "Expected floating point type"):
unused_outputs = linear(inputs)
def testIntegerDataTypeConsistentWithCustomWeightInitializer(self):
dtype = tf.int32
inputs = tf.ones(dtype=dtype, shape=[3, 7])
linear = snt.Linear(
11, initializers={"w": tf.zeros_initializer(dtype=dtype)})
outputs = linear(inputs)
self.assertEqual(linear.w.dtype.base_dtype, dtype)
self.assertEqual(linear.b.dtype.base_dtype, dtype)
self.assertEqual(outputs.dtype.base_dtype, dtype)
@contrib_eager.run_all_tests_in_graph_and_eager_modes
class AddBiasTest(tf.test.TestCase, parameterized.TestCase):
BATCH_SIZE = 11
IN_SHAPE = (13, 7, 5)
OUT_SHAPE = IN_SHAPE
BIAS_DIMS_PARAMETERS = [
("DefaultBiasDims", None, IN_SHAPE),
("AllBiasDims", [1, 2, 3], IN_SHAPE),
("ScalarBiasDims", [], ()),
("LastBiasDims", [-1], (IN_SHAPE[2],)),
("ExplicitLastBiasDims", [3], (IN_SHAPE[2],)),
("FirstBiasDims", [1], (IN_SHAPE[0], 1, 1)),
("MiddleBiasDims", [2], (IN_SHAPE[1], 1)),
]
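  # In the cases above, `bias_dims` indexes the non-batch dimensions of the
  # input (dim 0 is the batch): listed dimensions take the corresponding input
  # size, unlisted dimensions after the first listed one become broadcastable
  # 1s, leading unlisted dimensions are dropped, an empty list yields a scalar
  # bias, and None (the default) gives a bias over all non-batch dimensions.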
def setUp(self):
super(AddBiasTest, self).setUp()
self.mb_in_shape = (self.BATCH_SIZE,) + self.IN_SHAPE
self.mb_out_shape = (self.BATCH_SIZE,) + self.OUT_SHAPE
self.seed = 42
@parameterized.named_parameters(*BIAS_DIMS_PARAMETERS)
def testShape(self, bias_dims, unused_bias_shape):
inputs = tf.ones(dtype=tf.float32, shape=self.mb_in_shape)
add = snt.AddBias(bias_dims=bias_dims)
output = add(inputs)
self.assertTrue(
output.get_shape().is_compatible_with(self.mb_out_shape))
@parameterized.named_parameters(*BIAS_DIMS_PARAMETERS)
def testName(self, bias_dims, unused_bias_shape):
mod_name = "unique_name"
with tf.variable_scope("scope"):
add = snt.AddBias(name=mod_name, bias_dims=bias_dims)
self.assertEqual(add.scope_name, "scope/" + mod_name)
self.assertEqual(add.module_name, mod_name)
@parameterized.named_parameters(*BIAS_DIMS_PARAMETERS)
def testVariables(self, bias_dims, bias_shape):
inputs = tf.ones(dtype=tf.float32, shape=self.mb_in_shape)
add = snt.AddBias(bias_dims=bias_dims)
err = ("Variables in {} not instantiated yet, __call__ "
"the module first.".format(add.scope_name))
with self.assertRaisesRegexp(snt.NotConnectedError, err):
add.get_variables()
err = ("Variables in {} not instantiated yet, __call__ "
"the module first.".format(add.scope_name))
with self.assertRaisesRegexp(snt.NotConnectedError, err):
_ = add.b
add(inputs) # Connect the module, but ignore the return value.
variables_ = add.get_variables()
self.assertLen(variables_, 1, "Add should have 1 variable.")
for v in variables_:
if not tf.executing_eagerly():
self.assertRegexpMatches(v.name, r"{}/[b]:0".format(add.scope_name))
shape = np.ndarray(bias_shape)
self.assertShapeEqual(shape, tf.convert_to_tensor(v))
@parameterized.named_parameters(*BIAS_DIMS_PARAMETERS)
def testComputation(self, bias_dims, bias_shape):
np.random.seed(self.seed)
types = (tf.float16, tf.float32, tf.float64)
tol = (1e-2, 1e-6, 1e-9)
tolerance_map = dict(zip(types, tol))
b_regularizer = contrib_layers.l2_regularizer(scale=0.5)
for dtype in types:
# With random data, check the TF calculation matches the Numpy version.
input_data = np.random.randn(*self.mb_in_shape).astype(
dtype.as_numpy_dtype)
inputs = tf.constant(input_data)
add = snt.AddBias(bias_dims=bias_dims,
initializers={"b": _test_initializer()},
regularizers={"b": b_regularizer})
output = add(inputs)
output_subtract = add(inputs, multiplier=-1)
self.evaluate(tf.global_variables_initializer())
output_data, output_subtract_data, b = self.evaluate(
[output, output_subtract, add.b])
regularizers = tf.get_collection(
tf.GraphKeys.REGULARIZATION_LOSSES)
if not tf.executing_eagerly():
self.assertRegexpMatches(regularizers[0].name, ".*l2_regularizer.*")
if not bias_shape: # Scalar bias.
b_array = np.array([b]).astype(dtype.as_numpy_dtype(b))
else:
b_array = b.astype(dtype.as_numpy_dtype)
result = input_data + b_array
result_subtract = input_data - b_array
self.assertAllClose(
result,
output_data,
atol=tolerance_map[dtype],
rtol=tolerance_map[dtype])
self.assertAllClose(
result_subtract,
output_subtract_data,
atol=tolerance_map[dtype],
rtol=tolerance_map[dtype])
@parameterized.named_parameters(*BIAS_DIMS_PARAMETERS)
def testSharing(self, bias_dims, unused_bias_shape):
np.random.seed(self.seed)
input_data = np.random.randn(*self.mb_in_shape).astype(np.float32)
inp_1 = tf.constant(input_data)
inp_2 = tf.constant(input_data)
add = snt.AddBias(bias_dims=bias_dims,
initializers={"b": _test_initializer()})
out_1 = add(inp_1)
out_2 = add(inp_2)
# Put the same data into each input, outputs should be identical.
self.evaluate(tf.global_variables_initializer())
out_data_1, out_data_2 = self.evaluate([out_1, out_2])
self.assertAllEqual(out_data_1, out_data_2)
@parameterized.named_parameters(*BIAS_DIMS_PARAMETERS)
def testUniquifying(self, bias_dims, unused_bias_shape):
# Create three modules in same scope with same name - make_template will
# uniquify them.
inp = tf.ones(dtype=tf.float32, shape=self.mb_in_shape)
mod_name = "another_linear_module"
add1 = snt.AddBias(bias_dims=bias_dims, name=mod_name)
add2 = snt.AddBias(bias_dims=bias_dims, name=mod_name)
add3 = snt.AddBias(bias_dims=bias_dims, name=mod_name)
# Connect all the modules to instantiate the variables.
add1(inp)
add2(inp)
add3(inp)
# Ensure the module name property has been uniquified and is accessible.
self.assertEqual(add1.module_name, mod_name)
self.assertEqual(add2.module_name, mod_name + "_1")
self.assertEqual(add3.module_name, mod_name + "_2")
vars1 = add1.get_variables()
vars2 = add2.get_variables()
vars3 = add3.get_variables()
# Ensure variable names have been made unique.
for v in vars1:
self.assertRegexpMatches(v.name, r"{}/[b]:0".format(add1.scope_name))
for v in vars2:
self.assertRegexpMatches(v.name, r"{}/[b]:0".format(add2.scope_name))
for v in vars3:
self.assertRegexpMatches(v.name, r"{}/[b]:0".format(add3.scope_name))
@parameterized.named_parameters(*BIAS_DIMS_PARAMETERS)
def testInvalidInitializationParameters(self, bias_dims, unused_bias_shape):
err = "Invalid initializer keys.*"
with self.assertRaisesRegexp(KeyError, err):
snt.AddBias(
bias_dims=bias_dims,
initializers={"not_b": tf.truncated_normal_initializer(stddev=1.0)})
err = "Initializer for 'b' is not a callable function"
with self.assertRaisesRegexp(TypeError, err):
snt.AddBias(
bias_dims=bias_dims,
initializers={"b": tf.zeros([1, 2, 3])})
@parameterized.named_parameters(*BIAS_DIMS_PARAMETERS)
def testInvalidPartitionerParameters(self, bias_dims, unused_bias_shape):
with self.assertRaisesRegexp(KeyError, "Invalid partitioner keys.*"):
snt.AddBias(
bias_dims=bias_dims,
partitioners={"not_b": tf.fixed_size_partitioner(num_shards=2)})
err = "Partitioner for 'b' is not a callable function"
with self.assertRaisesRegexp(TypeError, err):
snt.AddBias(
bias_dims=bias_dims,
partitioners={"b": tf.zeros([1, 2, 3])})
@parameterized.named_parameters(*BIAS_DIMS_PARAMETERS)
def testInvalidRegularizationParameters(self, bias_dims, unused_bias_shape):
with self.assertRaisesRegexp(KeyError, "Invalid regularizer keys.*"):
snt.AddBias(
bias_dims=bias_dims,
regularizers={"not_b": contrib_layers.l1_regularizer(scale=0.5)})
err = "Regularizer for 'b' is not a callable function"
with self.assertRaisesRegexp(TypeError, err):
snt.AddBias(bias_dims=bias_dims,
regularizers={"b": tf.zeros([1, 2, 3])})
@parameterized.named_parameters(*BIAS_DIMS_PARAMETERS)
def testTranspose(self, bias_dims, unused_bias_shape):
add = snt.AddBias(bias_dims=bias_dims)
input_to_add = tf.ones(dtype=tf.float32, shape=self.mb_in_shape)
# Check error occurs when we build the transposed module before the
# original.
add_transpose = add.transpose()
err = "Build the original untransposed module before building this one."
with self.assertRaisesRegexp(snt.ParentNotBuiltError, err):
add_transpose(input_to_add)
# Check that building the original before the transposed works as intended.
add_transpose = add.transpose()
add_out = add(input_to_add)
add_transpose_out = add_transpose(add_out)
self.assertEqual(add_transpose_out.get_shape(),
input_to_add.get_shape())
self.assertEqual(add_transpose.b.get_shape(),
add.b.get_shape())
def testPartitioners(self):
if tf.executing_eagerly():
self.skipTest("Partitioned variables are not supported in eager mode.")
inputs = tf.zeros([1, 100])
partitioners = {
"b": tf.variable_axis_size_partitioner(10000),
}
bias = snt.AddBias(partitioners=partitioners)
bias(inputs)
self.assertEqual(type(bias.b), variables.PartitionedVariable)
@contrib_eager.run_all_tests_in_graph_and_eager_modes
class TrainableVariableTest(tf.test.TestCase, parameterized.TestCase):
def testName(self):
mod_name = "unique_name"
with tf.variable_scope("scope"):
mod = snt.TrainableVariable(name=mod_name, shape=[1])
self.assertEqual(mod.scope_name, "scope/" + mod_name)
self.assertEqual(mod.module_name, mod_name)
def testInitialization(self):
# Checks that the module initialization correctly sets the shape of the
# internal variable w.
shape = [1, 2, 3]
var = snt.TrainableVariable(
shape=shape,
dtype=tf.float32,
initializers={"w": tf.zeros_initializer()})
# We need to connect the module to the graph in order to inspect its
# variables
var()
self.assertEqual(var.w.get_shape(), shape)
def testVariableInitialization(self):
# Check that a simple operation involving the TrainableVariable
# matches the result of the corresponding operation in numpy
np.random.seed(100)
types = (tf.float16, tf.float32, tf.float64)
tol = (1e-2, 1e-6, 1e-9)
tolerance_map = dict(zip(types, tol))
lhs_shape = [3, 4]
rhs_shape = [4, 6]
for dtype in types:
lhs_matrix = np.random.randn(*lhs_shape).astype(dtype.as_numpy_dtype)
x = tf.constant(lhs_matrix)
var = snt.TrainableVariable(shape=rhs_shape,
dtype=dtype,
initializers={"w": _test_initializer()})
y = tf.matmul(x, var())
self.evaluate(tf.global_variables_initializer())
product, w = self.evaluate([y, var.w])
self.assertAllClose(product,
np.dot(
lhs_matrix.astype(dtype.as_numpy_dtype),
w.astype(dtype.as_numpy_dtype)),
atol=tolerance_map[dtype],
rtol=tolerance_map[dtype])
def testInvalidInitializationParameters(self):
variable_name = "trainable_variable"
with self.assertRaisesRegexp(KeyError, "Invalid initializer keys.*"):
snt.TrainableVariable(
name=variable_name,
shape=[1],
initializers={"w": tf.truncated_normal_initializer(stddev=1.0),
"extra": tf.truncated_normal_initializer(stddev=1.0)})
with self.assertRaisesRegexp(KeyError, "Invalid initializer keys.*"):
snt.TrainableVariable(
name=variable_name,
shape=[1],
initializers={"not_w": tf.truncated_normal_initializer(stddev=1.0)})
err = "Initializer for 'w' is not a callable function"
with self.assertRaisesRegexp(TypeError, err):
snt.TrainableVariable(name=variable_name,
shape=[1],
initializers={"w": tf.zeros([1, 2, 3])})
def testCallBeforeInstantiation(self):
variable_name = "trainable_variable"
var = snt.TrainableVariable(name=variable_name, shape=[1])
err = r"Variables in {} not instantiated yet.*".format(variable_name)
with self.assertRaisesRegexp(snt.NotConnectedError, err):
var.get_variables()
err = r"Variables in {} not instantiated yet.*".format(variable_name)
with self.assertRaisesRegexp(snt.NotConnectedError, err):
_ = var.w
def testInvalidPartitionerParameters(self):
with self.assertRaisesRegexp(KeyError, "Invalid partitioner keys.*"):
snt.TrainableVariable(
shape=[1],
partitioners={"not_w": tf.fixed_size_partitioner(num_shards=2)})
err = "Partitioner for 'w' is not a callable function"
with self.assertRaisesRegexp(TypeError, err):
snt.TrainableVariable(
shape=[1],
partitioners={"w": tf.zeros([1, 2, 3])})
def testInvalidRegularizationParameters(self):
variable_name = "trainable_variable"
with self.assertRaisesRegexp(KeyError, "Invalid regularizer keys.*"):
snt.TrainableVariable(
name=variable_name,
shape=[1],
regularizers={"not_w": contrib_layers.l1_regularizer(scale=0.5)})
err = "Regularizer for 'w' is not a callable function"
with self.assertRaisesRegexp(TypeError, err):
snt.TrainableVariable(name=variable_name, shape=[1],
regularizers={"w": tf.zeros([1, 2, 3])})
def testRegularizersInRegularizationLosses(self):
variable_name = "trainable_variable"
w_regularizer = contrib_layers.l1_regularizer(scale=0.5)
var = snt.TrainableVariable(
name=variable_name, shape=[1], regularizers={"w": w_regularizer})
var()
regularizers = tf.get_collection(
tf.GraphKeys.REGULARIZATION_LOSSES)
if tf.executing_eagerly():
# Tensor name is not supported in eager mode.
self.assertLen(regularizers, 1)
else:
self.assertRegexpMatches(regularizers[0].name, ".*l1_regularizer.*")
def testPartitioners(self):
if tf.executing_eagerly():
self.skipTest("Partitioned variables are not supported in eager mode.")
partitioners = {
"w": tf.variable_axis_size_partitioner(10000),
}
var = snt.TrainableVariable(
shape=[10, 13],
partitioners=partitioners)
var()
self.assertEqual(type(var.w), variables.PartitionedVariable)
@parameterized.parameters(
(True,),
(False,))
def testCustomGetter(self, with_stop_gradient):
if tf.executing_eagerly():
self.skipTest("tf.gradients is not supported when executing eagerly.")
def maybe_stop_gradients_custom_getter(getter, *args, **kwargs):
actual_variable = getter(*args, **kwargs)
if with_stop_gradient:
return tf.stop_gradient(actual_variable)
else:
return actual_variable
var = snt.TrainableVariable(
shape=(), custom_getter=maybe_stop_gradients_custom_getter,
name="non_trainable_variable")
output = (var() * tf.constant(1.0)) ** 2
# We need to differentiate with respect to the actual variable object,
# rather than var.w which is the output of the custom_getter (and possibly
# the stop_gradient).
actual_var = var.get_variables()[0]
grads = tf.gradients(output, actual_var)
    # Gradient may or may not exist depending on the custom getter.
if with_stop_gradient:
self.assertIsNone(grads[0])
else:
self.assertIsNotNone(grads[0])
@contrib_eager.run_all_tests_in_graph_and_eager_modes
class BatchReshapeTest(tf.test.TestCase, parameterized.TestCase):
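  # snt.BatchReshape keeps the first `preserve_dims` (default 1) dimensions of
  # the input and reshapes the remaining dimensions to `shape`, where a single
  # -1 wildcard is inferred from the input size, as the cases below exercise.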
def testName(self):
mod_name = "unique_name"
with tf.variable_scope("scope"):
mod = snt.BatchReshape(name=mod_name, shape=[-1])
self.assertEqual(mod.scope_name, "scope/" + mod_name)
self.assertEqual(mod.module_name, mod_name)
def testReshape(self):
batch_size = 10
in_shape = [2, 3, 4, 5]
out_shape = [2 * 3, 5, 4]
assert np.prod(in_shape) == np.prod(out_shape)
inputs = tf.ones(dtype=tf.float32, shape=[batch_size] + in_shape)
mod = snt.BatchReshape(shape=out_shape)
output = mod(inputs)
self.assertEqual(output.get_shape(), [batch_size] + out_shape)
def testInvalidReshapeParameters(self):
batch_size = 10
in_shape = [2, 3, 4, 5]
inputs = tf.ones(dtype=tf.float32, shape=[batch_size] + in_shape)
# Shape array has invalid format
err = "Wildcard -1 can appear only once in desired output shape. "
with self.assertRaisesRegexp(ValueError, err):
output_invalid_shape_format = [-1, -1]
snt.BatchReshape(shape=output_invalid_shape_format)(inputs)
err = ("Desired shape can only contain positive integral numbers "
"and the wildcard -1. ")
with self.assertRaisesRegexp(ValueError, err):
output_invalid_shape_format = [2, 3, -2]
snt.BatchReshape(shape=output_invalid_shape_format)(inputs)
# Shape array contains invalid entries
err = ("Desired shape can only contain positive integral numbers "
"and the wildcard -1. ")
with self.assertRaisesRegexp(ValueError, err):
invalid_shape_type = [7, "string"]
snt.BatchReshape(shape=invalid_shape_type)(inputs)
# Incompatible input and output shapes
err = "Output shape is incompatible with input shape"
with self.assertRaisesRegexp(ValueError, err):
out_shape = [2 * 2, 5, 4]
snt.BatchReshape(shape=out_shape)(inputs)
# Checks the 2D case.
with self.assertRaisesRegexp(ValueError, err):
snt.BatchReshape(shape=[batch_size, 1])(tf.zeros([batch_size, 2]))
def testCallable(self):
inputs = tf.ones(dtype=tf.float32, shape=[2, 3])
out_shape_lambda = lambda: [3]
mod = snt.BatchReshape(shape=out_shape_lambda)
output = mod(inputs)
self.assertEqual(output.get_shape(), [2, 3])
def testInferShape(self):
batch_size = 10
in_shape = [2, 3, 4, 5]
out_size = [2, -1, 5]
correct_out_size = [2, 3 * 4, 5]
inputs = tf.ones(dtype=tf.float32, shape=[batch_size] + in_shape)
mod = snt.BatchReshape(shape=out_size)
output = mod(inputs)
self.assertEqual(output.get_shape(), [batch_size] + correct_out_size)
def testAddDimensions(self):
batch_size = 10
in_shape = []
out_size = [1, 1]
correct_out_size = [1, 1]
inputs = tf.ones(dtype=tf.float32, shape=[batch_size] + in_shape)
mod = snt.BatchReshape(shape=out_size)
output = mod(inputs)
self.assertEqual(output.get_shape(), [batch_size] + correct_out_size)
# Transposition should also work
mod_t = mod.transpose()
t_output = mod_t(output)
self.assertEqual(t_output.get_shape(), [batch_size] + in_shape)
def testNoReshapeNeeded(self):
batch_size = 10
if not tf.executing_eagerly():
in_shape = [None]
out_size = [-1]
inputs = tf.placeholder(tf.float32, shape=[batch_size] + in_shape)
mod = snt.BatchReshape(shape=out_size)
output = mod(inputs)
self.assertIs(output, inputs)
in_shape = [10]
out_size = [10]
inputs = tf.ones(dtype=tf.float32, shape=[batch_size] + in_shape)
mod = snt.BatchReshape(shape=out_size)
output = mod(inputs)
self.assertIs(output, inputs)
@parameterized.named_parameters(
("BadUnknown1", (None,), (5,)),
("BadUnknown2", (None, None), (5,)),
("BadUnknown3", (None, None), (5, 5)),
("BadUnknown4", (5, None), (5, 5)),
("BadUnknown5", (None, 5), (5, 5)),
)
def testBadUnknownNonPreservedDimensions(self, input_shape, output_shape):
if tf.executing_eagerly():
self.skipTest("Partial shapes are not supported in eager mode.")
preserved_shape = (10,)
shape = preserved_shape + input_shape
preserve_dims = len(preserved_shape)
inputs = tf.placeholder(tf.float32, shape)
mod = snt.BatchReshape(shape=output_shape,
preserve_dims=preserve_dims)
err = "Unknown non-preserved dimensions are not allowed"
with self.assertRaisesRegexp(ValueError, err):
_ = mod(inputs)
def testFlatten(self):
batch_size = 10
in_shape = [2, 3, 4, 5]
out_size = [-1]
inputs = tf.ones(dtype=tf.float32, shape=[batch_size] + in_shape)
mod = snt.BatchReshape(shape=out_size)
output = mod(inputs)
flattened_shape = np.prod(in_shape, dtype=int)
self.assertEqual(output.get_shape(), [batch_size, flattened_shape])
def testUnknown(self):
if tf.executing_eagerly():
self.skipTest("Partial shapes are not supported in eager mode.")
batch_size = None
in_shape = [2, 3, 4, 5]
out_size = [-1]
inputs = tf.placeholder(tf.float32, shape=[batch_size] + in_shape)
mod = snt.BatchReshape(shape=out_size)
output = mod(inputs)
flattened_shape = np.prod(in_shape, dtype=int)
self.assertEqual(output.get_shape().as_list(),
[batch_size, flattened_shape])
def testTranspose(self):
batch_size = 10
in_shape = [2, 3, 4, 5]
out_size = [2, -1, 5]
correct_out_size = [2, 3 * 4, 5]
inputs = tf.random_uniform(shape=[batch_size] + in_shape)
mod = snt.BatchReshape(shape=out_size)
mod_t = mod.transpose()
mod_t_t = mod_t.transpose()
intermediate_output = mod(inputs)
self.assertEqual(intermediate_output.get_shape(),
[batch_size] + correct_out_size)
output = mod_t(intermediate_output)
self.assertEqual(output.get_shape(), [batch_size] + in_shape)
further_output = mod_t_t(output)
self.assertEqual(further_output.get_shape(),
[batch_size] + correct_out_size)
input_data, out = self.evaluate([inputs, output])
self.assertAllClose(out, input_data)
def testInvalidPreserveDimsError(self):
with self.assertRaisesRegexp(ValueError, "preserve_dims"):
snt.BatchReshape((-1,), preserve_dims=0)
def testBuildDimError(self):
mod = snt.BatchReshape((-1,), preserve_dims=2)
input_tensor = tf.ones(dtype=tf.float32, shape=[50])
with self.assertRaisesRegexp(ValueError, "preserve_dims"):
mod(input_tensor)
def testBuildUnknown(self):
if tf.executing_eagerly():
self.skipTest(
"Inputs with unknown shapes are not supported in eager mode.")
mod = snt.BatchReshape(shape=(2, 9), preserve_dims=2)
shape = [50, None, 6, 3]
inputs = tf.placeholder(dtype=tf.float32, shape=shape)
output = mod(inputs)
self.assertEqual(output.get_shape().as_list(), [50, None, 2, 9])
@parameterized.named_parameters(
("Preserve1", (1,)),
("Preserve24", (2, 4)),
("Preserve?", (None,)),
("Preserve?5", (None, 5)),
("Preserve5?", (5, None)),
("Preserve??", (None, None)))
def testPreserve(self, preserve):
if None in preserve and tf.executing_eagerly():
self.skipTest(
"Inputs with unknown shapes are not supported in eager mode.")
shape = list(preserve) + [13, 84, 3, 2]
output_shape = [13, 21, 3, 8]
preserve_dims = len(preserve)
if None in shape:
inputs = tf.placeholder(dtype=tf.float32, shape=shape)
else:
inputs = tf.ones(dtype=tf.float32, shape=shape)
mod = snt.BatchReshape(shape=output_shape,
preserve_dims=preserve_dims)
output = mod(inputs)
self.assertEqual(output.get_shape().as_list(),
list(preserve) + output_shape)
@parameterized.named_parameters(
("Session1", (1,), (2, 3), (-1,)),
("Session2", (1, 7), (2, 3), (-1,)),
("Session3", (None,), (2, 3), (-1,)),
("Session4", (None, 5, None), (2, 3, 4), (4, 6)),
("Session5", (None, None, None), (2, 3, 4), (-1,)),
("Session6", (5, None, None), (1, 3, 1), (-1,)),
("Session7", (1,), (4, 3), (2, 2, 1, 3)),
("Session8", (None,), (4, 3), (2, 2, 1, 3)),
("Session9", (1, None, 5, None), (4, 3), (2, 2, -1, 3)))
def testRun(self, preserve, trailing_in, trailing_out):
if tf.executing_eagerly():
self.skipTest("Inputs with unknown shapes are not supported in eager.")
rng = np.random.RandomState(0)
input_shape = preserve + trailing_in
output_shape = preserve + np.zeros(trailing_in).reshape(trailing_out).shape
inputs = tf.placeholder(tf.float32, input_shape)
mod = snt.BatchReshape(shape=trailing_out,
preserve_dims=len(preserve))
output = mod(inputs)
self.assertEqual(output.get_shape().as_list(), list(output_shape))
actual_input_shape = [13 if i is None else i for i in input_shape]
expected_output_shape = [13 if i is None else i for i in output_shape]
actual_input = rng.rand(*actual_input_shape).astype(np.float32)
expected_output = actual_input.reshape(expected_output_shape)
with self.test_session() as sess:
actual_output = sess.run(output, feed_dict={inputs: actual_input})
self.assertAllEqual(actual_output, expected_output)
@contrib_eager.run_all_tests_in_graph_and_eager_modes
class MergeLeadingDimsTest(tf.test.TestCase, parameterized.TestCase):
"""Tests the merge_leading_dims function."""
@parameterized.named_parameters(
("Float", 1.0),
("Integer", 1),
("Boolean", False))
def testScalarInput(self, scalar):
"""Tests if a statically shaped scalar stays a scalar."""
# Act.
result = basic.merge_leading_dims(scalar)
# Assert.
# This should be the same object we passed in.
self.assertIs(result, scalar)
def testExceptionUnknownRank(self):
"""Checks if an exception is thrown if the rank of the tensor is unknown."""
if tf.executing_eagerly():
self.skipTest("Unknown input shapes are not supported in eager mode.")
# Arrange.
tensor_scalar = tf.placeholder(dtype=tf.float32)
# Act / assert.
err = "unknown rank"
with self.assertRaisesRegexp(ValueError, err):
basic.merge_leading_dims(tensor_scalar)
@parameterized.parameters(
([3, 5, 7, 11, None, 13], [3 * 5 * 7, 11, None, 13]),
([3, None, 7, 11, None, 13], [None, 11, None, 13]),
)
def testPartialShape(self, input_shape, expected_output_shape):
"""Tests that resulting partial shape is best guess.."""
if tf.executing_eagerly():
self.skipTest("Partial input shapes are not supported in eager mode.")
input_ = tf.placeholder(tf.float32, shape=input_shape)
output = basic.merge_leading_dims(input_, 3)
self.assertEqual(output.shape.as_list(), expected_output_shape)
@contrib_eager.run_all_tests_in_graph_and_eager_modes
class BatchFlattenTest(tf.test.TestCase, parameterized.TestCase):
def testName(self):
mod_name = "unique_name"
with tf.variable_scope("scope"):
mod = snt.BatchFlatten(name=mod_name)
self.assertEqual(mod.scope_name, "scope/" + mod_name)
self.assertEqual(mod.module_name, mod_name)
def testFlatten(self):
batch_size = 10
in_shape = [2, 3, 4, 5]
inputs = tf.ones(dtype=tf.float32, shape=[batch_size] + in_shape)
mod = snt.BatchFlatten()
output = mod(inputs)
flattened_size = np.prod(in_shape, dtype=int)
self.assertEqual(output.get_shape(), [batch_size, flattened_size])
@parameterized.parameters(1, 2, 3, 4)
def testPreserveDimsOk(self, preserve_dims):
in_shape = [10, 2, 3, 4]
inputs = tf.ones(dtype=tf.float32, shape=in_shape)
mod = snt.BatchFlatten(preserve_dims=preserve_dims)
output = mod(inputs)
flattened_shape = (in_shape[:preserve_dims] +
[np.prod(in_shape[preserve_dims:], dtype=int)])
self.assertEqual(output.get_shape(), flattened_shape)
@parameterized.parameters(5, 6, 7, 10)
def testPreserveDimsError(self, preserve_dims):
in_shape = [10, 2, 3, 4]
inputs = tf.ones(dtype=tf.float32, shape=in_shape)
err = "Input tensor has 4 dimensions"
mod = snt.BatchFlatten(preserve_dims=preserve_dims)
with self.assertRaisesRegexp(ValueError, err):
_ = mod(inputs)
def testFlattenWithZeroDim(self):
inputs = tf.ones(dtype=tf.float32, shape=[1, 0])
output = snt.BatchFlatten()(inputs)
self.assertEqual(output.get_shape(), [1, 0])
@contrib_eager.run_all_tests_in_graph_and_eager_modes
class FlattenTrailingDimensionsTest(tf.test.TestCase, parameterized.TestCase):
def testName(self):
mod_name = "unique_name"
with tf.variable_scope("scope"):
mod = snt.FlattenTrailingDimensions(dim_from=2, name=mod_name)
self.assertEqual(mod.scope_name, "scope/" + mod_name)
self.assertEqual(mod.module_name, mod_name)
def testInvalidFlattenFromError(self):
with self.assertRaisesRegexp(ValueError, "dim_from"):
snt.FlattenTrailingDimensions(dim_from=0)
def testBuildDimError(self):
mod = snt.FlattenTrailingDimensions(dim_from=2)
input_tensor = tf.ones(dtype=tf.float32, shape=[50])
with self.assertRaisesRegexp(ValueError, "dim_from"):
mod(input_tensor)
def testBuildUnknown(self):
if tf.executing_eagerly():
self.skipTest("Partial shapes are not supported in eager mode.")
mod = snt.FlattenTrailingDimensions(dim_from=2)
shape = [50, None, 5]
inputs = tf.placeholder(tf.float32, shape)
output = mod(inputs)
self.assertEqual(output.get_shape().as_list(), shape)
@parameterized.named_parameters(
("BatchSize1", 1),
("BatchSize5", 5),
("BatchSize?", None))
def testFlatten(self, batch_size):
if tf.executing_eagerly() and batch_size is None:
self.skipTest("Unknown batch size not supported in eager mode.")
shape = [batch_size, 5, 84, 84, 3, 2]
if batch_size is None:
inputs = tf.placeholder(dtype=tf.float32, shape=shape)
else:
inputs = tf.ones(dtype=tf.float32, shape=shape)
for dim_from in xrange(1, len(shape)):
mod = snt.FlattenTrailingDimensions(dim_from)
output = mod(inputs)
trailing = np.prod(shape[dim_from:], dtype=int)
self.assertEqual(output.get_shape().as_list(),
shape[:dim_from] + [trailing])
@parameterized.named_parameters(
("BatchSize1", 1),
("BatchSize5", 5),
("BatchSize?", None))
def testTranspose(self, batch_size):
if tf.executing_eagerly() and batch_size is None:
self.skipTest("Unknown batch size not supported in eager mode.")
mod = snt.FlattenTrailingDimensions(dim_from=4)
mod_trans = mod.transpose()
initial_shape = [batch_size, 5, 84, 84, 3, 2]
if batch_size is None:
original = tf.placeholder(dtype=tf.float32, shape=initial_shape)
else:
original = tf.ones(dtype=tf.float32, shape=initial_shape)
flat = mod(original)
self.assertEqual(flat.get_shape().as_list(), initial_shape[:4] + [6])
final = mod_trans(flat)
self.assertEqual(final.get_shape().as_list(), initial_shape)
@contrib_eager.run_all_tests_in_graph_and_eager_modes
class BatchApplyTest(tf.test.TestCase, parameterized.TestCase):
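  # snt.BatchApply merges the first `n_dims` (default 2) dimensions of every
  # tensor input, applies the wrapped module or op, and then splits the merged
  # dimension of the result back out, which is what the shape and computation
  # checks below rely on.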
def testName(self):
mod_name = "unique_name"
with tf.variable_scope("scope"):
mod = snt.BatchApply(name=mod_name, module_or_op=snt.Linear(2))
self.assertEqual(mod.scope_name, "scope/" + mod_name)
self.assertEqual(mod.module_name, mod_name)
@parameterized.parameters(False, True)
def testInferShape(self, test_with_none):
if tf.executing_eagerly() and test_with_none:
self.skipTest("Inferring input shapes not supported in eager mode.")
if test_with_none:
in_shape = [2, None, 4]
inputs = tf.placeholder(dtype=tf.float32, shape=in_shape)
else:
in_shape = [2, 3, 4]
inputs = tf.ones(dtype=tf.float32, shape=in_shape)
hidden_size = 5
out_shape1 = in_shape[:2] + [hidden_size]
out_shape2 = in_shape
linear = snt.Linear(hidden_size)
merge_linear = snt.BatchApply(module_or_op=linear)
outputs1 = merge_linear(inputs)
self.assertEqual(outputs1.get_shape().as_list(), out_shape1)
merge_tanh = snt.BatchApply(module_or_op=tf.tanh)
outputs2 = merge_tanh(inputs)
self.assertEqual(outputs2.get_shape().as_list(), out_shape2)
def testComputation(self):
np.random.seed(100)
in_shape = [2, 3, 4]
in_shape_flat = [6, 4]
hidden_size = 5
out_shape1 = in_shape[:2] + [hidden_size]
out_shape2 = in_shape
inputs = tf.random_uniform(shape=in_shape)
inputs_flat = tf.reshape(inputs, shape=in_shape_flat)
linear = snt.Linear(hidden_size,
initializers={"w": _test_initializer(),
"b": _test_initializer()})
merge_linear = snt.BatchApply(module_or_op=linear)
outputs1 = merge_linear(inputs)
outputs1_flat = linear(inputs_flat)
merge_tanh = snt.BatchApply(module_or_op=tf.tanh)
outputs2 = merge_tanh(inputs)
outputs2_flat = merge_tanh(inputs_flat)
self.evaluate(tf.global_variables_initializer())
out1, out_flat1 = self.evaluate([outputs1, outputs1_flat])
out2, out_flat2 = self.evaluate([outputs2, outputs2_flat])
self.assertAllClose(out1, out_flat1.reshape(out_shape1))
self.assertAllClose(out2, out_flat2.reshape(out_shape2))
def testVariables(self):
hidden_size = 5
in_shape = [2, 3, 4]
inputs = tf.ones(dtype=tf.float32, shape=in_shape)
linear = snt.Linear(hidden_size)
merge_linear = snt.BatchApply(module_or_op=linear)
merge_tanh = snt.BatchApply(module_or_op=tf.tanh)
merge_linear(inputs)
merge_tanh(inputs)
# BatchApply doesn't contain any variables inside scope.
self.assertEqual(merge_linear.get_variables(), ())
self.assertEqual(merge_tanh.get_variables(), ())
def testOverTwoDims(self):
hidden_size = 42
in_shape = (3, 4, 5, 6)
expected_out_shape = in_shape[:-1] + (hidden_size,)
inputs = tf.ones(dtype=tf.float32, shape=in_shape)
linear = snt.Linear(output_size=hidden_size)
merge_linear = snt.BatchApply(module_or_op=linear, n_dims=3)
output = merge_linear(inputs)
self.evaluate(tf.global_variables_initializer())
out_np = self.evaluate(output)
self.assertEqual(expected_out_shape, out_np.shape)
def testDifferentOutputStructure(self):
in1 = np.random.randn(3, 5, 7)
in2 = np.random.randn(3, 5, 11, 8)
inputs = [tf.constant(in1), tf.constant(in2)]
def build(inputs):
a, b = inputs
a.get_shape().assert_is_compatible_with([3 * 5, 7])
b.get_shape().assert_is_compatible_with([3 * 5, 11, 8])
return b
op = snt.Module(build)
module = snt.BatchApply(op)
output = module(inputs)
out_np = self.evaluate(output)
self.assertAllEqual(in2, out_np)
def testNested(self):
# Make a complicated nested input, where we want to flatten the first
    # dimensions of each Tensor before applying the module.
ab_tuple = collections.namedtuple("ab_tuple", "a, b")
ab = ab_tuple(
a=tf.ones(dtype=tf.float32, shape=[3, 4, 5]),
b=(tf.ones(dtype=tf.float32, shape=[3, 4, 7]),
tf.ones(dtype=tf.float32, shape=[3, 4, 8])))
class SizeChecker(snt.AbstractModule):
"""Dummy module checking input is correct structure & size."""
def __init__(self, tester, name="size_checker"):
super(SizeChecker, self).__init__(name=name)
self._tester = tester
def _build(self, inputs):
# Structure of the nesting should be the same, even though the Tensors
# will have been reshaped at this point.
snt.nest.assert_same_structure(ab, inputs)
self._tester.assertListEqual(inputs.a.get_shape().as_list(), [12, 5])
self._tester.assertListEqual(inputs.b[0].get_shape().as_list(), [12, 7])
self._tester.assertListEqual(inputs.b[1].get_shape().as_list(), [12, 8])
return inputs # Return the inputs unmodified
output = snt.BatchApply(module_or_op=SizeChecker(self), n_dims=2)(ab)
snt.nest.assert_same_structure(output, ab)
self.assertShapeEqual(np.zeros((3, 4, 5)), output.a)
self.assertShapeEqual(np.zeros((3, 4, 7)), output.b[0])
self.assertShapeEqual(np.zeros((3, 4, 8)), output.b[1])
def testInputExampleIndex(self):
in1 = tf.random_normal((3, 5))
in2 = tf.random_normal((3, 9))
def build(inputs):
a, b = inputs
a.get_shape().assert_is_compatible_with([3 * 5])
b.get_shape().assert_is_compatible_with([3 * 9])
return b
op = snt.Module(build)
# Checks an error is thrown when the input example contains a different
# shape for the leading dimensions as the output.
exc = tf.errors.InvalidArgumentError if tf.executing_eagerly() else (
ValueError)
with self.assertRaises(exc):
snt.BatchApply(op, n_dims=2, input_example_index=0)((in1, in2))
# Check correct operation when the specified input example contains the same
# shape for the leading dimensions as the output.
output = snt.BatchApply(op, n_dims=2, input_example_index=1)((in1, in2))
in2_np, out_np = self.evaluate([in2, output])
self.assertAllEqual(in2_np, out_np)
def testMultipleArgs(self):
in1 = np.random.randn(2, 3, 4, 5)
in2 = np.random.randn(2, 3, 5, 8)
module = snt.BatchApply(tf.matmul)
output = module(in1, in2)
output.get_shape().assert_is_compatible_with([2, 3, 4, 8])
expected_output = tf.matmul(in1, in2)
out_expected, out_result = self.evaluate([expected_output, output])
self.assertAllClose(out_expected, out_result)
def testKWArgs(self):
in1 = np.random.randn(2, 3, 4, 5)
in2 = np.random.randn(2, 3, 5, 8)
module = snt.BatchApply(tf.matmul)
output = module(a=in1, b=in2)
output.get_shape().assert_is_compatible_with([2, 3, 4, 8])
expected_output = tf.matmul(in1, in2)
out_expected, out_result = self.evaluate([expected_output, output])
self.assertAllClose(out_expected, out_result)
def testHandlesReturnedNone(self):
def fn(input_):
del input_
return None
result = snt.BatchApply(fn)(tf.zeros([1, 1]))
self.assertEqual(result, None)
def testSomeInputsAreNone(self):
in1 = np.random.randn(2, 3, 4, 5)
in2 = np.random.randn(2, 3, 5, 8)
in3 = None
def build(input1, input2, input3):
output = tf.matmul(input1, input2)
if input3 is not None:
        output = tf.matmul(output, input3)
return output
module = snt.BatchApply(build)
output = module(in1, in2, in3)
output.get_shape().assert_is_compatible_with([2, 3, 4, 8])
expected_output = tf.matmul(in1, in2)
out_expected, out_result = self.evaluate([expected_output, output])
self.assertAllClose(out_expected, out_result)
@parameterized.named_parameters(
("flagArgTrue", [True], {}),
("flagArgFalse", [False], {}),
("flagKwargTrue", [], {"is_training": True}),
("flagKwargFalse", [], {"is_training": False}))
def testNonTensor(self, flag_args, flag_kawargs):
"""Tests if non-tensor inputs are simply forwarded to the module."""
# Arrange.
# We work around the Python closure issue by writing to a list instead of
# a primitive variable.
received_flag_value = [None]
x = tf.ones(shape=(5, 3, 10), dtype=tf.float32)
def _build(inputs, is_training):
"""Builds a network that requires a flag at construction time."""
net = snt.Linear(output_size=10)(inputs)
net = snt.BatchNorm()(net, is_training=is_training)
# We record the value of the flag here to make sure that the value
# is correctly passed on to this module.
received_flag_value[0] = is_training
return net
# Act.
snt.BatchApply(snt.Module(build=_build))(x, *flag_args, **flag_kawargs)
# Assert.
self.assertIsNotNone(received_flag_value[0])
# Recover the flag value from the test inputs.
flag_value = nest.flatten_iterable([flag_args, flag_kawargs])[0]
self.assertEqual(received_flag_value[0], flag_value)
@contrib_eager.run_all_tests_in_graph_and_eager_modes
class SliceByDimTest(tf.test.TestCase):
def testName(self):
mod_name = "unique_name"
with tf.variable_scope("scope"):
mod = snt.SliceByDim(name=mod_name, dims=[0, 2],
begin=[0, 0], size=[2, 4])
self.assertEqual(mod.scope_name, "scope/" + mod_name)
self.assertEqual(mod.module_name, mod_name)
def testInferShape(self):
in_shape = [2, 3, 4, 5, 6]
dims = [0, 2, 4]
begin = [0, 1, 2]
size = [1, 2, 3]
out_shape = [1, 3, 2, 5, 3]
inputs = tf.ones(dtype=tf.float32, shape=in_shape)
mod = snt.SliceByDim(dims=dims, begin=begin, size=size)
output = mod(inputs)
self.assertEqual(output.get_shape(), out_shape)
def testComparison(self):
# Here we compare the output with the tf.slice equivalent.
in_shape = [2, 3, 4]
inputs = tf.random_uniform(shape=in_shape)
dims = [0, 2]
begin = [1, 2]
size = [1, 2]
mod = snt.SliceByDim(dims=dims, begin=begin, size=size)
output = mod(inputs)
begin_tf = [1, 0, 2]
size_tf = [1, -1, 2]
ref_output = tf.slice(inputs, begin=begin_tf, size=size_tf)
actual, expected = self.evaluate([output, ref_output])
self.assertAllEqual(actual, expected)
def testComputation(self):
inputs = tf.constant(dtype=tf.int32, value=[[1, 2, 3], [1, 2, 3]])
dims = [0, 1]
begin = [0, 1]
size = [1, 2]
mod = snt.SliceByDim(dims=dims, begin=begin, size=size)
output = mod(inputs)
actual = self.evaluate(output)
expected = [[2, 3]]
self.assertAllEqual(actual, expected)
def testNegativeDim(self):
inputs = tf.constant(dtype=tf.int32, value=[[1, 2, 3], [4, 5, 6]])
dims = [0, -1]
begin = [0, 1]
size = [-1, 2]
mod = snt.SliceByDim(dims=dims, begin=begin, size=size)
output = mod(inputs)
actual = self.evaluate(output)
expected = [[2, 3], [5, 6]]
self.assertAllEqual(actual, expected)
def testInvalidSliceParameters(self):
dims = [0, 2, 4]
begin = [0, 0, 0]
size = [1, 2, 3]
err = "begin must have the same length as dims: {}.".format(len(dims))
with self.assertRaisesRegexp(ValueError, err):
invalid_begin_format = [0, 0]
_ = snt.SliceByDim(
dims=dims, begin=invalid_begin_format, size=size)
err = "size must have the same length as dims: {}.".format(len(dims))
with self.assertRaisesRegexp(ValueError, err):
invalid_size_format = [1, 2, 3, 4]
_ = snt.SliceByDim(
dims=dims, begin=begin, size=invalid_size_format)
def testInvalidTensorRank(self):
dims = [0, 2, 4]
begin = [0, 0, 0]
size = [1, 2, 3]
mod = snt.SliceByDim(dims=dims, begin=begin, size=size)
in_shape = [2, 3, 4, 5]
inputs = tf.ones(dtype=tf.float32, shape=in_shape)
err = "Rank of inputs must be at least {}.".format(np.max(dims) + 1)
with self.assertRaisesRegexp(ValueError, err):
_ = mod(inputs)
def testUniqueDimensions(self):
dims = [0, 0, 1]
begin = [0, 0, 0]
size = [1, 2, 3]
err = "dims must not have any repeated integers."
with self.assertRaisesRegexp(ValueError, err):
_ = snt.SliceByDim(dims=dims, begin=begin, size=size)
@contrib_eager.run_all_tests_in_graph_and_eager_modes
class TileByDimTest(tf.test.TestCase):
def testName(self):
mod_name = "unique_name"
with tf.variable_scope("scope"):
mod = snt.TileByDim(name=mod_name, dims=[0, 2], multiples=[1, 2])
self.assertEqual(mod.scope_name, "scope/" + mod_name)
self.assertEqual(mod.module_name, mod_name)
def testInferShape(self):
in_shape = [2, 3, 4, 5, 6]
dims = [0, 2, 4]
multiples = [1, 2, 3]
out_shape = [2, 3, 8, 5, 18]
inputs = tf.ones(dtype=tf.float32, shape=in_shape)
mod = snt.TileByDim(dims=dims, multiples=multiples)
output = mod(inputs)
self.assertEqual(output.get_shape(), out_shape)
def testComparison(self):
# Here we compare the output with the `tf.tile` equivalent.
in_shape = [2, 3, 4]
inputs = tf.random_uniform(shape=in_shape)
dims = [0, 2]
multiples = [2, 4]
mod = snt.TileByDim(dims=dims, multiples=multiples)
output = mod(inputs)
multiple_tf = [2, 1, 4]
ref_output = tf.tile(inputs, multiples=multiple_tf)
actual, expected = self.evaluate([output, ref_output])
self.assertAllEqual(actual, expected)
def testComputation(self):
inputs = tf.constant(dtype=tf.int32, value=[[1, 2, 3], [1, 2, 3]])
dims = [1]
multiples = [2]
mod = snt.TileByDim(dims=dims, multiples=multiples)
output = mod(inputs)
actual = self.evaluate(output)
expected = [[1, 2, 3, 1, 2, 3], [1, 2, 3, 1, 2, 3]]
self.assertAllEqual(actual, expected)
def testInvalidTileParameters(self):
dims = [0, 2, 4]
invalid_multiples_format = [1, 2]
err = "multiples must have the same length as dims: {}.".format(len(dims))
with self.assertRaisesRegexp(ValueError, err):
snt.TileByDim(dims=dims, multiples=invalid_multiples_format)
def testUniqueDimensions(self):
dims = [0, 0, 1]
multiples = [1, 2, 3]
err = "dims must not have any repeated integers."
with self.assertRaisesRegexp(ValueError, err):
snt.TileByDim(dims=dims, multiples=multiples)
@contrib_eager.run_all_tests_in_graph_and_eager_modes
class MergeDimsTest(tf.test.TestCase, parameterized.TestCase):
def testName(self):
mod_name = "unique_name"
with tf.variable_scope("scope"):
mod = snt.MergeDims(name=mod_name, start=0, size=2)
self.assertEqual(mod.scope_name, "scope/" + mod_name)
self.assertEqual(mod.module_name, mod_name)
def testInferShape(self):
in_shape = [2, 3, 4, 5, 6]
start = 1
size = 3
out_shape = [2, 3 * 4 * 5, 6]
inputs = tf.ones(dtype=tf.float32, shape=in_shape)
mod = snt.MergeDims(start=start, size=size)
output = mod(inputs)
self.assertEqual(output.get_shape(), out_shape)
def testInferShape_negStart(self):
in_shape = [2, 3, 4, 5, 6]
start = -4
size = 3
out_shape = [2, 3 * 4 * 5, 6]
inputs = tf.ones(dtype=tf.float32, shape=in_shape)
mod = snt.MergeDims(start=start, size=size)
output = mod(inputs)
self.assertEqual(output.get_shape(), out_shape)
@parameterized.parameters(
([2, None, 4, 5, 6],),
([None, None, 4, 5, 6],),
([2, 3, None, 5, 6],),
([2, None, None, None, 6],))
def testWithUndefinedDims(self, in_shape):
if tf.executing_eagerly():
self.skipTest("Inputs with partial unknown are not supported in eager.")
start = 2
size = 2
inputs = tf.placeholder(tf.float32, shape=in_shape)
mod = snt.MergeDims(start=start, size=size)
output = mod(inputs)
static_shape = in_shape
static_shape[2:4] = [None] if None in in_shape[2:4] else [4 * 5]
self.assertEqual(output.get_shape().as_list(), static_shape)
with self.test_session():
output = output.eval(feed_dict={inputs: np.zeros([2, 3, 4, 5, 6])})
self.assertEqual(list(output.shape), [2, 3, 4 * 5, 6])
def testWithUndefinedAndZeroDim(self):
if tf.executing_eagerly():
self.skipTest("Unspecified input shapes are not supported in eager mode.")
in_shape = [0, None, 2, 3]
start = 1
size = 2
inputs = tf.placeholder(tf.float32, shape=in_shape)
mod = snt.MergeDims(start=start, size=size)
output = mod(inputs)
self.assertEqual(output.get_shape().as_list(), [0, None, 3])
with self.test_session() as session:
output = session.run(output, feed_dict={inputs: np.zeros([0, 5, 2, 3])})
self.assertEqual(list(output.shape), [0, 10, 3])
def testComputation(self):
# Here we compare the output with the tf.reshape equivalent.
in_shape = [2, 3, 4, 5, 6]
inputs = tf.random_uniform(shape=in_shape)
start = 1
size = 2
mod = snt.MergeDims(start=start, size=size)
output = mod(inputs)
ref_output = tf.reshape(inputs, shape=[2, 3 * 4, 5, 6])
out = self.evaluate([output, ref_output])
self.assertAllEqual(out[0], out[1])
def testInvalidDimsParameters(self):
start = 3
invalid_size = 1
err = "`size` should be strictly greater than 1."
with self.assertRaisesRegexp(ValueError, err):
snt.MergeDims(start=start, size=invalid_size)
def testInvalidTensorRank(self):
start = 0
size = 4
mod = snt.MergeDims(start=start, size=size)
in_shape = [2, 3, 4]
inputs = tf.ones(dtype=tf.float32, shape=in_shape)
err = "Rank of inputs must be at least {}.".format(start + size)
with self.assertRaisesRegexp(ValueError, err):
mod(inputs)
def testNestedInput(self):
start = 0
size = 2
mod = snt.MergeDims(start=start, size=size)
namedtuple_type = collections.namedtuple("abc", ["a", "b", "c"])
nested_tensors = [
tf.random_uniform(shape=[3, 4, 5, 44]),
[
tf.random_uniform(shape=[101, 3]),
tf.random_uniform(shape=[4, 5, 123, 87]),
],
[
[tf.random_uniform(shape=[1, 2, 3, 4, 5])],
],
namedtuple_type(a=tf.random_uniform(shape=[3, 2, 1]),
b=tf.random_uniform(shape=[6, 8, 10, 12]),
c=tf.random_uniform(shape=[20, 10]))
]
merged_tensors = mod(nested_tensors)
nest.assert_same_structure(nested_tensors, merged_tensors)
for original_tensor, merged_tensor in zip(nest.flatten(nested_tensors),
nest.flatten(merged_tensors)):
original_shape = original_tensor.get_shape()
merged_shape = merged_tensor.get_shape()
self.assertEqual(original_shape.ndims - (size - 1),
merged_shape.ndims)
self.assertEqual(np.prod(original_shape[start:start + size], dtype=int),
merged_shape[start])
self.assertEqual(original_shape.num_elements(),
merged_shape.num_elements())
@contrib_eager.run_all_tests_in_graph_and_eager_modes
class SelectInputTest(tf.test.TestCase):
def testName(self):
mod_name = "unique_name"
with tf.variable_scope("scope"):
mod = snt.SelectInput(name=mod_name, idx=0)
self.assertEqual(mod.scope_name, "scope/" + mod_name)
self.assertEqual(mod.module_name, mod_name)
def testBasicSelect(self):
"""Test where idx is an integer."""
shape0 = [2, 3]
shape1 = [2, 3, 4]
input0 = tf.random_uniform(shape=shape0)
input1 = tf.random_uniform(shape=shape1)
mod = snt.SelectInput(idx=0)
output = mod(input0, input1)
output0 = tf.identity(input0)
out = self.evaluate([output, output0])
self.assertAllEqual(out[0], out[1])
def testTupleSelect(self):
"""Test where idx is a tuple."""
shape0 = [1, 2]
shape1 = [1, 2, 3]
shape2 = [1, 2, 3, 4]
input0 = tf.random_uniform(shape=shape0)
input1 = tf.random_uniform(shape=shape1)
input2 = tf.random_uniform(shape=shape2)
mod = snt.SelectInput(idx=(0, 2))
output = mod(input0, input1, input2)
output0 = tf.identity(input0)
output2 = tf.identity(input2)
out = self.evaluate([output, [output0, output2]])
self.assertAllEqual(out[0][0], out[1][0])
self.assertAllEqual(out[0][1], out[1][1])
def testNestedListSelect(self):
"""Test where idx is a nested list."""
shape0 = [1, 2]
shape1 = [1, 2, 3]
shape2 = [1, 2, 3, 4]
input0 = tf.random_uniform(shape=shape0)
input1 = tf.random_uniform(shape=shape1)
input2 = tf.random_uniform(shape=shape2)
mod = snt.SelectInput(idx=[2, [1, 0, 1]])
output = mod(input0, input1, input2)
output0 = tf.identity(input0)
output1 = tf.identity(input1)
output2 = tf.identity(input2)
out = self.evaluate([output, [output2, [output1, output0, output1]]])
self.assertAllEqual(out[0][0], out[1][0])
self.assertAllEqual(out[0][1][0], out[1][1][0])
self.assertAllEqual(out[0][1][1], out[1][1][1])
self.assertAllEqual(out[0][1][2], out[1][1][2])
def testInvalidIdxValue(self):
"""Checks error on invalid idx value."""
input1 = tf.ones(dtype=tf.float32, shape=[2, 3, 4, 5, 6])
input2 = tf.ones(dtype=tf.float32, shape=[7, 8])
invalid_idx = 2
mod = snt.SelectInput(idx=[invalid_idx])
err = (r"`idx` contains out of bound entries \(they should be in the "
r"range \[0, 2\)\)")
with self.assertRaisesRegexp(ValueError, err):
mod(input1, input2)
def testInvalidIdxType(self):
"""Checks error on invalid idx type."""
invalid_idx = 0.5
err = r"`idx` should be a \(nested\) array/tuple, or an integer."
with self.assertRaisesRegexp(TypeError, err):
snt.SelectInput(idx=invalid_idx)
if __name__ == "__main__":
tf.test.main()
| sonnet-1 | sonnet/python/modules/basic_test.py |
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Functions and modules for implementing Spectral Normalization.
This implementation follows the use in:
https://arxiv.org/abs/1802.05957
https://arxiv.org/abs/1805.08318
https://arxiv.org/abs/1809.11096
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
# Dependency imports
from sonnet.python.custom_getters import context
from sonnet.python.modules import base
from sonnet.python.modules import util
import tensorflow.compat.v1 as tf
def wrap_with_spectral_norm(module_class,
sn_kwargs=None,
pow_iter_collection=None):
"""Returns a constructor for the inner class with spectral normalization.
This function accepts a Sonnet AbstractModule class as argument (the class,
*not* an instance of that class) alongside an optional dictionary of keyword
arguments for the spectral_norm function, and returns a constructor which can
be treated identically to the constructor of the input class, but with
spectral normalization applied to the weights created by the class.
Internally, this is just a partially evaluated SpectralNormWrapper module.
`pow_iter_collection`, if not None, is treated as the name of a TensorFlow
global collection. Each time the module's weight matrix is accessed, ops are
built for performing one step of power iteration to approximate that weight's
first singular value, and ops are created for saving this new approximation in
an internal variable. At build-time the resulting object takes a special
boolean 'enable_power_iteration' keyword argument. If this is True (the
default), a control dependency on the operation for updating this internal
variable is attached to the returned weight. Otherwise, the update is *not*
attached as a control dependency, but an op is placed into the
`pow_iter_collection` global collection which causes the internal variable to
be updated. It is then up to the user to choose whether to run this update.
Args:
module_class: A constructor/class reference for a Sonnet module you would
like to wrap and automatically apply spectral normalization.
sn_kwargs: Keyword arguments to be passed to the spectral_norm function
in addition to the weight tensor.
pow_iter_collection: The name of a global collection for potentially
storing ops for updating internal variables.
Returns:
An snt.AbstractModule class representing the original with spectral norm.
"""
sn_kwargs = sn_kwargs or {}
return functools.partial(
SpectralNormWrapper, module_class, sn_kwargs, pow_iter_collection)
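# Illustrative usage (a sketch, not part of the library docs; `snt.Linear`,
# the `eps` value and the `inputs` tensor below are assumptions):
#
#   LinearWithSN = wrap_with_spectral_norm(snt.Linear, sn_kwargs={'eps': 1e-4})
#   linear = LinearWithSN(output_size=128)
#   outputs = linear(inputs)  # the 'w' variable is spectrally normalized.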
class SpectralNormWrapper(base.AbstractModule):
"""Wraps a Sonnet Module to selectively apply Spectral Normalization."""
def __init__(self, module, sn_kwargs, pow_iter_collection, *args, **kwargs):
"""Constructs a wrapped Sonnet module with Spectral Normalization.
The module expects a first argument which should be a Sonnet AbstractModule
and a second argument which is a dictionary which is passed to the inner
spectral_norm function as kwargs.
When connecting this module to the graph, the argument 'enable_power_iteration'
is treated specially for this wrapper (rather than being passed to the _build
method of the inner module). If enable_power_iteration is False, the
approximate first singular value for weights will *not* be updated based
on the inputs passed at the given _build call. However, an op for updating
the singular value will be placed into the pow_iter_collection global
collection.
If enable_power_iteration is True or not passed (the default), a control
dependency on the update op will be applied to the output of the _build
function. Regardless, the kwarg is deleted from the list of keywords passed to
the inner module.
Args:
module: A constructor/class reference for a Sonnet module you would like
to construct.
sn_kwargs: Keyword arguments to be passed to the spectral_norm function
in addition to the weight tensor.
pow_iter_collection: The name of a global collection for potentially
storing ops for updating internal variables.
*args: Construction-time arguments to the module.
**kwargs: Construction-time keyword arguments to the module.
"""
name = kwargs.get('name', 'sn') + '_wrapper'
# Our getter needs to be able to be disabled.
getter_immediate_update, getter_deferred_update = self.sn_getter(sn_kwargs)
w_getter = lambda g: util.custom_getter_router({'.*/w$': g}, lambda s: s)
getter_immediate_update = w_getter(getter_immediate_update)
getter_deferred_update = w_getter(getter_deferred_update)
self._context_getter = context.Context(
getter_immediate_update, default_getter=getter_deferred_update)
self.pow_iter_collection = pow_iter_collection
super(SpectralNormWrapper, self).__init__(
name=name, custom_getter=self._context_getter)
# Let's construct our model.
with self._enter_variable_scope():
self._module = module(*args, **kwargs)
def _build(self, *args, **kwargs):
if kwargs.pop('enable_power_iteration', True):
with self._context_getter:
return self._module(*args, **kwargs)
else:
return self._module(*args, **kwargs)
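# Sketch of the special build-time kwarg handled above (the `wrapped` instance
# and `inputs` are hypothetical):
#
#   out = wrapped(inputs)  # default: singular-value update runs via a control
#                          # dependency on the returned weight.
#   out = wrapped(inputs, enable_power_iteration=False)  # update op is instead
#                          # placed into `pow_iter_collection`; run it manually.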
def sn_getter(self, spectral_norm_kwargs):
"""Returns a curried spectral normalization Custom Getter."""
def getter_immediate_update(getter, *args, **kwargs):
w = getter(*args, **kwargs) # This is our variable.
w_spectral_normalized = spectral_norm(
w, update_collection=None, **spectral_norm_kwargs)['w_bar']
return w_spectral_normalized
def getter_deferred_update(getter, *args, **kwargs):
w = getter(*args, **kwargs) # This is our variable.
w_spectral_normalized = spectral_norm(
w, update_collection=self.pow_iter_collection,
**spectral_norm_kwargs)['w_bar']
return w_spectral_normalized
return getter_immediate_update, getter_deferred_update
def _l2_normalize(t, axis=None, eps=1e-12):
"""Normalizes along dimension `axis` using an L2 norm.
We use this over tf.nn.l2_normalize for numerical stability reasons.
Args:
t: A `Tensor`.
axis: Dimension along which to normalize, e.g. `1` to separately normalize
vectors in a batch. Passing `None` views `t` as a flattened vector when
calculating the norm (equivalent to Frobenius norm).
eps: Epsilon to avoid dividing by zero.
Returns:
A `Tensor` with the same shape as `t`.
"""
return t * tf.rsqrt(tf.reduce_sum(tf.square(t), axis, keepdims=True) + eps)
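# For illustration (assumed values): _l2_normalize(tf.constant([3., 4.]))
# evaluates to approximately [0.6, 0.8], since the L2 norm of [3, 4] is 5.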
def spectral_norm(weight,
num_iters=1,
update_collection=None,
eps=1e-4):
"""Spectral Weight Normalization.
Applies first-singular-value spectral normalization to weight and returns a
tensor equivalent to weight with spectral normalization applied. By default,
it also updates an internal variable that keeps track of the spectral value of
this weight matrix. If update_collection is not None, however, this function
does not update the variable automatically, instead placing an op for this
update into the global collection named by `update_collection`.
Args:
weight: The weight tensor which requires spectral normalization
num_iters: Number of SN iterations.
update_collection: The update collection for assigning the persisted variable
u0. If None, the function updates u0 during the forward pass. Otherwise the
assignment op is placed into the global collection of this name, and the
user needs to run the assignment explicitly.
eps: numerical stability constant > 0.
Returns:
A dictionary of:
w_bar: The normalized weight tensor
sigma: The estimated singular value for the weight tensor.
u0: The internal persisted variable.
"""
if num_iters < 1:
raise ValueError('num_iters must be a positive integer. {} given.'.format(
num_iters))
original_dtype = weight.dtype
weight = tf.cast(weight, tf.float32)
w_shape = weight.shape.as_list()
w_mat = tf.reshape(weight, [-1, w_shape[-1]])
u0 = tf.get_variable(
'u0', [1, w_shape[-1]],
initializer=tf.truncated_normal_initializer(),
trainable=False)
u0_ = u0
# Power iteration for the weight's singular value.
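# Each step computes v = l2_normalize(u W^T) and then u = l2_normalize(v W),
# so that sigma = v W u^T below approximates the largest singular value of
# w_mat, and w_mat / sigma has spectral norm close to 1.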
for _ in range(num_iters):
v0_ = _l2_normalize(tf.matmul(u0_, w_mat, transpose_b=True), eps=eps)
u0_ = _l2_normalize(tf.matmul(v0_, w_mat), eps=eps)
u0_ = tf.stop_gradient(u0_)
v0_ = tf.stop_gradient(v0_)
sigma = tf.squeeze(tf.matmul(tf.matmul(v0_, w_mat), u0_, transpose_b=True),
axis=[0, 1])
w_mat /= sigma
w_bar = tf.reshape(w_mat, w_shape)
# Potentially add a control dependency on u0s update.
if update_collection is None:
u_assign_ops = [u0.assign(u0_)]
with tf.control_dependencies(u_assign_ops):
w_bar = tf.identity(w_bar)
else:
tf.add_to_collection(update_collection, u0.assign(u0_))
return {
'w_bar': tf.cast(w_bar, original_dtype),
'sigma': tf.cast(sigma, original_dtype),
'u0': tf.cast(u0, original_dtype)
}
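# Illustrative direct call (a sketch; the scope, variable name and shape are
# assumptions):
#
#   with tf.variable_scope('layer'):
#     w = tf.get_variable('w', shape=[3, 3, 16, 32])
#     sn = spectral_norm(w, num_iters=1)
#     w_bar, sigma = sn['w_bar'], sn['sigma']  # use w_bar in place of w.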
| sonnet-1 | sonnet/python/modules/spectral_normalization.py |
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for sonnet transformer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import sonnet as snt
import tensorflow.compat.v1 as tf
class TransformerTowerTest(tf.test.TestCase):
def test_forward(self):
batch_size = 8
window_size = 15
value_size = 6
num_heads = 16
input_size = num_heads * value_size
output_size = 128
inputs = tf.random_normal([batch_size, window_size, input_size])
transformer = snt.nets.TransformerTower(
value_size=value_size,
num_heads=num_heads,
num_layers=3,
causal=False,
shared_attention=False,
output_size=output_size,
mlp_hidden_sizes=tuple([64]))
output, _ = transformer(inputs)
self.assertAllEqual(output.get_shape().as_list(),
[batch_size, window_size, output_size])
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(output)
def test_invalid_input(self):
batch_size = 8
window_size = 15
value_size = 6
num_heads = 16
# invalid input size because it is odd
input_size = num_heads * value_size + 1
output_size = 128
invalid_inputs = tf.random_normal([batch_size, window_size, input_size])
transformer = snt.nets.TransformerTower(
value_size=value_size,
num_heads=num_heads,
num_layers=3,
causal=False,
shared_attention=False,
output_size=output_size,
use_relative_positions=False,
mlp_hidden_sizes=tuple([64]))
with self.assertRaises(ValueError):
transformer(invalid_inputs)
class TransformerXLTest(tf.test.TestCase):
def check_memory_gradients(self,
state,
output,
session,
start=0,
end=-1,
zero=True):
"""Checks masking via norm of state gradient with respect to output.
Args:
state: transformer.AttentionCoreState.
output: tensor of model outputs.
session: tensorflow session.
start: inspect gradients from [start:] slots in memory.
end: inspect gradients up to [:end] slots in memory.
zero: if true, checks equal to zero, otherwise checks greater than zero.
"""
for state_i in state:
grad_i = tf.gradients(output, state_i)[0][:, start:end]
grad_norm_i = tf.reduce_sum(tf.square(grad_i))
grad_norm_np = session.run(grad_norm_i)
if zero:
self.assertEqual(grad_norm_np, 0)
else:
self.assertGreater(grad_norm_np, 0)
def test_forward(self):
batch_size = 8
window_size = 15
input_size = 16
output_size = 128
num_layers = 3
key_size = 4
value_size = 6
num_heads = 16
memory_size = 48
inputs = tf.random_normal([batch_size, window_size, input_size])
core_config = {
'value_size': value_size,
'key_size': key_size,
'num_heads': num_heads,
'num_layers': num_layers,
'causal': True,
'shared_attention': False,
'output_size': output_size,
'mlp_hidden_sizes': tuple([64]),
}
transformer_xl = snt.nets.transformer.TransformerXL(
core_config=core_config,
memory_size=memory_size,
chunk_size=window_size,
)
initial_state = transformer_xl.initial_state(batch_size)
output, next_state = transformer_xl(inputs, initial_state)
output2, final_state = transformer_xl(inputs, next_state)
self.assertAllEqual(output.get_shape().as_list(),
[batch_size, window_size, output_size])
self.assertEqual(len(next_state), num_layers)
def check_state_size(state_list):
for i in range(num_layers):
state = state_list[i]
self.assertAllEqual(state.get_shape().as_list(),
[batch_size, memory_size, value_size * num_heads])
check_state_size(next_state)
check_state_size(final_state)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(output)
sess.run(next_state)
sess.run(output2)
sess.run(final_state)
def test_mask_op(self):
logits = tf.zeros(shape=(1, 1, 5, 5))
masked_logits = logits + snt.nets.transformer.future_mask(
chunk_size=5, dtype=logits.dtype)
weights = tf.nn.softmax(logits)
masked_weights = tf.nn.softmax(masked_logits)
with self.test_session() as sess:
weights_v, masked_weights_v = sess.run([weights, masked_weights])
expected_weights_v = np.array([
[0.2, 0.2, 0.2, 0.2, 0.2],
[0.2, 0.2, 0.2, 0.2, 0.2],
[0.2, 0.2, 0.2, 0.2, 0.2],
[0.2, 0.2, 0.2, 0.2, 0.2],
[0.2, 0.2, 0.2, 0.2, 0.2],
]).reshape([1, 1, 5, 5])
self.assertAllClose(weights_v, expected_weights_v)
expected_masked_weights_v = np.array(
[[1. / 1, 0.00, 0.00, 0.00, 0.00], [1. / 2, 1. / 2, 0.00, 0.00, 0.00],
[1. / 3, 1. / 3, 1. / 3, 0.00, 0.00],
[1. / 4, 1. / 4, 1. / 4, 1. / 4, 0.00],
[1. / 5, 1. / 5, 1. / 5, 1. / 5, 1. / 5]]).reshape([1, 1, 5, 5])
self.assertAllClose(masked_weights_v, expected_masked_weights_v)
def test_masking_no_memory(self):
"""Checks that masking disallows information flow from future to present."""
batch_size = 1
value_size = 6
num_heads = 16
hidden_size = value_size * num_heads
seq_length = 3
decoder_input = tf.random_normal([batch_size, seq_length, hidden_size])
transformer = snt.nets.TransformerTower(
value_size=value_size,
num_heads=num_heads,
num_layers=3,
causal=True,
shared_attention=False,
output_size=hidden_size,
mlp_hidden_sizes=tuple([64]))
decoder_output, _ = transformer(decoder_input)
# For each time step of the output sequence, compute the
# derivative with respect to each component of whole input tensor.
# Sum over input and output channels.
gradients = []
for time_idx in range(seq_length):
tf.logging.info('Creating gradient ops for time %d/%d.' %
(time_idx, seq_length))
time_gradients = tf.gradients(
decoder_output[0, time_idx], # Sums over output channels
decoder_input)[0]
gradients.append(time_gradients)
gradients = tf.stack(gradients, 0)
tf.logging.info('Done creating gradient ops.')
with self.test_session() as session:
tf.global_variables_initializer().run()
tf.logging.info('Fetching gradient ops.')
output_v, grad_v = session.run([decoder_output, gradients])
tf.logging.info('Done fetching gradient ops.')
# Pick out the subset of derivatives which should be zero
# and test for exact equality with zero.
time_grad_v = np.sum( # Sum over input channels.
grad_v[:, 0, :], axis=2)
grad_triu = time_grad_v[np.triu_indices(seq_length, k=1)]
self.assertAllEqual(grad_triu, np.zeros_like(grad_triu))
# Make sure there are no nans in the output.
self.assertTrue(np.all(np.logical_not(np.isnan(output_v))))
def test_no_dropout_during_eval(self):
"""Checks that dropout is only applied during training, and not eval."""
batch_size = 2
sequence_length = 10
memory_size = 48
core_config = {
'key_size': 3,
'value_size': 4,
'num_heads': 5,
'num_layers': 2,
'dropout_rate': 0.5,
}
inputs = tf.ones([batch_size, sequence_length, 16], dtype=tf.float32)
transformer_xl = snt.nets.transformer.TransformerXL(
core_config, memory_size, chunk_size=sequence_length)
initial_state = transformer_xl.initial_state(batch_size, dtype=inputs.dtype)
eval_output, _ = transformer_xl(inputs, initial_state, is_training=False)
train_output, _ = transformer_xl(inputs, initial_state, is_training=True)
with self.test_session() as session:
tf.global_variables_initializer().run()
# Ensures dropout is being applied during training (output changes).
train_out_sum = session.run(tf.reduce_sum(train_output))
train_out_sum2 = session.run(tf.reduce_sum(train_output))
self.assertNotAlmostEqual(train_out_sum, train_out_sum2)
# Ensures dropout is not being applied during eval (output is same).
eval_out_sum = session.run(tf.reduce_sum(eval_output))
eval_out_sum2 = session.run(tf.reduce_sum(eval_output))
self.assertAlmostEqual(eval_out_sum, eval_out_sum2)
def test_zero_chunk_size(self):
"""Tests a chunk size of 0 corresponds to a regular RNN core."""
batch_size = 2
core_config = {
'key_size': 3,
'value_size': 4,
'num_heads': 5,
'num_layers': 2,
'dropout_rate': 0.5,
}
inputs = tf.ones([10, batch_size, 16], dtype=tf.float32)
transformer_xl = snt.nets.transformer.TransformerXL(
core_config, memory_size=8, chunk_size=0)
initial_state = transformer_xl.initial_state(batch_size)
output, final_state = tf.nn.dynamic_rnn(
transformer_xl, inputs, time_major=True, initial_state=initial_state)
with self.test_session() as session:
tf.global_variables_initializer().run()
session.run([output, final_state])
def test_zero_memory_size(self):
"""Tests a memory size of 0 corresponds to a regular RNN core."""
batch_size = 2
memory_size = 0
sequence_length = 10
core_config = {
'key_size': 3,
'value_size': 4,
'num_heads': 5,
'num_layers': 2,
'dropout_rate': 0.5,
}
inputs = tf.ones([batch_size, sequence_length, 16], dtype=tf.float32)
transformer_xl = snt.nets.transformer.TransformerXL(
core_config, memory_size=memory_size, chunk_size=sequence_length)
initial_state = transformer_xl.initial_state(batch_size)
output, final_state = transformer_xl(inputs, initial_state)
with self.test_session() as session:
tf.global_variables_initializer().run()
session.run([output, final_state])
def test_dynamic_batch_size(self):
"""Tests operation with changing batch size."""
memory_size = 0
sequence_length = 10
core_config = {
'key_size': 3,
'value_size': 4,
'num_heads': 5,
'num_layers': 2,
'dropout_rate': 0.5,
}
inputs = tf.placeholder(tf.float32, shape=(None, sequence_length, 16))
batch_size = tf.shape(inputs)[0]
transformer_xl = snt.nets.transformer.TransformerXL(
core_config, memory_size=memory_size, chunk_size=sequence_length)
initial_state = transformer_xl.initial_state(batch_size)
output, final_state = transformer_xl(inputs, initial_state)
with self.test_session() as session:
tf.global_variables_initializer().run()
batch_size_1 = 2
final_output_1, _ = session.run(
[output, final_state],
feed_dict={inputs: np.ones([batch_size_1, sequence_length, 16])})
self.assertAllEqual(final_output_1.shape[0], batch_size_1)
batch_size_2 = 4
final_output_2, _ = session.run(
[output, final_state],
feed_dict={inputs: np.ones([batch_size_2, sequence_length, 16])})
self.assertAllEqual(final_output_2.shape[0], batch_size_2)
class CompressiveTransformerTest(tf.test.TestCase):
def test_forward(self):
batch_size = 8
window_size = 18
input_size = 16
output_size = 128
num_layers = 3
key_size = 4
value_size = 6
num_heads = 16
em_memory_size = 18
cm_memory_size = 7
inputs = tf.random_normal([batch_size, window_size, input_size])
core_config = {
'value_size': value_size,
'key_size': key_size,
'num_heads': num_heads,
'num_layers': num_layers,
'causal': True,
'shared_attention': False,
'output_size': output_size,
'mlp_hidden_sizes': tuple([64]),
}
compressive_transformer = snt.nets.CompressiveTransformer(
core_config=core_config,
episodic_memory_size=em_memory_size,
compressed_memory_size=cm_memory_size,
chunk_size=window_size,
)
initial_state = compressive_transformer.initial_state(batch_size)
output, next_state = compressive_transformer(inputs, initial_state)
output2, final_state = compressive_transformer(inputs, next_state)
compression_loss = tf.get_collection('auxiliary_losses')
self.assertAllEqual(output.get_shape().as_list(),
[batch_size, window_size, output_size])
self.assertEqual(len(next_state), num_layers)
self.assertEqual(len(next_state[0]), 3) # index, cm, em
def check_state_size(state_list):
for state in state_list:
self.assertAllEqual(
state.episodic_memory.get_shape().as_list(),
[batch_size, em_memory_size, value_size * num_heads])
self.assertAllEqual(
state.compressed_memory.get_shape().as_list(),
[batch_size, cm_memory_size, value_size * num_heads])
check_state_size(next_state)
check_state_size(final_state)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(output)
sess.run(next_state)
sess.run(output2)
sess.run(final_state)
compression_loss_np = sess.run(compression_loss)
# Compression loss is zero because em and cm are zero.
self.assertEqual(compression_loss_np[0], 0)
# Compression loss is > 0 because em is populated.
self.assertGreater(compression_loss_np[1], 0)
if __name__ == '__main__':
tf.test.main()
| sonnet-1 | sonnet/python/modules/nets/transformer_test.py |
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
from absl.testing import parameterized
import numpy as np
import sonnet as snt
from sonnet.python.modules.nets import dilation
import tensorflow.compat.v1 as tf
from tensorflow.contrib import layers as contrib_layers
from tensorflow.contrib.eager.python import tfe as contrib_eager
@contrib_eager.run_all_tests_in_graph_and_eager_modes
class IdentityKernelInitializerTest(tf.test.TestCase,
parameterized.TestCase):
@parameterized.named_parameters(("Rank4", [2, 2]),
("SquareFilters", [2, 3, 1, 1]),
("OddHeighAndWidth", [2, 2, 1, 1]),
("EqualInAndOutChannels", [3, 3, 2, 1]))
def testInvalidShapes(self, shape):
with self.assertRaises(ValueError):
snt.nets.identity_kernel_initializer(shape)
def testComputation(self):
x = self.evaluate(snt.nets.identity_kernel_initializer([3, 3, 5, 5]))
# Iterate over elements. Assert that only the middle pixel is on when in
# and out channels are same.
it = np.nditer(x, flags=["multi_index"])
while not it.finished:
value, idx = it[0], it.multi_index
(filter_height, filter_width, in_channel, out_channel) = idx
if (filter_height == 1 and filter_width == 1 and
in_channel == out_channel):
self.assertEqual(value, 1)
else:
self.assertEqual(value, 0)
it.iternext()
@contrib_eager.run_all_tests_in_graph_and_eager_modes
class NoisyIdentityKernelInitializerTest(tf.test.TestCase,
parameterized.TestCase):
@parameterized.named_parameters(
("Rank4", [2, 2]),
("SquareFilters", [2, 3, 1, 1]),
("OddHeighAndWidth", [2, 2, 1, 1]),
("InAndOutChannelsAreMultiples", [3, 3, 2, 7]))
def testInvalidShapes(self, shape):
with self.assertRaises(ValueError):
initializer = snt.nets.noisy_identity_kernel_initializer(2)
initializer(shape)
def testComputation(self):
tf.set_random_seed(0)
initializer = snt.nets.noisy_identity_kernel_initializer(2, stddev=1e-20)
x = initializer([3, 3, 4, 8])
x = tf.reduce_sum(x, axis=[3])
x_ = self.evaluate(x)
# Iterate over elements. After summing over depth, assert that only the
# middle pixel is on.
it = np.nditer(x_, flags=["multi_index"])
while not it.finished:
value, idx = it[0], it.multi_index
(filter_height, filter_width, _) = idx
if filter_height == 1 and filter_width == 1:
self.assertAllClose(value, 1)
else:
self.assertAllClose(value, 0)
it.iternext()
@contrib_eager.run_all_tests_in_graph_and_eager_modes
class DilationTest(tf.test.TestCase, parameterized.TestCase):
def setUpWithNumOutputClasses(self, num_output_classes, depth=None):
"""Initialize Dilation module and test images.
Args:
num_output_classes: int. Number of output classes the dilation module
should predict per pixel.
depth: None or int. Input depth of image. If None, same as
num_output_classes.
"""
self._num_output_classes = num_output_classes
self._model_size = "basic"
self._module = snt.nets.Dilation(
num_output_classes=self._num_output_classes,
model_size=self._model_size)
self._batch_size = 1
self._height = self._width = 5
self._depth = depth or num_output_classes
# Generate images with all-positive values. This means that so long as
# convolution kernels are initialized to identity operators, applying the
# network should be an identity operation (negative values get zeroed out by
# ReLUs).
self._rng = np.random.RandomState(0)
self._images = np.abs(
self._rng.randn(self._batch_size, self._height, self._width,
self._depth).astype(np.float32))
@parameterized.parameters(1, 3)
def testShapeInference(self, num_output_classes):
self.setUpWithNumOutputClasses(num_output_classes)
x = self._module(tf.convert_to_tensor(self._images))
self.assertTrue(x.get_shape().is_compatible_with(
[self._batch_size, self._height, self._width, num_output_classes]))
@parameterized.parameters(1, 3)
def testBasicComputation(self, num_output_classes):
self.setUpWithNumOutputClasses(num_output_classes)
x = self._module(tf.convert_to_tensor(self._images))
# Default initialization produces an identity operator.
self.evaluate(tf.global_variables_initializer())
x_ = self.evaluate(x)
self.assertAllClose(x_, self._images)
@parameterized.parameters(1, 3)
def testLargeComputation(self, num_output_classes):
self.setUpWithNumOutputClasses(
num_output_classes, depth=3 * num_output_classes)
self.setUpWithNumOutputClasses(num_output_classes)
module = snt.nets.Dilation(
num_output_classes=num_output_classes, model_size="large")
x = module(tf.convert_to_tensor(self._images))
self.evaluate(tf.global_variables_initializer())
x_ = self.evaluate(x)
# Default initialization produces something like an identity operator, but the
# number of channels differs. However, summing across channels should
# recover a near-identical magnitude per-pixel.
self.assertAllClose(
np.sum(x_, axis=3), np.sum(self._images, axis=3), atol=1e-3)
def testInvalidShape(self):
self.setUpWithNumOutputClasses(1)
images = self._rng.randn(self._batch_size, self._height, self._width)
with self.assertRaisesRegexp(snt.IncompatibleShapeError, "must have shape"):
self._module(tf.convert_to_tensor(images))
def testInvalidModelSize(self):
self.setUpWithNumOutputClasses(1)
module = snt.nets.Dilation(
num_output_classes=self._num_output_classes,
model_size="invalid_model_size")
with self.assertRaisesRegexp(ValueError, "Unrecognized model_size"):
module(tf.convert_to_tensor(self._images))
# The other check for model_size being valid is only reached when
# weight initializers are provided. We need to test this as well to get
# 100% test coverage.
module = snt.nets.Dilation(
num_output_classes=self._num_output_classes,
initializers={"w": snt.nets.noisy_identity_kernel_initializer(1)},
model_size="invalid_model_size")
with self.assertRaisesRegexp(ValueError, "Unrecognized model_size"):
module(tf.convert_to_tensor(self._images))
def test_properties(self):
self.setUpWithNumOutputClasses(1)
with self.assertRaises(snt.NotConnectedError):
_ = self._module.conv_modules
self._module(tf.convert_to_tensor(self._images))
self.assertEqual(type(self._module.conv_modules), list)
def testInvalidRegularizationParameters(self):
regularizer = contrib_layers.l1_regularizer(scale=0.5)
with self.assertRaisesRegexp(KeyError, "Invalid regularizer keys.*"):
self.setUpWithNumOutputClasses(1)
snt.nets.Dilation(num_output_classes=self._num_output_classes,
regularizers={"not_w": regularizer})
err = "Regularizer for 'w' is not a callable function"
with self.assertRaisesRegexp(TypeError, err):
self.setUpWithNumOutputClasses(1)
snt.nets.Dilation(num_output_classes=self._num_output_classes,
regularizers={"w": tf.zeros([1, 2, 3])})
def testRegularizersInRegularizationLosses(self):
w_regularizer = contrib_layers.l1_regularizer(scale=0.5)
b_regularizer = contrib_layers.l2_regularizer(scale=0.5)
self.setUpWithNumOutputClasses(1)
dilation_mod = snt.nets.Dilation(
num_output_classes=self._num_output_classes,
regularizers={"w": w_regularizer, "b": b_regularizer})
dilation_mod(tf.convert_to_tensor(self._images))
regularizers = tf.get_collection(
tf.GraphKeys.REGULARIZATION_LOSSES)
# There are two regularizers per level
layers_number = 8
self.assertEqual(len(regularizers), 2 * layers_number)
if not tf.executing_eagerly():
for i in range(0, 2 * layers_number, 2):
self.assertRegexpMatches(regularizers[i].name, ".*l1_regularizer.*")
self.assertRegexpMatches(regularizers[i + 1].name, ".*l2_regularizer.*")
def testUtilities(self):
err = "Cannot calculate range along non-existent index."
with self.assertRaisesRegexp(ValueError, err):
# Valid rank here would be either 0 or 1.
dilation._range_along_dimension(2, [2, 4])
if __name__ == "__main__":
tf.test.main()
| sonnet-1 | sonnet/python/modules/nets/dilation_test.py |
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""A minimal interface convolutional networks module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from sonnet.python.modules import base
from sonnet.python.modules import batch_norm
from sonnet.python.modules import batch_norm_v2
from sonnet.python.modules import conv
from sonnet.python.modules import util
import tensorflow.compat.v1 as tf
DATA_FORMAT_NCHW = "NCHW"
DATA_FORMAT_NHWC = "NHWC"
SUPPORTED_2D_DATA_FORMATS = {DATA_FORMAT_NCHW, DATA_FORMAT_NHWC}
def _replicate_elements(input_iterable, num_times):
"""Replicates entry in `input_iterable` if `input_iterable` is of length 1."""
if len(input_iterable) == 1:
return (input_iterable[0],) * num_times
return tuple(input_iterable)
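# For illustration: _replicate_elements((3,), 4) returns (3, 3, 3, 3), while
# _replicate_elements((1, 2, 1), 3) returns (1, 2, 1) unchanged.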
class ConvNet2D(base.AbstractModule, base.Transposable):
"""A 2D Convolutional Network module."""
POSSIBLE_INITIALIZER_KEYS = {"w", "b"}
def __init__(self,
output_channels,
kernel_shapes,
strides,
paddings,
rates=(1,),
activation=tf.nn.relu,
activate_final=False,
normalization_ctor=None,
normalization_kwargs=None,
normalize_final=None,
initializers=None,
partitioners=None,
regularizers=None,
use_batch_norm=None, # Deprecated.
use_bias=True,
batch_norm_config=None, # Deprecated.
data_format=DATA_FORMAT_NHWC,
custom_getter=None,
name="conv_net_2d"):
"""Constructs a `ConvNet2D` module.
By default, neither batch normalization nor activation is applied to the
output of the final layer.
Args:
output_channels: Iterable of output channels, as defined in
`conv.Conv2D`. Output channels can be defined either as a number or via a
callable. In the latter case, since the function invocation is deferred
to graph construction time, the user only needs to ensure that entries can
be called when build is called. Each entry in the iterable defines
properties in the corresponding convolutional layer.
kernel_shapes: Iterable of kernel sizes as defined in `conv.Conv2D`; if
the list contains one element only, the same kernel shape is used in
each layer of the network.
strides: Iterable of kernel strides as defined in `conv.Conv2D`; if the
list contains one element only, the same stride is used in each layer of
the network.
paddings: Iterable of padding options as defined in `conv.Conv2D`. Each
can be `snt.SAME`, `snt.VALID`, `snt.FULL`, `snt.CAUSAL`,
`snt.REVERSE_CAUSAL` or a pair of these to use for height and width.
If the Iterable contains one element only, the same padding is used in
each layer of the network.
rates: Iterable of dilation rates as defined in `conv.Conv2D`; if the
list contains one element only, the same rate is used in each layer of
the network.
activation: An activation op.
activate_final: Boolean determining if the activation and batch
normalization, if turned on, are applied to the final layer.
normalization_ctor: Constructor to return a callable which will perform
normalization at each layer. Defaults to None / no normalization.
Examples of what could go here: `snt.BatchNormV2`, `snt.LayerNorm`. If
a string is provided, importlib is used to convert the string to a
callable, so either `snt.LayerNorm` or `"snt.LayerNorm"` can be
provided.
normalization_kwargs: kwargs to be provided to `normalization_ctor` when
it is called.
normalize_final: Whether to apply normalization after the final conv
layer. Default is to take the value of activate_final.
initializers: Optional dict containing ops to initialize the filters of
the whole network (with key 'w') or biases (with key 'b').
partitioners: Optional dict containing partitioners to partition
weights (with key 'w') or biases (with key 'b'). As a default, no
partitioners are used.
regularizers: Optional dict containing regularizers for the filters of the
whole network (with key 'w') or biases (with key 'b'). As a default, no
regularizers are used. A regularizer should be a function that takes a
single `Tensor` as an input and returns a scalar `Tensor` output, e.g.
the L1 and L2 regularizers in `tf.contrib.layers`.
use_batch_norm: Boolean determining if batch normalization is applied
after convolution. Deprecated, use `normalization_ctor` instead.
use_bias: Boolean or iterable of booleans determining whether to include
bias parameters in the convolutional layers. Default `True`.
batch_norm_config: Optional mapping of additional configuration for the
`snt.BatchNorm` modules. Deprecated, use `normalization_kwargs` instead.
data_format: A string, one of "NCHW" or "NHWC". Specifies whether the
channel dimension of the input and output is the last dimension
(default, "NHWC"), or the second dimension ("NCHW").
custom_getter: Callable or dictionary of callables to use as
custom getters inside the module. If a dictionary, the keys
correspond to regexes to match variable names. See the
`tf.get_variable` documentation for information about the
custom_getter API. Note that this `custom_getter` will not be passed
to the `transpose` method. If you want to use a custom getter with
the transposed of this convolutional network, you should provide one
to the `transpose` method instead.
name: Name of the module.
Raises:
TypeError: If `output_channels` is not iterable; or if `kernel_shapes` is
not iterable; or `strides` is not iterable; or `paddings` is not
iterable; or if `activation` is not callable.
ValueError: If `output_channels` is empty; or if `kernel_shapes` does not
have length 1 or `len(output_channels)`; or if `strides` does not have
length 1 or `len(output_channels)`; or if `paddings` does not have
length 1 or `len(output_channels)`; or if `rates` does not have
length 1 or `len(output_channels)`; or if the given data_format is not a
supported format ("NHWC" or "NCHW"); or if `normalization_ctor` is
provided but cannot be mapped to a callable.
KeyError: If `initializers`, `partitioners` or `regularizers` contain any
keys other than 'w' or 'b'.
TypeError: If any of the given initializers, partitioners or regularizers
are not callable.
"""
if not isinstance(output_channels, collections.Iterable):
raise TypeError("output_channels must be iterable")
output_channels = tuple(output_channels)
if not isinstance(kernel_shapes, collections.Iterable):
raise TypeError("kernel_shapes must be iterable")
kernel_shapes = tuple(kernel_shapes)
if not isinstance(strides, collections.Iterable):
raise TypeError("strides must be iterable")
strides = tuple(strides)
if not isinstance(paddings, collections.Iterable):
raise TypeError("paddings must be iterable")
paddings = tuple(paddings)
if not isinstance(rates, collections.Iterable):
raise TypeError("rates must be iterable")
rates = tuple(rates)
if isinstance(use_batch_norm, collections.Iterable):
raise TypeError("use_batch_norm must be a boolean. Per-layer use of "
"batch normalization is not supported. Previously, a "
"test erroneously suggested use_batch_norm can be an "
"iterable of booleans.")
super(ConvNet2D, self).__init__(name=name, custom_getter=custom_getter)
if not output_channels:
raise ValueError("output_channels must not be empty")
self._output_channels = tuple(output_channels)
self._num_layers = len(self._output_channels)
self._input_shape = None
if data_format not in SUPPORTED_2D_DATA_FORMATS:
raise ValueError("Invalid data_format {}. Allowed formats "
"{}".format(data_format, SUPPORTED_2D_DATA_FORMATS))
self._data_format = data_format
self._initializers = util.check_initializers(
initializers, self.POSSIBLE_INITIALIZER_KEYS)
self._partitioners = util.check_partitioners(
partitioners, self.POSSIBLE_INITIALIZER_KEYS)
self._regularizers = util.check_regularizers(
regularizers, self.POSSIBLE_INITIALIZER_KEYS)
if not callable(activation):
raise TypeError("Input 'activation' must be callable")
self._activation = activation
self._activate_final = activate_final
self._kernel_shapes = _replicate_elements(kernel_shapes, self._num_layers)
if len(self._kernel_shapes) != self._num_layers:
raise ValueError(
"kernel_shapes must be of length 1 or len(output_channels)")
self._strides = _replicate_elements(strides, self._num_layers)
if len(self._strides) != self._num_layers:
raise ValueError(
"""strides must be of length 1 or len(output_channels)""")
self._paddings = _replicate_elements(paddings, self._num_layers)
if len(self._paddings) != self._num_layers:
raise ValueError(
"""paddings must be of length 1 or len(output_channels)""")
self._rates = _replicate_elements(rates, self._num_layers)
if len(self._rates) != self._num_layers:
raise ValueError(
"""rates must be of length 1 or len(output_channels)""")
self._parse_normalization_kwargs(
use_batch_norm, batch_norm_config,
normalization_ctor, normalization_kwargs)
if normalize_final is None:
util.deprecation_warning(
"normalize_final is not specified, so using the value of "
"activate_final = {}. Change your code to set this kwarg explicitly. "
"In the future, normalize_final will default to True.".format(
activate_final))
self._normalize_final = activate_final
else:
# User has provided an override, so don't link to activate_final.
self._normalize_final = normalize_final
if isinstance(use_bias, bool):
use_bias = (use_bias,)
else:
if not isinstance(use_bias, collections.Iterable):
raise TypeError("use_bias must be either a bool or an iterable")
use_bias = tuple(use_bias)
self._use_bias = _replicate_elements(use_bias, self._num_layers)
self._instantiate_layers()
def _check_and_assign_normalization_members(self, normalization_ctor,
normalization_kwargs):
"""Checks that the normalization constructor is callable."""
if isinstance(normalization_ctor, six.string_types):
normalization_ctor = util.parse_string_to_constructor(normalization_ctor)
if normalization_ctor is not None and not callable(normalization_ctor):
raise ValueError(
"normalization_ctor must be a callable or a string that specifies "
"a callable, got {}.".format(normalization_ctor))
self._normalization_ctor = normalization_ctor
self._normalization_kwargs = normalization_kwargs
def _parse_normalization_kwargs(self, use_batch_norm, batch_norm_config,
normalization_ctor, normalization_kwargs):
"""Sets up normalization, checking old and new flags."""
if use_batch_norm is not None:
# Delete this whole block when deprecation is done.
util.deprecation_warning(
"`use_batch_norm` kwarg is deprecated. Change your code to instead "
"specify `normalization_ctor` and `normalization_kwargs`.")
if not use_batch_norm:
# Explicitly set to False - normalization_{ctor,kwargs} has precedence.
self._check_and_assign_normalization_members(normalization_ctor,
normalization_kwargs or {})
else: # Explicitly set to true - new kwargs must not be used.
if normalization_ctor is not None or normalization_kwargs is not None:
raise ValueError(
"if use_batch_norm is specified, normalization_ctor and "
"normalization_kwargs must not be.")
self._check_and_assign_normalization_members(batch_norm.BatchNorm,
batch_norm_config or {})
else:
# Old kwargs not set, this block will remain after removing old kwarg.
self._check_and_assign_normalization_members(normalization_ctor,
normalization_kwargs or {})
def _instantiate_layers(self):
"""Instantiates all the convolutional modules used in the network."""
# Here we are entering the module's variable scope to name our submodules
# correctly (not to create variables). As such it's safe to not check
# whether we're in the same graph. This is important if we're constructing
# the module in one graph and connecting it in another (e.g. with `defun`
# the module is created in some default graph, and connected to a capturing
# graph in order to turn it into a graph function).
with self._enter_variable_scope(check_same_graph=False):
self._layers = tuple(conv.Conv2D(name="conv_2d_{}".format(i), # pylint: disable=g-complex-comprehension
output_channels=self._output_channels[i],
kernel_shape=self._kernel_shapes[i],
stride=self._strides[i],
rate=self._rates[i],
padding=self._paddings[i],
use_bias=self._use_bias[i],
initializers=self._initializers,
partitioners=self._partitioners,
regularizers=self._regularizers,
data_format=self._data_format)
for i in xrange(self._num_layers))
def _build(self, inputs, **normalization_build_kwargs):
"""Assembles the `ConvNet2D` and connects it to the graph.
Args:
inputs: A 4D Tensor of shape `[batch_size, input_height, input_width,
input_channels]`.
**normalization_build_kwargs: kwargs passed to the normalization module
at _build time.
Returns:
A 4D Tensor of shape `[batch_size, output_height, output_width,
output_channels[-1]]`.
Raises:
ValueError: If `is_training` is not explicitly specified when using
batch normalization.
"""
if (self._normalization_ctor in {batch_norm.BatchNorm,
batch_norm_v2.BatchNormV2} and
"is_training" not in normalization_build_kwargs):
raise ValueError("Boolean is_training flag must be explicitly specified "
"when using batch normalization.")
self._input_shape = tuple(inputs.get_shape().as_list())
net = inputs
final_index = len(self._layers) - 1
for i, layer in enumerate(self._layers):
net = layer(net)
if i != final_index or self._normalize_final:
if self._normalization_ctor is not None:
# The name 'batch_norm' is used even if something else like
# LayerNorm is being used. This is to avoid breaking old checkpoints.
normalizer = self._normalization_ctor(
name="batch_norm_{}".format(i),
**self._normalization_kwargs)
net = normalizer(
net, **util.remove_unsupported_kwargs(
normalizer, normalization_build_kwargs))
else:
if normalization_build_kwargs:
tf.logging.warning(
"No normalization configured, but extra kwargs "
"provided: {}".format(normalization_build_kwargs))
if i != final_index or self._activate_final:
net = self._activation(net)
return net
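# Minimal construction/connection sketch for ConvNet2D (the layer sizes and the
# `images` tensor are arbitrary assumptions, not defaults):
#
#   net = ConvNet2D(output_channels=[16, 32],
#                   kernel_shapes=[3],
#                   strides=[1],
#                   paddings=[conv.SAME])
#   outputs = net(images)  # images: [batch, height, width, channels] (NHWC).
#
# If a batch-norm normalization_ctor is configured, `is_training` must also be
# passed at call time, e.g. `net(images, is_training=True)`.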
@property
def layers(self):
"""Returns a tuple containing the convolutional layers of the network."""
return self._layers
@property
def initializers(self):
return self._initializers
@property
def partitioners(self):
return self._partitioners
@property
def regularizers(self):
return self._regularizers
@property
def strides(self):
return self._strides
@property
def paddings(self):
return self._paddings
@property
def rates(self):
return self._rates
@property
def kernel_shapes(self):
return self._kernel_shapes
@property
def output_channels(self):
return tuple([l() if callable(l) else l for l in self._output_channels])
@property
def use_bias(self):
return self._use_bias
@property
def use_batch_norm(self):
util.deprecation_warning(
"The `.use_batch_norm` property is deprecated. Check "
"`.normalization_ctor` instead.")
return self._normalization_ctor == batch_norm.BatchNorm
@property
def batch_norm_config(self):
util.deprecation_warning(
"The `.batch_norm_config` property is deprecated. Check "
"`.normalization_kwargs` instead.")
return self._normalization_kwargs
@property
def normalization_ctor(self):
return self._normalization_ctor
@property
def normalization_kwargs(self):
return self._normalization_kwargs
@property
def normalize_final(self):
return self._normalize_final
@property
def activation(self):
return self._activation
@property
def activate_final(self):
return self._activate_final
# Implements Transposable interface.
@property
def input_shape(self):
"""Returns shape of input `Tensor` passed at last call to `build`."""
self._ensure_is_connected()
return self._input_shape
def _transpose(self,
transpose_constructor,
name=None,
output_channels=None,
kernel_shapes=None,
strides=None,
paddings=None,
activation=None,
activate_final=None,
normalization_ctor=None,
normalization_kwargs=None,
normalize_final=None,
initializers=None,
partitioners=None,
regularizers=None,
use_bias=None,
data_format=None):
"""Returns transposed version of this network.
Args:
transpose_constructor: A method that creates an instance of the transposed
network type. The method must accept the same kwargs as this method,
with the exception of the `transpose_constructor` argument.
name: Optional string specifying the name of the transposed module. The
default name is constructed by appending "_transpose"
to `self.module_name`.
output_channels: Optional iterable of numbers of output channels.
kernel_shapes: Optional iterable of kernel sizes. The default value is
constructed by reversing `self.kernel_shapes`.
strides: Optional iterable of kernel strides. The default value is
constructed by reversing `self.strides`.
paddings: Optional iterable of padding options, either `snt.SAME` or
`snt.VALID`. The default value is constructed by reversing
`self.paddings`.
activation: Optional activation op. Default value is `self.activation`.
activate_final: Optional boolean determining if the activation and batch
normalization, if turned on, are applied to the final layer.
normalization_ctor: Constructor to return a callable which will perform
normalization at each layer. Defaults to None / no normalization.
Examples of what could go here: `snt.BatchNormV2`, `snt.LayerNorm`. If
a string is provided, importlib is used to convert the string to a
callable, so either `snt.LayerNorm` or `"snt.LayerNorm"` can be
provided.
normalization_kwargs: kwargs to be provided to `normalization_ctor` when
it is called.
normalize_final: Whether to apply normalization after the final conv
layer. Default is to take the value of activate_final.
initializers: Optional dict containing ops to initialize the filters of
the whole network (with key 'w') or biases (with key 'b'). The default
value is `self.initializers`.
partitioners: Optional dict containing partitioners to partition
weights (with key 'w') or biases (with key 'b'). The default value is
`self.partitioners`.
regularizers: Optional dict containing regularizers for the filters of the
whole network (with key 'w') or biases (with key 'b'). The default is
`self.regularizers`.
use_bias: Optional boolean or iterable of booleans determining whether to
include bias parameters in the convolutional layers. Default
is constructed by reversing `self.use_bias`.
data_format: Optional string, one of "NCHW" or "NHWC". Specifies whether
the channel dimension of the input and output is the last dimension.
Default is `self._data_format`.
Returns:
Matching transposed module.
Raises:
ValueError: If output_channels is specified and its length does not match
the number of layers.
"""
if data_format is None:
data_format = self._data_format
if output_channels is None:
output_channels = []
channel_dim = -1 if data_format == DATA_FORMAT_NHWC else 1
for layer in reversed(self._layers):
output_channels.append(lambda l=layer: l.input_shape[channel_dim])
elif len(output_channels) != len(self._layers):
# Note that we only have to do this check for the output channels. Any
# other inconsistencies will be picked up by ConvNet2D.__init__.
raise ValueError("Iterable output_channels length must match the "
"number of layers ({}), but is {} instead.".format(
len(self._layers), len(output_channels)))
if kernel_shapes is None:
kernel_shapes = reversed(self.kernel_shapes)
if strides is None:
strides = reversed(self.strides)
if paddings is None:
paddings = reversed(self.paddings)
if activation is None:
activation = self.activation
if activate_final is None:
activate_final = self.activate_final
if normalization_ctor is None:
normalization_ctor = self.normalization_ctor
if normalization_kwargs is None:
normalization_kwargs = self.normalization_kwargs
if normalize_final is None:
normalize_final = self.normalize_final
if initializers is None:
initializers = self.initializers
if partitioners is None:
partitioners = self.partitioners
if regularizers is None:
regularizers = self.regularizers
if use_bias is None:
use_bias = reversed(self.use_bias)
if name is None:
name = self.module_name + "_transpose"
return transpose_constructor(
output_channels=output_channels,
kernel_shapes=kernel_shapes,
strides=strides,
paddings=paddings,
activation=activation,
activate_final=activate_final,
normalization_ctor=normalization_ctor,
normalization_kwargs=normalization_kwargs,
normalize_final=normalize_final,
initializers=initializers,
partitioners=partitioners,
regularizers=regularizers,
use_bias=use_bias,
data_format=data_format,
name=name)
# Implements Transposable interface.
def transpose(self,
name=None,
output_channels=None,
kernel_shapes=None,
strides=None,
paddings=None,
activation=None,
activate_final=None,
normalization_ctor=None,
normalization_kwargs=None,
normalize_final=None,
initializers=None,
partitioners=None,
regularizers=None,
use_batch_norm=None,
use_bias=None,
batch_norm_config=None,
data_format=None,
custom_getter=None):
"""Returns transposed version of this network.
Args:
name: Optional string specifying the name of the transposed module. The
default name is constructed by appending "_transpose"
to `self.module_name`.
output_channels: Optional iterable of numbers of output channels.
kernel_shapes: Optional iterable of kernel sizes. The default value is
constructed by reversing `self.kernel_shapes`.
strides: Optional iterable of kernel strides. The default value is
constructed by reversing `self.strides`.
paddings: Optional iterable of padding options, either `snt.SAME` or
`snt.VALID`; The default value is constructed by reversing
`self.paddings`.
activation: Optional activation op. Default value is `self.activation`.
activate_final: Optional boolean determining if the activation and batch
normalization, if turned on, are applied to the final layer.
normalization_ctor: Constructor to return a callable which will perform
normalization at each layer. Defaults to None / no normalization.
Examples of what could go here: `snt.BatchNormV2`, `snt.LayerNorm`. If
a string is provided, importlib is used to convert the string to a
callable, so either `snt.LayerNorm` or `"snt.LayerNorm"` can be
provided.
normalization_kwargs: kwargs to be provided to `normalization_ctor` when
it is called.
normalize_final: Whether to apply normalization after the final conv
layer. Default is to take the value of activate_final.
initializers: Optional dict containing ops to initialize the filters of
the whole network (with key 'w') or biases (with key 'b'). The default
value is `self.initializers`.
partitioners: Optional dict containing partitioners to partition
weights (with key 'w') or biases (with key 'b'). The default value is
`self.partitioners`.
regularizers: Optional dict containing regularizers for the filters of the
whole network (with key 'w') or biases (with key 'b'). The default is
`self.regularizers`.
use_batch_norm: Optional boolean determining if batch normalization is
applied after convolution. The default value is `self.use_batch_norm`.
use_bias: Optional boolean or iterable of booleans determining whether to
include bias parameters in the convolutional layers. Default
is constructed by reversing `self.use_bias`.
batch_norm_config: Optional mapping of additional configuration for the
`snt.BatchNorm` modules. Default is `self.batch_norm_config`.
data_format: Optional string, one of "NCHW" or "NHWC". Specifies whether
the channel dimension of the input and output is the last dimension.
Default is `self._data_format`.
custom_getter: Callable or dictionary of callables to use as
custom getters inside the module. If a dictionary, the keys
correspond to regexes to match variable names. See the
`tf.get_variable` documentation for information about the
custom_getter API.
Returns:
Matching `ConvNet2DTranspose` module.
Raises:
ValueError: If output_channels is specified and its length does not match
the number of layers.
ValueError: If the given data_format is not a supported format ("NHWC" or
"NCHW").
NotImplementedError: If the convolutions are dilated.
"""
for rate in self._rates:
if rate != 1:
raise NotImplementedError("Transpose dilated convolutions "
"are not supported")
output_shapes = []
if data_format is None:
data_format = self._data_format
if data_format == DATA_FORMAT_NHWC:
start_dim, end_dim = 1, -1
elif data_format == DATA_FORMAT_NCHW:
start_dim, end_dim = 2, 4
else:
raise ValueError("Invalid data_format {:s}. Allowed formats "
"{}".format(data_format, SUPPORTED_2D_DATA_FORMATS))
if custom_getter is None and self._custom_getter is not None:
tf.logging.warning(
"This convnet was constructed with a custom getter, but the "
"`transpose` method was not given any. The transposed ConvNet will "
"not be using any custom_getter.")
for layer in reversed(self._layers):
output_shapes.append(lambda l=layer: l.input_shape[start_dim:end_dim])
transpose_constructor = functools.partial(ConvNet2DTranspose,
output_shapes=output_shapes,
custom_getter=custom_getter)
return self._transpose(
transpose_constructor=transpose_constructor,
name=name,
output_channels=output_channels,
kernel_shapes=kernel_shapes,
strides=strides,
paddings=paddings,
activation=activation,
activate_final=activate_final,
normalization_ctor=normalization_ctor,
normalization_kwargs=normalization_kwargs,
normalize_final=normalize_final,
initializers=initializers,
partitioners=partitioners,
regularizers=regularizers,
use_bias=use_bias,
data_format=data_format)
class ConvNet2DTranspose(ConvNet2D):
"""A 2D Transpose-Convolutional Network module."""
def __init__(self,
output_channels,
output_shapes,
kernel_shapes,
strides,
paddings,
activation=tf.nn.relu,
activate_final=False,
normalization_ctor=None,
normalization_kwargs=None,
normalize_final=None,
initializers=None,
partitioners=None,
regularizers=None,
use_batch_norm=False,
use_bias=True,
batch_norm_config=None,
data_format=DATA_FORMAT_NHWC,
custom_getter=None,
name="conv_net_2d_transpose"):
"""Constructs a `ConvNetTranspose2D` module.
`output_{shapes,channels}` can be defined either as iterable of
{iterables,integers} or via a callable. In the latter case, since the
function invocation is deferred to graph construction time, the user
must only ensure that entries can be called returning meaningful values when
build is called. Each entry in the iterable defines properties in the
corresponding convolutional layer.
By default, neither batch normalization nor activation are applied to the
output of the final layer.
Args:
output_channels: Iterable of numbers of output channels.
output_shapes: Iterable of output shapes as defined in
        `conv.Conv2DTranspose`; if the iterable contains one element only, the
same shape is used in each layer of the network.
kernel_shapes: Iterable of kernel sizes as defined in `conv.Conv2D`; if
the list contains one element only, the same kernel shape is used in
each layer of the network.
strides: Iterable of kernel strides as defined in `conv.Conv2D`; if the
list contains one element only, the same stride is used in each layer of
the network.
paddings: Iterable of padding options, either `snt.SAME` or
        `snt.VALID`; if the iterable contains one element only, the same padding
is used in each layer of the network.
activation: An activation op.
activate_final: Boolean determining if the activation and batch
normalization, if turned on, are applied to the final layer.
normalization_ctor: Constructor to return a callable which will perform
normalization at each layer. Defaults to None / no normalization.
Examples of what could go here: `snt.BatchNormV2`, `snt.LayerNorm`. If
a string is provided, importlib is used to convert the string to a
callable, so either `snt.LayerNorm` or `"snt.LayerNorm"` can be
provided.
normalization_kwargs: kwargs to be provided to `normalization_ctor` when
it is called.
normalize_final: Whether to apply normalization after the final conv
layer. Default is to take the value of activate_final.
initializers: Optional dict containing ops to initialize the filters of
the whole network (with key 'w') or biases (with key 'b').
partitioners: Optional dict containing partitioners to partition
weights (with key 'w') or biases (with key 'b'). As a default, no
partitioners are used.
regularizers: Optional dict containing regularizers for the filters of the
whole network (with key 'w') or biases (with key 'b'). As a default, no
regularizers are used. A regularizer should be a function that takes a
single `Tensor` as an input and returns a scalar `Tensor` output, e.g.
the L1 and L2 regularizers in `tf.contrib.layers`.
use_batch_norm: Boolean determining if batch normalization is applied
after convolution.
use_bias: Boolean or iterable of booleans determining whether to include
bias parameters in the convolutional layers. Default `True`.
batch_norm_config: Optional mapping of additional configuration for the
`snt.BatchNorm` modules.
data_format: A string, one of "NCHW" or "NHWC". Specifies whether the
channel dimension of the input and output is the last dimension
(default, "NHWC"), or the second dimension ("NCHW").
custom_getter: Callable or dictionary of callables to use as
custom getters inside the module. If a dictionary, the keys
correspond to regexes to match variable names. See the
`tf.get_variable` documentation for information about the
custom_getter API.
name: Name of the module.
Raises:
TypeError: If `output_channels` is not iterable; or if `output_shapes`
is not iterable; or if `kernel_shapes` is not iterable; or if `strides`
is not iterable; or if `paddings` is not iterable; or if `activation` is
not callable.
      ValueError: If `output_channels` is empty; or if `kernel_shapes` does not
        have length 1 or `len(output_channels)`; or if `strides` does not have
        length 1 or `len(output_channels)`; or if `paddings` does not have
        length 1 or `len(output_channels)`.
ValueError: If the given data_format is not a supported format ("NHWC" or
"NCHW").
ValueError: If `normalization_ctor` is provided but cannot be converted
to a callable.
KeyError: If `initializers`, `partitioners` or `regularizers` contain any
keys other than 'w' or 'b'.
TypeError: If any of the given initializers, partitioners or regularizers
are not callable.
"""
if not isinstance(output_channels, collections.Iterable):
raise TypeError("output_channels must be iterable")
output_channels = tuple(output_channels)
num_layers = len(output_channels)
if not isinstance(output_shapes, collections.Iterable):
raise TypeError("output_shapes must be iterable")
output_shapes = tuple(output_shapes)
self._output_shapes = _replicate_elements(output_shapes, num_layers)
if len(self._output_shapes) != num_layers:
raise ValueError(
"output_shapes must be of length 1 or len(output_channels)")
super(ConvNet2DTranspose, self).__init__(
output_channels,
kernel_shapes,
strides,
paddings,
activation=activation,
activate_final=activate_final,
normalization_ctor=normalization_ctor,
normalization_kwargs=normalization_kwargs,
normalize_final=normalize_final,
initializers=initializers,
partitioners=partitioners,
regularizers=regularizers,
use_batch_norm=use_batch_norm,
use_bias=use_bias,
batch_norm_config=batch_norm_config,
data_format=data_format,
custom_getter=custom_getter,
name=name)
def _instantiate_layers(self):
"""Instantiates all the convolutional modules used in the network."""
# See `ConvNet2D._instantiate_layers` for more information about why we are
# using `check_same_graph=False`.
with self._enter_variable_scope(check_same_graph=False):
self._layers = tuple(
conv.Conv2DTranspose(name="conv_2d_transpose_{}".format(i), # pylint: disable=g-complex-comprehension
output_channels=self._output_channels[i],
output_shape=self._output_shapes[i],
kernel_shape=self._kernel_shapes[i],
stride=self._strides[i],
padding=self._paddings[i],
initializers=self._initializers,
partitioners=self._partitioners,
regularizers=self._regularizers,
data_format=self._data_format,
use_bias=self._use_bias[i])
for i in xrange(self._num_layers))
@property
def output_shapes(self):
return tuple([l() if callable(l) else l for l in self._output_shapes])
# Implements Transposable interface.
def transpose(self,
name=None,
output_channels=None,
kernel_shapes=None,
strides=None,
paddings=None,
activation=None,
activate_final=None,
normalization_ctor=None,
normalization_kwargs=None,
normalize_final=None,
initializers=None,
partitioners=None,
regularizers=None,
use_batch_norm=None,
use_bias=None,
batch_norm_config=None,
data_format=None,
custom_getter=None):
"""Returns transposed version of this network.
Args:
name: Optional string specifying the name of the transposed module. The
default name is constructed by appending "_transpose"
to `self.module_name`.
output_channels: Optional iterable of numbers of output channels.
kernel_shapes: Optional iterable of kernel sizes. The default value is
constructed by reversing `self.kernel_shapes`.
strides: Optional iterable of kernel strides. The default value is
constructed by reversing `self.strides`.
paddings: Optional iterable of padding options, either `snt.SAME` or
`snt.VALID`; The default value is constructed by reversing
`self.paddings`.
activation: Optional activation op. Default value is `self.activation`.
activate_final: Optional boolean determining if the activation and batch
normalization, if turned on, are applied to the final layer.
normalization_ctor: Constructor to return a callable which will perform
normalization at each layer. Defaults to None / no normalization.
Examples of what could go here: `snt.BatchNormV2`, `snt.LayerNorm`. If
a string is provided, importlib is used to convert the string to a
callable, so either `snt.LayerNorm` or `"snt.LayerNorm"` can be
provided.
normalization_kwargs: kwargs to be provided to `normalization_ctor` when
it is called.
normalize_final: Whether to apply normalization after the final conv
layer. Default is to take the value of activate_final.
initializers: Optional dict containing ops to initialize the filters of
the whole network (with key 'w') or biases (with key 'b'). The default
value is `self.initializers`.
partitioners: Optional dict containing partitioners to partition
weights (with key 'w') or biases (with key 'b'). The default value is
`self.partitioners`.
regularizers: Optional dict containing regularizers for the filters of the
whole network (with key 'w') or biases (with key 'b'). The default is
`self.regularizers`.
use_batch_norm: Optional boolean determining if batch normalization is
applied after convolution. The default value is `self.use_batch_norm`.
use_bias: Optional boolean or iterable of booleans determining whether to
include bias parameters in the convolutional layers. Default
is constructed by reversing `self.use_bias`.
batch_norm_config: Optional mapping of additional configuration for the
`snt.BatchNorm` modules. Default is `self.batch_norm_config`.
data_format: Optional string, one of "NCHW" or "NHWC". Specifies whether
the channel dimension of the input and output is the last dimension.
Default is `self._data_format`.
custom_getter: Callable or dictionary of callables to use as
custom getters inside the module. If a dictionary, the keys
correspond to regexes to match variable names. See the
`tf.get_variable` documentation for information about the
custom_getter API.
Returns:
Matching `ConvNet2D` module.
Raises:
ValueError: If output_channels is specified and its length does not match
the number of layers.
"""
if use_batch_norm is not None:
if normalization_ctor is not None or normalization_kwargs is not None:
raise ValueError(
"If use_batch_norm is specified, normalization_ctor and "
"normalization_kwargs must not be.")
if use_batch_norm:
normalization_ctor = batch_norm.BatchNorm
else:
normalization_ctor = None
normalization_kwargs = batch_norm_config
if custom_getter is None and self._custom_getter is not None:
tf.logging.warning(
"This convnet was constructed with a custom getter, but the "
"`transpose` method was not given any. The transposed ConvNet will "
"not be using any custom_getter.")
transpose_constructor = functools.partial(ConvNet2D,
custom_getter=custom_getter)
return self._transpose(
transpose_constructor=transpose_constructor,
name=name,
output_channels=output_channels,
kernel_shapes=kernel_shapes,
strides=strides,
paddings=paddings,
activation=activation,
activate_final=activate_final,
normalization_ctor=normalization_ctor,
normalization_kwargs=normalization_kwargs,
normalize_final=normalize_final,
initializers=initializers,
partitioners=partitioners,
regularizers=regularizers,
use_bias=use_bias,
data_format=data_format)
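# A minimal usage sketch (illustrative only; the names below are not used
# elsewhere in this file). It assumes `inputs` is a `[batch, 64, 64, channels]`
# float tensor. It shows that output shape entries may be callables whose
# evaluation is deferred until the module is connected, that
# `normalization_ctor` accepts either a callable or its string name, and that
# `transpose()` returns a matching network with reversed layer properties.
def _example_transpose_usage(inputs):
  net = ConvNet2DTranspose(
      output_channels=[16, 3],
      # A callable entry is only invoked once the module is being built.
      output_shapes=[lambda: [64, 64]],
      kernel_shapes=[[3, 3]],
      strides=[1],
      paddings=["SAME"],
      # Either `snt.LayerNorm` or the string "snt.LayerNorm" is accepted.
      normalization_ctor="snt.LayerNorm")
  outputs = net(inputs, is_training=True)
  # The transposed network reverses kernel shapes, strides, paddings and
  # use_bias, and infers its output_channels from the input shapes of the
  # layers above, which is why the original network is connected first.
  net_transpose = net.transpose()
  return net_transpose(outputs, is_training=True)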
| sonnet-1 | sonnet/python/modules/nets/convnet.py |
# Copyright 2018 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for sonnet.python.modules.nets.vqvae."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
import sonnet as snt
import tensorflow.compat.v1 as tf
class VqvaeTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
(snt.nets.VectorQuantizer,
{'embedding_dim': 4, 'num_embeddings': 8,
'commitment_cost': 0.25}),
(snt.nets.VectorQuantizerEMA,
{'embedding_dim': 6, 'num_embeddings': 13,
'commitment_cost': 0.5, 'decay': 0.1})
)
def testConstruct(self, constructor, kwargs):
vqvae = constructor(**kwargs)
# Batch of input vectors to quantize
inputs_np = np.random.randn(16, kwargs['embedding_dim']).astype(np.float32)
inputs = tf.constant(inputs_np)
    # Set is_training to False; otherwise, in the EMA case, merely evaluating
    # the forward pass changes the embeddings, so some of the closest
    # embeddings computed below would be incorrect.
vq_output = vqvae(inputs, is_training=False)
# Output shape is correct
self.assertEqual(vq_output['quantize'].shape, inputs.shape)
init_op = tf.global_variables_initializer()
with self.test_session() as session:
session.run(init_op)
vq_output_np, embeddings_np = session.run([vq_output, vqvae.embeddings])
self.assertEqual(embeddings_np.shape, (kwargs['embedding_dim'],
kwargs['num_embeddings']))
# Check that each input was assigned to the embedding it is closest to.
distances = ((inputs_np ** 2).sum(axis=1, keepdims=True)
- 2 * np.dot(inputs_np, embeddings_np)
+ (embeddings_np**2).sum(axis=0, keepdims=True))
closest_index = np.argmax(-distances, axis=1)
self.assertAllEqual(closest_index,
np.argmax(vq_output_np['encodings'], axis=1))
@parameterized.parameters(
(snt.nets.VectorQuantizer,
{'embedding_dim': 4, 'num_embeddings': 8,
'commitment_cost': 0.25}),
(snt.nets.VectorQuantizerEMA,
{'embedding_dim': 6, 'num_embeddings': 13,
'commitment_cost': 0.5, 'decay': 0.1})
)
def testShapeChecking(self, constructor, kwargs):
vqvae = constructor(**kwargs)
wrong_shape_input = np.random.randn(100, kwargs['embedding_dim'] * 2)
with self.assertRaisesRegexp(ValueError, 'Cannot reshape a tensor'):
vqvae(tf.constant(wrong_shape_input.astype(np.float32)),
is_training=False)
@parameterized.parameters(
(snt.nets.VectorQuantizer,
{'embedding_dim': 4, 'num_embeddings': 8,
'commitment_cost': 0.25}),
(snt.nets.VectorQuantizerEMA,
{'embedding_dim': 6, 'num_embeddings': 13,
'commitment_cost': 0.5, 'decay': 0.1})
)
def testNoneBatch(self, constructor, kwargs):
"""Check that vqvae can be built on input with a None batch dimension."""
vqvae = constructor(**kwargs)
inputs = tf.placeholder(tf.float32, (None, 5, 5, kwargs['embedding_dim']))
vqvae(inputs, is_training=False)
def testEmaUpdating(self):
embedding_dim = 6
vqvae = snt.nets.VectorQuantizerEMA(
embedding_dim=embedding_dim, num_embeddings=7,
commitment_cost=0.5, decay=0.1)
batch_size = 16
input_ph = tf.placeholder(shape=[batch_size, embedding_dim],
dtype=tf.float32)
output = vqvae(input_ph, is_training=True)
embeddings = vqvae.embeddings
init_op = tf.global_variables_initializer()
with self.test_session() as session:
session.run(init_op)
      # The embeddings should change every time we put data through, even
      # though we are not passing any gradients through.
prev_w = session.run(embeddings)
for _ in range(10):
session.run(output, {input_ph: np.random.randn(batch_size,
embedding_dim)})
current_w = session.run(embeddings)
self.assertFalse((prev_w == current_w).all())
prev_w = current_w
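# A small numpy sketch (illustrative only; not part of the tests above) of the
# nearest-embedding lookup verified in testConstruct: the squared distances
# expand to ||x||^2 - 2 x.e + ||e||^2, and each input is encoded as the column
# with the smallest distance, which is what argmax over -distances computes.
def _nearest_embedding_indices(inputs_np, embeddings_np):
  distances = ((inputs_np ** 2).sum(axis=1, keepdims=True)
               - 2 * np.dot(inputs_np, embeddings_np)
               + (embeddings_np ** 2).sum(axis=0, keepdims=True))
  return np.argmin(distances, axis=1)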
if __name__ == '__main__':
tf.test.main()
| sonnet-1 | sonnet/python/modules/nets/vqvae_test.py |
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Test sonnet.python.modules.nets.convnet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import itertools
# Dependency imports
from absl.testing import parameterized
import numpy as np
import sonnet as snt
from sonnet.python.modules.conv import _fill_shape as fill_shape
import tensorflow.compat.v1 as tf
from tensorflow.contrib import layers as contrib_layers
from tensorflow.contrib.eager.python import tfe as contrib_eager
from tensorflow.python.ops import variables # pylint: disable=g-direct-tensorflow-import
@contrib_eager.run_all_tests_in_graph_and_eager_modes
class SharedConvNets2DTest(parameterized.TestCase, tf.test.TestCase):
def setUp(self):
super(SharedConvNets2DTest, self).setUp()
self.output_channels = [2, 3, 4]
self.kernel_shapes = [[3, 3]]
self.strides = [1]
self.rates = [1, 2, 1]
self.paddings = [snt.SAME]
@parameterized.named_parameters(
("ConvNet2D", snt.nets.ConvNet2D),
("ConvNet2DTranspose", functools.partial(snt.nets.ConvNet2DTranspose,
output_shapes=[[100, 100]])))
def testName(self, module):
unique_name = "unique_name"
with tf.variable_scope("scope"):
net = module(name=unique_name,
output_channels=self.output_channels,
kernel_shapes=self.kernel_shapes,
strides=self.strides,
paddings=self.paddings)
self.assertEqual(net.scope_name, "scope/" + unique_name)
self.assertEqual(net.module_name, unique_name)
@parameterized.named_parameters(
("ConvNet2D", snt.nets.ConvNet2D),
("ConvNet2DTranspose", functools.partial(snt.nets.ConvNet2DTranspose,
output_shapes=[[100, 100]])))
def testConstructor(self, module):
with self.assertRaisesRegexp(ValueError,
"output_channels must not be empty"):
module(output_channels=[],
kernel_shapes=self.kernel_shapes,
strides=self.strides,
paddings=self.paddings)
with self.assertRaisesRegexp(ValueError,
"kernel_shapes must be of length 1 or *"):
module(output_channels=self.output_channels,
kernel_shapes=[],
strides=self.strides,
paddings=self.paddings)
with self.assertRaisesRegexp(ValueError,
"kernel_shapes must be of length 1 or *"):
module(output_channels=self.output_channels,
kernel_shapes=[1, 2],
strides=self.strides,
paddings=self.paddings)
with self.assertRaisesRegexp(ValueError,
"strides must be of length 1 or *"):
module(output_channels=self.output_channels,
kernel_shapes=self.kernel_shapes,
strides=[],
paddings=self.paddings)
with self.assertRaisesRegexp(ValueError,
"strides must be of length 1 or *"):
module(output_channels=self.output_channels,
kernel_shapes=self.kernel_shapes,
strides=[1, 1],
paddings=self.paddings)
with self.assertRaisesRegexp(ValueError,
"paddings must be of length 1 or *"):
module(output_channels=self.output_channels,
kernel_shapes=self.kernel_shapes,
strides=self.paddings,
paddings=[])
with self.assertRaisesRegexp(ValueError,
"paddings must be of length 1 or *"):
module(output_channels=self.output_channels,
kernel_shapes=self.kernel_shapes,
strides=self.strides,
paddings=[snt.SAME, snt.SAME])
with self.assertRaisesRegexp(KeyError,
"Invalid initializer keys.*"):
module(
output_channels=self.output_channels,
kernel_shapes=self.kernel_shapes,
strides=self.strides,
paddings=self.paddings,
initializers={"not_w": tf.truncated_normal_initializer(stddev=1.0)})
with self.assertRaisesRegexp(TypeError,
"Initializer for 'w' is not a callable "
"function or dictionary"):
module(output_channels=self.output_channels,
kernel_shapes=self.kernel_shapes,
strides=self.strides,
paddings=self.paddings,
initializers={"w": tf.zeros([1, 2, 3])})
with self.assertRaisesRegexp(KeyError,
"Invalid regularizer keys.*"):
module(
output_channels=self.output_channels,
kernel_shapes=self.kernel_shapes,
strides=self.strides,
paddings=self.paddings,
regularizers={"not_w": contrib_layers.l1_regularizer(scale=0.5)})
with self.assertRaisesRegexp(TypeError,
"Regularizer for 'w' is not a callable "
"function or dictionary"):
module(output_channels=self.output_channels,
kernel_shapes=self.kernel_shapes,
strides=self.strides,
paddings=self.paddings,
regularizers={"w": tf.zeros([1, 2, 3])})
with self.assertRaisesRegexp(TypeError,
"Input 'activation' must be callable"):
module(output_channels=self.output_channels,
kernel_shapes=self.kernel_shapes,
strides=self.strides,
paddings=self.paddings,
activation="not_a_function")
err = "output_channels must be iterable"
with self.assertRaisesRegexp(TypeError, err):
module(output_channels=42,
kernel_shapes=self.kernel_shapes,
strides=self.strides,
paddings=self.paddings)
err = "kernel_shapes must be iterable"
with self.assertRaisesRegexp(TypeError, err):
module(output_channels=self.output_channels,
kernel_shapes=None,
strides=self.strides,
paddings=self.paddings)
err = "strides must be iterable"
with self.assertRaisesRegexp(TypeError, err):
module(output_channels=self.output_channels,
kernel_shapes=self.kernel_shapes,
strides=True,
paddings=self.paddings)
err = "paddings must be iterable"
with self.assertRaisesRegexp(TypeError, err):
module(output_channels=self.output_channels,
kernel_shapes=self.kernel_shapes,
strides=self.strides,
paddings=lambda x: x + 42)
err = "use_bias must be either a bool or an iterable"
with self.assertRaisesRegexp(TypeError, err):
module(output_channels=self.output_channels,
kernel_shapes=self.kernel_shapes,
strides=self.strides,
paddings=self.paddings,
use_bias=2)
err = "use_batch_norm must be a boolean"
with self.assertRaisesRegexp(TypeError, err):
module(output_channels=self.output_channels,
kernel_shapes=self.kernel_shapes,
strides=self.strides,
paddings=self.paddings,
use_batch_norm=[True])
err = "Invalid data_format"
# Also checks that the error works with non-string types
for data_format in ["NHCW", 3]:
with self.assertRaisesRegexp(ValueError, err):
module(
output_channels=self.output_channels,
kernel_shapes=self.kernel_shapes,
strides=self.strides,
paddings=self.paddings,
data_format=data_format)
@parameterized.named_parameters(
("ConvNet2D", snt.nets.ConvNet2D),
("ConvNet2DTranspose",
functools.partial(snt.nets.ConvNet2DTranspose,
output_shapes=[[100, 100]])))
def testBatchNormBuildFlag(self, module):
model = module(output_channels=self.output_channels,
kernel_shapes=self.kernel_shapes,
strides=self.strides,
paddings=self.paddings,
use_batch_norm=True)
self.assertTrue(model.use_batch_norm)
input_to_net = tf.random_normal(dtype=tf.float32, shape=(1, 100, 100, 3))
# Check that an error is raised if we don't specify the is_training flag
err = "is_training flag must be explicitly specified"
with self.assertRaisesRegexp(ValueError, err):
model(input_to_net)
@parameterized.named_parameters(
("ConvNet2D", snt.nets.ConvNet2D),
("ConvNet2DTranspose",
functools.partial(snt.nets.ConvNet2DTranspose,
output_shapes=[[100, 100]])))
def testBatchNorm(self, module):
model = module(output_channels=self.output_channels,
kernel_shapes=self.kernel_shapes,
strides=self.strides,
paddings=self.paddings,
use_batch_norm=True)
self.assertTrue(model.use_batch_norm)
input_to_net = tf.random_normal(dtype=tf.float32, shape=(1, 100, 100, 3))
# Check Tensorflow flags work
is_training = tf.constant(False)
test_local_stats = tf.constant(False)
model(input_to_net,
is_training=is_training,
test_local_stats=test_local_stats)
# Check Python is_training flag works
model(input_to_net, is_training=False, test_local_stats=False)
model_variables = model.get_variables()
self.assertLen(model_variables, len(self.output_channels) * 3 - 1)
# Check that the appropriate moving statistics variables have been created.
self.assertTrue(
any("moving_variance" in var.name
for var in tf.global_variables()))
self.assertTrue(
any("moving_mean" in var.name
for var in tf.global_variables()))
@parameterized.named_parameters(
("ConvNet2D", snt.nets.ConvNet2D),
("ConvNet2DTranspose", functools.partial(snt.nets.ConvNet2DTranspose,
output_shapes=[[100, 100]])))
def testBatchNormConfig(self, module):
batch_norm_config = {
"scale": True,
}
model = module(output_channels=self.output_channels,
kernel_shapes=self.kernel_shapes,
strides=self.strides,
paddings=self.paddings,
use_batch_norm=True,
batch_norm_config=batch_norm_config)
input_to_net = tf.random_normal(dtype=tf.float32, shape=(1, 100, 100, 3))
model(input_to_net, is_training=True)
model_variables = model.get_variables()
self.assertLen(model_variables, len(self.output_channels) * 4 - 2)
@parameterized.named_parameters(
("ConvNet2D", snt.nets.ConvNet2D),
("ConvNet2DTranspose", functools.partial(snt.nets.ConvNet2DTranspose,
output_shapes=[[100, 100]])))
def testNoBias(self, module):
model = module(output_channels=self.output_channels,
kernel_shapes=self.kernel_shapes,
strides=self.strides,
paddings=self.paddings,
use_bias=False)
self.assertEqual(model.use_bias, (False,) * len(self.output_channels))
input_to_net = tf.random_normal(dtype=tf.float32, shape=(1, 100, 100, 3))
model(input_to_net)
model_variables = model.get_variables()
self.assertLen(model_variables, len(self.output_channels))
@parameterized.named_parameters(
("ConvNet2D", snt.nets.ConvNet2D),
("ConvNet2DTranspose", functools.partial(snt.nets.ConvNet2DTranspose,
output_shapes=[[100, 100]])))
def testNoBiasIterable(self, module):
use_bias = (True,) * (len(self.output_channels) - 1) + (False,)
model = module(output_channels=self.output_channels,
kernel_shapes=self.kernel_shapes,
strides=self.strides,
paddings=self.paddings,
use_bias=use_bias)
actual_use_biases = tuple(layer.has_bias for layer in model.layers)
self.assertEqual(model.use_bias, actual_use_biases)
self.assertEqual(use_bias, actual_use_biases)
model_transpose = model.transpose()
actual_use_biases = tuple(layer.has_bias
for layer in model_transpose.layers)
self.assertEqual(model_transpose.use_bias, actual_use_biases)
self.assertEqual(tuple(reversed(use_bias)), actual_use_biases)
@parameterized.named_parameters(("ConvNet2DNoBias", False, False),
("ConvNet2DBias", False, True),
("ConvNet2DTransposeNoBias", True, False),
("ConvNet2DTransposeBias", True, True))
def testRegularizersInRegularizationLosses(self, transpose, use_bias):
if transpose:
module = functools.partial(snt.nets.ConvNet2DTranspose,
output_shapes=[[100, 100]])
else:
module = snt.nets.ConvNet2D
if use_bias:
regularizers = {
"w": contrib_layers.l1_regularizer(scale=0.5),
"b": contrib_layers.l2_regularizer(scale=0.5)
}
else:
regularizers = {"w": contrib_layers.l1_regularizer(scale=0.5)}
model = module(output_channels=self.output_channels,
kernel_shapes=self.kernel_shapes,
strides=self.strides,
paddings=self.paddings,
use_bias=use_bias,
regularizers=regularizers)
input_to_net = tf.random_normal(dtype=tf.float32, shape=(1, 100, 100, 3))
model(input_to_net)
regularizers = tf.get_collection(
tf.GraphKeys.REGULARIZATION_LOSSES)
expected_num_regularizers = 3 * (2 if use_bias else 1)
self.assertLen(regularizers, expected_num_regularizers)
if not tf.executing_eagerly():
self.assertRegexpMatches(regularizers[0].name, ".*l1_regularizer.*")
if use_bias:
self.assertRegexpMatches(regularizers[1].name, ".*l2_regularizer.*")
@parameterized.named_parameters(
("ConvNet2D", snt.nets.ConvNet2D, False),
("ConvNet2DFinal", snt.nets.ConvNet2D, True),
("ConvNet2DTranspose",
functools.partial(snt.nets.ConvNet2DTranspose,
output_shapes=[[100, 100]]),
False),
("ConvNet2DTransposeFinal",
functools.partial(snt.nets.ConvNet2DTranspose,
output_shapes=[[100, 100]]),
True))
def testActivateFinal(self, module, activate_final):
model = module(output_channels=self.output_channels,
kernel_shapes=self.kernel_shapes,
strides=self.strides,
paddings=self.paddings,
activate_final=activate_final,
use_batch_norm=True,
use_bias=False)
self.assertEqual(activate_final, model.activate_final)
input_to_net = tf.random_normal(dtype=tf.float32, shape=(1, 100, 100, 3))
model(input_to_net, is_training=True)
model_variables = model.get_variables()
    # When activate_final is False, no batch norm is applied to the final
    # layer, so one fewer batch norm variable is created.
if activate_final:
self.assertLen(model_variables, len(self.output_channels) * 2)
else:
self.assertLen(model_variables, len(self.output_channels) * 2 - 1)
# Test transpose method's activate_final arg.
transposed_model_activate_final = model.transpose(activate_final=True)
transposed_model_no_activate_final = model.transpose(activate_final=False)
transposed_model_inherit_activate_final = model.transpose()
self.assertEqual(True, transposed_model_activate_final.activate_final)
self.assertEqual(False, transposed_model_no_activate_final.activate_final)
self.assertEqual(model.activate_final,
transposed_model_inherit_activate_final.activate_final)
@parameterized.parameters(
*itertools.product(
[snt.nets.ConvNet2D,
functools.partial(snt.nets.ConvNet2DTranspose,
output_shapes=[[100, 100]])],
["kernel_shapes", "strides", "paddings", "activation", "initializers",
"partitioners", "regularizers", "use_bias", "batch_norm_config"]))
def testTransposeDefaultParameter(self, module, param_name):
"""Tests if .transpose correctly chooses the default parameters.
Args:
module: The conv net class.
param_name: The name of the parameter to test.
"""
# For these parameters, the expected values are their reversed values
expected_reversed = ["kernel_shapes", "strides", "paddings", "use_bias"]
# We have to choose asymmetric parameter values here in order for the test
# to be effective. This is why we don't take the default ones.
model = module(output_channels=[2, 3, 4],
kernel_shapes=[[3, 3], [5, 5], [7, 7]],
strides=[[1, 1], [2, 2], [3, 3]],
paddings=[snt.SAME, snt.SAME, snt.VALID],
use_bias=[True, True, False])
# We don't pass the parameter on to .transpose, None should be the default
transpose_model = model.transpose()
if param_name in expected_reversed:
self.assertEqual(tuple(reversed(getattr(model, param_name))),
getattr(transpose_model, param_name))
else:
self.assertEqual(getattr(model, param_name),
getattr(transpose_model, param_name))
@parameterized.parameters(
*itertools.product(
[snt.nets.ConvNet2D,
functools.partial(snt.nets.ConvNet2DTranspose,
output_shapes=[[100, 100]])],
[("kernel_shapes", [[3, 3], [3, 3], [3, 3]]),
("strides", [[1, 1], [1, 1], [1, 1]]),
("paddings", [snt.SAME, snt.SAME, snt.SAME]),
("activation", tf.nn.tanh),
("initializers", {}),
("partitioners", {}),
("regularizers", {}),
("use_bias", [True, True, True]),
("normalization_ctor", snt.BatchNorm)]))
def testTransposePassThroughParameter(self, module, param_name_and_value):
"""Tests if .transpose correctly passes through the given parameters.
Args:
module: The conv net class.
param_name_and_value: Tuple consisting of the parameter name and value.
"""
param_name, param_value = param_name_and_value
# The given parameter values are all for three-layer networks. Changing
# the default parameters would therefore break this test. Thus, we choose
# fixed/independent parameters.
model = module(output_channels=[2, 3, 4],
kernel_shapes=[[3, 3], [5, 5], [7, 7]],
strides=[[1, 1], [2, 2], [3, 3]],
paddings=[snt.SAME, snt.SAME, snt.VALID],
use_bias=[True, True, False])
transpose_model = model.transpose(**{param_name: param_value})
if isinstance(param_value, collections.Mapping):
self.assertDictEqual(param_value, getattr(transpose_model, param_name))
elif isinstance(param_value, collections.Iterable):
self.assertCountEqual(param_value, getattr(transpose_model, param_name))
else:
self.assertEqual(param_value, getattr(transpose_model, param_name))
@parameterized.named_parameters(
("ConvNet2DNHWC", snt.nets.ConvNet2D, "NHWC"),
("ConvNet2DNCHW", snt.nets.ConvNet2D, "NCHW"),
("ConvNet2DTransposeNHWC", functools.partial(
snt.nets.ConvNet2DTranspose, output_shapes=[[100, 100]]), "NHWC"),
("ConvNet2DTransposeNCHW", functools.partial(
snt.nets.ConvNet2DTranspose, output_shapes=[[100, 100]]), "NCHW"),)
def testDataFormat(self, module, data_format):
net = module(
output_channels=self.output_channels,
kernel_shapes=self.kernel_shapes,
strides=self.strides,
paddings=self.paddings,
data_format=data_format)
input_height, input_width, input_channels = 100, 100, 3
batch_size = 10
final_channel = self.output_channels[-1]
if data_format == "NHWC":
input_shape = [batch_size, input_height, input_width, input_channels]
expected_output_shape = [
batch_size, input_height, input_width, final_channel
]
else:
input_shape = [batch_size, input_channels, input_height, input_width]
expected_output_shape = [
batch_size, final_channel, input_height, input_width
]
input_to_net = tf.random_normal(dtype=tf.float32, shape=input_shape)
if tf.executing_eagerly() and data_format == "NCHW":
if module == snt.nets.ConvNet2D:
expected_exception = tf.errors.UnimplementedError
else:
expected_exception = tf.errors.InvalidArgumentError
with self.assertRaisesRegexp(expected_exception, "only supports.*NHWC"):
output = net(input_to_net)
else:
output = net(input_to_net)
self.assertEqual(output.get_shape().as_list(), expected_output_shape)
@parameterized.parameters(
# Regular Layer Normalization
{"conv_ctor": snt.nets.ConvNet2D,
"norm_ctor": "snt.LayerNorm",
"norm_kwargs": {"scale": False, "offset": False}},
{"conv_ctor": functools.partial(
snt.nets.ConvNet2DTranspose, output_shapes=[[48, 64]]),
"norm_ctor": snt.LayerNorm,
"norm_kwargs": {"scale": False, "offset": False}},
# Instance normalization: sum over spatial dimensions but not channels.
{"conv_ctor": snt.nets.ConvNet2D,
"norm_ctor": "LayerNorm",
"norm_kwargs": {"scale": False, "offset": False, "axis": [1, 2]}},
{"conv_ctor": functools.partial(
snt.nets.ConvNet2DTranspose, output_shapes=[[48, 64]]),
"norm_ctor": snt.LayerNorm,
"norm_kwargs": {"scale": False, "offset": False, "axis": [1, 2]}},
)
def testNormalizations(self, conv_ctor, norm_ctor, norm_kwargs):
if tf.executing_eagerly():
self.skipTest("Cannot test normalization correctness in Eager.")
module = conv_ctor(
output_channels=[16, 16],
kernel_shapes=(3,),
strides=(1,),
paddings=("SAME",),
normalization_ctor=norm_ctor,
normalization_kwargs=norm_kwargs,
normalize_final=True,
activate_final=False) # No final activation, that would un-normalize.
inputs = tf.random_uniform([16, 48, 64, 3])
output = module(inputs)
with tf.train.SingularMonitoredSession() as session:
output_np = session.run(output)
# Convert the output into something where all the dimensions that should be
# jointly normalized are combined to be on axis=1.
if "axis" in norm_kwargs and norm_kwargs["axis"] == [1, 2]:
# Check for instance normalization - combine spatial dimensions.
output_np = np.reshape(output_np, [16, -1, 3])
else:
# Check for layer normalization - combine all non-batch dimensions.
output_np = np.reshape(output_np, [16, -1])
mean = np.mean(output_np, axis=1)
std_dev = np.std(output_np, axis=1)
# High tolerance - summing across big images, this normalization is fairly
# approximate.
self.assertAllClose(mean, np.zeros_like(mean), atol=3e-2)
self.assertAllClose(std_dev, np.ones_like(std_dev), atol=2e-2)
@parameterized.parameters(
(snt.nets.ConvNet2D,
{"use_batch_norm": True, "normalization_ctor": snt.LayerNorm},
ValueError, "if use_batch_norm is specified"),
(functools.partial(snt.nets.ConvNet2DTranspose, output_shapes=[[48, 64]]),
{"use_batch_norm": True, "normalization_ctor": "LayerNorm"},
ValueError, "if use_batch_norm is specified"),)
def testNormalizationBadConfig(self, conv_ctor, conv_kwargs,
error_type, error_message):
"""Old and new normalization flags should not be combined."""
with self.assertRaisesRegexp(error_type, error_message):
conv_ctor(
output_channels=[16, 16],
kernel_shapes=(3,),
strides=(1,),
paddings=("SAME",),
**conv_kwargs)
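# A small numpy sketch (illustrative only) mirroring the check used in
# testNormalizations above: layer normalization standardizes all non-batch
# dimensions jointly, whereas instance normalization (axis=[1, 2]) standardizes
# only the spatial dimensions, keeping per-channel statistics separate. The
# `outputs_np` argument is assumed to be a [batch, height, width, channels]
# numpy array produced by a normalized network.
def _per_example_moments(outputs_np, instance_norm=False):
  batch_size = outputs_np.shape[0]
  if instance_norm:
    # Combine only the spatial dimensions; channels keep their own axis.
    flat = np.reshape(outputs_np, [batch_size, -1, outputs_np.shape[-1]])
  else:
    # Combine all non-batch dimensions.
    flat = np.reshape(outputs_np, [batch_size, -1])
  # For a correctly normalized output, both moments are ~0 and ~1 along axis 1.
  return np.mean(flat, axis=1), np.std(flat, axis=1)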
@contrib_eager.run_all_tests_in_graph_and_eager_modes
class ConvNet2DTest(parameterized.TestCase, tf.test.TestCase):
def setUp(self):
super(ConvNet2DTest, self).setUp()
self.output_channels = [2, 3, 4]
self.kernel_shapes = [[3, 3]]
self.strides = [1]
self.rates = [2]
self.paddings = [snt.SAME]
def testConstructor(self):
net = snt.nets.ConvNet2D(output_channels=self.output_channels,
kernel_shapes=self.kernel_shapes,
rates=self.rates,
strides=self.strides,
paddings=self.paddings)
self.assertLen(net.layers, len(self.output_channels))
for i, layer in enumerate(net.layers):
self.assertEqual(layer.output_channels, self.output_channels[i])
self.assertEqual(layer.stride,
(1,) + fill_shape(self.strides[0], 2) + (1,))
self.assertEqual(layer.kernel_shape, fill_shape(self.kernel_shapes[0], 2))
self.assertEqual(layer.padding, self.paddings[0])
self.assertEqual(layer.rate, (self.rates[0], self.rates[0]))
self.assertEqual(layer.output_channels, net.output_channels[i])
self.assertEqual(layer.stride,
(1,) + fill_shape(net.strides[i], 2) + (1,))
self.assertEqual(layer.kernel_shape, fill_shape(net.kernel_shapes[i], 2))
self.assertEqual(layer.padding, net.paddings[i])
def testTranspose(self):
with tf.variable_scope("scope1"):
net = snt.nets.ConvNet2D(output_channels=self.output_channels,
kernel_shapes=self.kernel_shapes,
strides=self.strides,
paddings=self.paddings,
name="conv_net_2d")
err = "Iterable output_channels length must match the number of layers"
with self.assertRaisesRegexp(ValueError, err):
net.transpose(output_channels=[42] * 18)
with tf.variable_scope("scope2"):
net_transpose = net.transpose()
self.assertEqual("scope1/conv_net_2d", net.scope_name)
self.assertEqual("conv_net_2d", net.module_name)
self.assertEqual("scope2/conv_net_2d_transpose", net_transpose.scope_name)
self.assertEqual("conv_net_2d_transpose", net_transpose.module_name)
input_shape = [10, 100, 100, 3]
input_to_net = tf.random_normal(dtype=tf.float32, shape=input_shape)
    # Tests that trying to connect the transposed network before connecting the
    # original net raises an error. The reason is that the output_shapes and
    # output_channels are lazily evaluated and not yet known.
with self.assertRaisesRegexp(snt.Error,
"Variables in {} not instantiated yet, "
"__call__ the module first.".format(
net.layers[-1].scope_name)):
net_transpose(input_to_net)
net_transpose = net.transpose(name="another_net_transpose")
net_out = net(input_to_net, is_training=True)
self.assertEqual(net.input_shape, tuple(input_shape))
net_transposed_output = net_transpose(net_out)
self.assertEqual(net_transposed_output.get_shape(),
input_to_net.get_shape())
for i in range(len(net.layers)):
self.assertEqual(net_transpose.layers[i].output_shape,
net.layers[-1 - i].input_shape[1:-1])
self.assertEqual(net_transpose.layers[i].output_channels,
net.layers[-1 - i].input_shape[-1])
def testCustomGetterTranspose(self):
"""Tests passing a custom getter to the transpose method."""
conv2d = snt.nets.ConvNet2D(output_channels=self.output_channels,
kernel_shapes=self.kernel_shapes,
strides=self.strides,
paddings=self.paddings)
input_shape = [10, 100, 100, 3]
output_of_conv2d = conv2d(tf.zeros(dtype=tf.float32, shape=input_shape))
# We'll be able to check if the custom_getter was used by checking for
# gradients.
conv2d_transpose = conv2d.transpose(
custom_getter=snt.custom_getters.stop_gradient)
if tf.executing_eagerly():
with tf.GradientTape() as tape:
output_of_transpose = conv2d_transpose(output_of_conv2d)
conv2d_transpose_vars = conv2d_transpose.get_variables()
self.assertTrue(len(conv2d_transpose_vars))
for tensor in tape.gradient(output_of_transpose, conv2d_transpose_vars):
self.assertIsNone(tensor)
else:
output_of_transpose = conv2d_transpose(output_of_conv2d)
conv2d_transpose_vars = conv2d_transpose.get_variables()
self.assertTrue(len(conv2d_transpose_vars))
for tensor in tf.gradients(output_of_transpose, conv2d_transpose_vars):
self.assertIsNone(tensor)
def testNoCustomGetterTranspose(self):
"""Tests not passing a custom getter to the transpose method."""
conv2d = snt.nets.ConvNet2D(output_channels=self.output_channels,
kernel_shapes=self.kernel_shapes,
strides=self.strides,
paddings=self.paddings,
custom_getter=snt.custom_getters.stop_gradient)
input_shape = [10, 100, 100, 3]
input_to_conv2d = tf.zeros(dtype=tf.float32, shape=input_shape)
if tf.executing_eagerly():
with tf.GradientTape() as tape0:
output_of_conv2d = conv2d(input_to_conv2d)
# Create a transpose without a custom getter
conv2d_transpose = conv2d.transpose()
with tf.GradientTape() as tape1:
output_of_transpose = conv2d_transpose(output_of_conv2d)
conv2d_vars = conv2d.get_variables()
conv2d_grads = tape0.gradient(output_of_conv2d, conv2d_vars)
conv2d_transpose_vars = conv2d_transpose.get_variables()
conv2d_transpose_grads = tape1.gradient(output_of_transpose,
conv2d_transpose_vars)
else:
output_of_conv2d = conv2d(input_to_conv2d)
conv2d_vars = conv2d.get_variables()
conv2d_grads = tf.gradients(output_of_conv2d, conv2d_vars)
# Create a transpose without a custom getter
conv2d_transpose = conv2d.transpose()
output_of_transpose = conv2d_transpose(output_of_conv2d)
conv2d_transpose_vars = conv2d_transpose.get_variables()
conv2d_transpose_grads = tf.gradients(output_of_transpose,
conv2d_transpose_vars)
# Sanity check that the custom getter was indeed used for the conv net.
self.assertTrue(len(conv2d_vars))
for tensor in conv2d_grads:
self.assertIsNone(tensor)
# Check the transpose did not use the custom getter that was passed to the
# original conv net.
self.assertTrue(len(conv2d_transpose_vars))
for tensor in conv2d_transpose_grads:
self.assertIsNotNone(tensor)
def testVariableMap(self):
"""Tests for regressions in variable names."""
use_bias = True
use_batch_norm = True
var_names_w = [
u"conv_net_2d/conv_2d_0/w:0",
u"conv_net_2d/conv_2d_1/w:0",
u"conv_net_2d/conv_2d_2/w:0",
]
var_names_b = [
u"conv_net_2d/conv_2d_0/b:0",
u"conv_net_2d/conv_2d_1/b:0",
u"conv_net_2d/conv_2d_2/b:0",
]
var_names_bn = [
u"conv_net_2d/batch_norm_0/beta:0",
u"conv_net_2d/batch_norm_1/beta:0",
]
correct_variable_names = set(var_names_w + var_names_b + var_names_bn)
module = snt.nets.ConvNet2D(output_channels=self.output_channels,
kernel_shapes=self.kernel_shapes,
rates=self.rates,
strides=self.strides,
paddings=self.paddings,
use_bias=use_bias,
use_batch_norm=use_batch_norm)
input_shape = [10, 100, 100, 3]
input_to_net = tf.random_normal(dtype=tf.float32, shape=input_shape)
_ = module(input_to_net, is_training=True)
variable_names = [var.name for var in module.get_variables()]
self.assertEqual(set(variable_names), correct_variable_names)
def testPartitioners(self):
if tf.executing_eagerly():
self.skipTest("Eager does not support partitioned variables.")
partitioners = {
"w": tf.variable_axis_size_partitioner(10),
"b": tf.variable_axis_size_partitioner(8),
}
module = snt.nets.ConvNet2D(output_channels=self.output_channels,
kernel_shapes=self.kernel_shapes,
rates=self.rates,
strides=self.strides,
paddings=self.paddings,
partitioners=partitioners)
input_shape = [10, 100, 100, 3]
input_to_net = tf.placeholder(tf.float32, shape=input_shape)
_ = module(input_to_net)
for layer in module._layers:
self.assertEqual(type(layer.w), variables.PartitionedVariable)
self.assertEqual(type(layer.b), variables.PartitionedVariable)
def testCustomGetter(self):
custom_getter = snt.custom_getters.Context(snt.custom_getters.stop_gradient)
module = snt.nets.ConvNet2D(output_channels=self.output_channels,
kernel_shapes=self.kernel_shapes,
rates=self.rates,
strides=self.strides,
paddings=self.paddings,
custom_getter=custom_getter)
input_shape = [10, 100, 100, 3]
input_to_net = tf.random_normal(dtype=tf.float32, shape=input_shape)
if tf.executing_eagerly():
with tf.GradientTape() as tape0:
out0 = module(input_to_net)
with tf.GradientTape() as tape1:
with custom_getter:
out1 = module(input_to_net)
all_vars = tf.trainable_variables()
out0_grads = tape0.gradient(out0, all_vars)
out1_grads = tape1.gradient(out1, all_vars)
else:
out0 = module(input_to_net)
with custom_getter:
out1 = module(input_to_net)
all_vars = tf.trainable_variables()
out0_grads = tf.gradients(out0, all_vars)
out1_grads = tf.gradients(out1, all_vars)
for grad in out0_grads:
self.assertNotEqual(None, grad)
self.assertEqual([None] * len(out1_grads), out1_grads)
def testIncorrectRatesLength(self):
rates = [1, 2]
self.assertNotEqual(len(rates), len(self.output_channels))
with self.assertRaisesRegexp(
ValueError, "rates must be of length 1 * or"):
_ = snt.nets.ConvNet2D(output_channels=self.output_channels,
kernel_shapes=self.kernel_shapes,
rates=rates,
strides=self.strides,
paddings=self.paddings)
@parameterized.parameters(
{"normalization_ctor": None,
"normalization_kwargs": {}},
{"normalization_ctor": snt.LayerNorm,
"normalization_kwargs": {}},
{"normalization_ctor": snt.LayerNorm,
"normalization_kwargs": {"axis": [1, 2]}}, # Instance Norm.
{"normalization_ctor": snt.BatchNorm,
"normalization_kwargs": {}})
def testAlwaysProvideIsTraining(self, normalization_ctor,
normalization_kwargs):
# Test whether we can always provide is_training=True, even when it is
# not supported by the underlying normalization constructor.
mod = snt.nets.ConvNet2D(
output_channels=[32, 32],
kernel_shapes=(3,),
strides=(1,),
paddings=("SAME",),
normalization_ctor=normalization_ctor,
normalization_kwargs=normalization_kwargs)
input_ = tf.random_uniform([16, 48, 48, 3])
_ = mod(input_, is_training=True)
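# A minimal sketch (illustrative only, using the same snt.nets API as the tests
# above) of why a transposed network can only be connected after the original
# one: the transposed output_channels are deferred callables that read the
# input shapes of the original layers, and those shapes only exist once the
# original network has been built.
def _connect_transposed_after_original():
  net = snt.nets.ConvNet2D(output_channels=[2, 3],
                           kernel_shapes=[[3, 3]],
                           strides=[1],
                           paddings=[snt.SAME])
  net_transpose = net.transpose()
  inputs = tf.zeros([1, 8, 8, 3], dtype=tf.float32)
  net_out = net(inputs)           # Builds the original layers first...
  return net_transpose(net_out)   # ...so the deferred shapes can now resolve.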
@contrib_eager.run_all_tests_in_graph_and_eager_modes
class ConvNet2DTransposeTest(parameterized.TestCase, tf.test.TestCase):
def setUp(self):
super(ConvNet2DTransposeTest, self).setUp()
self.output_channels = [2, 3, 4]
self.output_shapes = [[100, 100]]
self.kernel_shapes = [[3, 3]]
self.strides = [1]
self.paddings = [snt.SAME]
def testConstructor(self):
with self.assertRaisesRegexp(ValueError,
"output_shapes must be of length 1 or *"):
snt.nets.ConvNet2DTranspose(output_channels=self.output_channels,
output_shapes=[],
kernel_shapes=self.kernel_shapes,
strides=self.strides,
paddings=self.paddings)
with self.assertRaisesRegexp(ValueError,
"output_shapes must be of length 1 or *"):
snt.nets.ConvNet2DTranspose(output_channels=self.output_channels,
output_shapes=[[1, 2], [1, 2]],
kernel_shapes=self.kernel_shapes,
strides=[],
paddings=self.paddings)
with self.assertRaisesRegexp(KeyError,
"Invalid initializer keys.*"):
snt.nets.ConvNet2DTranspose(
output_channels=self.output_channels,
output_shapes=self.output_shapes,
kernel_shapes=self.kernel_shapes,
strides=self.strides,
paddings=self.paddings,
initializers={"not_w": tf.truncated_normal_initializer(stddev=1.0)})
net = snt.nets.ConvNet2DTranspose(output_channels=self.output_channels,
output_shapes=self.output_shapes,
kernel_shapes=self.kernel_shapes,
strides=self.strides,
paddings=self.paddings)
self.assertEqual(net.output_shapes,
tuple(self.output_shapes) * len(self.output_channels))
self.assertLen(net.layers, len(self.output_channels))
for i, layer in enumerate(net.layers):
self.assertEqual(layer.output_channels, self.output_channels[i])
self.assertEqual(layer.stride,
(1,) + fill_shape(self.strides[0], 2) + (1,))
self.assertEqual(layer.kernel_shape, fill_shape(self.kernel_shapes[0], 2))
self.assertEqual(layer.padding, self.paddings[0])
self.assertEqual(layer.output_channels, net.output_channels[i])
self.assertEqual(layer.stride,
(1,) + fill_shape(net.strides[i], 2) + (1,))
self.assertEqual(layer.kernel_shape, fill_shape(net.kernel_shapes[i], 2))
self.assertEqual(layer.padding, net.paddings[i])
with self.assertRaisesRegexp(TypeError, "output_shapes must be iterable"):
snt.nets.ConvNet2DTranspose(output_channels=self.output_channels,
output_shapes=False,
kernel_shapes=self.kernel_shapes,
strides=self.strides,
paddings=self.paddings)
def testTranspose(self):
net = snt.nets.ConvNet2DTranspose(output_channels=self.output_channels,
output_shapes=self.output_shapes,
kernel_shapes=self.kernel_shapes,
strides=self.strides,
paddings=self.paddings)
err = "Iterable output_channels length must match the number of layers"
with self.assertRaisesRegexp(ValueError, err):
net.transpose(output_channels=[42] * 18)
net_transpose = net.transpose()
input_shape = [10, 100, 100, 3]
input_to_net = tf.random_normal(dtype=tf.float32, shape=input_shape)
    # Tests that trying to connect the transposed network before connecting the
    # original net raises an error. The reason is that the output_shapes and
    # output_channels are lazily evaluated and not yet known.
with self.assertRaisesRegexp(snt.Error,
"Variables in {} not instantiated yet, "
"__call__ the module first.".format(
net.layers[-1].scope_name)):
net_transpose(input_to_net)
net_transpose = net.transpose(name="another_net_transpose")
net_out = net(input_to_net, is_training=True)
net_transposed_output = net_transpose(net_out)
self.assertEqual(net_transposed_output.get_shape(),
input_to_net.get_shape())
for i in range(len(net.layers)):
self.assertEqual(net_transpose.layers[i].input_shape[1:-1],
net.layers[-1 - i].output_shape)
self.assertEqual(net_transpose.layers[i].output_channels,
net.layers[-1 - i].input_shape[-1])
init = tf.global_variables_initializer()
self.evaluate(init)
self.evaluate(net_transposed_output)
def testPartitioners(self):
if tf.executing_eagerly():
self.skipTest("Eager does not support partitioned variables.")
partitioners = {
"w": tf.variable_axis_size_partitioner(10),
"b": tf.variable_axis_size_partitioner(8),
}
module = snt.nets.ConvNet2DTranspose(output_channels=self.output_channels,
output_shapes=self.output_shapes,
kernel_shapes=self.kernel_shapes,
strides=self.strides,
paddings=self.paddings,
partitioners=partitioners)
input_shape = [10, 100, 100, 3]
input_to_net = tf.placeholder(tf.float32, shape=input_shape)
_ = module(input_to_net)
for layer in module._layers:
self.assertEqual(type(layer.w), variables.PartitionedVariable)
self.assertEqual(type(layer.b), variables.PartitionedVariable)
@parameterized.parameters(
{"normalization_ctor": None,
"normalization_kwargs": {}},
{"normalization_ctor": snt.LayerNorm,
"normalization_kwargs": {}},
{"normalization_ctor": snt.LayerNorm,
"normalization_kwargs": {"axis": [1, 2]}}, # Instance Norm.
{"normalization_ctor": snt.BatchNorm,
"normalization_kwargs": {}})
def testAlwaysProvideIsTraining(self, normalization_ctor,
normalization_kwargs):
# Test whether we can always provide is_training=True, even when it is
# not supported by the underlying normalization constructor.
mod = snt.nets.ConvNet2DTranspose(
output_shapes=self.output_shapes,
output_channels=self.output_channels,
kernel_shapes=self.kernel_shapes,
strides=self.strides,
paddings=self.paddings,
normalization_ctor=normalization_ctor,
normalization_kwargs=normalization_kwargs)
input_ = tf.random_uniform([16, 100, 100, 3])
_ = mod(input_, is_training=True)
def testCustomGetter(self):
custom_getter = snt.custom_getters.Context(snt.custom_getters.stop_gradient)
module = snt.nets.ConvNet2DTranspose(
output_shapes=self.output_shapes,
output_channels=self.output_channels,
kernel_shapes=self.kernel_shapes,
strides=self.strides,
paddings=self.paddings,
custom_getter=custom_getter)
input_shape = [10, 100, 100, 3]
input_to_net = tf.random_normal(dtype=tf.float32, shape=input_shape)
if tf.executing_eagerly():
with tf.GradientTape() as tape0:
out0 = module(input_to_net)
with tf.GradientTape() as tape1:
with custom_getter:
out1 = module(input_to_net)
all_vars = tf.trainable_variables()
out0_grads = tape0.gradient(out0, all_vars)
out1_grads = tape1.gradient(out1, all_vars)
else:
out0 = module(input_to_net)
with custom_getter:
out1 = module(input_to_net)
all_vars = tf.trainable_variables()
out0_grads = tf.gradients(out0, all_vars)
out1_grads = tf.gradients(out1, all_vars)
for grad in out0_grads:
self.assertIsNotNone(grad)
self.assertEqual([None] * len(out1_grads), out1_grads)
def testCustomGetterTranspose(self):
"""Tests passing a custom getter to the transpose method."""
conv2d_t = snt.nets.ConvNet2DTranspose(
output_shapes=self.output_shapes,
output_channels=self.output_channels,
kernel_shapes=self.kernel_shapes,
strides=self.strides,
paddings=self.paddings)
input_shape = [10, 100, 100, 3]
output_of_conv2d_t = conv2d_t(tf.zeros(dtype=tf.float32, shape=input_shape))
# We'll be able to check if the custom_getter was used by checking for
# gradients.
conv2d = conv2d_t.transpose(custom_getter=snt.custom_getters.stop_gradient)
if tf.executing_eagerly():
with tf.GradientTape() as tape:
output_of_conv = conv2d(output_of_conv2d_t)
conv2d_vars = conv2d.get_variables()
self.assertTrue(len(conv2d_vars))
for tensor in tape.gradient(output_of_conv, conv2d_vars):
self.assertIsNone(tensor)
else:
output_of_conv = conv2d(output_of_conv2d_t)
conv2d_vars = conv2d.get_variables()
self.assertTrue(len(conv2d_vars))
for tensor in tf.gradients(output_of_conv, conv2d_vars):
self.assertIsNone(tensor)
def testNoCustomGetterTranspose(self):
"""Tests not passing a custom getter to the transpose method."""
conv2d_t = snt.nets.ConvNet2DTranspose(
output_shapes=self.output_shapes,
output_channels=self.output_channels,
kernel_shapes=self.kernel_shapes,
strides=self.strides,
paddings=self.paddings,
custom_getter=snt.custom_getters.stop_gradient)
input_shape = [10, 100, 100, 3]
input_to_conv2d_t = tf.zeros(dtype=tf.float32, shape=input_shape)
if tf.executing_eagerly():
with tf.GradientTape() as tape0:
output_of_conv2d_t = conv2d_t(input_to_conv2d_t)
# Create a transpose without a custom getter
conv2d = conv2d_t.transpose()
with tf.GradientTape() as tape1:
output_of_conv = conv2d(output_of_conv2d_t)
conv2d_t_vars = conv2d_t.get_variables()
conv2d_t_grads = tape0.gradient(output_of_conv2d_t, conv2d_t_vars)
conv2d_vars = conv2d.get_variables()
conv2d_grads = tape1.gradient(output_of_conv, conv2d_vars)
else:
output_of_conv2d_t = conv2d_t(input_to_conv2d_t)
conv2d_t_vars = conv2d_t.get_variables()
conv2d_t_grads = tf.gradients(output_of_conv2d_t, conv2d_t_vars)
# Create a transpose without a custom getter
conv2d = conv2d_t.transpose()
output_of_conv = conv2d(output_of_conv2d_t)
conv2d_vars = conv2d.get_variables()
conv2d_grads = tf.gradients(output_of_conv, conv2d_vars)
# Sanity check that the custom getter was indeed used for the conv net.
self.assertTrue(len(conv2d_t_vars))
for tensor in conv2d_t_grads:
self.assertIsNone(tensor)
# Check the transpose did not use the custom getter that was passed to the
# original conv net.
self.assertTrue(len(conv2d_vars))
for tensor in conv2d_grads:
self.assertIsNotNone(tensor)
@contrib_eager.run_all_tests_in_graph_and_eager_modes
class DefunTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.named_parameters(
("ConvNet2D", snt.nets.ConvNet2D),
("ConvNet2DTranspose",
functools.partial(snt.nets.ConvNet2DTranspose,
output_shapes=[[100, 100]])))
def testDefun(self, module):
model = module(output_channels=[2, 3, 4],
kernel_shapes=[[3, 3]],
strides=[1],
paddings=[snt.SAME])
model = contrib_eager.defun(model)
input_to_net = tf.random_normal([1, 100, 100, 3])
output = model(input_to_net)
self.assertListEqual(output.shape.as_list(), [1, 100, 100, 4])
if __name__ == "__main__":
tf.test.main()
| sonnet-1 | sonnet/python/modules/nets/convnet_test.py |
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Common network architectures implemented as Sonnet modules."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sonnet.python.modules.nets.alexnet import AlexNet
from sonnet.python.modules.nets.alexnet import AlexNetFull
from sonnet.python.modules.nets.alexnet import AlexNetMini
from sonnet.python.modules.nets.convnet import ConvNet2D
from sonnet.python.modules.nets.convnet import ConvNet2DTranspose
from sonnet.python.modules.nets.dilation import Dilation
from sonnet.python.modules.nets.dilation import identity_kernel_initializer
from sonnet.python.modules.nets.dilation import noisy_identity_kernel_initializer
from sonnet.python.modules.nets.mlp import MLP
from sonnet.python.modules.nets.transformer import CompressiveTransformer
from sonnet.python.modules.nets.transformer import future_mask
from sonnet.python.modules.nets.transformer import TransformerTower
from sonnet.python.modules.nets.transformer import TransformerXL
from sonnet.python.modules.nets.vqvae import VectorQuantizer
from sonnet.python.modules.nets.vqvae import VectorQuantizerEMA
| sonnet-1 | sonnet/python/modules/nets/__init__.py |
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for snt.nets.alexnet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
# Dependency imports
from absl.testing import parameterized
import numpy as np
import sonnet as snt
import tensorflow.compat.v1 as tf
from tensorflow.contrib import layers as contrib_layers
from tensorflow.contrib.eager.python import tfe as contrib_eager
from tensorflow.python.ops import variables
@contrib_eager.run_all_tests_in_graph_and_eager_modes
class AlexNetTest(parameterized.TestCase, tf.test.TestCase):
def testCalcMinSize(self):
"""Test the minimum input size calculator."""
net = snt.nets.AlexNetMini()
self.assertEqual(net._calc_min_size([(None, (3, 1), None)]), 3)
self.assertEqual(net._calc_min_size([(None, (3, 1), (3, 2))]), 5)
self.assertEqual(net._calc_min_size([(None, (3, 1), (3, 2)),
(None, (3, 2), (5, 2))]), 25)
@parameterized.named_parameters(
("full", functools.partial(snt.nets.AlexNet, mode=snt.nets.AlexNet.FULL)),
("mini", functools.partial(snt.nets.AlexNet, mode=snt.nets.AlexNet.MINI)),
("full_module", snt.nets.AlexNetFull),
("mini_module", snt.nets.AlexNetMini),
)
def testModes(self, module):
"""Test that each mode can be instantiated."""
keep_prob = 0.7
net = module()
input_shape = [1, net._min_size, net._min_size, 3]
inputs = tf.ones(dtype=tf.float32, shape=input_shape)
net(inputs, keep_prob, is_training=True)
@parameterized.named_parameters(
("all_layers", True),
("conv_only", False))
def testBatchNorm(self, bn_on_fc_layers):
"""Test that batch norm can be instantiated."""
net = snt.nets.AlexNet(
mode=snt.nets.AlexNet.FULL,
use_batch_norm=True,
bn_on_fc_layers=bn_on_fc_layers)
input_shape = [net._min_size, net._min_size, 3]
inputs = tf.ones(dtype=tf.float32, shape=[1] + input_shape)
output = net(inputs, is_training=True)
self.evaluate(tf.global_variables_initializer())
self.evaluate(output)
# Check that an error is raised if we don't specify the is_training flag
err = "is_training flag must be explicitly specified"
with self.assertRaisesRegexp(ValueError, err):
net(inputs)
# Check Tensorflow flags work
is_training = tf.constant(False)
test_local_stats = tf.constant(False)
net(inputs,
is_training=is_training,
test_local_stats=test_local_stats)
# Check Python is_training flag works
net(inputs, is_training=False, test_local_stats=False)
# Check that the appropriate moving statistics variables have been created.
variance_name = "alex_net/batch_norm/moving_variance:0"
mean_name = "alex_net/batch_norm/moving_mean:0"
var_names = [var.name for var in tf.global_variables()]
self.assertIn(variance_name, var_names)
self.assertIn(mean_name, var_names)
if bn_on_fc_layers:
self.assertEqual(35, len(var_names))
else:
self.assertEqual(29, len(var_names))
def testBatchNormConfig(self):
batch_norm_config = {
"scale": True,
}
model = snt.nets.AlexNetFull(use_batch_norm=True,
batch_norm_config=batch_norm_config)
input_to_net = tf.ones(dtype=tf.float32, shape=(1, 224, 224, 3))
model(input_to_net, is_training=True)
model_variables = model.get_variables()
self.assertEqual(len(model_variables), 6 * 4)
def testNoDropoutInTesting(self):
"""An exception should be raised if trying to use dropout when testing."""
net = snt.nets.AlexNetFull()
input_shape = [net._min_size, net._min_size, 3]
inputs = tf.ones(dtype=tf.float32, shape=[1] + input_shape)
with self.assertRaisesRegexp(tf.errors.InvalidArgumentError, "keep_prob"):
output = net(inputs, keep_prob=0.7, is_training=False)
self.evaluate(tf.global_variables_initializer())
self.evaluate(output)
# No exception if keep_prob=1
output = net(inputs, keep_prob=1.0, is_training=False)
self.evaluate(output)
def testInputTooSmall(self):
"""Check that an error is raised if the input image is too small."""
keep_prob = 0.7
net = snt.nets.AlexNetFull()
input_shape = [1, net._min_size, net._min_size, 1]
inputs = tf.ones(dtype=tf.float32, shape=input_shape)
net(inputs, keep_prob, is_training=True)
with self.assertRaisesRegexp(snt.IncompatibleShapeError,
"Image shape too small: (.*?, .*?) < .*?"):
input_shape = [1, net._min_size - 1, net._min_size - 1, 1]
inputs = tf.ones(dtype=tf.float32, shape=input_shape)
net(inputs, keep_prob, is_training=True)
def testSharing(self):
"""Check that the correct number of variables are made when sharing."""
net = snt.nets.AlexNetMini()
inputs1 = tf.ones(dtype=tf.float32, shape=[1, 64, 64, 3])
inputs2 = tf.ones(dtype=tf.float32, shape=[1, 64, 64, 3])
keep_prob1 = 0.7
keep_prob2 = 0.5
net(inputs1, keep_prob1, is_training=True)
net(inputs2, keep_prob2, is_training=True)
self.assertLen(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES),
7 * 2)
model_variables = net.get_variables()
self.assertEqual(len(model_variables), 7 * 2)
def testInvalidInitializationParameters(self):
err = "Invalid initializer keys.*"
with self.assertRaisesRegexp(KeyError, err):
snt.nets.AlexNetMini(
initializers={"not_w": tf.truncated_normal_initializer(stddev=1.0)})
err = "Initializer for 'w' is not a callable function"
with self.assertRaisesRegexp(TypeError, err):
snt.nets.AlexNetMini(
initializers={"w": tf.zeros([1, 2, 3])})
def testInvalidRegularizationParameters(self):
with self.assertRaisesRegexp(KeyError, "Invalid regularizer keys.*"):
snt.nets.AlexNetMini(
regularizers={"not_w": contrib_layers.l1_regularizer(scale=0.5)})
err = "Regularizer for 'w' is not a callable function"
with self.assertRaisesRegexp(TypeError, err):
snt.nets.AlexNetMini(
regularizers={"w": tf.zeros([1, 2, 3])})
def testRegularizersInRegularizationLosses(self):
regularizers = {
"w": contrib_layers.l1_regularizer(scale=0.5),
"b": contrib_layers.l2_regularizer(scale=0.5)
}
alex_net = snt.nets.AlexNetMini(
regularizers=regularizers, name="alexnet1")
input_shape = [alex_net._min_size, alex_net._min_size, 3]
inputs = tf.ones(dtype=tf.float32, shape=[1] + input_shape)
alex_net(inputs)
graph_regularizers = tf.get_collection(
tf.GraphKeys.REGULARIZATION_LOSSES)
alex_net_conv_layers = len(alex_net.conv_modules)
self.assertEqual(len(graph_regularizers), 2 * alex_net_conv_layers)
def testInitializers(self):
initializers = {
"w": tf.constant_initializer(1.5),
"b": tf.constant_initializer(2.5),
}
alex_net = snt.nets.AlexNetFull(initializers=initializers)
input_shape = [1, alex_net.min_input_size, alex_net.min_input_size, 3]
inputs = tf.ones(dtype=tf.float32, shape=input_shape)
alex_net(inputs)
init = tf.global_variables_initializer()
self.evaluate(init)
for module in alex_net.conv_modules + alex_net.linear_modules:
w_v, b_v = self.evaluate([module.w, module.b])
self.assertAllClose(w_v, 1.5 * np.ones(w_v.shape))
self.assertAllClose(b_v, 2.5 * np.ones(b_v.shape))
def testPartitioners(self):
if tf.executing_eagerly():
self.skipTest("Eager does not support partitioned variables.")
partitioners = {
"w": tf.fixed_size_partitioner(num_shards=2),
"b": tf.fixed_size_partitioner(num_shards=2),
}
alex_net = snt.nets.AlexNetMini(
partitioners=partitioners, name="alexnet1")
input_shape = [alex_net._min_size, alex_net._min_size, 3]
inputs = tf.placeholder(tf.float32, shape=[None] + input_shape)
alex_net(inputs)
for conv_module in alex_net.conv_modules:
self.assertEqual(type(conv_module.w), variables.PartitionedVariable)
self.assertEqual(type(conv_module.b), variables.PartitionedVariable)
for linear_module in alex_net.linear_modules:
self.assertEqual(type(linear_module.w), variables.PartitionedVariable)
self.assertEqual(type(linear_module.b), variables.PartitionedVariable)
def testErrorHandling(self):
err = "AlexNet construction mode 'BLAH' not recognised"
with self.assertRaisesRegexp(snt.Error, err):
snt.nets.AlexNet(mode="BLAH")
def testGetLinearModules(self):
alex_net = snt.nets.AlexNetFull()
input_shape = [1, alex_net.min_input_size, alex_net.min_input_size, 3]
inputs = tf.ones(dtype=tf.float32, shape=input_shape)
alex_net(inputs)
for mod in alex_net.linear_modules:
self.assertEqual(mod.output_size, 4096)
def testCustomGetterUsed(self):
const = 42.
def set_to_const(getter, *args, **kwargs):
variable = getter(*args, **kwargs)
return 0.0 * variable + const
alex_net = snt.nets.AlexNetFull(custom_getter=set_to_const)
input_shape = [1, alex_net.min_input_size, alex_net.min_input_size, 3]
inputs = tf.ones(dtype=tf.float32, shape=input_shape)
alex_net(inputs)
self.evaluate(tf.global_variables_initializer())
for module in alex_net.conv_modules + alex_net.linear_modules:
var_w, var_b = self.evaluate([module.w, module.b])
self.assertAllClose(var_w, np.zeros_like(var_w) + const)
self.assertAllClose(var_b, np.zeros_like(var_b) + const)
if __name__ == "__main__":
tf.test.main()
| sonnet-1 | sonnet/python/modules/nets/alexnet_test.py |
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""A minimal interface mlp module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from six.moves import xrange # pylint: disable=redefined-builtin
from sonnet.python.modules import base
from sonnet.python.modules import basic
from sonnet.python.modules import util
import tensorflow.compat.v1 as tf
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.layers import utils
# pylint: enable=g-direct-tensorflow-import
class MLP(base.AbstractModule, base.Transposable):
"""A Multi-Layer perceptron module."""
def __init__(self,
output_sizes,
activation=tf.nn.relu,
activate_final=False,
initializers=None,
partitioners=None,
regularizers=None,
use_bias=True,
use_dropout=False,
custom_getter=None,
name="mlp"):
"""Constructs an MLP module.
Args:
output_sizes: An iterable of output dimensionalities as defined in
`basic.Linear`. Output size can be defined either as number or via a
callable. In the latter case, since the function invocation is deferred
        to graph construction time, the user need only ensure that entries can
be called when build is called. Each entry in the iterable defines
properties in the corresponding linear layer.
activation: An activation op. The activation is applied to intermediate
layers, and optionally to the output of the final layer.
activate_final: Boolean determining if the activation is applied to
the output of the final layer. Default `False`.
initializers: Optional dict containing ops to initialize the linear
layers' weights (with key 'w') or biases (with key 'b').
partitioners: Optional dict containing partitioners to partition the
linear layers' weights (with key 'w') or biases (with key 'b').
regularizers: Optional dict containing regularizers for the linear layers'
weights (with key 'w') and the biases (with key 'b'). As a default, no
regularizers are used. A regularizer should be a function that takes
a single `Tensor` as an input and returns a scalar `Tensor` output, e.g.
the L1 and L2 regularizers in `tf.contrib.layers`.
use_bias: Whether to include bias parameters in the linear layers.
Default `True`.
use_dropout: Whether to perform dropout on the linear layers.
Default `False`.
custom_getter: Callable or dictionary of callables to use as
custom getters inside the module. If a dictionary, the keys
correspond to regexes to match variable names. See the `tf.get_variable`
documentation for information about the custom_getter API.
name: Name of the module.
Raises:
KeyError: If initializers contains any keys other than 'w' or 'b'.
KeyError: If regularizers contains any keys other than 'w' or 'b'.
ValueError: If output_sizes is empty.
TypeError: If `activation` is not callable; or if `output_sizes` is not
iterable.
"""
super(MLP, self).__init__(custom_getter=custom_getter, name=name)
if not isinstance(output_sizes, collections.Iterable):
raise TypeError("output_sizes must be iterable")
output_sizes = tuple(output_sizes)
if not output_sizes:
raise ValueError("output_sizes must not be empty")
self._output_sizes = output_sizes
self._num_layers = len(self._output_sizes)
self._input_shape = None
self.possible_keys = self.get_possible_initializer_keys(use_bias=use_bias)
self._initializers = util.check_initializers(
initializers, self.possible_keys)
self._partitioners = util.check_partitioners(
partitioners, self.possible_keys)
self._regularizers = util.check_regularizers(
regularizers, self.possible_keys)
if not callable(activation):
raise TypeError("Input 'activation' must be callable")
self._activation = activation
self._activate_final = activate_final
self._use_bias = use_bias
self._use_dropout = use_dropout
self._instantiate_layers()
def _instantiate_layers(self):
"""Instantiates all the linear modules used in the network.
Layers are instantiated in the constructor, as opposed to the build
function, because MLP implements the Transposable interface, and the
transpose function can be called before the module is actually connected
to the graph and build is called.
Notice that this is safe since layers in the transposed module are
instantiated using a lambda returning input_size of the mlp layers, and
this doesn't have to return sensible values until the original module is
connected to the graph.
"""
# Here we are entering the module's variable scope to name our submodules
# correctly (not to create variables). As such it's safe to not check
# whether we're in the same graph. This is important if we're constructing
# the module in one graph and connecting it in another (e.g. with `defun`
# the module is created in some default graph, and connected to a capturing
# graph in order to turn it into a graph function).
with self._enter_variable_scope(check_same_graph=False):
# pylint: disable=g-complex-comprehension
self._layers = [basic.Linear(self._output_sizes[i],
name="linear_{}".format(i),
initializers=self._initializers,
partitioners=self._partitioners,
regularizers=self._regularizers,
use_bias=self.use_bias)
for i in xrange(self._num_layers)]
# pylint: enable=g-complex-comprehension
@classmethod
def get_possible_initializer_keys(cls, use_bias=True):
return basic.Linear.get_possible_initializer_keys(use_bias=use_bias)
def _build(self, inputs, is_training=True, dropout_keep_prob=0.5):
"""Assembles the `MLP` and connects it to the graph.
Args:
inputs: A 2D Tensor of size `[batch_size, input_size]`.
is_training: A bool or tf.Bool Tensor. Indicates whether we are
currently training. Defaults to `True`.
dropout_keep_prob: The probability that each element is kept when
both `use_dropout` and `is_training` are True. Defaults to 0.5.
Returns:
A 2D Tensor of size `[batch_size, output_sizes[-1]]`.
"""
self._input_shape = tuple(inputs.get_shape().as_list())
net = inputs
final_index = self._num_layers - 1
for layer_id in xrange(self._num_layers):
net = self._layers[layer_id](net)
if final_index != layer_id or self._activate_final:
# Only perform dropout whenever we are activating the layer's outputs.
if self._use_dropout:
keep_prob = utils.smart_cond(
is_training, true_fn=lambda: dropout_keep_prob,
false_fn=lambda: tf.constant(1.0)
)
net = tf.nn.dropout(net, rate=1-keep_prob)
net = self._activation(net)
return net
@property
def layers(self):
"""Returns a tuple containing the linear layers of the `MLP`."""
return self._layers
@property
def output_sizes(self):
"""Returns a tuple of all output sizes of all the layers."""
return tuple([l() if callable(l) else l for l in self._output_sizes])
@property
def output_size(self):
"""Returns the size of the module output, not including the batch dimension.
This allows the MLP to be used inside a DeepRNN.
Returns:
The scalar size of the module output.
"""
last_size = self._output_sizes[-1]
return last_size() if callable(last_size) else last_size
@property
def use_bias(self):
return self._use_bias
@property
def use_dropout(self):
return self._use_dropout
@property
def initializers(self):
"""Returns the intializers dictionary."""
return self._initializers
@property
def partitioners(self):
"""Returns the partitioners dictionary."""
return self._partitioners
@property
def regularizers(self):
"""Returns the regularizers dictionary."""
return self._regularizers
@property
def activation(self):
return self._activation
@property
def activate_final(self):
return self._activate_final
# Implements Transposable interface
@property
def input_shape(self):
"""Returns shape of input `Tensor` passed at last call to `build`."""
self._ensure_is_connected()
return self._input_shape
# Implements Transposable interface
def transpose(self, name=None, activate_final=None):
"""Returns transposed `MLP`.
Args:
name: Optional string specifying the name of the transposed module. The
default name is constructed by appending "_transpose"
to `self.module_name`.
      activate_final: Optional boolean determining if the activation is
        applied to the final layer.
Returns:
Matching transposed `MLP` module.
"""
if name is None:
name = self.module_name + "_transpose"
if activate_final is None:
activate_final = self.activate_final
output_sizes = [lambda l=layer: l.input_shape[1] for layer in self._layers]
output_sizes.reverse()
return MLP(
name=name,
output_sizes=output_sizes,
activation=self.activation,
activate_final=activate_final,
initializers=self.initializers,
partitioners=self.partitioners,
regularizers=self.regularizers,
use_bias=self.use_bias,
use_dropout=self.use_dropout)
def clone(self, name=None):
"""Creates a new MLP with the same structure.
Args:
name: Optional string specifying the name of the new module. The default
name is constructed by appending "_clone" to the original name.
Returns:
A cloned `MLP` module.
"""
if name is None:
name = self.module_name + "_clone"
return MLP(
name=name,
output_sizes=self.output_sizes,
activation=self.activation,
activate_final=self.activate_final,
initializers=self.initializers,
partitioners=self.partitioners,
regularizers=self.regularizers,
use_bias=self.use_bias,
use_dropout=self.use_dropout)
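# --- Illustrative usage sketch (editor's addition, not part of the original
# module): constructing an MLP, connecting it to the graph, and decoding with
# its transpose. Sizes are arbitrary. Run this file directly to execute it.
def _example_usage():
  inputs = tf.random_normal([8, 16])
  mlp = MLP(output_sizes=[32, 32, 4])
  outputs = mlp(inputs)  # Shape [8, 4].
  decoder = mlp.transpose()
  reconstruction = decoder(outputs)  # Shape [8, 16].
  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run([outputs, reconstruction])
if __name__ == "__main__":
  _example_usage()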
| sonnet-1 | sonnet/python/modules/nets/mlp.py |
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Implementation of (Yu & Koltun, 2016)'s Dilation module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
from sonnet.python.modules import base
from sonnet.python.modules import conv
from sonnet.python.modules import sequential
from sonnet.python.modules import util
import tensorflow.compat.v1 as tf
def _range_along_dimension(range_dim, shape):
"""Construct a Tensor whose values are the index along a dimension.
Construct a Tensor that counts the distance along a single dimension. This is
useful, for example, when constructing an identity matrix,
>>> x = _range_along_dimension(0, [2, 2]).eval()
>>> x
array([[0, 0],
[1, 1]], dtype=int32)
>>> y = _range_along_dimension(1, [2, 2]).eval()
>>> y
array([[0, 1],
[0, 1]], dtype=int32)
>>> tf.cast(tf.equal(x, y), dtype=tf.int32).eval()
array([[1, 0],
[0, 1]], dtype=int32)
Args:
range_dim: int. Dimension to count indices on.
shape: 1D Tensor of ints. Shape of Tensor to construct.
Returns:
A Tensor whose values are the same as the range along dimension range_dim.
Raises:
ValueError: If range_dim isn't a valid dimension.
"""
rank = len(shape)
if range_dim >= rank:
raise ValueError("Cannot calculate range along non-existent index.")
indices = tf.range(start=0, limit=shape[range_dim])
indices = tf.reshape(
indices,
shape=[1 if i != range_dim else shape[range_dim] for i in range(rank)])
return tf.tile(indices,
[shape[i] if i != range_dim else 1 for i in range(rank)])
# pylint: disable=unused-argument
def identity_kernel_initializer(shape, dtype=tf.float32, partition_info=None):
"""An initializer for constructing identity convolution kernels.
Constructs a convolution kernel such that applying it is the same as an
identity operation on the input. Formally, the kernel has entry [i, j, in,
out] = 1 if in equals out and i and j are the middle of the kernel and 0
otherwise.
Args:
shape: List of integers. Represents shape of result.
dtype: data type for values in result.
partition_info: Partition information for initializer functions. Ignored.
Returns:
Tensor of desired shape and dtype such that applying it as a convolution
kernel results in the identity operation.
Raises:
ValueError: If shape does not define a valid kernel.
If filter width and height differ.
If filter width and height are not odd numbers.
If number of input and output channels differ.
"""
if len(shape) != 4:
raise ValueError("Convolution kernels must be rank 4.")
filter_height, filter_width, in_channels, out_channels = shape
if filter_width != filter_height:
raise ValueError("Identity initializer only works for square filters.")
if filter_width % 2 != 1:
raise ValueError(
"Identity initializer requires filters have odd height and width.")
if in_channels != out_channels:
raise ValueError(
"in_channels must equal out_channels in order to construct per-channel"
" identities.")
middle_pixel = filter_height // 2
is_middle_pixel = tf.logical_and(
tf.equal(_range_along_dimension(0, shape), middle_pixel),
tf.equal(_range_along_dimension(1, shape), middle_pixel))
is_same_channel = tf.equal(
_range_along_dimension(2, shape), _range_along_dimension(3, shape))
return tf.cast(tf.logical_and(is_same_channel, is_middle_pixel), dtype=dtype)
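# Illustrative sketch (editor's addition): convolving with the kernel produced
# by `identity_kernel_initializer` under SAME padding leaves the feature map
# unchanged, which is what makes it a sensible default for this module.
def _identity_kernel_example():
  images = tf.random_normal([1, 8, 8, 4])
  kernel = identity_kernel_initializer([3, 3, 4, 4])
  convolved = tf.nn.conv2d(
      images, kernel, strides=[1, 1, 1, 1], padding="SAME")
  return images, convolved  # Both evaluate to the same values.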
def noisy_identity_kernel_initializer(base_num_channels, stddev=1e-8):
"""Build an initializer for constructing near-identity convolution kernels.
Construct a convolution kernel where in_channels and out_channels are
multiples of base_num_channels, but need not be equal. This initializer is
essentially the same as identity_kernel_initializer, except that magnitude
is "spread out" across multiple copies of the input.
Args:
base_num_channels: int. Number that divides both in_channels and
out_channels.
stddev: float. Standard deviation of truncated normal noise added to
      off-identity entries to break ties.
Returns:
Initializer function for building a noisy identity kernel.
"""
# pylint: disable=unused-argument
def _noisy_identity_kernel_initializer(shape,
dtype=tf.float32,
partition_info=None):
"""Constructs a noisy identity kernel.
Args:
shape: List of integers. Represents shape of result.
dtype: data type for values in result.
partition_info: Partition information for initializer functions. Ignored.
Returns:
Tensor of desired shape and dtype such that applying it as a convolution
kernel results in a noisy near-identity operation.
Raises:
ValueError: If shape does not define a valid kernel.
If filter width and height differ.
If filter width and height are not odd numbers.
If number of input and output channels are not multiples of
base_num_channels.
"""
if len(shape) != 4:
raise ValueError("Convolution kernels must be rank 4.")
filter_height, filter_width, in_channels, out_channels = shape
if filter_width != filter_height:
raise ValueError(
"Noisy identity initializer only works for square filters.")
if filter_width % 2 != 1:
raise ValueError(
"Noisy identity initializer requires filters have odd height and "
"width.")
if (in_channels % base_num_channels != 0 or
out_channels % base_num_channels != 0):
raise ValueError("in_channels and out_channels must both be multiples of "
"base_num_channels.")
middle_pixel = filter_height // 2
is_middle_pixel = tf.logical_and(
tf.equal(_range_along_dimension(0, shape), middle_pixel),
tf.equal(_range_along_dimension(1, shape), middle_pixel))
is_same_channel_multiple = tf.equal(
tf.floordiv(
_range_along_dimension(2, shape) * base_num_channels, in_channels),
tf.floordiv(
_range_along_dimension(3, shape) * base_num_channels, out_channels))
noise = tf.truncated_normal(shape, stddev=stddev, dtype=dtype)
return tf.where(
tf.logical_and(is_same_channel_multiple, is_middle_pixel),
tf.ones(
shape, dtype=dtype) * (base_num_channels / out_channels),
noise)
return _noisy_identity_kernel_initializer
class Dilation(base.AbstractModule):
"""A convolutional module for per-pixel classification.
Consists of 8 convolutional layers, 4 of which are dilated. When applied to
the output of a model like VGG-16 (before fully connected layers), can be used
to make predictions on a per-pixel basis.
Note that the default initializers for the 'basic' model size require that
the number of input channels be equal to the number of output classes, and the
initializers for the 'large' model require it be a multiple.
Based on:
'Multi-Scale Context Aggregation by Dilated Convolutions'
Fisher Yu, Vladlen Koltun, ICLR 2016
https://arxiv.org/abs/1511.07122
Properties:
conv_modules: list of sonnet modules. The 8 convolution layers used in the
Dilation module.
"""
# Size of model to build.
BASIC = "basic"
LARGE = "large"
# Keys for initializers.
WEIGHTS = "w"
BIASES = "b"
POSSIBLE_INITIALIZER_KEYS = {WEIGHTS, BIASES}
def __init__(self,
num_output_classes,
initializers=None,
regularizers=None,
model_size="basic",
name="dilation"):
"""Creates a dilation module.
Args:
num_output_classes: Int. Number of output classes to predict for
each pixel in an image.
initializers: Optional dict containing ops to initialize filters (with key
'w') or biases (with key 'b'). The default initializer makes this module
equivalent to the identity.
regularizers: Optional dict containing regularizers for the weights
(with key 'w') or biases (with key 'b'). As a default, no regularizers
are used. A regularizer should be a function that takes a single
`Tensor` as an input and returns a scalar `Tensor` output, e.g. the L1
and L2 regularizers in `tf.contrib.layers`.
model_size: string. One of 'basic' or 'large'.
name: string. Name of module.
"""
super(Dilation, self).__init__(name=name)
self._num_output_classes = num_output_classes
self._model_size = model_size
self._initializers = util.check_initializers(
initializers, self.POSSIBLE_INITIALIZER_KEYS)
self._regularizers = util.check_regularizers(
regularizers, self.POSSIBLE_INITIALIZER_KEYS)
def _build(self, images):
"""Build dilation module.
Args:
images: Tensor of shape [batch_size, height, width, depth]
and dtype float32. Represents a set of images with an arbitrary depth.
Note that when using the default initializer, depth must equal
num_output_classes.
Returns:
Tensor of shape [batch_size, height, width, num_output_classes] and dtype
float32. Represents, for each image and pixel, logits for per-class
predictions.
Raises:
IncompatibleShapeError: If images is not rank 4.
ValueError: If model_size is not one of 'basic' or 'large'.
"""
num_classes = self._num_output_classes
if len(images.get_shape()) != 4:
raise base.IncompatibleShapeError(
"'images' must have shape [batch_size, height, width, depth].")
if self.WEIGHTS not in self._initializers:
if self._model_size == self.BASIC:
self._initializers[self.WEIGHTS] = identity_kernel_initializer
elif self._model_size == self.LARGE:
self._initializers[self.WEIGHTS] = noisy_identity_kernel_initializer(
num_classes)
else:
raise ValueError("Unrecognized model_size: %s" % self._model_size)
if self.BIASES not in self._initializers:
self._initializers[self.BIASES] = tf.zeros_initializer()
if self._model_size == self.BASIC:
self._conv_modules = [
self._dilated_conv_layer(num_classes, 1, True, "conv1"),
self._dilated_conv_layer(num_classes, 1, True, "conv2"),
self._dilated_conv_layer(num_classes, 2, True, "conv3"),
self._dilated_conv_layer(num_classes, 4, True, "conv4"),
self._dilated_conv_layer(num_classes, 8, True, "conv5"),
self._dilated_conv_layer(num_classes, 16, True, "conv6"),
self._dilated_conv_layer(num_classes, 1, True, "conv7"),
self._dilated_conv_layer(num_classes, 1, False, "conv8"),
]
elif self._model_size == self.LARGE:
self._conv_modules = [
self._dilated_conv_layer(2 * num_classes, 1, True, "conv1"),
self._dilated_conv_layer(2 * num_classes, 1, True, "conv2"),
self._dilated_conv_layer(4 * num_classes, 2, True, "conv3"),
self._dilated_conv_layer(8 * num_classes, 4, True, "conv4"),
self._dilated_conv_layer(16 * num_classes, 8, True, "conv5"),
self._dilated_conv_layer(32 * num_classes, 16, True, "conv6"),
self._dilated_conv_layer(32 * num_classes, 1, True, "conv7"),
self._dilated_conv_layer(num_classes, 1, False, "conv8"),
]
else:
raise ValueError("Unrecognized model_size: %s" % self._model_size)
dilation_mod = sequential.Sequential(self._conv_modules, name="dilation")
return dilation_mod(images)
def _dilated_conv_layer(self, output_channels, dilation_rate, apply_relu,
name):
"""Create a dilated convolution layer.
Args:
output_channels: int. Number of output channels for each pixel.
dilation_rate: int. Represents how many pixels each stride offset will
move. A value of 1 indicates a standard convolution.
      apply_relu: bool. If True, a ReLU non-linearity is added.
name: string. Name for layer.
Returns:
a sonnet Module for a dilated convolution.
"""
layer_components = [
conv.Conv2D(
output_channels, [3, 3],
initializers=self._initializers,
regularizers=self._regularizers,
rate=dilation_rate,
name="dilated_conv_" + name),
]
if apply_relu:
layer_components.append(lambda net: tf.nn.relu(net, name="relu_" + name))
return sequential.Sequential(layer_components, name=name)
@property
def conv_modules(self):
self._ensure_is_connected()
return self._conv_modules
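# --- Illustrative usage sketch (editor's addition, not part of the original
# module): with the default initializers the input depth must equal
# num_output_classes. Run this file directly to execute it.
def _example_usage():
  num_classes = 5
  features = tf.random_normal([2, 16, 16, num_classes])
  logits = Dilation(num_output_classes=num_classes)(features)
  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(logits)  # Shape [2, 16, 16, 5].
if __name__ == "__main__":
  _example_usage()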
| sonnet-1 | sonnet/python/modules/nets/dilation.py |
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Implementation of Transformer networks.
Size glossary:
* Batch size (B).
* Sequence length (N).
* Memory size (M). The size of the optional memory, passed in via `state`.
* Number of heads (H): the number of attention heads.
* Value size (V): the size of each value embedding per head.
* Key size (K): the size of each key embedding per head. Equally, the size
of each query embedding per head. Typically K <= V.
* Embedding size (HV). The size of the activation or embedding relating to
each input between layers. Equal to value_size * num_heads.
* All attention size (F). The size of all attention activations over every
head.
* QKV size (F / H): The size of the query, key and value per head. Equal to
2K + V or equivalently F / H.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
# Dependency imports
import numpy as np
from sonnet.python import custom_getters
from sonnet.python.modules import base
from sonnet.python.modules import basic
from sonnet.python.modules import conv as snt_conv
from sonnet.python.modules import layer_norm as snt_ln
from sonnet.python.modules import rnn_core
from sonnet.python.modules import util
from sonnet.python.modules.nets import mlp as snt_mlp
import tensorflow.compat.v1 as tf
AttentionState = collections.namedtuple('AttentionState',
('queries', 'keys', 'values', 'logits',
'weights', 'embeddings', 'read_words'))
CompressedMemoryState = collections.namedtuple(
'CompressedMemoryState', ('episodic_memory', 'compressed_memory', 'index'))
def rel_shift(position_logits):
"""Shifting of logits for relative attention.
Args:
position_logits: A tensor of shape [B, H, N, N + M].
Returns:
The shifted logits. Example, for input (H=1, B=1):
[5, 4, 3, 2, 1]
[5, 4, 3, 2, 1]
[5, 4, 3, 2, 1]
[5, 4, 3, 2, 1]
[5, 4, 3, 2, 1]
the function outputs:
[1, 0, 5, 4, 3]
[2, 1, 0, 5, 4]
[3, 2, 1, 0, 5]
[4, 3, 2, 1, 0]
[5, 4, 3, 2, 1]
Raises:
ValueError if position_logits is not 4D.
Note: this is not an exact shift as the upper triangle is non-zero. This
works as intended in the causally-masked case. If this is used with un-masked
attention, we'd want these to also be zero.
"""
if position_logits.get_shape().ndims != 4:
raise ValueError('Expected 4D position logits.')
input_shape = tf.shape(position_logits)
batch_size = input_shape[0]
num_heads = input_shape[1]
t1 = input_shape[2]
t2 = input_shape[3]
# We prepend zeros on the final timescale dimension.
to_pad = tf.zeros([batch_size, num_heads, t1, 1])
position_logits = tf.concat([to_pad, position_logits], -1)
# Reshape trick to shift input.
position_logits = tf.reshape(position_logits,
[batch_size, num_heads, t2 + 1, t1])
# Remove extra time dimension and re-shape.
position_logits = position_logits[:, :, 1:]
position_logits = tf.reshape(position_logits, input_shape)
return position_logits
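# Illustrative sketch (editor's addition): reproducing the docstring example
# above for a single batch element and head (B = H = 1, N + M = 5).
def _rel_shift_example():
  row = tf.constant([[5., 4., 3., 2., 1.]])
  logits = tf.reshape(tf.tile(row, [5, 1]), [1, 1, 5, 5])
  # Rows of the result: [1, 0, 5, 4, 3], [2, 1, 0, 5, 4], ..., [5, 4, 3, 2, 1].
  return rel_shift(logits)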
def _layer_norm(inputs):
if inputs.get_shape().ndims > 2:
return basic.BatchApply(snt_ln.LayerNorm())(inputs)
else:
return snt_ln.LayerNorm()(inputs)
def _concat_and_slice(prev_memory, new_memory):
original_memory_size = prev_memory.get_shape().as_list()[1]
concat_memory = tf.concat([prev_memory, new_memory], 1)
memory = concat_memory[:, -original_memory_size:]
return memory, concat_memory
def simple_attention(queries, keys, values):
logits = tf.matmul(queries, keys, transpose_b=True)
weights = tf.nn.softmax(logits)
return tf.matmul(weights, values)
class ResidualDropoutWrapper(base.AbstractModule):
"""Wrapper class that applies residual connections, dropout and layer norm.
  Layer norm can be applied to the wrapped module's input, output, or both, as
  controlled by `layer_norm`; dropout is applied only during training.
"""
def __init__(self,
layer,
dropout_rate,
layer_norm='input',
name='residual_dropout_wrapper'):
self._module = layer
self._dropout_rate = dropout_rate
self._layer_norm = layer_norm
super(ResidualDropoutWrapper, self).__init__(name=name)
def _build(self, inputs, *args, **kwargs):
if self._layer_norm in ('both', 'input'):
normed_inputs = _layer_norm(inputs)
else:
normed_inputs = inputs
module_output = self._module(normed_inputs, *args, **kwargs)
module_state = None
# If module outputs multiple items, assumes (output, state) tuple.
if isinstance(module_output, tuple):
module_output, module_state = module_output
if kwargs['is_training']: # kwargs must contain is_training.
module_output = tf.nn.dropout(module_output, rate=self._dropout_rate)
output = inputs + module_output
if self._layer_norm in ('both', 'output'):
output = _layer_norm(output)
if module_state is None:
return output
else:
return output, module_state
def future_mask(chunk_size, dtype):
"""Creates attention mask to ensure an element i cannot attend to j > i."""
square = tf.ones([chunk_size, chunk_size], dtype=dtype)
# Create upper diagonal matrix and remove diagonal entries (allow self-attn).
mask = tf.matrix_band_part(square, 0, -1) - tf.matrix_band_part(square, 0, 0)
# Multiply by -1e6 and expand to broadcast with [B, H, N, N] logits.
mask = -1e6 * tf.reshape(mask, [1, 1, chunk_size, chunk_size])
return mask
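# Illustrative sketch (editor's addition): the mask is added to the attention
# logits, so future positions get a large negative value and receive
# (effectively) zero weight after the softmax.
def _future_mask_example():
  logits = tf.zeros([2, 4, 3, 3])  # [B, H, N, N]
  masked = logits + future_mask(chunk_size=3, dtype=tf.float32)
  return tf.nn.softmax(masked)  # Upper-triangular weights are ~0.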
def _memory_size(state):
if isinstance(state, CompressedMemoryState):
return (state.episodic_memory.get_shape().as_list()[1] +
state.compressed_memory.get_shape().as_list()[1])
else:
return state.get_shape().as_list()[1]
def create_mask(inputs, state, equal_window):
"""Creates mask for future sequence positions.
Args:
inputs: inputs tensor of shape [B, N, D]
state: optional tensor of shape [B, M, D], CompressedMemoryState or a list
where the ith entry corresponds to the ith layer's state.
equal_window: if True, then each activation has an equally-sized attention
window of length 'M'. This only makes sense if a state is given.
Returns:
Float tensor of shape [1, 1, N, N + M], to be summed with logits.
"""
chunk_size = inputs.get_shape().as_list()[1]
dtype = inputs.dtype
mask = future_mask(chunk_size, dtype)
if state is not None:
if isinstance(state, (tuple, list)):
largest_memory_layer = np.argmax([_memory_size(s) for s in state])
state = state[largest_memory_layer]
mem_size = _memory_size(state)
mask = tf.concat(
[tf.zeros([1, 1, chunk_size, mem_size], dtype=dtype), mask], 3)
if equal_window:
attn_mask = tf.ones([chunk_size, chunk_size], dtype=dtype)
mask_dia = tf.cast(tf.matrix_band_part(attn_mask, 0, 0), dtype=dtype)
mask_l = tf.cast(tf.matrix_band_part(attn_mask, -1, 0), dtype=dtype)
start_mask = tf.reshape(mask_l - mask_dia,
[1, 1, chunk_size, chunk_size]) * -1e6
mask = tf.concat(
[mask[:, :, :, :chunk_size] + start_mask, mask[:, :, :, chunk_size:]],
3)
return mask
def default_mlp(hidden_sizes, activate_final=False, init_std=2., **kwargs):
"""Standard batch-applied MLP for transformer modules."""
init = {'w': tf.variance_scaling_initializer(init_std, distribution='normal')}
mlp = snt_mlp.MLP(
hidden_sizes,
activate_final=activate_final,
use_dropout=True,
initializers=init,
**kwargs)
return basic.BatchApply(mlp)
def get_position_encodings(sequence_length,
hidden_size,
clamp_value,
max_timescale=10000.,
min_timescale=2.0):
"""Creates sinusoidal encodings of shape [1, N + M, D]."""
# NOTE: when not using relative position encodings, min_timescale must be 2.0
# and hidden_size must be an even number. Otherwise, the dimensions do not
# match.
pos_seq = tf.range(sequence_length - 1, -1, -1.0)
if clamp_value > 0:
pos_seq = tf.minimum(pos_seq, clamp_value)
freqs = tf.range(0, hidden_size, min_timescale)
inv_freq = 1 / (max_timescale**(freqs / hidden_size))
sinusoid_inp = tf.einsum('i,j->ij', pos_seq, inv_freq)
pos_emb = tf.concat([tf.sin(sinusoid_inp), tf.cos(sinusoid_inp)], -1)
pos_emb = tf.expand_dims(pos_emb, 0)
output_dim = pos_emb.get_shape().as_list()[-1]
if output_dim != hidden_size:
raise ValueError(
'position embedding dimension ({}) does not match that of the input ({}).'
.format(output_dim, hidden_size))
return pos_emb
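# Illustrative sketch (editor's addition): with the default min_timescale=2.0
# and an even hidden size, the encodings have shape [1, sequence_length, D]
# and can be added directly to the key and query inputs.
def _position_encodings_example():
  pos_emb = get_position_encodings(
      sequence_length=12, hidden_size=64, clamp_value=0)
  return pos_emb  # Static shape [1, 12, 64].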
class MultiheadAttention(base.AbstractModule):
"""Implements multi-head attention with optional state context."""
def __init__(self,
value_size,
key_size,
num_heads,
mask=None,
scaling=True,
positional_encodings=None,
use_relative_positions=False,
init_std=2.,
name='multihead_attention'):
"""Creates a MultiheadAttention module.
Args:
value_size: V parameter. See size glossary in class docstring.
key_size: K parameter. See size glossary in class docstring.
num_heads: The number of independent queries per timestep.
mask: Optional mask to attention logits. This can prevent attending to
future positions or unused memory slots.
scaling: Whether to scale the attention logits.
positional_encodings: Either None (none given), or an iterable of
`(key_positional_encodings, query_positional_encodings)` tuples, where
the first encodings in the list indicate the oldest entries in memory
and the final encodings indicate the newest entries in memory and the
sequence.
use_relative_positions: If True then relative positions are incorporated,
vs absolute, into the attention logits. This is done exactly as
described in the TransformerXL, Dai et al. 2019.
init_std: scaling of standard deviation for weight matrices init.
name: Name of module.
"""
super(MultiheadAttention, self).__init__(name=name)
self._value_size = value_size
self._key_size = key_size
self._sizes = {
'value': self._value_size,
'key': self._key_size,
'query': self._key_size,
'relative_keys': self._key_size,
'relative_keys_0': self._key_size,
}
self._num_heads = num_heads
self._mask = mask
self._scaling = scaling
self._positional_encodings = positional_encodings
self._use_relative_positions = use_relative_positions
self._init = {'w': tf.variance_scaling_initializer(init_std)}
@util.reuse_variables
def multihead_linear(self, inputs, name):
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
hidden_size = self._sizes[name]
input_size = inputs.shape[-1].value
w = tf.get_variable(
'linear/w',
shape=[input_size, self._num_heads * hidden_size],
initializer=self._init['w'])
w = tf.reshape(w, [input_size, self._num_heads, hidden_size])
out = tf.einsum('bij,jhk->bhik', inputs, w)
return out
def _build(self,
inputs,
query_inputs=None,
state=None,
is_training=False,
dropout_keep_prob=0.5):
embedding_size = self._value_size * self._num_heads
q_inputs = inputs if query_inputs is None else query_inputs
# Denoted by L. If query_inputs is None, L = N.
_, query_size = q_inputs.get_shape().as_list()[:2]
if state is not None:
if isinstance(state, CompressedMemoryState):
state_memory_list = [state.compressed_memory, state.episodic_memory]
else:
state_memory_list = [state]
k_inputs = tf.concat(state_memory_list + [inputs], 1)
v_inputs = k_inputs
else:
k_inputs = inputs
v_inputs = inputs
# Batch size denoted by B
batch_size = tf.shape(inputs)[0]
# Chunk_size denoted by N
chunk_size = inputs.get_shape().as_list()[1]
# Denoted by N + M
att_size = k_inputs.get_shape().as_list()[1]
if self._positional_encodings and not self._use_relative_positions:
key_positions, query_positions = self._positional_encodings
k_inputs += key_positions
q_inputs += query_positions
# [B, H, L, K]
q = self.multihead_linear(q_inputs, 'query')
# [B, H, N + M, K]
k = self.multihead_linear(k_inputs, 'key')
# [B, H, N + M, V]
v = self.multihead_linear(v_inputs, 'value')
# Scaling the dot-product
if self._scaling:
q *= self._key_size**-0.5
# [B, H, L, N + M]
if self._use_relative_positions:
r_w_bias = tf.get_variable(
'r_w_bias', [1, self._num_heads, 1, self._key_size],
dtype=inputs.dtype)
content_logits = tf.matmul(q + r_w_bias, k, transpose_b=True)
all_relative_logits = []
# Loop over multiple positional encodings, for the case of multiple
# memory types.
for i, positional_encodings in enumerate(self._positional_encodings):
key_positions, query_positions = positional_encodings
if key_positions.get_shape().as_list()[-1] != att_size:
key_positions = key_positions[:, -att_size:] # Crop to layer mem size
is_final = i == len(self._positional_encodings) - 1
suffix = '' if is_final else '_%d' % i
relative_keys = self.multihead_linear(
key_positions, name='relative_keys' + suffix)
# [B, H, N, D]
r_r_bias = tf.get_variable(
'r_r_bias' + suffix, [1, self._num_heads, 1, self._key_size],
dtype=inputs.dtype)
relative_keys = tf.tile(relative_keys, [batch_size, 1, 1, 1])
relative_logits = tf.matmul(
q + r_r_bias, relative_keys, transpose_b=True)
relative_logits = rel_shift(relative_logits)
if not is_final: # Include relative positions for input sequence.
relative_logits = relative_logits[:, :, :, :-chunk_size]
all_relative_logits.append(relative_logits)
all_relative_logits = tf.concat(all_relative_logits, 3)
logits = content_logits + all_relative_logits
else:
# [B, H, N, N + M]
logits = tf.matmul(q, k, transpose_b=True)
content_logits = logits
if self._mask is not None:
if self._mask.get_shape().as_list()[-1] != att_size:
mask = self._mask[:, :, :, -att_size:]
else:
mask = self._mask
logits += mask
weights = tf.nn.softmax(logits)
if is_training:
weights = tf.nn.dropout(weights, dropout_keep_prob)
# [B, L, H, V], where V is value_size
output_transpose = tf.einsum('bhij,bhjk->bihk', weights, v)
# [B, L, H, V] -> [B, L, HV]
attended_inputs = basic.BatchReshape([query_size, embedding_size])(
output_transpose)
# Apply final mlp to mix information between heads.
output = basic.BatchApply(basic.Linear(embedding_size))(attended_inputs)
attention_state = AttentionState(
queries=q,
keys=k,
values=v,
weights=weights,
logits=content_logits,
embeddings=inputs,
read_words=output)
return output, attention_state
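# Illustrative sketch (editor's addition): one attention pass over a [B, N, D]
# sequence with no mask and no memory, so every position may attend to every
# other position. The output embedding size is num_heads * value_size.
def _multihead_attention_example():
  inputs = tf.random_normal([2, 6, 32])  # [B, N, D]
  attention = MultiheadAttention(value_size=16, key_size=16, num_heads=4)
  output, attention_state = attention(inputs, is_training=False)
  return output, attention_state  # output shape: [2, 6, 64].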
class TransformerTower(base.AbstractModule):
"""Transformer tower.
Deep residual network using blocks of attention and MLPs, specified in
Vaswani et al. 2017.
"""
def __init__(self,
value_size,
num_heads,
num_layers,
causal=True,
key_size=None,
shared_attention=False,
output_size=None,
mlp_hidden_sizes=tuple([1024]),
dropout_rate=0.1,
use_relative_positions=True,
clamp_time_range=0,
same_attention_length=False,
layer_norm='input',
name='transformer_tower'):
"""Initializes TransformerTower.
Args:
value_size: dimensionality of values per-head.
num_heads: number of attention heads.
num_layers: number of transformer blocks, where each block contains a
multi-head attention layer and an MLP.
causal: if True, applies a causal mask.
key_size: optional dimensionality of key size. If unspecified then it is
set to `value_size`.
shared_attention: if True, attention params are shared across all layers.
output_size: if set, the desired output dimensionality. By default the
output size is `value_size` x `num_heads`.
mlp_hidden_sizes: tuple containing dimensionality of mlp layer(s). If
multiple values are specified, the mlp contains multiple layers for each
transformer block.
dropout_rate: dropout rate applied to hidden activations, attention, and
positional encodings.
use_relative_positions: if False, applies absolute positional encodings.
If true, uses relative positional encodings from Dai et al. 2019.
clamp_time_range: clamps max temporal positional encoding if specified.
same_attention_length: if True, attention is masked to ensure each
position in the sequence contains the same length of attention.
layer_norm: Where to apply layer-norm in Transformer block. Can be one of
'input' (Vaswani et al. 2017), 'output', or 'both'.
name: name of variable scope.
"""
super(TransformerTower, self).__init__(name=name)
self._causal = causal
self._mask = None
if key_size is None:
key_size = value_size
self._key_size = key_size
self._value_size = value_size
self._shared_attention = shared_attention
self._num_heads = num_heads
self._num_layers = num_layers
self._output_size = output_size
self._embedding_size = self._value_size * self._num_heads
self._mlp_hidden_sizes = list(mlp_hidden_sizes) + [self._embedding_size]
self._multihead_attention = None
self._object_embeddings = None
self._dropout_rate = dropout_rate
self._positional_encodings = None
self._use_relative_positions = use_relative_positions
self._clamp_time_range = clamp_time_range
self._same_attention_length = same_attention_length
self._layer_norm = layer_norm
self._attention_modules = []
self._object_mlps = []
def get_sublayers(self, is_training):
if self._multihead_attention is None or not self._shared_attention:
attention_module = MultiheadAttention(
value_size=self._value_size,
key_size=self._key_size,
num_heads=self._num_heads,
mask=self._mask,
positional_encodings=self._positional_encodings,
use_relative_positions=self._use_relative_positions,
init_std=2. / np.sqrt(self._num_layers),
)
self._multihead_attention = ResidualDropoutWrapper(
attention_module, self._dropout_rate, layer_norm=self._layer_norm)
mlp = default_mlp(
self._mlp_hidden_sizes, init_std=2. / np.sqrt(self._num_layers))
object_mlp = ResidualDropoutWrapper(
mlp, self._dropout_rate, layer_norm=self._layer_norm)
self._attention_modules.append(attention_module)
self._object_mlps.append(mlp)
return self._multihead_attention, object_mlp
def _build(self, inputs, state=None, condition=None, is_training=True):
"""Calculates multi-layer self attention and mlp transformation.
Args:
inputs: Tensor of shape [batch_size, num_steps, dim_size].
state: optional tensor of shape [batch_size, memory_size, dim_size].
condition: optional tensor to condition on. The shape is shape
[batch_size, dim_size].
is_training: If true, dropout is applied.
Returns:
output: tensor of shape [batch_size, num_steps, output_dim_size].
state: list of length `num_layers` containing AttentionState tuples.
"""
# inputs: [B, N, F]
if condition is not None:
condition_tile = tf.tile(
tf.expand_dims(condition, 1), [1, tf.shape(inputs)[1], 1])
inputs = tf.concat([inputs, condition_tile], -1)
if state is None:
memory_sizes = [0]
elif isinstance(state[0], CompressedMemoryState):
cm_mem_size = max(_memory_size(s.compressed_memory) for s in state)
em_mem_size = max(_memory_size(s.episodic_memory) for s in state)
memory_sizes = [cm_mem_size, em_mem_size]
else:
memory_sizes = [max([_memory_size(s) for s in state])]
chunk_size = inputs.get_shape().as_list()[1]
self._positional_encodings = []
# Creates positional encodings for different memory types.
for i, memory_size in enumerate(memory_sizes):
seq_len = chunk_size + memory_size
key_positions = get_position_encodings(
sequence_length=seq_len,
hidden_size=inputs.get_shape().as_list()[2],
clamp_value=self._clamp_time_range,
)
if is_training:
key_positions = tf.nn.dropout(key_positions, rate=self._dropout_rate)
key_positions = tf.cast(key_positions, dtype=inputs.dtype)
query_positions = key_positions[:, -chunk_size:, :]
self._positional_encodings.append((key_positions, query_positions))
if self._causal:
self._mask = create_mask(inputs, state, self._same_attention_length)
layer_i_inputs = inputs
attention_states = []
for i in range(self._num_layers):
with tf.variable_scope('layer_%d' % i, reuse=tf.AUTO_REUSE):
multihead_attention, object_mlp = self.get_sublayers(is_training)
# Multihead attention with residuals.
state_i = None if state is None else state[i]
attention_outputs, attention_state = multihead_attention(
layer_i_inputs,
state=state_i,
is_training=is_training,
dropout_keep_prob=1. - self._dropout_rate)
attention_states.append(attention_state)
# Feed-forward with residuals.
output = object_mlp(
attention_outputs,
is_training=is_training,
dropout_keep_prob=1 - self._dropout_rate)
layer_i_inputs = output
if self._output_size is not None:
output = basic.BatchApply(
basic.Linear(self._output_size, use_bias=False))(
output)
return output, attention_states
def attention_module(self, i):
"""Returns the i-th layer attention module."""
return self._attention_modules[i]
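# Illustrative sketch (editor's addition): a small causal TransformerTower.
# Because of the residual connections, the input dimension must equal
# value_size * num_heads when no `output_size` is specified.
def _transformer_tower_example():
  inputs = tf.random_normal([2, 8, 32])  # [B, N, value_size * num_heads]
  tower = TransformerTower(value_size=16, num_heads=2, num_layers=2)
  output, attention_states = tower(inputs, is_training=False)
  return output, attention_states  # output shape: [2, 8, 32].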
class TransformerXL(rnn_core.RNNCore):
"""Transformer with memory of past activations.
  From "Transformer-XL: Attentive Language Models Beyond a Fixed-Length
  Context", Dai et al. 2019, https://arxiv.org/abs/1901.02860.
The TransformerXL can be used in two modes:
* batched, i.e. when chunk_size > 0. Here the model expects 3D input of size
`[batch_size, chunk_size, input_dim]`. In practice, the input chunk size
can be of varying (but statically defined) shape.
* single-step, i.e. when chunk_size = 0. Here the model expects 2D input
`[batch_size, input_dim]`.
"""
def __init__(self,
core_config,
memory_size,
chunk_size,
name='transformer_xl'):
"""Constructs TransformerXL graph.
Args:
core_config: dictionary with TransformerTower config.
memory_size: size of memory.
      chunk_size: expected chunk size of inputs. If greater than zero, inputs
        are of size [batch_size, chunk_size, input_dim]; if equal to zero,
        inputs are of size [batch_size, input_dim].
name: name of variable scope.
"""
super(TransformerXL, self).__init__(name=name)
self._core_config = core_config
self._memory_size = memory_size
self._chunk_size = chunk_size
# Extract some size information from the core config.
self._num_layers = self._core_config['num_layers']
self._value_size = self._core_config['value_size']
self._key_size = self._core_config.get('key_size') or self._value_size
self._num_heads = self._core_config['num_heads']
self._dropout_rate = self._core_config.get('dropout_rate', 0.)
self._embedding_size = self._num_heads * self._value_size
def _build(self, inputs, prev_state, is_training=True):
input_shape = inputs.get_shape().as_list()
if len(input_shape) == 2:
inputs = tf.expand_dims(inputs, 1)
chunk_size = 1
else:
_, chunk_size, _ = input_shape
inputs = default_mlp([self._embedding_size], activate_final=True)(
inputs,
is_training=is_training,
dropout_keep_prob=1 - self._dropout_rate)
transformer = TransformerTower(**self._core_config)
state_for_transformer = None if self._memory_size == 0 else prev_state
output, attention_state = transformer(
inputs, state=state_for_transformer, is_training=is_training)
next_state = []
for i, state_i in enumerate(prev_state):
# Append new elements to memory.
attn_state_i = attention_state[i]
memory = tf.concat([state_i, attn_state_i.embeddings], 1)[:, chunk_size:]
next_state.append(memory)
if self._chunk_size == 0: # For the use-case as a single-step RNN.
output = tf.squeeze(output, 1)
return output, next_state
@property
def state_size(self):
memory_shape = tf.TensorShape([self._memory_size, self._embedding_size])
return [memory_shape] * self._num_layers
@property
def output_size(self):
if self._chunk_size == 0:
return tf.TensorShape([self._embedding_size])
else:
return tf.TensorShape([self._chunk_size, self._embedding_size])
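# Illustrative usage sketch (not part of the original module): connects a
# TransformerXL in batched mode and threads the memory state through a call.
# Only the core_config keys that TransformerXL itself reads are shown here; a
# real TransformerTower config may require further keyword arguments, and all
# sizes below are hypothetical placeholders.
def _transformer_xl_usage_sketch():
  core_config = {'num_layers': 2, 'num_heads': 4, 'value_size': 16}
  txl = TransformerXL(core_config, memory_size=32, chunk_size=8)
  inputs = tf.zeros([1, 8, 24])  # [batch_size, chunk_size, input_dim]
  state = txl.initial_state(batch_size=1)
  output, next_state = txl(inputs, state, is_training=False)
  return output, next_state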
class PoolCompressor(base.AbstractModule):
"""Compress sequence using simple pooling."""
def __init__(self,
compression_rate=2,
kernel_size=0,
pooling='AVG',
compressed_memory_size=None,
episodic_memory_size=None,
name='pool_compressor'):
"""Instantiates compression module.
Args:
compression_rate: integer >= 1. The memory will be compressed from T
time-steps to T / compression_rate. In implementation, this corresponds
to the stride size of the module.
kernel_size: The additional kernel size, if we wish for overlapping
compressed vectors. The total conv1d kernel size is 'compression_rate +
        kernel_size' (as the stride is 'compression_rate').
pooling: AVG or MAX pooling.
compressed_memory_size: Size of compressed memory store.
episodic_memory_size: Size of episodic memory store.
name: module name, used for variable scoping.
"""
super(PoolCompressor, self).__init__(name=name)
self._compression_rate = compression_rate
self._compressed_memory_size = compressed_memory_size
self._episodic_memory_size = episodic_memory_size
self._pooling = pooling
self._kernel_size = kernel_size
def _build(self, memory, **unused_kwargs):
pooled_memories = tf.nn.pool(
memory,
window_shape=(self._compression_rate + self._kernel_size,),
data_format='NWC',
strides=(self._compression_rate,),
padding='VALID',
pooling_type=self._pooling)
return pooled_memories, tf.zeros([], dtype=memory.dtype)
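# Illustrative usage sketch (not part of the original module): with
# compression_rate=2 and kernel_size=0, the average pool halves the time
# axis, so a [1, 8, 16] memory becomes [1, 4, 16]; the returned auxiliary
# loss is always zero for this compressor. Sizes are hypothetical.
def _pool_compressor_usage_sketch():
  memory = tf.zeros([1, 8, 16])  # [batch, time, hidden]
  compressed, loss = PoolCompressor(compression_rate=2)(memory)
  return compressed, loss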
class ConvCompressor(base.AbstractModule):
"""Compress sequence using convolutions, with respect to a desired loss."""
def __init__(self,
compression_rate,
compressed_memory_size,
episodic_memory_size,
kernel_size=0,
dilation_rates=None,
loss='mha',
name='conv_compressor'):
"""Instantiates convolutional compression module.
Args:
compression_rate: integer >= 1. The memory will be compressed from T
time-steps to T / compression_rate. In implementation, this corresponds
to the stride size of the module.
compressed_memory_size: Size of compressed memory store.
episodic_memory_size: Size of regular memory equiv. to TXL's memory.
kernel_size: The additional kernel size, if we wish for overlapping
compressed vectors. The total conv1d kernel size is 'compression_rate +
        kernel_size' (as the stride is 'compression_rate').
dilation_rates: optional iterable of dilation rates for deep dilated
convnet, e.g. [1, 2, 4].
loss: Either 'ae' for an auto-encoder compression loss, or 'mha' for a
multi-head attention loss. The multi-head attention loss attempts to
reconstruct the attention outputs between the (sequence, memory) and
(sequence, compressed_memory).
name: module name, used for variable scoping.
"""
super(ConvCompressor, self).__init__(name=name)
self._loss = loss
self._stride = compression_rate
self._kernel_size = kernel_size
self._dilation_rates = dilation_rates
self._compressed_memory_size = compressed_memory_size
self._episodic_memory_size = episodic_memory_size
def _build(self,
memory,
attention_state=None,
attention_module=None,
is_training=False,
dropout_keep_prob=0.5):
"""Builds graph to compress memory and return auxiliary loss.
Args:
memory: [batch, chunk_size, hidden_size] tensor to be compressed.
attention_state: AttentionState named tuple containing the queries, keys,
and values that were computed at a given layer.
attention_module: the attention module (sonnet class). Useful for
accessing the multi-head attention sub-modules, used to transform hidden
states into queries, keys, and values.
is_training: if is training, useful for dropout gating.
      dropout_keep_prob: the dropout keep probability. Currently unused!
Returns:
(compressed_memory, loss) tuple. Compressed_memory is of size
[batch, time / compression_rate, hidden_size]. The loss is a scalar.
"""
_, chunk_size, hidden_size = memory.get_shape().as_list()
# Start of queryable sequence, from sequence of hiddens. If the memory is
# larger than the chunk size, the whole sequence will be used for queries.
seq_s = max(chunk_size - self._episodic_memory_size, 0)
memory = tf.stop_gradient(memory)
compressed_memory = memory
if self._dilation_rates is not None:
for rate in self._dilation_rates:
conv = snt_conv.Conv1D(
hidden_size,
kernel_shape=2,
rate=rate,
use_bias=False,
padding=snt_conv.VALID,
name='conv_rate_%d' % rate,
)
compressed_memory = conv(compressed_memory)
compressed_memory = tf.nn.relu(compressed_memory)
conv = snt_conv.Conv1D(
hidden_size,
kernel_shape=self._stride + self._kernel_size,
stride=self._stride,
use_bias=False,
padding=snt_conv.VALID,
)
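    # The strided convolution defined above maps an input of length L to
    # floor((L - (stride + kernel_size)) / stride) + 1 compressed vectors;
    # with the default kernel_size=0 this is L / compression_rate whenever L
    # is a multiple of the compression rate.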
# We stop gradients for the compression inputs. This is to avoid the network
# shaping them to be compressible. We would like to compress them
# *conditioned* on the task-specific representations that are learned.
# Queries from current sequence.
queries = tf.stop_gradient(attention_state.queries[:, :, seq_s:])
# Memory of past hidden activations to be compressed.
compressed_memory = conv(compressed_memory)
if self._loss == 'ae':
transpose_conv = conv.transpose()
recovered_memory = transpose_conv(compressed_memory)
loss = tf.reduce_mean(tf.square(recovered_memory - memory))
elif self._loss == 'mha':
# We share the attention module's parameters, but we stop gradients from
# flowing to these parameters with respect to the auxiliary loss, as we
# don't want the attention module to shape queries, keys, and values to
# be compressible.
stop_gradient_getter = custom_getters.Context(
custom_getters.stop_gradient)
with stop_gradient_getter:
# Calculates attention from sequence over memory.
memory_keys = attention_module.multihead_linear(memory, name='key')
memory_values = attention_module.multihead_linear(memory, name='value')
read_words_with_memory = simple_attention(queries, memory_keys,
memory_values)
# Calculates attention from sequence over compressed memory.
compressed_keys = attention_module.multihead_linear(
compressed_memory, name='key')
compressed_values = attention_module.multihead_linear(
compressed_memory, name='value')
read_words_with_compressed_memory = simple_attention(
queries, compressed_keys, compressed_values)
loss = tf.reduce_mean(
tf.square(read_words_with_memory - read_words_with_compressed_memory))
else:
raise NotImplementedError(
'Unrecognised loss: %r, expected `ae` or `mha`' % self._loss)
return compressed_memory, loss
def _compute_avg_attention(attention_state,
compressed_memory_size,
episodic_memory_size,
chunk_size,
n_buckets=6):
"""Computes average attention for Compressive Transformer.
Computes average attention for `n_buckets` over the sequence,
  episodic memory, and compressed memory. In total there are
  3 x n_buckets buckets.
Args:
attention_state: An AttentionState object.
compressed_memory_size: scalar size of compressed memory.
episodic_memory_size: scalar size of episodic memory.
chunk_size: size of input sequence.
n_buckets: number of buckets to average attention per memory,
compressed memory, and sequence.
Returns:
Tuple of (names, avg_weights) where each is a list. The names are
    <segment_type>_p<bucket_id>, e.g. cm_p0, cm_p1, em_p0, em_p1, seq_p0,
    seq_p1. The bucket index is ordered by time; higher values are for attention
over more recent buckets of [seq/cm/em]. The avg_weights are the list
of corresponding values.
"""
cm_size = compressed_memory_size
em_size = episodic_memory_size
split_sizes = []
split_names = []
if cm_size > 0:
split_sizes += [int(cm_size / n_buckets)] * (n_buckets - 1)
split_sizes += [cm_size - int(cm_size / n_buckets) * (n_buckets - 1)]
split_names += ['cm_p%d' % i for i in range(n_buckets)]
if em_size > 0:
split_sizes += [int(em_size / n_buckets)] * (n_buckets - 1)
split_sizes += [em_size - int(em_size / n_buckets) * (n_buckets - 1)]
split_names += ['em_p%d' % i for i in range(n_buckets)]
split_sizes += [int(chunk_size / n_buckets)] * (n_buckets - 1)
split_sizes += [chunk_size - int(chunk_size / n_buckets) * (n_buckets - 1)]
split_names += ['seq_p%d' % i for i in range(n_buckets)]
avg_weights = tf.reduce_mean(attention_state.weights, axis=[0, 1, 2])
split_avg_weights = tf.split(avg_weights, split_sizes)
split_avg_weights = [tf.reduce_sum(x) for x in split_avg_weights]
return split_names, split_avg_weights
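# Worked example (illustrative, not part of the original module): with
# compressed_memory_size=6, episodic_memory_size=12, chunk_size=6 and
# n_buckets=3, the attention axis of length 6 + 12 + 6 = 24 is split into
# buckets of sizes [2, 2, 2, 4, 4, 4, 2, 2, 2], reported under the names
# cm_p0..cm_p2, em_p0..em_p2 and seq_p0..seq_p2 respectively.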
class CompressiveTransformer(rnn_core.RNNCore):
"""Transformer with compressive memory.
From "Compressive Transformers for Long-Range Sequence Modelling"
Rae et al. 2019, https://arxiv.org/abs/1911.05507
"""
def __init__(self,
core_config,
chunk_size,
episodic_memory_size,
compressed_memory_size,
compression_rate=2,
compression_ctor=ConvCompressor,
compression_config=None,
export_stats=False,
name='compressive_transformer'):
"""Constructs Compressive Transformer.
Wraps a TransformerTower and includes a slot-based memory (like the
TransformerXL) alongside a compressed memory which is populated from
the oldest slot-based memories, passed through a compression network.
To train the compression network, an auxiliary compression loss is
added to the collection 'auxiliary_losses'.
Args:
core_config: dictionary with TransformerTower config.
      chunk_size: expected chunk size of inputs. If greater than zero, inputs
        are of size [batch_size, chunk_size, input_dim]; if equal to zero,
        inputs are of size [batch_size, input_dim].
episodic_memory_size: size of slot-based memory (i.e. TransformerXL mem).
compressed_memory_size: size of compressed memory. Total attention len is
episodic_memory_size + compressed_memory_size + chunk_size.
compression_rate: Factor of compression from episodic memories to
compressed memories, i.e. `2` means M memories are mapped to M/2
compressed memories.
compression_ctor: Constructor of compression network, e.g. ConvCompressor,
PoolCompressor, or any newly specified network.
compression_config: optional dictionary with keyword arguments for
compression network.
export_stats: exports compression loss and attention weight per layer to a
tf collection 'stats_export' if true. Can slow down training.
name: name of variable scope.
"""
super(CompressiveTransformer, self).__init__(name=name)
self._core_config = core_config
self._episodic_memory_size = episodic_memory_size
self._compressed_memory_size = compressed_memory_size
self._chunk_size = chunk_size
self._compression_config = dict(compression_config or [])
self._compression_rate = compression_rate
self._compression_config.update({
'compression_rate': compression_rate,
'compressed_memory_size': self._compressed_memory_size,
'episodic_memory_size': self._episodic_memory_size,
})
self._compression_ctor = compression_ctor
self._export_stats = export_stats
# Extract some size information from the core config.
self._num_layers = self._core_config['num_layers']
self._value_size = self._core_config['value_size']
self._key_size = self._core_config.get('key_size') or self._value_size
self._num_heads = self._core_config['num_heads']
self._dropout_rate = self._core_config.get('dropout_rate', 0.)
self._embedding_size = self._num_heads * self._value_size
def _build(self, inputs, prev_state, is_training=True):
"""Builds graph.
Args:
inputs: 3D tensor of shape [batch_size, chunk_size, input_dim] or
2D tensor of shape [batch_size, input_dim].
prev_state: list of length `num_layers` containing `CompressedMemoryState`
tuples.
is_training: applies dropout if true.
Returns:
output: tensor equal in rank to `inputs` with final dimension equal to
`embedding_size` = `key_size` * `num_heads`.
next_state: list of length `num_layers` containing `CompressedMemoryState`
tuples.
"""
input_shape = inputs.get_shape().as_list()
if len(input_shape) == 2:
inputs = tf.expand_dims(inputs, 1)
_, chunk_size, _ = inputs.get_shape().as_list()
num_layers_t = tf.constant(self._num_layers, dtype=inputs.dtype)
inputs = default_mlp([self._embedding_size], activate_final=True)(
inputs,
is_training=is_training,
dropout_keep_prob=1 - self._dropout_rate)
transformer = TransformerTower(**self._core_config)
state_for_transformer = (None
if self._episodic_memory_size == 0 else prev_state)
output, attention_state = transformer(
inputs, state=state_for_transformer, is_training=is_training)
min_num_to_compress = (
self._compression_rate + self._compression_config.get('kernel_size', 0))
num_to_compress = min(max(min_num_to_compress, chunk_size),
chunk_size + self._episodic_memory_size - 1)
def apply_compression_generic(attn_state, attn_module, mem_to_compress,
prev_compressed_memory):
"""Instantiates compression module and returns fn to build graph."""
compress_module = self._compression_ctor(**self._compression_config)
def _inner_fn():
"""Returns (updated compressed memory, compression loss)."""
next_compressed_memory, compression_loss = compress_module(
mem_to_compress,
attention_state=attn_state,
attention_module=attn_module,
is_training=is_training,
dropout_keep_prob=1 - self._dropout_rate,
)
compressed_memory, _ = _concat_and_slice(prev_compressed_memory,
next_compressed_memory)
return compressed_memory, compression_loss
return _inner_fn
def dont_apply_compression_generic(prev_compressed_memory):
"""Instantiates fn to build dummy graph that skips any compression."""
def _inner_fn():
return (prev_compressed_memory,
tf.zeros([], dtype=prev_compressed_memory.dtype))
return _inner_fn
next_state = []
compression_loss = tf.zeros([], dtype=inputs.dtype)
global_attention_weights = []
stats_export_dict = {}
for i, state_i in enumerate(prev_state):
# Append new elements to memory.
attn_state_i = attention_state[i]
memory, concat_memory = _concat_and_slice(state_i.episodic_memory,
attn_state_i.embeddings)
sequence_index = state_i.index[0]
# We special-case chunk_size=1, which is useful for sampling. In the
# single time-step setting we only compress the memory every
# 'compression_rate' steps. Otherwise we assume chunk_size is a multiple
# of `compression_rate`, and thus multiple compressions can be performed
# in parallel.
to_compress = tf.logical_or(
chunk_size > 1,
tf.equal(sequence_index % self._compression_rate,
self._compression_rate - 1))[0]
apply_compression_fn = apply_compression_generic(
attn_state=attn_state_i,
attn_module=transformer.attention_module(i),
mem_to_compress=concat_memory[:, :num_to_compress],
prev_compressed_memory=state_i.compressed_memory,
)
dont_apply_compression_fn = dont_apply_compression_generic(
prev_compressed_memory=state_i.compressed_memory)
compression_output = tf.cond(to_compress, apply_compression_fn,
dont_apply_compression_fn)
compressed_memory, compression_loss_i = compression_output
compression_loss += compression_loss_i
# Log useful stats, compression loss per layer.
stats_export_dict['compression_loss_l%02d' % i] = compression_loss_i
# Attention weights per layer.
attn_names, attn_weights = _compute_avg_attention(
attn_state_i, self._compressed_memory_size,
self._episodic_memory_size, chunk_size)
attn_names_i = [name + '_l%02d' % i for name in attn_names]
stats_export_dict.update(dict(zip(attn_names_i, attn_weights)))
# Avg global attention weights.
if i == 0:
global_attention_weights = [y / num_layers_t for y in attn_weights]
else:
global_attention_weights = [
(x + y / num_layers_t)
for x, y in zip(global_attention_weights, attn_weights)
]
next_state.append(
CompressedMemoryState(
index=state_i.index + 1,
episodic_memory=memory,
compressed_memory=compressed_memory))
next_state = tuple(next_state)
compression_loss /= num_layers_t
stats_export_dict.update(dict(zip(attn_names, global_attention_weights)))
if is_training:
tf.add_to_collections('auxiliary_losses', compression_loss)
if self._export_stats:
tf.add_to_collections('stats_export', stats_export_dict)
if self._chunk_size == 0: # For the use-case as a single-step RNN.
output = tf.squeeze(output, 1)
return output, next_state
@property
def state_size(self):
memory_shape = tf.TensorShape(
[self._episodic_memory_size, self._embedding_size])
cm_shape = tf.TensorShape(
[self._compressed_memory_size, self._embedding_size])
index_shape = tf.TensorShape([1])
shape_per_layer = CompressedMemoryState(
index=index_shape,
episodic_memory=memory_shape,
compressed_memory=cm_shape)
return tuple([shape_per_layer] * self._num_layers)
@property
def output_size(self):
if self._chunk_size == 0:
return tf.TensorShape([self._embedding_size])
else:
return tf.TensorShape([self._chunk_size, self._embedding_size])
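# Illustrative usage sketch (not part of the original module): connects a
# CompressiveTransformer for one chunk and fetches the auxiliary compression
# loss that the module adds to the 'auxiliary_losses' collection during
# training. Only the core_config keys read by this wrapper are shown, and all
# sizes are hypothetical placeholders.
def _compressive_transformer_usage_sketch():
  core_config = {'num_layers': 2, 'num_heads': 4, 'value_size': 16}
  ct = CompressiveTransformer(
      core_config,
      chunk_size=8,
      episodic_memory_size=16,
      compressed_memory_size=8,
      compression_rate=2)
  inputs = tf.zeros([1, 8, 24])  # [batch_size, chunk_size, input_dim]
  state = ct.initial_state(batch_size=1)
  output, next_state = ct(inputs, state, is_training=True)
  compression_loss = tf.get_collection('auxiliary_losses')
  return output, next_state, compression_loss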
| sonnet-1 | sonnet/python/modules/nets/transformer.py |
# Copyright 2018 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Sonnet implementation of VQ-VAE https://arxiv.org/abs/1711.00937."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sonnet.python.modules import base
import tensorflow.compat.v1 as tf
from tensorflow.python.training import moving_averages
class VectorQuantizer(base.AbstractModule):
"""Sonnet module representing the VQ-VAE layer.
Implements the algorithm presented in
'Neural Discrete Representation Learning' by van den Oord et al.
https://arxiv.org/abs/1711.00937
Input any tensor to be quantized. Last dimension will be used as space in
which to quantize. All other dimensions will be flattened and will be seen
as different examples to quantize.
The output tensor will have the same shape as the input.
For example a tensor with shape [16, 32, 32, 64] will be reshaped into
[16384, 64] and all 16384 vectors (each of 64 dimensions) will be quantized
independently.
Args:
embedding_dim: integer representing the dimensionality of the tensors in the
quantized space. Inputs to the modules must be in this format as well.
num_embeddings: integer, the number of vectors in the quantized space.
commitment_cost: scalar which controls the weighting of the loss terms
(see equation 4 in the paper - this variable is Beta).
"""
def __init__(self, embedding_dim, num_embeddings, commitment_cost,
name='vq_layer'):
super(VectorQuantizer, self).__init__(name=name)
self._embedding_dim = embedding_dim
self._num_embeddings = num_embeddings
self._commitment_cost = commitment_cost
with self._enter_variable_scope():
initializer = tf.uniform_unit_scaling_initializer()
self._w = tf.get_variable('embedding', [embedding_dim, num_embeddings],
initializer=initializer, trainable=True)
def _build(self, inputs, is_training):
"""Connects the module to some inputs.
Args:
inputs: Tensor, final dimension must be equal to embedding_dim. All other
leading dimensions will be flattened and treated as a large batch.
is_training: boolean, whether this connection is to training data.
Returns:
dict containing the following keys and values:
quantize: Tensor containing the quantized version of the input.
loss: Tensor containing the loss to optimize.
perplexity: Tensor containing the perplexity of the encodings.
encodings: Tensor containing the discrete encodings, ie which element
of the quantized space each input element was mapped to.
encoding_indices: Tensor containing the discrete encoding indices, ie
which element of the quantized space each input element was mapped to.
"""
# Assert last dimension is same as self._embedding_dim
input_shape = tf.shape(inputs)
with tf.control_dependencies([
tf.Assert(tf.equal(input_shape[-1], self._embedding_dim),
[input_shape])]):
flat_inputs = tf.reshape(inputs, [-1, self._embedding_dim])
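    # Nearest-codebook search below uses the expansion
    # ||x - w||^2 = ||x||^2 - 2 x^T w + ||w||^2, computed for all codes at
    # once; the ||x||^2 term is constant per input but is kept so the values
    # are true squared distances.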
distances = (tf.reduce_sum(flat_inputs**2, 1, keepdims=True)
- 2 * tf.matmul(flat_inputs, self._w)
+ tf.reduce_sum(self._w ** 2, 0, keepdims=True))
encoding_indices = tf.argmax(- distances, 1)
encodings = tf.one_hot(encoding_indices, self._num_embeddings)
encoding_indices = tf.reshape(encoding_indices, tf.shape(inputs)[:-1])
quantized = self.quantize(encoding_indices)
e_latent_loss = tf.reduce_mean((tf.stop_gradient(quantized) - inputs) ** 2)
q_latent_loss = tf.reduce_mean((quantized - tf.stop_gradient(inputs)) ** 2)
loss = q_latent_loss + self._commitment_cost * e_latent_loss
quantized = inputs + tf.stop_gradient(quantized - inputs)
avg_probs = tf.reduce_mean(encodings, 0)
perplexity = tf.exp(- tf.reduce_sum(avg_probs * tf.log(avg_probs + 1e-10)))
return {'quantize': quantized,
'loss': loss,
'perplexity': perplexity,
'encodings': encodings,
'encoding_indices': encoding_indices,}
@property
def embeddings(self):
return self._w
def quantize(self, encoding_indices):
with tf.control_dependencies([encoding_indices]):
w = tf.transpose(self.embeddings.read_value(), [1, 0])
return tf.nn.embedding_lookup(w, encoding_indices, validate_indices=False)
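# Illustrative usage sketch (not part of the original module): quantizes a
# batch of encoder outputs whose final dimension equals embedding_dim and
# exposes the straight-through quantized tensor plus the VQ loss to add to
# the task loss. All sizes are hypothetical placeholders.
def _vector_quantizer_usage_sketch():
  vq = VectorQuantizer(embedding_dim=64, num_embeddings=512,
                       commitment_cost=0.25)
  encoder_output = tf.zeros([16, 32, 32, 64])  # e.g. conv encoder features
  vq_out = vq(encoder_output, is_training=True)
  return vq_out['quantize'], vq_out['loss']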
class VectorQuantizerEMA(base.AbstractModule):
"""Sonnet module representing the VQ-VAE layer.
Implements a slightly modified version of the algorithm presented in
'Neural Discrete Representation Learning' by van den Oord et al.
https://arxiv.org/abs/1711.00937
The difference between VectorQuantizerEMA and VectorQuantizer is that
this module uses exponential moving averages to update the embedding vectors
instead of an auxiliary loss. This has the advantage that the embedding
updates are independent of the choice of optimizer (SGD, RMSProp, Adam, K-Fac,
...) used for the encoder, decoder and other parts of the architecture. For
most experiments the EMA version trains faster than the non-EMA version.
Input any tensor to be quantized. Last dimension will be used as space in
which to quantize. All other dimensions will be flattened and will be seen
as different examples to quantize.
The output tensor will have the same shape as the input.
For example a tensor with shape [16, 32, 32, 64] will be reshaped into
[16384, 64] and all 16384 vectors (each of 64 dimensions) will be quantized
independently.
Args:
embedding_dim: integer representing the dimensionality of the tensors in the
quantized space. Inputs to the modules must be in this format as well.
num_embeddings: integer, the number of vectors in the quantized space.
commitment_cost: scalar which controls the weighting of the loss terms (see
equation 4 in the paper).
decay: float, decay for the moving averages.
epsilon: small float constant to avoid numerical instability.
"""
def __init__(self, embedding_dim, num_embeddings, commitment_cost, decay,
epsilon=1e-5, name='VectorQuantizerEMA'):
super(VectorQuantizerEMA, self).__init__(name=name)
self._embedding_dim = embedding_dim
self._num_embeddings = num_embeddings
self._decay = decay
self._commitment_cost = commitment_cost
self._epsilon = epsilon
with self._enter_variable_scope():
initializer = tf.random_normal_initializer()
# w is a matrix with an embedding in each column. When training, the
# embedding is assigned to be the average of all inputs assigned to that
# embedding.
self._w = tf.get_variable(
'embedding', [embedding_dim, num_embeddings],
initializer=initializer, use_resource=True)
self._ema_cluster_size = tf.get_variable(
'ema_cluster_size', [num_embeddings],
initializer=tf.constant_initializer(0), use_resource=True)
self._ema_w = tf.get_variable(
'ema_dw', initializer=self._w.initialized_value(), use_resource=True)
def _build(self, inputs, is_training):
"""Connects the module to some inputs.
Args:
inputs: Tensor, final dimension must be equal to embedding_dim. All other
leading dimensions will be flattened and treated as a large batch.
is_training: boolean, whether this connection is to training data. When
this is set to False, the internal moving average statistics will not be
updated.
Returns:
dict containing the following keys and values:
quantize: Tensor containing the quantized version of the input.
loss: Tensor containing the loss to optimize.
perplexity: Tensor containing the perplexity of the encodings.
encodings: Tensor containing the discrete encodings, ie which element
of the quantized space each input element was mapped to.
encoding_indices: Tensor containing the discrete encoding indices, ie
which element of the quantized space each input element was mapped to.
"""
# Ensure that the weights are read fresh for each timestep, which otherwise
# would not be guaranteed in an RNN setup. Note that this relies on inputs
# having a data dependency with the output of the previous timestep - if
# this is not the case, there is no way to serialize the order of weight
# updates within the module, so explicit external dependencies must be used.
with tf.control_dependencies([inputs]):
w = self._w.read_value()
input_shape = tf.shape(inputs)
with tf.control_dependencies([
tf.Assert(tf.equal(input_shape[-1], self._embedding_dim),
[input_shape])]):
flat_inputs = tf.reshape(inputs, [-1, self._embedding_dim])
distances = (tf.reduce_sum(flat_inputs**2, 1, keepdims=True)
- 2 * tf.matmul(flat_inputs, w)
+ tf.reduce_sum(w ** 2, 0, keepdims=True))
encoding_indices = tf.argmax(- distances, 1)
encodings = tf.one_hot(encoding_indices, self._num_embeddings)
encoding_indices = tf.reshape(encoding_indices, tf.shape(inputs)[:-1])
quantized = self.quantize(encoding_indices)
e_latent_loss = tf.reduce_mean((tf.stop_gradient(quantized) - inputs) ** 2)
if is_training:
updated_ema_cluster_size = moving_averages.assign_moving_average(
self._ema_cluster_size, tf.reduce_sum(encodings, 0), self._decay)
dw = tf.matmul(flat_inputs, encodings, transpose_a=True)
updated_ema_w = moving_averages.assign_moving_average(self._ema_w, dw,
self._decay)
n = tf.reduce_sum(updated_ema_cluster_size)
updated_ema_cluster_size = (
(updated_ema_cluster_size + self._epsilon)
/ (n + self._num_embeddings * self._epsilon) * n)
normalised_updated_ema_w = (
updated_ema_w / tf.reshape(updated_ema_cluster_size, [1, -1]))
with tf.control_dependencies([e_latent_loss]):
update_w = tf.assign(self._w, normalised_updated_ema_w)
with tf.control_dependencies([update_w]):
loss = self._commitment_cost * e_latent_loss
else:
loss = self._commitment_cost * e_latent_loss
quantized = inputs + tf.stop_gradient(quantized - inputs)
avg_probs = tf.reduce_mean(encodings, 0)
perplexity = tf.exp(- tf.reduce_sum(avg_probs * tf.log(avg_probs + 1e-10)))
return {'quantize': quantized,
'loss': loss,
'perplexity': perplexity,
'encodings': encodings,
'encoding_indices': encoding_indices,}
@property
def embeddings(self):
return self._w
def quantize(self, encoding_indices):
with tf.control_dependencies([encoding_indices]):
w = tf.transpose(self.embeddings.read_value(), [1, 0])
return tf.nn.embedding_lookup(w, encoding_indices, validate_indices=False)
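# Illustrative usage sketch (not part of the original module): the EMA
# variant is connected the same way, but the codebook is updated by moving
# averages that only run when is_training=True, so the returned loss holds
# just the commitment term. All sizes are hypothetical placeholders.
def _vector_quantizer_ema_usage_sketch():
  vq_ema = VectorQuantizerEMA(embedding_dim=64, num_embeddings=512,
                              commitment_cost=0.25, decay=0.99)
  encoder_output = tf.zeros([16, 32, 32, 64])
  vq_out = vq_ema(encoder_output, is_training=True)
  return vq_out['quantize'], vq_out['loss']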
| sonnet-1 | sonnet/python/modules/nets/vqvae.py |
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# pylint: disable=line-too-long
"""Implementation of AlexNet as a Sonnet module.
`AlexNet` is a Sonnet module that implements two variants of
'ImageNet Classification with Deep Convolutional Neural Networks'
Alex Krizhevsky, Ilya Sutskever, Geoffrey E. Hinton, NIPS 2012
http://papers.nips.cc/paper/4824-imagenet-classification-w
The two modes are FULL and MINI, corresponding to the full dual-gpu version and
a cut-down version that is able to run on Cifar10.
AlexNet is no longer state of the art and isn't considered a good starting point
for a vision network.
"""
# pylint: enable=line-too-long
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
from sonnet.python.modules import base
from sonnet.python.modules import basic
from sonnet.python.modules import batch_norm
from sonnet.python.modules import conv
from sonnet.python.modules import util
import tensorflow.compat.v1 as tf
class AlexNet(base.AbstractModule):
"""Implementation of AlexNet with full and mini versions.
Based on:
'ImageNet Classification with Deep Convolutional Neural Networks'
Alex Krizhevsky, Ilya Sutskever, Geoffrey E. Hinton, NIPS 2012
http://papers.nips.cc/paper/4824-imagenet-classification-w
"""
FULL = "FULL"
MINI = "MINI"
POSSIBLE_INITIALIZER_KEYS = {"w", "b"}
def __init__(self,
mode,
use_batch_norm=False,
batch_norm_config=None,
initializers=None,
partitioners=None,
regularizers=None,
bn_on_fc_layers=True,
custom_getter=None,
name="alex_net"):
"""Constructs AlexNet.
Args:
mode: Construction mode of network: `AlexNet.FULL` or `AlexNet.MINI`.
use_batch_norm: Whether to use batch normalization between the output of
a layer and the activation function.
batch_norm_config: Optional mapping of additional configuration for the
`snt.BatchNorm` modules.
initializers: Optional dict containing ops to initialize the filters (with
key 'w') or biases (with key 'b'). The default initializers are
truncated normal initializers, which are commonly used when the inputs
are zero centered (see https://arxiv.org/pdf/1502.03167v3.pdf).
partitioners: Optional dict containing partitioners for the filters
(with key 'w') and the biases (with key 'b'). As a default, no
partitioners are used.
regularizers: Optional dict containing regularizers for the filters
(with key 'w') and the biases (with key 'b'). As a default, no
regularizers are used. A regularizer should be a function that takes
a single `Tensor` as an input and returns a scalar `Tensor` output, e.g.
the L1 and L2 regularizers in `tf.contrib.layers`.
bn_on_fc_layers: If `use_batch_norm` is True, add batch normalization to
the fully-connected layers. This is deprecated.
custom_getter: Callable or dictionary of callables to use as
custom getters inside the module. If a dictionary, the keys
correspond to regexes to match variable names. See the `tf.get_variable`
documentation for information about the custom_getter API.
name: Name of the module.
Raises:
base.Error: If the given `mode` is not one of `AlexNet.FULL`,
or `AlexNet.MINI`.
KeyError: If `initializers`, `partitioners` or `regularizers` contains any
keys other than 'w' or 'b'.
"""
super(AlexNet, self).__init__(custom_getter=custom_getter, name=name)
self._mode = mode
self._use_batch_norm = use_batch_norm
self._bn_on_fc_layers = bn_on_fc_layers
if self._bn_on_fc_layers:
tf.logging.warn("Using BatchNorm on the fully connected layers in "
"AlexNet is not recommended. 'bn_on_fc_layers' is a "
"deprecated option and will likely be removed.")
self._batch_norm_config = batch_norm_config or {}
if self._mode == self.FULL:
# The full AlexNet, i.e. originally ran on two GPUs
self._conv_layers = [
(96, (11, 4), (3, 2)),
(256, (5, 1), (3, 2)),
(384, (3, 1), None),
(384, (3, 1), None),
(256, (3, 1), (3, 2)),
]
self._fc_layers = [4096, 4096]
elif self._mode == self.MINI:
# A cut down version of the half net for testing with Cifar10
self._conv_layers = [
(48, (3, 1), (3, 1)),
(128, (3, 1), (3, 1)),
(192, (3, 1), None),
(192, (3, 1), None),
(128, (3, 1), (3, 1)),
]
self._fc_layers = [1024, 1024]
else:
raise base.Error("AlexNet construction mode '{}' not recognised, "
"must be one of: '{}', '{}'".format(
mode, self.FULL, self.MINI))
self._min_size = self._calc_min_size(self._conv_layers)
self._conv_modules = []
self._linear_modules = []
self._initializers = util.check_initializers(
initializers, self.POSSIBLE_INITIALIZER_KEYS)
self._partitioners = util.check_partitioners(
partitioners, self.POSSIBLE_INITIALIZER_KEYS)
self._regularizers = util.check_regularizers(
regularizers, self.POSSIBLE_INITIALIZER_KEYS)
def _calc_min_size(self, conv_layers):
"""Calculates the minimum size of the input layer.
Given a set of convolutional layers, calculate the minimum value of
the `input_height` and `input_width`, i.e. such that the output has
size 1x1. Assumes snt.VALID padding.
Args:
conv_layers: List of tuples `(output_channels, (kernel_size, stride),
(pooling_size, pooling_stride))`
Returns:
Minimum value of input height and width.
"""
input_size = 1
for _, conv_params, max_pooling in reversed(conv_layers):
if max_pooling is not None:
kernel_size, stride = max_pooling
input_size = input_size * stride + (kernel_size - stride)
if conv_params is not None:
kernel_size, stride = conv_params
input_size = input_size * stride + (kernel_size - stride)
return input_size
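  # Worked example (illustrative): starting from input_size = 1, a max-pool
  # with (kernel_size=3, stride=2) requires 1 * 2 + (3 - 2) = 3 inputs, and a
  # preceding convolution with (kernel_size=3, stride=1) then requires
  # 3 * 1 + (3 - 1) = 5 inputs; iterating back to the first layer yields the
  # minimum spatial size the network accepts.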
def _build(self, inputs, keep_prob=None, is_training=None,
test_local_stats=True):
"""Connects the AlexNet module into the graph.
    The is_training flag only controls the batch norm settings; if `False`, it
    does not force dropout off by overriding any input `keep_prob`. To avoid
    the confusion this may cause, an error is thrown if `is_training=False`
    and `keep_prob` would cause dropout to be applied.
Args:
inputs: A Tensor of size [batch_size, input_height, input_width,
input_channels], representing a batch of input images.
keep_prob: A scalar Tensor representing the dropout keep probability.
When `is_training=False` this must be None or 1 to give no dropout.
is_training: Boolean to indicate if we are currently training. Must be
specified if batch normalization or dropout is used.
test_local_stats: Boolean to indicate to `snt.BatchNorm` if batch
normalization should use local batch statistics at test time.
By default `True`.
Returns:
A Tensor of size [batch_size, output_size], where `output_size` depends
on the mode the network was constructed in.
Raises:
base.IncompatibleShapeError: If any of the input image dimensions
(input_height, input_width) are too small for the given network mode.
ValueError: If `keep_prob` is not None or 1 when `is_training=False`.
ValueError: If `is_training` is not explicitly specified when using
batch normalization.
"""
# Check input shape
if (self._use_batch_norm or keep_prob is not None) and is_training is None:
raise ValueError("Boolean is_training flag must be explicitly specified "
"when using batch normalization or dropout.")
input_shape = inputs.get_shape().as_list()
if input_shape[1] < self._min_size or input_shape[2] < self._min_size:
raise base.IncompatibleShapeError(
"Image shape too small: ({:d}, {:d}) < {:d}".format(
input_shape[1], input_shape[2], self._min_size))
net = inputs
# Check keep prob
if keep_prob is not None:
valid_inputs = tf.logical_or(is_training, tf.equal(keep_prob, 1.))
keep_prob_check = tf.assert_equal(
valid_inputs, True,
message="Input `keep_prob` must be None or 1 if `is_training=False`.")
with tf.control_dependencies([keep_prob_check]):
net = tf.identity(net)
for i, params in enumerate(self._conv_layers):
output_channels, conv_params, max_pooling = params
kernel_size, stride = conv_params
conv_mod = conv.Conv2D(
name="conv_{}".format(i),
output_channels=output_channels,
kernel_shape=kernel_size,
stride=stride,
padding=conv.VALID,
initializers=self._initializers,
partitioners=self._partitioners,
regularizers=self._regularizers)
if not self.is_connected:
self._conv_modules.append(conv_mod)
net = conv_mod(net)
if self._use_batch_norm:
bn = batch_norm.BatchNorm(**self._batch_norm_config)
net = bn(net, is_training, test_local_stats)
net = tf.nn.relu(net)
if max_pooling is not None:
pooling_kernel_size, pooling_stride = max_pooling
net = tf.nn.max_pool(
net,
ksize=[1, pooling_kernel_size, pooling_kernel_size, 1],
strides=[1, pooling_stride, pooling_stride, 1],
padding=conv.VALID)
net = basic.BatchFlatten(name="flatten")(net)
for i, output_size in enumerate(self._fc_layers):
linear_mod = basic.Linear(
name="fc_{}".format(i),
output_size=output_size,
initializers=self._initializers,
partitioners=self._partitioners)
if not self.is_connected:
self._linear_modules.append(linear_mod)
net = linear_mod(net)
if self._use_batch_norm and self._bn_on_fc_layers:
bn = batch_norm.BatchNorm(**self._batch_norm_config)
net = bn(net, is_training, test_local_stats)
net = tf.nn.relu(net)
if keep_prob is not None:
net = tf.nn.dropout(net, keep_prob=keep_prob)
return net
@property
def initializers(self):
return self._initializers
@property
def partitioners(self):
return self._partitioners
@property
def regularizers(self):
return self._regularizers
@property
def min_input_size(self):
"""Returns integer specifying the minimum width and height for the input.
Note that the input can be non-square, but both the width and height must
be >= this number in size.
Returns:
The minimum size as an integer.
"""
return self._min_size
@property
def conv_modules(self):
"""Returns list containing convolutional modules of network.
Returns:
A list containing the Conv2D modules.
"""
self._ensure_is_connected()
return self._conv_modules
@property
def linear_modules(self):
"""Returns list containing linear modules of network.
Returns:
A list containing the Linear modules.
"""
self._ensure_is_connected()
return self._linear_modules
class AlexNetFull(AlexNet):
"""AlexNet constructed in the 'FULL' mode."""
def __init__(self,
use_batch_norm=False,
batch_norm_config=None,
initializers=None,
partitioners=None,
regularizers=None,
custom_getter=None,
name="alex_net_full"):
"""Constructs AlexNet.
Args:
use_batch_norm: Whether to use batch normalization between the output of
a layer and the activation function.
batch_norm_config: Optional mapping of additional configuration for the
`snt.BatchNorm` modules.
initializers: Optional dict containing ops to initialize the filters (with
key 'w') or biases (with key 'b'). The default initializers are
truncated normal initializers, which are commonly used when the inputs
are zero centered (see https://arxiv.org/pdf/1502.03167v3.pdf).
partitioners: Optional dict containing partitioners for the filters
(with key 'w') and the biases (with key 'b'). As a default, no
partitioners are used.
regularizers: Optional dict containing regularizers for the filters
(with key 'w') and the biases (with key 'b'). As a default, no
regularizers are used. A regularizer should be a function that takes
a single `Tensor` as an input and returns a scalar `Tensor` output, e.g.
the L1 and L2 regularizers in `tf.contrib.layers`.
custom_getter: Callable or dictionary of callables to use as
custom getters inside the module. If a dictionary, the keys
correspond to regexes to match variable names. See the `tf.get_variable`
documentation for information about the custom_getter API.
name: Name of the module.
Raises:
KeyError: If `initializers`, `partitioners` or `regularizers` contains any
keys other than 'w' or 'b'.
"""
super(AlexNetFull, self).__init__(
mode=self.FULL,
use_batch_norm=use_batch_norm,
batch_norm_config=batch_norm_config,
initializers=initializers,
partitioners=partitioners,
regularizers=regularizers,
bn_on_fc_layers=False,
custom_getter=custom_getter,
name=name)
class AlexNetMini(AlexNet):
"""AlexNet constructed in the 'MINI' mode."""
def __init__(self,
use_batch_norm=False,
batch_norm_config=None,
initializers=None,
partitioners=None,
regularizers=None,
custom_getter=None,
name="alex_net_mini"):
"""Constructs AlexNet.
Args:
use_batch_norm: Whether to use batch normalization between the output of
a layer and the activation function.
batch_norm_config: Optional mapping of additional configuration for the
`snt.BatchNorm` modules.
initializers: Optional dict containing ops to initialize the filters (with
key 'w') or biases (with key 'b'). The default initializers are
truncated normal initializers, which are commonly used when the inputs
are zero centered (see https://arxiv.org/pdf/1502.03167v3.pdf).
partitioners: Optional dict containing partitioners for the filters
(with key 'w') and the biases (with key 'b'). As a default, no
partitioners are used.
regularizers: Optional dict containing regularizers for the filters
(with key 'w') and the biases (with key 'b'). As a default, no
regularizers are used. A regularizer should be a function that takes
a single `Tensor` as an input and returns a scalar `Tensor` output, e.g.
the L1 and L2 regularizers in `tf.contrib.layers`.
custom_getter: Callable or dictionary of callables to use as
custom getters inside the module. If a dictionary, the keys
correspond to regexes to match variable names. See the `tf.get_variable`
documentation for information about the custom_getter API.
name: Name of the module.
Raises:
KeyError: If `initializers`, `partitioners` or `regularizers` contains any
keys other than 'w' or 'b'.
"""
super(AlexNetMini, self).__init__(
mode=self.MINI,
use_batch_norm=use_batch_norm,
batch_norm_config=batch_norm_config,
initializers=initializers,
partitioners=partitioners,
regularizers=regularizers,
bn_on_fc_layers=False,
custom_getter=custom_getter,
name=name)
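# Illustrative usage sketch (not part of the original module): connects the
# MINI variant to a batch of CIFAR-10-sized images with dropout enabled.
# Sizes are hypothetical; the inputs only need to be at least
# `min_input_size` pixels along both spatial dimensions.
def _alexnet_mini_usage_sketch():
  model = AlexNetMini()
  images = tf.zeros([32, 32, 32, 3])  # [batch, height, width, channels]
  features = model(images, keep_prob=0.5, is_training=True)
  return features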
| sonnet-1 | sonnet/python/modules/nets/alexnet.py |
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests sonnet.python.modules.nets.mlp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
from absl.testing import parameterized
import numpy as np
import sonnet as snt
import tensorflow.compat.v1 as tf
from tensorflow.contrib import layers as contrib_layers
from tensorflow.contrib.eager.python import tfe as contrib_eager
@contrib_eager.run_all_tests_in_graph_and_eager_modes
class MLPTest(parameterized.TestCase, tf.test.TestCase):
def setUp(self):
super(MLPTest, self).setUp()
self.output_sizes = [11, 13, 17]
self.batch_size = 5
self.input_size = 7
self.module_name = "mlp"
self.initializers = {
"w": tf.truncated_normal_initializer(stddev=1.0),
}
self.regularizers = {
"w": contrib_layers.l1_regularizer(scale=0.1),
}
self.partitioners = {
"w": tf.fixed_size_partitioner(num_shards=2),
}
def testName(self):
unique_name = "unique_name"
with tf.variable_scope("scope"):
mlp = snt.nets.MLP(name=unique_name, output_sizes=self.output_sizes)
self.assertEqual(mlp.scope_name, "scope/" + unique_name)
self.assertEqual(mlp.module_name, unique_name)
@parameterized.named_parameters(
("MLPNoFinalActBiasDropout", False, True, True),
("MLPNoFinalActBiasNoDropout", False, True, False),
("MLPNoFinalActNoBiasDropout", False, False, True),
("MLPNoFinalActNoBiasNoDropout", False, False, False),
("MLPFinalActBiasDropout", True, True, True),
("MLPFinalActBiasNoDropout", True, True, False),
("MLPFinalActNoBiasDropout", True, False, True),
("MLPFinalActNoBiasNoDropout", True, False, False),
)
def testConstructor(self, activate_final, use_bias, use_dropout):
with self.assertRaisesRegexp(ValueError, "output_sizes must not be empty"):
mlp = snt.nets.MLP(name=self.module_name,
output_sizes=[],
activate_final=activate_final,
use_bias=use_bias,
use_dropout=use_dropout)
with self.assertRaisesRegexp(KeyError, "Invalid initializer keys.*"):
mlp = snt.nets.MLP(
name=self.module_name,
output_sizes=self.output_sizes,
initializers={"not_w": tf.truncated_normal_initializer(stddev=1.0)},
activate_final=activate_final,
use_bias=use_bias,
use_dropout=use_dropout)
with self.assertRaisesRegexp(TypeError,
"Initializer for 'w' is not a callable "
"function or dictionary"):
mlp = snt.nets.MLP(name=self.module_name,
output_sizes=self.output_sizes,
initializers={"w": tf.zeros([1, 2, 3])},
activate_final=activate_final,
use_bias=use_bias,
use_dropout=use_dropout)
with self.assertRaisesRegexp(TypeError,
"Input 'activation' must be callable"):
mlp = snt.nets.MLP(name=self.module_name,
output_sizes=self.output_sizes,
activation="not_a_function",
activate_final=activate_final,
use_bias=use_bias,
use_dropout=use_dropout)
with self.assertRaisesRegexp(TypeError,
"output_sizes must be iterable"):
mlp = snt.nets.MLP(name=self.module_name,
output_sizes=None,
activate_final=activate_final,
use_bias=use_bias,
use_dropout=use_dropout)
mlp = snt.nets.MLP(name=self.module_name,
output_sizes=self.output_sizes,
initializers=self.initializers,
partitioners=self.partitioners,
regularizers=self.regularizers,
activate_final=activate_final,
use_bias=use_bias,
use_dropout=use_dropout)
self.assertEqual(self.initializers, mlp.initializers)
self.assertEqual(self.regularizers, mlp.regularizers)
self.assertEqual(self.partitioners, mlp.partitioners)
self.assertEqual(len(mlp.layers), len(self.output_sizes))
for i in range(0, len(mlp.layers)):
self.assertEqual(mlp.layers[i].output_size, self.output_sizes[i])
@parameterized.named_parameters(
("MLPNoFinalActBiasDropout", False, True, True),
("MLPNoFinalActBiasNoDropout", False, True, False),
("MLPNoFinalActNoBiasDropout", False, False, True),
("MLPNoFinalActNoBiasNoDropout", False, False, False),
("MLPFinalActBiasDropout", True, True, True),
("MLPFinalActBiasNoDropout", True, True, False),
("MLPFinalActNoBiasDropout", True, False, True),
("MLPFinalActNoBiasNoDropout", True, False, False),
)
def testActivateBiasFlags(self, activate_final, use_bias, use_dropout):
mlp = snt.nets.MLP(name=self.module_name,
output_sizes=self.output_sizes,
activate_final=activate_final,
use_bias=use_bias,
use_dropout=use_dropout)
inputs = tf.random_normal(
dtype=tf.float32, shape=[self.batch_size, self.input_size])
net = mlp(inputs)
if not tf.executing_eagerly():
if activate_final:
self.assertEqual(net.op.type, "Relu")
elif use_bias:
self.assertIn(net.op.type, ("Add", "AddV2"))
else:
self.assertEqual(net.op.type, "MatMul")
variables = mlp.get_variables()
if use_bias:
self.assertEqual(len(variables), len(self.output_sizes) * 2)
else:
self.assertEqual(len(variables), len(self.output_sizes))
def testShape(self):
inputs = tf.random_normal(
dtype=tf.float32, shape=[self.batch_size, self.input_size])
mlp = snt.nets.MLP(name=self.module_name, output_sizes=self.output_sizes)
output = mlp(inputs)
self.assertTrue(output.get_shape().is_compatible_with(
[self.batch_size, self.output_sizes[-1]]))
self.assertEqual((self.batch_size, self.input_size), mlp.input_shape)
self.assertEqual(self.output_sizes, list(mlp.output_sizes))
@parameterized.named_parameters(
("MLPNoFinalActBiasDropout", False, True, True),
("MLPNoFinalActBiasNoDropout", False, True, False),
("MLPNoFinalActNoBiasDropout", False, False, True),
("MLPNoFinalActNoBiasNoDropout", False, False, False),
("MLPFinalActBiasDropout", True, True, True),
("MLPFinalActBiasNoDropout", True, True, False),
("MLPFinalActNoBiasDropout", True, False, True),
("MLPFinalActNoBiasNoDropout", True, False, False),
)
  def testRegularizersInRegularizationLosses(self, activate_final, use_bias,
use_dropout):
if use_bias:
regularizers = {
"w": contrib_layers.l1_regularizer(scale=0.5),
"b": contrib_layers.l2_regularizer(scale=0.5)
}
else:
regularizers = {"w": contrib_layers.l1_regularizer(scale=0.5)}
inputs = tf.random_normal(
dtype=tf.float32, shape=[self.batch_size, self.input_size])
mlp = snt.nets.MLP(name=self.module_name, output_sizes=self.output_sizes,
regularizers=regularizers, use_dropout=use_dropout)
mlp(inputs)
graph_regularizers = tf.get_collection(
tf.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(len(graph_regularizers), 3 * (2 if use_bias else 1))
if not tf.executing_eagerly():
self.assertRegexpMatches(graph_regularizers[0].name, ".*l1_regularizer.*")
if use_bias:
self.assertRegexpMatches(graph_regularizers[1].name,
".*l2_regularizer.*")
def testClone(self):
with tf.variable_scope("scope1"):
mlp = snt.nets.MLP(name=self.module_name, output_sizes=self.output_sizes)
with tf.variable_scope("scope2"):
mlp_clone = mlp.clone()
self.assertEqual("scope1/" + self.module_name, mlp.scope_name)
self.assertEqual(self.module_name, mlp.module_name)
self.assertEqual("scope2/" + self.module_name + "_clone",
mlp_clone.scope_name)
input_to_mlp = tf.random_normal(
dtype=tf.float32, shape=[self.batch_size, self.input_size])
mlp_out = mlp(input_to_mlp)
mlp_clone_output = mlp_clone(mlp_out)
self.assertEqual(mlp_out.get_shape(), mlp_clone_output.get_shape())
variables = mlp.get_variables()
clone_variables = mlp_clone.get_variables()
self.assertEqual(len(variables), len(clone_variables))
self.assertNotEqual(
set(var.name for var in variables),
set(var.name for var in clone_variables))
@parameterized.named_parameters(
("MLPNoFinalActBiasDropout", False, True, True),
("MLPNoFinalActBiasNoDropout", False, True, False),
("MLPNoFinalActNoBiasDropout", False, False, True),
("MLPNoFinalActNoBiasNoDropout", False, False, False),
("MLPFinalActBiasDropout", True, True, True),
("MLPFinalActBiasNoDropout", True, True, False),
("MLPFinalActNoBiasDropout", True, False, True),
("MLPFinalActNoBiasNoDropout", True, False, False),
)
def testTranspose(self, activate_final, use_bias, use_dropout):
with tf.variable_scope("scope1"):
mlp = snt.nets.MLP(name=self.module_name,
output_sizes=self.output_sizes,
activate_final=activate_final,
use_bias=use_bias,
use_dropout=use_dropout)
with tf.variable_scope("scope2"):
mlp_transpose = mlp.transpose()
self.assertEqual("scope1/" + self.module_name, mlp.scope_name)
self.assertEqual(self.module_name, mlp.module_name)
self.assertEqual("scope2/" + self.module_name + "_transpose",
mlp_transpose.scope_name)
self.assertEqual(self.module_name + "_transpose",
mlp_transpose.module_name)
input_to_mlp = tf.random_normal(
dtype=tf.float32, shape=[self.batch_size, self.input_size])
with self.assertRaisesRegexp(snt.Error,
"Variables in {} not instantiated yet, "
"__call__ the module first."
.format(mlp.layers[-1].scope_name)):
mlp_transpose(input_to_mlp)
mlp_transpose = mlp.transpose(name="another_mlp_transpose")
mlp_out = mlp(input_to_mlp)
mlp_transposed_output = mlp_transpose(mlp_out)
self.assertEqual(mlp_transposed_output.get_shape(),
input_to_mlp.get_shape())
self.assertEqual(mlp_transpose.use_bias, mlp.use_bias)
self.assertEqual(mlp_transpose.activate_final, mlp.activate_final)
if not tf.executing_eagerly():
if activate_final:
self.assertEqual(mlp_transposed_output.op.type, "Relu")
elif use_bias:
self.assertIn(mlp_transposed_output.op.type, ("Add", "AddV2"))
else:
self.assertEqual(mlp_transposed_output.op.type, "MatMul")
for i in range(0, len(mlp.layers)):
self.assertEqual(mlp_transpose.layers[i].output_size,
mlp.layers[-1 - i].input_shape[1])
self.evaluate(tf.global_variables_initializer())
self.evaluate(mlp_transposed_output)
variables = mlp_transpose.get_variables()
if use_bias:
self.assertEqual(len(variables), len(self.output_sizes) * 2)
else:
self.assertEqual(len(variables), len(self.output_sizes))
# Test transpose method's activate_final arg.
mlp_activate_final = mlp.transpose(activate_final=True)
mlp_no_activate_final = mlp.transpose(activate_final=False)
mlp_inherit_activate_final = mlp.transpose()
self.assertEqual(True, mlp_activate_final.activate_final)
self.assertEqual(False, mlp_no_activate_final.activate_final)
self.assertEqual(mlp.activate_final,
mlp_inherit_activate_final.activate_final)
def testVariableMap(self):
"""Tests for regressions in variable names."""
use_bias = True
use_dropout = True
var_names_w = [
u"mlp/linear_0/w:0",
u"mlp/linear_1/w:0",
u"mlp/linear_2/w:0",
]
var_names_b = [
u"mlp/linear_0/b:0",
u"mlp/linear_1/b:0",
u"mlp/linear_2/b:0",
]
correct_variable_names = set(var_names_w + var_names_b)
mlp = snt.nets.MLP(name=self.module_name,
output_sizes=self.output_sizes,
activate_final=False,
use_bias=use_bias,
use_dropout=use_dropout)
input_shape = [10, 100]
input_to_net = tf.random_normal(dtype=tf.float32, shape=input_shape)
_ = mlp(input_to_net)
variable_names = [var.name for var in mlp.get_variables()]
self.assertEqual(set(variable_names), set(correct_variable_names))
def testCustomGettersUsed(self):
pi = 3.1415
def get_pi(getter, *args, **kwargs):
"""A custom getter which sets all variables to pi."""
variable = getter(*args, **kwargs)
return variable * 0.0 + pi
mlpi = snt.nets.MLP(output_sizes=[10], custom_getter=get_pi)
mlpi(tf.zeros(shape=(2, 1)))
mlp_variables = [mlpi.layers[0].w, mlpi.layers[0].b]
self.evaluate(tf.global_variables_initializer())
for var_value in self.evaluate(mlp_variables):
self.assertAllClose(var_value, np.zeros_like(var_value) + pi)
def testDefun(self):
mlp = snt.nets.MLP([1, 2, 3])
mlp = contrib_eager.defun(mlp)
y = mlp(tf.ones([1, 1]))
self.assertListEqual(y.shape.as_list(), [1, 3])
def testDropoutOff(self):
"""Make sure dropout layers aren't added to the computation graph."""
if tf.executing_eagerly():
self.skipTest("Test not supported when executing eagerly")
mlp_name = "test_dropout_on_mlp"
mlp = snt.nets.MLP([1], use_dropout=False, use_bias=False,
activate_final=True, name=mlp_name)
_ = mlp(tf.ones([1, 1]), is_training=True,
dropout_keep_prob=0.5)
op_names = [op.name for op in tf.get_default_graph().get_operations()]
op_to_look_for = "{}_1/dropout/Shape".format(mlp_name)
self.assertNotIn(op_to_look_for, op_names)
def testDropout(self):
if tf.executing_eagerly():
self.skipTest("Test not supported when executing eagerly")
mlp_name = "test_dropout_on_mlp"
mlp = snt.nets.MLP([1], use_dropout=True, use_bias=False,
activate_final=True, name=mlp_name)
_ = mlp(tf.ones([1, 1]), is_training=True,
dropout_keep_prob=0.5)
op_names = [op.name for op in tf.get_default_graph().get_operations()]
op_to_look_for = "{}_1/dropout/Shape".format(mlp_name)
self.assertIn(op_to_look_for, op_names)
def testDropoutTensor(self):
"""Checks support for tf.Bool Tensors."""
if tf.executing_eagerly():
self.skipTest("Test not supported when executing eagerly")
mlp_name = "test_dropout_on_mlp"
mlp = snt.nets.MLP([1], use_dropout=True, use_bias=False,
activate_final=True, name=mlp_name)
_ = mlp(tf.ones([1, 1]), is_training=tf.convert_to_tensor(True, tf.bool),
dropout_keep_prob=0.5)
op_names = [op.name for op in tf.get_default_graph().get_operations()]
op_to_look_for = "{}_1/dropout/Shape".format(mlp_name)
self.assertIn(op_to_look_for, op_names)
if __name__ == "__main__":
tf.test.main()
| sonnet-1 | sonnet/python/modules/nets/mlp_test.py |
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Example script to train a multi-layer perceptron (MLP) on MNIST."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
# Dependency imports
import numpy as np
import sonnet as snt
from sonnet.examples import dataset_mnist_cifar10 as dataset_mnist
import tensorflow.compat.v1 as tf
FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_float("learning_rate", 0.1, "Learning rate")
tf.flags.DEFINE_integer("num_hidden", 100, "Number of hidden units in MLP.")
tf.flags.DEFINE_integer("num_train_steps", 1001,
"How many training steps to take.")
tf.flags.DEFINE_integer("report_every", 10,
"Interval, in mini-batches, to report progress.")
tf.flags.DEFINE_integer("test_batch_size", 10000, "Batch size for test.")
tf.flags.DEFINE_integer("test_every", 200,
"Interval, in train mini-batches, to run test pass.")
tf.flags.DEFINE_integer("train_batch_size", 200, "Batch size for training.")
tf.flags.DEFINE_boolean("gpu_auto_mixed_precision", False,
"Enable GPU automatic mixed precision training")
def train_and_eval(train_batch_size, test_batch_size, num_hidden, learning_rate,
num_train_steps, report_every, test_every,
gpu_auto_mixed_precision=False):
"""Creates a basic MNIST model using Sonnet, then trains and evaluates it."""
data_dict = dataset_mnist.get_data("mnist", train_batch_size, test_batch_size)
train_data = data_dict["train_iterator"]
test_data = data_dict["test_iterator"]
# Sonnet separates the configuration of a model from its attachment into the
# graph. Here we configure the shape of the model, but this call does not
# place any ops into the graph.
mlp = snt.nets.MLP([num_hidden, data_dict["num_classes"]])
train_images, train_labels = train_data.get_next()
test_images, test_labels = test_data.get_next()
# Flatten images to pass to model.
train_images = snt.BatchFlatten()(train_images)
test_images = snt.BatchFlatten()(test_images)
# Call our model, which creates it in the graph. Our build function
# is parameterized by the source of images, and here we connect the model to
# the training images.
train_logits = mlp(train_images)
# Training loss and optimizer.
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=train_labels, logits=train_logits)
loss_avg = tf.reduce_mean(loss)
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
env_enable_mixed_precision = (
os.environ.get("TF_ENABLE_AUTO_MIXED_PRECISION", default="0") == "1")
if env_enable_mixed_precision or gpu_auto_mixed_precision:
tf_version_list = tf.__version__.split(".")
if int(tf_version_list[0]) < 2:
if int(tf_version_list[1]) < 14:
raise RuntimeError(
"TensorFlow 1.14.0 or newer is required "
"for GPU automatic mixed precision training.")
optimizer = tf.train.experimental.enable_mixed_precision_graph_rewrite(
optimizer)
optimizer_step = optimizer.minimize(loss_avg)
# As before, we make a second instance of our model in the graph, which shares
# its parameters with the first instance of the model. The Sonnet Module code
# takes care of the variable sharing for us: because we are calling the same
# instance of Model, we will automatically reference the same, shared
# variables.
test_logits = mlp(test_images)
test_classes = tf.nn.softmax(test_logits)
test_correct = tf.nn.in_top_k(test_classes, test_labels, k=1)
with tf.train.SingularMonitoredSession() as sess:
for step_idx in range(num_train_steps):
current_loss, _ = sess.run([loss_avg, optimizer_step])
if step_idx % report_every == 0:
tf.logging.info("Step: %4d of %d - loss: %.02f.",
step_idx + 1, num_train_steps, current_loss)
if step_idx % test_every == 0:
sess.run(test_data.initializer)
current_correct = sess.run(test_correct)
correct_count = np.count_nonzero(current_correct)
tf.logging.info("Test: %d of %d correct.",
correct_count, test_batch_size)
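# The following sketch is illustrative only and is not used by this example:
# it shows the variable-sharing behaviour described in the comments above,
# where calling the same module instance twice reuses one set of parameters.
# The layer sizes and input shapes below are arbitrary.
def _variable_sharing_sketch():
  """Connects one MLP twice and returns its (shared) variables."""
  mlp = snt.nets.MLP([32, 10])
  logits_a = mlp(tf.zeros([4, 784]))
  logits_b = mlp(tf.ones([4, 784]))
  # Only one set of parameters exists: two layers, each with a weight and a
  # bias, regardless of how many times the module is connected.
  del logits_a, logits_b
  return mlp.get_variables()  # 4 variables in total.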
def main(unused_argv):
train_and_eval(FLAGS.train_batch_size, FLAGS.test_batch_size,
FLAGS.num_hidden, FLAGS.learning_rate, FLAGS.num_train_steps,
FLAGS.report_every, FLAGS.test_every,
FLAGS.gpu_auto_mixed_precision)
if __name__ == "__main__":
tf.app.run()
| sonnet-1 | sonnet/examples/mnist_mlp.py |
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Example script to train the Relational Memory Core.
This is a reduced size version of the "Nth Farthest" task defined in:
https://arxiv.org/abs/1806.01822
This task is resource intensive; it is advisable to run it on a GPU with 16GB of RAM.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
# Dependency imports
from absl import flags
import six
import sonnet as snt
from sonnet.examples import dataset_nth_farthest
import tensorflow.compat.v1 as tf
FLAGS = flags.FLAGS
flags.DEFINE_float("learning_rate", 1e-4, "Initial learning rate.")
flags.DEFINE_float("min_learning_rate", 8e-5, "Minimum learning rate.")
flags.DEFINE_integer("batch_size", 1600, "Batch size.")
flags.DEFINE_integer("head_size", 2048, "Total memory size for the RMC.")
flags.DEFINE_integer("num_heads", 1, "Attention heads for RMC.")
flags.DEFINE_integer("num_mems", 4, "Number of memories for RMC.")
flags.DEFINE_integer("num_blocks", 1, "Number of attention blocks for RMC.")
flags.DEFINE_string("gate_style", "unit", "Gating style for RMC.")
flags.DEFINE_integer("num_objects", 4, "Number of objects per dataset sample.")
flags.DEFINE_integer("num_features", 4, "Feature size per object.")
flags.DEFINE_integer("epochs", 1000000, "Total training epochs.")
flags.DEFINE_integer("log_stride", 100, "Iterations between reports.")
class SequenceModel(snt.AbstractModule):
"""Model to process n-th farthest sequence batches."""
def __init__(
self,
core,
target_size,
final_mlp,
name="sequence_model"):
super(SequenceModel, self).__init__(name=name)
self._core = core
self._target_size = target_size
self._final_mlp = final_mlp
def _build(self, inputs):
"""Dynamic unroll across input objects.
Args:
inputs: tensor (batch x num_objects x feature). Objects to sort.
Returns:
Tensor (batch x num_objects); logits indicating the reference objects.
"""
batch_size = inputs.get_shape()[0]
output_sequence, _ = tf.nn.dynamic_rnn(
cell=self._core,
inputs=inputs,
time_major=False,
initial_state=self._core.initial_state(
batch_size, trainable=False)
)
outputs = snt.BatchFlatten()(output_sequence[:, -1, :])
outputs = self._final_mlp(outputs)
logits = snt.Linear(self._target_size)(outputs)
return logits
def build_and_train(iterations, log_stride, test=False):
"""Construct the data, model, loss and optimizer then train."""
# Test mode settings.
batch_size = 2 if test else FLAGS.batch_size
num_mems = 2 if test else FLAGS.num_mems
  num_heads = 1 if test else FLAGS.num_heads
  num_blocks = 1 if test else FLAGS.num_blocks
head_size = 4 if test else FLAGS.head_size
num_objects = 2 if test else FLAGS.num_objects
num_features = 4 if test else FLAGS.num_features
mlp_size = (20,) if test else (256, 256, 256, 256)
with tf.Graph().as_default():
t0 = time.time()
# Initialize the dataset.
dataset = dataset_nth_farthest.NthFarthest(
batch_size, num_objects, num_features)
# Create the model.
core = snt.RelationalMemory(
mem_slots=num_mems,
head_size=head_size,
num_heads=num_heads,
num_blocks=num_blocks,
gate_style=FLAGS.gate_style)
final_mlp = snt.nets.MLP(
output_sizes=mlp_size,
activate_final=True)
model = SequenceModel(
core=core,
target_size=num_objects,
final_mlp=final_mlp)
tf.logging.info("Instantiated models ({:3f})".format(time.time() - t0))
# Get train and test data.
inputs_train, labels_train = dataset.get_batch()
inputs_test, labels_test = dataset.get_batch()
# Define target accuracy.
def compute_accuracy(logits, targets, name="accuracy"):
correct_pred = tf.cast(
tf.equal(tf.cast(targets, tf.int64), tf.argmax(logits, 1)),
tf.float32)
return tf.reduce_mean(correct_pred, name=name)
# Define the loss & accuracy.
def loss_fn(inputs, labels):
"""Creates the loss and the exports."""
logits = model(inputs)
labels = tf.cast(labels, tf.int32)
loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=labels))
accuracy = compute_accuracy(logits, labels)
return loss, accuracy
# Get training step counter.
global_step = tf.get_variable(
name="global_step",
shape=[],
dtype=tf.int64,
initializer=tf.zeros_initializer(),
trainable=False,
collections=[tf.GraphKeys.GLOBAL_VARIABLES,
tf.GraphKeys.GLOBAL_STEP])
# Create the optimizer.
learning_rate_op = tf.reduce_max([
tf.train.exponential_decay(
FLAGS.learning_rate,
global_step,
decay_steps=FLAGS.epochs // 100,
decay_rate=0.9,
staircase=False),
FLAGS.min_learning_rate
])
optimizer = tf.train.AdamOptimizer(learning_rate_op)
train_loss, _ = loss_fn(inputs_train, labels_train)
step_op = optimizer.minimize(train_loss, global_step=global_step)
# Compute test accuracy
logits_test = model(inputs_test)
labels_test = tf.cast(labels_test, tf.int32)
test_acc = compute_accuracy(logits_test, labels_test)
tf.logging.info("Created losses and optimizers ({:3f})".format(
time.time() - t0))
# Begin Training.
t0 = time.time()
train_losses = []
steps = []
test_accs = []
tf.logging.info("Starting training ({:3f})".format(time.time() - t0))
with tf.train.SingularMonitoredSession() as sess:
for it in six.moves.range(iterations):
sess.run([step_op, learning_rate_op])
if it % log_stride == 0:
loss_v, acc_v = sess.run([train_loss, test_acc])
elapsed = time.time() - t0
tf.logging.info(
"iter: {:2d}, train loss {:3f}; test acc {:3f} ({:3f})".format(
it, loss_v, acc_v, elapsed))
train_losses.append(loss_v)
steps.append(it)
test_accs.append(acc_v)
return steps, train_losses, test_accs
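# Illustrative sketch (not called anywhere): the `test` argument above shrinks
# the model and batch size, which makes a quick CPU smoke run feasible. The
# iteration counts here are arbitrary.
def _smoke_run_sketch():
  """Runs two tiny training iterations using the reduced test settings."""
  return build_and_train(iterations=2, log_stride=1, test=True)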
def main(unused_argv):
build_and_train(FLAGS.epochs, FLAGS.log_stride)
if __name__ == "__main__":
tf.app.run()
| sonnet-1 | sonnet/examples/rmc_nth_farthest.py |
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for rnn_shakespeare."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import sonnet.examples.rnn_shakespeare as rnn_shakespeare
import tensorflow.compat.v1 as tf
class TinyShakespeareTest(tf.test.TestCase):
def testRun(self):
rnn_shakespeare.train(5, 5, 4)
if __name__ == "__main__":
tf.test.main()
| sonnet-1 | sonnet/examples/rnn_shakespeare_test.py |
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for brnn_ptb."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import random
import string
from sonnet.examples import brnn_ptb
import tensorflow.compat.v1 as tf
FLAGS = tf.flags.FLAGS
def _make_random_word():
return ''.join(random.choice(string.ascii_uppercase + string.ascii_lowercase)
for _ in range(random.randint(1, 15)))
def _make_random_vocab():
# Make a limited vocab that all the sentences should be made out of, as the
# BRNN model builds a finite vocab internally.
return [_make_random_word() for _ in range(1000)]
def _make_sentence_with_vocab(vocab):
return ' '.join(vocab[random.randint(0, len(vocab) - 1)]
for _ in range(random.randint(1, 30)))
def _make_fake_corpus_with_vocab(vocab, corpus_size):
return '\n'.join(_make_sentence_with_vocab(vocab)
for _ in range(corpus_size))
class BrnnPtbTest(tf.test.TestCase):
def testScriptRunsWithFakeData(self):
# Make some small fake data in same format as real PTB.
tmp_dir = tf.test.get_temp_dir()
vocab = _make_random_vocab()
with tf.gfile.GFile(os.path.join(tmp_dir, 'ptb.train.txt'), 'w') as f:
f.write(_make_fake_corpus_with_vocab(vocab, 1000))
with tf.gfile.GFile(os.path.join(tmp_dir, 'ptb.valid.txt'), 'w') as f:
f.write(_make_fake_corpus_with_vocab(vocab, 100))
with tf.gfile.GFile(os.path.join(tmp_dir, 'ptb.test.txt'), 'w') as f:
f.write(_make_fake_corpus_with_vocab(vocab, 100))
# Make model small, only run for 1 epoch.
FLAGS.num_training_epochs = 1
FLAGS.hidden_size = 50
FLAGS.embedding_size = 50
FLAGS.data_path = tmp_dir
# Checkpoint to tmp directory so that test runs hermetically, and there is
# no possibility of reusing checkpoints from previous runs.
FLAGS.logbasedir = tmp_dir
# Do training, test, evaluation.
brnn_ptb.main(None)
if __name__ == '__main__':
tf.test.main()
| sonnet-1 | sonnet/examples/brnn_ptb_test.py |
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Classes to load textual data from Shakespeare's plays."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
# Dependency imports
import numpy as np
from six.moves import xrange  # pylint: disable=redefined-builtin
import sonnet as snt
import tensorflow.compat.v1 as tf
FLAGS = tf.flags.FLAGS
SequenceDataOpsNoMask = collections.namedtuple("SequenceDataOpsNoMask",
("obs", "target"))
class TokenDataSource(object):
"""Encapsulates loading/tokenization logic for disk-based data."""
ROOTS = [
"sonnet/examples/data/",
os.path.join(os.path.dirname(os.path.realpath(__file__)), "data"),
]
DEFAULT_START_TOKENS = ["_unk_", "_null_", "_eos_", "|"]
UNK, NULL, WORD_EOS, CHAR_EOS = DEFAULT_START_TOKENS
def __init__(self, data_file, vocab_data_file):
"""Creates a TokenDataSource instance.
Args:
data_file: file object containing text data to be tokenized.
vocab_data_file: file object containing text data used to initialize
the vocabulary.
"""
def reading_function(file_name):
for root in self.ROOTS:
file_path = os.path.join(root, file_name)
if os.path.exists(file_path):
break
file_path = None
assert file_path is not None, ("Couldn't locate %s in %r" %
(file_name, self.ROOTS))
with open(file_path, mode="rb") as fp:
return list(fp.read().decode().replace("\n", self.CHAR_EOS))
self._vocab_dict = {}
self._inv_vocab_dict = {}
token_list = reading_function(vocab_data_file)
self.vocab_size = 0
for token in self.DEFAULT_START_TOKENS + token_list:
if token not in self._vocab_dict:
self._vocab_dict[token] = self.vocab_size
self._inv_vocab_dict[self.vocab_size] = token
self.vocab_size += 1
raw_data = reading_function(data_file)
self.flat_data = np.array(self.tokenize(raw_data), dtype=np.int32)
self.num_tokens = self.flat_data.shape[0]
def tokenize(self, token_list):
"""Produces the list of integer indices corresponding to a token list."""
return [
self._vocab_dict.get(token, self._vocab_dict[self.UNK])
for token in token_list
]
def decode(self, token_list):
"""Produces a human-readable representation of the token list."""
return "".join([self._inv_vocab_dict[token] for token in token_list])
class TinyShakespeareDataset(snt.AbstractModule):
"""Tiny Shakespeare sequence data."""
TRAIN = "train"
VALID = "valid"
TEST = "test"
def __init__(self, num_steps=1, batch_size=1,
subset="train", random=False, dtype=tf.float32,
name="tiny_shakespeare_dataset"):
"""Initializes a TinyShakespeare sequence data object.
Args:
num_steps: sequence_length.
batch_size: batch size.
subset: 'train', 'valid' or 'test'.
random: boolean indicating whether to do random sampling of sequences.
Default is false (sequential sampling).
dtype: type of generated tensors (both observations and targets).
name: object name.
Raises:
ValueError: if subset is not train, valid or test.
"""
if subset not in [self.TRAIN, self.VALID, self.TEST]:
raise ValueError("subset should be %s, %s, or %s. Received %s instead."
% (self.TRAIN, self.VALID, self.TEST, subset))
super(TinyShakespeareDataset, self).__init__(name=name)
# Generate vocab from train set.
self._vocab_file = "ts.train.txt"
self._data_file = "ts.{}.txt".format(subset)
self._num_steps = num_steps
self._batch_size = batch_size
self._random_sampling = random
self._dtype = dtype
self._data_source = TokenDataSource(
data_file=self._data_file,
vocab_data_file=self._vocab_file)
self._vocab_size = self._data_source.vocab_size
self._flat_data = self._data_source.flat_data
self._n_flat_elements = self._data_source.num_tokens
self._num_batches = self._n_flat_elements // (self._num_steps * batch_size)
self._reset_head_indices()
self._queue_capacity = 10
@property
def vocab_size(self):
return self._vocab_size
def _reset_head_indices(self):
self._head_indices = np.random.randint(
low=0, high=self._n_flat_elements, size=[self._batch_size])
def _one_hot(self, token):
return tf.one_hot(token, self._vocab_size, axis=-1, dtype=self._dtype)
def _get_batch(self):
"""Returns a batch of sequences.
Returns:
obs: np.int32 array of size [Time, Batch]
target: np.int32 array of size [Time, Batch]
"""
batch_indices = np.mod(
np.array([
np.arange(head_index, head_index + self._num_steps + 1) for
head_index in self._head_indices]),
self._n_flat_elements)
obs = np.array([
self._flat_data[indices[:self._num_steps]]
for indices in batch_indices]).T
target = np.array([
self._flat_data[indices[1:self._num_steps + 1]]
for indices in batch_indices]).T
if self._random_sampling:
self._reset_head_indices()
else:
self._head_indices = np.mod(
self._head_indices + self._num_steps, self._n_flat_elements)
return obs, target
def _build(self):
"""Returns a tuple containing observation and target one-hot tensors."""
q = tf.FIFOQueue(
self._queue_capacity, [self._dtype, self._dtype],
shapes=[[self._num_steps, self._batch_size, self._vocab_size]]*2)
obs, target = tf.py_func(self._get_batch, [], [tf.int32, tf.int32])
obs = self._one_hot(obs)
target = self._one_hot(target)
enqueue_op = q.enqueue([obs, target])
obs, target = q.dequeue()
tf.train.add_queue_runner(tf.train.QueueRunner(q, [enqueue_op]))
return SequenceDataOpsNoMask(obs, target)
def cost(self, logits, target):
"""Returns cost.
Args:
logits: model output.
target: target.
Returns:
Cross-entropy loss for a sequence of logits. The loss will be averaged
across time steps if time_average_cost was enabled at construction time.
"""
logits = tf.reshape(logits, [self._num_steps * self._batch_size, -1])
target = tf.reshape(target, [self._num_steps * self._batch_size, -1])
xent = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=target)
loss = tf.reduce_sum(xent)
return loss / self._batch_size
def to_human_readable(self,
data,
label_batch_entries=True,
indices=None,
sep="\n"):
"""Returns a human-readable version of a one-hot encoding of words.
Args:
data: A tuple with (obs, target). `obs` is a numpy array with one-hot
encoding of words.
label_batch_entries: bool. Whether to add numerical label before each
batch element in the output string.
indices: List of int or None. Used to select a subset of minibatch indices
to print. None will print the whole minibatch.
sep: A char separator which separates the output for each batch. Defaults
to the newline character.
Returns:
String with the words from `data[0]`.
"""
obs = data[0]
batch_size = obs.shape[1]
result = []
indices = xrange(batch_size) if not indices else indices
for b in indices:
index_seq = np.argmax(obs[:, b], axis=1)
prefix = "b_{}: ".format(b) if label_batch_entries else ""
result.append(prefix + self._data_source.decode(index_seq))
return sep.join(result)
| sonnet-1 | sonnet/examples/dataset_shakespeare.py |
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Gets MNIST or CIFAR10 dataset.
MNIST: Handwritten digits dataset in grayscale images.
CIFAR10: Dataset of 50,000 32x32 color training images, labeled over 10
categories, and 10,000 test images.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import tensorflow.compat.v1 as tf
def get_data(name, train_batch_size, test_batch_size):
"""Gets training and testing dataset iterators.
Args:
name: String. Name of dataset, either 'mnist' or 'cifar10'.
train_batch_size: Integer. Batch size for training.
test_batch_size: Integer. Batch size for testing.
Returns:
Dict containing:
train_iterator: A tf.data.Iterator, over training data.
test_iterator: A tf.data.Iterator, over test data.
num_classes: Integer. Number of class labels.
"""
if name not in ['mnist', 'cifar10']:
raise ValueError(
'Expected dataset \'mnist\' or \'cifar10\', but got %s' % name)
dataset = getattr(tf.keras.datasets, name)
num_classes = 10
# Extract the raw data.
raw_data = dataset.load_data()
(images_train, labels_train), (images_test, labels_test) = raw_data
# Normalize inputs and fix types.
images_train = images_train.astype(np.float32) / 255.
images_test = images_test.astype(np.float32) / 255.
labels_train = labels_train.astype(np.int32).squeeze()
labels_test = labels_test.astype(np.int32).squeeze()
# Add a dummy 'color channel' dimension if it is not present.
if images_train.ndim == 3:
images_train = np.expand_dims(images_train, -1)
images_test = np.expand_dims(images_test, -1)
# Put the data onto the graph as constants.
train_data = tf.data.Dataset.from_tensor_slices((images_train, labels_train))
test_data = tf.data.Dataset.from_tensor_slices((images_test, labels_test))
# Create iterators for each dataset.
train_iterator = (
train_data
# Note: For larger datasets e.g. ImageNet, it will not be feasible to have
# a shuffle buffer this large.
.shuffle(buffer_size=len(images_train))
.batch(train_batch_size)
.repeat()
.make_one_shot_iterator()
)
test_iterator = test_data.batch(test_batch_size).make_initializable_iterator()
return dict(
train_iterator=train_iterator,
test_iterator=test_iterator,
num_classes=num_classes)
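# Illustrative usage sketch (not part of the library): one way the returned
# iterators might be consumed. The batch sizes are arbitrary; the expected
# MNIST shapes are noted in the comment.
def _usage_sketch():
  """Fetches a single training batch from the MNIST iterators."""
  data = get_data("mnist", train_batch_size=32, test_batch_size=100)
  images, labels = data["train_iterator"].get_next()
  with tf.train.SingularMonitoredSession() as sess:
    images_np, labels_np = sess.run([images, labels])
  return images_np.shape, labels_np.shape  # ((32, 28, 28, 1), (32,))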
| sonnet-1 | sonnet/examples/dataset_mnist_cifar10.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""A fork of the Penn Treebank reader from TensorFlow's RNN tutorial."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
# Dependency imports
import six
import tensorflow.compat.v1 as tf
def _read_words(filename):
with tf.gfile.GFile(filename, "r") as f:
if six.PY3:
return f.read().replace("\n", "<eos>").split()
else:
return f.read().decode("utf-8").replace("\n", "<eos>").split()
def _build_vocab(filename):
data = _read_words(filename)
counter = collections.Counter(data)
count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))
words, _ = list(zip(*count_pairs))
word_to_id = dict(zip(words, range(len(words))))
return word_to_id
def _file_to_word_ids(filename, word_to_id):
data = _read_words(filename)
return [word_to_id[word] for word in data if word in word_to_id]
def ptb_raw_data(data_path):
"""Load PTB raw data from data directory "data_path".
Reads PTB text files, converts strings to integer ids,
and performs mini-batching of the inputs.
The PTB dataset comes from Tomas Mikolov's webpage:
http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz
Args:
data_path: string path to the directory where simple-examples.tgz has
been extracted.
Returns:
tuple (train_data, valid_data, test_data, vocabulary)
where each of the data objects can be passed to PTBIterator.
"""
train_path = os.path.join(data_path, "ptb.train.txt")
valid_path = os.path.join(data_path, "ptb.valid.txt")
test_path = os.path.join(data_path, "ptb.test.txt")
word_to_id = _build_vocab(train_path)
train_data = _file_to_word_ids(train_path, word_to_id)
valid_data = _file_to_word_ids(valid_path, word_to_id)
test_data = _file_to_word_ids(test_path, word_to_id)
return train_data, valid_data, test_data, word_to_id
def ptb_producer(raw_data, batch_size, num_steps, name=None):
"""Iterate on the raw PTB data.
This chunks up raw_data into batches of examples and returns Tensors that
are drawn from these batches.
Args:
raw_data: one of the raw data outputs from ptb_raw_data.
batch_size: int, the batch size.
num_steps: int, the number of unrolls.
name: the name of this operation (optional).
Returns:
A pair of Tensors, each shaped [batch_size, num_steps]. The second element
of the tuple is the same data time-shifted to the right by one.
Raises:
tf.errors.InvalidArgumentError: if batch_size or num_steps are too high.
"""
with tf.name_scope(name, "PTBProducer", [raw_data, batch_size, num_steps]):
raw_data = tf.convert_to_tensor(raw_data, name="raw_data", dtype=tf.int32)
data_len = tf.size(raw_data)
batch_len = data_len // batch_size
data = tf.reshape(raw_data[0 : batch_size * batch_len],
[batch_size, batch_len])
epoch_size = (batch_len - 1) // num_steps
assertion = tf.assert_positive(
epoch_size,
message="epoch_size == 0, decrease batch_size or num_steps")
with tf.control_dependencies([assertion]):
epoch_size = tf.identity(epoch_size, name="epoch_size")
i = tf.train.range_input_producer(epoch_size, shuffle=False).dequeue()
x = tf.strided_slice(data, [0, i * num_steps],
[batch_size, (i + 1) * num_steps])
x.set_shape([batch_size, num_steps])
y = tf.strided_slice(data, [0, i * num_steps + 1],
[batch_size, (i + 1) * num_steps + 1])
y.set_shape([batch_size, num_steps])
return x, y
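# Illustrative usage sketch (not part of the reader): pulling one batch from
# the queue-backed producer. `data_path` must point at the extracted PTB
# 'data' directory; the batch size and unroll length are arbitrary.
def _producer_sketch(data_path):
  """Reads a single (inputs, targets) pair, each of shape [4, 10]."""
  train_data, _, _, _ = ptb_raw_data(data_path)
  x, y = ptb_producer(train_data, batch_size=4, num_steps=10)
  # The monitored session starts the queue runners created by
  # range_input_producer inside ptb_producer.
  with tf.train.SingularMonitoredSession() as sess:
    return sess.run([x, y])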
| sonnet-1 | sonnet/examples/ptb_reader.py |
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Open Source implementation of Bayesian RNN on Penn Treebank.
Please see https://arxiv.org/pdf/1704.02798.pdf, section 7.1.
Download the Penn Treebank (PTB) dataset from:
http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz
Usage: python ./brnn_ptb.py --data_path=<path_to_dataset>
Above, <path_to_dataset> is the path to the 'data' subdirectory within the
directory resulting from unpacking the .tgz file whose link is given above.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
# Dependency imports
import numpy as np
import sonnet as snt
from sonnet.examples import ptb_reader
import sonnet.python.custom_getters.bayes_by_backprop as bbb
import tensorflow.compat.v1 as tf
import tensorflow_probability as tfp
nest = tf.nest
FLAGS = tf.flags.FLAGS
# Data settings.
tf.flags.DEFINE_string("data_path", "/tmp/ptb_data/data", "path to PTB data.")
# Deep LSTM settings.
tf.flags.DEFINE_integer("embedding_size", 650, "embedding size.")
tf.flags.DEFINE_integer("hidden_size", 650, "network layer size")
tf.flags.DEFINE_integer("n_layers", 2, "number of layers")
# Training settings.
tf.flags.DEFINE_integer("num_training_epochs", 70, "number of training epochs")
tf.flags.DEFINE_integer("batch_size", 20, "SGD minibatch size")
tf.flags.DEFINE_integer("unroll_steps", 35, "Truncated BPTT unroll length.")
tf.flags.DEFINE_integer("high_lr_epochs", 20, "Number of epochs with lr_start.")
tf.flags.DEFINE_float("lr_start", 1.0, "SGD learning rate initializer")
tf.flags.DEFINE_float("lr_decay", 0.9, "Polynomical decay power.")
# BBB settings.
tf.flags.DEFINE_float("prior_pi", 0.25, "Determines the prior mixture weights.")
tf.flags.DEFINE_float("prior_sigma1", np.exp(-1.0), "Prior component 1 stddev.")
tf.flags.DEFINE_float("prior_sigma2", np.exp(-7.0), "Prior component 2 stddev.")
# Logging settings.
tf.flags.DEFINE_integer("print_every_batches", 500, "Sample every x batches.")
tf.flags.DEFINE_string("logbasedir", "/tmp/bayesian_rnn", "directory for logs")
tf.flags.DEFINE_string("logsubdir", "run1", "subdirectory for this experiment.")
tf.flags.DEFINE_string(
"mode", "train_test",
"What mode to run in. Options: ['train_only', 'test_only', 'train_test']")
tf.logging.set_verbosity(tf.logging.INFO)
_LOADED = {}
DataOps = collections.namedtuple("DataOps", "sparse_obs sparse_target")
def _run_session_with_no_hooks(sess, *args, **kwargs):
"""Only runs of the training op should contribute to speed measurement."""
return sess._tf_sess().run(*args, **kwargs) # pylint: disable=protected-access
def _get_raw_data(subset):
"""Loads the data or reads it from cache."""
raw_data = _LOADED.get(subset)
if raw_data is not None:
return raw_data, _LOADED["vocab"]
else:
train_data, valid_data, test_data, vocab = ptb_reader.ptb_raw_data(
FLAGS.data_path)
_LOADED.update({
"train": np.array(train_data),
"valid": np.array(valid_data),
"test": np.array(test_data),
"vocab": vocab
})
return _LOADED[subset], vocab
class PTB(object):
"""Wraps the PTB reader of the TensorFlow tutorial."""
def __init__(self, subset, seq_len, batch_size, name="PTB"):
self.raw_data, self.word2id = _get_raw_data(subset)
self.id2word = {v: k for k, v in self.word2id.items()}
self.seq_len = seq_len
self.batch_size = batch_size
self.name = name
def to_string(self, idx_seq, join_token=" "):
return join_token.join([self.id2word[idx] for idx in idx_seq])
def to_string_tensor(self, time_major_idx_seq_batch):
def p_func(input_idx_seq):
return self.to_string(input_idx_seq)
return tf.py_func(p_func, [time_major_idx_seq_batch[:, 0]], tf.string)
def __call__(self):
x_bm, y_bm = ptb_reader.ptb_producer(
self.raw_data, self.batch_size, self.seq_len, name=self.name)
x_tm = tf.transpose(x_bm, [1, 0])
y_tm = tf.transpose(y_bm, [1, 0])
return DataOps(sparse_obs=x_tm, sparse_target=y_tm)
@property
def num_batches(self):
return np.prod(self.raw_data.shape) // (self.seq_len * self.batch_size)
@property
def vocab_size(self):
return len(self.word2id)
class GlobalNormClippingOptimizer(tf.train.Optimizer):
"""Optimizer that clips gradients by global norm."""
def __init__(self,
opt,
clip_norm,
use_locking=False,
name="GlobalNormClippingOptimizer"):
super(GlobalNormClippingOptimizer, self).__init__(use_locking, name)
self._opt = opt
self._clip_norm = clip_norm
def compute_gradients(self, *args, **kwargs):
return self._opt.compute_gradients(*args, **kwargs)
def apply_gradients(self, grads_and_vars, *args, **kwargs):
if self._clip_norm == np.inf:
return self._opt.apply_gradients(grads_and_vars, *args, **kwargs)
grads, vars_ = list(zip(*grads_and_vars))
clipped_grads, _ = tf.clip_by_global_norm(grads, self._clip_norm)
return self._opt.apply_gradients(zip(clipped_grads, vars_), *args, **kwargs)
class CustomScaleMixture(object):
"""A convenience class for the scale mixture."""
def __init__(self, pi, sigma1, sigma2):
self.mu, self.pi, self.sigma1, self.sigma2 = (
np.float32(v) for v in (0.0, pi, sigma1, sigma2))
def log_prob(self, x):
n1 = tfp.distributions.Normal(self.mu, self.sigma1)
n2 = tfp.distributions.Normal(self.mu, self.sigma2)
mix1 = tf.reduce_sum(n1.log_prob(x), -1) + tf.log(self.pi)
mix2 = tf.reduce_sum(n2.log_prob(x), -1) + tf.log(np.float32(1.0 - self.pi))
prior_mix = tf.stack([mix1, mix2])
lse_mix = tf.reduce_logsumexp(prior_mix, [0])
return tf.reduce_sum(lse_mix)
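# Illustrative numeric sketch (not used by the model): for a single scalar `x`
# the mixture log-density computed by `CustomScaleMixture.log_prob` reduces to
# the numpy expression below. The default evaluation point is arbitrary.
def _scale_mixture_density_sketch(x=0.1):
  """Returns log(pi * N(x; 0, sigma1) + (1 - pi) * N(x; 0, sigma2))."""
  pi, sigma1, sigma2 = FLAGS.prior_pi, FLAGS.prior_sigma1, FLAGS.prior_sigma2
  def log_normal(v, sigma):
    return -0.5 * np.log(2.0 * np.pi * sigma ** 2) - 0.5 * (v / sigma) ** 2
  return np.logaddexp(np.log(pi) + log_normal(x, sigma1),
                      np.log(1.0 - pi) + log_normal(x, sigma2))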
def custom_scale_mixture_prior_builder(getter, name, *args, **kwargs):
"""A builder for the gaussian scale-mixture prior of Fortunato et al.
Please see https://arxiv.org/abs/1704.02798, section 7.1
Args:
getter: The `getter` passed to a `custom_getter`. Please see the
documentation for `tf.get_variable`.
name: The `name` argument passed to `tf.get_variable`.
*args: Positional arguments forwarded by `tf.get_variable`.
**kwargs: Keyword arguments forwarded by `tf.get_variable`.
Returns:
An instance of `tfp.distributions.Distribution` representing the
prior distribution over the variable in question.
"""
# This specific prior formulation doesn't need any of the arguments forwarded
# from `get_variable`.
del getter
del name
del args
del kwargs
return CustomScaleMixture(
FLAGS.prior_pi, FLAGS.prior_sigma1, FLAGS.prior_sigma2)
def lstm_posterior_builder(getter, name, *args, **kwargs):
"""A builder for a particular diagonal gaussian posterior.
Args:
getter: The `getter` passed to a `custom_getter`. Please see the
documentation for `tf.get_variable`.
name: The `name` argument passed to `tf.get_variable`.
*args: Positional arguments forwarded by `tf.get_variable`.
**kwargs: Keyword arguments forwarded by `tf.get_variable`.
Returns:
An instance of `tfp.distributions.Distribution` representing the
posterior distribution over the variable in question.
"""
del args
parameter_shapes = tfp.distributions.Normal.param_static_shapes(
kwargs["shape"])
# The standard deviation of the scale mixture prior.
prior_stddev = np.sqrt(
FLAGS.prior_pi * np.square(FLAGS.prior_sigma1) +
(1 - FLAGS.prior_pi) * np.square(FLAGS.prior_sigma2))
loc_var = getter(
"{}/posterior_loc".format(name),
shape=parameter_shapes["loc"],
initializer=kwargs.get("initializer"),
dtype=tf.float32)
scale_var = getter(
"{}/posterior_scale".format(name),
initializer=tf.random_uniform(
minval=np.log(np.exp(prior_stddev / 4.0) - 1.0),
maxval=np.log(np.exp(prior_stddev / 2.0) - 1.0),
dtype=tf.float32,
shape=parameter_shapes["scale"]))
return tfp.distributions.Normal(
loc=loc_var,
scale=tf.nn.softplus(scale_var) + 1e-5,
name="{}/posterior_dist".format(name))
def non_lstm_posterior_builder(getter, name, *args, **kwargs):
"""A builder for a particular diagonal gaussian posterior.
Args:
getter: The `getter` passed to a `custom_getter`. Please see the
documentation for `tf.get_variable`.
name: The `name` argument passed to `tf.get_variable`.
*args: Positional arguments forwarded by `tf.get_variable`.
**kwargs: Keyword arguments forwarded by `tf.get_variable`.
Returns:
An instance of `tfp.distributions.Distribution` representing the
posterior distribution over the variable in question.
"""
del args
parameter_shapes = tfp.distributions.Normal.param_static_shapes(
kwargs["shape"])
# The standard deviation of the scale mixture prior.
prior_stddev = np.sqrt(
FLAGS.prior_pi * np.square(FLAGS.prior_sigma1) +
(1 - FLAGS.prior_pi) * np.square(FLAGS.prior_sigma2))
loc_var = getter(
"{}/posterior_loc".format(name),
shape=parameter_shapes["loc"],
initializer=kwargs.get("initializer"),
dtype=tf.float32)
scale_var = getter(
"{}/posterior_scale".format(name),
initializer=tf.random_uniform(
minval=np.log(np.exp(prior_stddev / 2.0) - 1.0),
maxval=np.log(np.exp(prior_stddev / 1.0) - 1.0),
dtype=tf.float32,
shape=parameter_shapes["scale"]))
return tfp.distributions.Normal(
loc=loc_var,
scale=tf.nn.softplus(scale_var) + 1e-5,
name="{}/posterior_dist".format(name))
def build_modules(is_training, vocab_size):
"""Construct the modules used in the graph."""
# Construct the custom getter which implements Bayes by Backprop.
if is_training:
estimator_mode = tf.constant(bbb.EstimatorModes.sample)
else:
estimator_mode = tf.constant(bbb.EstimatorModes.mean)
lstm_bbb_custom_getter = bbb.bayes_by_backprop_getter(
posterior_builder=lstm_posterior_builder,
prior_builder=custom_scale_mixture_prior_builder,
kl_builder=bbb.stochastic_kl_builder,
sampling_mode_tensor=estimator_mode)
non_lstm_bbb_custom_getter = bbb.bayes_by_backprop_getter(
posterior_builder=non_lstm_posterior_builder,
prior_builder=custom_scale_mixture_prior_builder,
kl_builder=bbb.stochastic_kl_builder,
sampling_mode_tensor=estimator_mode)
embed_layer = snt.Embed(
vocab_size=vocab_size,
embed_dim=FLAGS.embedding_size,
custom_getter=non_lstm_bbb_custom_getter,
name="input_embedding")
cores = []
for i in range(FLAGS.n_layers):
cores.append(
snt.LSTM(FLAGS.hidden_size,
custom_getter=lstm_bbb_custom_getter,
forget_bias=0.0,
name="lstm_layer_{}".format(i)))
rnn_core = snt.DeepRNN(
cores,
skip_connections=False,
name="deep_lstm_core")
# Do BBB on weights but not biases of output layer.
output_linear = snt.Linear(
vocab_size, custom_getter={"w": non_lstm_bbb_custom_getter})
return embed_layer, rnn_core, output_linear
def build_logits(data_ops, embed_layer, rnn_core, output_linear, name_prefix):
"""This is the core model logic.
Unrolls a Bayesian RNN over the given sequence.
Args:
data_ops: A `sequence_data.SequenceDataOps` namedtuple.
embed_layer: A `snt.Embed` instance.
rnn_core: A `snt.RNNCore` instance.
output_linear: A `snt.Linear` instance.
name_prefix: A string to use to prefix local variable names.
Returns:
A 3D time-major tensor representing the model's logits for a sequence of
predictions. Shape `[time_steps, batch_size, vocab_size]`.
"""
# Embed the input index sequence.
embedded_input_seq = snt.BatchApply(
embed_layer, name="input_embed_seq")(data_ops.sparse_obs)
# Construct variables for holding the RNN state.
initial_rnn_state = nest.map_structure(
      lambda t: tf.get_local_variable(  # pylint: disable=g-long-lambda
"{}/rnn_state/{}".format(name_prefix, t.op.name), initializer=t),
rnn_core.initial_state(FLAGS.batch_size))
assign_zero_rnn_state = nest.map_structure(
lambda x: x.assign(tf.zeros_like(x)), initial_rnn_state)
assign_zero_rnn_state = tf.group(*nest.flatten(assign_zero_rnn_state))
# Unroll the RNN core over the sequence.
rnn_output_seq, rnn_final_state = tf.nn.dynamic_rnn(
cell=rnn_core,
inputs=embedded_input_seq,
initial_state=initial_rnn_state,
time_major=True)
# Persist the RNN state for the next unroll.
update_rnn_state = nest.map_structure(
tf.assign, initial_rnn_state, rnn_final_state)
with tf.control_dependencies(nest.flatten(update_rnn_state)):
rnn_output_seq = tf.identity(rnn_output_seq, name="rnn_output_seq")
output_logits = snt.BatchApply(
output_linear, name="output_embed_seq")(rnn_output_seq)
return output_logits, assign_zero_rnn_state
def build_loss(model_logits, sparse_targets):
"""Compute the log loss given predictions and targets."""
time_major_shape = [FLAGS.unroll_steps, FLAGS.batch_size]
flat_batch_shape = [FLAGS.unroll_steps * FLAGS.batch_size, -1]
xent = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=tf.reshape(model_logits, flat_batch_shape),
labels=tf.reshape(sparse_targets, flat_batch_shape[:-1]))
xent = tf.reshape(xent, time_major_shape)
# Sum over the sequence.
sequence_neg_log_prob = tf.reduce_sum(xent, axis=0)
# Average over the batch.
return tf.reduce_mean(sequence_neg_log_prob, axis=0)
def train(logdir):
"""Run a network on the PTB training set, checkpointing the weights."""
ptb_train = PTB(
name="ptb_train",
subset="train",
seq_len=FLAGS.unroll_steps,
batch_size=FLAGS.batch_size)
# Connect to training set.
data_ops = ptb_train()
embed_layer, rnn_core, output_linear = build_modules(
is_training=True, vocab_size=ptb_train.vocab_size)
prediction_logits, zero_state_op = build_logits(
data_ops, embed_layer, rnn_core, output_linear, name_prefix="train")
data_loss = build_loss(prediction_logits, data_ops.sparse_target)
# Add the KL cost.
total_kl_cost = bbb.get_total_kl_cost()
num_dataset_elements = FLAGS.batch_size * ptb_train.num_batches
scaled_kl_cost = total_kl_cost / num_dataset_elements
total_loss = tf.add(scaled_kl_cost, data_loss)
# Optimize as usual.
global_step = tf.get_variable(
"num_weight_updates",
initializer=tf.constant(0, dtype=tf.int32, shape=()),
collections=[tf.GraphKeys.GLOBAL_VARIABLES,
tf.GraphKeys.GLOBAL_STEP])
learning_rate = tf.get_variable(
"lr", initializer=tf.constant(FLAGS.lr_start, shape=(), dtype=tf.float32))
learning_rate_update = learning_rate.assign(learning_rate * FLAGS.lr_decay)
optimizer = tf.train.GradientDescentOptimizer(
learning_rate=learning_rate)
optimizer = GlobalNormClippingOptimizer(optimizer, clip_norm=5.0)
with tf.control_dependencies([optimizer.minimize(total_loss)]):
global_step_and_train = global_step.assign_add(1)
# Connect to valid set.
ptb_valid = PTB(
name="ptb_valid",
subset="valid",
seq_len=FLAGS.unroll_steps,
batch_size=FLAGS.batch_size)
valid_data_ops = ptb_valid()
valid_logits, zero_valid_state = build_logits(
valid_data_ops, embed_layer, rnn_core, output_linear, name_prefix="valid")
valid_loss = build_loss(valid_logits, valid_data_ops.sparse_target)
# Compute metrics for the sake of monitoring training.
predictions = tf.cast(
tf.argmax(prediction_logits, axis=-1), tf.int32, name="pred")
correct_prediction_mask = tf.cast(
tf.equal(predictions, data_ops.sparse_target), tf.int32)
accuracy = tf.reduce_mean(
tf.cast(correct_prediction_mask, tf.float32), name="acc")
error_rate = tf.subtract(1.0, accuracy, name="err")
label_probs = tf.nn.softmax(prediction_logits, dim=-1)
predictive_entropy = tf.reduce_mean(
label_probs * tf.log(label_probs + 1e-12) * -1.0)
# Create tf.summary ops.
log_ops_to_run = {
"scalar": collections.OrderedDict([
("task_loss", data_loss),
("train_err_rate", error_rate),
("pred_entropy", predictive_entropy),
("learning_rate", learning_rate),
("elbo_loss", total_loss),
("kl_cost", total_kl_cost),
("scaled_kl_cost", scaled_kl_cost),
]),
"text": collections.OrderedDict([
("labels", ptb_train.to_string_tensor(data_ops.sparse_target)),
("predictions", ptb_train.to_string_tensor(predictions))
])
}
for name, tensor in log_ops_to_run["scalar"].items():
tf.summary.scalar(os.path.join("train", name), tensor)
# The remaining logic runs the training loop and logging.
summary_writer = tf.summary.FileWriterCache.get(logdir=logdir)
tf.logging.info(
"Beginning training for {} epochs, each with {} batches.".format(
FLAGS.num_training_epochs, ptb_train.num_batches))
with tf.train.MonitoredTrainingSession(
is_chief=True, checkpoint_dir=logdir, save_summaries_secs=10) as sess:
num_updates_v = _run_session_with_no_hooks(sess, global_step)
epoch_idx_start, step_idx_start = divmod(
num_updates_v, ptb_train.num_batches)
tf.logging.info("On start, epoch: {}\t step: {}".format(
epoch_idx_start, step_idx_start))
for epoch_idx in range(epoch_idx_start, FLAGS.num_training_epochs):
tf.logging.info("Beginning Epoch {}/{}".format(
epoch_idx, FLAGS.num_training_epochs))
tf.logging.info(
("Beginning by evaluating on the validation set, which has "
"{} batches.".format(ptb_valid.num_batches)))
valid_cost = 0
valid_steps = 0
_run_session_with_no_hooks(sess, zero_valid_state)
for _ in range(ptb_valid.num_batches):
valid_cost_v, num_updates_v = _run_session_with_no_hooks(
sess, [valid_loss, global_step])
valid_cost += valid_cost_v
valid_steps += FLAGS.unroll_steps
tf.logging.info("Validation set perplexity: {}".format(
np.exp(valid_cost / valid_steps)))
summary = tf.summary.Summary()
summary.value.add(
tag="valid/word_level_perplexity",
simple_value=np.exp(valid_cost / valid_steps))
summary_writer.add_summary(summary, num_updates_v)
# Run a training epoch.
epoch_cost = 0
epoch_steps = 0
for batch_idx in range(step_idx_start, ptb_train.num_batches):
scalars_res, num_updates_v = sess.run(
[log_ops_to_run["scalar"], global_step_and_train])
epoch_cost += scalars_res["task_loss"]
epoch_steps += FLAGS.unroll_steps
if (batch_idx - 1) % FLAGS.print_every_batches == 0:
summary = tf.summary.Summary()
summary.value.add(
tag="train/word_level_perplexity",
simple_value=np.exp(epoch_cost / epoch_steps))
summary_writer.add_summary(summary, num_updates_v)
scalars_res, strings_res = _run_session_with_no_hooks(
sess, [log_ops_to_run["scalar"], log_ops_to_run["text"]])
tf.logging.info("Num weight updates: {}".format(num_updates_v))
for name, result in scalars_res.items():
tf.logging.info("{}: {}".format(name, result))
for name, result in strings_res.items():
tf.logging.info("{}: {}".format(name, result))
word_level_perplexity = np.exp(epoch_cost / epoch_steps)
tf.logging.info(
"Train Perplexity after Epoch {}: {}".format(
epoch_idx, word_level_perplexity))
end_of_epoch_fetches = [zero_state_op]
if epoch_idx >= FLAGS.high_lr_epochs:
end_of_epoch_fetches.append(learning_rate_update)
_run_session_with_no_hooks(sess, end_of_epoch_fetches)
tf.logging.info("Done training. Thanks for your time.")
def test(logdir):
"""Run a network on the PTB test set, restoring from the latest checkpoint."""
global_step = tf.get_variable(
"num_weight_updates",
initializer=tf.constant(0, dtype=tf.int32, shape=()),
collections=[tf.GraphKeys.GLOBAL_VARIABLES,
tf.GraphKeys.GLOBAL_STEP])
ptb_test = PTB(
name="ptb_test",
subset="test",
seq_len=FLAGS.unroll_steps,
batch_size=FLAGS.batch_size)
# Connect to test set.
data_ops = ptb_test()
# The variables in these modules will be restored from the checkpoint.
embed_layer, rnn_core, output_linear = build_modules(
is_training=False, vocab_size=ptb_test.vocab_size)
prediction_logits, _ = build_logits(
data_ops, embed_layer, rnn_core, output_linear, name_prefix="test")
avg_nats_per_sequence = build_loss(prediction_logits, data_ops.sparse_target)
dataset_cost = 0
dataset_iters = 0
with tf.train.SingularMonitoredSession(checkpoint_dir=logdir) as sess:
tf.logging.info("Running on test set in {} batches.".format(
ptb_test.num_batches))
tf.logging.info("The model has trained for {} steps.".format(
_run_session_with_no_hooks(sess, global_step)))
for _ in range(ptb_test.num_batches):
dataset_cost += _run_session_with_no_hooks(sess, avg_nats_per_sequence)
dataset_iters += FLAGS.unroll_steps
tf.logging.info("Final test set perplexity: {}.".format(
np.exp(dataset_cost / dataset_iters)))
def main(unused_argv):
logdir = os.path.join(FLAGS.logbasedir, FLAGS.logsubdir)
tf.logging.info("Log Directory: {}".format(logdir))
if FLAGS.mode == "train_only":
train(logdir)
elif FLAGS.mode == "test_only":
test(logdir)
elif FLAGS.mode == "train_test":
tf.logging.info("Beginning a training phase of {} epochs.".format(
FLAGS.num_training_epochs))
train(logdir)
tf.logging.info("Beginning testing phase.")
with tf.Graph().as_default():
# Enter new default graph so that we can read variables from checkpoint
# without getting hit by name uniquification of sonnet variables.
test(logdir)
else:
raise ValueError("Invalid mode {}. Please choose one of {}.".format(
FLAGS.mode, "['train_only', 'test_only', 'train_test']"))
if __name__ == "__main__":
tf.app.run()
| sonnet-1 | sonnet/examples/brnn_ptb.py |
# Copyright 2018 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Learning To Execute Dataset.
Generates sequences of constant-time mini-programs.
Modes:
* `TRAIN_COMBINE`: Uses Combined Curriculum for training.
* `TRAIN_MIX`: Uses Mix Curriculum for training.
* `TRAIN_NAIVE`: Uses Naive Curriculum for training.
* `TEST`: Uses Baseline Curriculum for testing.
This module defines:
1. Set of curriculum classes.
2. Set of execution operation classes (this may be extended).
3. Methods for defining a vocabulary and code samples.
4. Class responsible for handling the tokenization of samples.
5. Dataset class to generate train/test batches.
This dataset is generative and does not rely on any statically stored data.
Therefore there is no limit to the samples generated. A generated batch will
be of dimensionality [sequence, length, one_hot_encoding_size]. Finally, the
dataset requires a maximum literal length and nesting level, for example:
(25 if 10 < 2 else (333 - (22 + 4)))
This has a maximum literal length of 3 (`333`) and nesting level of 3
(`(22 + 4)`).
Finally, it should be mentioned that the dataset can also operate in two
tokenization modes: tokenized and detokenized. In the detokenized mode the
sequence is tokenized by character, while in the tokenized mode the sample
is tokenized by keywords, numbers and literals (illustrated by the spaces in
the above example). This can be set with the `token_by_char` arg, where
detokenized corresponds to `True`.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import random
from enum import Enum
import numpy as np
import six
import tensorflow.compat.v1 as tf
_SOS = "~" # Start of sequence symbol
_EOS = "!" # End of sequence symbol
_PAD = "." # Padding symbol
DEFAULT_MIN_CURRICULUM_EVAL_TRIES = 10 # Minimum # of times to attempt update.
@six.add_metaclass(abc.ABCMeta)
class LTECurriculum(object):
"""Base class for code operations."""
MIN_LOSS_WINDOW_SIZE = 5
MAX_LOSS_WINDOW_SIZE = 10
def __init__(self, max_length, max_nesting, loss_threshold=0.05,
min_tries=DEFAULT_MIN_CURRICULUM_EVAL_TRIES):
"""Initializes the curriculum.
Args:
max_length: The maximum literal length.
max_nesting: The maximum nesting.
loss_threshold: Fractional value under which the average difference in
validation loss will trigger additional difficulty.
min_tries: the minimum number of times required on a difficulty level.
"""
self._curr_length = 1
self._max_length = max_length
self._curr_nesting = 1
self._max_nesting = max_nesting
self._loss_threshold = loss_threshold
self._min_tries = min_tries
self._curr_tries = 0
self._set_loss_window()
def _set_loss_window(self):
"""Initializes the queue that stores the losses."""
avg_window_size = max(min(self._min_tries, self.MAX_LOSS_WINDOW_SIZE),
self.MIN_LOSS_WINDOW_SIZE)
self._losses = collections.deque(
[], maxlen=avg_window_size) # pytype: disable=wrong-arg-count
@property
def friendly_name(self):
return "Root(" + str(self._loss_threshold) + ")"
def update(self, loss, force=False):
"""Determines whether task level difficulty is to be increased.
Collects loss values and difference since the last update. This is used
to compute the fractional difference across the loss window.
Args:
      loss: float loss value used to determine whether to update the
        curriculum state.
force: boolean that allows us to force a curriculum update.
Returns:
True if there was an update.
"""
if force:
self._curr_tries = 0
self._set_loss_window()
self._set_new_task = True
return True
self._losses.append(loss)
if self._curr_tries < self._min_tries - 1:
self._curr_tries += 1
return False
# Average change in loss normalized by average loss.
loss_diffs = [pair[0] - pair[1]
for pair in zip(list(self._losses)[1:],
list(self._losses)[:-1])]
avg_loss_norm = np.mean(loss_diffs) / np.mean(self._losses)
if avg_loss_norm < self._loss_threshold:
self._set_new_task = True
self._curr_tries = 0
self._set_loss_window()
return True
else:
return False
def fetch(self):
"""Getter for current curriculum nesting and length.
Returns:
Tuple of integer values indicating literal length and nesting depth.
"""
return self._curr_length, self._curr_nesting
@property
def current_level(self):
"""Gets current currciculum level (string)."""
return str(self._curr_nesting) + "." + str(self._curr_length)
@property
def max_length(self):
"""Gets maximum literal depth."""
return self._max_length
@property
def max_nesting(self):
"""Gets maximum nesting depth."""
return self._max_nesting
class BaselineCurriculum(LTECurriculum):
"""Baseline curriculum sets a fixed nesting and length."""
def __init__(self, length, nesting, threshold,
min_tries=DEFAULT_MIN_CURRICULUM_EVAL_TRIES):
tf.logging.info("Initializing Baseline curriculum. length=%d, nest=%d, "
"valid threshold=%f", length, nesting, threshold)
super(BaselineCurriculum, self).__init__(length, nesting, threshold,
min_tries)
self._curr_length = length
self._curr_nesting = nesting
@property
def friendly_name(self):
return "Baseline(" + str(self._loss_threshold) + ")"
class NaiveCurriculum(LTECurriculum):
"""Naive curriculum increments length, nesting complexity by 1 on update."""
def __init__(self, length, nesting, threshold,
min_tries=DEFAULT_MIN_CURRICULUM_EVAL_TRIES):
tf.logging.info("Initializing Naive curriculum."
" length=%d, nest=%d, valid threshold=%f", length, nesting,
threshold)
super(NaiveCurriculum, self).__init__(length, nesting, threshold,
min_tries)
  @property
  def friendly_name(self):
return "Naive(" + str(self._loss_threshold) + ")"
def update(self, loss, force=False):
"""Increments level difficulty (length and nesting) by 1 until maximum."""
do_update = super(NaiveCurriculum, self).update(loss, force)
if do_update:
if self._curr_length < self._max_length:
self._curr_length += 1
return True
elif self._curr_nesting < self._max_nesting:
self._curr_nesting += 1
else:
self._set_new_task = False
if self._set_new_task:
tf.logging.info("New level: (length=%d, nesting=%d)",
self._curr_length,
self._curr_nesting)
return self._set_new_task
return False
class MixCurriculum(LTECurriculum):
"""Mixed chooses randomly by batch up to a maximum length/nesting."""
def __init__(self, length, nesting, threshold,
min_tries=DEFAULT_MIN_CURRICULUM_EVAL_TRIES):
tf.logging.info("Initializing Mix curriculum."
" length=%d, nest=%d, valid threshold=%f", length, nesting,
threshold)
super(MixCurriculum, self).__init__(length, nesting, threshold, min_tries)
  @property
  def friendly_name(self):
return "Mix(" + str(self._loss_threshold) + ")"
def fetch(self):
"""Samples up to maximum difficulty."""
length = np.random.randint(1, self._max_length + 1)
nesting = np.random.randint(1, self._max_nesting + 1)
return length, nesting
class CombineCurriculum(LTECurriculum):
"""Combine uses both Mix and Naive strategy together."""
def __init__(self, length, nesting, threshold,
min_tries=DEFAULT_MIN_CURRICULUM_EVAL_TRIES):
tf.logging.info("Initializing Combine curriculum. length=%d, nest=%d, "
"valid threshold=%f", length, nesting, threshold)
super(CombineCurriculum, self).__init__(length, nesting, threshold,
min_tries)
@property
def friendly_name(self):
return "Combine(" + str(self._loss_threshold) + ")"
def update(self, loss, force=False):
"""Increments level difficulty (length and nesting) by 1 until maximum."""
do_update = super(CombineCurriculum, self).update(loss, force)
if not do_update:
return False
if self._curr_length < self._max_length:
self._curr_length += 1
elif self._curr_nesting < self._max_nesting:
self._curr_nesting += 1
else:
self._set_new_task = False
if self._set_new_task:
tf.logging.info("New level: (length=%d, nesting=%d)",
self._curr_length, self._curr_nesting)
return self._set_new_task
def fetch(self):
"""Samples up to current difficulty."""
length = np.random.randint(1, self._curr_length + 1)
nesting = np.random.randint(1, self._curr_nesting + 1)
return length, nesting
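# Illustrative sketch (not part of the original module): how a curriculum
# object is typically driven by a training loop. The constructor arguments and
# loss values below are arbitrary example choices; in practice `update` is fed
# validation losses and `fetch` parameterizes `generate_code` further below.
def _example_curriculum_usage():
  curriculum = CombineCurriculum(length=5, nesting=3, threshold=0.1,
                                 min_tries=2)
  for synthetic_loss in [1.0, 0.8, 0.2, 0.05]:
    if curriculum.update(synthetic_loss):
      tf.logging.info("Difficulty increased to %s", curriculum.current_level)
  # fetch() samples a (length, nesting) pair up to the current difficulty.
  return curriculum.fetch()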
@six.add_metaclass(abc.ABCMeta)
class CodeOp(object):
"""Base class for code operations."""
def __init__(self, num_operands):
"""Constructor for base operations class.
    This constructor sets the number of operands for the operation as well as
    whether the operation is a "memory" operation - that is, one whose output
    is a permutation or copy of the input.
Args:
num_operands: integer indicating number of operands for this operation.
"""
self._num_operands = num_operands
self._is_memory = False
@property
def num_operands(self):
"""Property returning integer number of operands for the operation."""
return self._num_operands
@property
def is_memory(self):
"""Property indicating whether this is a memory Operation."""
return self._is_memory
KEYWORDS = ["if", "for", "else", "range"]
LITERALS = ["+", "-", "*", "=", ":", "/", "(", ")", " ", "x", "|", "<", ">",
"[", "]", _SOS, _EOS, _PAD]
def check_elems(self, elems, count, elem_type):
"""Ensures element list length and type valid.
Args:
elems: list of elements.
count: how many elements are expected.
elem_type: type of all elements.
Raises:
ValueError: When length and type of elems is not as expected.
"""
    if len(elems) != count or not all(isinstance(e, elem_type) for e in elems):
raise ValueError("Not all elements valid: {}".format(elems))
@abc.abstractmethod
def eval(self, values):
"""Evaluates the operation based with given values."""
return
@abc.abstractmethod
def get_code(self, codes):
"""Composes the operation code from code components."""
return
class AddOp(CodeOp):
"""Add operation class."""
def __init__(self):
super(AddOp, self).__init__(2)
def eval(self, values):
self.check_elems(values, 2, int)
return values[0] + values[1]
def get_code(self, codes):
self.check_elems(codes, 2, str)
return "+".join(codes[:2])
class SubtractOp(CodeOp):
"""Subtract operation class."""
def __init__(self):
super(SubtractOp, self).__init__(2)
def eval(self, values):
self.check_elems(values, 2, int)
return values[0] - values[1]
def get_code(self, codes):
self.check_elems(codes, 2, str)
return "".join([codes[0], "-", codes[1]])
class MultiplyOp(CodeOp):
"""Multiply operation class."""
def __init__(self):
super(MultiplyOp, self).__init__(2)
def eval(self, values):
self.check_elems(values, 2, int)
return values[0] * values[1]
def get_code(self, codes):
self.check_elems(codes, 2, str)
return "".join([codes[0], "*", codes[1]])
class DivideOp(CodeOp):
"""Divide operation class."""
def __init__(self):
super(DivideOp, self).__init__(2)
def eval(self, values):
self.check_elems(values, 2, int)
return values[0] / values[1]
def get_code(self, codes):
self.check_elems(codes, 2, str)
return "".join([codes[0], "/", codes[1]])
class IfOp(CodeOp):
"""If operation class."""
def __init__(self):
super(IfOp, self).__init__(4)
self._comparators = ["<", ">"]
def eval(self, values):
self.check_elems(values, 4, int)
comparator_idx = random.randint(0, len(self._comparators)-1)
self._comparator = self._comparators[comparator_idx]
if self._comparator == ">":
return values[0] if values[1] > values[2] else values[3]
elif self._comparator == "<":
return values[0] if values[1] < values[2] else values[3]
else:
      raise ValueError("Invalid comparator.")
def get_code(self, codes):
self.check_elems(codes, 4, str)
if self._comparator == ">":
return "".join([codes[0], "if", codes[1], ">", codes[2],
"else", codes[3]])
elif self._comparator == "<":
return "".join([codes[0], "if", codes[1], "<", codes[2],
"else", codes[3]])
else:
      raise ValueError("Invalid comparator.")
class ForOp(CodeOp):
"""For loop operation class."""
def __init__(self):
super(ForOp, self).__init__(2)
def eval(self, values):
values = list(values)
self.check_elems(values, 2, int)
self._it = random.randint(1, 9)
for _ in six.moves.range(self._it):
values[0] += values[1]
return values[0]
def get_code(self, codes):
self.check_elems(codes, 2, str)
return "".join(["x=", codes[0], "for[" + str(self._it) + "]",
"x+=", codes[1]])
class ReverseOp(CodeOp):
"""Outputs a reversal of the input."""
def __init__(self):
"""Constructor for ReverseOp."""
super(ReverseOp, self).__init__(1)
self._is_memory = True
def eval(self, values):
"""Evaluation method for reverse operation.
Args:
values: List of samples to compose operation.
Returns:
String representing reversed input.
"""
return str(values[0])[::-1]
def get_code(self, codes):
"""Composes a code for double-copy operation.
Args:
codes: List of samples to compose operation.
Returns:
String for code of reversed input result.
"""
return "".join(codes)
class CopyOp(CodeOp):
"""Outputs a copy of the input."""
def __init__(self):
"""Constructor for CopyOp."""
super(CopyOp, self).__init__(1)
self._is_memory = True
def eval(self, values):
"""Evaluation method for copy operation.
Args:
values: List of samples to compose operation.
Returns:
String representing copied input.
"""
return values[0]
def get_code(self, codes):
"""Composes a code for double-copy operation.
Args:
codes: List of samples to compose operation code.
Returns:
String for code of copied input result.
"""
return "".join(codes)
class DoubleCopyOp(CodeOp):
"""Outputs two concatenated copies of the input."""
def __init__(self):
"""Constructor for DoubleCopyOp."""
super(DoubleCopyOp, self).__init__(1)
self._is_memory = True
def eval(self, values):
"""Evaluation method for DoubleCopy operation.
Args:
values: List of samples to compose operation code.
Returns:
String representing doubled input result.
"""
return str(values[0]) + str(values[0])
def get_code(self, codes):
"""Composes a code for double-copy operation.
Args:
codes: List of samples to compose operation.
Returns:
String for code of double copied input result.
"""
return "".join(codes)
def generate_code(max_length, max_nest, ops):
"""Generates code samples.
Args:
max_length: int. max literal length.
max_nest: int. max nesting level.
ops: CodeOp. set of allowable operations.
Returns:
1. (str) output value.
2. (str) Code operation.
"""
stack = []
def fetch_one():
# Always use an existing nested value for one of the operands.
if stack:
return stack.pop()
else:
# Produce a numeral of max_length-digits.
value = random.randint(10 ** (max_length - 1), 10 ** max_length - 1)
code = str(value)
return value, code
def fetch(num_operands):
values, codes = zip(*[fetch_one() for _ in six.moves.range(num_operands)])
return values, codes
for _ in six.moves.range(max_nest):
op = random.choice(ops)
values, codes = fetch(op.num_operands)
new_value = op.eval(values)
new_code = op.get_code(codes)
stack.append((new_value, "(" + new_code + ")"))
final_value, final_code = stack.pop()
  final_code = final_code[1:-1]  # Strip the outermost wrapping parentheses.
if not op.is_memory:
final_value = int(final_value) % 10 ** (max_length+1)
return str(final_value), final_code
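# Illustrative sketch (not part of the original module): generating a single
# algebra-style sample. The ops list mirrors TASK_TYPE_OPS[TaskType.ALGEBRA]
# defined further below; max_length and max_nest are arbitrary example values.
def _example_generate_code():
  ops = [AddOp(), SubtractOp(), MultiplyOp()]
  value, code = generate_code(max_length=2, max_nest=2, ops=ops)
  # `code` is a string such as "(12+34)*56" (operands and ops are random) and
  # `value` is its evaluated result, as a string, reduced modulo 10 ** 3.
  return value, code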
def get_tokens(max_value):
"""Defines tokens.
Args:
max_value: the maximum numeric range for the token.
Returns:
list of string tokens in vocabulary.
"""
vocab = [str(i) for i in range(max_value)]
vocab = set(vocab)
vocab.update(CodeOp.LITERALS)
vocab.update(CodeOp.KEYWORDS)
vocab |= set("".join(vocab))
return sorted(vocab)
def get_padding():
"""Returns the padding character."""
return _PAD
def get_start_token():
"""Returns `start-of-sequence` character."""
return _SOS
def get_end_token():
"""Returns `end-of-sequence` character."""
return _EOS
class TokenDataSource(object):
"""Encapsulates loading/tokenization logic for samples from generator."""
UNK = "_unk_"
DEFAULT_START_TOKENS = ["_null_", "_eos_", "|"]
NULL, WORD_EOS, CHAR_EOS = DEFAULT_START_TOKENS
def __init__(self, curriculum_obj, batch_size, max_len, ops, token_by_char):
"""Creates a TokenDataSource instance.
Args:
curriculum_obj: (LTECurriculum) determines sample complexity.
batch_size: (int) Batch size to generate.
max_len: (int) This is the maximum size of any given sample sequence.
ops: (list(CodeOp)). Task operations that inherit from CodeOp().
      token_by_char: (bool) Whether to tokenize by character ("detokenized") or
        by keywords, literals and numbers.
"""
# Create the token and inverse-token dicts and fix the UNK token.
self._vocab_dict = collections.defaultdict(lambda: 0)
self._vocab_dict[self.UNK] = 0
self._inv_vocab_dict = collections.defaultdict(lambda: self.UNK)
self.curriculum_obj = curriculum_obj
self._max_seq_length = max_len
self._ops = ops
self._token_by_char = token_by_char
self._batch_size = batch_size
# Construct the vocabulary.
num_token_digits = 1 if token_by_char else curriculum_obj.max_length
token_list = get_tokens(10 ** num_token_digits)
self.vocab_size = 1
for token in self.DEFAULT_START_TOKENS + token_list:
if token not in self._vocab_dict:
self._vocab_dict[token] = self.vocab_size
self._inv_vocab_dict[self.vocab_size] = token
self.vocab_size += 1
@property
def vocabulary(self):
"""List of strings, dataset vocabulary."""
return self._vocab_dict.keys()
def generate_flat_data(self):
"""Generates batched data in flat numpy arrays.
Raises:
ValueError: When too many generate calls are required.
"""
# Construct the string statements.
all_statements = []
all_targets = []
self.sequence_sizes_in = []
self.sequence_sizes_out = []
for _ in six.moves.range(self._batch_size):
length, nest = self.curriculum_obj.fetch()
seq_size_in = self._max_seq_length
# Generate batch within max length.
is_valid_sample = False
tries_remaining = 10
while not is_valid_sample:
value, code = generate_code(length, nest, self._ops)
tokens_in, seq_size_in = self.tokenize(
code, self._max_seq_length, self._token_by_char)
tokens_out, seq_size_out = self.tokenize(
value, self._max_seq_length, self._token_by_char)
is_valid_sample = self._max_seq_length >= seq_size_in
if is_valid_sample:
self.sequence_sizes_in.append(seq_size_in)
self.sequence_sizes_out.append(seq_size_out)
if tries_remaining == 0:
raise ValueError("Could not generate a sample below the allowable "
"maximum, consider reducing either max_length or "
"max_nest.")
else:
tries_remaining -= 1
all_statements += tokens_in
all_targets += tokens_out
# Store the flattened data.
self.flat_data = np.array(all_statements, dtype=np.int64)
self.num_tokens = self.flat_data.shape[0]
self.flat_targets = np.array(all_targets, dtype=np.int64)
self.num_tokens_target = self.flat_targets.shape[0]
self.start_token = np.array(self.tokenize(
[get_start_token()], 1)[0], dtype=np.int64)
self.end_token = np.array(self.tokenize(
[get_end_token()], 1)[0], dtype=np.int64)
def tokenize(self, char_input, max_len, by_char=False):
"""Produces the list of integer indices corresponding to a token list.
Args:
char_input: The character string to be tokenized.
max_len: Truncation length.
by_char: If true each character is a token - otherwise alpha-numeric
groupings are tokens.
Returns:
A padded list of string tokens and the true sequence length.
Raises:
ValueError: the token sequence is too long.
"""
if by_char:
tokenized_list = [self._vocab_dict[token] for token in char_input]
else:
tokenized_list = []
compound_token = ""
for token in char_input:
# Compose alphanumeric inputs into compound tokens.
add_number = compound_token.isdigit() and not token.isdigit()
add_word = compound_token.isalpha() and not token.isalpha()
if add_number or add_word:
tokenized_list.append(self._vocab_dict[compound_token])
compound_token = ""
# Add token or build compound token.
if token.isdigit():
compound_token += token
elif token.isalpha():
compound_token += token
else:
tokenized_list.append(self._vocab_dict[token])
if compound_token:
tokenized_list.append(self._vocab_dict[compound_token])
# To ensure uniform batch sequence length pad the sequence.
seq_size = len(tokenized_list)
if seq_size < max_len:
padding = [self._vocab_dict[get_padding()]] * (max_len - seq_size)
tokenized_list.extend(padding)
elif seq_size > max_len:
raise ValueError("Token sequence is too large: {}".format(
len(tokenized_list)))
return tokenized_list, seq_size
def decode_to_string(self, token_list):
"""Produces a human-readable representation of the token list."""
return "".join([self._inv_vocab_dict[token] for token in token_list])
def decode_to_list(self, token_list):
"""Returns a list token index values."""
return [self._inv_vocab_dict[token] for token in token_list]
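# Illustrative sketch (not part of the original module): a tokenize/decode
# round trip on a character-level vocabulary. The curriculum and size arguments
# are arbitrary example values; BaselineCurriculum and AddOp come from this
# module.
def _example_tokenize_round_trip():
  curriculum = BaselineCurriculum(length=2, nesting=1, threshold=0.1)
  source = TokenDataSource(curriculum, batch_size=1, max_len=16,
                           ops=[AddOp()], token_by_char=True)
  tokens, seq_size = source.tokenize("12+34", max_len=16, by_char=True)
  # `tokens` is padded out to max_len; only the first `seq_size` entries carry
  # the statement, so decoding them recovers the original string "12+34".
  return source.decode_to_string(tokens[:seq_size])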
# Task Types.
class TaskType(Enum):
ALGEBRA = 1
CONTROL = 2
ALL = 3
ALG_CTRL = 4
ADDITION = 5
COPY = 6
DOUBLE = 7
REVERSE = 8
# Task Groups.
class TaskGroups(Enum):
PROG_TASKS = 1
MEMORY_1 = 2
MEMORY_2 = 3
class LearnToExecuteState(object):
"""Learn-To-Execute dataset state class.
Generated sequences of constant time mini-programs.
Modes:
  * `train`: Uses Combined Curriculum for training.
  * `test`: Uses Baseline Curriculum for testing.
"""
TASK_TYPE_OPS = {
TaskType.ALGEBRA: [AddOp(), SubtractOp(), MultiplyOp(),],
TaskType.CONTROL: [IfOp(), ForOp(),],
TaskType.ALL: [AddOp(), SubtractOp(), MultiplyOp(), IfOp(), ForOp(),],
TaskType.ALG_CTRL: [AddOp(), SubtractOp(), IfOp()],
TaskType.ADDITION: [AddOp()],
TaskType.COPY: [CopyOp()],
TaskType.DOUBLE: [DoubleCopyOp()],
TaskType.REVERSE: [ReverseOp()],
}
TASK_GROUPS = {
TaskGroups.PROG_TASKS: (
TaskType.ALGEBRA, TaskType.ALL, TaskType.ADDITION,
TaskType.ALG_CTRL, TaskType.CONTROL),
TaskGroups.MEMORY_1: (TaskType.COPY, TaskType.REVERSE),
TaskGroups.MEMORY_2: (TaskType.DOUBLE,)
}
def __init__(self, batch_size, max_length, max_nesting, curriculum,
               token_by_char=True, task_type=TaskType.ALG_CTRL):
"""Creates a LearnToExecute Dataset.
    Initializes the dataset task set and input and target sequence shapes.
Maximum sequence sizes for input and target are computed based on maximum
possible assignments. Also, curriculum is set, operations corresponding to
the chosen task is set and the data source is initialized.
Args:
batch_size: (int). The number of elements in a mini-batch.
max_length: (int). Maximum character length.
max_nesting: (int). Maximum level of statement nesting.
curriculum: (LTECurriculum). Curriculum strategy to use.
token_by_char: (bool). Tokenize by character or words?
      task_type: (TaskType) defines the task by allowable ops (see
        TASK_TYPE_OPS).
Raises:
ValueError: If task is invalid.
"""
super(LearnToExecuteState, self).__init__()
self._token_by_char = token_by_char
# Compute the max number of steps possible to take.
if task_type in self.TASK_GROUPS[TaskGroups.PROG_TASKS]:
if token_by_char:
outer_nests_term = (max_length * 3 + 10) * (max_nesting - 1)
inner_nest_term = max_length * 4 + 10
nest_tok_term = (max_nesting - 1) * 2
self._num_steps_out = max_length * 2
else:
outer_nests_term = 10 * (max_nesting - 1)
inner_nest_term = 11
nest_tok_term = (max_nesting - 1) * 2
self._num_steps_out = 1
self._num_steps = outer_nests_term + inner_nest_term + nest_tok_term
elif task_type in self.TASK_GROUPS[TaskGroups.MEMORY_1]:
self._token_by_char = True
self._num_steps = max_length + 1
self._num_steps_out = max_length + 1
elif task_type in self.TASK_GROUPS[TaskGroups.MEMORY_2]:
self._token_by_char = True
self._num_steps = max_length + 1
self._num_steps_out = max_length * 2 + 1
else:
raise ValueError("Unknown task: {}.".format(task_type))
self._batch_size = batch_size
self._ops = LearnToExecuteState.get_task_ops(task_type)
self._curriculum = curriculum
num_steps = max(self._num_steps, self._num_steps_out)
self._data_source = TokenDataSource(
self._curriculum, self._batch_size, num_steps, self._ops,
self._token_by_char)
self.reset_data_source()
@staticmethod
def get_task_ops(task_type=TaskType.ALG_CTRL):
"""Returns an operations list based on the specified task index.
Args:
task_type: indicates the task type used.
Returns:
List of the eligible ops.
"""
try:
return LearnToExecuteState.TASK_TYPE_OPS[task_type]
except KeyError:
raise KeyError("Bad task_type '%s', check config." % task_type)
@property
def vocabulary(self):
"""List of strings, dataset vocabulary."""
return self._data_source.vocabulary
@property
def vocab_size(self):
return self._data_source.vocab_size
def _np_one_hot(self, tensor, num_steps):
tensor_oh = np.zeros((tensor.size, self.vocab_size))
tensor_oh[np.arange(tensor.size), tensor.flat] = 1
return tensor_oh.reshape(
num_steps, self.batch_size, self.vocab_size).astype(np.float32)
def reset_data_source(self):
"""Build the data source given the current curriculum state."""
self._data_source.generate_flat_data()
def evaluate_curriculum(self, loss):
"""If the currciulum state has updated rebuild the data source."""
if self._curriculum.update(loss):
self.reset_data_source()
@property
def num_steps(self):
return self._num_steps
@property
def num_steps_out(self):
return self._num_steps_out
@property
def batch_size(self):
return self._batch_size
@property
def curriculum(self):
"""Property returning curriculum object for this dataset."""
return self._curriculum
@property
def level(self):
return self._curriculum.current_level
@property
def seq_sizes_in(self):
"""Stores the input sequence size per batch."""
return self._data_source.sequence_sizes_in[:self.batch_size]
@property
def seq_sizes_out(self):
"""Stores the target sequence size per batch."""
return self._data_source.sequence_sizes_out[:self.batch_size]
def make_batch(self):
"""Generator function for batchifying data for learning to execute.
Yields:
tuple:
1. one-hot input tensor, representing programmatic input
2. one-hot target tensor, the evaluation result.
3. one-hot decoder target, start symbol added for sequence decoding.
4. batch size tensor containing integer input sequence lengths.
5. batch size tensor containing integer output sequence lengths.
"""
while True:
self.reset_data_source()
obs = np.reshape(self._data_source.flat_data,
[self.batch_size, -1])[:, :self._num_steps].T
target = np.reshape(
self._data_source.flat_targets,
[self.batch_size, -1])[:, :self._num_steps_out].T
start_tokens = np.ndarray([1, self.batch_size], dtype=np.int32)
start_tokens.fill(self._data_source.start_token[0])
target_in = np.concatenate((start_tokens, target[:-1, :]), axis=0)
yield (self._np_one_hot(obs, self._num_steps),
self._np_one_hot(target, self._num_steps_out),
self._np_one_hot(target_in, self._num_steps_out),
self.seq_sizes_in,
self.seq_sizes_out)
def to_human_readable(self, data, label_batch_entries=True, indices=None,
sep="\n"):
"""Returns a human-readable version of a one-hot encoding of words.
Args:
data: (numpy.ndarray S x B x OH). One-hot encoding of words. S is
sequence length, B is batch size, OH is one hot dimensionality.
label_batch_entries: (bool). Whether to add numerical label before each
batch element in the output string.
indices: (list(int) or None). Used to select a subset of minibatch indices
to print. None will print the whole minibatch.
sep: (str) separator which separates the output for each batch. Defaults
to the newline character.
Returns:
String composed from the data.
"""
batch_size = data.shape[1]
result = []
indices = indices or six.moves.range(batch_size)
for b in indices:
index_seq = np.argmax(data[:, b], axis=1)
prefix = "b_{}: ".format(b) if label_batch_entries else ""
result.append(prefix + self._data_source.decode_to_string(index_seq))
return sep.join(result)
# Sampling curriculum modes.
class Mode(Enum):
TRAIN_COMBINE = 1
TRAIN_MIX = 2
TRAIN_NAIVE = 3
TEST = 4
def LearnToExecute( # pylint: disable=invalid-name
batch_size, max_length=1, max_nesting=1, token_by_char=True,
mode=Mode.TRAIN_COMBINE, loss_threshold=0.1,
min_tries=DEFAULT_MIN_CURRICULUM_EVAL_TRIES, task_type=TaskType.ALG_CTRL):
"""Factory method for LearnToExecute Dataset module.
Args:
batch_size: (int). The number of elements in a mini-batch.
max_length: (int). Maximum character length.
max_nesting: (int). Maximum level of statement nesting.
token_by_char: (bool). Tokenize by character or words?
    mode: (Mode). Sampling curriculum mode; one of `Mode.TRAIN_COMBINE`,
      `Mode.TRAIN_MIX`, `Mode.TRAIN_NAIVE` or `Mode.TEST`.
    loss_threshold: (float) curriculum loss threshold below which the task
      difficulty is increased.
    min_tries: (int) minimum update tries for curriculum difficulty level.
    task_type: (TaskType) defines the task by allowable ops (see
      TASK_TYPE_OPS).
Returns:
tf.Data.Dataset for LearnToExecute sample generator with the
LearnToExecuteState monkey patched into the `state` attribute.
Raises:
ValueError: in case of bad `mode`.
"""
  # Choose the curriculum for the requested sampling mode (default
  # Mode.TRAIN_COMBINE).
if mode == Mode.TRAIN_COMBINE:
curriculum = CombineCurriculum(
max_length, max_nesting, loss_threshold, min_tries=min_tries)
elif mode == Mode.TRAIN_MIX:
curriculum = MixCurriculum(
max_length, max_nesting, loss_threshold, min_tries=min_tries)
elif mode == Mode.TRAIN_NAIVE:
curriculum = NaiveCurriculum(
max_length, max_nesting, loss_threshold, min_tries=min_tries)
elif mode == Mode.TEST:
curriculum = BaselineCurriculum(
max_length, max_nesting, loss_threshold, min_tries=0)
else:
raise ValueError("Invalid mode.")
lte = LearnToExecuteState(batch_size, max_length, max_nesting,
curriculum, token_by_char, task_type=task_type)
types_ = (tf.float32, tf.float32, tf.float32, tf.int64, tf.int64)
shapes_ = (tf.TensorShape([lte.num_steps, batch_size, lte.vocab_size]),
tf.TensorShape([lte.num_steps_out, batch_size, lte.vocab_size]),
tf.TensorShape([lte.num_steps_out, batch_size, lte.vocab_size]),
tf.TensorShape([batch_size,]),
tf.TensorShape([batch_size,]))
dataset = tf.data.Dataset.from_generator(lte.make_batch, types_, shapes_)
dataset.state = lte
return dataset
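# Illustrative sketch (not part of the original module): consuming the factory
# above through a one-shot iterator, as done in rmc_learn_to_execute.py. The
# batch size and difficulty limits are arbitrary example values.
def _example_learn_to_execute_pipeline():
  dataset = LearnToExecute(batch_size=2, max_length=2, max_nesting=2)
  obs, target, _, _, _ = dataset.make_one_shot_iterator().get_next()
  # `obs` is [num_steps, batch, vocab] and `target` is
  # [num_steps_out, batch, vocab]; the monkey-patched `state` exposes sizes.
  return obs, target, dataset.state.vocab_size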
| sonnet-1 | sonnet/examples/learn_to_execute.py |
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Examples."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| sonnet-1 | sonnet/examples/__init__.py |
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Example script to train a stacked LSTM on the Tiny Shakespeare dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import sonnet as snt
from sonnet.examples import dataset_shakespeare
import tensorflow.compat.v1 as tf
FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_integer("num_training_iterations", 10000,
"Number of iterations to train for.")
tf.flags.DEFINE_integer("report_interval", 1000,
"Iterations between reports (samples, valid loss).")
tf.flags.DEFINE_integer("reduce_learning_rate_interval", 2500,
"Iterations between learning rate reductions.")
tf.flags.DEFINE_integer("lstm_depth", 3, "Number of LSTM layers.")
tf.flags.DEFINE_integer("batch_size", 32, "Batch size for training.")
tf.flags.DEFINE_integer("num_embedding", 32, "Size of embedding layer.")
tf.flags.DEFINE_integer("num_hidden", 128, "Size of LSTM hidden layer.")
tf.flags.DEFINE_integer("truncation_length", 64, "Sequence size for training.")
tf.flags.DEFINE_integer("sample_length", 1000, "Sequence size for sampling.")
tf.flags.DEFINE_float("max_grad_norm", 5, "Gradient clipping norm limit.")
tf.flags.DEFINE_float("learning_rate", 0.1, "Optimizer learning rate.")
tf.flags.DEFINE_float("reduce_learning_rate_multiplier", 0.1,
"Learning rate is multiplied by this when reduced.")
tf.flags.DEFINE_float("optimizer_epsilon", 0.01,
"Epsilon used for Adam optimizer.")
tf.flags.DEFINE_string("checkpoint_dir", "/tmp/tf/rnn_shakespeare",
"Checkpointing directory.")
tf.flags.DEFINE_integer("checkpoint_interval", 500,
"Checkpointing step interval.")
def _configure_saver(checkpoint_dir, checkpoint_interval):
"""Returns a tf.train.CheckpointSaverHook for autosaving checkpoints."""
saver = tf.train.Saver()
return tf.train.CheckpointSaverHook(
checkpoint_dir=checkpoint_dir,
save_steps=checkpoint_interval,
saver=saver)
def build_graph(lstm_depth=3, batch_size=32, num_embedding=32, num_hidden=128,
truncation_length=64, sample_length=1000, max_grad_norm=5,
initial_learning_rate=0.1, reduce_learning_rate_multiplier=0.1,
optimizer_epsilon=0.01):
"""Constructs the computation graph."""
# Get datasets.
dataset_train = dataset_shakespeare.TinyShakespeareDataset(
num_steps=truncation_length,
batch_size=batch_size,
subset="train",
random=True,
name="shake_train")
dataset_valid = dataset_shakespeare.TinyShakespeareDataset(
num_steps=truncation_length,
batch_size=batch_size,
subset="valid",
random=False,
name="shake_valid")
dataset_test = dataset_shakespeare.TinyShakespeareDataset(
num_steps=truncation_length,
batch_size=batch_size,
subset="test",
random=False,
name="shake_test")
# Define model.
model = TextModel(
num_embedding=num_embedding,
num_hidden=num_hidden,
lstm_depth=lstm_depth,
output_size=dataset_valid.vocab_size,
use_dynamic_rnn=True,
use_skip_connections=True)
# Get the training loss.
train_input_sequence, train_target_sequence = dataset_train()
train_output_sequence_logits, train_final_state = model(train_input_sequence) # pylint: disable=not-callable
train_loss = dataset_train.cost(train_output_sequence_logits,
train_target_sequence)
# Get the validation loss.
valid_input_sequence, valid_target_sequence = dataset_valid()
valid_output_sequence_logits, _ = model(valid_input_sequence) # pylint: disable=not-callable
valid_loss = dataset_valid.cost(valid_output_sequence_logits,
valid_target_sequence)
# Get the test loss.
test_input_sequence, test_target_sequence = dataset_test()
test_output_sequence_logits, _ = model(test_input_sequence) # pylint: disable=not-callable
test_loss = dataset_test.cost(test_output_sequence_logits,
test_target_sequence)
# Build graph to sample some strings during training.
initial_logits = train_output_sequence_logits[truncation_length - 1]
train_generated_string = model.generate_string(
initial_logits=initial_logits,
initial_state=train_final_state,
sequence_length=sample_length)
# Set up global norm clipping of gradients.
trainable_variables = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(
tf.gradients(train_loss, trainable_variables), max_grad_norm)
# Get learning rate and define annealing.
learning_rate = tf.get_variable(
"learning_rate",
shape=[],
dtype=tf.float32,
initializer=tf.constant_initializer(initial_learning_rate),
trainable=False)
reduce_learning_rate = learning_rate.assign(
learning_rate * reduce_learning_rate_multiplier)
# Get training step counter.
global_step = tf.get_variable(
name="global_step",
shape=[],
dtype=tf.int64,
initializer=tf.zeros_initializer(),
trainable=False,
collections=[tf.GraphKeys.GLOBAL_VARIABLES,
tf.GraphKeys.GLOBAL_STEP])
# Define optimizer and training step.
optimizer = tf.train.AdamOptimizer(
learning_rate, epsilon=optimizer_epsilon)
train_step = optimizer.apply_gradients(
zip(grads, trainable_variables),
global_step=global_step)
graph_tensors = {
"train_loss": train_loss,
"valid_loss": valid_loss,
"test_loss": test_loss,
"train_generated_string": train_generated_string,
"reduce_learning_rate": reduce_learning_rate,
"global_step": global_step,
"train_step": train_step
}
# Return dataset_train for translation to human readable text.
return graph_tensors, dataset_train
def train(num_training_iterations, report_interval,
reduce_learning_rate_interval):
"""Trains a deep LSTM model on the Tiny Shakespeare dataset."""
# Build the computation graph.
graph_tensors, dataset_train = build_graph(
lstm_depth=FLAGS.lstm_depth, batch_size=FLAGS.batch_size,
num_embedding=FLAGS.num_embedding, num_hidden=FLAGS.num_hidden,
truncation_length=FLAGS.truncation_length,
sample_length=FLAGS.sample_length, max_grad_norm=FLAGS.max_grad_norm,
initial_learning_rate=FLAGS.learning_rate,
reduce_learning_rate_multiplier=FLAGS.reduce_learning_rate_multiplier,
optimizer_epsilon=FLAGS.optimizer_epsilon)
# Configure a checkpoint saver.
saver_hook = _configure_saver(FLAGS.checkpoint_dir,
FLAGS.checkpoint_interval)
# Train the network.
with tf.train.SingularMonitoredSession(
hooks=[saver_hook], checkpoint_dir=FLAGS.checkpoint_dir) as sess:
start_iteration = sess.run(graph_tensors["global_step"])
for train_iteration in range(start_iteration, num_training_iterations):
if (train_iteration + 1) % report_interval == 0:
train_loss_v, valid_loss_v, _ = sess.run(
(graph_tensors["train_loss"],
graph_tensors["valid_loss"],
graph_tensors["train_step"]))
train_generated_string_v = sess.run(
graph_tensors["train_generated_string"])
train_generated_string_human = dataset_train.to_human_readable(
(train_generated_string_v, 0), indices=[0])
tf.logging.info("%d: Training loss %f. Validation loss %f. Sample = %s",
train_iteration,
train_loss_v,
valid_loss_v,
train_generated_string_human)
else:
train_loss_v, _ = sess.run((graph_tensors["train_loss"],
graph_tensors["train_step"]))
tf.logging.info("%d: Training loss %f.", train_iteration, train_loss_v)
if (train_iteration + 1) % reduce_learning_rate_interval == 0:
sess.run(graph_tensors["reduce_learning_rate"])
tf.logging.info("Reducing learning rate.")
test_loss = sess.run(graph_tensors["test_loss"])
tf.logging.info("Test loss %f", test_loss)
class TextModel(snt.AbstractModule):
"""A deep LSTM model, for use on the Tiny Shakespeare dataset."""
def __init__(self, num_embedding, num_hidden, lstm_depth, output_size,
use_dynamic_rnn=True, use_skip_connections=True,
name="text_model"):
"""Constructs a `TextModel`.
Args:
num_embedding: Size of embedding representation, used directly after the
one-hot encoded input.
num_hidden: Number of hidden units in each LSTM layer.
lstm_depth: Number of LSTM layers.
output_size: Size of the output layer on top of the DeepRNN.
use_dynamic_rnn: Whether to use dynamic RNN unrolling. If `False`, it uses
static unrolling. Default is `True`.
use_skip_connections: Whether to use skip connections in the
`snt.DeepRNN`. Default is `True`.
name: Name of the module.
"""
super(TextModel, self).__init__(name=name)
self._num_embedding = num_embedding
self._num_hidden = num_hidden
self._lstm_depth = lstm_depth
self._output_size = output_size
self._use_dynamic_rnn = use_dynamic_rnn
self._use_skip_connections = use_skip_connections
with self._enter_variable_scope():
self._embed_module = snt.Linear(self._num_embedding, name="linear_embed")
self._output_module = snt.Linear(self._output_size, name="linear_output")
self._subcores = [
snt.LSTM(self._num_hidden, name="lstm_{}".format(i))
for i in range(self._lstm_depth)
]
if self._use_skip_connections:
skips = []
current_input_shape = self._num_embedding
for lstm in self._subcores:
input_shape = tf.TensorShape([current_input_shape])
skip = snt.SkipConnectionCore(
lstm,
input_shape=input_shape,
name="skip_{}".format(lstm.module_name))
skips.append(skip)
# SkipConnectionCore concatenates the input with the output, so the
# dimensionality increases with depth.
current_input_shape += self._num_hidden
self._subcores = skips
self._core = snt.DeepRNN(self._subcores, skip_connections=False,
name="deep_lstm")
def _build(self, one_hot_input_sequence):
"""Builds the deep LSTM model sub-graph.
Args:
one_hot_input_sequence: A Tensor with the input sequence encoded as a
one-hot representation. Its dimensions should be `[truncation_length,
batch_size, output_size]`.
Returns:
Tuple of the Tensor of output logits for the batch, with dimensions
`[truncation_length, batch_size, output_size]`, and the
      final state of the unrolled core.
"""
input_shape = one_hot_input_sequence.get_shape()
batch_size = input_shape[1]
batch_embed_module = snt.BatchApply(self._embed_module)
input_sequence = batch_embed_module(one_hot_input_sequence)
input_sequence = tf.nn.relu(input_sequence)
initial_state = self._core.initial_state(batch_size)
if self._use_dynamic_rnn:
output_sequence, final_state = tf.nn.dynamic_rnn(
cell=self._core,
inputs=input_sequence,
time_major=True,
initial_state=initial_state)
else:
rnn_input_sequence = tf.unstack(input_sequence)
output, final_state = tf.nn.static_rnn(
cell=self._core,
inputs=rnn_input_sequence,
initial_state=initial_state)
output_sequence = tf.stack(output)
batch_output_module = snt.BatchApply(self._output_module)
output_sequence_logits = batch_output_module(output_sequence)
return output_sequence_logits, final_state
@snt.reuse_variables
def generate_string(self, initial_logits, initial_state, sequence_length):
"""Builds sub-graph to generate a string, sampled from the model.
Args:
initial_logits: Starting logits to sample from.
initial_state: Starting state for the RNN core.
sequence_length: Number of characters to sample.
Returns:
A Tensor of characters, with dimensions `[sequence_length, batch_size,
output_size]`.
"""
current_logits = initial_logits
current_state = initial_state
generated_letters = []
for _ in range(sequence_length):
# Sample a character index from distribution.
char_index = tf.squeeze(tf.multinomial(current_logits, 1))
char_one_hot = tf.one_hot(char_index, self._output_size, 1.0, 0.0)
generated_letters.append(char_one_hot)
# Feed character back into the deep_lstm.
gen_out_seq, current_state = self._core(
tf.nn.relu(self._embed_module(char_one_hot)),
current_state)
current_logits = self._output_module(gen_out_seq)
generated_string = tf.stack(generated_letters)
return generated_string
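# Illustrative sketch (not part of the original script): connecting a TextModel
# to dummy one-hot data. The vocabulary size, sequence length and batch size
# are arbitrary example values; shapes follow the `_build` docstring above.
def _example_text_model():
  vocab_size = 64
  dummy_inputs = tf.one_hot(
      tf.zeros([16, 4], dtype=tf.int32), depth=vocab_size)  # [T, B, vocab].
  model = TextModel(num_embedding=8, num_hidden=16, lstm_depth=2,
                    output_size=vocab_size)
  logits, final_state = model(dummy_inputs)  # pylint: disable=not-callable
  return logits, final_state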
def main(unused_argv):
train(
num_training_iterations=FLAGS.num_training_iterations,
report_interval=FLAGS.report_interval,
reduce_learning_rate_interval=FLAGS.reduce_learning_rate_interval)
if __name__ == "__main__":
tf.app.run()
| sonnet-1 | sonnet/examples/rnn_shakespeare.py |
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Defines the dataset for generating sequences of n-th farthest problem.
The "N-th Farthest" task is designed to stress a capacity for relational
reasoning across time. Inputs are a sequence of randomly sampled vectors and
targets are answers to a question of the form:
"What is the n-th farthest vector (in Euclidean distance) from vector `m`?"
where the vector values, their IDs, `n` and `m` are randomly sampled per
sequence. The model must compute all pairwise distance relations to the
reference vector `m` which it eventually sees at some point in the sequence.
The vector distances must be implicitly sorted to produce an answer, so the
model must also order the distance relations between vectors.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
from scipy.spatial import distance as spdistance
import six
import tensorflow.compat.v1 as tf
class NthFarthest(object):
"""Choose the nth furthest object from the reference."""
def __init__(self, batch_size, num_objects, num_features):
self._batch_size = batch_size
self._num_objects = num_objects
self._num_features = num_features
def _get_single_set(self, num_objects, num_features):
"""Generate one input sequence and output label.
    Each sequence of objects has a feature that consists of the feature vector
for that object plus the encoding for its ID, the reference vector ID and
the n-th value relative ID for a total feature size of:
`num_objects` * 3 + `num_features`
Args:
num_objects: int. number of objects in the sequence.
num_features: int. feature size of each object.
Returns:
1. np.ndarray (`num_objects`, (`num_features` + 3 * `num_objects`)).
2. np.ndarray (1,). Output object reference label.
"""
    # Generate random feature vectors uniformly in [-1, 1).
data = np.random.uniform(-1, 1, size=(num_objects, num_features))
distances = spdistance.squareform(spdistance.pdist(data))
distance_idx = np.argsort(distances)
# Choose random distance
nth = np.random.randint(0, num_objects)
# Pick out the nth furthest for each object
nth_furthest = distance_idx[:, nth]
# Choose random reference object
reference = np.random.randint(0, num_objects)
# Get identity of object that is the nth furthest from reference object
labels = nth_furthest[reference]
# Compile data
object_ids = np.identity(num_objects)
nth_matrix = np.zeros((num_objects, num_objects))
nth_matrix[:, nth] = 1
reference_object = np.zeros((num_objects, num_objects))
reference_object[:, reference] = 1
inputs = np.concatenate([data, object_ids, reference_object, nth_matrix],
axis=-1)
inputs = np.random.permutation(inputs)
labels = np.expand_dims(labels, axis=0)
return inputs.astype(np.float32), labels.astype(np.float32)
def _get_batch_data(self, batch_size, num_objects, num_features):
"""Assembles a batch of input tensors and output labels.
Args:
batch_size: int. number of sequence batches.
num_objects: int. number of objects in the sequence.
num_features: int. feature size of each object.
Returns:
1. np.ndarray (`batch_size`, `num_objects`,
(`num_features` + 3 * `num_objects`)).
2. np.ndarray (`batch_size`). Output object reference label.
"""
all_inputs = []
all_labels = []
for _ in six.moves.range(batch_size):
inputs, labels = self._get_single_set(num_objects, num_features)
all_inputs += [inputs]
all_labels += [labels]
input_data = np.concatenate(all_inputs, axis=0)
label_data = np.concatenate(all_labels, axis=0)
return input_data, label_data
def get_batch(self):
"""Returns set of nth-farthest input tensors and labels.
Returns:
1. tf.Tensor (`batch_size`, `num_objects`,
(`num_features` + 3 * `num_objects`)).
2. tf.Tensor (`batch_size`). Output object reference label.
"""
params = [self._batch_size, self._num_objects, self._num_features]
inputs, labels = tf.py_func(self._get_batch_data, params,
[tf.float32, tf.float32])
inputs = tf.reshape(inputs, [self._batch_size, self._num_objects,
self._num_features + self._num_objects * 3])
labels = tf.reshape(labels, [-1])
return inputs, labels
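# Illustrative sketch (not part of the original module): building the input
# pipeline for the n-th farthest task. The batch/object/feature sizes are
# arbitrary example values.
def _example_nth_farthest_batch():
  dataset = NthFarthest(batch_size=4, num_objects=8, num_features=16)
  inputs, labels = dataset.get_batch()
  # inputs: [4, 8, 16 + 3 * 8] float32; labels: [4] float32 object indices.
  return inputs, labels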
| sonnet-1 | sonnet/examples/dataset_nth_farthest.py |
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for sonnet.examples.rmc_nth_farthest."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sonnet as snt
from sonnet.examples import learn_to_execute
from sonnet.examples import rmc_learn_to_execute
import tensorflow.compat.v1 as tf
class RMCLearnTest(tf.test.TestCase):
def setUp(self):
self._batch_size = 2
self._seq_sz_in = 10
self._seq_sz_out = 3
self._feature_size = 8
self._nesting = 2
self._literal_length = 3
def test_object_sequence_model(self):
"""Test the model class."""
core = snt.RelationalMemory(
mem_slots=2, head_size=4, num_heads=1, num_blocks=1, gate_style="unit")
final_mlp = snt.nets.MLP(
output_sizes=(5,), activate_final=True)
model = rmc_learn_to_execute.SequenceModel(
core=core,
target_size=self._feature_size,
final_mlp=final_mlp)
dummy_in = tf.zeros(
(self._seq_sz_in, self._batch_size, self._feature_size))
dummy_out = tf.zeros(
(self._seq_sz_out, self._batch_size, self._feature_size))
sizes = tf.ones((self._batch_size))
logits = model(dummy_in, dummy_out, sizes, sizes)
self.assertAllEqual(
logits.shape, (self._seq_sz_out, self._batch_size, self._feature_size))
def test_build_and_train(self):
"""Test the example TF graph build."""
total_iterations = 2
reporting_interval = 1
rmc_learn_to_execute.build_and_train(
total_iterations, reporting_interval, test=True)
  def test_learn_to_execute_dataset(self):
"""Test the dataset class."""
dataset = learn_to_execute.LearnToExecute(
self._batch_size, self._literal_length, self._nesting)
dataset_iter = dataset.make_one_shot_iterator().get_next()
logit_size = dataset.state.vocab_size
seq_sz_in = dataset.state.num_steps
seq_sz_out = dataset.state.num_steps_out
self.assertAllEqual(
dataset_iter[0].shape, (seq_sz_in, self._batch_size, logit_size))
self.assertAllEqual(
dataset_iter[1].shape, (seq_sz_out, self._batch_size, logit_size))
self.assertAllEqual(
dataset_iter[2].shape, (seq_sz_out, self._batch_size, logit_size))
self.assertAllEqual(dataset_iter[3].shape, (self._batch_size,))
self.assertAllEqual(dataset_iter[4].shape, (self._batch_size,))
if __name__ == "__main__":
tf.test.main()
| sonnet-1 | sonnet/examples/rmc_learn_to_execute_test.py |
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Example script to train the Relational Memory Core.
This is a reduced size version of the "Learning To Execute" (LTE) task defined
in:
https://arxiv.org/abs/1806.01822
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
# Dependency imports
from absl import flags
import six
import sonnet as snt
from sonnet.examples import learn_to_execute
import tensorflow.compat.v1 as tf
FLAGS = flags.FLAGS
flags.DEFINE_float("learning_rate", 1e-4, "Initial learning rate.")
flags.DEFINE_float("min_learning_rate", 8e-5, "Minimum learning rate.")
flags.DEFINE_integer("batch_size", 1600, "Batch size.")
flags.DEFINE_integer("head_size", 2048, "Total memory size for the RMC.")
flags.DEFINE_integer("num_heads", 1, "Attention heads for RMC.")
flags.DEFINE_integer("num_mems", 4, "Number of memories for RMC.")
flags.DEFINE_integer("num_blocks", 1, "Number of attention blocks for RMC.")
flags.DEFINE_string("gate_style", "unit", "Gating style for RMC.")
flags.DEFINE_integer("max_length", 5, "LTE max literal length.")
flags.DEFINE_integer("max_nest", 2, "LTE max nesting level.")
flags.DEFINE_integer("epochs", 1000000, "Total training epochs.")
flags.DEFINE_integer("log_stride", 500, "Iterations between reports.")
class SequenceModel(snt.AbstractModule):
"""Seq2Seq Model to process LTE sequence batches."""
def __init__(
self,
core,
target_size,
final_mlp,
name="sequence_model"):
super(SequenceModel, self).__init__(name=name)
self._core = core
self._target_size = target_size
self._final_mlp = final_mlp
def _build(
self, inputs, targets, input_sequence_length, output_sequence_length):
"""Dynamic unroll across input objects.
Args:
inputs: tensor (input_sequence_length x batch x feature_size). Encoder
sequence.
targets: tensor (output_sequence_length x batch x feature_size). Decoder
sequence.
input_sequence_length: tensor (batch). Size of each batched input
sequence.
output_sequence_length: tensor (batch). Size of each batched target
sequence.
Returns:
      Tensor (output_sequence_length x batch x target_size); output logits.
"""
    # Connect encoding steps.
batch_size = inputs.get_shape()[1]
initial_state = self._core.initial_state(batch_size, trainable=False)
_, state = tf.nn.dynamic_rnn(
cell=self._core,
inputs=inputs,
sequence_length=input_sequence_length,
time_major=True,
initial_state=initial_state
)
# Connect decoding steps.
zero_input = tf.zeros(shape=targets.get_shape())
output_sequence, _ = tf.nn.dynamic_rnn(
cell=self._core,
inputs=zero_input, # Non-autoregressive model. Zeroed input.
sequence_length=output_sequence_length,
initial_state=state,
time_major=True)
outputs = snt.BatchApply(self._final_mlp)(output_sequence)
logits = snt.BatchApply(snt.Linear(self._target_size))(outputs)
tf.logging.info("Connected seq2seq model.")
return logits
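# Illustrative sketch (not part of the original script): wiring SequenceModel
# to a LearnToExecute iterator, mirroring build_and_train below with small,
# arbitrary example hyperparameters.
def _example_sequence_model():
  lte = learn_to_execute.LearnToExecute(
      batch_size=2, max_length=2, max_nesting=2)
  inputs, targets, _, in_lengths, out_lengths = (
      lte.make_one_shot_iterator().get_next())
  core = snt.RelationalMemory(
      mem_slots=2, head_size=4, num_heads=1, num_blocks=1, gate_style="unit")
  model = SequenceModel(
      core=core,
      target_size=lte.state.vocab_size,
      final_mlp=snt.nets.MLP(output_sizes=(16,), activate_final=True))
  # Logits have shape [num_steps_out, batch, vocab_size].
  return model(inputs, targets, in_lengths, out_lengths)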
def build_and_train(iterations, log_stride, test=False):
"""Construct the data, model, loss and optimizer then train."""
# Test mode settings.
batch_size = 2 if test else FLAGS.batch_size
num_mems = 2 if test else FLAGS.num_mems
  num_heads = 1 if test else FLAGS.num_heads
  num_blocks = 1 if test else FLAGS.num_blocks
head_size = 4 if test else FLAGS.head_size
max_length = 3 if test else FLAGS.max_length
max_nest = 2 if test else FLAGS.max_nest
mlp_size = (20,) if test else (256, 256, 256, 256)
with tf.Graph().as_default():
t0 = time.time()
# Initialize the dataset.
lte_train = learn_to_execute.LearnToExecute(
batch_size, max_length, max_nest)
lte_test = learn_to_execute.LearnToExecute(
batch_size, max_length, max_nest, mode=learn_to_execute.Mode.TEST)
train_data_iter = lte_train.make_one_shot_iterator().get_next()
test_data_iter = lte_test.make_one_shot_iterator().get_next()
output_size = lte_train.state.vocab_size
# Create the model.
core = snt.RelationalMemory(
mem_slots=num_mems,
head_size=head_size,
num_heads=num_heads,
num_blocks=num_blocks,
gate_style=FLAGS.gate_style)
final_mlp = snt.nets.MLP(
output_sizes=mlp_size,
activate_final=True)
model = SequenceModel(
core=core,
target_size=output_size,
final_mlp=final_mlp)
tf.logging.info("Instantiated models ({:3f})".format(time.time() - t0))
# Define the loss & accuracy.
def loss_fn(inputs, targets, input_sequence_length, output_sequence_length):
"""Creates the loss and the exports."""
logits = model(
inputs, targets, input_sequence_length, output_sequence_length)
targets = tf.cast(targets, tf.int32)
sq_sz_out_max = targets.shape[0].value
# Create a mask to ignore accuracy on buffer characters.
sequence_sizes = tf.cast(output_sequence_length, tf.float32)
lengths_transposed = tf.expand_dims(sequence_sizes, 1)
range_row = tf.expand_dims(
tf.range(0, sq_sz_out_max, 1, dtype=tf.float32), 0)
mask = tf.cast(tf.transpose(tf.less(range_row, lengths_transposed)),
tf.float32)
# Compute token accuracy and solved.
correct = tf.equal(tf.argmax(logits, 2), tf.argmax(targets, 2))
solved = tf.reduce_all(tf.boolean_mask(correct, tf.squeeze(mask)), axis=0)
token_acc = tf.reduce_sum(tf.cast(correct, tf.float32) * mask)
token_acc /= tf.reduce_sum(sequence_sizes)
# Compute Loss.
mask = tf.cast(tf.tile(tf.expand_dims(mask, 2), (1, 1, logits.shape[2])),
tf.float32)
masked_logits = logits * mask
masked_target = tf.cast(targets, tf.float32) * mask
logits_flat = tf.reshape(masked_logits,
[sq_sz_out_max * batch_size, -1])
target_flat = tf.reshape(masked_target,
[sq_sz_out_max * batch_size, -1])
xent = tf.nn.softmax_cross_entropy_with_logits(logits=logits_flat,
labels=target_flat)
loss = tf.reduce_mean(xent)
return loss, token_acc, solved
# Get training step counter.
global_step = tf.train.get_or_create_global_step()
# Create the optimizer.
learning_rate_op = tf.reduce_max([
tf.train.exponential_decay(
FLAGS.learning_rate,
global_step,
decay_steps=FLAGS.epochs // 100,
decay_rate=0.9,
staircase=False),
FLAGS.min_learning_rate
])
optimizer = tf.train.AdamOptimizer(learning_rate_op)
# Compute loss, accuracy & the step op.
inputs, targets, _, input_lengths, output_lengths = train_data_iter
train_loss, train_acc, train_sol = loss_fn(
inputs, targets, input_lengths, output_lengths)
step_op = optimizer.minimize(train_loss, global_step=global_step)
inputs, targets, _, input_lengths, output_lengths = test_data_iter
_, test_acc, test_sol = loss_fn(
inputs, targets, input_lengths, output_lengths)
tf.logging.info("Created losses and optimizers ({:3f})".format(
time.time() - t0))
# Begin Training.
t0 = time.time()
tf.logging.info("Starting training ({:3f})".format(time.time() - t0))
with tf.train.SingularMonitoredSession() as sess:
for it in six.moves.range(iterations):
sess.run([step_op, learning_rate_op])
if it % log_stride == 0:
loss_v, train_acc_v, test_acc_v, train_sol_v, test_sol_v = sess.run([
train_loss, train_acc, test_acc, train_sol, test_sol])
elapsed = time.time() - t0
tf.logging.info(
"iter: {:2d}, train loss {:3f}; train acc {:3f}; test acc {:3f};"
" train solved {:3f}; test solved {:3f}; ({:3f})".format(
it, loss_v, train_acc_v, test_acc_v, train_sol_v, test_sol_v,
elapsed))
def main(unused_argv):
build_and_train(FLAGS.epochs, FLAGS.log_stride, test=True)
if __name__ == "__main__":
tf.app.run()
| sonnet-1 | sonnet/examples/rmc_learn_to_execute.py |
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Example script using `snt.Module` to make a module with build method args.
`snt.Sequential` has been deliberately designed for simple use cases. In
particular, it assumes that the only arguments passed when called are inputs to
the first layer. As such, one cannot easily control behaviour of submodules that
do accept different arguments in their call method, such as `snt.BatchNorm` and
the `is_training` flag. One may, however, quite easily replicate the same
functionality using `snt.Module` to construct a module from a custom method, as
shown in this script.
To run this script (on CPU), use the following command:
```
bazel run -c opt module_with_build_args
```
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import sonnet as snt
import tensorflow.compat.v1 as tf
def custom_build(inputs, is_training, keep_prob):
"""A custom build method to wrap into a sonnet Module."""
outputs = snt.Conv2D(output_channels=32, kernel_shape=4, stride=2)(inputs)
outputs = snt.BatchNorm()(outputs, is_training=is_training)
outputs = tf.nn.relu(outputs)
outputs = snt.Conv2D(output_channels=64, kernel_shape=4, stride=2)(outputs)
outputs = snt.BatchNorm()(outputs, is_training=is_training)
outputs = tf.nn.relu(outputs)
outputs = snt.BatchFlatten()(outputs)
outputs = tf.nn.dropout(outputs, keep_prob=keep_prob)
outputs = snt.Linear(output_size=10)(outputs)
return outputs
def main(unused_argv):
inputs = tf.random_uniform(shape=[10, 32, 32, 3])
targets = tf.random_uniform(shape=[10, 10])
# The line below takes custom_build and wraps it to construct a sonnet Module.
module_with_build_args = snt.Module(custom_build, name='simple_net')
train_model_outputs = module_with_build_args(inputs, is_training=True,
keep_prob=tf.constant(0.5))
test_model_outputs = module_with_build_args(inputs, is_training=False,
keep_prob=tf.constant(1.0))
loss = tf.nn.l2_loss(targets - train_model_outputs)
# Ensure the moving averages for the BatchNorm modules are updated.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_step = tf.train.GradientDescentOptimizer(learning_rate=1e-3).minimize(
loss)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for _ in range(100):
sess.run(train_step)
# Check that evaluating train_model_outputs twice returns the same value.
train_outputs, train_outputs_2 = sess.run([train_model_outputs,
train_model_outputs])
assert (train_outputs == train_outputs_2).all()
# Check that there is indeed a difference between train_model_outputs and
# test_model_outputs.
train_outputs, test_outputs = sess.run([train_model_outputs,
test_model_outputs])
assert (train_outputs != test_outputs).any()
if __name__ == '__main__':
tf.app.run()
| sonnet-1 | sonnet/examples/module_with_build_args.py |
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for sonnet.examples.rmc_nth_farthest."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sonnet as snt
from sonnet.examples import dataset_nth_farthest
from sonnet.examples import rmc_nth_farthest
import tensorflow.compat.v1 as tf
class RMCNthFarthestTest(tf.test.TestCase):
def setUp(self):
self._batch_size = 2
self._num_objects = 2
self._feature_size = 2
def test_object_sequence_model(self):
"""Test the model class."""
core = snt.RelationalMemory(
mem_slots=2, head_size=4, num_heads=1, num_blocks=1, gate_style="unit")
final_mlp = snt.nets.MLP(
output_sizes=(5,), activate_final=True)
model = rmc_nth_farthest.SequenceModel(
core=core,
target_size=self._num_objects,
final_mlp=final_mlp)
logits = model(tf.zeros(
(self._batch_size, self._num_objects, self._feature_size)))
self.assertAllEqual(logits.shape,
(self._batch_size, self._num_objects))
def test_build_and_train(self):
"""Test the example TF graph build."""
total_iterations = 2
reporting_interval = 1
steps, train_losses, test_accs = rmc_nth_farthest.build_and_train(
total_iterations, reporting_interval, test=True)
self.assertEqual(len(steps), total_iterations)
self.assertEqual(len(train_losses), total_iterations)
self.assertEqual(len(test_accs), total_iterations)
  def test_nth_farthest_dataset(self):
"""Test the dataset class."""
dataset = dataset_nth_farthest.NthFarthest(
self._batch_size, self._num_objects, self._feature_size)
inputs, _ = dataset.get_batch()
final_feature_size = self._feature_size + 3 * self._num_objects
self.assertAllEqual(
inputs.shape,
(self._batch_size, self._num_objects, final_feature_size))
if __name__ == "__main__":
tf.test.main()
| sonnet-1 | sonnet/examples/rmc_nth_farthest_test.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Configure script to get build parameters from user.
This should be run before building launchpad with Bazel. The easiest usage is
`python3 configure.py`. It will use the running version of python to suggest
the correct paths to set for the bazel config.
Shamelessly taken from TensorFlow:
https://github.com/tensorflow/tensorflow/blob/master/configure.py
"""
import argparse
import os
import subprocess
import sys
_LAUNCHPAD_BAZELRC_FILENAME = '.launchpad.bazelrc'
_LAUNCHPAD_WORKSPACE_ROOT = ''
_LAUNCHPAD_BAZELRC = ''
def main():
global _LAUNCHPAD_WORKSPACE_ROOT
global _LAUNCHPAD_BAZELRC
parser = argparse.ArgumentParser()
parser.add_argument(
'--workspace',
type=str,
default=os.path.abspath(os.path.dirname(__file__)),
help='The absolute path to your active Bazel workspace.')
args = parser.parse_args()
_LAUNCHPAD_WORKSPACE_ROOT = args.workspace
_LAUNCHPAD_BAZELRC = os.path.join(_LAUNCHPAD_WORKSPACE_ROOT,
_LAUNCHPAD_BAZELRC_FILENAME)
  # Make a copy of os.environ so it is explicit when functions get and set
  # environment variables.
environ_cp = dict(os.environ)
reset_configure_bazelrc()
setup_python(environ_cp)
def get_from_env_or_user_or_default(environ_cp, var_name, ask_for_var,
var_default):
"""Get var_name either from env, or user or default.
If var_name has been set as environment variable, use the preset value, else
ask for user input. If no input is provided, the default is used.
Args:
environ_cp: copy of the os.environ.
var_name: string for name of environment variable, e.g. "TF_NEED_CUDA".
ask_for_var: string for how to ask for user input.
var_default: default value string.
Returns:
string value for var_name
"""
var = environ_cp.get(var_name)
if not var:
var = get_input(ask_for_var)
print('\n')
if not var:
var = var_default
return var
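# Illustrative sketch (comments only, not executed): pre-seeding the
# environment makes the query above non-interactive. For example, with
# environ_cp = {'PYTHON_BIN_PATH': '/usr/bin/python3'}, a call such as
#   get_from_env_or_user_or_default(
#       environ_cp, 'PYTHON_BIN_PATH', 'Please specify python: ',
#       sys.executable)
# returns '/usr/bin/python3' without ever prompting via get_input().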
def get_input(question):
try:
try:
answer = raw_input(question)
except NameError:
answer = input(question)
except EOFError:
answer = ''
return answer
def setup_python(environ_cp):
"""Setup python related env variables."""
# Get PYTHON_BIN_PATH, default is the current running python.
default_python_bin_path = sys.executable
ask_python_bin_path = ('Please specify the location of python. [Default is '
'%s]: ') % default_python_bin_path
while True:
python_bin_path = get_from_env_or_user_or_default(environ_cp,
'PYTHON_BIN_PATH',
ask_python_bin_path,
default_python_bin_path)
# Check if the path is valid
if os.path.isfile(python_bin_path) and os.access(python_bin_path, os.X_OK):
break
elif not os.path.exists(python_bin_path):
print('Invalid python path: %s cannot be found.' % python_bin_path)
else:
print('%s is not executable. Is it the python binary?' % python_bin_path)
environ_cp['PYTHON_BIN_PATH'] = ''
# Get PYTHON_LIB_PATH
python_lib_path = environ_cp.get('PYTHON_LIB_PATH')
if not python_lib_path:
python_lib_paths = get_python_path(environ_cp, python_bin_path)
if environ_cp.get('USE_DEFAULT_PYTHON_LIB_PATH') == '1':
python_lib_path = python_lib_paths[0]
else:
print('Found possible Python library paths:\n %s' %
'\n '.join(python_lib_paths))
default_python_lib_path = python_lib_paths[0]
python_lib_path = get_input(
'Please input the desired Python library path to use. '
'Default is [%s]\n' % python_lib_paths[0])
if not python_lib_path:
python_lib_path = default_python_lib_path
environ_cp['PYTHON_LIB_PATH'] = python_lib_path
# Set-up env variables used by python_configure.bzl
write_action_env_to_bazelrc('PYTHON_BIN_PATH', python_bin_path)
write_action_env_to_bazelrc('PYTHON_LIB_PATH', python_lib_path)
  write_to_bazelrc('build --python_path="%s"' % python_bin_path)
  write_to_bazelrc('build --repo_env=PYTHON_BIN_PATH="%s"' % python_bin_path)
environ_cp['PYTHON_BIN_PATH'] = python_bin_path
  # If the chosen python_lib_path is from a path specified in the PYTHONPATH
  # variable, we need to tell bazel to include PYTHONPATH.
if environ_cp.get('PYTHONPATH'):
python_paths = environ_cp.get('PYTHONPATH').split(':')
if python_lib_path in python_paths:
write_action_env_to_bazelrc('PYTHONPATH', environ_cp.get('PYTHONPATH'))
# Write tools/python_bin_path.sh
with open(
os.path.join(_LAUNCHPAD_WORKSPACE_ROOT, 'python_bin_path.sh'),
'w') as f:
f.write('export PYTHON_BIN_PATH="%s"' % python_bin_path)
def get_python_path(environ_cp, python_bin_path):
"""Get the python site package paths."""
python_paths = []
if environ_cp.get('PYTHONPATH'):
python_paths = environ_cp.get('PYTHONPATH').split(':')
try:
stderr = open(os.devnull, 'wb')
library_paths = run_shell([
python_bin_path, '-c',
'import site; print("\\n".join(site.getsitepackages()))'
],
stderr=stderr).split('\n')
except subprocess.CalledProcessError:
library_paths = [
run_shell([
python_bin_path, '-c',
'from distutils.sysconfig import get_python_lib;'
'print(get_python_lib())'
])
]
all_paths = set(python_paths + library_paths)
paths = []
for path in all_paths:
if os.path.isdir(path):
paths.append(path)
return paths
def run_shell(cmd, allow_non_zero=False, stderr=None):
"""Get var_name either from env, or user or default.
Args:
cmd: copy of the os.environ.
allow_non_zero: string for name of environment variable, e.g. "TF_NEED
stderr: string for how to ask for user input.
Returns:
string value output of the command executed.
"""
if stderr is None:
stderr = sys.stdout
if allow_non_zero:
try:
output = subprocess.check_output(cmd, stderr=stderr)
except subprocess.CalledProcessError as e:
output = e.output
else:
output = subprocess.check_output(cmd, stderr=stderr)
return output.decode('UTF-8').strip()
def write_action_env_to_bazelrc(var_name, var):
write_to_bazelrc('build --action_env %s="%s"' % (var_name, str(var)))
def write_to_bazelrc(line):
with open(_LAUNCHPAD_BAZELRC, 'a') as f:
f.write(line + '\n')
def reset_configure_bazelrc():
"""Reset file that contains customized config settings."""
open(_LAUNCHPAD_BAZELRC, 'w').close()
if __name__ == '__main__':
main()
| launchpad-master | configure.py |
from ctypes import cdll
import pkgutil
import os
tf = pkgutil.get_loader("tensorflow")
if tf:
cdll.LoadLibrary(os.path.join(os.path.dirname(tf.path), 'libtensorflow_framework.so.2')) # pytype:disable=attribute-error
del tf
del cdll
del pkgutil
del os
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Courier module."""
from courier.python.client import Client # pytype: disable=import-error
from courier.python.client import list_methods # pytype: disable=import-error
from courier.python.py_server import Server # pytype: disable=import-error
| launchpad-master | courier/__init__.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python server bindings for Courier RPCs.
Example usage:
server = courier.Server('my_server')
server.Bind('my_function', lambda a, b: a + b)
server.Start()
client = courier.Client('my_server')
result = client.my_function(4, 7) # 11, evaluated on the server.
"""
from typing import Optional
from courier.handlers.python import pybind
from courier.python import router
from courier.python import server
# Numpy import needed for proper operation of ../serialization/py_serialize.cc
import numpy
import portpicker
from six.moves import map
import tree as nest
class Server:
"""Server class for hosting Courier RPCs.
This provides a convenience wrapper around the CLIF bindings. The thread pool
size determines how many method handlers can be executed concurrently.
  Server start and termination: no RPCs are served before a call to Start has
  been entered, or after a call to Join or Stop has returned. A server may be
  started at most once. The functions Stop and Join may block if they need to
  wait for a concurrent Start to complete.
"""
def __init__(
self,
name: Optional[str] = None,
port: Optional[int] = None,
thread_pool_size: int = 16,
):
if port is None:
port = portpicker.pick_unused_port()
self._port = port
self._thread_pool_size = thread_pool_size
self._router = router.Router()
self._server = None
@property
def port(self) -> int:
return self._port
@property
def address(self) -> str:
return f'localhost:{self._port}'
def BindHandler(self, method_name, handler, is_priority: bool = False):
self._router.Bind(method_name, handler, is_priority)
def Bind(self, method_name: str, py_func, is_priority: bool = False):
self.BindHandler(method_name, pybind.BuildPyCallHandler(py_func),
is_priority)
def Join(self):
if not self._server:
raise ValueError('Server not started')
self._server.Join()
def Start(self):
"""Starts the Courier server."""
if self._server:
raise ValueError('Server already started')
    self._server = server.BuildAndStart(self._router, self._port,
                                        self._thread_pool_size)
def Stop(self):
"""Stops the Courier server."""
if not self._server:
raise ValueError('Server not started yet')
self._server.Stop()
def Unbind(self, method_name):
self._router.Unbind(method_name)
def SetIsHealthy(self, is_healthy):
"""Changes the health status of the server.
    A server that reports as unhealthy still accepts and serves incoming
    requests, but it signals that it prefers not to receive any. This is
    useful in the following scenarios:
- Before the server is terminated, it starts reporting as unhealthy. This
lets clients know not to send any further requests to this server.
- When load-balancing requests over several servers and this server is
experiencing problems (e.g. high disk latency). This lets the
load-balancer know not to send any traffic until the problems have been
resolved.
Args:
is_healthy: A boolean that indicates whether or not the server reports as
healthy.
"""
if not self._server:
raise ValueError('Cannot set health status; server not started.')
self._server.SetIsHealthy(is_healthy)
@property
def has_started(self):
"""Returns True if the method `Start` has already been called.
This method is not thread safe with regards to Start().
"""
return self._server is not None
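if __name__ == '__main__':
  # Hedged usage sketch mirroring the module docstring and the SetIsHealthy
  # behaviour described above; it assumes the compiled Courier bindings are
  # importable on this machine. The bound method name 'add' is illustrative.
  from courier.python import client as courier_client  # pylint: disable=g-import-not-at-top
  demo_server = Server('demo')
  demo_server.Bind('add', lambda a, b: a + b)
  demo_server.Start()
  demo_client = courier_client.Client(demo_server.address)
  assert demo_client.add(4, 7) == 11
  # Report as unhealthy before shutting down so load balancers stop routing
  # new requests here, then terminate and wait for the server.
  demo_server.SetIsHealthy(False)
  demo_server.Stop()
  demo_server.Join()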
| launchpad-master | courier/python/py_server.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python client bindings for Courier RPCs.
Example usage:
server = courier.Server('my_server')
server.Bind('my_function', lambda a, b: a + b)
server.Start()
client = courier.Client('my_server')
result = client.my_function(4, 7) # 11, evaluated on the server.
"""
from concurrent import futures
import datetime
from typing import List, Optional, Union
import zoneinfo
from courier.python import py_client
# Numpy import needed for proper operation of ../serialization/py_serialize.cc
import numpy
from pybind11_abseil.status import StatusNotOk as StatusThrown # pytype: disable=import-error
from pybind11_abseil.status import StatusNotOk # pytype: disable=import-error
def translate_status(s):
"""Translate Pybind11 status to Exception."""
exc = StatusNotOk(s.message())
exc.code = s.code()
return exc
def exception_handler(func):
def inner_function(*args, **kwargs):
try:
return func(*args, **kwargs)
except StatusThrown as e:
raise translate_status(e.status) from e
return inner_function
def _calculate_deadline(
timeout: Optional[datetime.timedelta], propagate_deadline: bool
) -> datetime.datetime:
"""Calculate the out-bound deadline from timeout and existing deadline.
Args:
timeout: Timeout to apply to all calls. If set to None or a zero-length
timedelta then no timeout is applied.
propagate_deadline: Unsupported feature.
Returns:
Returns the sooner of (now + timeout) and the existing deadline.
"""
deadline = datetime.datetime.max.replace(tzinfo=zoneinfo.ZoneInfo('UTC'))
if timeout:
deadline_timeout = (
datetime.datetime.now(tz=zoneinfo.ZoneInfo('UTC')) + timeout
)
deadline = min(deadline, deadline_timeout)
return deadline
class _AsyncClient:
"""Asynchronous client."""
def __init__(
self,
client: 'Client',
wait_for_ready: bool,
call_timeout: Optional[datetime.timedelta],
compress: bool,
chunk_tensors: bool,
propagate_deadline: bool = False,
):
self._client = client
self._wait_for_ready = wait_for_ready
self._call_timeout = call_timeout
self._compress = compress
self._chunk_tensors = chunk_tensors
self._propagate_deadline = propagate_deadline
def _build_handler(self, method: str):
"""Build a future handler for a given method."""
def call(*args, **kwargs):
f = futures.Future()
def set_exception(s):
try:
f.set_exception(translate_status(s))
except futures.InvalidStateError: # pytype: disable=module-attr
# Call could have been already canceled by the user.
pass
def set_result(r):
try:
f.set_result(r)
except futures.InvalidStateError:
# Call could have been already canceled by the user.
pass
deadline = _calculate_deadline(
self._call_timeout, self._propagate_deadline
)
canceller = self._client.AsyncPyCall(
method,
list(args),
kwargs,
set_result,
set_exception,
self._wait_for_ready,
deadline,
self._compress,
self._chunk_tensors,
)
def done_callback(f):
if f.cancelled():
canceller.Cancel()
f.add_done_callback(done_callback)
return f
return call
def __getattr__(self, method):
"""Gets a callable function for the method that returns a future.
Args:
method: Name of the method.
Returns:
Callable function for the method that returns a future.
"""
return self._build_handler(method)
def __call__(self, *args, **kwargs):
return self._build_handler('__call__')(*args, **kwargs)
class Client:
"""Client class for using Courier RPCs.
This provides a convenience wrapper around the CLIF bindings which allows
calling server methods as if they were class methods.
"""
def __init__(
self,
server_address: str,
compress: bool = False,
call_timeout: Optional[Union[int, float, datetime.timedelta]] = None,
wait_for_ready: bool = True,
chunk_tensors: bool = False,
*,
load_balancing_policy: Optional[str] = None,
propagate_deadline: bool = True,
):
"""Initiates a new client that will connect to a server.
Args:
server_address: Address of the server. If the string does not start with
"/" or "localhost" then it will be interpreted as a custom BNS
        registered server_name (the name passed to the Server constructor).
compress: Whether to use compression.
call_timeout: Sets a timeout to apply to all calls. If None or 0 then
no timeout is applied.
wait_for_ready: Sets `wait_for_ready` on the gRPC::ClientContext. This
specifies whether to wait for a server to come online.
chunk_tensors: Unsupported feature.
load_balancing_policy: gRPC load balancing policy. Use 'round_robin' to
spread the load across all backends. More details at:
https://github.com/grpc/grpc/blob/master/doc/load-balancing.md
propagate_deadline: Unsupported feature.
"""
self._init_args = (server_address, compress, call_timeout, wait_for_ready)
self._address = str(server_address)
self._compress = compress
self._client = py_client.PyClient(self._address, load_balancing_policy)
if call_timeout:
if isinstance(call_timeout, datetime.timedelta):
self._call_timeout = call_timeout
else:
self._call_timeout = datetime.timedelta(seconds=call_timeout)
else:
self._call_timeout = None
self._wait_for_ready = wait_for_ready
self._chunk_tensors = chunk_tensors
self._async_client = _AsyncClient(self._client, self._wait_for_ready,
self._call_timeout, self._compress,
self._chunk_tensors, propagate_deadline)
self._propagate_deadline = propagate_deadline
def __del__(self):
self._client.Shutdown()
def __reduce__(self):
return self.__class__, self._init_args
@property
def address(self) -> str:
return self._address
@property
def futures(self) -> _AsyncClient:
"""Gets an asynchronous client on which a method call returns a future."""
return self._async_client
def _build_handler(self, method: str):
"""Build a callable handler for a given method.
Args:
method: Name of the method to build.
Returns:
Handler for the method.
"""
@exception_handler
def func(*args, **kwargs):
deadline = _calculate_deadline(
self._call_timeout, self._propagate_deadline
)
return self._client.PyCall(
method,
list(args),
kwargs,
self._wait_for_ready,
deadline,
self._compress,
self._chunk_tensors,
)
return func
def __getattr__(self, method: str):
"""Gets a callable function for the method and sets it as an attribute.
Args:
method: Name of the method.
Returns:
Callable function for the method.
"""
func = self._build_handler(method)
setattr(self, method, func)
return func
@exception_handler
def __call__(self, *args, **kwargs):
return self._build_handler('__call__')(*args, **kwargs)
@exception_handler
def list_methods(client: Client) -> List[str]:
"""Lists the methods which are available on the server.
Args:
client: A client instance.
Returns:
List of method names.
"""
return client._client.ListMethods()
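if __name__ == '__main__':
  # Hedged usage sketch of the synchronous and futures-based client APIs,
  # assuming the compiled Courier bindings are available. The bound method
  # name 'multiply' is illustrative only.
  from courier.python import py_server  # pylint: disable=g-import-not-at-top
  demo_server = py_server.Server()
  demo_server.Bind('multiply', lambda a, b: a * b)
  demo_server.Start()
  demo_client = Client(
      demo_server.address, call_timeout=datetime.timedelta(seconds=5))
  # Synchronous call: blocks until the server answers or the timeout expires.
  assert demo_client.multiply(3, 4) == 12
  # Asynchronous call: returns a concurrent.futures.Future immediately.
  assert demo_client.futures.multiply(5, 6).result() == 30
  demo_server.Stop()
  demo_server.Join()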
| launchpad-master | courier/python/client.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for courier.python.py_client."""
from concurrent import futures
import datetime
import pickle
import threading
import time
from typing import Optional, Union
import zoneinfo
from absl.testing import absltest
from absl.testing import parameterized
from courier.python import client # pytype: disable=import-error
from courier.python import py_server # pytype: disable=import-error
import mock
import numpy as np
from pybind11_abseil.status import StatusNotOk # pytype: disable=import-error
class _A:
def add(self, a, b):
return a + b
class PyIntegrationTest(parameterized.TestCase):
def _call_sleep(self, duration, use_async):
if use_async:
self._client.futures.sleep(duration).result()
else:
self._client.sleep(duration)
def setUp(self):
super(PyIntegrationTest, self).setUp()
self._server = py_server.Server()
self._server.Bind('no_args', lambda: 1000)
self._server.Bind('lambda_add', lambda a, b: a + b)
self._server.Bind('method_add', _A().add)
self._server.Bind('add_default', lambda a, b=100: a + b)
self._server.Bind('echo', lambda a: a)
def _exception_method():
raise ValueError('Exception method called')
self._server.Bind('exception_method', _exception_method)
self._server.Bind('sleep', time.sleep)
self._server.Bind('rebind', lambda: 1234)
self._server.Bind('bytes_value', lambda: b'1234')
self._server.Bind('unicode_value', lambda: u'1234')
self._server.Start()
self._client = client.Client(self._server.address)
def tearDown(self):
self._server.Stop()
self._server.Join()
super(PyIntegrationTest, self).tearDown()
def testLambdaCall(self):
result = self._client.lambda_add(12, 5)
self.assertEqual(result, 17)
def testClassMethodCall(self):
result = self._client.method_add(12, 5)
self.assertEqual(result, 17)
def testCallWithoutArguments(self):
result = self._client.no_args()
self.assertEqual(result, 1000)
def testCallRebind(self):
result = self._client.rebind()
self.assertEqual(result, 1234)
self._server.Bind('rebind', lambda: 2345)
result = self._client.rebind()
self.assertEqual(result, 2345)
expected_msg = 'method rebind not found'
self._server.Unbind('rebind')
with self.assertRaisesRegex(StatusNotOk, expected_msg):
result = self._client.rebind()
self._server.Bind('rebind', lambda: 1234)
result = self._client.rebind()
self.assertEqual(result, 1234)
def testCallWithDefaultArguments(self):
result = self._client.add_default(23)
self.assertEqual(result, 123)
def testCallWithKwargs(self):
result = self._client.add_default(23, b=500)
self.assertEqual(result, 523)
def testPythonErrorIsRaised(self):
expected_msg = r'Exception method called'
with self.assertRaisesRegex(StatusNotOk, expected_msg):
self._client.exception_method()
def testAsyncFutureCall(self):
future = self._client.futures.add_default(23)
self.assertEqual(future.result(), 123)
def testAsyncFutureCancel(self):
future = self._client.futures.sleep(2)
self.assertTrue(future.cancel())
try:
future.result()
self.fail('Expected future to raise cancelled exception')
except futures.CancelledError:
pass
except StatusNotOk as e:
self.assertIn('CANCEL', e.message)
def testAsyncFutureException(self):
future = self._client.futures.exception_method()
expected_msg = r'Exception method called'
with self.assertRaisesRegex(StatusNotOk, expected_msg):
future.result()
def testListMethods(self):
self.assertCountEqual(
client.list_methods(self._client),
[
'no_args',
'lambda_add',
'add_default',
'exception_method',
'sleep',
'method_add',
'rebind',
'bytes_value',
'unicode_value',
'echo',
])
def testUnicodeAddress(self):
client.Client(u'test')
py_server.Server(u'test')
def testBytesValue(self):
result = self._client.bytes_value()
self.assertEqual(result, b'1234')
def testUnicodeValue(self):
result = self._client.unicode_value()
self.assertEqual(result, u'1234')
def testClientWaitsUntilServerIsUp(self):
my_server = py_server.Server()
my_client = client.Client(my_server.address)
f = my_client.futures.no_args()
my_server.Bind('no_args', lambda: 1000)
my_server.Start()
self.assertEqual(f.result(), 1000)
my_server.Stop()
@parameterized.named_parameters(
('timeout_none', None),
('timeout_0', 0),
('timeout_timedelta_0', datetime.timedelta(seconds=0)),
)
def testNoErrorWhenNoTimeoutSpecified(self, timeout):
self._client = client.Client(
self._server.address, call_timeout=timeout
)
self._client.sleep(1)
@parameterized.named_parameters(
('float', 2.0), ('int', 2), ('timedelta', datetime.timedelta(seconds=2))
)
def testClientInitHandlesDifferentTimeoutTypes(
self, timeout: Optional[Union[int, float, datetime.timedelta]]
):
self._client = client.Client(self._server.address, call_timeout=timeout)
self._client.sleep(1)
with self.assertRaisesRegex(StatusNotOk, 'Deadline Exceeded'):
self._client.sleep(3)
@parameterized.named_parameters(('async', True), ('sync', False))
def testNoErrorWhenDurationLessThanTimeout(self, use_async: bool):
self._client = client.Client(
self._server.address, call_timeout=datetime.timedelta(seconds=3)
)
self._call_sleep(duration=2, use_async=use_async)
@parameterized.named_parameters(('async', True), ('sync', False))
def testErrorDeadlineExceededWhenDurationGreaterThanTimeout(
self, use_async: bool
):
self._client = client.Client(
self._server.address, call_timeout=datetime.timedelta(seconds=1)
)
with self.assertRaisesRegex(StatusNotOk, 'Deadline Exceeded'):
self._call_sleep(duration=2, use_async=use_async)
@parameterized.named_parameters(('async', True), ('sync', False))
def testErrorDeadlineExceededWhenUnknownServerAddress(self, use_async: bool):
self._client = client.Client(
'[::]:12345', call_timeout=datetime.timedelta(seconds=1)
)
with self.assertRaisesRegex(StatusNotOk, 'Deadline Exceeded'):
self._call_sleep(duration=0, use_async=use_async)
@parameterized.named_parameters(('async', True), ('sync', False))
def testErrorNotFoundWhenMethodDoesNotExist(self, use_async: bool):
self._client = client.Client(
self._server.address, call_timeout=datetime.timedelta(seconds=1)
)
with self.assertRaisesRegex(
StatusNotOk, 'method nonexistent_method not found'
):
if use_async:
self._client.futures.nonexistent_method().result()
else:
self._client.nonexistent_method()
def testWaitForReady(self):
my_client_bad = client.Client('[::]:12345', wait_for_ready=False)
with self.assertRaisesRegex(StatusNotOk,
'failed to connect to all addresses'):
my_client_bad.blah()
if __name__ == '__main__':
absltest.main()
| launchpad-master | courier/python/client_test.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Placeholders of network addresses to be evaluated at runtime."""
import abc
import os
import re
from typing import Optional
from absl import logging
import portpicker
_ADDRESS_NAME_VALID_PATTERN = re.compile('[a-z][a-z0-9]*')
class AbstractAddressBuilder(metaclass=abc.ABCMeta):
"""Base class for creating a platform-specific address at runtime."""
@abc.abstractmethod
def build(self) -> str:
"""Builds an address."""
class SimpleLocalAddressBuilder(AbstractAddressBuilder):
"""Creates a locahost:port address, with port decided by portpicker."""
def __init__(self):
# This automatically makes use of PORTSERVER_ADDRESS (usually set by test)
self._address = 'localhost:{}'.format(portpicker.pick_unused_port())
def build(self) -> str:
return self._address
class Address(object):
"""A network address to be evaluated.
  1. An unbound address is created using `address = Address()`.
  2. An address should be assigned to exactly one node, e.g. using
     `address.assign(node)`.
  3. Upon launching, Launchpad will call bind() for each address to assign the
     address builder, which constructs the actual string-format address at
     runtime. Launchpad has access to the addresses used in a program because
     each node maintains an `addresses` list of the addresses it knows about
     (these can be the address(es) the node owns, or addresses that its
     sub-nodes own).
  4. At runtime, use `address.resolve()` to finally resolve it.
Using an unbound address will trigger an error.
Attributes:
name: Name of this address.
"""
def __init__(self, name: Optional[str] = None):
"""Initializes an address object.
Args:
name: (Optional) Name of the address.
"""
if name is not None and not _ADDRESS_NAME_VALID_PATTERN.fullmatch(name):
raise ValueError(f'Wrong address name: {name} does not match '
f'{_ADDRESS_NAME_VALID_PATTERN.pattern}.')
self.name = name
self._address_builder = None # type: AbstractAddressBuilder
self._owning_node = None
def bind(self, address_builder: AbstractAddressBuilder) -> None:
"""Sets a function that creates the platform-specific address at runtime."""
# The address cannot be evaluated before we launch, because we might not
# have all the necessary info for evaluation
self._address_builder = address_builder
def resolve(self) -> str:
"""Returns the address as a string."""
if not self._address_builder:
if self._owning_node is None:
raise RuntimeError(
"The lp.Address hasn't been assigned to any node, "
'thus it cannot be resolved. Use '
'`address.assign(node)` to assign an address to a node')
raise RuntimeError(
f'The lp.Address associated to the node {self._owning_node} has not '
'been bound to any address builder. Launchpad is responsible for '
'doing that at launch time. If you are in tests, '
'you can use:\n\n'
'from launchpad.launch.test_multi_threading import address_builder as test_address_builder\n'
'...\n'
'test_address_builder.bind_addresses([node])')
return self._address_builder.build()
def assign(self, node) -> None:
"""Assigns the Address to the specified node (must be done exactly once)."""
if self._owning_node is not None:
if self._owning_node is node:
logging.warning(
'You are binding an lp.Address twice on the same node '
"of type %s it's probably a mistake.", node)
return
      raise ValueError('You are associating a node with this lp.Address, which '
                       'is already assigned to another node. The new node and '
                       'the previously assigned node are:\n'
                       f'{node}\n{self._owning_node}')
self._owning_node = node
node.addresses.append(self)
def __getstate__(self):
state = self.__dict__.copy()
# Don't pickle `_owning_node`, as it's here only for launch-time checks.
# Instead, store a description of the node. If a restored instance is
# re-pickled, leave the existing node description unchanged.
if not isinstance(self._owning_node, str):
state['_owning_node'] = ('Restored from pickle node named: ' +
repr(self._owning_node))
return state
def get_port_from_address(address: str) -> int:
"""Returns the port from a given address.
Note that Launchpad uses a convention where named ports are passed as
environment variables. For example, for the named port 'baz', the actual port
value will be stored in LP_PORT_baz environment variable.
Args:
address: address with a named port or a host:port address.
Returns:
The port number as an integer.
"""
port_name = address.split(':')[-1]
if port_name.isdigit():
return int(port_name)
else:
return int(os.environ['LP_PORT_' + port_name])
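if __name__ == '__main__':
  # Minimal sketch of the Address lifecycle documented above. `_FakeNode` is a
  # stand-in that only provides the `addresses` list expected by assign();
  # real programs attach addresses to Launchpad nodes instead.
  class _FakeNode:
    def __init__(self):
      self.addresses = []
  node = _FakeNode()
  address = Address('demo')
  address.assign(node)                       # Exactly one owning node.
  address.bind(SimpleLocalAddressBuilder())  # Normally done by the launcher.
  resolved = address.resolve()               # E.g. 'localhost:12345'.
  assert get_port_from_address(resolved) == int(resolved.split(':')[-1])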
| launchpad-master | launchpad/address.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for launchpad.program."""
from unittest import mock
from absl.testing import absltest
from launchpad import program as lp_program
from launchpad.nodes import base
class ProgramTest(absltest.TestCase):
def test_add_node(self):
program = lp_program.Program('foo')
node_foo = mock.Mock(autospec=base.Node)
program.add_node(node_foo, 'foo')
node_bar = mock.Mock(autospec=base.Node)
program.add_node(node_bar, 'bar')
self.assertEqual(program.groups['foo'], [node_foo])
self.assertEqual(program.groups['bar'], [node_bar])
def test_add_nodes_to_group(self):
program = lp_program.Program('foo')
nodes_foo = [mock.Mock(autospec=base.Node), mock.Mock(autospec=base.Node)]
with program.group('foo'):
for node in nodes_foo:
program.add_node(node)
nodes_bar = [mock.Mock(autospec=base.Node), mock.Mock(autospec=base.Node)]
with program.group('bar'):
for node in nodes_bar:
program.add_node(node)
self.assertEqual(program.groups['foo'], nodes_foo)
self.assertEqual(program.groups['bar'], nodes_bar)
def test_add_node_without_label(self):
program = lp_program.Program('foo')
    with self.assertRaisesRegex(ValueError, 'Label should not be empty.'):
program.add_node(mock.Mock(autospec=base.Node))
def test_add_node_with_empty_label(self):
program = lp_program.Program('foo')
    with self.assertRaisesRegex(ValueError, 'Label should not be empty.'):
program.add_node(mock.Mock(autospec=base.Node), label='')
def test_add_group_with_empty_label(self):
program = lp_program.Program('foo')
    with self.assertRaisesRegex(ValueError, 'Label should not be empty.'):
with program.group(''):
program.add_node(mock.Mock(autospec=base.Node))
def test_use_label_in_node_and_group(self):
program = lp_program.Program('foo')
    with self.assertRaisesRegex(
ValueError, 'label does not match the current group'):
with program.group('foo'):
program.add_node(mock.Mock(autospec=base.Node), label='bar')
# If the label matches the current group, then it's ok.
with program.group('foo'):
program.add_node(mock.Mock(autospec=base.Node), label='foo')
def test_nested_groups(self):
program = lp_program.Program('foo')
    with self.assertRaisesRegex(ValueError, r'group\(\) cannot be nested.'):
with program.group('foo'):
with program.group('bar'):
program.add_node(mock.Mock(autospec=base.Node))
def test_duplicated_label(self):
program = lp_program.Program('foo')
node1 = mock.Mock(autospec=base.Node)
node2 = mock.Mock(autospec=base.Node)
program.add_node(node1, 'foo')
program.add_node(node2, 'foo')
with program.group('bar'):
program.add_node(node1)
with program.group('bar'):
program.add_node(node2)
self.assertEqual(program.groups['foo'], [node1, node2])
self.assertEqual(program.groups['bar'], [node1, node2])
def test_get_all_nodes(self):
nodes = []
program = lp_program.Program('test')
node_foo = mock.Mock(autospec=base.Node)
nodes.append(node_foo)
program.add_node(node_foo, 'foo')
node_bar = mock.Mock(autospec=base.Node)
nodes.append(node_bar)
program.add_node(node_bar, 'bar')
with program.group('baz'):
node_baz = mock.Mock(autospec=base.Node)
nodes.append(node_baz)
program.add_node(node_baz)
self.assertCountEqual(program.get_all_nodes(), nodes)
if __name__ == '__main__':
absltest.main()
| launchpad-master | launchpad/program_test.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""LazyLoader providing support for loading symbols upon first reference."""
import importlib
import sys
import typing
# Set to true in your local workspace to work around issues with lazy imports.
# Please report any cases of usage to stanczyk@, so we can address them.
_DISABLE_LAZY_IMPORTS = False
class LazyImports():
"""Turns all imports performed in its scope into lazy.
It is used to avoid pulling in large dependencies.
"""
def __init__(self, parent, add_to_globals=True):
"""Initializes LazyImports.
Args:
parent: Name of the parent module doing the imports.
add_to_globals: Whether imported symbols should be added to importing
module globals. Rule of thumb is to not add for __init__ modules,
as it is important for these modules to resolve Lazy symbol upon its
first access (not when accessing members of the Lazy import).
"""
self._parent = sys.modules[parent]
self._globals = vars(self._parent)
self._add_to_globals = add_to_globals
self._symbols = dict()
def parent(self):
return self._parent
def _find_and_load(self, name, import_):
del import_
return _ModuleToLoad(name, self)
def _handle_fromlist(self, module, fromlist, import_):
del import_
assert len(fromlist) == 1
if not isinstance(module, _ModuleToLoad):
return _ModuleToLoad(module.__name__, self)
return module
def __enter__(self):
if typing.TYPE_CHECKING or _DISABLE_LAZY_IMPORTS:
return
# Start capturing import statements.
self._org_find_and_load = importlib._bootstrap._find_and_load
self._org_handle_fromlist = importlib._bootstrap._handle_fromlist
importlib._bootstrap._find_and_load = self._find_and_load
importlib._bootstrap._handle_fromlist = self._handle_fromlist
def __exit__(self, type_, value, traceback):
if typing.TYPE_CHECKING or _DISABLE_LAZY_IMPORTS:
return
# Stop capturing import statements.
importlib._bootstrap._find_and_load = self._org_find_and_load
importlib._bootstrap._handle_fromlist = self._org_handle_fromlist
# If symbols are not added to globals then __getattr__ has to be patched.
if not self._add_to_globals:
self._parent.__getattr__ = self.__getattr__
# Make sure lazy symbols know their local name within the importing module.
for name in list(self._globals.keys()):
member = self._globals[name]
if isinstance(member, _MemberToLoad):
member.set_export_name(name)
if not self._add_to_globals:
del self._globals[name]
self._symbols[name] = member
def __getattr__(self, name):
if name in self._symbols:
res = self._symbols[name].load()
self._parent.__dict__[name] = res
return res
raise AttributeError(f'module {self._parent!r} has no attribute {name!r}')
class _MemberToLoad():
"""Module member to load in a lazy way."""
def __init__(self, name, parent):
self._name = name
self._export_name = name
self._parent = parent
def __getattr__(self, item):
return getattr(self.load(), item)
def __call__(self, *args, **kwargs):
return self.load()(*args, **kwargs)
def load(self):
try:
res = importlib.import_module(f'{self._parent.name()}.{self._name}')
except ModuleNotFoundError:
res = importlib.import_module(self._parent.name()).__dict__[self._name]
self._parent.parent().parent().__dict__[self._export_name] = res
return res
def set_export_name(self, name):
self._export_name = name
class _ModuleToLoad():
"""Module to load in a lazy way."""
def __init__(self, name, parent):
self._name = name
self._parent = parent
def name(self):
return self._name
def parent(self):
return self._parent
def __getattr__(self, item):
return _MemberToLoad(item, self)
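# Typical usage sketch (mirroring launchpad/__init__.py): wrap heavyweight
# imports in the context manager so that they are only resolved on first
# attribute access, e.g.
#
#   with lazy_loader.LazyImports(__name__, add_to_globals=False):
#     from launchpad.nodes.reverb.node import ReverbNode
#
# Accessing ReverbNode on the importing module later triggers the real import
# via the patched __getattr__ above.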
| launchpad-master | launchpad/lazy_loader.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing all Launchpad flags."""
from absl import flags
from launchpad import context
FLAGS = flags.FLAGS
_DEFAULT_LAUNCH_TYPE = context.LaunchType.LOCAL_MULTI_THREADING.value
LAUNCH_TYPE = flags.DEFINE_enum(
'lp_launch_type',
_DEFAULT_LAUNCH_TYPE, [t.value for t in context.LaunchType],
'How to launch a Launchpad program when launch() is called',
allow_override=True)
LP_TERMINATION_NOTICE_SECS = flags.DEFINE_integer(
'lp_termination_notice_secs', 10,
'Send termination notice to all nodes that many seconds before hard '
    'termination. Set to 0 to trigger hard termination right away (skip '
    'termination notice); set to a negative value to disable hard termination.')
LP_WORKER_MANAGER_V2 = flags.DEFINE_bool(
'lp_worker_manager_v2', False,
'Use the new WorkerManager implementation (note: please do NOT use this '
'flag. It will be removed once V2 is fully implemented).')
flags.DEFINE_string('tmux_open_window', None,
'Window in new Tmux session to switch to.')
flags.DEFINE_string('tmux_session_name', 'launchpad',
'Prefix session name to use for the Tmux session.')
| launchpad-master | launchpad/flags.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Launchpad is a tool to define a distributed topology."""
from launchpad import lazy_loader
# Basic types
from launchpad import flags
from launchpad.context import LaunchType
from launchpad.context import is_local_launch
from launchpad.context import is_local_launch_or_test
from launchpad.flags import LAUNCH_TYPE # Set via --lp_launch_type
from launchpad.program import Program
# Launch function
from launchpad.launch.launch import launch
# Nodes
from launchpad.nodes.courier.node import CourierHandle
from launchpad.nodes.courier.node import CourierNode
from launchpad.nodes.courier.node import CourierClient
from launchpad.nodes.multi_threading_colocation.node import MultiThreadingColocation
from launchpad.nodes.python.node import PyClassNode
from launchpad.nodes.python.node import PyNode
# Addressing
from launchpad.address import Address
from launchpad.address import AbstractAddressBuilder
from launchpad.address import get_port_from_address
from launchpad.address import SimpleLocalAddressBuilder
# Stopping a program
from launchpad.launch.worker_manager_migration import register_stop_handler
from launchpad.launch.worker_manager_migration import stop_event
from launchpad.launch.worker_manager_migration import unregister_stop_handler
from launchpad.launch.worker_manager_migration import wait_for_stop
from launchpad.program_stopper.program_stopper import make_program_stopper
from launchpad.stop_program.stop import stop
with lazy_loader.LazyImports(__name__, False):
from launchpad.nodes.reverb.node import ReverbNode
from launchpad.nodes.python.xm_docker import DockerConfig
from launchpad.nodes.courier.courier_utils import batched_handler
| launchpad-master | launchpad/__init__.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Platform-specific configuration on the node."""
import enum
import threading
from typing import Any, Callable, Optional, Union
class LaunchType(enum.Enum):
"""The different launch types supported by Launchpad.
Launch type can be specified through `lp_launch_type` command line flag or
by passing `launch_type` parameter to lp.launch() call.
"""
# Launch locally using multiple threads logging on the same terminal with
# different colors. Upon crash it drops into PDB for the
# thread that crashed, and locks output from all other threads.
LOCAL_MULTI_THREADING = 'local_mt'
# Launch locally using multiple processes. Can display logs from different
# nodes in separate windows. The behavior can be controlled using the
# `terminal` argument in `launch.launch`.
LOCAL_MULTI_PROCESSING = 'local_mp'
# Launch using multiple processes, as a test.
TEST_MULTI_PROCESSING = 'test_mp'
# Launch as a test using multiple threads (same as LOCAL_MULTI_THREADING but
# terminates the process instead of dropping into PDB).
TEST_MULTI_THREADING = 'test_mt'
# Launch locally using docker containers, similar to local multi processing.
# NOTE: Experimental, do not use.
LOCAL_DOCKER = 'local_docker'
# Launch on Google Cloud using Vertex AI (https://cloud.google.com/vertex-ai)
  # through xmanager. For an example of how to use VERTEX_AI launch, please
# refer to Launchpad's example:
# https://github.com/deepmind/launchpad/tree/master/launchpad/examples/consumer_producers/launch_vertex_ai.py
# It is also worth looking at RL agents examples from Acme, for instance:
# https://github.com/deepmind/acme/tree/master/examples/gym/lp_d4pg.py
# NOTE: Using this runtime involves prior GCP project configuration.
# Please follow the steps described at
# https://github.com/deepmind/xmanager#create-a-gcp-project.
VERTEX_AI = 'vertex_ai'
class LaunchContext(object):
"""Stores platform-specific launch config of a node.
This is created and set on the node only at launch time.
"""
def __init__(self):
self._launch_type = None
self._launch_config = None
self._program_stopper = None
self._is_initialized = False
@property
def launch_type(self) -> LaunchType:
    self._check_initialized()
return self._launch_type
@property
def launch_config(self) -> Any:
    self._check_initialized()
return self._launch_config
@property
def program_stopper(self) -> Callable[[], None]:
    self._check_initialized()
return self._program_stopper
  def _check_initialized(self):
if not self._is_initialized:
raise RuntimeError(
'Launch context is not yet initialized. It should be initialized by '
'calling initialize() at launch time.')
def initialize(self, launch_type: LaunchType, launch_config: Any,
program_stopper: Optional[Callable[[], None]] = None):
self._launch_config = launch_config
self._launch_type = launch_type
self._program_stopper = program_stopper
self._is_initialized = True
_LAUNCH_CONTEXT = threading.local()
def get_context():
context = getattr(_LAUNCH_CONTEXT, 'lp_context', None)
assert context, ("Launchpad context was not instantiated. Are you trying to "
"access it outside of the node's main thread?")
return context
def set_context(context: LaunchContext):
_LAUNCH_CONTEXT.lp_context = context
def is_local_launch(launch_type: Union[LaunchType, str]) -> bool:
"""Returns true if launch type is local multithreading/multiprocessing.
If you use `--lp_launch_type=...`, please call it with
`is_local_launch(lp.LAUNCH_TYPE.value)`, where lp.LAUNCH_TYPE.value gives the
value of `--lp_launch_type`.
Args:
launch_type: A string (e.g., 'local_mp') or a LaunchType object.
Returns:
True if launch_type is a local one, otherwise False.
"""
if isinstance(launch_type, str):
launch_type = LaunchType(launch_type)
return launch_type in [
LaunchType.LOCAL_MULTI_THREADING, LaunchType.LOCAL_MULTI_PROCESSING
]
def is_local_launch_or_test(launch_type: Union[LaunchType, str]) -> bool:
"""Returns true if launch type is local/test multithreading/multiprocessing.
If you use `--lp_launch_type=...`, please call it with
`is_local_launch_or_test(lp.LAUNCH_TYPE.value)`, where lp.LAUNCH_TYPE.value
gives the value of `--lp_launch_type`.
Args:
launch_type: A string (e.g., 'local_mp') or a LaunchType object.
Returns:
True if launch_type is local or unit test, otherwise False.
"""
if isinstance(launch_type, str):
launch_type = LaunchType(launch_type)
return launch_type in [
LaunchType.LOCAL_MULTI_THREADING, LaunchType.LOCAL_MULTI_PROCESSING,
LaunchType.TEST_MULTI_PROCESSING, LaunchType.TEST_MULTI_THREADING
]
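if __name__ == '__main__':
  # Minimal sketch: the helpers above accept both the raw flag strings passed
  # via --lp_launch_type and LaunchType enum members.
  assert is_local_launch('local_mp')
  assert is_local_launch(LaunchType.LOCAL_MULTI_THREADING)
  assert not is_local_launch(LaunchType.VERTEX_AI)
  assert is_local_launch_or_test('test_mt')
  assert not is_local_launch_or_test(LaunchType.VERTEX_AI)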
| launchpad-master | launchpad/context.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A Launchpad program."""
import contextlib
import dataclasses
import itertools
from typing import Any, Dict, List, Optional, Tuple
from launchpad.nodes import base
HandleType = Any
class Program(object):
"""A Launchpad program, representing a distributed program and its topology.
  A Launchpad program contains nodes; each node could be just a process, or
  could provide a service, etc. (please refer to the Launchpad documentation
  for the types of nodes). Homogeneous nodes are organized into groups. Here's
  an example of adding nodes to a group:
with program.group('actor'):
program.add_node(lp.CourierNode(MyActor, ...))
`add_node()` returns a handle, which can be passed to another node for
communication purpose, and is the way to set up distributed communication
topology. For example:
with program.group('learner'):
learner = program.add_node(lp.CourierNode(MyLearner, ...))
with program.group('actor'):
      program.add_node(lp.CourierNode(MyActor, learner=learner))
"""
def __init__(self, name: str):
self._name = name
self._groups = {} # type: Dict[str, List[base.Node]]
# Group to add nodes to. Used by group()
self._current_group = None # type: str
def add_node(self,
node: base.Node,
label: Optional[str] = None) -> HandleType:
"""Adds node to the program and returns the node handle."""
if self._current_group:
if label and label != self._current_group:
raise ValueError('The given label does not match the current group: '
f'{label} vs {self._current_group}.')
label = self._current_group
else:
if not label:
raise ValueError('Label should not be empty.')
if label not in self._groups:
self._groups[label] = [node]
else:
self._groups[label].append(node)
return node.create_handle()
@contextlib.contextmanager
def group(self, label: str):
"""Creates a group for a collection of homogeneous nodes."""
if not label:
raise ValueError('Label should not be empty.')
if self._current_group:
raise ValueError('group() cannot be nested.')
try:
self._current_group = label
yield
finally:
# Try/finally is to make sure that the current_group is correctly
# reset even if an exception occurs.
self._current_group = None
@property
def current_group(self) -> str:
if self._current_group is None:
raise ValueError('Current group is only available in group context.')
return self._current_group
def get_all_nodes(self) -> List[base.Node]:
return list(itertools.chain(*self._groups.values()))
@property
def name(self) -> str:
return self._name
@property
def groups(self) -> Dict[str, List[base.Node]]:
return self._groups
def make_program(*nodes: base.Node, name: str = 'launchpad'):
"""A shortcut to create a program from a list of nodes.
This simplifies the syntax. For example you can do a one-liner launch:
lp.launch(lp.make_program(lp.PyNode(lambda: ...)))
Args:
*nodes: Nodes to run.
name: An optional name of the program.
Returns:
A lp.Program object
"""
program = Program(name)
for node in nodes:
program.add_node(node)
return program
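if __name__ == '__main__':
  # Minimal sketch of building a topology; mock nodes stand in for real ones
  # (e.g. lp.CourierNode), mirroring launchpad/program_test.py. add_node()
  # returns a handle that would normally be passed to other nodes.
  from unittest import mock  # pylint: disable=g-import-not-at-top
  program = Program('demo')
  program.add_node(mock.Mock(autospec=base.Node), label='learner')
  with program.group('actor'):
    program.add_node(mock.Mock(autospec=base.Node))
    program.add_node(mock.Mock(autospec=base.Node))
  assert sorted(program.groups) == ['actor', 'learner']
  assert len(program.get_all_nodes()) == 3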
| launchpad-master | launchpad/program.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Stop a running Launchpad program."""
from absl import flags
from launchpad import context
FLAGS = flags.FLAGS
def stop():
"""Terminates the entire experiment.
Only a node's main thread can do this.
"""
context.get_context().program_stopper()
| launchpad-master | launchpad/stop_program/stop.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| launchpad-master | launchpad/stop_program/__init__.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| launchpad-master | launchpad/nodes/__init__.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Delaying object constructions."""
import abc
from typing import Callable, Generic, TypeVar, Union
from absl import flags
import tree
T = TypeVar('T')
_LP_CATCH_DEFERRED_EXCEPTION = flags.DEFINE_boolean(
'lp_catch_deferred_exception', True,
'Whether exceptions raised by the constructors of deferred objects should '
'be caught and annotated with the initiation stack of the object. A negative '
'side effect of using this is that pdb-on-error will enter at the re-raise '
'point rather than the source of the original exception.')
class _Uninitialized:
pass
class Dereferenceable(Generic[T], abc.ABC):
@abc.abstractmethod
def dereference(self) -> T:
"""Dereferences itself."""
def maybe_dereference(obj: Union[T, Dereferenceable[T]]) -> T:
if isinstance(obj, Dereferenceable):
return obj.dereference()
return obj
_EXCEPTION_MESSAGE = ('Error ({error_msg}) occurred when evaluating Deferred '
'defined at:\n{init_stack}\nNOTE! If you want pdb to '
'enter at raise point of the original exception, '
'please rerun with flag --nolp_catch_deferred_exception')
class Deferred(Dereferenceable[T], Generic[T]):
"""Delays object construction to achieve serializability.
Assuming we want to pass a non-serializable Python object, say an environment
object, to an Actor, the following will lead to a serialization error:
program.add_node(lp.CourierNode(Actor, envloader.load_from_settings(
platform='Atari',
settings={
'levelName': 'pong',
'env_loader.version': requested_version,
'zero_indexed_actions': True,
'interleaved_pixels': True,
})))
This helper class delays the object construction and fixes this error. The
object is constructed when the Actor is actually instantiated remotely, where
the Actor constructed will receive an actual environment object (just like how
handles are dereferenced automatically):
program.add_node(lp.CourierNode(Actor, lp.Deferred(
envloader.load_from_settings,
platform='Atari',
settings={
'levelName': 'pong',
'env_loader.version': requested_version,
'zero_indexed_actions': True,
'interleaved_pixels': True,
})))
"""
def __init__(self, constructor: Callable[..., T], *args, **kwargs) -> None:
self._constructor = constructor
self._args = args
self._kwargs = kwargs
self._init_stack = 'Stack trace missing'
self._deferred_object = _Uninitialized()
def dereference(self) -> T:
if isinstance(self._deferred_object, _Uninitialized):
args, kwargs = tree.map_structure(maybe_dereference,
(self._args, self._kwargs))
if not _LP_CATCH_DEFERRED_EXCEPTION.value:
# Allows the user to pdb where the exception happens, but won't show
# where the deferred object was originally defined.
return self._constructor(*args, **kwargs)
try:
self._deferred_object = self._constructor(*args, **kwargs)
except Exception as e:
new_message = _EXCEPTION_MESSAGE.format(
init_stack=''.join(self._init_stack),
# For clarity during pdb, we also inline the internal error message.
error_msg=str(e))
raise RuntimeError(new_message) from e
return self._deferred_object
def _apply_to_args(self, fn):
return tree.map_structure(fn, (self._args, self._kwargs))
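# The sketch below is illustrative only and not part of the library: it shows
# how a `Deferred` delays construction (here of a plain dict standing in for a
# non-serializable object) until `maybe_dereference` is called.
if __name__ == '__main__':
  flags.FLAGS(['dereference_example'])  # Mark flags as parsed so .value works.
  deferred_settings = Deferred(dict, level_name='pong', num_players=1)
  # The dict is only constructed at this point.
  settings = maybe_dereference(deferred_settings)
  assert settings == {'level_name': 'pong', 'num_players': 1}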
| launchpad-master | launchpad/nodes/dereference.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A Launchpad Node.
Node represents a service. It may return a handle for others to interact with
it.
"""
import abc
import functools
from typing import Any, Generic, List, Optional, Sequence, Set, TypeVar
from launchpad import address as lp_address
from launchpad import context as lp_context
from launchpad.nodes import dereference
from launchpad.program_stopper import program_stopper
ClientType = TypeVar('ClientType')
HandleType = TypeVar('HandleType', bound='Handle')
class Handle(dereference.Dereferenceable[ClientType], Generic[ClientType]):
"""Represents an interface of the service (the node).
Call `.dereference()` to get the actual client object of this service (to be
implemented in subclasses).
"""
def connect(self, node: 'Node[Handle[ClientType]]', label: str) -> None:
"""Called to let this handle know about it's connecting to a node.
This is supposed to be called:
1. Before creating any executables
2. Before any address binding happens
The motivation is to give the handle a chance to configure itself for the
node before it is turned into executables and before addresses are
finalized.
Args:
node: The node that the handle connects to.
label: Label of the node.
"""
pass
def transform(self, executables: Sequence[Any]) -> Sequence[Any]:
"""Transforms the executables that make use of this handle."""
return executables
class Node(Generic[HandleType], metaclass=abc.ABCMeta):
"""Represents a service, and may return a Handle for interaction with it."""
def __init__(self) -> None:
# This is a low-level API to allow Node/Handle to access launch config
# during run time. It's only available after launch, and it's set by the
# launcher.
self._launch_context = lp_context.LaunchContext()
# Handles used by this node (to interact with other nodes)
self._input_handles = [] # type: List[Handle[Any]]
# Handles created by this node
# Note: `type: List[HandleType]` is not supported yet.
self._created_handles = [] # type: List[Handle]
# Addresses known to the node. This exists so that launchpad can, from
# the program (which contains the nodes), list all the addresses that need
# to be bound before launch.
# `addresses` usually contains the address(es) owned by the node. However,
# in case of nodes containing other nodes (e.g. multi-threading nodes), it
# will also contain addresses owned by sub-nodes.
# Thus, use `address.assign` to give ownership of an address to a node,
# and `addresses.append` to only expose the address to launchpad launch
# mechanism.
self.addresses = [] # type: List[lp_address.Address]
@property
def launch_context(self):
return self._launch_context
def _initialize_context(self, launch_type: lp_context.LaunchType,
launch_config: Any):
self._launch_context.initialize(
launch_type, launch_config,
program_stopper.make_program_stopper(launch_type))
def _track_handle(self, handle: HandleType) -> HandleType:
"""Keeps track of created handles.
MUST be called in create_handle().
This is called so that the node knows about the handle it creates. The
reason we don't automate this is that we would lose the return annotation if
we wrapped create_handle() in a base class method (i.e., the base class
wrapper method doesn't know about the subclass return type).
Args:
handle: The handle (MUST be created by this node) to track.
Returns:
The same handle that was passed in, for nicer syntax at the call site.
"""
self._created_handles.append(handle)
return handle
@abc.abstractmethod
def create_handle(self) -> HandleType:
"""Creates a handle to interact with this node.
MUST call _track_handle() after creating a handle.
"""
raise NotImplementedError()
@abc.abstractstaticmethod
def to_executables(nodes, label, context):
"""Creates executables for a specific launch type."""
raise NotImplementedError()
def bind_addresses(self, **kwargs) -> None:
"""Binds addresses of the node."""
del kwargs # Unused.
@property
def input_handles(self) -> List[Handle[Any]]:
return list(self._input_handles)
def allocate_address(self, address: lp_address.Address) -> None:
"""Low-level API to add an address to listen to.
Prefer `address.assign(node)`.
This is a low-level API that users shouldn't need most of the time.
Args:
address: Address to listen to (i.e., to create a server).
"""
address.assign(self)
@classmethod
def default_launch_config(cls, launch_type: lp_context.LaunchType):
"""Defines the default launch config of this node type.
This is optional. The returned config is conditional on the launch type.
Args:
launch_type: Return the launch_config for this launch_type.
"""
raise NotImplementedError(
f'Launch config has to be explicitly specified for {cls.__name__}')
def extract_handles(
obj: Any,
handles: List[Handle],
visited: Optional[Set[int]] = None,
) -> None:
"""Extract the handles of `obj` to and add them into `handles`."""
visited = visited or set()
# Transitive input_handles from Deferred objects are included.
if isinstance(obj, dereference.Deferred):
if id(obj) not in visited:
visited.add(id(obj))
obj._apply_to_args(
functools.partial(extract_handles, handles=handles, visited=visited))
elif isinstance(obj, Handle):
handles.append(obj)
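# Illustrative sketch only (not part of the library): demonstrates how
# `extract_handles` walks through `Deferred` arguments and collects handles,
# using a minimal concrete `Handle` defined just for this example.
if __name__ == '__main__':
  class _ExampleHandle(Handle[str]):
    def dereference(self) -> str:
      return 'client'
  example_handle = _ExampleHandle()
  collected: List[Handle] = []
  extract_handles(dereference.Deferred(dict, h=example_handle), collected)
  assert collected == [example_handle]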
| launchpad-master | launchpad/nodes/base.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| launchpad-master | launchpad/nodes/courier/__init__.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A PyClassNode subclass that also exposes the instance as a Courier server."""
import datetime
from typing import Any, Callable, Generic, Optional, Mapping, TypeVar
from absl import logging
import courier
from launchpad import address as lp_address
from launchpad import flags as lp_flags
from launchpad.launch import worker_manager
from launchpad.launch import worker_manager_v2
from launchpad.nodes import base
from launchpad.nodes.courier import courier_utils
from launchpad.nodes.python import node as python
WorkerType = TypeVar('WorkerType')
CourierClient = courier.Client
COURIER_PORT_NAME = 'courier'
class CourierHandle(base.Handle[CourierClient]):
"""Handle of a CourierNode."""
def __init__(self, address: lp_address.Address, **kwargs):
self._address = address
self._kwargs = kwargs
def __getattr__(self, method):
raise AttributeError(
f'\'CourierHandle\' object has no attribute \'{method}\'. '
'Most likely you need to dereference the handle before use '
'(see launchpad.maybe_dereference).')
def set_client_kwargs(self, **kwargs):
self._kwargs = kwargs
def dereference(self) -> CourierClient:
return CourierClient(self._address.resolve(), **self._kwargs)
class CourierNode(Generic[WorkerType], python.PyClassNode[CourierHandle,
WorkerType]):
"""Exposes a Python instance as a Courier server.
This will initialize the object and expose all its public methods as Courier
RPC methods. Attributes and methods whose names start with an underscore are
not exposed. After that, run() will be called if it's provided.
When run() is provided, the server will terminate at the end of run().
Otherwise, it will serve indefinitely (until the job/experiment terminates).
Advanced usage: if the object has a set_courier_server() method, it will be
called with the courier server object passed in as the only argument. The
courier server will then be managed by the user (e.g., need to manually call
Start() of the courier server).
"""
def __init__(self,
constructor: Callable[..., WorkerType],
*args,
courier_kwargs: Optional[Mapping[str, Any]] = None,
courier_client_kwargs: Optional[Mapping[str, Any]] = None,
**kwargs):
"""Initializes a new instance of the `CourierNode` class.
Args:
constructor: Function that creates a new instance of the actual worker.
*args: Positional arguments passed to `constructor`.
courier_kwargs: Keyword arguments passed to the courier server.
courier_client_kwargs: Keyword arguments passed to the courier clients.
**kwargs: Keyword arguments passed to `constructor`.
"""
super().__init__(constructor, *args, **kwargs)
self._address = lp_address.Address(COURIER_PORT_NAME)
self.allocate_address(self._address)
self._courier_kwargs = courier_kwargs or {}
self._courier_client_kwargs = courier_client_kwargs or {}
# Set in `run()` method.
self._server: Optional[courier.Server] = None
def configure(self, *args, **kwargs):
"""Sets the args and kwargs being passed to the constructor.
This is useful for setting up cyclic references. E.g.:
foo_node = CourierNode(_foo)
foo_handle = foo_node.create_handle()
bar_node = CourierNode(_bar)
bar_handle = bar_node.create_handle()
foo_node.configure(bar=bar_handle)
bar_node.configure(foo=foo_handle)
p.add_node(foo_node)
p.add_node(bar_node)
Args:
*args: non-keyword arguments to pass to the constructor.
**kwargs: keyword arguments to pass to the constructor.
"""
self._args = args
self._kwargs = kwargs
self._collect_input_handles()
def create_handle(self) -> CourierHandle:
"""See `Node.create_handle`."""
return self._track_handle(
CourierHandle(self._address, **self._courier_client_kwargs))
def run(self) -> None:
"""Creates the worker instance and executes the user-provided function."""
instance = self._construct_instance()
courier_kwargs = dict(**self._courier_kwargs)
if 'port' not in courier_kwargs:
courier_kwargs['port'] = lp_address.get_port_from_address(
self._address.resolve()
)
self._server = courier_utils.make_courier_server(instance, **courier_kwargs)
if hasattr(instance, 'set_courier_server'):
# Transfer the ownership of the server to the instance, so that the user
# can decide when to start and stop the courier server.
instance.set_courier_server(self._server)
if hasattr(instance, 'run') and self._should_run:
instance.run()
else:
# Start the server after instantiation and serve forever
self._server.Start()
try:
if hasattr(instance, 'run') and self._should_run:
# If a run() method is provided, stop the server at the end of run().
instance.run()
else:
if lp_flags.LP_WORKER_MANAGER_V2.value:
worker_manager_v2.wait_for_stop()
else:
worker_manager.wait_for_stop()
finally:
self._server.Stop()
self._server.Join()
@property
def courier_address(self) -> lp_address.Address:
"""Returns the physical address of the courier server."""
return self._address
| launchpad-master | launchpad/nodes/courier/node.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for launchpad.nodes.courier.node."""
import datetime
import sys
import threading
import time
from unittest import mock
from absl.testing import absltest
import courier
from launchpad.launch.test_multi_threading import address_builder
from launchpad.nodes.courier import node as lp_courier
class Server(object):
"""Terminates once it receives the first ping() call."""
def __init__(self):
self._server = None # type: courier.Server
self._has_ping = threading.Event()
self._has_call = threading.Event()
def ping(self):
self._has_ping.set()
return 'pong'
def ping_slow(self):
time.sleep(1)
return self.ping()
def __call__(self):
self._has_call.set()
return 'called'
def set_courier_server(self, server: courier.Server):
self._server = server
def run(self):
self._server.Start()
self._has_ping.wait()
self._has_call.wait()
self._server.Stop()
class CourierNodeTest(absltest.TestCase):
def test_ping_pong(self):
node = lp_courier.CourierNode(Server)
handle = node.create_handle()
# Bind all addresses
address_builder.bind_addresses([node])
threading.Thread(target=node.run).start()
client = handle.dereference()
self.assertEqual(client.ping(), 'pong')
self.assertEqual(client(), 'called')
# Make sure Tensorflow is not imported.
self.assertNotIn('tensorflow', sys.modules)
def test_future_ping_pong(self):
node = lp_courier.CourierNode(Server)
handle = node.create_handle()
# Bind all addresses
address_builder.bind_addresses([node])
threading.Thread(target=node.run).start()
client = handle.dereference()
self.assertEqual(client.futures.ping().result(), 'pong')
self.assertEqual(client.futures().result(), 'called')
# Make sure Tensorflow is not imported.
self.assertNotIn('tensorflow', sys.modules)
def test_cyclic_reference(self):
def _foo(bar):
del bar # unused
def _bar(foo):
del foo # unused
foo_node = lp_courier.CourierNode(_foo)
foo_handle = foo_node.create_handle()
bar_node = lp_courier.CourierNode(_bar)
bar_handle = bar_node.create_handle()
self.assertNotIn(foo_handle, bar_node._input_handles)
self.assertNotIn(bar_handle, foo_node._input_handles)
foo_node.configure(bar=bar_handle)
bar_node.configure(foo=foo_handle)
self.assertIn(foo_handle, bar_node._input_handles)
self.assertIn(bar_handle, foo_node._input_handles)
def test_courier_client_kwargs(self):
node = lp_courier.CourierNode(
Server,
courier_client_kwargs={
'call_timeout': datetime.timedelta(milliseconds=50)
})
handle = node.create_handle()
address_builder.bind_addresses([node])
client = handle.dereference()
with self.assertRaisesRegex(Exception, 'Deadline Exceeded'):
client.ping_slow()
if __name__ == '__main__':
absltest.main()
| launchpad-master | launchpad/nodes/courier/node_test.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Courier utilities."""
import datetime
import functools
import inspect
from typing import Any, Callable, Text
from absl import logging
import courier
from courier.handlers.python import pybind
def batched_handler(batch_size, max_parallelism=1,
timeout=datetime.timedelta(milliseconds=200),
pad_batch=False):
"""A decorator to enable batching of the CourierNode method.
IMPORTANT: A courier method wrapped with batched_handler(batch_size) can
use and block up to `batch_size` Courier serving threads from the server's
thread pool. When only full batches are processed (e.g. with timeout = 30 min),
this can deadlock the Courier server. Configure the Courier `thread_pool_size`
to provide more than `batch_size` threads for each function wrapped with
`batched_handler`.
Up to `batch_size` calls to the method are batched and executed in one go.
Tensors and NumPy arrays are batched by adding an extra leading batch
dimension. Each value inside a dictionary, as well as each element of a
tuple, is batched independently. Primitive types are batched by being
wrapped in a list. The batched method is expected to return a batched
result, which is then unbatched and the corresponding results sent back to
each caller.
Class member functions should be wrapped in __init__ instead of being
decorated (the `self` parameter needs to be bound at the time of wrapping),
for example:
class Server:
def __init__(self, batch_size):
self.compute = lp.batched_handler(batch_size=batch_size)(self.compute)
def compute(self, values):
...
Args:
batch_size: How many calls to batch together (at most).
max_parallelism: How many parallel calls of the function being batched to
allow.
timeout: Timeout after which batched handler will be called even if
`batch_size` calls has not been collected.
pad_batch: Should the timed-out batch be padded to the batch_size.
It guarantees that all executed batches are of the same size.
Returns:
The decorated function.
"""
def decorator_batch(func):
args = inspect.getfullargspec(func).args
# Make sure the function being wrapped doesn't expect `self` as its first
# argument; otherwise it is an unbound class member function.
assert not args or args[0] != 'self' or hasattr(func, '__self__'), (
'Do not decorate class methods with @lp.batched_handler. '
'Wrap them in the object constructor instead.')
handler = pybind.BuildPyCallHandler(func)
batcher = pybind.BuildBatchedHandlerWrapper(func.__name__, handler,
batch_size, max_parallelism,
timeout, pad_batch)
@functools.wraps(func)
def wrapper_batch(*args, **kwargs):
return pybind.CallHandler(batcher, func.__name__, list(args), kwargs)
wrapper_batch._batched_handler = True
wrapper_batch._batch_size = batch_size
wrapper_batch._max_parallelism = max_parallelism
wrapper_batch._timeout = timeout
wrapper_batch._pad_batch = pad_batch
wrapper_batch._func = func
return wrapper_batch
return decorator_batch
def _should_expose_method(func: Callable[..., Any], method_name: Text) -> bool:
return (callable(func) and method_name != 'set_courier_server' and
(not method_name.startswith('_') or method_name == '__call__'))
def make_courier_server(instance: Any, *courier_args,
**courier_kwargs) -> courier.Server:
"""Builds a courier.Server for an instance.
Args:
instance: The instance that the courier server associates with.
*courier_args: positional arguments to pass to courier.Server().
**courier_kwargs: keyword arguments to pass to courier.Server().
Returns:
A courier.Server object.
"""
server = courier.Server(*courier_args, **courier_kwargs)
# Bind all non-private user-defined local methods.
for method_name in dir(instance):
func = getattr(instance, method_name)
if _should_expose_method(func, method_name):
logging.info('Binding: %s', method_name)
handler = None
if func.__dict__.get('_batched_handler', False):
if handler is None:
handler = pybind.BuildPyCallHandler(func.__dict__['_func'])
batch_size = func.__dict__['_batch_size']
max_parallelism = func.__dict__['_max_parallelism']
timeout = func.__dict__['_timeout']
pad_batch = func.__dict__['_pad_batch']
handler = pybind.BuildBatchedHandlerWrapper(method_name, handler,
batch_size, max_parallelism,
timeout, pad_batch)
if handler is None:
handler = pybind.BuildPyCallHandler(func)
server.BindHandler(method_name, handler)
return server
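# Illustrative sketch only (not part of the library): shows which attributes of
# a worker object `make_courier_server` would bind as Courier handlers, using
# the `_should_expose_method` predicate directly so no server is started.
if __name__ == '__main__':
  class _ExampleWorker:
    def step(self):
      return 1
    def _internal(self):
      return 2
    def __call__(self):
      return 3
  worker = _ExampleWorker()
  exposed = [name for name in dir(worker)
             if _should_expose_method(getattr(worker, name), name)]
  assert 'step' in exposed and '__call__' in exposed
  assert '_internal' not in exposed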
| launchpad-master | launchpad/nodes/courier/courier_utils.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities to run PyNodes in Docker containers using XManager."""
import atexit
import copy
import dataclasses
from distutils import dir_util
import functools
import os
import pathlib
import shutil
import sys
import tempfile
from typing import Any, List, Optional, Sequence, Tuple
import cloudpickle
from launchpad.launch import serialization
try:
from xmanager import xm
except ModuleNotFoundError:
raise Exception('Launchpad requires `xmanager` for XM-based runtimes. '
'Please run `pip install xmanager`.')
_DATA_FILE_NAME = 'job.pkl'
_INIT_FILE_NAME = 'init.pkl'
@dataclasses.dataclass
class DockerConfig:
"""Local docker launch configuration.
Attributes:
code_directory: Path to directory containing any user code that may be
required inside the Docker image. The user code from this directory is
copied over into the Docker containers, as the user code may be needed
during program execution. If the user code needs installation, modify
docker_instructions in the xm.PythonContainer construction below.
docker_requirements: Path to requirements.txt specifying Python packages to
install inside the Docker image.
hw_requirements: Hardware requirements.
python_path: Additional paths to be added to PYTHONPATH prior to executing
an entry point.
"""
code_directory: Optional[str] = None
docker_requirements: Optional[str] = None
hw_requirements: Optional[xm.JobRequirements] = None
python_path: Optional[List[str]] = None
def initializer(python_path):
sys.path = python_path + sys.path
def to_docker_executables(
nodes: Sequence[Any],
label: str,
docker_config: DockerConfig,
) -> List[Tuple[xm.PythonContainer, xm.JobRequirements]]:
"""Returns a list of `PythonContainer`s objects for the given `PyNode`s."""
if docker_config.code_directory is None or docker_config.docker_requirements is None:
raise ValueError(
'code_directory and docker_requirements must be specified through '
'DockerConfig via local_resources when using the "xm_docker" launch type.')
# Generate tmp dir without '_' in the name, Vertex AI fails otherwise.
tmp_dir = '_'
while '_' in tmp_dir:
tmp_dir = tempfile.mkdtemp()
atexit.register(shutil.rmtree, tmp_dir, ignore_errors=True)
command_line = f'python -m process_entry --data_file={_DATA_FILE_NAME}'
# Add common initialization function for all nodes which sets up PYTHONPATH.
if docker_config.python_path:
command_line += f' --init_file={_INIT_FILE_NAME}'
# Local 'path' is copied under 'tmp_dir' (no /tmp prefix) inside Docker.
python_path = [
'/' + os.path.basename(tmp_dir) + os.path.abspath(path)
for path in docker_config.python_path
]
initializer_file_path = pathlib.Path(tmp_dir, _INIT_FILE_NAME)
with open(initializer_file_path, 'wb') as f:
cloudpickle.dump(functools.partial(initializer, python_path), f)
data_file_path = str(pathlib.Path(tmp_dir, _DATA_FILE_NAME))
serialization.serialize_functions(data_file_path, label,
[n.function for n in nodes])
file_path = pathlib.Path(__file__).absolute()
shutil.copy(pathlib.Path(file_path.parent, 'process_entry.py'), tmp_dir)
dir_util.copy_tree(docker_config.code_directory, tmp_dir)
shutil.copy(docker_config.docker_requirements,
pathlib.Path(tmp_dir, 'requirements.txt'))
workdir_path = pathlib.Path(tmp_dir).name
if not os.path.exists(docker_config.docker_requirements):
raise FileNotFoundError('Please specify a path to a file with Python '
'package requirements through '
'docker_config.docker_requirements.')
job_requirements = docker_config.hw_requirements
if not job_requirements:
job_requirements = xm.JobRequirements()
# Make a copy of requirements since they are being mutated below.
job_requirements = copy.deepcopy(job_requirements)
if job_requirements.replicas != 1:
raise ValueError(
'Number of replicas is computed by the runtime. '
'Please do not set it explicitly in the requirements.'
)
job_requirements.replicas = len(nodes)
python_version = f'{sys.version_info.major}.{sys.version_info.minor}'
base_image = f'python:{python_version}'
return [(xm.PythonContainer(
path=tmp_dir,
base_image=base_image,
entrypoint=xm.CommandList([command_line]),
docker_instructions=[
'RUN apt-get update && apt-get install -y git',
'RUN python -m pip install --upgrade pip',
f'RUN apt-get -y install libpython{python_version}',
f'COPY {workdir_path}/requirements.txt requirements.txt',
'RUN python -m pip install xmanager',
'RUN python -m pip install -r requirements.txt',
f'COPY {workdir_path}/ {workdir_path}',
f'WORKDIR {workdir_path}',
]), job_requirements)]
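# Illustrative sketch only (the paths below are hypothetical): the DockerConfig
# a user would pass via local_resources when launching with the "xm_docker"
# launch type; to_docker_executables() consumes it at launch time.
if __name__ == '__main__':
  example_config = DockerConfig(
      code_directory='/tmp/my_project',
      docker_requirements='/tmp/my_project/requirements.txt',
      hw_requirements=xm.JobRequirements(),
      python_path=['third_party_libs'],
  )
  print(example_config)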
| launchpad-master | launchpad/nodes/python/xm_docker.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| launchpad-master | launchpad/nodes/python/__init__.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities to run PyNodes as multiple processes."""
import atexit
import dataclasses
import json
import os
import shutil
import sys
import tempfile
from typing import Any, List, Mapping, Optional, Sequence, Union
from absl import flags
from absl import logging
from launchpad import flags as lp_flags
from launchpad.launch import serialization
from launchpad.launch.local_multi_processing import commands as mp_commands
import portpicker
_INTERPRETER = sys.executable
StrOrFloat = Union[str, float]
def _to_cmd_arg(key: str, value: Any) -> str:
"""Converts key value pair to "--key=value"."""
if isinstance(value, bool):
return f'--{key}' if value else f'--no{key}'
return f'--{key}={value}'
@dataclasses.dataclass
class PythonProcess:
"""Local multiprocessing launch configuration for a PyNode.
Attributes:
args: Arguments to pass to the user script.
env: Additional environment variables to set.
"""
args: Mapping[str, StrOrFloat] = dataclasses.field(default_factory=dict)
env: Mapping[str, StrOrFloat] = dataclasses.field(default_factory=dict)
_absolute_interpreter_path: str = ''
def _get_absolute_interpreter_path(self):
"""Resolve self.interpreter to an absolute path."""
return _INTERPRETER
@property
def absolute_interpreter_path(self) -> str:
"""Returns the absolute path to the interpreter binary."""
if not self._absolute_interpreter_path:
self._absolute_interpreter_path = self._get_absolute_interpreter_path()
return self._absolute_interpreter_path
_DATA_FILE_NAME = 'job.pkl'
def to_multiprocessing_executables(
nodes: Sequence[Any],
label: str,
launch_config: PythonProcess,
pdb_post_mortem: bool,
) -> List[mp_commands.Command]:
"""Returns a list of `Command`s objects for the given `PyNode`s."""
launch_config = launch_config or PythonProcess()
if not isinstance(launch_config, PythonProcess):
raise ValueError(
'Launch config for {} must be a PythonProcess.'.format(label))
entry_script_path = os.path.join(os.path.dirname(__file__),
'process_entry.py')
tmp_dir = tempfile.mkdtemp()
atexit.register(shutil.rmtree, tmp_dir, ignore_errors=True)
data_file_path = os.path.join(tmp_dir, _DATA_FILE_NAME)
serialization.serialize_functions(data_file_path, label,
[n.function for n in nodes])
args = dict(launch_config.args)
per_task_args = [{} for _ in nodes]
per_task_interpreter_args = [{} for _ in nodes]
per_task_env = [{} for _ in nodes]
commands = []
for task_id, (_, task_args) in enumerate(zip(nodes, per_task_args)):
command_as_list = [
launch_config.absolute_interpreter_path, entry_script_path
]
all_args = {**args, **task_args}
# Arguments to pass to the script
for key, value in all_args.items():
command_as_list.append(_to_cmd_arg(key, value))
command_as_list.extend([
'--data_file', data_file_path,
'--lp_task_id', str(task_id),
])
if pdb_post_mortem:
command_as_list.append('--pdb_post_mortem')
env = {**launch_config.env}
env.update(per_task_env[task_id])
command = mp_commands.Command(command_as_list, env,
label + '/' + str(task_id))
commands.append(command)
return commands
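# Illustrative sketch only (not part of the library): how PythonProcess args are
# rendered into command-line flags and how the interpreter path is resolved.
if __name__ == '__main__':
  assert _to_cmd_arg('learning_rate', 0.1) == '--learning_rate=0.1'
  assert _to_cmd_arg('use_gpu', True) == '--use_gpu'
  assert _to_cmd_arg('use_gpu', False) == '--nouse_gpu'
  example_config = PythonProcess(args={'seed': 42}, env={'TMPDIR': '/tmp'})
  print(example_config.absolute_interpreter_path)  # Points at sys.executable.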
| launchpad-master | launchpad/nodes/python/local_multi_processing.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Nodes that run user-defined Python code.
PyNode runs a user-defined function. PyClassNode constructs a Python object and
calls its run() method (if provided).
"""
import functools
import itertools
from typing import Any, Callable, Generic, TypeVar
from absl import flags
from absl import logging
from launchpad import context
from launchpad import lazy_loader
from launchpad.nodes import base
from launchpad.nodes import dereference
from launchpad.nodes.python import addressing
from launchpad.nodes.python import local_multi_processing
import tree
FLAGS = flags.FLAGS
T = TypeVar('T')
HandleType = TypeVar('HandleType', bound=base.Handle)
ReturnType = TypeVar('ReturnType')
WorkerType = TypeVar('WorkerType')
class _DummyHandle(base.Handle[Any]):
def dereference(self) -> None:
raise NotImplementedError('_DummyHandle cannot be dereferenced.')
class PyNode(base.Node[HandleType], Generic[HandleType, ReturnType]):
"""Runs a user-defined Python function."""
# Used only for no-serialization launch
NEXT_PY_NODE_ID = itertools.count()
def __init__(self, function: Callable[..., ReturnType], *args, **kwargs):
super().__init__()
self._func_args = args
self._func_kwargs = kwargs
self._function = function
self._partial_function = self._construct_function
self.py_node_id = next(PyNode.NEXT_PY_NODE_ID)
# Find input handles and put them in self._input_handles.
tree.traverse(
functools.partial(base.extract_handles, handles=self._input_handles),
(self._func_args, self._func_kwargs))
def _construct_function(self):
context.set_context(self._launch_context)
args, kwargs = tree.map_structure(dereference.maybe_dereference,
(self._func_args, self._func_kwargs))
return functools.partial(self._function, *args, **kwargs)()
def create_handle(self) -> HandleType:
"""Doesn't expose an interface for others to interact with it."""
return _DummyHandle()
@property
def function(self) -> Callable[..., ReturnType]:
return self._partial_function
@staticmethod
def to_executables(nodes, label, launch_context):
"""Creates Executables."""
if (launch_context.launch_type in [
context.LaunchType.LOCAL_MULTI_THREADING,
context.LaunchType.TEST_MULTI_THREADING
]):
return [node.function for node in nodes]
elif (launch_context.launch_type is
context.LaunchType.LOCAL_MULTI_PROCESSING):
return local_multi_processing.to_multiprocessing_executables(
nodes, label, launch_context.launch_config, pdb_post_mortem=True)
elif (
launch_context.launch_type is context.LaunchType.TEST_MULTI_PROCESSING):
return local_multi_processing.to_multiprocessing_executables(
nodes,
label,
launch_context.launch_config,
pdb_post_mortem=False,
)
elif launch_context.launch_type == context.LaunchType.VERTEX_AI:
from launchpad.nodes.python import xm_docker
return xm_docker.to_docker_executables(nodes,
label,
launch_context.launch_config)
raise NotImplementedError('Unsupported launch type: {}'.format(
launch_context.launch_type))
def bind_addresses(self, **kwargs):
if self._launch_context.launch_type in [
context.LaunchType.LOCAL_MULTI_THREADING,
context.LaunchType.LOCAL_MULTI_PROCESSING,
context.LaunchType.TEST_MULTI_PROCESSING,
context.LaunchType.TEST_MULTI_THREADING,
]:
addressing.bind_addresses_local(self.addresses)
elif self._launch_context.launch_type == context.LaunchType.VERTEX_AI:
addressing.bind_addresses_vertex_ai(self.addresses, **kwargs)
else:
raise NotImplementedError('Unsupported launch type: {}'.format(
self._launch_context.launch_type))
@classmethod
def default_launch_config(cls, launch_type: context.LaunchType):
if launch_type in [
context.LaunchType.LOCAL_MULTI_THREADING,
context.LaunchType.TEST_MULTI_THREADING
]:
return None
return super().default_launch_config(launch_type)
class PyClassNode(PyNode[HandleType, type(None)],
Generic[HandleType, WorkerType]):
"""Instantiates a Python object and runs its run() method (if provided).
If disable_run() is called before launch, instance.run() method won't be
called. This is useful in TAP-based integration tests, where users might need
to step each worker synchronously.
"""
def __init__(self, constructor: Callable[..., WorkerType], *args, **kwargs):
"""Initializes a new instance of the `PyClassNode` class.
Args:
constructor: A function that when called returns a Python object with a
run method.
*args: Arguments passed to the constructor.
**kwargs: Keyword arguments passed to the constructor.
"""
super().__init__(self.run)
self._constructor = constructor
self._args = args
self._kwargs = kwargs
self._should_run = True
self._collect_input_handles()
self._instance = None
def _collect_input_handles(self):
self._input_handles.clear()
try:
# Find input handles and put them in self._input_handles.
fn = functools.partial(base.extract_handles, handles=self._input_handles)
_ = [fn(x) for x in tree.flatten((self._args, self._kwargs))]
except TypeError as e:
raise ValueError(
f'Failed to construct the {self.__class__.__name__} with\n'
f'- constructor: {self._constructor}\n'
f'- args: {self._args}\n- kwargs: {self._kwargs}') from e
def _construct_instance(self) -> WorkerType:
if self._instance is None:
args, kwargs = tree.map_structure(dereference.maybe_dereference,
(self._args, self._kwargs))
self._instance = self._constructor(*args, **kwargs)
return self._instance
def disable_run(self) -> None:
"""Prevents the node from calling `run` on the Python object.
Note that the Python object is still constructed even if `disable_run` has
been called.
"""
self._should_run = False
def enable_run(self) -> None:
"""Ensures `run` is called on the Python object.
This is the default state and callers don't need to call `enable_run` unless
`disable_run` has been called.
"""
self._should_run = True
def run(self) -> None:
"""Constructs Python object and (maybe) calls its `run` method.
The `run` method is not called if `disable_run` has ben called previously or
if the constructed Python object does not have a `run` method.
"""
instance = self._construct_instance()
if hasattr(instance, 'run') and self._should_run:
instance.run()
else:
logging.warning(
'run() is not defined on the instance (or disable_run() was called). '
'Exiting...')
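# Illustrative sketch only (not part of the library): a PyClassNode constructs
# the worker object and calls its run() method when the node itself is run.
if __name__ == '__main__':
  class _Counter:
    """Toy worker used only for this example."""
    def __init__(self, limit):
      self._limit = limit
      self.total = 0
    def run(self):
      for step in range(self._limit):
        self.total += step
  node = PyClassNode(_Counter, limit=5)
  node.run()  # Constructs _Counter(limit=5) and calls its run() method.
  assert node._instance.total == sum(range(5))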
| launchpad-master | launchpad/nodes/python/node.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Addressing for PyNodes."""
import collections
import itertools
import json
import os
import typing
from typing import Any, List, Optional
from absl import flags
from absl import logging
from launchpad import address as lp_address
from launchpad import flags as lp_flags
FLAGS = flags.FLAGS
def bind_addresses_local(addresses: List[lp_address.Address]):
"""Binds addresses for the local launch."""
for address in addresses:
address.bind(lp_address.SimpleLocalAddressBuilder())
class VertextAiAddressBuilder(lp_address.AbstractAddressBuilder):
"""Builds an address for Vertex AI."""
def __init__(self, cluster: str, instance: int):
self._cluster = cluster
self._instance = instance
def build(self) -> str:
cluster_spec = os.environ.get('CLUSTER_SPEC', None)
return json.loads(cluster_spec).get('cluster').get(
self._cluster)[self._instance]
def bind_addresses_vertex_ai(addresses: List[lp_address.Address], cluster: str,
instance: int):
"""Binds addresses for the execution using Vertex AI."""
if len(addresses) > 1:
raise RuntimeError(
f'Vertex AI supports only one port per node. {len(addresses)} requested.'
)
if len(addresses) == 1:
addresses[0].bind(VertextAiAddressBuilder(cluster, instance))
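# Illustrative sketch only: how VertextAiAddressBuilder resolves an address from
# the CLUSTER_SPEC environment variable. The JSON below is a simplified,
# hypothetical example of the spec Vertex AI provides.
if __name__ == '__main__':
  os.environ['CLUSTER_SPEC'] = json.dumps(
      {'cluster': {'workerpool0': ['10.0.0.1:2222', '10.0.0.2:2222']}})
  builder = VertextAiAddressBuilder(cluster='workerpool0', instance=1)
  assert builder.build() == '10.0.0.2:2222'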
| launchpad-master | launchpad/nodes/python/addressing.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Entry of a PythonNode worker."""
import contextlib
import functools
import json
import os
import sys
from absl import app
from absl import flags
from absl import logging
import cloudpickle
from launchpad import flags as lp_flags
from launchpad.launch import worker_manager
from launchpad.launch import worker_manager_v2
import six
FLAGS = flags.FLAGS
flags.DEFINE_integer(
'lp_task_id', None, 'A list index deciding which '
'worker to run, given the list of workers obtained from the '
'data_file.')
flags.DEFINE_string('data_file', '',
'Pickle file location with entry points for all nodes')
# An index of an entry point in a pickle file. Will not match work unit id if
# entry points are saved to separate pickle files.
flags.DEFINE_integer('lp_unit_id', None,
'Which work unit within pickle file to run.')
flags.DEFINE_string(
'lp_job_name', '',
'The name of the job, used to access the correct pickle file resource when '
'using the new launch API')
flags.DEFINE_string(
'init_file', '', 'Pickle file location containing initialization module '
'executed for each node prior to an entry point')
flags.DEFINE_string('flags_to_populate', '{}', 'obsolete')
_FLAG_TYPE_MAPPING = {
str: flags.DEFINE_string,
six.text_type: flags.DEFINE_string,
float: flags.DEFINE_float,
int: flags.DEFINE_integer,
bool: flags.DEFINE_boolean,
list: flags.DEFINE_list,
}
def _parse_process_entry_flags(all_argv: list[str]) -> list[str]:
"""Parse and consume all flags for the entry script; return the rest."""
# unconsumed_argv will still include all_argv[0], which is expected to be
# the program name and is ignored by flag parsing.
unconsumed_argv = FLAGS(all_argv, known_only=True)
# JAX doesn't use absl flags and so we need to forward absl flags to JAX
# explicitly. Here's a heuristic to detect JAX flags and forward them.
if any(arg.startswith('--jax_') for arg in sys.argv):
try:
# pytype:disable=import-error
import jax
# pytype:enable=import-error
jax.config.parse_flags_with_absl()
except ImportError:
pass
return unconsumed_argv
def _get_task_id():
"""Returns current task's id."""
if FLAGS.lp_task_id is None:
# Running under Vertex AI...
cluster_spec = os.environ.get('CLUSTER_SPEC', None)
return json.loads(cluster_spec).get('task').get('index')
return FLAGS.lp_task_id
def main(argv: list[str], process_argv: list[str]):
# See `parse_flags_and_run()` for why arguments are passed in `process_argv`
# instead.
assert len(argv) == 1
del argv
# Allow for importing modules from the current directory.
sys.path.append(os.getcwd())
data_file = FLAGS.data_file
init_file = FLAGS.init_file
if os.environ.get('TF_CONFIG', None):
# For GCP runtime log to STDOUT so that logs are not reported as errors.
logging.get_absl_handler().python_handler.stream = sys.stdout
if init_file:
init_function = cloudpickle.load(open(init_file, 'rb'))
init_function()
functions = cloudpickle.load(open(data_file, 'rb'))
# Now that the code that we intend to run has been unpickled, that should
# have caused the registration of any remaining flags that the program needs.
[unused_program_name, *unconsumed_argv] = FLAGS(process_argv, known_only=True)
if unconsumed_argv:
logging.warning('The following command-line arguments were passed to the '
'program but are not used by anything that it imports: %s',
unconsumed_argv)
task_id = _get_task_id()
if lp_flags.LP_WORKER_MANAGER_V2.value:
worker_manager_v2.WorkerManager(
handle_sigterm=True,
register_in_thread=True)
else:
# Worker manager is used here to handle termination signals and provide
# preemption support.
worker_manager.WorkerManager(
register_in_thread=True)
with contextlib.suppress(): # no-op context manager
functions[task_id]()
def parse_flags_and_run():
# Parse flags for this module and the things it has already imported.
# Pass whatever flags are left over to main() through a side channel, so that
# app.run() doesn't try to parse them before we have set the scene.
[program_name, *process_argv] = _parse_process_entry_flags(sys.argv)
app.run(functools.partial(main, process_argv=[program_name, *process_argv]),
argv=[program_name])
if __name__ == '__main__':
parse_flags_and_run()
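# Example invocation (illustrative; the paths are hypothetical), mirroring the
# command lines built by the local multi-processing and Docker launchers:
#   python process_entry.py --data_file=/tmp/lp_xyz/job.pkl --lp_task_id=0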
| launchpad-master | launchpad/nodes/python/process_entry.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| launchpad-master | launchpad/nodes/multi_processing_colocation/__init__.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A node wrapper to colocate multiple PyNodes.
See docstring of MultiProcessingColocation for usage.
"""
import atexit
import collections
from concurrent import futures
import itertools
import os
import shutil
import subprocess
import tempfile
from typing import Any
from absl import flags
from absl import logging
import cloudpickle
from launchpad import address as lp_address
from launchpad.launch import serialization
from launchpad.nodes.python import node as python
HandleType = Any
class _ResolvedAddressBuilder(lp_address.AbstractAddressBuilder):
def __init__(self, resolved_address):
self._resolved_address = resolved_address
def build(self):
return self._resolved_address
class MultiProcessingColocation(python.PyNode):
"""A special node type for colocating multiple PyNodes as subprocesses.
Please don't add inner nodes to the program, as they will become part of this
colocation node.
Example:
actor_nodes = []
for _ in range(10):
actor_nodes.append(lp.CourierNode(Actor, ...))
program.add_node(lp.MultiProcessingColocation(actor_nodes))
"""
def __init__(self, nodes, num_retries_on_failure: int = 0):
super().__init__(self.run)
self._nodes = []
self._name_uniquifier = collections.defaultdict(itertools.count)
self._num_retries_on_failure = num_retries_on_failure
self._num_restarts = 0
for node in nodes:
self.add_node(node)
def add_node(self, node: python.PyNode) -> HandleType:
if not isinstance(node, python.PyNode):
raise ValueError('MultiProcessingColocation only works with PyNodes.')
self._nodes.append(node)
# Reference all the child nodes' addresses (they are not owned by this node,
# only referenced).
for address in node.addresses:
self.addresses.append(address)
# Ensure unique address names (avoid name clash when creating named ports)
address.name = address.name or 'lp' # Name might not be specified
unique_id = str(next(self._name_uniquifier[address.name]))
address.name = address.name + unique_id
return node.create_handle()
@property
def nodes(self):
return self._nodes
def run(self):
if not self._nodes:
raise ValueError('MultiProcessingColocation requires at least one node.')
for address in self.addresses:
address._address_builder = _ResolvedAddressBuilder(address.resolve())
running_processes = []
with futures.ThreadPoolExecutor(max_workers=len(self._nodes)) as e:
for node in self._nodes:
running_processes.append(e.submit(self._run_subprocess, node.function))
done, _ = futures.wait(
running_processes, return_when=futures.FIRST_EXCEPTION)
for f in done:
f.result()
def _run_subprocess(self, function):
_, data_file_path = tempfile.mkstemp()
with open(data_file_path, 'wb') as f:
serialization.enable_lru_cache_pickling_once()
cloudpickle.dump([function], f)
atexit.register(os.remove, data_file_path)
subprocess_env = {}
subprocess_env.update(os.environ)
while True:
temp_dir = tempfile.mkdtemp()
subprocess_env['TMPDIR'] = temp_dir
entry_script_path = os.path.join(
os.path.dirname(__file__), 'process_entry.py')
process = subprocess.Popen([
os.environ['_'],
entry_script_path,
'--',
'--lp_task_id',
'0',
'--data_file',
data_file_path,
],
env=subprocess_env)
exit_code = process.wait()
shutil.rmtree(temp_dir)
if exit_code == 0:
return
if self._num_restarts == self._num_retries_on_failure:
raise RuntimeError('num_retries_on_failure (=%d) is reached.' %
self._num_retries_on_failure)
logging.info('Subprocess %d exited abnormally! Restarting.', process.pid)
self._num_restarts += 1
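# Illustrative sketch only (not part of the library): colocating two PyNodes.
# In a real program the colocation node would be added via program.add_node()
# and its run() would launch one subprocess per inner node.
if __name__ == '__main__':
  def _work():
    print('hello from a colocated worker')
  colocation = MultiProcessingColocation(
      [python.PyNode(_work), python.PyNode(_work)], num_retries_on_failure=1)
  assert len(colocation.nodes) == 2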
| launchpad-master | launchpad/nodes/multi_processing_colocation/node.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for launchpad.nodes.multi_processing_colocation.node."""
import shutil
import subprocess
import tempfile
from unittest import mock
from absl.testing import absltest
from launchpad.nodes.multi_processing_colocation import node as multi_processing_colocation
from launchpad.nodes.python import node as python
def _run_no_op():
pass
class NodeTest(absltest.TestCase):
@mock.patch.object(tempfile, 'mkdtemp')
@mock.patch.object(subprocess, 'Popen')
@mock.patch.object(shutil, 'rmtree')
def test_one_subprocess(self, mock_rmtree, mock_popen, mock_mkdtemp):
# Verify cleanups are done correctly
mock_mkdtemp.return_value = 'temp_dir'
mock_process = mock.Mock()
mock_popen.return_value = mock_process
mock_process.wait.return_value = 0 # return normally
colocation = multi_processing_colocation.MultiProcessingColocation(
[python.PyNode(_run_no_op)])
colocation.run()
mock_mkdtemp.called_once_with()
mock_popen.called_once()
mock_rmtree.called_once_with('temp_dir')
@mock.patch.object(tempfile, 'mkdtemp')
@mock.patch.object(subprocess, 'Popen')
@mock.patch.object(shutil, 'rmtree')
def test_two_subprocesses(self, mock_rmtree, mock_popen, mock_mkdtemp):
# Verify it works for more than one PyNode
mock_mkdtemp.return_value = 'temp_dir'
mock_process = mock.Mock()
mock_popen.return_value = mock_process
mock_process.wait.return_value = 0 # return normally
colocation = multi_processing_colocation.MultiProcessingColocation(
[python.PyNode(_run_no_op),
python.PyNode(_run_no_op)])
colocation.run()
self.assertEqual(mock_mkdtemp.call_count, 2)
self.assertEqual(mock_popen.call_count, 2)
self.assertEqual(mock_rmtree.call_count, 2)
@mock.patch.object(tempfile, 'mkdtemp')
@mock.patch.object(subprocess, 'Popen')
@mock.patch.object(shutil, 'rmtree')
def test_retry_on_failure(self, mock_rmtree, mock_popen, mock_mkdtemp):
# Verify it retries after subprocess failure
mock_mkdtemp.return_value = 'temp_dir'
mock_process = mock.Mock()
mock_popen.return_value = mock_process
mock_process.pid = 42
def mock_wait():
# First time fail, then succeed
if mock_popen.call_count == 1:
return 1
return 0
mock_process.wait = mock_wait
colocation = multi_processing_colocation.MultiProcessingColocation(
[python.PyNode(_run_no_op)], num_retries_on_failure=1)
colocation.run()
self.assertEqual(mock_mkdtemp.call_count, 2)
self.assertEqual(mock_popen.call_count, 2)
self.assertEqual(mock_rmtree.call_count, 2)
@mock.patch.object(tempfile, 'mkdtemp')
@mock.patch.object(subprocess, 'Popen')
@mock.patch.object(shutil, 'rmtree')
def test_fail_without_retry(self, mock_rmtree, mock_popen, mock_mkdtemp):
mock_mkdtemp.return_value = 'temp_dir'
mock_process = mock.Mock()
mock_popen.return_value = mock_process
mock_process.wait.return_value = 1 # return abnormally
colocation = multi_processing_colocation.MultiProcessingColocation(
[python.PyNode(_run_no_op)], num_retries_on_failure=0)
with self.assertRaisesRegexp(
RuntimeError, r'num_retries_on_failure \(=0\) is reached.'):
colocation.run()
mock_mkdtemp.called_once_with()
mock_popen.called_once()
mock_rmtree.called_once_with('temp_dir')
if __name__ == '__main__':
absltest.main()
| launchpad-master | launchpad/nodes/multi_processing_colocation/node_test.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| launchpad-master | launchpad/nodes/reverb/__init__.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Reverb replay buffers."""
from typing import Any, Callable, Optional, Sequence
from absl import logging
from launchpad import address as lp_address
from launchpad import context
from launchpad.launch import worker_manager_migration
from launchpad.nodes import base
from launchpad.nodes.python import node as python
import reverb
PriorityTablesFactory = Callable[[], Sequence[reverb.Table]]
CheckpointerFactory = Callable[[], reverb.checkpointers.CheckpointerBase]
REVERB_PORT_NAME = 'reverb'
class ReverbHandle(base.Handle):
"""Handle of the ReverbNode.
When dereferenced, a Reverb Client (see https://github.com/deepmind/reverb/client.py) is
returned. This client should primarily be used for insert operations on the
actors. For sampling and updates, the TFClient (see
https://github.com/deepmind/reverb/tf_client.py) should be used.
To construct a TFClient:
```python
from reverb import tf_client as reverb_tf_client
client = ... # reverb.Client
tf_client = reverb_tf_client.TFClient(client.server_address)
```
The TF-client is not made directly available through LP as it would require
a dependency on TF even when TF is not used (e.g. by the many actors).
"""
def __init__(self, address: lp_address.Address):
self._address = address
def dereference(self):
address = self._address.resolve()
logging.info('Reverb client connecting to: %s', address)
return reverb.Client(address)
class ReverbNode(python.PyNode):
"""Represents a Reverb replay buffer in a Launchpad program."""
def __init__(self,
priority_tables_fn: PriorityTablesFactory,
checkpoint_ctor: Optional[CheckpointerFactory] = None,
checkpoint_time_delta_minutes: Optional[int] = None):
"""Initialize a ReverbNode.
Args:
priority_tables_fn: A function that returns a sequence of tables to host
on the server.
checkpoint_ctor: Constructor for the checkpointer to be used. Passing None
uses Reverb's default checkpointer.
checkpoint_time_delta_minutes: Time between async (non-blocking)
checkpointing calls.
"""
super().__init__(self.run)
self._priority_tables_fn = priority_tables_fn
self._checkpoint_ctor = checkpoint_ctor
self._checkpoint_time_delta_minutes = checkpoint_time_delta_minutes
self._address = lp_address.Address(REVERB_PORT_NAME)
self.allocate_address(self._address)
if (self._checkpoint_time_delta_minutes is not None and
self._checkpoint_time_delta_minutes <= 0):
raise ValueError(
'Replay checkpoint time delta must be positive when specified.')
def create_handle(self):
return self._track_handle(ReverbHandle(self._address))
def run(self):
priority_tables = self._priority_tables_fn()
if self._checkpoint_ctor is None:
checkpointer = None
else:
checkpointer = self._checkpoint_ctor()
self._server = reverb.Server(
tables=priority_tables,
port=lp_address.get_port_from_address(self._address.resolve()),
checkpointer=checkpointer)
if self._checkpoint_time_delta_minutes is not None:
while not worker_manager_migration.wait_for_stop(
self._checkpoint_time_delta_minutes * 60):
self._server.localhost_client().checkpoint()
else:
worker_manager_migration.wait_for_stop()
@staticmethod
def to_executables(nodes: Sequence['ReverbNode'], label: str,
launch_context: context.LaunchContext):
return python.PyNode.to_executables(nodes, label, launch_context)
@property
def reverb_address(self) -> lp_address.Address:
return self._address
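# --- Illustrative usage sketch (added for documentation; not part of the
# --- original module). Assumes the public `launchpad` aliases (`lp.Program`,
# --- `lp.ReverbNode`) and a hypothetical table named 'replay'; sizes are
# --- arbitrary examples.
def _example_reverb_program():  # pragma: no cover
  """Sketches how a ReverbNode is typically wired into a Launchpad program."""
  import launchpad as lp  # Local import; only needed for this sketch.

  def make_tables():
    # A single FIFO table with uniform sampling.
    return [
        reverb.Table(
            name='replay',
            sampler=reverb.selectors.Uniform(),
            remover=reverb.selectors.Fifo(),
            max_size=10000,
            rate_limiter=reverb.rate_limiters.MinSize(1)),
    ]

  program = lp.Program('reverb_example')
  # `add_node` returns the node's handle; dereferencing it inside another
  # node yields a `reverb.Client` suitable for inserts.
  replay_handle = program.add_node(
      lp.ReverbNode(priority_tables_fn=make_tables), label='replay')
  return program, replay_handle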
| launchpad-master | launchpad/nodes/reverb/node.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for launchpad.nodes.reverb.node."""
import threading
from unittest import mock
from absl.testing import absltest
from absl.testing import parameterized
from launchpad import context
from launchpad.launch.test_multi_threading import address_builder as test_address_builder
from launchpad.nodes.reverb import node as reverb_node
import numpy as np
import reverb
from reverb import rate_limiters
_TABLE_NAME = 'dist'
def priority_tables_fn():
return [
reverb.Table(
name=_TABLE_NAME,
sampler=reverb.selectors.Uniform(),
remover=reverb.selectors.Fifo(),
max_size=100,
rate_limiter=rate_limiters.MinSize(100))
]
class ReverbNodeTest(absltest.TestCase):
def test_insert(self):
node = reverb_node.ReverbNode(priority_tables_fn=priority_tables_fn)
test_address_builder.bind_addresses([node])
threading.Thread(target=node.run).start()
client = node.create_handle().dereference()
client.insert([np.zeros((81, 81))], {_TABLE_NAME: 1})
node._server.stop()
if __name__ == '__main__':
absltest.main()
| launchpad-master | launchpad/nodes/reverb/node_test.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| launchpad-master | launchpad/nodes/multi_threading_colocation/__init__.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A special node type for colocating multiple PyNodes."""
import collections
import itertools
from typing import Any, Sequence
from launchpad.launch import worker_manager_migration
from launchpad.nodes.python import node as python
HandleType = Any
class MultiThreadingColocation(python.PyNode):
"""A special node type for colocating multiple PyNodes.
Please don't add inner nodes to the program, as they will become part of this
colocation node.
Example:
learner_node = lp.CacherNode(...)
replay_node = lp.CourierNode(...)
program.add_node(lp.MultiThreadingColocation([learner_node, replay_node]))
In `__init__()`, `return_on_first_completed` defaults to False, meaning
it will return from `run()` when 1) any of the colocated PyNodes throws an
exception, or 2) all of them finish. This could be set to True so as to wait
until any of the nodes finishes (or throws an exception).
"""
def __init__(self,
nodes: Sequence[python.PyNode],
return_on_first_completed=False):
super().__init__(self.run)
self._nodes = []
self._name_uniquifier = collections.defaultdict(itertools.count)
self._return_on_first_completed = return_on_first_completed
for node in nodes:
self.add_node(node)
def add_node(self, node: python.PyNode) -> HandleType:
if not isinstance(node, python.PyNode):
raise ValueError('MultiThreadingColocation only works with PyNodes.')
self._nodes.append(node)
# Reference all the children addresses (not owned by this node, but only
# referenced).
for address in node.addresses:
self.addresses.append(address)
# Ensure unique address names (avoid name clash when creating named ports)
address.name = address.name or 'lp' # Name might not be specified
unique_id = str(next(self._name_uniquifier[address.name]))
address.name = address.name + unique_id
return node.create_handle()
@property
def nodes(self):
return self._nodes
def run(self):
if not self._nodes:
raise ValueError('MultiThreadingColocation requires at least one node.')
group_name = f'coloc_{id(self)}'
manager = worker_manager_migration.get_worker_manager()
for n in self._nodes:
n._launch_context = self._launch_context
manager.thread_worker(group_name, n.function)
manager.wait(
[group_name],
return_on_first_completed=self._return_on_first_completed,
raise_error=True, # Any error from the inner threads will surface.
)
return manager
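# --- Illustrative usage sketch (added for documentation; not part of the
# --- original module). The node bodies are hypothetical placeholders; only
# --- the `lp.*` aliases and `return_on_first_completed` come from the source.
def _example_colocation():  # pragma: no cover
  """Sketches colocating two PyNodes inside a single process."""
  import launchpad as lp  # Local import; only needed for this sketch.

  def trainer():
    ...  # Long-running work.

  def monitor():
    ...  # Finishes quickly.

  program = lp.Program('coloc_example')
  program.add_node(
      lp.MultiThreadingColocation(
          [lp.PyNode(trainer), lp.PyNode(monitor)],
          # True: run() returns as soon as either node finishes (or raises).
          # False (default): run() waits for all nodes to finish.
          return_on_first_completed=True),
      label='trainer_and_monitor')
  return program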
| launchpad-master | launchpad/nodes/multi_threading_colocation/node.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for launchpad.nodes.multi_threading_colocation.node."""
import threading
from absl.testing import absltest
import courier
import launchpad as lp
from launchpad.launch import worker_manager
class NodeTest(absltest.TestCase):
def test_ping(self):
program = lp.Program('test')
server_address = lp.Address()
def run_client():
client = courier.Client(server_address.resolve())
client.ping()
has_ping = threading.Event()
def run_server():
server = courier.Server(
port=lp.get_port_from_address(server_address.resolve()))
server.Bind('ping', has_ping.set)
server.Start()
lp.wait_for_stop()
server.Stop()
client_node = lp.PyNode(run_client)
server_node = lp.PyNode(run_server)
server_node.allocate_address(server_address)
program.add_node(
lp.MultiThreadingColocation([client_node, server_node]),
label='client_server')
lp.launch(
program,
launch_type='test_mt',
test_case=self,
serialize_py_nodes=False) # Disable serialization due to `Lock` objects
has_ping.wait()
def test_preemption(self):
program = lp.Program('test')
ready_to_preempt = threading.Event()
preemption_ok = threading.Event()
def node():
ready_to_preempt.set()
lp.wait_for_stop()
preemption_ok.set()
def stopper():
ready_to_preempt.wait()
lp.stop()
program.add_node(
lp.MultiThreadingColocation([lp.PyNode(node), lp.PyNode(stopper)]),
label='coloc')
# Disable serialization due to `Lock` objects
lp.launch(program, launch_type='test_mt', test_case=self,
serialize_py_nodes=False)
preemption_ok.wait()
def test_exception_propagation(self):
def raise_error():
raise RuntimeError('Foo')
def wait_test_end():
lp.wait_for_stop()
error_node = lp.PyNode(raise_error)
waiter_node = lp.PyNode(wait_test_end)
colo_node = lp.MultiThreadingColocation([error_node, waiter_node])
parent_manager = worker_manager.WorkerManager(register_in_thread=True)
with self.assertRaisesRegex(RuntimeError, 'Foo'):
manager = colo_node.run()
self.addCleanup(manager.cleanup_after_test, self) # pytype: disable=attribute-error
del parent_manager
def test_first_completed(self):
manager = worker_manager.WorkerManager(register_in_thread=True)
self.addCleanup(manager.cleanup_after_test, self)
quick_done = threading.Event()
slow_done = threading.Event()
slow_can_start = threading.Event()
def quick():
quick_done.set()
def slow():
slow_can_start.wait()
slow_done.set()
colo_node = lp.MultiThreadingColocation(
[lp.PyNode(quick), lp.PyNode(slow)],
return_on_first_completed=True)
colo_node.run() # Returns immediately without waiting for the slow node.
self.assertTrue(quick_done.is_set())
self.assertFalse(slow_done.is_set())
# Let the slow one finish.
slow_can_start.set()
slow_done.wait()
def test_all_completed(self):
manager = worker_manager.WorkerManager(register_in_thread=True)
self.addCleanup(manager.cleanup_after_test, self)
f1_done = threading.Event()
f2_done = threading.Event()
colo_node = lp.MultiThreadingColocation(
[lp.PyNode(f1_done.set), lp.PyNode(f2_done.set)],
return_on_first_completed=False)
colo_node.run() # Returns after both f1 and f2 finish.
self.assertTrue(f1_done.is_set())
self.assertTrue(f2_done.is_set())
def test_stop(self):
def _sleep():
lp.wait_for_stop()
def _stop():
lp.stop()
program = lp.Program('stop')
program.add_node(
lp.MultiThreadingColocation([lp.PyNode(_sleep),
lp.PyNode(_stop)]), label='node')
waiter = lp.launch(program, launch_type='test_mt', test_case=self)
waiter.wait()
def test_nested_stop(self):
def _sleep():
lp.wait_for_stop()
def _stop():
lp.stop()
program = lp.Program('stop')
program.add_node(
lp.MultiThreadingColocation([
lp.PyNode(_sleep),
lp.MultiThreadingColocation([lp.PyNode(_sleep),
lp.PyNode(_stop)])
]), label='node')
waiter = lp.launch(program, launch_type='test_mt', test_case=self)
waiter.wait()
if __name__ == '__main__':
absltest.main()
| launchpad-master | launchpad/nodes/multi_threading_colocation/node_test.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define Launchpad version information."""
# We follow Semantic Versioning (https://semver.org/)
_MAJOR_VERSION = '0'
_MINOR_VERSION = '6'
_PATCH_VERSION = '0'
# When building releases, we can update this value on the release branch to
# reflect the current release candidate ('rc0', 'rc1') or, finally, the official
# stable release (indicated by `_REL_SUFFIX = ''`). Outside the context of a
# release branch, the current version is by default assumed to be a
# 'development' version, labeled 'dev'.
_DEV_SUFFIX = 'dev'
_REL_SUFFIX = 'rc0'
# Example, '0.4.0rc0'
__version__ = '.'.join([
_MAJOR_VERSION,
_MINOR_VERSION,
_PATCH_VERSION,
])
__dev_version__ = '{}.{}'.format(__version__, _DEV_SUFFIX)
__rel_version__ = '{}{}'.format(__version__, _REL_SUFFIX)
__tensorflow_version__ = 'tensorflow~=2.12.0'
__reverb_version__ = 'dm-reverb==0.12.0'
__nightly_tensorflow_version__ = 'tf-nightly'
__nightly_reverb_version__ = 'dm-reverb-nightly'
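# --- Illustrative sketch (added for documentation; not part of the original
# --- module). Shows the strings derived from the constants above.
def _example_version_strings():  # pragma: no cover
  """Spells out the version strings produced with the current constants."""
  assert __version__ == '0.6.0'
  assert __rel_version__ == '0.6.0rc0'
  # Nightly builds append a YYYYMMDD suffix in setup.py, e.g. '0.6.0.dev20230101'.
  assert __dev_version__ == '0.6.0.dev'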
| launchpad-master | launchpad/pip_package/launchpad_version.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build and installs dm-launchpad."""
import argparse
import codecs
import datetime
import fnmatch
import os
import sys
from setuptools import setup
from setuptools.command.install import install as InstallCommandBase
from setuptools.dist import Distribution
from version import launchpad_version
class BinaryDistribution(Distribution):
def has_ext_modules(self):
return True
def find_files(pattern, root):
"""Return all the files matching pattern below root dir."""
for dirpath, _, files in os.walk(root):
for filename in fnmatch.filter(files, pattern):
yield os.path.join(dirpath, filename)
class InstallCommand(InstallCommandBase):
"""Override the dir where the headers go."""
def finalize_options(self):
ret = super().finalize_options()
# We need to set this manually because we are not using setuptools to
# compile the shared libraries we are distributing.
self.install_lib = self.install_platlib
return ret
class SetupToolsHelper(object):
"""Helper to execute `setuptools.setup()`."""
def __init__(self, release):
"""Initialize ReleaseBuilder class.
Args:
release: True to do a release build. False for a nightly build.
"""
self.release = release
def _get_version(self):
"""Returns the version and project name to associate with the build."""
if self.release:
project_name = 'dm-launchpad'
version = launchpad_version.__rel_version__
else:
project_name = 'dm-launchpad-nightly'
version = launchpad_version.__dev_version__
version += datetime.datetime.now().strftime('%Y%m%d')
return version, project_name
def _get_required_packages(self):
"""Returns list of required packages."""
required_packages = [
'absl-py',
'cloudpickle',
'dm-tree',
'grpcio',
'mock',
'portpicker',
'protobuf',
'psutil',
'termcolor',
]
return required_packages
def _get_tensorflow_packages(self):
"""Returns packages needed to install Tensorflow."""
if self.release:
return [launchpad_version.__tensorflow_version__]
else:
return [launchpad_version.__nightly_tensorflow_version__]
def _get_reverb_packages(self):
"""Returns packages needed to install Reverb."""
if self.release:
return [launchpad_version.__reverb_version__]
else:
return [launchpad_version.__nightly_reverb_version__]
def run_setup(self):
# Builds the long description from the README.
root_path = os.path.abspath(os.path.dirname(__file__))
with codecs.open(
os.path.join(root_path, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
version, project_name = self._get_version()
setup(
name=project_name,
version=version,
description=('Launchpad is a library that simplifies writing '
'distributed programs and seamlessly launching them '
'on a range of supported platforms.'),
long_description=long_description,
long_description_content_type='text/markdown',
author='DeepMind',
author_email='DeepMind <[email protected]>',
url='https://github.com/deepmind/launchpad',
license='Apache 2.0',
packages=['courier', 'launchpad'],
headers=list(find_files('*.proto', 'launchpad')),
include_package_data=True,
install_requires=self._get_required_packages(),
extras_require={
'tensorflow': self._get_tensorflow_packages(),
'reverb': self._get_reverb_packages(),
'xmanager': ['xmanager'],
},
distclass=BinaryDistribution,
cmdclass={
'install': InstallCommand,
},
python_requires='>=3',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
keywords='deepmind reinforcement learning machine distributed',
)
if __name__ == '__main__':
# Hide argparse help so `setuptools.setup` help prints. This pattern is an
# improvement over using `sys.argv` and then `sys.argv.remove`, which also
# did not provide help about custom arguments.
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument(
'--release',
action='store_true',
default=False,
help='Pass as true to do a release build.')
FLAGS, unparsed = parser.parse_known_args()
# Go forward with only non-custom flags.
sys.argv.clear()
# Downstream `setuptools.setup` expects args to start at the second element.
unparsed.insert(0, 'foo')
sys.argv.extend(unparsed)
setup_tools_helper = SetupToolsHelper(release=FLAGS.release)
setup_tools_helper.run_setup()
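# --- Illustrative build commands (added for documentation; not part of the
# --- original file). Exact invocations may differ per environment.
#
#   # Nightly build: packages `dm-launchpad-nightly` with a dated dev version.
#   python setup.py bdist_wheel
#
#   # Release build: packages `dm-launchpad` with the release(-candidate) version.
#   python setup.py --release bdist_wheel
#
# The custom `--release` flag is consumed by the argparse block above; the
# remaining arguments are forwarded to `setuptools.setup()`.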
| launchpad-master | launchpad/pip_package/setup.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Ongoing refactoring for WorkerManager."""
import collections
from concurrent import futures
import dataclasses
import functools
import signal
import subprocess
import threading
import time
from typing import Any, Callable, Iterable, List, Mapping, MutableMapping, Optional, Sequence, Tuple
from absl import logging
from launchpad import flags as lp_flags
import psutil
import termcolor
_WORKER_MANAGERS = threading.local()
def _call_once(func):
"""Calls the function only once, regardless of arguments."""
@functools.wraps(func)
def _wrapper():
# If we haven't been called yet, actually invoke func and save the result.
if not _wrapper.has_run():
_wrapper.mark_as_run()
_wrapper.return_value = func()
return _wrapper.return_value
_wrapper._has_run = False
_wrapper.has_run = lambda: _wrapper._has_run
_wrapper.mark_as_run = lambda: setattr(_wrapper, '_has_run', True)
return _wrapper
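# --- Illustrative sketch (added for documentation; not part of the original
# --- module). Demonstrates the once-only semantics used for signal handlers.
def _example_call_once():  # pragma: no cover
  """Shows that a `_call_once`-wrapped function runs its body only once."""
  calls = []

  @_call_once
  def handler():
    calls.append(1)
    return len(calls)

  assert handler() == 1
  assert handler() == 1  # Cached result; the body did not run again.
  assert calls == [1]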
def _register_signal_handler(sig: signal.Signals, handler: Callable[[], None]):
"""Registers a signal handler."""
# We only call the handler once.
handler = _call_once(handler)
old_handler = signal.getsignal(sig)
def _run_handler(sig=sig,
frame=None,
handler=handler,
old_handler=old_handler):
handler()
if isinstance(old_handler, Callable):
old_handler(sig, frame)
signal.signal(sig, _run_handler)
def wait_for_stop(timeout_secs: Optional[float] = None):
"""Blocks until termination of the node's program starts or timeout passes.
Args:
timeout_secs: Floating point number specifying a timeout for the operation,
in seconds. If not provided, timeout is infinite.
Returns:
True if program is being terminated, False if timeout was reached.
Usage examples:
- Perform cleanup at the end of the run:
start_server()
lp.wait_for_stop()
stop_server()
checkpoint()
- Perform some work until program is terminated:
while not lp.wait_for_stop(0): # Return immediately.
... do some work ...
- Perform some task every 5 seconds:
while not lp.wait_for_stop(5.0):
... perform periodic task ...
"""
return get_worker_manager().wait_for_stop(timeout_secs)
def get_worker_manager():
manager = getattr(_WORKER_MANAGERS, 'manager', None)
if not manager:
raise RuntimeError('Worker manager is only available from a PyNode thread.')
return manager
@dataclasses.dataclass
class ThreadWorker:
thread: threading.Thread
future: futures.Future[Any]
def _get_child_processes_with_depth(process: psutil.Process,
depth: int) -> Iterable[psutil.Process]:
"""Returns child processes at the given depth."""
if depth == 0:
return [process]
if depth == 1:
return process.children(recursive=False)
children_at_depth = []
for child in process.children(recursive=False):
children_at_depth.extend(_get_child_processes_with_depth(child, depth - 1))
return children_at_depth
def _send_signal_to_processes_with_depth(processes: Iterable[psutil.Process],
sig: signal.Signals, depth: int):
for process in processes:
for child in _get_child_processes_with_depth(process, depth):
try:
child.send_signal(sig)
except psutil.NoSuchProcess:
pass
class WorkerManager:
"""Manages running threads and processes of a Launchpad Program."""
def __init__(
self,
termination_notice_secs: Optional[int] = None,
kill_all_upon_sigint: bool = False,
kill_workers_upon_sigint: bool = False,
handle_sigterm: bool = False,
register_in_thread: bool = False,
process_tree_depth: int = 0,
):
"""Initializes a WorkerManager.
Args:
      termination_notice_secs: 1) when >0, it's the countdown (in seconds)
        before a SIGKILL is issued upon a user-requested stop (requires SIGINT
        handling via kill_all_upon_sigint or kill_workers_upon_sigint). 2) when
        =0, SIGKILL happens immediately upon a user-requested stop.
kill_all_upon_sigint: When True, set the stop event and kill all worker
subprocesses, as well as the main process upon SIGINT. This allows the
user to stop the program with Ctrl+C. It should be set to True for
local_mp and local_mt.
kill_workers_upon_sigint: When True, similar to kill_all_upon_sigint,
except that it doesn't kill the main process. This is needed in test_mp
so as not to fail the test upon cleanup.
handle_sigterm: When this is True, kill all workers upon SIGTERM, by 1)
forwarding SIGTERM to process workers 2) setting stop event for thread
workers. Set this to True in process_entry.py so that the stop event
will be triggered in the subprocesses via SIGTERM.
register_in_thread: Make the worker manager accessible through
`get_worker_manager()` in the current thread (needed by `stop_event()`
for example). It should be False if we don't need to access
        `get_worker_manager()`, e.g. at the launcher thread of local_mt and
local_mp. It should be True for process_entry.py.
      process_tree_depth: the depth of Launchpad subprocesses in the process
        tree. For example, when each worker is launched in a GNOME terminal,
        this value should be 2, so that in a process tree of
        gnome-terminal -> bash -> interpreter, signals are delivered to the
        interpreter rather than to the terminal wrapper.
    """
if termination_notice_secs is None:
termination_notice_secs = lp_flags.LP_TERMINATION_NOTICE_SECS.value
if termination_notice_secs < 0:
raise ValueError('termination_notice_secs must be >= 0.')
self._termination_notice_secs = termination_notice_secs
if kill_all_upon_sigint and kill_workers_upon_sigint:
raise ValueError(
'Only one of kill_all_upon_sigint and kill_workers_upon_sigint can '
'be True.')
if kill_all_upon_sigint:
_register_signal_handler(signal.SIGINT,
functools.partial(self._handle_sigint, True))
elif kill_workers_upon_sigint:
_register_signal_handler(signal.SIGINT,
functools.partial(self._handle_sigint, False))
if handle_sigterm:
_register_signal_handler(
signal.SIGTERM, self._set_stop_event_and_terminate_process_workers)
self._stop_event = threading.Event()
self._thread_workers: MutableMapping[
str, List[ThreadWorker]] = collections.defaultdict(list)
self._process_workers: MutableMapping[
str, List[psutil.Process]] = collections.defaultdict(list)
self._mutex = threading.Lock()
if register_in_thread:
_WORKER_MANAGERS.manager = self
self._process_tree_depth = process_tree_depth
@property
def stop_event(self):
return self._stop_event
def wait_for_stop(self, timeout_secs: Optional[float] = None):
"""Blocks until managed runtime is terminating or timeout is reached."""
return self._stop_event.wait(timeout_secs)
def thread_worker(self, name: str, function: Callable[[], Any]):
"""Registers and starts a new thread worker.
Args:
name: Name of the worker group.
function: Entrypoint function to execute in a worker.
"""
future = futures.Future()
def run_inner(f=function, future=future, manager=self):
_WORKER_MANAGERS.manager = manager
try:
future.set_result(f())
except BaseException as e:
future.set_exception(e)
builder = lambda t, n: threading.Thread(target=t, name=n)
thread = builder(run_inner, name)
    thread.daemon = True
thread.start()
with self._mutex:
self._thread_workers[name].append(ThreadWorker(thread, future))
def register_existing_process(self, name: str, pid: int):
"""Registers already started worker process.
Args:
name: Name of the workers' group.
pid: Pid of the process to monitor.
"""
with self._mutex:
self._process_workers[name].append(psutil.Process(pid))
def process_worker(self,
name,
command,
env: Optional[Mapping[str, Any]] = None,
**kwargs):
"""Adds process worker to the runtime.
Args:
name: Name of the worker's group.
command: Command to execute in the worker.
env: Environment variables to set for the worker.
**kwargs: Other parameters to be passed to `subprocess.Popen`.
"""
with self._mutex:
process = subprocess.Popen(command, env=env or {}, **kwargs)
self._process_workers[name].append(psutil.Process(process.pid))
def _has_active_workers(self):
_, has_active_workers = self._update_and_get_recently_finished()
return has_active_workers
def _update_and_get_recently_finished(
self) -> Tuple[List[futures.Future[Any]], bool]:
"""Update self._thread_workers and return a tuple representing the change.
This will update self._thread_workers so that it only contains active
workers.
Returns:
A tuple. The first element of the tuple are futures for recently finished
workers, and the second is a bool indicating if there are still active
workers.
"""
recently_finished = []
has_active_workers = False
with self._mutex:
active_threads = collections.defaultdict(list)
for label in self._thread_workers:
for worker in self._thread_workers[label]:
if worker.thread.is_alive():
active_threads[label].append(worker)
has_active_workers = True
else:
recently_finished.append(worker.future)
self._thread_workers = active_threads
active_processes = collections.defaultdict(list)
for label, processes in self._process_workers.items():
for process in processes:
if process.is_running() and process.status() != psutil.STATUS_ZOMBIE:
has_active_workers = True
active_processes[label].append(process)
else:
future = futures.Future()
res = process.wait()
if res and not self._stop_event.is_set():
# Here we make sure stop_event hasn't been set yet, before we
# propagate the non-zero exit code. This is because when we
# forcefully terminate the program (e.g., due to `lp.stop()`),
# some subprocesses might have non-zero exit code.
future.set_exception(
RuntimeError(f'A "{label}" worker exited with code {res}.'))
else:
future.set_result(None)
recently_finished.append(future)
self._process_workers = active_processes
return recently_finished, has_active_workers
def check_for_thread_worker_exception(self):
"""Raises an error if there's an exception in one of the workers."""
recently_finished, _ = self._update_and_get_recently_finished()
for future in recently_finished:
future.result()
def wait(self, labels_to_wait_for: Optional[Sequence[str]] = None):
"""Waits until all thread workers finish. Raises errors if any.
Args:
labels_to_wait_for: labels of the workers to wait for. If None, wait for
all workers.
"""
has_active_worker = True
while has_active_worker:
try:
has_active_worker = False
# Will raise errors, if any.
self.check_for_thread_worker_exception()
with self._mutex:
# check_for_thread_worker_exception() will update self._thread_workers
# so that it only contains active workers. If there are still
# non-empty lists, it means some workers have not finished yet.
for label, workers in self._thread_workers.items():
if labels_to_wait_for and label not in labels_to_wait_for:
continue
if workers:
has_active_worker = True
break
for label, processes in self._process_workers.items():
if labels_to_wait_for and label not in labels_to_wait_for:
continue
if processes:
has_active_worker = True
break
time.sleep(0.1)
except KeyboardInterrupt:
pass
def _set_stop_event_and_terminate_process_workers(
self, sig: signal.Signals = signal.SIGTERM):
self._stop_event.set()
for _, processes in self._process_workers.items():
_send_signal_to_processes_with_depth(processes, sig,
self._process_tree_depth)
def _handle_sigint(self, kill_main_process: bool):
"""Handles SIGINT.
This does the following:
1. Set the stop event. Nodes can listen to the stop event and perform
cleanup actions.
2. Wait for termination_notice_secs (specified from __init__()`), since
the workers might need some time for cleanup.
3. SIGKILL the remaining workers.
if kill_main_process=True, also kill the main process. This should be set
True for local_mp, but not for test_mp as it will fail the test case.
Args:
kill_main_process: whether or not to kill the main process after killing
all the subprocesses.
"""
print(
termcolor.colored('User-requested termination. Asking workers to stop.',
'blue'))
# Notify all the thread workers.
self._stop_event.set()
# Notify all the process workers.
for _, processes in self._process_workers.items():
_send_signal_to_processes_with_depth(processes, signal.SIGTERM,
self._process_tree_depth)
def _force_stop(kill_main_process=kill_main_process):
# Since we are forcefully stopping the system, we send signals to all
# levels of the process trees. This makes sure to kill
# tmux/gnome-terminal/etc, processes that create the Launchpad
# subprocesses.
for _, processes in self._process_workers.items():
for process in processes:
for child in process.children(recursive=True):
child.send_signal(signal.SIGKILL)
process.send_signal(signal.SIGKILL)
if kill_main_process:
signal.raise_signal(signal.SIGKILL)
if self._termination_notice_secs > 0:
print(termcolor.colored('Press CTRL+C to terminate immediately.', 'blue'))
_register_signal_handler(signal.SIGINT, _force_stop)
pending_secs = self._termination_notice_secs
while self._has_active_workers() and pending_secs:
print(
termcolor.colored(
f'Waiting for workers to stop for {pending_secs}s.', 'blue'),
end='\r')
time.sleep(1)
pending_secs -= 1
if self._has_active_workers():
print(termcolor.colored('\nKilling entire runtime.', 'blue'))
_force_stop()
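# --- Illustrative usage sketch (added for documentation; not part of the
# --- original module). The worker body is a hypothetical placeholder.
def _example_worker_manager():  # pragma: no cover
  """Sketches running thread workers and stopping them cooperatively."""
  manager = WorkerManager(register_in_thread=True)

  def worker_body():
    # Perform periodic work until termination is requested.
    while not manager.wait_for_stop(timeout_secs=1.0):
      pass  # ... one unit of work ...

  manager.thread_worker('worker', worker_body)
  manager.thread_worker('worker', worker_body)
  # Request termination, then block until all workers finish; an exception
  # raised inside a worker would be re-raised here.
  manager.stop_event.set()
  manager.wait(['worker'])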
| launchpad-master | launchpad/launch/worker_manager_v2.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| launchpad-master | launchpad/launch/__init__.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for launchpad.launch.signal_handling."""
import os
import signal
from absl.testing import absltest
from launchpad.launch import signal_handling
import mock
from six.moves import range
class SignalHandlingTest(absltest.TestCase):
@mock.patch('sys.exit')
def test_can_intercept_sigint(self, exit_mock):
signal_handling.exit_gracefully_on_sigint()
os.kill(os.getpid(), signal.SIGINT)
exit_mock.assert_called_once_with(0)
@mock.patch('sys.exit')
def test_can_intercept_sigquit(self, exit_mock):
signal_handling.exit_gracefully_on_sigquit()
os.kill(os.getpid(), signal.SIGQUIT)
exit_mock.assert_called_once_with(0)
@mock.patch('sys.exit')
def test_ignore_multiple_sigints(self, exit_mock):
signal_handling.exit_gracefully_on_sigint()
for _ in range(100):
os.kill(os.getpid(), signal.SIGINT)
exit_mock.assert_called_once_with(0)
if __name__ == '__main__':
absltest.main()
| launchpad-master | launchpad/launch/signal_handling_test.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""WorkerManager handles thread and process-based runtimes."""
import atexit
import collections
from concurrent import futures
import os
import signal
import subprocess
import sys
import threading
import time
import traceback
from typing import Optional, Sequence, Text
from absl import flags
from absl import logging
from absl.testing import absltest
from launchpad import flags as lp_flags
import psutil
import termcolor
FLAGS = flags.FLAGS
ThreadWorker = collections.namedtuple('ThreadWorker', ['thread', 'future'])
_WORKER_MANAGERS = threading.local()
_HAS_MAIN_MANAGER = False
_SIGNAL_HANDLERS = None
def _log_and_print_with_color(severity: int, message: str, *args) -> None:
logging.log(severity, message, *args)
if severity >= logging.ERROR:
color = 'red'
else:
color = 'blue'
print(termcolor.colored(message % args, color))
def get_worker_manager():
manager = getattr(_WORKER_MANAGERS, 'manager', None)
if not manager:
    raise RuntimeError('Worker manager is only available from the Launchpad\'s '
                       'program node thread.')
return manager
def _signal_dispatcher(sig, frame=None):
"""Dispatches a given signal to all registered handlers."""
if sig != signal.SIGALRM and sig != signal.SIGUSR1:
    # Notify user-registered stop handler(s) before other ones.
_signal_dispatcher(signal.SIGUSR1, frame)
dispatchers = _SIGNAL_HANDLERS[sig].copy()
if sig != signal.SIGALRM:
_SIGNAL_HANDLERS[sig].clear()
for dispatcher in dispatchers:
try:
try:
dispatcher(sig, frame)
except TypeError:
dispatcher() # pytype: disable=wrong-arg-count
except KeyboardInterrupt:
pass
def _register_signal_dispatcher(sig):
"""Registers signal dispatcher for a given signal type."""
assert sig not in _SIGNAL_HANDLERS
_SIGNAL_HANDLERS[sig] = set()
try:
old_signal = signal.signal(sig, _signal_dispatcher)
except ValueError:
logging.warning(
'Launchpad cannot register its signal handler. This is likely because '
'you are not running lp.launch() from the main thread. Launchpad will '
'NOT attempt to handle signal %s.', sig)
if callable(old_signal):
_SIGNAL_HANDLERS[sig].add(old_signal)
def _register_signal_handler(sig, handler):
"""Registers a signal handler."""
global _SIGNAL_HANDLERS
if _SIGNAL_HANDLERS is None:
_SIGNAL_HANDLERS = dict()
_register_signal_dispatcher(signal.SIGTERM)
_register_signal_dispatcher(signal.SIGQUIT)
_register_signal_dispatcher(signal.SIGINT)
_register_signal_dispatcher(signal.SIGALRM)
_SIGNAL_HANDLERS[signal.SIGUSR1] = set()
assert sig in _SIGNAL_HANDLERS
_SIGNAL_HANDLERS[sig].add(handler)
def _remove_signal_handler(sig, handler):
"""Unregisters a signal handler."""
if not _SIGNAL_HANDLERS:
return
try:
_SIGNAL_HANDLERS[sig].remove(handler)
except KeyError:
pass
def register_stop_handler(handler):
"""Registers a stop handler, which is called upon program termination.
  A stop handler can also be registered outside of the program's execution
  scope. It is guaranteed that the handler will be called at most once.
Args:
handler: Handler to be called.
"""
_register_signal_handler(signal.SIGUSR1, handler)
def unregister_stop_handler(handler):
"""Unregisters a stop handler previously registered with register_stop_handler."""
_remove_signal_handler(signal.SIGUSR1, handler)
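# --- Illustrative usage sketch (added for documentation; not part of the
# --- original module). `_Server` is a hypothetical stand-in; assumes the code
# --- runs inside a PyNode thread where a WorkerManager is registered.
def _example_stop_handler():  # pragma: no cover
  """Sketches cleanup via a stop handler instead of blocking logic."""

  class _Server:  # Hypothetical resource with start/stop semantics.

    def start(self):
      pass

    def stop(self):
      pass

  server = _Server()

  def on_stop():
    # Called at most once, when termination of the program starts.
    server.stop()

  register_stop_handler(on_stop)
  try:
    server.start()
    wait_for_stop()  # Block until the program is being terminated.
  finally:
    unregister_stop_handler(on_stop)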
def wait_for_stop(timeout_secs: Optional[float] = None):
"""Blocks until termination of the node's program starts or timeout passes.
Args:
timeout_secs: Floating point number specifying a timeout for the operation,
in seconds. If not provided, timeout is infinite.
Returns:
True if program is being terminated, False if timeout was reached.
Usage examples:
- Perform cleanup at the end of the run:
start_server()
lp.wait_for_stop()
stop_server()
checkpoint()
- Perform some work until program is terminated:
while not lp.wait_for_stop(0): # Return immediately.
... do some work ...
- Perform some task every 5 seconds:
while not lp.wait_for_stop(5.0):
... perform periodic task ...
"""
return get_worker_manager().wait_for_stop(timeout_secs)
def stop_event() -> threading.Event:
"""Returns a threading.Event used to wait for termination signal on a Program.
Usage examples:
- Perform cleanup at the end of the run:
start_server()
lp.stop_event().wait()
stop_server()
checkpoint()
"""
return get_worker_manager().stop_event()
class WorkerManager:
"""Encapsulates running threads and processes of a Launchpad Program."""
def __init__(
self,
kill_main_thread=True,
register_in_thread=False,
):
"""Initializes a WorkerManager.
Args:
kill_main_thread: When True (the default), it will kill the current
        process (program launcher or the node) after killing all the
subprocesses, to guarantee complete cleanup of the program. Setting it
to False disables this behavior, which can be useful in test_mp, where
killing the main process causes a test to fail.
register_in_thread: Make the worker manager accessible through
`get_worker_manager()` in the current thread (needed by `stop_event()`
for example). It should be False if we don't need to access
        `get_worker_manager()`, e.g. at the launcher thread of local_mt and
local_mp.
"""
self._mutex = threading.Lock()
self._termination_notice_secs = -1
handle_user_stop = True
global _HAS_MAIN_MANAGER
if not _HAS_MAIN_MANAGER:
# This logic resolves the potential conflict between two WorkerManagers
# in the same process. In particular, only the first one will execute the
# "countdown-and-sigkill" logic upon Ctrl+C.
self._termination_notice_secs = FLAGS.lp_termination_notice_secs
_HAS_MAIN_MANAGER = True
self._active_workers = collections.defaultdict(list)
self._workers_count = collections.defaultdict(lambda: 0)
self._first_failure = None
self._stop_counter = 0
self._kill_main_thread = kill_main_thread
self._stop_event = threading.Event()
self._main_thread = threading.current_thread().ident
_register_signal_handler(signal.SIGTERM, self._sigterm)
_register_signal_handler(signal.SIGQUIT, self._sigquit)
if handle_user_stop:
_register_signal_handler(signal.SIGINT, self._stop_by_user)
if register_in_thread:
_WORKER_MANAGERS.manager = self
def _disable_signals(self):
self._disable_alarm()
_remove_signal_handler(signal.SIGTERM, self._sigterm)
_remove_signal_handler(signal.SIGQUIT, self._sigquit)
_remove_signal_handler(signal.SIGINT, self._stop_by_user)
def _sigterm(self):
"""Handles SIGTERM by stopping the workers."""
self._stop()
def _sigquit(self):
self._kill()
def wait_for_stop(self, timeout_secs: Optional[float] = None):
"""Blocks until managed runtime is terminating or timeout is reached."""
return self._stop_event.wait(timeout_secs)
def stop_event(self):
"""Returns an event used to wait for termination signal on a Program."""
return self._stop_event
def thread_worker(self, name, function):
"""Registers and starts a new thread worker.
Args:
name: Name of the worker group.
function: Entrypoint function to execute in a worker.
"""
with self._mutex:
future = futures.Future()
def run_inner(f=function, future=future, manager=self):
_WORKER_MANAGERS.manager = manager
try:
future.set_result(f())
except BaseException as e:
future.set_exception(e)
builder = lambda t, n: threading.Thread(target=t, name=n)
thread = builder(run_inner, name)
      thread.daemon = True
thread.start()
self._workers_count[name] += 1
worker = ThreadWorker(thread=thread, future=future)
self._active_workers[name].append(worker)
def process_worker(self, name, command, env=None, **kwargs):
"""Adds process worker to the runtime.
Args:
name: Name of the worker's group.
command: Command to execute in the worker.
env: Environment variables to set for the worker.
**kwargs: Other parameters to be passed to `subprocess.Popen`.
"""
with self._mutex:
process = subprocess.Popen(command, env=env or {}, **kwargs)
self._workers_count[name] += 1
self._active_workers[name].append(process)
def register_existing_process(self, name: str, pid: int):
"""Registers already started worker process.
Args:
name: Name of the workers' group.
pid: Pid of the process to monitor.
"""
with self._mutex:
self._workers_count[name] += 1
self._active_workers[name].append(psutil.Process(pid))
def _stop_by_user(self):
"""Handles stopping of the runtime by a user."""
_log_and_print_with_color(
logging.WARNING, 'User-requested termination. Asking workers to stop.')
if self._termination_notice_secs > 0:
_log_and_print_with_color(logging.INFO,
'Press CTRL+C to terminate immediately.')
if self._termination_notice_secs >= 0:
signal.signal(signal.SIGINT, lambda sig, frame: self._kill())
self._stop()
def _kill_process_tree(self, pid):
"""Kills all child processes of the current process."""
parent = psutil.Process(pid)
processes = [parent]
for process in parent.children(recursive=True):
processes.append(process)
for process in processes:
try:
process.send_signal(signal.SIGKILL)
except psutil.NoSuchProcess:
pass
def _kill(self):
"""Kills all workers (and main thread/process if needed)."""
_log_and_print_with_color(logging.ERROR, 'Killing entire runtime.')
kill_self = self._kill_main_thread
for workers in self._active_workers.values():
for worker in workers:
if isinstance(worker, ThreadWorker):
# Not possible to kill a thread without killing the process.
kill_self = True
else:
self._kill_process_tree(worker.pid)
if kill_self:
self._kill_process_tree(os.getpid())
def _stop_or_kill(self):
"""Stops all workers; kills them if they don't stop on time."""
pending_secs = self._termination_notice_secs - self._stop_counter
if pending_secs == 0:
if self._termination_notice_secs > 0:
still_running = [
label for label in self._active_workers
if self._active_workers[label]
]
_log_and_print_with_color(
logging.ERROR, 'Worker groups that did not terminate in time: %s',
still_running)
self._kill()
return
if pending_secs >= 0:
_log_and_print_with_color(logging.INFO,
'Waiting for workers to stop for %ds.',
pending_secs)
self._stop_counter += 1
# Notify ThreadWorkers which registered for notifications.
_signal_dispatcher(signal.SIGUSR1)
for workers in self._active_workers.values():
for worker in workers:
if isinstance(worker, ThreadWorker):
# Thread workers should use wait_for_stop or register_stop_handler.
pass
elif isinstance(worker, subprocess.Popen):
try:
worker.send_signal(signal.SIGTERM)
except psutil.NoSuchProcess:
pass
else:
# Notify all workers running under a proxy process.
children = worker.children(recursive=True)
worker_found = False
for process in children:
try:
process_name = process.name()
if process_name != 'bash' and 'envelope_' not in process_name:
worker_found = True
process.send_signal(signal.SIGTERM)
except psutil.NoSuchProcess:
pass
if not worker_found:
# No more workers running, so we can kill the proxy itself.
try:
worker.send_signal(signal.SIGKILL)
except psutil.NoSuchProcess:
pass
if pending_secs >= 0:
signal.alarm(1)
def _stop(self):
"""Requests all workers to stop and schedule delayed termination."""
self._stop_event.set()
try:
if self._termination_notice_secs > 0:
_register_signal_handler(signal.SIGALRM, self._stop_or_kill)
except ValueError:
# This happens when we attempt to register a signal handler but not in the
# main thread. Send a SIGTERM to redirect to the main thread.
psutil.Process(os.getpid()).send_signal(signal.SIGTERM)
return
self._stop_or_kill()
def _disable_alarm(self):
_remove_signal_handler(signal.SIGALRM, self._stop_or_kill)
signal.alarm(0)
def stop_and_wait(self):
"""Requests stopping all workers and wait for termination."""
with self._mutex:
self._stop()
self.wait(raise_error=False)
def join(self):
self.wait()
def wait(self,
labels_to_wait_for: Optional[Sequence[Text]] = None,
raise_error=True,
return_on_first_completed=False):
"""Waits for workers to finish. Also stops the program upon worker failures.
Args:
labels_to_wait_for: If supplied, only wait for these groups' workers to
finish. Wait for all workers otherwise.
raise_error: Raise an exception upon any worker failure.
return_on_first_completed: Whether to return upon the first completed (or
failed) worker.
Raises:
RuntimeError: if any worker raises an exception.
"""
active_workers = True
while active_workers:
with self._mutex:
self._check_workers()
active_workers = False
if self._first_failure and raise_error:
failure = self._first_failure
self._first_failure = None
raise failure
for label in labels_to_wait_for or self._active_workers.keys():
if self._active_workers[label]:
active_workers = True
if (return_on_first_completed and len(self._active_workers[label])
< self._workers_count[label]):
return
time.sleep(0.1)
def cleanup_after_test(self, test_case: absltest.TestCase):
"""Cleanups runtime after a test."""
del test_case
with self._mutex:
self._check_workers()
self._stop()
self._disable_signals()
self.wait(raise_error=False)
with self._mutex:
if self._first_failure:
raise self._first_failure
def check_for_thread_worker_exception(self):
with self._mutex:
for label in self._active_workers:
for worker in self._active_workers[label]:
if not worker.thread.is_alive():
worker.thread.join()
# This will raise the exception, if any.
worker.future.result()
def _check_workers(self):
"""Checks status of running workers, terminate runtime in case of errors.
This REQUIRES holding self._mutex.
"""
has_workers = False
for label in self._active_workers:
still_active = []
for worker in self._active_workers[label]:
active = True
if isinstance(worker, ThreadWorker):
if not worker.thread.is_alive():
worker.thread.join()
if not self._stop_counter:
try:
worker.future.result()
except BaseException as e:
if not self._first_failure and not self._stop_counter:
self._first_failure = e
_log_and_print_with_color(logging.ERROR,
'Node %s crashed: %s', worker,
traceback.format_exc())
active = False
elif isinstance(worker, subprocess.Popen):
try:
res = worker.wait(0)
active = False
if res and not self._first_failure and not self._stop_counter:
self._first_failure = RuntimeError(
f'One of the workers exited with code {res}.')
except subprocess.TimeoutExpired:
pass
else:
try:
# We can't obtain return code of external process, so clean
# termination is assumed.
res = worker.wait(0)
active = False
except psutil.TimeoutExpired:
pass
if active:
has_workers = True
still_active.append(worker)
self._active_workers[label] = still_active
if has_workers and self._first_failure and not self._stop_counter:
self._stop()
elif not has_workers:
self._disable_alarm()
def __del__(self):
try:
if sys.is_finalizing():
return
except AttributeError:
# AttributeError can be thrown when `sys` was already destroyed upon
# finalization.
return
self._disable_signals()
| launchpad-master | launchpad/launch/worker_manager.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Launch program in different ways based on FLAGS.lp_launch_type."""
import sys
import typing
from typing import Any, Dict, Mapping, Optional, Sequence, Tuple, Union, cast
from absl import flags
from absl import logging
from absl.testing import absltest
from launchpad import context
from launchpad import flags as lp_flags
from launchpad import program as lp_program
from launchpad.launch.local_multi_processing import launch as launch_local_multiprocessed
from launchpad.launch.local_multi_threading import launch as launch_local_multithreaded
from launchpad.launch.test_multi_processing import launch as launch_test_multiprocessed
from launchpad.launch.test_multi_threading import launch as launch_test_multithreaded
FLAGS = flags.FLAGS
def launch(
programs: Union[lp_program.Program, Sequence[lp_program.Program],
],
launch_type: Optional[Union[context.LaunchType, str]] = None,
xm_resources: Optional[Union[Dict[str, Any], Sequence[Dict[str,
Any]]]] = None,
local_resources: Optional[Dict[str, Any]] = None,
test_case: Optional[absltest.TestCase] = None,
terminal: Optional[str] = None,
*,
serialize_py_nodes: Optional[bool] = None,
controller_xm_requirements: Optional[Mapping[str, Any]] = None,
) -> Any:
"""Launches a Launchpad program.
Args:
    programs: One or more programs to launch.
    launch_type: Type of launch. If this is None, it is read from
      FLAGS.lp_launch_type. See the definition of context.LaunchType for the
      valid choices. The benefit of setting it to None is that the launch type
      can be controlled from the command line (by passing --lp_launch_type=...).
    xm_resources: (for XManager-based launches such as Vertex AI) Per-node
      resource configuration forwarded to the XManager launcher.
    local_resources: (for local/test multiprocessing launch) A dictionary to
      specify per-node launch configuration.
    test_case: (for test multiprocessing launch) test case in which the program
      is launched.
    terminal: (for local multiprocessing launch) Terminal to use to run the
      commands. Valid choices are gnome-terminal, gnome-terminal-tabs, xterm,
      tmux_session, current_terminal, and output_to_files.
    serialize_py_nodes: If `True`, `local_mt` & `test_mt` will fail if the nodes
      are not serializable. This can be useful to debug xmanager experiments in
      tests or locally. If `False`, the nodes won't be serialized. If `None`
      (the default), the launcher's own default behavior is used.
  Returns:
    Whatever the specific launcher returns.
"""
# Make sure that flags are parsed before launching the program. Not all users
# parse the flags.
if not FLAGS.is_parsed():
FLAGS(sys.argv, known_only=True)
launch_type = launch_type or lp_flags.LAUNCH_TYPE.value
if isinstance(launch_type, str):
launch_type = context.LaunchType(launch_type)
if not isinstance(programs, Sequence):
programs = cast(Sequence[lp_program.Program], [programs])
if len(programs) > 1:
writer = print
writer(
'Multiple programs are provided but launch type is {}. Launching only '
'the first program...'.format(launch_type))
program = programs[0]
if launch_type is context.LaunchType.LOCAL_MULTI_THREADING:
return launch_local_multithreaded.launch(
program, serialize_py_nodes=serialize_py_nodes)
elif launch_type is context.LaunchType.LOCAL_MULTI_PROCESSING:
return launch_local_multiprocessed.launch(program, local_resources,
terminal)
elif launch_type is context.LaunchType.VERTEX_AI:
from launchpad.launch.xm_docker import launch as launch_xm_docker
return launch_xm_docker.launch(program, context.LaunchType.VERTEX_AI,
xm_resources)
elif launch_type is context.LaunchType.TEST_MULTI_THREADING:
return launch_test_multithreaded.launch(
program, test_case=test_case, serialize_py_nodes=serialize_py_nodes)
elif launch_type is context.LaunchType.TEST_MULTI_PROCESSING:
assert test_case is not None
return launch_test_multiprocessed.launch(
program, test_case=test_case, local_resources=local_resources)
else:
logging.fatal('Unknown launch type: %s', launch_type)
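# --- Illustrative usage sketch (added for documentation; not part of the
# --- original module). The node body is a hypothetical placeholder.
def _example_launch():  # pragma: no cover
  """Sketches building a one-node program and launching it locally."""
  import launchpad as lp  # Local import; only needed for this sketch.

  def hello():
    print('hello from a PyNode')

  program = lp.Program('hello_world')
  program.add_node(lp.PyNode(hello), label='hello')
  # launch_type can also be left as None and picked via --lp_launch_type.
  lp.launch(program, launch_type='local_mt')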
| launchpad-master | launchpad/launch/launch.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities to redirect calls to both versions of WorkerManager."""
from typing import Optional, Union
from launchpad import flags as lp_flags
from launchpad.launch import worker_manager
from launchpad.launch import worker_manager_v2
def wait_for_stop(timeout_secs: Optional[float] = None) -> bool:
if lp_flags.LP_WORKER_MANAGER_V2.value:
return worker_manager_v2.wait_for_stop(timeout_secs)
else:
return worker_manager.wait_for_stop(timeout_secs)
def get_worker_manager(
) -> Union[worker_manager.WorkerManager, worker_manager_v2.WorkerManager]:
if lp_flags.LP_WORKER_MANAGER_V2.value:
return worker_manager_v2.get_worker_manager()
else:
return worker_manager.get_worker_manager()
def register_stop_handler(handler):
if lp_flags.LP_WORKER_MANAGER_V2.value:
worker_manager_v2.get_worker_manager().register_stop_handler(handler)
else:
return worker_manager.register_stop_handler(handler)
def unregister_stop_handler(handler):
if lp_flags.LP_WORKER_MANAGER_V2.value:
worker_manager_v2.get_worker_manager().unregister_stop_handler(handler)
else:
return worker_manager.unregister_stop_handler(handler)
def stop_event():
if lp_flags.LP_WORKER_MANAGER_V2.value:
return worker_manager_v2.get_worker_manager().stop_event
else:
return worker_manager.get_worker_manager().stop_event()
| launchpad-master | launchpad/launch/worker_manager_migration.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Signal handling utilities for use when running LaunchPad interactively."""
import atexit
import signal
import sys
from typing import Callable
tty_write = print
def _exit_gracefully(signum, unused_frame):
if signum == signal.SIGINT:
# Ignore subsequent SIGINTs in order to prevent the exit handlers from being
# interrupted during cleanup if CTRL+C is pressed multiple times.
signal.signal(signal.SIGINT, signal.SIG_IGN)
tty_write('Control-C pressed. Exiting ...\n')
elif signum == signal.SIGQUIT:
tty_write('SIGQUIT received. Exiting ...\n')
sys.exit(0)
def exit_gracefully_on_sigint():
signal.signal(signal.SIGINT, _exit_gracefully)
def exit_gracefully_on_sigquit():
signal.signal(signal.SIGQUIT, _exit_gracefully)
def register_exit_handler(handler: Callable[..., None], *args, **kargs):
"""Register an exit handler."""
return atexit.register(handler, *args, **kargs)
def register_exit_handler_and_exit_gracefully_on_sigint(handler: Callable[...,
None],
*args, **kargs):
"""Register an exit handler and gracefully handle SIGINT."""
exit_handler = register_exit_handler(handler, *args, **kargs)
exit_gracefully_on_sigint()
return exit_handler
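# --- Illustrative usage sketch (added for documentation; not part of the
# --- original module). The cleanup function and path are hypothetical.
def _example_exit_handling():  # pragma: no cover
  """Sketches registering cleanup that also runs after Ctrl+C."""

  def cleanup(path):
    print('removing temporary state at', path)

  # `cleanup('/tmp/state')` runs at normal interpreter exit; SIGINT (Ctrl+C)
  # is converted into a graceful `sys.exit(0)`, so atexit handlers still run.
  register_exit_handler_and_exit_gracefully_on_sigint(cleanup, '/tmp/state')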
| launchpad-master | launchpad/launch/signal_handling.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests & test utils for serialization.py."""
import functools
import threading
from absl.testing import absltest
import cloudpickle
from launchpad import program as lp_program
from launchpad.launch import serialization
from launchpad.nodes.python import node as python
class NotSerializableNode(python.PyNode):
def __init__(self):
self._event = threading.Event() # Not serializable
def __call__(self):
return
@absltest.skip('base class')
class ErrorOnSerializationMixin(absltest.TestCase):
"""A mixin class to be used in tests for launch code using `serialization`."""
@property
def _launch(self):
    raise NotImplementedError('Any child class should override _launch.')
def _get_program(self):
program = lp_program.Program('quick_stop')
    program.add_node(python.PyNode(NotSerializableNode()), label='my_node')
return program
def test_raises_error_on_serialize_py_nodes(self):
program = self._get_program()
with self.assertRaisesRegex(
RuntimeError,
"The nodes associated to the label 'my_node'"
):
self._launch(program, test_case=self, serialize_py_nodes=True).wait()
def test_serialize_py_nodes_is_false(self):
program = self._get_program()
self._launch(program, test_case=self, serialize_py_nodes=False).wait()
class SerializationTest(absltest.TestCase):
def test_quick_stop(self):
    nodes = [python.PyNode(NotSerializableNode())]
with self.assertRaisesRegex(RuntimeError, 'The nodes associated to'):
serialization.check_nodes_are_serializable('my_node', nodes)
def test_lru_cache(self):
serialization.enable_lru_cache_pickling_once()
call_count = [0]
@functools.lru_cache(maxsize=1)
def increase_call_count():
call_count[0] += 1
return call_count[0]
f0, f1 = cloudpickle.loads(cloudpickle.dumps([increase_call_count] * 2))
f0()
f1()
self.assertEqual(f0(), 1)
self.assertEqual(f1(), 1)
if __name__ == '__main__':
absltest.main()
| launchpad-master | launchpad/launch/serialization_test.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A utility to serialize nodes and raise an error if they are not serializable.
Launch configurations that use this feature can be tested with:
```
from launchpad.launch import serialization_test
class SerializationTest(serialization_test.ErrorOnSerializationMixin):
@property
def _launch(self):
return launch.launch
```
"""
import copyreg
import functools
from absl import flags
import cloudpickle
from launchpad import flags as lp_flags
FLAGS = flags.FLAGS
@functools.lru_cache(maxsize=1)
def enable_lru_cache_pickling_once():
"""Enables pickling for functools.lru_cache."""
lru_cache_type = type(functools.lru_cache()(lambda: None))
def new_lru_cache(func, cache_kwargs):
return functools.lru_cache(**cache_kwargs)(func)
def _pickle_lru_cache(obj):
params = {}
if hasattr(obj, "cache_parameters"):
params = obj.cache_parameters()
return new_lru_cache, (obj.__wrapped__, params)
copyreg.pickle(lru_cache_type, _pickle_lru_cache)
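# Illustrative sketch (not part of the original module): once the reducer
# above is registered, functools.lru_cache-wrapped functions survive a
# cloudpickle round trip; the restored wrapper starts with an empty cache.
def _example_lru_cache_pickling():
  enable_lru_cache_pickling_once()
  @functools.lru_cache(maxsize=None)
  def square(x):
    return x * x
  # Without the copyreg registration above, cloudpickle would typically fail
  # to pickle the lru_cache wrapper type.
  restored = cloudpickle.loads(cloudpickle.dumps(square))
  assert restored(3) == 9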
def _cloudpickle_dump_with_user_friendly_error(functions,
description: str,
file=None):
"""Serializes functions, and throws user-friendly error upon failure."""
try:
if file:
return cloudpickle.dump(functions, file)
else:
return cloudpickle.dumps(functions)
except Exception as e:
# When using `pdb`, we want to be able to go up the stack that goes into
# the serialization error, not through the call-stack up to functions like
# `check_nodes_are_serializable`. Thus, we need to propagate the traceback.
raise RuntimeError(
str(e.__class__.__name__) + ": " + str(e) + "\n"
f"The nodes associated to {description} were "
"not serializable using cloudpickle. Make them picklable, or pass "
"`serialize_py_nodes=False` to `lp.launch` if you want to disable this "
"check, for example when you want to use FLAGS, mocks, threading.Event "
"etc, in your node definition."
).with_traceback(e.__traceback__) from e
def check_nodes_are_serializable(label, nodes):
"""Raises an exception if some `PyNode` objects are not serializable."""
enable_lru_cache_pickling_once()
# We only try to serialize `PyNode` objects (as they are the only nodes for
# which the default implementation of `to_executables` will do serialization
# of `node.function`).
functions = [node.function for node in nodes if hasattr(node, "function")]
  _cloudpickle_dump_with_user_friendly_error(functions,
                                             f"{label} ({type(nodes[0])})")
def serialize_functions(data_file_path: str, description: str, functions):
"""Serializes into a file at path `data_file_path` for PyNode functions.
Args:
data_file_path: The path of the (local) file to write to.
    description: Describes the functions, e.g., the label of the group they
      belong to. This is propagated to enrich the error message.
functions: PyNode functions as a list or list-like object.
"""
enable_lru_cache_pickling_once()
with open(data_file_path, "wb") as f:
_cloudpickle_dump_with_user_friendly_error(functions, description, f)
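# Illustrative sketch (not part of the original module): how a launcher might
# use the helpers above. The node function and output path are hypothetical;
# the real call sites live in the per-launch-type implementations.
def _example_usage():
  from launchpad.nodes.python import node as python_node
  nodes = [python_node.PyNode(lambda: print('hello'))]
  # Fail fast with a readable RuntimeError if any node function cannot be
  # pickled with cloudpickle.
  check_nodes_are_serializable('my_group', nodes)
  # Alternatively, write the pickled functions to a file for a worker process
  # to load later.
  serialize_functions('/tmp/my_group_functions.pkl', "the label 'my_group'",
                      [node.function for node in nodes])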
| launchpad-master | launchpad/launch/serialization.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for launchpad.launch.worker_manager."""
import os
import signal
import threading
import time
from absl.testing import absltest
from launchpad.launch import worker_manager
import mock
class WorkerManagerTest(absltest.TestCase):
def setUp(self):
super().setUp()
worker_manager._HAS_MAIN_MANAGER = False
self._sigterm_patcher = mock.patch.object(
signal, 'SIGTERM', new=signal.SIGUSR1)
self._sigterm_patcher.start()
self._sigint_patcher = mock.patch.object(
signal, 'SIGINT', new=signal.SIGUSR2)
self._sigint_patcher.start()
self._manager = worker_manager.WorkerManager()
self.addCleanup(self._manager.cleanup_after_test, self)
def tearDown(self):
self._sigterm_patcher.stop()
self._sigint_patcher.stop()
super().tearDown()
def test_wait_for_stop(self):
def waiter():
self.assertTrue(self._manager.wait_for_stop())
self._manager.thread_worker('worker', waiter)
os.kill(os.getpid(), signal.SIGTERM)
self.assertTrue(self._manager.wait_for_stop())
def test_stop_event(self):
def waiter():
self.assertTrue(self._manager.stop_event().wait())
self._manager.thread_worker('worker', waiter)
os.kill(os.getpid(), signal.SIGTERM)
self.assertTrue(self._manager.wait_for_stop())
def test_wait_for_stop_timeout(self):
checks_done = threading.Event()
def waiter():
self.assertFalse(self._manager.wait_for_stop(0))
self.assertFalse(self._manager.wait_for_stop(0.1))
checks_done.set()
self.assertTrue(self._manager.wait_for_stop(10))
self._manager.thread_worker('worker', waiter)
self.assertTrue(checks_done.wait())
os.kill(os.getpid(), signal.SIGTERM)
self.assertTrue(self._manager.wait_for_stop())
def test_slow_termination(self):
def waiter():
self.assertTrue(self._manager.wait_for_stop())
time.sleep(1)
self._manager.thread_worker('worker', waiter)
os.kill(os.getpid(), signal.SIGTERM)
self.assertTrue(self._manager.wait_for_stop())
def test_system_exit(self):
def waiter():
self.assertTrue(self._manager.wait_for_stop(100.0))
self._manager.thread_worker('worker', waiter)
os.kill(os.getpid(), signal.SIGTERM)
self.assertTrue(self._manager.wait_for_stop())
def test_stop_and_wait(self):
def waiter():
self.assertTrue(self._manager.wait_for_stop())
self._manager.thread_worker('worker1', waiter)
self._manager.thread_worker('worker2', waiter)
self._manager.thread_worker('worker3', waiter)
self._manager.stop_and_wait()
def test_failure_wait(self):
def waiter():
self.assertTrue(self._manager.wait_for_stop())
def failure():
raise Exception('Error')
self._manager.thread_worker('waiter', waiter)
self._manager.thread_worker('failure', failure)
    with self.assertRaisesRegex(Exception, 'Error'):
self._manager.wait(['waiter'])
self._manager.wait()
def test_return_on_first_completed(self):
def waiter():
self.assertTrue(self._manager.wait_for_stop())
def worker():
pass
self._manager.thread_worker('waiter', waiter)
self._manager.thread_worker('worker', worker)
self._manager.wait(return_on_first_completed=True)
def test_dont_raise_error(self):
def failure():
raise Exception('Error')
self._manager.thread_worker('failure', failure)
self._manager.wait(raise_error=False)
    with self.assertRaisesRegex(Exception, 'Error'):
self._manager.wait()
def test_process_worker_stop(self):
self._manager.process_worker('sleep', ['sleep', '3600'])
self._manager.stop_and_wait()
def test_process_worker_failure(self):
self._manager.process_worker('failure', ['cat', 'missing_file'])
    with self.assertRaisesRegex(RuntimeError, 'One of the workers exited'):
self._manager.wait()
def test_two_worker_managers(self):
another_manager = worker_manager.WorkerManager()
self.assertGreater(self._manager._termination_notice_secs, 0)
# The other WM won't duplicate the "countdown-and-sigkill" logic.
self.assertEqual(another_manager._termination_notice_secs, -1)
self.assertIn(self._manager._stop_by_user,
worker_manager._SIGNAL_HANDLERS[signal.SIGINT])
# The other WM will still be able to handle signal correctly.
self.assertIn(another_manager._stop_by_user,
worker_manager._SIGNAL_HANDLERS[signal.SIGINT])
if __name__ == '__main__':
absltest.main()
| launchpad-master | launchpad/launch/worker_manager_test.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| launchpad-master | launchpad/launch/xm_docker/__init__.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Launches a Launchpad program using local docker containers via XManager."""
import collections
import hashlib
from typing import Any, Dict, Optional
from absl import logging
from google.auth import exceptions as google_auth_exceptions
from launchpad import context
from launchpad import program as lp_program
from launchpad.launch import signal_handling
import termcolor
from xmanager import xm
from xmanager import xm_local
def launch(program: lp_program.Program,
launch_type: context.LaunchType,
xm_resources: Optional[Dict[str, Any]] = None):
"""Launches a program using local docker containers via XManager."""
# Set up the launch context (launch type & launch config) for all nodes
xm_resources = xm_resources or {}
for label, nodes in program.groups.items():
for node in nodes:
if label in xm_resources:
launch_config = xm_resources[label]
else:
# If launch config not specified, try to use a default.
launch_config = node.default_launch_config(launch_type)
xm_resources[label] = launch_config
node._initialize_context(
launch_type, launch_config=launch_config)
# Notify the input handles
for label, nodes in program.groups.items():
for node in nodes:
for handle in node.input_handles:
handle.connect(node, label)
# Vertex AI supports only 4 worker pools, so we group nodes with the same
# requirements.
nodes_by_container = collections.defaultdict(list)
for label, nodes in program.groups.items():
launch_config = nodes[0].launch_context.launch_config
hash_value = hashlib.md5()
hash_value.update((launch_config.code_directory).encode())
hash_value.update((launch_config.docker_requirements).encode())
hash_value.update(str(launch_config.hw_requirements).encode())
nodes_by_container[hash_value.hexdigest()].extend([
(node, label) for node in nodes
])
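  # For illustration: groups whose launch configs share the same code
  # directory, requirements file and hardware requirements hash to the same
  # key and share a worker pool; a group with different hardware requirements
  # gets a pool of its own.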
  # Vertex AI requires the first worker pool to have exactly 1 replica, so we
  # promote an existing single-node group to the front if there is one, or
  # else split one node off the first group into its own pool.
nodes_for_jobs = list(nodes_by_container.values())
for index, nodes in enumerate(nodes_for_jobs):
if len(nodes) == 1:
      nodes_for_jobs = ([nodes_for_jobs[index]] + nodes_for_jobs[:index] +
                        nodes_for_jobs[index + 1:])
break
if len(nodes_for_jobs[0]) != 1:
nodes_for_jobs.append(nodes_for_jobs[0][1:])
nodes_for_jobs[0] = [nodes_for_jobs[0][0]]
# Make sure there are at most 4 worker pools (required by Vertex AI).
cluster_names = ['workerpool0', 'workerpool1', 'workerpool2', 'workerpool3']
if len(nodes_for_jobs) > len(cluster_names):
raise RuntimeError((
        'Too many nodes with different requirements specified. '
        f'Vertex AI supports up to {len(cluster_names)} types.'
))
# Bind addresses
for index, nodes_with_labels in enumerate(nodes_for_jobs):
for instance, (node, label) in enumerate(nodes_with_labels):
node.bind_addresses(cluster=cluster_names[index], instance=instance)
containers = []
for index, nodes_with_labels in enumerate(nodes_for_jobs):
nodes = [node for (node, label) in nodes_with_labels]
    # Build the Docker executable for this group of nodes.
    # to_executables() is a static method, so we can call it from any of the
    # nodes in this group.
    # pytype: disable=wrong-arg-count
    # With this launch type, to_executables() maps to to_docker_executables.
docker_executables = nodes[0].to_executables(nodes, cluster_names[index],
nodes[0].launch_context)
assert len(docker_executables) == 1
containers.append(docker_executables[0])
# pytype: enable=wrong-arg-count
signal_handling.exit_gracefully_on_sigint()
signal_handling.exit_gracefully_on_sigquit()
with xm_local.create_experiment(experiment_title=program.name) as experiment:
jobs = {}
job_id = 0
for executable_spec, requirements in containers:
if launch_type == context.LaunchType.VERTEX_AI:
executor = xm_local.Caip(requirements=requirements)
executor_spec = xm_local.Caip.Spec()
else:
logging.fatal('Unknown launch type: %s', launch_type)
try:
[executable] = experiment.package([
xm.Packageable(
executable_spec=executable_spec,
executor_spec=executor_spec
),
])
except google_auth_exceptions.DefaultCredentialsError:
raise google_auth_exceptions.DefaultCredentialsError(
'GCP project seems not to be configured correctly. Please follow'
' instructions at '
'https://github.com/deepmind/xmanager#create-a-gcp-project.')
job_id += 1
jobs[str(job_id)] = xm.Job(executable=executable, executor=executor)
experiment.add(xm.JobGroup(**jobs))
print(termcolor.colored('Program launched successfully.', 'blue'))
  print(
      termcolor.colored('Mapping of node names to Vertex AI worker pools:',
                        'blue'))
# Print nodes' labels mapping to the worker pool names.
def _name_range(name: str, start_idx: int, count: int):
if count == 1:
return f'{name}-{start_idx}'
return f'{name}-[{start_idx}:{start_idx+count}]'
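  # For illustration: _name_range('actor', 0, 1) -> 'actor-0', while
  # _name_range('actor', 0, 4) -> 'actor-[0:4]' (a half-open index range).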
node_index = collections.defaultdict(int)
for cluster_index, nodes in enumerate(nodes_for_jobs):
node_count = 0
for i, (node, label) in enumerate(nodes):
node_count += 1
if i == len(nodes) - 1 or label != nodes[i+1][1]:
start_idx = node_index[label]
label_range = _name_range(label, start_idx, node_count)
worker_range = _name_range(cluster_names[cluster_index],
i - node_count + 1, node_count)
print(termcolor.colored(f'{label_range} -> {worker_range}', 'blue'))
node_index[label] += node_count
node_count = 0
| launchpad-master | launchpad/launch/xm_docker/launch.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for launchpad.launch.xm_docker.launch."""
import os
from absl.testing import absltest
import launchpad as lp
from launchpad.nodes.python import xm_docker
class HelloWorld:
def __init__(self) -> None:
pass
def run(self) -> None:
print('Hello World!!!')
def make_program() -> lp.Program:
program = lp.Program('hello_world')
node = lp.PyClassNode(HelloWorld)
program.add_node(node, label='hello_printer')
return program
class LaunchTest(absltest.TestCase):
def test_launch(self):
os.environ['GOOGLE_CLOUD_BUCKET_NAME'] = 'lpbucket'
launchpad_dir = os.path.dirname(
os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
docker_requirements = os.path.join(
launchpad_dir, 'examples/consumer_producers/requirements.txt')
docker_config = xm_docker.DockerConfig(launchpad_dir, docker_requirements)
resources = {'hello_printer': docker_config}
program = make_program()
# Launch should fail accessing GCP.
exception_msg = (
'Request failed|404|The specified bucket does not exist|CP project '
'seems not to be configured correctly'
# We allow digit-only messages, assuming this is a KeyError as reported
# in b/241629570. Should be fixed together with
# https://github.com/python/cpython/issues/91351.
r'|\d+')
with self.assertRaisesRegex(Exception, exception_msg):
lp.launch(
program,
launch_type=lp.LaunchType.VERTEX_AI,
xm_resources=resources)
if __name__ == '__main__':
absltest.main()
| launchpad-master | launchpad/launch/xm_docker/launch_test.py |
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| launchpad-master | launchpad/launch/test_multi_processing/__init__.py |