# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf2jax."""
import contextlib
from absl.testing import parameterized
import chex
import jax
from jax.experimental import checkify
import numpy as np
import tensorflow as tf
from tf2jax._src import config
from tf2jax._src import ops
from tf2jax._src import test_util
from tf2jax._src import tf2jax
import tree
def _reorder(vals, inds):
  # Permute `vals` according to `inds`, e.g. to reorder NHWC strides/kernel
  # sizes into NCHW order.
  return [vals[idx] for idx in inds]
class OpsTest(test_util.TestCase):
def test_get_unsupported(self):
unsupported = ops.get_unsupported_operations(
["Add", "Relu", "NotAnOp", "Blah", "Relu"])
self.assertEqual(unsupported, {"NotAnOp", "Blah"})
def _assert_if_jitted(self, err):
jitted = self.variant.type == chex.ChexVariantType.WITH_JIT
return self.assertRaises(err) if jitted else contextlib.nullcontext()
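  # `_test_convert` is the main harness used by nearly every test below: it
  # wraps `tf_func` in a jit-compiled tf.function if needed, converts it with
  # tf2jax.convert, applies the chex variant (optionally jax.jit), then runs
  # both the converted and the original function on the same inputs and
  # compares output shapes and (unless check_shape_only=True) values.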
def _test_convert(self,
tf_func,
inputs,
*,
check_shape_only=False,
functional=True,
atol=1e-5):
if not isinstance(inputs, (list, tuple)):
inputs = (inputs,)
if not hasattr(tf_func, "get_concrete_function"):
tf_func = tf.function(tf_func, jit_compile=True)
jax_func, jax_params = tf2jax.convert(
tf_func, *tree.map_structure(np.zeros_like, inputs))
if functional:
self.assertEmpty(jax_params, "Expected no parameters for pure Ops.")
jax_func = self.variant(jax_func)
rng = jax.random.PRNGKey(42)
jax_results, new_jax_params = jax_func(jax_params, *inputs, rng=rng)
tf_results = tf_func(*inputs)
# Check outputs
for tf_res, jax_res in jax.util.safe_zip(
tree.flatten(tf_results), tree.flatten(jax_results)
):
self.assertEqual(tf_res.shape, jax_res.shape)
if not check_shape_only:
self.assertAllClose(np.asarray(tf_res), jax_res, atol=atol)
return jax_results, new_jax_params
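  # Rough sketch of the conversion API exercised above, with illustrative
  # values only:
  #   jax_fn, params = tf2jax.convert(
  #       tf.function(tf.nn.relu, jit_compile=True),
  #       np.zeros((10, 5), np.float32))
  #   outputs, new_params = jax_fn(params, inputs, rng=jax.random.PRNGKey(42))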
@chex.variants(with_jit=True, without_jit=True)
@parameterized.parameters("log_softmax", "sigmoid", "softmax", "softplus",
"tanh", "relu", "relu6", "elu", "leaky_relu")
def test_activations(self, op_name):
np.random.seed(42)
inputs = [np.random.normal(size=(10, 5)).astype(np.float32)]
if jax.default_backend().lower() == "tpu" and op_name in ("softplus",):
tols = dict(atol=1e-4)
else:
tols = {}
self._test_convert(getattr(tf.nn, op_name), inputs, **tols)
@chex.variants(with_jit=True, without_jit=True)
def test_assert(self):
def assert_fn(cond, data):
tf.Assert(condition=cond, data=data)
return data
self._test_convert(assert_fn, [np.array(5) > 0, [np.array(6)]])
@chex.variants(with_jit=True, without_jit=True)
@parameterized.parameters(
"Abs",
"Acosh",
"Asinh",
"Atanh",
"BesselI0e",
"BesselI1e",
"Ceil",
"Cos",
"Cosh",
"Digamma",
"Erf",
"Erfc",
"Erfinv",
"Exp",
"Expm1",
"Floor",
"Lgamma",
"Log",
"Log1p",
"Neg",
"Reciprocal",
"Round",
"Rsqrt",
"Sign",
"Sin",
"Sinh",
"Sqrt",
"Square",
"Tan",
)
def test_unary_numerics(self, op_name):
np.random.seed(42)
if op_name == "Erfinv":
inputs = np.random.uniform(size=(10, 5)).astype(np.float32)
else:
inputs = np.random.normal(size=(10, 5)).astype(np.float32)
def tf_func(x):
return getattr(tf.raw_ops, op_name)(x=x)
self._test_convert(tf_func, inputs)
@chex.variants(with_jit=True, without_jit=True)
@parameterized.parameters("Atan2", "Atan2")
def test_binary_numerics(self, op_name):
np.random.seed(42)
inputs = (
np.random.normal(size=(10, 5)).astype(np.float32),
np.random.normal(size=(10, 5)).astype(np.float32),
)
def tf_func(x, y):
return getattr(tf.raw_ops, op_name)(x=x, y=y)
self._test_convert(tf_func, inputs)
@chex.variants(with_jit=True, without_jit=True)
@parameterized.parameters("Add", "AddV2", "Div", "FloorDiv", "FloorMod",
"Mul", "Pow", "RealDiv", "Sub")
def test_binary_numerics_with_static(self, op_name):
np.random.seed(42)
inputs = (
np.random.normal(size=(10, 5)).astype(np.float32),
np.random.normal(size=(10, 5)).astype(np.float32),
)
def tf_func(x, y):
return getattr(tf.raw_ops, op_name)(x=x, y=y)
if jax.default_backend().lower() == "tpu" and op_name in ("Pow",):
tols = dict(atol=1e-4)
else:
tols = {}
self._test_convert(tf_func, inputs, **tols)
# Check static inputs result in static outputs.
def tf_static():
vals = tf_func(x=np.array([4.0, 3.0]), y=np.array([1.0, 2.0]))
return tf.zeros(tf.cast(vals, tf.int32))
self._test_convert(tf_static, [])
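  # Note on the recurring "static inputs result in static outputs" checks:
  # tf.zeros needs a concrete shape, so under the jitted variant these
  # sub-tests only pass if tf2jax produces a non-traced (constant) output for
  # the wrapped op when all of its inputs are constants.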
@chex.variants(with_jit=False, without_jit=True)
def test_add_n(self):
np.random.seed(42)
inputs = [
[np.random.normal(size=(10, 5)).astype(np.float32) for _ in range(4)]
]
def tf_func(x):
return tf.raw_ops.AddN(inputs=x)
self._test_convert(tf_func, inputs)
# Check static inputs result in static outputs.
def tf_static():
vals = tf_func(
[np.array([4.0, 3.0]), np.array([1.0, 2.0]), np.array([2.0, 1.0])])
return tf.zeros(tf.cast(vals, tf.int32))
self._test_convert(tf_static, [])
@chex.variants(with_jit=True, without_jit=True)
@parameterized.parameters("BitwiseAnd", "BitwiseOr", "BitwiseXor", "Invert")
def test_bitwise(self, op_name):
np.random.seed(42)
inputs = (np.random.randint(1000000, size=(10, 5), dtype=np.int32),
np.random.randint(1000000, size=(10, 5), dtype=np.int32))
def tf_func(x, y):
kwargs = dict(x=x) if op_name == "Invert" else dict(x=x, y=y)
return getattr(tf.raw_ops, op_name)(**kwargs)
self._test_convert(tf_func, inputs)
@chex.variants(with_jit=True, without_jit=True)
@parameterized.parameters("All", "Any")
def test_logical_reduction(self, op_name):
inputs = np.array([[[False, False], [False, True], [True, True]]])
def tf_func(x):
return getattr(tf.raw_ops, op_name)(input=x, axis=(0, 2), keep_dims=True)
self._test_convert(tf_func, inputs)
@chex.variants(with_jit=True, without_jit=True)
@parameterized.parameters("LogicalAnd", "LogicalNot", "LogicalOr")
def test_logical(self, op_name):
np.random.seed(42)
inputs = (
np.random.normal(size=(10, 5)) > 0.,
np.random.normal(size=(10, 5)) > 0.,
)
def tf_func(x, y):
kwargs = dict(x=x) if op_name == "LogicalNot" else dict(x=x, y=y)
return getattr(tf.raw_ops, op_name)(**kwargs)
self._test_convert(tf_func, inputs)
@chex.variants(with_jit=True, without_jit=True)
@parameterized.parameters("LeftShift", "RightShift")
def test_shift(self, op_name):
np.random.seed(42)
inputs = (np.random.randint(1000000, size=(3, 2), dtype=np.int32),
np.random.randint(32, size=(3, 2), dtype=np.int32))
def tf_func(x, y):
return getattr(tf.raw_ops, op_name)(x=x, y=y)
self._test_convert(tf_func, inputs)
@chex.variants(with_jit=True, without_jit=True)
@parameterized.parameters("Equal", "Greater", "GreaterEqual", "Less",
"LessEqual", "NotEqual")
def test_binary_comparison(self, op_name):
np.random.seed(42)
inputs = (np.random.randint(10, size=(10, 5)),
np.random.randint(10, size=(10, 5)))
def tf_func(x, y):
return tf.cast(getattr(tf.raw_ops, op_name)(x=x, y=y), tf.int32)
self._test_convert(tf_func, inputs)
@chex.variants(with_jit=True, without_jit=True)
@parameterized.parameters(
("Add", [[4, 3], [1, 2]]),
("AddV2", [[4, 3], [1, 2]]),
("Div", [[9, 6], [3, 3]]),
("Mul", [[4, 3], [1, 2]]),
("Neg", [[-4, -3]]),
("Sub", [[4, 3], [1, 2]]),
("Equal", [[4, 3, 2], [2, 3, 4]]),
("Greater", [[4, 3, 2], [2, 3, 4]]),
("GreaterEqual", [[4, 3, 2], [2, 3, 4]]),
("Less", [[4, 3, 2], [2, 3, 4]]),
("LessEqual", [[4, 3, 2], [2, 3, 4]]),
("NotEqual", [[4, 3, 2], [2, 3, 4]]),
)
def test_jitting_static(self, op_name, inputs):
def tf_func():
if len(inputs) == 1:
shape = getattr(tf.raw_ops, op_name)(x=inputs[0])
else:
shape = getattr(tf.raw_ops, op_name)(x=inputs[0], y=inputs[1])
return tf.zeros(tf.cast(shape, tf.int32) + 1)
self._test_convert(tf_func, [])
@chex.variants(with_jit=True, without_jit=True)
def test_argmax(self):
inputs = np.array(np.reshape(range(60), (5, 4, 3)), dtype=np.int32)
def argmax_fn(xs):
return tf.raw_ops.ArgMax(input=xs, dimension=1)
self._test_convert(argmax_fn, inputs)
@chex.variants(with_jit=True, without_jit=True)
def test_argmin(self):
inputs = np.array(np.reshape(range(60), (5, 4, 3)), dtype=np.int32)
def argmin_fn(xs):
return tf.raw_ops.ArgMin(input=xs, dimension=1)
self._test_convert(argmin_fn, inputs)
@chex.variants(with_jit=True, without_jit=True)
@parameterized.named_parameters(
chex.params_product(
(("SAME", "SAME"), ("VALID", "VALID")),
(("NHWC", "NHWC"), ("NCHW", "NCHW")),
named=True,
))
def test_avg_pool(self, padding, data_format):
np.random.seed(42)
inputs = np.random.normal(size=(10, 32, 16, 8)).astype(np.float32)
ksize = (1, 2, 3, 1)
strides = (1, 3, 2, 1)
if data_format == "NCHW":
if jax.default_backend().lower() == "cpu":
self.skipTest("TensorFlow AvgPool does not support NCHW on CPU.")
inputs = np.transpose(inputs, [0, 3, 1, 2])
ksize = _reorder(ksize, [0, 3, 1, 2])
strides = _reorder(strides, [0, 3, 1, 2])
else:
assert data_format == "NHWC"
def pool(x):
return tf.raw_ops.AvgPool(
value=x,
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format)
self._test_convert(pool, [inputs])
@chex.variants(with_jit=True, without_jit=True)
@parameterized.named_parameters(
(
"case0",
[
[[[1]]],
[[[2]]],
[[[3]]],
[[[4]]],
],
(2, 2),
[[0, 0], [0, 0]],
),
(
"case1",
[
[[[1, 2, 3]]],
[[[4, 5, 6]]],
[[[7, 8, 9]]],
[[[10, 11, 12]]],
],
(2, 2),
[[0, 0], [0, 0]],
),
(
"case2",
[
[[[1], [3]], [[9], [11]]],
[[[2], [4]], [[10], [12]]],
[[[5], [7]], [[13], [15]]],
[[[6], [8]], [[14], [16]]],
],
(2, 2),
[[0, 0], [0, 0]],
),
(
"case3",
[
[[[0], [1], [3]]],
[[[0], [9], [11]]],
[[[0], [2], [4]]],
[[[0], [10], [12]]],
[[[0], [5], [7]]],
[[[0], [13], [15]]],
[[[0], [6], [8]]],
[[[0], [14], [16]]],
],
(2, 2),
[[0, 0], [2, 0]],
),
)
def test_batch_to_space_nd(self, inputs, block_shape, crops):
inputs = np.array(inputs)
block_shape = np.array(block_shape)
crops = np.array(crops)
def batch_to_space(x):
return tf.raw_ops.BatchToSpaceND(
input=x, block_shape=block_shape, crops=crops
)
self._test_convert(batch_to_space, [inputs])
@chex.variants(with_jit=True, without_jit=True)
@parameterized.named_parameters(
(
"case0",
[
[[[1], [2]], [[3], [4]]],
],
(2, 2),
[[0, 0], [0, 0]],
),
(
"case1",
[
[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],
],
(2, 2),
[[0, 0], [0, 0]],
),
(
"case2",
[[
[[1], [2], [3], [4]],
[[5], [6], [7], [8]],
[[9], [10], [11], [12]],
[[13], [14], [15], [16]],
]],
(2, 2),
[[0, 0], [0, 0]],
),
(
"case3",
[
[[[1], [2], [3], [4]], [[5], [6], [7], [8]]],
[[[9], [10], [11], [12]], [[13], [14], [15], [16]]],
],
(2, 2),
[[0, 0], [2, 0]],
),
)
def test_space_to_batch_nd(self, inputs, block_shape, paddings):
inputs = np.array(inputs)
block_shape = np.array(block_shape)
paddings = np.array(paddings)
def space_to_batch(x):
return tf.raw_ops.SpaceToBatchND(
input=x, block_shape=block_shape, paddings=paddings
)
self._test_convert(space_to_batch, [inputs])
@chex.variants(with_jit=True, without_jit=True)
@parameterized.named_parameters(
chex.params_product(
(("NHWC", "NHWC"), ("NCHW", "NCHW")),
(
("three_dims", (32, 16, 8)),
("four_dims", (3, 32, 16, 8)),
("five_dims", (2, 3, 32, 16, 8)),
),
named=True,
))
def test_bias_add(self, data_format, input_shape):
np.random.seed(42)
if data_format == "NCHW":
nchannels = input_shape[1]
elif data_format == "NHWC":
nchannels = input_shape[-1]
else:
raise ValueError(f"Unsupported format {data_format}")
inputs = np.random.normal(size=input_shape).astype(np.float32)
    bias = np.linspace(-1., 1., nchannels).astype(np.float32)
def pool(x, b):
return tf.raw_ops.BiasAdd(value=x, bias=b, data_format=data_format)
self._test_convert(pool, [inputs, bias])
@chex.variants(with_jit=True, without_jit=True)
def test_bitcast(self):
inputs = np.array(0xffffffff, dtype=np.uint32)
def tf_func(x):
return tf.bitcast(x, type=tf.float32)
self._test_convert(tf_func, inputs)
def raw_func(x):
return tf.raw_ops.Bitcast(input=x, type=tf.float32)
self._test_convert(raw_func, inputs)
@chex.variants(with_jit=True, without_jit=True)
def test_broadcast_to(self):
inputs, shape = np.array([1, 2, 3]), (3, 3)
def broadcast_to(xs):
return tf.raw_ops.BroadcastTo(input=xs, shape=shape)
self._test_convert(broadcast_to, inputs)
# Check static inputs result in static outputs.
def broadcast_to_static():
return tf.zeros(broadcast_to(inputs)[0])
self._test_convert(broadcast_to_static, [])
@chex.variants(with_jit=True, without_jit=True)
@parameterized.named_parameters(
("valid", np.array((3.14, 42.0), dtype=np.float32), None),
("nan", np.array((np.nan, 42.0), dtype=np.float32), "Found NaN values"),
("inf", np.array((3.14, np.inf), dtype=np.float32), "Found Inf values"),
)
def test_check_numerics(self, inputs, expected_message):
def check_numerics(x):
return tf.raw_ops.CheckNumerics(tensor=x, message="Checking")
with config.override_config("enable_checkify_for_asserts", False):
jax_fn = tf2jax.convert_functional(tf.function(check_numerics), inputs)
jax_fn = self.variant(jax_fn)
outputs = jax_fn(inputs)
if not expected_message:
self.assertAllClose(outputs, inputs)
checked_fn = checkify.checkify(jax_fn)
err, checked_outputs = checked_fn(inputs)
err.throw() # Nothing to throw.
if not expected_message:
self.assertAllClose(checked_outputs, inputs)
with config.override_config("enable_checkify_for_asserts", True):
jax_fn = tf2jax.convert_functional(tf.function(check_numerics), inputs)
jax_fn = self.variant(jax_fn)
checked_fn = checkify.checkify(jax_fn)
err, checked_outputs = checked_fn(inputs)
if not expected_message:
self.assertAllClose(checked_outputs, inputs)
else:
with self.assertRaisesRegex(checkify.JaxRuntimeError,
f"Checking : {expected_message}"):
err.throw()
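  # With "enable_checkify_for_asserts" enabled, CheckNumerics failures surface
  # only through jax.experimental.checkify: the checkified function returns the
  # error, which is raised by err.throw(). With the flag disabled (first block
  # above), the converted check is a pass-through and there is nothing to
  # throw.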
@chex.variants(with_jit=True, without_jit=True)
def test_complex(self):
reals = np.array([1.2, -2.3, 3.4], np.float32)
imags = np.array([-4.5, 5.6, -6.7], np.float32)
self._test_convert(lambda x, y: tf.raw_ops.Complex(real=x, imag=y),
[reals, imags])
@chex.variants(with_jit=True, without_jit=True)
@parameterized.parameters("ComplexAbs", "Conj", "Imag", "Real")
def test_complex_ops(self, op_name):
reals = np.array([1.2, -2.3, 3.4], np.float32)
imags = np.array([-4.5, 5.6, -6.7], np.float32)
def forward(x):
if op_name == "ComplexAbs":
return getattr(tf.raw_ops, op_name)(x=x)
else:
return getattr(tf.raw_ops, op_name)(input=x)
self._test_convert(forward, [reals + 1j * imags])
@chex.variants(with_jit=True, without_jit=True)
def test_concat(self):
inputs = [np.zeros((10, 5)), np.zeros((7, 5))]
def concat(xs):
return tf.raw_ops.ConcatV2(values=xs, axis=0)
self._test_convert(concat, [inputs])
# Check static inputs result in static outputs.
def concat_static():
return tf.zeros(concat([[10], [5]]))
self._test_convert(concat_static, [])
@chex.variants(with_jit=True, without_jit=True)
def test_conjugate_transpose(self):
reals = np.array(range(24), np.float32).reshape((2, 3, 4)) - 6
imags = np.array(range(24), np.float32).reshape((2, 3, 4)) - 18
self._test_convert(
lambda x: tf.raw_ops.ConjugateTranspose(x=x, perm=[2, 0, 1]),
[reals + 1j * imags])
@chex.variants(with_jit=True, without_jit=True)
@parameterized.named_parameters(
("one", [3, 5, 5, 7], [3, 3, 7, 11], [1, 2, 2, 1], "SAME", "NHWC"),
("two", [2, 1, 16, 24], [1, 8, 3, 48], [1, 1, 1, 1], "VALID", "NHWC"),
("three", [3, 5, 5, 7], [3, 3, 7, 11], [1, 2, 2, 1], "SAME", "NCHW"),
("four", [2, 1, 16, 24], [1, 8, 3, 48], [1, 1, 1, 1], "VALID", "NCHW"),
)
def test_conv2d(self, input_shape, filter_shape, strides, padding,
data_format):
np.random.seed(42)
dilations = [1, 1, 1, 1]
filters = np.random.normal(size=filter_shape).astype(np.float32)
inputs = np.random.normal(size=input_shape).astype(np.float32)
if data_format == "NCHW":
if jax.default_backend().lower() == "cpu":
self.skipTest("TensorFlow Conv2D does not support NCHW on CPU.")
inputs = np.transpose(inputs, [0, 3, 1, 2])
strides = _reorder(strides, [0, 3, 1, 2])
dilations = _reorder(dilations, [0, 3, 1, 2])
else:
assert data_format == "NHWC"
def tf_func(x):
return tf.nn.conv2d(
x,
filters=filters,
strides=strides,
padding=padding,
data_format=data_format,
dilations=dilations)
self._test_convert(tf_func, inputs)
def raw_func(x):
return tf.raw_ops.Conv2D(
input=x,
filter=filters,
strides=strides,
padding=padding,
data_format=data_format,
dilations=dilations)
self._test_convert(raw_func, inputs)
@chex.variants(with_jit=True, without_jit=True)
@parameterized.named_parameters(
chex.params_product(
(("NHWC", "NHWC"), ("NCHW", "NCHW"),),
named=True,
))
def test_conv2d_transpose(self, data_format):
np.random.seed(42)
output_shape = [3, 8, 8, 128]
padding = "SAME"
strides = [1, 2, 2, 1]
dilations = [1, 1, 1, 1]
filters = np.random.normal(size=[7, 7, 128, 13]).astype(np.float32)
inputs = np.random.normal(size=[3, 4, 4, 13]).astype(np.float32)
if data_format == "NCHW":
if jax.default_backend().lower() == "cpu":
self.skipTest(
"TensorFlow Conv2DBackpropInput does not support NCHW on CPU.")
inputs = np.transpose(inputs, [0, 3, 1, 2])
strides = _reorder(strides, [0, 3, 1, 2])
dilations = _reorder(dilations, [0, 3, 1, 2])
output_shape = _reorder(output_shape, [0, 3, 1, 2])
else:
assert data_format == "NHWC"
def tf_func(x):
return tf.nn.conv2d_transpose(
x,
filters=filters,
output_shape=output_shape,
strides=strides,
padding=padding,
data_format=data_format,
dilations=dilations)
self._test_convert(tf_func, inputs)
def raw_func(x):
return tf.raw_ops.Conv2DBackpropInput(
input_sizes=output_shape,
filter=filters,
out_backprop=x,
strides=strides,
padding=padding,
data_format=data_format,
dilations=dilations)
self._test_convert(raw_func, inputs)
@chex.variants(with_jit=True, without_jit=True)
@parameterized.named_parameters(
chex.params_product(
(("exclusive", True), ("not_exclusive", False),),
(("reverse", True), ("forward", False)),
named=True,
))
def test_cumsum(self, exclusive, reverse):
inputs = np.array(np.reshape(range(24), (4, 3, 2)), dtype=np.int32)
def cumsum_fn(xs):
return tf.raw_ops.Cumsum(
x=xs, axis=1, exclusive=exclusive, reverse=reverse)
self._test_convert(cumsum_fn, inputs)
# Check static inputs result in static outputs.
def cumsum_static():
return tf.zeros(cumsum_fn(inputs)[0, -1])
self._test_convert(cumsum_static, [])
@chex.variants(with_jit=True, without_jit=True)
@parameterized.named_parameters(
chex.params_product(
(
("exclusive", True),
("not_exclusive", False),
),
(("reverse", True), ("forward", False)),
named=True,
)
)
def test_cumprod(self, exclusive, reverse):
inputs = np.array(np.reshape(range(24), (4, 3, 2)), dtype=np.int32)
def cumprod_fn(xs):
return tf.raw_ops.Cumprod(
x=xs, axis=1, exclusive=exclusive, reverse=reverse
)
self._test_convert(cumprod_fn, inputs)
# Check static inputs result in static outputs.
def cumprod_static():
return tf.zeros(cumprod_fn(inputs)[0, -1])
self._test_convert(cumprod_static, [])
@chex.variants(with_jit=True, without_jit=True)
@parameterized.named_parameters(
chex.params_product(
(
("without_explicit_paddings", False),
("with_explicit_paddings", True),
),
(
("NHWC", "NHWC"),
("NCHW", "NCHW"),
),
named=True,
)
)
def test_depthwise_conv2d(self, use_explicit_paddings, data_format):
np.random.seed(42)
strides = [1, 2, 2, 1]
filters = np.random.normal(size=[3, 3, 7, 11]).astype(np.float32)
inputs = np.random.normal(size=[3, 5, 5, 7]).astype(np.float32)
explicit_paddings = ([[0, 0], [8, 8], [8, 8], [0, 0]]
if use_explicit_paddings else [[]])
dilations = [1, 1, 1, 1]
if data_format == "NCHW":
if jax.default_backend().lower() == "cpu":
self.skipTest(
"TensorFlow DepthwiseConv2dNative does not support NCHW on CPU.")
inputs = np.transpose(inputs, [0, 3, 1, 2])
strides = _reorder(strides, [0, 3, 1, 2])
dilations = _reorder(dilations, [0, 3, 1, 2])
if use_explicit_paddings:
explicit_paddings = _reorder(explicit_paddings, [0, 3, 1, 2])
else:
assert data_format == "NHWC"
def tf_func(x):
return tf.nn.depthwise_conv2d(
x,
filter=filters,
strides=strides,
padding=explicit_paddings if use_explicit_paddings else "SAME",
data_format=data_format,
dilations=dilations[2:] if data_format == "NCHW" else dilations[1:-1])
self._test_convert(tf_func, inputs)
def raw_func(x):
return tf.raw_ops.DepthwiseConv2dNative(
input=x,
filter=filters,
strides=strides,
padding="EXPLICIT" if use_explicit_paddings else "SAME",
explicit_paddings=sum(explicit_paddings, []),
data_format=data_format,
dilations=dilations)
self._test_convert(raw_func, inputs)
@chex.variants(with_jit=True, without_jit=True)
def test_einsum(self):
np.random.seed(42)
inputs = [
np.random.normal(size=[10, 2]).astype(np.float32),
np.random.normal(size=[2, 5]).astype(np.float32),
]
equation_tf = "ij,jk"
def einsum_tf(inputs):
return tf.einsum(equation_tf, *inputs)
self._test_convert(einsum_tf, [inputs])
equation_raw = "ij,jk->ik"
def einsum_raw(inputs):
return tf.raw_ops.Einsum(inputs=inputs, equation=equation_raw)
self._test_convert(einsum_raw, [inputs])
@chex.variants(with_jit=True, without_jit=True)
def test_expand_dims(self):
inputs, dims = 42, 0
def expand_dims(x):
return tf.raw_ops.ExpandDims(input=x, axis=dims)
self._test_convert(expand_dims, inputs)
# Check static inputs result in static outputs.
def expand_dims_static():
return tf.zeros(expand_dims(inputs))
self._test_convert(expand_dims_static, [])
@chex.variants(with_jit=True, without_jit=True)
def test_empty(self):
shape = (2,)
def empty():
return tf.raw_ops.Empty(shape=shape, dtype=tf.int32, init=True)
self._test_convert(empty, [])
# Check static inputs result in static outputs.
def empty_static():
return tf.zeros(empty())
self._test_convert(empty_static, [])
@chex.variants(with_jit=True, without_jit=True)
@parameterized.named_parameters(
("exact", (10, 5), (10, 5), True),
("partial0", (10, None), (10, 5), True),
("partial1", (None, 5), (10, 5), True),
("partial2", (None, None), (10, 5), True),
("too_small", (10,), (10,), False),
("too_large", (10, 5, 3), (10, 5, 3), False),
("incompatible0", (20, 5), (20, 5), False),
("incompatible1", (10, 7), (10, 7), False),
("incompatible2", (20, None), (20, 5), False),
("incompatible3", (None, 7), (10, 7), False),
)
def test_ensure_shape(self, expected_shape, example_shape, valid):
def ensure_shape(x):
return tf.raw_ops.EnsureShape(
input=x, shape=tf.TensorShape(expected_shape))
valid_inputs = np.array(
range(np.prod(example_shape)), dtype=np.float32).reshape(example_shape)
self._test_convert(ensure_shape, [valid_inputs])
jax_fn = tf2jax.convert_functional(tf.function(ensure_shape), valid_inputs)
jax_fn = self.variant(jax_fn)
with config.override_config("strict_shape_check", False):
actual_inputs = np.array(range(50), dtype=np.float32).reshape((10, 5))
if valid:
outputs = jax_fn(actual_inputs)
self.assertAllClose(outputs, actual_inputs)
else:
with self.assertRaisesRegex(ValueError, "Expected shape="):
jax_fn(actual_inputs)
@chex.variants(with_jit=True, without_jit=True)
@parameterized.named_parameters(
("FFT", tf.raw_ops.FFT, (10,)),
("FFT2D", tf.raw_ops.FFT2D, (10, 8)),
("FFT3D", tf.raw_ops.FFT3D, (10, 8, 6)),
("IFFT", tf.raw_ops.IFFT, (10,)),
("IFFT2D", tf.raw_ops.IFFT2D, (10, 8)),
("IFFT3D", tf.raw_ops.IFFT3D, (10, 8, 6)),
)
def test_fft_ifft(self, fft_op, shape):
np.random.seed(42)
inputs = np.random.normal(size=(5,) + shape).astype(np.complex64)
tols = dict(atol=1e-4) if jax.default_backend().lower() == "cpu" else {}
self._test_convert(lambda x: fft_op(input=x), inputs, **tols)
@chex.variants(with_jit=True, without_jit=True)
@parameterized.named_parameters(
("RFFT", tf.raw_ops.RFFT, (10,), np.float32),
("RFFT2D", tf.raw_ops.RFFT2D, (10, 8), np.float32),
("RFFT3D", tf.raw_ops.RFFT3D, (10, 8, 6), np.float32),
("IRFFT", tf.raw_ops.IRFFT, (10,), np.complex64),
("IRFFT2D", tf.raw_ops.IRFFT2D, (10, 8), np.complex64),
("IRFFT3D", tf.raw_ops.IRFFT3D, (10, 8, 6), np.complex64),
)
def test_rfft_irfft(self, fft_op, shape, dtype):
np.random.seed(42)
inputs = np.random.normal(size=(5,) + shape).astype(dtype)
self._test_convert(
lambda x: fft_op(input=x, fft_length=[6] * len(shape)), inputs
)
@chex.variants(with_jit=True, without_jit=True)
def test_fill(self):
dims, value = (np.int32(2),), np.int32(3)
def fill(x):
return tf.raw_ops.Fill(dims=dims, value=x)
self._test_convert(fill, value)
# Check static inputs result in static outputs.
def fill_static():
return tf.zeros(fill(value))
self._test_convert(fill_static, [])
@chex.variants(with_jit=True, without_jit=True)
@parameterized.named_parameters(
chex.params_product(
(("inference", False), ("training", True)),
(
("FusedBatchNorm", "FusedBatchNorm"),
("FusedBatchNormV2", "FusedBatchNormV2"),
("FusedBatchNormV3", "FusedBatchNormV3"),
),
(("avg_zero", 0.0), ("avg_half", 0.5), ("avg_one", 1.0)),
(("NHWC", "NHWC"), ("NCHW", "NCHW")),
named=True,
))
def test_fused_batch_norm(self, training, op_name, avg_factor, data_format):
np.random.seed(42)
ndim = 5
inputs = np.random.normal(size=(3, 16, 16, ndim)).astype(np.float32)
if data_format == "NCHW":
inputs = np.transpose(inputs, [0, 3, 1, 2])
else:
assert data_format == "NHWC"
def raw_func(x):
outputs = getattr(tf.raw_ops, op_name)(
x=x,
scale=[3.0] * ndim,
offset=[2.0] * ndim,
mean=np.array(range(ndim), np.float32),
variance=np.array(range(ndim), np.float32) * 0.1,
exponential_avg_factor=avg_factor,
data_format=data_format,
is_training=training)
return outputs[:3]
self._test_convert(raw_func, inputs)
@chex.variants(with_jit=True, without_jit=True)
@parameterized.named_parameters(
chex.params_product(
(("1_0", 1, 0), ("2_0", 2, 0), ("-1_0", -1, 0), ("-2_0", -2, 0),
("1_1", 1, 1), ("2_1", 2, 1), ("-1_1", -1, 1), ("-2_1", -2, 1),
("2_2", 2, 2), ("3_2", 3, 2), ("-1_2", -1, 2), ("-2_2", -2, 2)),
named=True,
))
def test_gather(self, axis, batch_dims):
values = np.array(range(1, 2 * 4 * 5 * 3 + 1)).reshape((2, 4, 5, 3))
indices = np.array(range(2 * 4 * 2)).reshape((2, 4, 2)) % 3
def gather_fn(vals, idx):
return tf.raw_ops.GatherV2(
params=vals, indices=idx, axis=axis, batch_dims=batch_dims)
self._test_convert(gather_fn, [values, indices])
# Check static inputs result in static outputs.
def gather_static():
zeros_shape = gather_fn(values, indices)
while zeros_shape.shape.ndims > 0:
zeros_shape = zeros_shape[1]
return tf.zeros(zeros_shape)
self._test_convert(gather_static, [])
@chex.variants(with_jit=True, without_jit=True)
@parameterized.named_parameters(
chex.params_product(
(
(
"index",
[[1, 2], [3, 4]],
[[0, 0], [1, 1]],
),
(
"slice",
[[1, 2], [3, 4]],
[[1], [0]],
),
(
"index_3_1",
[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
[[1]],
),
(
"index_3_2",
[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
[[0, 1], [1, 0]],
),
(
"index_3_3",
[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
[[0, 0, 1], [1, 0, 1]],
),
(
"batch_index",
[[1, 2], [3, 4]],
[[[0, 0]], [[0, 1]]],
),
(
"batch_slice",
[[1, 2], [3, 4]],
[[[1]], [[0]]],
),
(
"batch_3d_1",
[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
[[[1]], [[0]]],
),
(
"batch_3d_2",
[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
[[[0, 1], [1, 0]], [[0, 0], [1, 1]]],
),
(
"batch_3d_3",
[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
[[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]],
),
),
named=True,
))
def test_gather_nd(self, values, indices):
def gather_nd_fn(vals, idx):
return tf.raw_ops.GatherNd(params=vals, indices=idx)
self._test_convert(gather_nd_fn, [values, indices])
# Check static inputs result in static outputs.
def gather_nd_static():
zeros_shape = gather_nd_fn(values, indices)
while zeros_shape.shape.ndims > 0:
zeros_shape = zeros_shape[-1]
return tf.zeros(zeros_shape)
self._test_convert(gather_nd_static, [])
@chex.variants(with_jit=True, without_jit=True)
def test_cond(self):
inputs = [np.array(2), np.array(5)]
def cond_fn(x, y):
f1 = lambda: tf.multiply(x, 17)
f2 = lambda: tf.add(y, 23)
return tf.cond(tf.less(x, y), f1, f2)
self._test_convert(cond_fn, inputs)
@chex.variants(with_jit=True, without_jit=True)
def test_igamma_ops(self):
np.random.seed(42)
inputs = (
np.random.uniform(size=(10, 5)).astype(np.float32) * 10,
np.random.uniform(size=(10, 5)).astype(np.float32) * 10,
)
self._test_convert(lambda a, x: tf.raw_ops.Igamma(a=a, x=x), inputs)
self._test_convert(lambda a, x: tf.raw_ops.Igammac(a=a, x=x), inputs)
@chex.variants(with_jit=True, without_jit=True)
def test_inplace_add(self):
if test_util.parse_version(tf.version.VERSION) >= test_util.parse_version(
"2.14.0"
):
self.skipTest(
f"Requires earlier than tf 2.14.0, found {tf.version.VERSION}."
)
np.random.seed(42)
@tf.function
def inplace_add(x, idx, val):
return tf.raw_ops.InplaceAdd(x=x, i=idx, v=val)
inputs = [
np.random.normal(size=[10, 2]).astype(np.float32),
[2, 5],
[[1, 2], [3, 4]],
]
try:
self._test_convert(inplace_add, inputs)
except tf.errors.InvalidArgumentError as e:
if jax.default_backend().lower() == "tpu":
self.assertIn("index must be Rank 1 and size 1", str(e)) # pylint: disable=g-assert-in-except
tpu_inputs = [
np.random.normal(size=[10, 2]).astype(np.float32),
[5],
[[3, 4]],
]
self._test_convert(inplace_add, tpu_inputs)
else:
raise e
@chex.variants(with_jit=True, without_jit=True)
def test_inplace_update(self):
if test_util.parse_version(tf.version.VERSION) >= test_util.parse_version(
"2.14.0"
):
self.skipTest(
f"Requires earlier than tf 2.14.0, found {tf.version.VERSION}."
)
np.random.seed(42)
@tf.function
def inplace_update(x, idx, val):
return tf.raw_ops.InplaceUpdate(x=x, i=idx, v=val)
inputs = [
np.random.normal(size=[10, 2]).astype(np.float32),
[2, 5],
[[1, 2], [3, 4]],
]
self._test_convert(inplace_update, inputs)
@chex.variants(with_jit=True, without_jit=True)
def test_invert_permutation(self):
np.random.seed(42)
def invert(x):
return tf.raw_ops.InvertPermutation(x=x)
inputs = np.array([2, 4, 3, 0, 1])
self._test_convert(invert, inputs)
def invert_static():
return tf.zeros(invert(inputs))
self._test_convert(invert_static, [])
@chex.variants(with_jit=True, without_jit=True)
@parameterized.named_parameters(
chex.params_product(
(
("unbatched", ([10, 2], [2, 5])),
("batched", ([7, 10, 2], [7, 2, 5])),
),
(
("transpose_x", True),
("not_transpose_x", False),
),
(
("transpose_y", True),
("not_transpose_y", False),
),
named=True,
))
def test_matmul(self, shapes, transpose_x, transpose_y):
np.random.seed(42)
inputs = [
np.random.normal(size=shapes[0]).astype(np.float32),
np.random.normal(size=shapes[1]).astype(np.float32),
]
if transpose_x:
inputs[0] = np.swapaxes(inputs[0], -1, -2)
if transpose_y:
inputs[1] = np.swapaxes(inputs[1], -1, -2)
def matmul_tf(x, y):
return tf.matmul(x, y, transpose_a=transpose_x, transpose_b=transpose_y)
self._test_convert(matmul_tf, inputs)
def matmul_raw(x, y):
return tf.raw_ops.BatchMatMulV2(
x=x, y=y, adj_x=transpose_x, adj_y=transpose_y)
self._test_convert(matmul_raw, inputs)
@chex.variants(with_jit=True, without_jit=True)
@parameterized.parameters((2, 3), (-2, 3), (2, -3))
  def test_matrix_band_part(self, lower, upper):
inputs = np.arange(900).reshape((2, 3, 10, 15))
def band_part(x):
return tf.raw_ops.MatrixBandPart(
input=x, num_lower=lower, num_upper=upper)
self._test_convert(band_part, [inputs])
@chex.variants(with_jit=True, without_jit=True)
@parameterized.parameters(np.float32, np.int32, np.bool_)
def test_matrix_diag(self, dtype):
np.random.seed(42)
if dtype == np.float32:
inputs = np.random.normal(size=[10, 3, 4]).astype(dtype)
padding = dtype(47)
elif dtype == np.int32:
inputs = np.random.randint(low=0, high=10, size=[10, 3, 4], dtype=dtype)
padding = dtype(47)
elif dtype == np.bool_:
inputs = np.random.normal(size=[10, 3, 4]) > 0.0
padding = np.bool_(False)
else:
raise ValueError(f"Unsupported dtype={dtype}")
def raw_func(x):
return tf.raw_ops.MatrixDiagV3(
diagonal=x, k=-2, num_rows=-1, num_cols=-1, padding_value=padding)
self._test_convert(raw_func, inputs)
def tf_func(x):
return tf.linalg.diag(x, k=-2, padding_value=padding)
self._test_convert(tf_func, inputs)
@chex.variants(with_jit=True, without_jit=True)
@parameterized.parameters(np.float32, np.int32, np.bool_)
def test_matrix_set_diag(self, dtype):
np.random.seed(42)
diagonal = np.array([[1, 2, 3], [4, 5, 6]]).astype(dtype)
if dtype == np.float32:
inputs = np.random.normal(size=[2, 3, 4]).astype(dtype)
elif dtype == np.int32:
inputs = np.random.randint(low=0, high=10, size=[2, 3, 4], dtype=dtype)
elif dtype == np.bool_:
inputs = np.random.normal(size=[2, 3, 4]) > 0.0
diagonal = diagonal > 3
else:
raise ValueError(f"Unsupported dtype={dtype}")
def raw_func(x, y):
return tf.raw_ops.MatrixSetDiagV3(input=x, diagonal=y, k=1)
self._test_convert(raw_func, [inputs, diagonal])
def tf_func(x, y):
return tf.linalg.set_diag(x, y, k=1)
self._test_convert(tf_func, [inputs, diagonal])
@chex.variants(with_jit=True, without_jit=True)
def test_max(self):
inputs = np.array(np.reshape(range(24), (4, 3, 2)), dtype=np.int32)
def max_fn(xs):
return tf.raw_ops.Max(input=xs, axis=[0, 2, 1])
self._test_convert(max_fn, inputs)
# Check static inputs result in static outputs.
def max_static():
return tf.zeros(max_fn(inputs))
self._test_convert(max_static, [])
@chex.variants(with_jit=True, without_jit=True)
@parameterized.named_parameters(
chex.params_product(
(("SAME", "SAME"), ("VALID", "VALID")),
(("NHWC", "NHWC"), ("NCHW", "NCHW")),
named=True,
))
def test_max_pool(self, padding, data_format):
np.random.seed(42)
inputs = np.random.normal(size=(10, 32, 16, 8)).astype(np.float32)
ksize = (1, 2, 3, 1)
strides = (1, 3, 2, 1)
if data_format == "NCHW":
if jax.default_backend().lower() == "cpu":
self.skipTest("TensorFlow MaxPool does not support NCHW on CPU.")
inputs = np.transpose(inputs, [0, 3, 1, 2])
ksize = _reorder(ksize, [0, 3, 1, 2])
strides = _reorder(strides, [0, 3, 1, 2])
else:
assert data_format == "NHWC"
def pool(x):
return tf.raw_ops.MaxPool(
input=x,
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format)
self._test_convert(pool, [inputs])
@chex.variants(with_jit=True, without_jit=True)
def test_maximum(self):
x_in, y_in = [1, 4], [2, 3]
def maximum(x, y):
return tf.raw_ops.Maximum(x=x, y=y)
self._test_convert(maximum, [x_in, y_in])
# Check static inputs result in static outputs.
def maximum_static():
return tf.zeros(maximum(x=x_in, y=y_in))
self._test_convert(maximum_static, [])
@chex.variants(with_jit=True, without_jit=True)
def test_min(self):
inputs = np.array(np.reshape(range(24), (4, 3, 2)), dtype=np.int32)
def min_fn(xs):
return tf.raw_ops.Min(input=xs, axis=[0, 2, 1])
self._test_convert(min_fn, inputs)
# Check static inputs result in static outputs.
def min_static():
return tf.zeros(min_fn(inputs))
self._test_convert(min_static, [])
@chex.variants(with_jit=True, without_jit=True)
def test_minimum(self):
x_in, y_in = [1, 4], [2, 3]
def minimum(x, y):
return tf.raw_ops.Minimum(x=x, y=y)
self._test_convert(minimum, [x_in, y_in])
# Check static inputs result in static outputs.
def minimum_static():
return tf.zeros(minimum(x=x_in, y=y_in))
self._test_convert(minimum_static, [])
@chex.variants(with_jit=True, without_jit=True)
def test_one_hot(self):
inputs = [[[1, 2], [3, 4], [5, 6]], 42, 47]
def make_one_hot(axis):
def one_hot(inds, on, off):
return tf.raw_ops.OneHot(
indices=inds, depth=10, on_value=on, off_value=off, axis=axis)
return one_hot
self._test_convert(make_one_hot(-1), inputs)
self._test_convert(make_one_hot(2), inputs)
@chex.variants(with_jit=True, without_jit=True)
def test_pack(self):
def pack(inputs):
return tf.raw_ops.Pack(values=inputs)
self._test_convert(pack, [[np.zeros((10,)), np.zeros((10,))]])
# Check static inputs result in static outputs.
def pack_static():
return tf.zeros(pack([10, 10]))
self._test_convert(pack_static, [])
@chex.variants(with_jit=True, without_jit=True)
@parameterized.parameters(
np.int8,
np.int16,
np.int32,
np.int64,
np.uint8,
np.uint16,
np.uint32,
np.uint64,
)
def test_population_count(self, dtype):
def population_count(x):
return tf.raw_ops.PopulationCount(x=x)
inputs = np.arange(1000, dtype=dtype)
self._test_convert(population_count, inputs)
@chex.variants(with_jit=True, without_jit=True)
def test_pow(self):
def power(x, y):
return tf.raw_ops.Pow(x=x, y=y)
self._test_convert(power, [2, 3])
# Check static inputs result in static outputs.
def power_static():
return tf.zeros((power(2, 3),))
self._test_convert(power_static, [])
@chex.variants(with_jit=True, without_jit=True)
def test_prevent_gradient(self):
error_message = "Gradient not allowed."
@tf.function
def prevent(x):
return tf.raw_ops.PreventGradient(input=x * x, message=error_message)
np_x = np.array(3.14, dtype=np.float32)
tf_x = tf.constant(np_x)
tf_y = prevent(tf_x)
with tf.GradientTape() as g:
g.watch(tf_x)
with self.assertRaisesRegex(LookupError, error_message):
prevent(tf_x)
jax_func = tf2jax.convert_functional(prevent, np.zeros_like(np_x))
jax_func = self.variant(jax_func)
jax_y = jax_func(np_x)
self.assertAllClose(tf_y, jax_y)
with self.assertRaisesRegex(LookupError, error_message):
jax.grad(jax_func)(np_x)
with tf2jax.config.override_config("raise_on_prevent_gradient", False):
jax_func = tf2jax.convert_functional(prevent, np.zeros_like(np_x))
jax_func = self.variant(jax_func)
jax_y = jax_func(np_x)
self.assertAllClose(tf_y, jax_y)
jax_grad = jax.grad(jax_func)(np_x)
self.assertAllClose(np_x * 2, jax_grad)
@chex.variants(with_jit=True, without_jit=True)
def test_pad(self):
inputs, pads, values = np.ones((10, 5)), [[1, 2], [3, 4]], 42.0
def pad(xs):
return tf.raw_ops.Pad(input=xs, paddings=pads)
self._test_convert(pad, [inputs])
def pad_v2(xs, value):
return tf.raw_ops.PadV2(input=xs, paddings=pads, constant_values=value)
self._test_convert(pad_v2, [inputs, values])
@chex.variants(with_jit=True, without_jit=True)
def test_prod(self):
inputs = np.array(np.reshape(range(24), (4, 3, 2)), dtype=np.int32)
def prod(xs):
return tf.raw_ops.Prod(input=xs, axis=[0, 2, 1])
self._test_convert(prod, inputs)
# Check static inputs result in static outputs.
def prod_static():
return tf.zeros(prod(inputs))
self._test_convert(prod_static, [])
@chex.variants(with_jit=True, without_jit=True)
def test_rank(self):
inputs = np.array(np.reshape(range(24), (4, 3, 2)), dtype=np.int32)
def rank(xs):
return tf.raw_ops.Rank(input=xs)
self._test_convert(rank, inputs)
# Check static inputs result in static outputs.
def rank_static():
return tf.zeros(rank(inputs))
self._test_convert(rank_static, [])
@chex.variants(with_jit=True, without_jit=True)
@parameterized.named_parameters(
("basic0", 2, 0),
("basic1", -2, 1),
("basic2", 0, 1),
("single", (2,), (1,)),
("double0", (2, -1), (0, 1)),
("double1", (2, -1), (1, 0)),
("dupe_axis", (2, 1), (1, 1)),
)
def test_roll(self, shift, axis):
inputs = np.array(range(1, 51)).reshape((10, 5))
def roll(x):
return tf.raw_ops.Roll(input=x, shift=shift, axis=axis)
self._test_convert(roll, inputs)
# Check static inputs result in static outputs.
def roll_static():
return tf.zeros(roll(inputs)[0])
self._test_convert(roll_static, [])
@chex.variants(with_jit=True, without_jit=True)
@parameterized.parameters(((0, 2),), (tuple(),), (None,))
def test_squeeze(self, dims):
inputs = np.array([[[42], [47]]])
def squeeze(x):
return tf.raw_ops.Squeeze(input=x, axis=dims)
self._test_convert(squeeze, inputs)
# Check static inputs result in static outputs.
def squeeze_static():
return tf.zeros(squeeze(inputs))
self._test_convert(squeeze_static, [])
@chex.variants(with_jit=True, without_jit=True)
def test_rand_normal(self):
tf.random.set_seed(42) # Not sure how to translate this to Jax.
def tf_func():
return tf.random.normal(shape=(16, 7), dtype=tf.float32)
self._test_convert(tf_func, (), check_shape_only=True)
def raw_func():
return tf.raw_ops.RandomStandardNormal(
shape=(1000000, 7), dtype=tf.float32)
self._test_convert(raw_func, (), check_shape_only=True)
with self.subTest("check_statistics"):
jax_func = tf2jax.convert_functional(tf.function(raw_func))
jax_func = self.variant(jax_func)
samples = jax_func(rng=jax.random.PRNGKey(42))
self.assertAllClose(np.mean(samples), 0.0, atol=1e-3)
self.assertAllClose(np.std(samples), 1.0, atol=1e-3)
with self.subTest("check_same_seed"):
same_samples = jax_func(rng=jax.random.PRNGKey(42))
self.assertAllClose(samples, same_samples)
with self.subTest("check_diff_seed"):
diff_samples = jax_func(rng=jax.random.PRNGKey(47))
self.assertNotAllClose(samples, diff_samples)
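  # TF's stateful RNG seeding (tf.random.set_seed) has no direct JAX
  # counterpart; the converted functions draw from the explicit `rng` argument
  # instead, so the random tests here compare shapes and sample statistics
  # rather than exact values.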
@chex.variants(with_jit=True, without_jit=True)
def test_rand_uniform(self):
tf.random.set_seed(42) # Not sure how to translate this to Jax.
def tf_func():
return tf.random.uniform(shape=(16, 7), dtype=tf.float32)
self._test_convert(tf_func, (), check_shape_only=True)
def raw_func():
return tf.raw_ops.RandomUniform(shape=(1000000, 7), dtype=tf.float32)
self._test_convert(raw_func, (), check_shape_only=True)
with self.subTest("check_statistics"):
jax_func = tf2jax.convert_functional(tf.function(raw_func))
jax_func = self.variant(jax_func)
samples = jax_func(rng=jax.random.PRNGKey(42))
for expected in np.linspace(0.1, 1, 10):
actual = np.mean(samples < expected)
self.assertAllClose(actual, expected, atol=1e-3)
with self.subTest("check_same_seed"):
same_samples = jax_func(rng=jax.random.PRNGKey(42))
self.assertAllClose(samples, same_samples)
with self.subTest("check_diff_seed"):
diff_samples = jax_func(rng=jax.random.PRNGKey(47))
self.assertNotAllClose(samples, diff_samples)
@chex.variants(with_jit=True, without_jit=True)
def test_rand_uniform_int(self):
tf.random.set_seed(42) # Not sure how to translate this to Jax.
def tf_func():
return tf.random.uniform(
shape=(16, 7), minval=0, maxval=10, dtype=tf.int32)
self._test_convert(tf_func, (), check_shape_only=True)
def raw_func():
return tf.raw_ops.RandomUniformInt(
shape=(1000000, 7), minval=0, maxval=10)
self._test_convert(raw_func, (), check_shape_only=True)
with self.subTest("check_statistics"):
jax_func = tf2jax.convert_functional(tf.function(raw_func))
jax_func = self.variant(jax_func)
samples = jax_func(rng=jax.random.PRNGKey(42))
for val in range(0, 10):
actual = np.mean(samples == val)
self.assertAllClose(actual, 0.1, atol=1e-3)
with self.subTest("check_same_seed"):
same_samples = jax_func(rng=jax.random.PRNGKey(42))
self.assertAllClose(samples, same_samples)
with self.subTest("check_diff_seed"):
diff_samples = jax_func(rng=jax.random.PRNGKey(47))
self.assertNotAllClose(samples, diff_samples)
@chex.variants(with_jit=True, without_jit=True)
def test_range(self):
inputs = [np.int32(1), np.int32(5), np.int32(2)]
def range_fn(start, limit, delta):
return tf.raw_ops.Range(start=start, limit=limit, delta=delta)
    # jnp.arange cannot be jitted with non-static (traced) bounds.
with self._assert_if_jitted(jax.core.ConcretizationTypeError):
self._test_convert(range_fn, inputs)
# Check static inputs result in static outputs.
def range_static():
return tf.zeros(range_fn(*inputs))
self._test_convert(range_static, [])
@chex.variants(with_jit=True, without_jit=True)
@parameterized.named_parameters(
chex.params_product(
(("upper_sample", (10, 10)), ("down_sample", (5, 5))),
(
("not_align_and_half", (False, True)),
("not_align_and_not_half", (False, False)),
("align_and_not_half", (True, False)),
),
named=True,
))
def test_resize_bilinear(self, size, align_and_half):
np.random.seed(42)
align, half_pixel = align_and_half
def resize_bilinear(imgs):
return tf.raw_ops.ResizeBilinear(
images=imgs,
size=size,
align_corners=align,
half_pixel_centers=half_pixel)
images = np.random.normal(size=(4, 7, 7, 3)).astype(np.float32)
self._test_convert(resize_bilinear, [images])
@parameterized.named_parameters(
chex.params_product(
(
("align_and_half", (True, True)),
),
named=True,
))
def test_resize_bilinear_invalid(self, align_and_half):
np.random.seed(42)
align, half_pixel = align_and_half
def resize_bilinear(imgs):
return tf.raw_ops.ResizeBilinear(
images=imgs,
size=(10, 10),
align_corners=align,
half_pixel_centers=half_pixel)
images = np.random.normal(size=(4, 7, 7, 3)).astype(np.float32)
with self.assertRaisesRegex(
ValueError, "align_corners=True and half_pixel_centers=True"):
_ = tf2jax.convert_functional(tf.function(resize_bilinear), images)
@chex.variants(with_jit=True, without_jit=True)
@parameterized.named_parameters(
chex.params_product(
(("upper_sample", (10, 10)), ("down_sample", (5, 5))),
(
("not_align_and_half", (False, True)),
),
named=True,
))
def test_resize_nearest_neighbor(self, size, align_and_half):
np.random.seed(42)
align, half_pixel = align_and_half
def resize_nearest_neighbor(imgs):
return tf.raw_ops.ResizeNearestNeighbor(
images=imgs,
size=size,
align_corners=align,
half_pixel_centers=half_pixel)
images = np.random.normal(size=(4, 7, 7, 3)).astype(np.float32)
self._test_convert(resize_nearest_neighbor, [images])
@parameterized.named_parameters(
chex.params_product(
(
("align_and_half", (True, True)),
("align_and_not_half", (True, False)),
("not_align_and_not_half", (False, False)),
),
named=True,
))
def test_resize_nearest_neighbor_invalid(self, align_and_half):
np.random.seed(42)
align, half_pixel = align_and_half
def resize_nearest_neighbor(imgs):
return tf.raw_ops.ResizeNearestNeighbor(
images=imgs,
size=(10, 10),
align_corners=align,
half_pixel_centers=half_pixel)
images = np.random.normal(size=(4, 7, 7, 3)).astype(np.float32)
with self.assertRaisesRegex(
ValueError, "align_corners=False and half_pixel_centers=True"):
_ = tf2jax.convert_functional(tf.function(resize_nearest_neighbor),
images)
@chex.variants(with_jit=True, without_jit=True)
def test_reverse(self):
axis = (1, 0)
def reverse(x):
return tf.raw_ops.ReverseV2(tensor=x, axis=axis)
self._test_convert(reverse, [[[1, 2], [3, 4]]])
def reverse_static():
return tf.zeros(reverse([[1, 2], [3, 4]])[0])
self._test_convert(reverse_static, ())
@chex.variants(with_jit=True, without_jit=True)
def test_scatter_nd(self):
idxs = np.array([[2, 3], [5, 1]], dtype=np.int32)
vals = np.array([[1, 2], [3, 4]], dtype=np.int32)
shape = np.array([10, 5, 2], dtype=np.int32)
def scatter_nd(idx, val):
return tf.raw_ops.ScatterNd(indices=idx, updates=val, shape=shape)
self._test_convert(scatter_nd, [idxs, vals])
def scatter_nd_static():
      return tf.zeros((tf.reduce_sum(scatter_nd(idxs, vals)),))
self._test_convert(scatter_nd_static, ())
@chex.variants(with_jit=True, without_jit=True)
def test_select(self):
inputs = [
np.array([True, False]),
np.array([[1, 2], [3, 4]]),
np.array([[5, 6], [7, 8]]),
]
def select(cond, x, y):
return tf.raw_ops.Select(condition=cond, x=x, y=y)
self._test_convert(select, inputs)
def select_static():
return tf.zeros(select([True, False], [1, 2], [3, 4]))
self._test_convert(select_static, ())
@chex.variants(with_jit=True, without_jit=True)
def test_size(self):
inputs = np.array((10, 5))
def size(x):
return tf.raw_ops.Size(input=x)
self._test_convert(size, inputs)
def size_static():
return tf.zeros(size(inputs))
self._test_convert(size_static, ())
@chex.variants(with_jit=True, without_jit=True)
def test_slice(self):
inputs, begins, sizes = [np.array([[1, 2], [3, 4], [5, 6]]), [1, 1], [2, 1]]
def slice_fn(xs):
return tf.raw_ops.Slice(input=xs, begin=begins, size=sizes)
self._test_convert(slice_fn, inputs)
# Check static inputs result in static outputs.
def slice_static():
return tf.zeros(slice_fn(inputs)[:, 0])
self._test_convert(slice_static, [])
@chex.variants(with_jit=True, without_jit=True)
def test_sparse_softmax_cross_entropy_with_logits(self):
features = np.array([np.arange(5), np.arange(5, 0, -1)], dtype=np.float32)
labels = np.array([2, 3])
def cross_entropy_fn(xs, ys):
return tf.raw_ops.SparseSoftmaxCrossEntropyWithLogits(
features=xs, labels=ys)
self._test_convert(cross_entropy_fn, (features, labels))
@chex.variants(with_jit=True, without_jit=True)
def test_split(self):
inputs = np.array([[1, 2], [3, 4], [5, 6]])
def split(inputs):
return tf.raw_ops.Split(axis=0, value=inputs, num_split=len(inputs))
self._test_convert(split, inputs)
# Check static inputs result in static outputs.
def split_static():
return [tf.zeros(s[0]) for s in split(inputs)]
self._test_convert(split_static, [])
@chex.variants(with_jit=True, without_jit=True)
@parameterized.named_parameters(
chex.params_product(
(
("split1", (2, 4)),
("split2", (-1, 4)),
("split3", (2, -1)),
),
(
("axis0", 0),
("axis1", 1),
),
named=True,
))
def test_splitv(self, splits, axis):
inputs = np.arange(36).reshape(6, 6)
def splitv(inputs):
return tf.raw_ops.SplitV(
value=inputs,
size_splits=np.array(splits),
axis=axis,
num_split=len(splits))
self._test_convert(splitv, inputs)
# Check static inputs result in static outputs.
def split_static():
return [tf.zeros(s[0]) for s in splitv(inputs)]
self._test_convert(split_static, [])
@chex.variants(with_jit=True, without_jit=True)
def test_stateless_random_get_alg(self):
@tf.function
def tf_func():
return tf.raw_ops.StatelessRandomGetAlg()
jax_func = tf2jax.convert_functional(tf_func)
jax_alg = self.variant(jax_func)()
self.assertEqual(jax_alg, tf.random.Algorithm.AUTO_SELECT.value)
@chex.variants(with_jit=True, without_jit=True)
def test_stateless_random_normal(self):
def tf_func(seed):
return tf.random.stateless_normal(
shape=(16, 7), seed=seed, dtype=tf.float32)
seed = np.array([0, 42], dtype=np.int32)
self._test_convert(tf_func, (seed,), check_shape_only=True)
def raw_func(seed):
key, counter = tf.raw_ops.StatelessRandomGetKeyCounter(seed=seed)
return tf.raw_ops.StatelessRandomNormalV2(
shape=(1000000, 7), key=key, counter=counter, alg=3, dtype=tf.float32)
seed = np.array([0, 42], dtype=np.int32)
self._test_convert(raw_func, (seed,), check_shape_only=True)
with self.subTest("check_statistics"):
jax_func = tf2jax.convert_functional(
tf.function(raw_func, jit_compile=True), seed)
jax_func = self.variant(jax_func)
samples = jax_func(seed)
self.assertAllClose(np.mean(samples), 0.0, atol=1e-3)
self.assertAllClose(np.std(samples), 1.0, atol=1e-3)
with self.subTest("check_same_seed"):
same_samples = jax_func(seed)
self.assertAllClose(samples, same_samples)
with self.subTest("check_diff_seed"):
another_seed = np.array([0, 47], dtype=np.int32)
diff_samples = jax_func(another_seed)
self.assertNotAllClose(samples, diff_samples)
@chex.variants(with_jit=True, without_jit=True)
def test_stateless_random_uniform(self):
def tf_func(seed):
return tf.random.stateless_uniform(
shape=(16, 7), seed=seed, dtype=tf.float32)
seed = np.array([0, 42], dtype=np.int32)
self._test_convert(tf_func, (seed,), check_shape_only=True)
def raw_func(seed):
key, counter = tf.raw_ops.StatelessRandomGetKeyCounter(seed=seed)
return tf.raw_ops.StatelessRandomUniformV2(
shape=(1000000, 7), key=key, counter=counter, alg=3, dtype=tf.float32)
seed = np.array([0, 42], dtype=np.int32)
self._test_convert(raw_func, (seed,), check_shape_only=True)
with self.subTest("check_statistics"):
jax_func = tf2jax.convert_functional(
tf.function(raw_func, jit_compile=True), seed)
jax_func = self.variant(jax_func)
samples = jax_func(seed)
for expected in np.linspace(0.1, 1, 10):
actual = np.mean(samples < expected)
self.assertAllClose(actual, expected, atol=1e-3)
with self.subTest("check_same_seed"):
same_samples = jax_func(seed)
self.assertAllClose(samples, same_samples)
with self.subTest("check_diff_seed"):
another_seed = np.array([0, 47], dtype=np.int32)
diff_samples = jax_func(another_seed)
self.assertNotAllClose(samples, diff_samples)
@chex.variants(with_jit=True, without_jit=True)
def test_stateless_random_uniform_int(self):
def tf_func(seed):
return tf.random.stateless_uniform(
shape=(16, 7), seed=seed, minval=0, maxval=10, dtype=tf.int32)
seed = np.array([0, 42], dtype=np.int32)
self._test_convert(tf_func, (seed,), check_shape_only=True)
def raw_func(seed):
key, counter = tf.raw_ops.StatelessRandomGetKeyCounter(seed=seed)
return tf.raw_ops.StatelessRandomUniformIntV2(
shape=(1000000, 7),
key=key,
counter=counter,
alg=3,
minval=0,
maxval=10)
seed = np.array([0, 42], dtype=np.int32)
self._test_convert(raw_func, (seed,), check_shape_only=True)
with self.subTest("check_statistics"):
jax_func = tf2jax.convert_functional(
tf.function(raw_func, jit_compile=True), seed)
jax_func = self.variant(jax_func)
samples = jax_func(seed)
for val in range(0, 10):
actual = np.mean(samples == val)
self.assertAllClose(actual, 0.1, atol=1e-3)
with self.subTest("check_same_seed"):
same_samples = jax_func(seed)
self.assertAllClose(samples, same_samples)
with self.subTest("check_diff_seed"):
another_seed = np.array([0, 47], dtype=np.int32)
diff_samples = jax_func(another_seed)
self.assertNotAllClose(samples, diff_samples)
@chex.variants(with_jit=True, without_jit=True)
def test_stateless_random_uniform_full_int(self):
def raw_func(seed):
key, counter = tf.raw_ops.StatelessRandomGetKeyCounter(seed=seed)
return tf.raw_ops.StatelessRandomUniformFullIntV2(
shape=(1000000, 7), key=key, counter=counter, alg=3, dtype=tf.int32)
seed = np.array([0, 42], dtype=np.int32)
self._test_convert(raw_func, (seed,), check_shape_only=True)
with self.subTest("check_statistics"):
jax_func = tf2jax.convert_functional(
tf.function(raw_func, jit_compile=True), seed)
jax_func = self.variant(jax_func)
samples = jax_func(seed)
int32_min = np.iinfo(np.int32).min
int32_max = np.iinfo(np.int32).max
for val in range(int32_min, int32_max, 200_000_000):
actual = np.mean(samples < val)
expected = (val - int32_min) / (int32_max - int32_min)
self.assertAllClose(actual, expected, atol=1e-3)
with self.subTest("check_same_seed"):
same_samples = jax_func(seed)
self.assertAllClose(samples, same_samples)
with self.subTest("check_diff_seed"):
another_seed = np.array([0, 47], dtype=np.int32)
diff_samples = jax_func(another_seed)
self.assertNotAllClose(samples, diff_samples)
@chex.variants(with_jit=True, without_jit=True)
def test_stateless_multinomial(self):
def raw_func(logits, seed):
return tf.raw_ops.StatelessMultinomial(
logits=logits, num_samples=1000000, seed=seed, output_dtype=tf.int32)
inputs = np.array([np.arange(5), np.arange(5, 0, -1)], dtype=np.float32)
seed = np.array([0, 42], dtype=np.int32)
self._test_convert(raw_func, (inputs, seed), check_shape_only=True)
with self.subTest("check_statistics"):
jax_func = tf2jax.convert_functional(
tf.function(raw_func, jit_compile=True), inputs, seed)
jax_func = self.variant(jax_func)
samples = jax_func(inputs, seed)
for batch in range(inputs.shape[0]):
probs = jax.nn.softmax(inputs[batch])
samps = samples[batch]
for idx in range(inputs.shape[1]):
self.assertAllClose(probs[idx], np.mean(samps == idx), atol=1e-3)
with self.subTest("check_same_seed"):
same_samples = jax_func(inputs, seed)
self.assertAllClose(samples, same_samples)
with self.subTest("check_diff_seed"):
another_seed = np.array([0, 47], dtype=np.int32)
diff_samples = jax_func(inputs, another_seed)
self.assertNotAllClose(samples, diff_samples)
@chex.variants(with_jit=True, without_jit=True)
@parameterized.named_parameters(
("slice0", lambda x: x[2:, :, :, :3]),
("slice1", lambda x: x[1:-2, ..., :3:2]),
("slice2", lambda x: x[..., 1:-2, :3:2]),
("slice3", lambda x: x[1:-2, :-2:2, ...]),
("newaxis0", lambda x: x[tf.newaxis, ...]),
("newaxis1", lambda x: x[..., tf.newaxis, 2:5]),
("newaxis2", lambda x: x[:4, tf.newaxis, ..., :2]),
("shrink0", lambda x: x[..., 2, 3]),
("shrink1", lambda x: x[2:, 3, :, 5]),
("mixed0", lambda x: x[..., 4:1:-1, tf.newaxis, 3]),
("zero_slice", lambda x: x[:, :0, ...]),
)
def test_strided_slice(self, slice_fn):
inputs = np.linspace(0., 1., 4 * 5 * 6 * 7).astype(np.float32)
inputs = inputs.reshape((4, 5, 6, 7))
self._test_convert(slice_fn, inputs)
@chex.variants(with_jit=True, without_jit=True)
def test_sum(self):
inputs = np.array(np.reshape(range(120), (5, 4, 3, 2)), dtype=np.int32)
def sum_fn(xs):
return tf.raw_ops.Sum(input=xs, axis=[2, 1])
self._test_convert(sum_fn, inputs)
# Check static inputs result in static outputs.
def sum_static():
return tf.zeros(sum_fn(inputs)[0])
self._test_convert(sum_static, [])
@chex.variants(with_jit=True, without_jit=True)
def test_switch_case(self):
f1 = lambda: tf.constant(17)
f2 = lambda: tf.constant(31)
f3 = lambda: tf.constant(-1)
def switch_fn(x):
return tf.switch_case(
tf.convert_to_tensor(x),
branch_fns={
0: f1,
1: f2,
},
default=f3,
)
self._test_convert(switch_fn, [np.array(1, dtype=np.int32)])
@chex.variants(with_jit=True, without_jit=True)
@parameterized.named_parameters(
("defined", (3, 2)),
("partial0", (-1, 2)),
("partial1", (3, -1)),
("undefined", -1)
)
def test_tensor_list_get_item(self, init_shape):
@tf.function(jit_compile=False)
def tensor_list_fn():
handle = tf.raw_ops.TensorListReserve(
element_shape=init_shape, num_elements=3, element_dtype=tf.int32)
return tf.raw_ops.TensorListGetItem(
input_handle=handle,
index=1,
element_shape=(3, 2),
element_dtype=tf.int32)
self._test_convert(tensor_list_fn, [])
@chex.variants(with_jit=True, without_jit=True)
@parameterized.named_parameters(
("defined", dict(fn_output_signature=tf.TensorSpec((5, 4), tf.int32))),
("undefined", {})
)
def test_tensor_list(self, output_signature_kwargs):
def tensor_list_fn():
return tf.map_fn(
fn=lambda t: tf.ones((5, 4), dtype=tf.int32) * t,
elems=tf.constant([3, 5, 2]),
**output_signature_kwargs,
)
self._test_convert(tensor_list_fn, [])
@chex.variants(with_jit=True, without_jit=True)
@parameterized.named_parameters(
chex.params_product(
(
(
"1D",
np.ones([8], dtype=np.float32),
np.array([[4], [3], [1], [7]], dtype=np.int32),
np.array([9, 10, 11, 12], dtype=np.float32),
),
(
"2D_scalar",
np.ones([8, 2], dtype=np.float32),
np.array([[4, 0], [3, 1], [1, 0], [7, 1]], dtype=np.int32),
np.array([9, 10, 11, 12], dtype=np.float32),
),
(
"2D_slice",
np.ones([8, 2], dtype=np.float32),
np.array([[4], [3], [1], [7]], dtype=np.int32),
np.array([[9, 90], [10, 100], [11, 110], [12, 120]],
dtype=np.float32),
),
(
"3D_scalar",
np.ones([8, 3, 2], dtype=np.float32),
np.array([[4, 0, 0], [3, 1, 1], [1, 2, 0], [7, 0, 1]],
dtype=np.int32),
np.array([9, 10, 11, 12], dtype=np.float32),
),
(
"3D_slice",
np.ones([8, 3, 2], dtype=np.float32),
np.array([[4, 0], [3, 1], [1, 2], [7, 0]], dtype=np.int32),
np.array([[9, 90], [10, 100], [11, 110], [12, 120]],
dtype=np.float32),
),
(
"3D_block",
np.ones([8, 3, 2], dtype=np.float32),
np.array([[4], [3], [1], [7]], dtype=np.int32),
np.array([
[[9, 90], [91, 92], [93, 94]],
[[10, 100], [101, 102], [103, 104]],
[[11, 110], [111, 112], [113, 114]],
[[12, 120], [121, 122], [123, 124]],
],
dtype=np.float32),
),
),
named=True,
))
def test_tensor_scatter_update(self, tensor, indices, updates):
def scatter(x, inds, ups):
return tf.raw_ops.TensorScatterUpdate(tensor=x, indices=inds, updates=ups)
self._test_convert(scatter, [tensor, indices, updates])
@chex.variants(with_jit=True, without_jit=True)
def test_unpack(self):
inputs = np.array([[1, 2], [3, 4], [5, 6]])
def unpack(inputs):
return tf.raw_ops.Unpack(value=inputs, num=len(inputs))
self._test_convert(unpack, inputs)
# Check static inputs result in static outputs.
def unpack_static():
return [tf.zeros(s) for s in unpack(inputs)]
self._test_convert(unpack_static, [])
@chex.variants(with_jit=True, without_jit=True)
@parameterized.parameters("UnsortedSegmentSum", "UnsortedSegmentMax",
"UnsortedSegmentMin", "UnsortedSegmentProd")
def test_unsorted_segment(self, op_name):
def segment_reduce(x, ids):
return getattr(tf.raw_ops, op_name)(
data=x, segment_ids=ids, num_segments=2)
data = np.array([5, 1, 7, 2, 3, 4], np.float32)
segment_ids = np.array([0, 0, 1, 1, 0, 1], np.int32)
self._test_convert(segment_reduce, [data, segment_ids])
data = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [4, 3, 2, 1]], np.float32)
segment_ids = np.array([0, 1, 0], np.int32)
self._test_convert(segment_reduce, [data, segment_ids])
@chex.variants(without_jit=True)
def test_where(self):
inputs = [np.array([True, False])]
def where(cond):
return tf.raw_ops.Where(condition=cond)
self._test_convert(where, inputs)
@chex.variants(with_jit=True, without_jit=True)
def test_while_loop(self):
inputs = np.array(np.reshape(range(24), (4, 3, 2)), dtype=np.float32)
    # If offset is a tensor, then the raw versions will fail to capture it as
    # an input. tf.while_loop correctly handles it.
offset = np.array(3.14, dtype=np.float32)
cond = lambda v, i: tf.less(i, 10)
body = lambda v, i: (v + tf.cast(i, tf.float32) + offset, tf.add(i, 1))
step = tf.constant(0)
def while_loop(x):
return tf.while_loop(cond, body, [x, step])
self._test_convert(while_loop, inputs)
if jax.default_backend().lower() == "gpu":
self.skipTest("Skip remaining tests on GPU due to CUDA errors.")
def raw_stateless_while(x):
loop_vars = [x, step]
return tf.raw_ops.StatelessWhile(
input=loop_vars,
cond=tf.function(cond).get_concrete_function(*loop_vars),
body=tf.function(body).get_concrete_function(*loop_vars))
self._test_convert(raw_stateless_while, inputs)
def raw_while(x):
loop_vars = [x, step]
return tf.raw_ops.While(
input=loop_vars,
cond=tf.function(cond).get_concrete_function(*loop_vars),
body=tf.function(body).get_concrete_function(*loop_vars))
self._test_convert(raw_while, inputs)
@chex.variants(with_jit=True, without_jit=True)
def test_while_loop_with_random(self):
inputs = np.array(np.reshape(range(24), (4, 3, 2)), dtype=np.float32)
cond = lambda v, i: tf.less(i, 10)
body = lambda v, i: (v + tf.random.uniform(v.shape), tf.add(i, 1))
step = tf.constant(0)
def while_loop(x):
return tf.while_loop(cond, body, [x, step])
self._test_convert(while_loop, inputs, check_shape_only=True)
@chex.variants(with_jit=True, without_jit=True)
def test_while_loop_side_effect(self):
inputs = np.array([42, 47], dtype=np.float32)
acc_var = tf.Variable(initial_value=[3, 14], dtype=tf.float32)
cond = lambda v, i: tf.less(i, 2)
body = lambda v, i: (acc_var.assign_add(v), tf.add(i, 1))
step = tf.constant(0)
def while_loop(x):
return tf.while_loop(cond, body, [x, step]), [acc_var]
with self.assertRaisesRegex(ValueError, "Some updated parameters are lost"):
self._test_convert(while_loop, inputs, functional=False)
@chex.variants(with_jit=True, without_jit=True)
def test_while_captured_static_args(self):
# This can still fail if output_size is an argument to cond and body.
output_size = tf.constant([10, 5])
cond = lambda v, i: tf.less(i, 10)
body = lambda v, i: (v + tf.ones(output_size, tf.float32), tf.add(i, 1))
step = tf.constant(0)
@tf.function
def while_loop(x):
return tf.while_loop(cond, body, [x, step])
if jax.default_backend().lower() == "tpu":
tpu_context = self.assertRaisesRegex(
tf.errors.InvalidArgumentError,
("Input 0 to node `while/ones` with op Fill must be a compile-time "
"constant."))
else:
tpu_context = contextlib.nullcontext()
with tpu_context:
self._test_convert(while_loop, np.zeros(output_size, np.float32))
@chex.variants(with_jit=True, without_jit=True)
def test_assign_side_effect(self):
inputs = np.array([42, 47], dtype=np.float32)
acc_var = tf.Variable(initial_value=[3, 14], dtype=tf.float32, name="blah")
def tf_fn(x):
unused_y = tf.sin(x)
acc_var.assign_add(tf.cos(x))
return ()
jax_result, jax_params = self._test_convert(tf_fn, inputs, functional=False)
self.assertEqual(jax_result, ())
self.assertAllClose(jax_params["blah"], acc_var)
@chex.variants(with_jit=True, without_jit=True)
def test_top_k(self):
inputs = np.array([range(10), range(10)[::-1]], dtype=np.float32)
k = tf.constant(5, dtype=tf.int32)
def top_k(x):
return tf.raw_ops.TopKV2(input=x, k=k, sorted=True)
self._test_convert(top_k, inputs)
@chex.variants(with_jit=True, without_jit=True)
@parameterized.parameters((2., 2.), (2., 0.), (0., 0.), (0., 2.))
def test_div_no_nan(self, x, y):
@tf.function
def div_no_nan(inputs):
x, y = inputs
return tf.math.divide_no_nan(x=x, y=y)
x = np.array(x)
y = np.array(y)
tf_x = tf.convert_to_tensor(x)
tf_y = tf.convert_to_tensor(y)
with tf.GradientTape() as g:
g.watch(tf_x)
g.watch(tf_y)
output = div_no_nan([tf_x, tf_y])
tf_gradient = g.gradient(output, [tf_x, tf_y])
jax_func = tf2jax.convert_functional(div_no_nan, [x, y])
jax_func = self.variant(jax_func)
jax_gradient = jax.grad(jax_func)([x, y])
with self.subTest("check_forward_pass"):
self.assertAllClose(jax_func([x, y]), np.asarray(output))
with self.subTest("check_backward_pass"):
self.assertAllClose(jax_gradient, np.asarray(tf_gradient))
@chex.variants(with_jit=True, without_jit=True)
def test_angle(self):
inputs = np.array([-2.25 + 4.75j, 3.25 + 5.75j], dtype=np.csingle)
def angle(x):
return tf.raw_ops.Angle(input=x)
self._test_convert(angle, inputs)
@chex.variants(with_jit=True, without_jit=True)
def test_rfft(self):
inputs = np.array([2.25, 3.25] * 2, dtype=np.single)
def rfft(x):
return tf.raw_ops.RFFT(input=x, fft_length=[len(x)])
self._test_convert(rfft, inputs)
@chex.variants(with_jit=True, without_jit=True)
def test_irfft(self):
inputs = np.array([-2.25 + 4.75j, 3.25 + 5.75j] * 2, dtype=np.csingle)
def irfft(x):
return tf.raw_ops.IRFFT(input=x, fft_length=[len(x)])
self._test_convert(irfft, inputs)
@chex.variants(with_jit=True, without_jit=True)
def test_var_handle(self):
def var_handle():
return tf.raw_ops.VarHandleOp(dtype=tf.float32, shape=(3, 5), name="blah")
with self.assertRaisesRegex(
ValueError, "VarHandleOp `blah` cannot be evaluated"
):
self._test_convert(var_handle, [])
@chex.variants(with_jit=True, without_jit=True)
@parameterized.parameters(
"LowerBound",
"UpperBound",
"SearchSorted",
)
def test_lower_upper_bound(self, op_name):
np.random.seed(42)
inputs = (
np.array([[0, 1, 2, 3, 4], [-4, -3, -2, -1, 0]], dtype=np.float32),
np.array(
[[3.5, 0, 1.5, 10, -1], [-3.5, 0, -1.5, -10, 1]], dtype=np.float32)
)
if op_name == "SearchSorted":
tf_func = lambda x, y: tf.searchsorted(x, y, out_type=tf.int32)
else:
# LowerBound and UpperBound expect keyword arguments.
def tf_func(x, y):
return getattr(tf.raw_ops, op_name)(sorted_inputs=x, values=y)
self._test_convert(tf_func, inputs)
if __name__ == "__main__":
tf.test.main()
| tf2jax-main | tf2jax/_src/ops_test.py |
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for JAX -> TF -> JAX with partitioning."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
import haiku as hk
import jax
from jax.experimental import jax2tf
import jax.numpy as jnp
import numpy as np
import tensorflow as tf
from tf2jax._src import test_util
from tf2jax._src import tf2jax
import tree
def _net(x):
y = hk.Conv2D(32, kernel_shape=3, name='A1')(x)
y = hk.Conv2D(32, kernel_shape=3, name='A2')(y)
return y
def _get_param_pspecs():
return {
'A1': {
'b': jax.sharding.PartitionSpec('model'),
'w': jax.sharding.PartitionSpec(None, None, None, 'model'),
},
'A2': {
'b': jax.sharding.PartitionSpec(None),
'w': jax.sharding.PartitionSpec(None, None, 'model', None),
},
}
class ShardingTest(test_util.TestCase):
@parameterized.named_parameters(
chex.params_product(
(('enable_xla', True), ('disable_xla', False)),
(('native_serialization', True), ('graph_serialization', False)),
named=True,
)
)
def test_sharding(self, enable_xla, native_serialization):
if jax.default_backend().upper() != 'TPU':
self.skipTest('Only run sharding tests on TPU.')
if not enable_xla and native_serialization:
self.skipTest(
          'native_serialization is only supported with enable_xla=True.'
)
# Set up network and inputs.
transformed = hk.without_apply_rng(hk.transform(_net))
rng = jax.random.PRNGKey(42)
images = jax.random.normal(rng, [2, 16, 16, 3])
grad_tols = dict(rtol=1e-4)
# Init params.
params = jax.jit(transformed.init)(rng, images)
# Partitioned to 8 devices.
assert jax.device_count() == 8, jax.device_count()
mesh = jax.sharding.Mesh(
np.array(jax.devices()).reshape((2, 4)), ('data', 'model'))
params_pspecs = _get_param_pspecs()
def to_xla_sharding(pspecs):
return jax.tree_map(
lambda x: jax.sharding.NamedSharding(mesh, x), pspecs)
partitioned_apply = jax.jit(
transformed.apply,
in_shardings=to_xla_sharding(
(params_pspecs, jax.sharding.PartitionSpec('data'))
),
)
self.assertAllClose(
jax.jit(transformed.apply)(params, images),
partitioned_apply(params, images),
)
# Check gradients.
@jax.grad
def unpartitioned_grad(params, xs):
return jnp.sum(jax.jit(transformed.apply)(params, xs))
@jax.grad
def partitioned_grad(params, xs):
return jnp.sum(partitioned_apply(params, xs))
self.assertAllClose(
unpartitioned_grad(params, images),
partitioned_grad(params, images),
**grad_tols,
)
# Convert to TF and save.
@tf.function(autograph=False, jit_compile=True)
def tf_fn(params, inputs):
return jax2tf.convert(
partitioned_apply,
enable_xla=enable_xla,
native_serialization=native_serialization,
)(params, inputs)
tf_fn(params, images)
module = tf.Module()
module.f = tf_fn
export_dir = self.create_tempdir().full_path
tf.saved_model.save(module, export_dir)
# Load and tf2jax
reloaded = tf.saved_model.load(export_dir)
jax_fn = tf2jax.convert_functional(
tf.function(reloaded.f, autograph=False),
params=tree.map_structure(np.zeros_like, params),
inputs=np.zeros_like(images),
)
self.assertAllClose(
transformed.apply(params, images), jax_fn(params, images)
)
self.assertAllClose(
jax.jit(transformed.apply)(params, images),
jax.jit(jax_fn)(params, images),
)
# Check gradients.
@jax.grad
def reloaded_grad(params, xs):
return jnp.sum(jax.jit(jax_fn)(params, xs))
self.assertAllClose(
jax.jit(unpartitioned_grad)(params, images),
jax.jit(reloaded_grad)(params, images),
**grad_tols,
)
# Check shardings.
unpartitioned_output = jax.jit(transformed.apply)(params, images)
self.assertLen(unpartitioned_output.devices(), 1)
partitioned_output = partitioned_apply(params, images)
self.assertLen(partitioned_output.devices(), 8)
reloaded_output = jax.jit(jax_fn)(params, images)
self.assertLen(reloaded_output.devices(), 8)
self.assertTrue(
partitioned_output.sharding.is_equivalent_to(
reloaded_output.sharding, 4
)
)
if __name__ == '__main__':
absltest.main()
| tf2jax-main | tf2jax/_src/sharding_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental functions for converting TF graphs to Jax functions."""
import collections
import functools
import inspect
import itertools
import json
from typing import Any, Callable, Iterable, Iterator, Optional, Mapping, NamedTuple, Sequence, Tuple, Union
from absl import logging
import jax
import jax.numpy as jnp
import numpy as np
import tensorflow as tf
from tf2jax._src import config
from tf2jax._src import ops
from tf2jax._src import utils
import tree
# Import usage logging here.
from tensorflow.python.framework import op_def_registry # pylint: disable=no-name-in-module
from tensorflow.python.framework import ops as tf_ops # pylint: disable=no-name-in-module
try:
import tf2jax.experimental.ops # pylint: disable=g-import-not-at-top,unused-import
except ImportError:
logging.info(
"Proceeding without support for experimental ops, e.g. XlaCallModule.")
ArrayLike = ops.ArrayLike
SpecTree = Union[tf.TensorSpec, Iterable["SpecTree"], Mapping[str, "SpecTree"]]
safe_zip = jax.util.safe_zip
class AnnotatedFunction:
"""Callable function with additional metadata.
The metadata includes: structured inputs and outputs (composed of
tf.TensorSpec) and the original function signature.
"""
def __init__(
self,
# TODO(b/235830619) Add more specific type annotations.
fn: Callable[..., Any],
structured_inputs: SpecTree,
structured_outputs: SpecTree,
signature: inspect.Signature,
):
self.fn = fn
self.structured_inputs = structured_inputs
self.structured_outputs = structured_outputs
# Attributes that might be expected by downstream usage.
self.__doc__ = fn.__doc__
self.__name__ = fn.__name__
self.__signature__ = signature
# This should not be called directly.
def __call__(self, *args, **kwargs):
return self.fn(*args, **kwargs)
@property
def signature(self) -> inspect.Signature:
return self.__signature__
_EMPTY_RETURN_OP_NAME = ops._EMPTY_RETURN_OP_NAME # pylint: disable=protected-access
_EMPTY_RETURN_VALUE = ops._EMPTY_RETURN_VALUE # pylint: disable=protected-access
_UNUSED_INPUT = object()
_XLA_CALL_MODULE = "XlaCallModule"
def _fix_jax_poly_shape(shape: Tuple[Any, ...]) -> Tuple[Any, ...]:
good_shape = []
for dim in shape:
try:
# This catches _DimPolynomial from jax2tf.
tf.compat.v1.Dimension(dim)
good_shape.append(dim)
except TypeError:
good_shape.append(None)
return tuple(good_shape)
class _TensorEdge(NamedTuple):
"""Represents an input/output Tensor."""
op_name: str
idx: int = 0
is_control: bool = False
@classmethod
def from_string(
cls,
op_str: str,
node_map: Optional[Mapping[str, Any]] = None,
):
"""Parse a NodeDef input string."""
node_map = node_map or {}
is_control = op_str.startswith("^")
if is_control:
op_str = op_str[1:]
args = op_str.split(":")
if len(args) == 1:
op_name, = args
output_name = ""
idx = 0
elif len(args) == 2:
op_name, idx = args
output_name = ""
idx = int(idx)
elif len(args) == 3:
op_name, output_name, idx = args
op_def = op_def_registry.get(node_map[op_name].op)
assert op_def.output_arg
if len(op_def.output_arg) == 1:
idx = int(idx)
else:
found_sequence_args = any([
arg.number_attr or arg.type_list_attr for arg in op_def.output_arg
])
if idx != "0" or found_sequence_args:
raise ValueError(
"Only zero index and single tensors are supported for multiple "
"output args.\n"
f"op_str={op_str}\n"
f"op_def={op_def}")
idx = [arg.name for arg in op_def.output_arg].index(output_name)
else:
raise ValueError(f"Invalid input spec string: `{op_str}`")
return cls(op_name=op_name, idx=idx, is_control=is_control)
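# Illustrative sketch (added for this edit, not part of the original source):
# how NodeDef input strings map onto _TensorEdge instances, based on the
# parsing logic above. The node names "Sin" and "Assert" are hypothetical.
def _tensor_edge_examples():
  # "Sin:1" refers to output index 1 of node "Sin".
  data_edge = _TensorEdge.from_string("Sin:1")
  assert data_edge == _TensorEdge(op_name="Sin", idx=1, is_control=False)
  # A leading "^" marks a control dependency rather than a data edge.
  control_edge = _TensorEdge.from_string("^Assert")
  assert control_edge == _TensorEdge(op_name="Assert", idx=0, is_control=True)
  return data_edge, control_edge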
_TF_ASSIGN_OPS = (
"AssignAddVariableOp", "AssignSubVariableOp", "AssignVariableOp"
)
_TF_RANDOM_OPS = ("RandomStandardNormal", "RandomUniform", "RandomUniformInt")
class _LibraryFunction(NamedTuple):
"""A library function."""
fn: Callable[..., Any]
require_rng: bool
# Optional fields (mainly) used by gradient functions.
params: Optional[Mapping[str, ArrayLike]] = None
input_specs: Optional[Tuple[tf.TensorSpec, ...]] = None
output_specs: Optional[Tuple[tf.TensorSpec, ...]] = None
# If fn is a gradient function, this is the output specs for the original fn.
orig_fn_output_specs: Optional[Tuple[tf.TensorSpec, ...]] = None
# Whether an output is unmodified input to the function.
output_is_input: Optional[Tuple[bool]] = None
# Inputs corresponding to VarHandleOp
variable_input_specs: Optional[Tuple[tf.TensorSpec, ...]] = None
def __call__(self, *args, **kwargs):
if self.params:
return self.fn(self.params, *args, **kwargs)
else:
# Ignore parameters in inputs and outputs.
return self.fn({}, *args, **kwargs)[0]
def _unbox_named_args(
named_args: Sequence[Tuple[str, jnp.ndarray]],
inputs: Tuple[_TensorEdge, ...],
) -> Tuple[jnp.ndarray, ...]:
"""Extract inputs from named arguments."""
if len(named_args) != len(inputs):
raise ValueError(
f"Expected {len(inputs)} arguments, received {len(named_args)}")
for (arg_name, _), inp in safe_zip(named_args, inputs):
if arg_name != inp.op_name:
raise ValueError(f"Expected inputs {inp.op_name}, found {arg_name}")
unboxed_args = [
arg[inp.idx] if isinstance(arg, (list, tuple)) else arg
for arg, inp in safe_zip([x for _, x in named_args], inputs)
]
return tuple(unboxed_args)
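# Illustrative sketch (added for this edit, not part of the original source):
# _unbox_named_args pairs each named value with its _TensorEdge and indexes
# into multi-output values. The argument names "x" and "y" are hypothetical.
def _unbox_named_args_example():
  named_args = [("x", (10, 11)), ("y", 12)]
  edges = (_TensorEdge(op_name="x", idx=1), _TensorEdge(op_name="y"))
  # The tuple-valued "x" is indexed with idx=1; the scalar "y" passes through.
  assert _unbox_named_args(named_args, edges) == (11, 12)
  return _unbox_named_args(named_args, edges)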
class _OpNode:
"""Represents an Op."""
def __init__(self, proto, library: Mapping[str, _LibraryFunction],
node_map: Mapping[str, Any]):
self.jax_func = ops.get_parser(proto.op)(proto)
self.op = proto.op
self.name = proto.name
inputs = [_TensorEdge.from_string(inp, node_map) for inp in proto.input]
self.inner_fns = dict()
if isinstance(self.jax_func, ops._HigherOrderFunction):
self.inner_fns = self.jax_func.get_inner_functions(library)
for input_name in self.jax_func.get_additional_inputs(**self.inner_fns):
inputs.append(_TensorEdge.from_string(input_name, node_map))
self.control_inputs = tuple([inp for inp in inputs if inp.is_control])
self.inputs = tuple([inp for inp in inputs if not inp.is_control])
@property
def all_inputs(self) -> Tuple[_TensorEdge, ...]:
return self.inputs + self.control_inputs
@property
def require_rng(self) -> bool:
inner_require_rngs = any([fn.require_rng for fn in self.inner_fns.values()])
return self.op in _TF_RANDOM_OPS or inner_require_rngs
def __call__(
self,
named_args: Sequence[Tuple[str, jnp.ndarray]],
*,
rng: jnp.ndarray,
) -> Tuple[Tuple[jnp.ndarray, ...], Mapping[str, jnp.ndarray]]:
unboxed_args = _unbox_named_args(named_args, self.inputs)
extras_dict = dict(rng=rng) if self.require_rng else dict()
extras_dict.update(self.inner_fns)
outputs = self.jax_func(*unboxed_args, **extras_dict)
# Return updated variables.
if self.op in _TF_ASSIGN_OPS:
# This assumes the assign op returns the updated value.
updated_params = {named_args[0][0]: outputs}
else:
updated_params = {}
return outputs, updated_params
def __repr__(self) -> str:
message = f"_OpNode(name={repr(self.name)}, op={repr(self.op)}"
if self.inputs:
message += f", inputs={repr([x.op_name for x in self.inputs])}"
if self.control_inputs:
message += f", controls={repr([x.op_name for x in self.control_inputs])}"
message += ")"
return message
def _parse_input(op_str: str) -> str:
# Parses an input name in tf.NodeDef. This extracts the node name, removes
# the control character and the output tensor index e.g. ^Sin:0 -> Sin
return op_str.split(":")[0].split("^")[-1]
def _toposort(
nodes: Mapping[str, tf.compat.v1.NodeDef],
end_node_names: Tuple[str, ...],
):
"""Topological sorting of nodes."""
child_counts = {}
stack = list(end_node_names)
while stack:
node_name = stack.pop()
if node_name in child_counts:
child_counts[node_name] += 1
else:
child_counts[node_name] = 1
node = nodes[node_name]
stack.extend([_parse_input(v) for v in node.input])
for node_name in end_node_names:
child_counts[node_name] -= 1
sorted_nodes = []
childless_nodes = [
node_name for node_name in end_node_names if child_counts[node_name] == 0
]
if not childless_nodes:
raise ValueError("No childless nodes found.")
while childless_nodes:
node_name = childless_nodes.pop()
node = nodes[node_name]
sorted_nodes.append(node)
for parent in [_parse_input(v) for v in node.input]:
if child_counts[parent] == 1:
childless_nodes.append(parent)
else:
child_counts[parent] -= 1
return sorted_nodes[::-1]
class Variable(np.ndarray):
"""Array subclass with additional metadaa for representing variables."""
def __new__(cls, arr: np.ndarray, trainable: bool, name: str):
obj = np.asarray(arr).view(cls)
obj.trainable = trainable
obj.name = name
return obj
def assign(self, arr: np.ndarray) -> "Variable":
return Variable(arr, trainable=self.trainable, name=self.name)
def __array_finalize__(self, obj):
if obj is None:
return
self.trainable = getattr(obj, "trainable", None)
self.name = getattr(obj, "name", None)
def __repr__(self) -> str:
message = f"Variable(name={repr(self.name)}, "
message += f"trainable={repr(self.trainable)}, "
message += f"numpy={np.array_repr(self)}"
message += ")"
return message
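# Illustrative sketch (added for this edit, not part of the original source):
# Variable behaves like a plain np.ndarray but carries a name and a trainable
# flag, and `assign` returns a new Variable with the same metadata.
def _variable_example():
  var = Variable(np.zeros([2], dtype=np.float32), trainable=True, name="w:0")
  updated = var.assign(np.ones([2], dtype=np.float32))
  assert updated.name == "w:0" and updated.trainable
  return updated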
def _make_parameterless(
jax_func: AnnotatedFunction,
jax_params: Mapping[str, Variable],
) -> AnnotatedFunction:
"""Return an AnnotatedFunction that neither expects nor returns parameters."""
def assert_empty_params(params):
if params:
raise ValueError(
f"Expected function to have no captured variables, found {params}")
def parameterless_fn(*args, **kwargs):
results, params = jax_func.fn({}, *args, **kwargs)
assert_empty_params(params)
return results
assert_empty_params(jax_params)
return AnnotatedFunction(
fn=parameterless_fn,
structured_inputs=jax_func.structured_inputs,
structured_outputs=jax_func.structured_outputs,
signature=jax_func.signature,
)
def convert_functional(tf_func: Any, *args, **kwargs) -> AnnotatedFunction:
return _make_parameterless(*convert(tf_func, *args, **kwargs))
def convert_functional_from_restored(tf_func: Any) -> AnnotatedFunction:
return _make_parameterless(*convert_from_restored(tf_func))
def convert_from_restored(
tf_func) -> Tuple[AnnotatedFunction, Mapping[str, Variable]]:
"""Converts a RestoredFunction (from a SaveModel) if it is unambiguous."""
if tf_func.input_signature is None:
concrete_functions = getattr(tf_func, "concrete_functions", ())
if len(concrete_functions) != 1:
err_message = (
f"Found {len(concrete_functions)} concrete functions, use "
"tf2jax.convert or tf2jax.convert_functional with *args and **kwargs "
"to select one of the options above.")
saved_model_err_message = ""
try:
# Deliberately trigger pretty error message from SavedModel.
tf_func(None)
except ValueError as e:
saved_model_err_message = str(e)
raise ValueError(saved_model_err_message + "\n\n" + err_message)
args, kwargs = concrete_functions[0].structured_input_signature
else:
args = tf_func.input_signature
kwargs = {}
return convert(tf_func, *args, **kwargs)
def _is_tensorspec_like(v: Any) -> bool:
return (isinstance(v, tf.TensorSpec) or (
type(v).__module__.endswith("tensorflow.python.framework.tensor_spec") and
type(v).__name__ == "VariableSpec"))
def _fix_tfhub_specs(
structured_specs,
flat_tensors,
expected_count: Optional[int] = None,
):
"""Fix names used by TF-Hub TensorSpecs."""
flat_specs = tree.flatten(structured_specs)
tensor_count = 0
for idx, val in enumerate(flat_specs):
if _is_tensorspec_like(val):
flat_specs[idx] = tf.TensorSpec(
val.shape, val.dtype, name=flat_tensors[tensor_count].op.name)
tensor_count += 1
if expected_count is not None:
assert tensor_count == expected_count
structured_specs = tree.unflatten_as(structured_specs, flat_specs)
return structured_specs
def _maybe_tracer_to_tf_spec(val: Any) -> Any:
if isinstance(val, jax.core.Tracer):
return tf.TensorSpec(shape=val.shape, dtype=val.dtype)
else:
return val
def convert(
tf_func: Any, # tensorflow.python.eager.function is not visible.
*args,
**kwargs,
) -> Tuple[AnnotatedFunction, Mapping[str, Variable]]:
"""Convert a tf.Function to a Jax function for some concrete inputs.
The concrete inputs are used to instantiate a tf.ConcreteFunction, the
graphdef of which is then parsed and used to generate the corresponding Jax
code.
Args:
tf_func: a tf.Function
*args: positional arguments.
**kwargs: keyword arguments.
Returns:
    A tuple: the first is a Jax function that takes a flat parameter dict and a
    variable number of arrays as inputs, and returns a nested structure of
    outputs and an updated parameter dict; the second is the flat parameter
    dict corresponding to all TF variables used by the concrete function.
"""
# Log usage here.
args, kwargs = tree.map_structure(_maybe_tracer_to_tf_spec, (args, kwargs))
try:
concrete_func = tf_func.get_concrete_function(*args, **kwargs)
except AttributeError:
logging.error("Expected `tf.Function`, received %s", tf_func)
raise
graph = concrete_func.graph
graphdef = graph.as_graph_def()
num_flat_args = len(concrete_func.inputs) - len(concrete_func.captured_inputs)
captures = list(
safe_zip(
[inp.op.name for inp in concrete_func.inputs[num_flat_args:]],
[inp for inp in concrete_func.captured_inputs],
)
)
func_variables = {v.handle.ref(): v for v in concrete_func.variables}
variable_map = {
k: func_variables[v.ref()] for k, v in captures if v.dtype == tf.resource
}
constants = {k: v.numpy() for k, v in captures if v.dtype != tf.resource}
captured_input_names = tuple([
v.op.name for v in concrete_func.inputs[num_flat_args:]
])
def maybe_tensor_to_spec(v):
if v is None or isinstance(v, tf.TensorSpec):
return v
else:
return tf.TensorSpec.from_tensor(v)
num_inputs = len(concrete_func.inputs) - len(concrete_func.captured_inputs)
structured_inputs = concrete_func.structured_input_signature
structured_inputs = _fix_tfhub_specs(structured_inputs, concrete_func.inputs,
num_inputs)
flat_outputs = tree.flatten(concrete_func.outputs)
structured_outputs = tree.map_structure(maybe_tensor_to_spec,
concrete_func.structured_outputs)
# We do not check the number of output tensors because they do not match for
  # custom gradient functions.
structured_outputs = _fix_tfhub_specs(structured_outputs, flat_outputs)
try:
fullargspec = tf_func.function_spec.fullargspec
except AttributeError:
logging.warning("No fullargspec found on %s.", tf_func)
fullargspec = None
if fullargspec is not None:
signature = utils.fullargspec_to_signature(fullargspec)
else:
exp_args, exp_kwargs = structured_inputs
if exp_args:
raise ValueError("If function_spec is None then only keyword arguments "
f"are expectd, found args={exp_args} in structure.")
parameters = tuple([
# TODO(b/266552275) Remove temporary fix for TF-Hub.
inspect.Parameter(
k.replace("$", "___"), kind=inspect.Parameter.KEYWORD_ONLY)
for k in tree.flatten(exp_kwargs.keys())
])
signature = inspect.Signature(parameters=parameters)
# Extract custom_gradient functions from the registry.
if config.get_config("convert_custom_gradient"):
library = _convert_all_gradient_functions(graph, {})
else:
library = {}
jax_func, jax_params = _convert(
graphdef,
signature=signature,
structured_inputs=structured_inputs,
structured_outputs=structured_outputs,
captured_input_names=captured_input_names,
variable_map=variable_map,
constants=constants,
library=library,
)
annotated_fn = AnnotatedFunction(
fn=jax_func,
structured_inputs=structured_inputs,
structured_outputs=structured_outputs,
signature=signature,
)
return annotated_fn, jax_params
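# Illustrative usage sketch (added for this edit, not part of the original
# source): converting a simple tf.function and calling the resulting JAX
# function. This mirrors the call patterns exercised in ops_test.py; the
# helper below is hypothetical and is never called by the library.
def _convert_usage_example():
  @tf.function
  def tf_fn(x):
    return tf.sin(x) + 1.0

  x = np.linspace(0.0, 1.0, 4).astype(np.float32)
  # `convert` returns a function of (params, *inputs) plus the captured
  # variables; for a stateless function the parameter dict is empty.
  jax_fn, jax_params = convert(tf_fn, np.zeros_like(x))
  outputs, updated_params = jax_fn(jax_params, x)
  # `convert_functional` drops the (empty) parameter plumbing entirely.
  pure_fn = convert_functional(tf_fn, np.zeros_like(x))
  return outputs, updated_params, pure_fn(x)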
class _EvaluationCache:
"""Cache holding intermediate outputs."""
def __init__(
self,
nodes: Sequence[Any],
inputs: Sequence[Tuple[str, Any]],
outputs: Sequence[_TensorEdge],
):
self.outputs = dict(inputs)
# Reference counting.
self._counts = collections.Counter()
for node in nodes:
for inp in node.inputs + node.control_inputs:
self._counts[inp.op_name] += 1
for inp_op_name, _ in inputs:
self._counts[inp_op_name] += 1
for out in outputs:
self._counts[out.op_name] += 1
def free_inputs(self, node):
for inp in node.inputs + node.control_inputs:
self._counts[inp.op_name] -= 1
# Free up buffers.
if not self._counts[inp.op_name]:
del self.outputs[inp.op_name]
def _unique_everseen(seq):
seen = set()
return [x for x in seq if not (x in seen or seen.add(x))]
class _Subgraph(NamedTuple):
"""A subgraph of computations with a custom_gradient."""
subgraph: Tuple[_OpNode, ...]
captures: Tuple[_TensorEdge, ...]
outputs: Tuple[_TensorEdge, ...]
output_node: _OpNode
grad_fn: _LibraryFunction
@property
def name(self) -> str:
return self.output_node.name # pytype: disable=attribute-error
@property
def inputs(self) -> Tuple[_TensorEdge, ...]:
return self.function_inputs + self.captures
@property
def function_inputs(self) -> Tuple[_TensorEdge, ...]:
"""Inputs for the custom_gradient wrapped function."""
return tuple(self.output_node.inputs[len(self.outputs):]) # pytype: disable=attribute-error
@property
def unique_inputs(self) -> Tuple[_TensorEdge, ...]:
unique_captures = tuple([
x for x in _unique_everseen(self.captures)
if x not in set(self.function_inputs)
])
return self.function_inputs + unique_captures
@property
def control_inputs(self) -> Tuple[_TensorEdge, ...]:
return ()
@property
def require_rng(self) -> bool:
return any([n.require_rng for n in self.subgraph])
def __call__(
self,
named_args: Sequence[Tuple[str, jnp.ndarray]],
*,
rng: jnp.ndarray,
) -> Tuple[Tuple[jnp.ndarray, ...], Mapping[str, jnp.ndarray]]:
grad_inputs = tuple([_TensorEdge(v.name) for v in self.grad_fn.input_specs])
@jax.custom_gradient
def fn(*args):
eval_cache = _EvaluationCache(
self.subgraph, named_args, self.output_node.inputs + grad_inputs)
assert len(args) == len(self.unique_inputs)
for inp, val in safe_zip(self.unique_inputs, args):
if isinstance(eval_cache.outputs[inp.op_name], list):
eval_cache.outputs[inp.op_name][inp.idx] = val
elif isinstance(eval_cache.outputs[inp.op_name], tuple):
old_outputs = eval_cache.outputs[inp.op_name]
eval_cache.outputs[inp.op_name] = (
old_outputs[:inp.idx] + (val,) + old_outputs[inp.idx + 1:])
else:
eval_cache.outputs[inp.op_name] = val
num_rng_required = sum([node.require_rng for node in self.subgraph])
if num_rng_required:
rng_keys = list(jax.random.split(rng, num_rng_required))
else:
rng_keys = []
for node in self.subgraph + (self.output_node,):
collected_inputs = [
(v.op_name, eval_cache.outputs[v.op_name]) for v in node.inputs
]
sub_rng = rng_keys.pop() if node.require_rng else None
eval_cache.outputs[node.name], updated_params = node(
collected_inputs, rng=sub_rng)
if updated_params:
raise ValueError(
"Variable assignments not supported in custom_gradient subgraph, "
f"found {tuple(updated_params.keys())}.")
eval_cache.free_inputs(node)
# dy is the gradients for fn(*args) + args, i.e. inputs to IdentityN
def grad_fn(dy):
assert len(dy) == len(self.output_node.inputs)
captured_inputs = tuple(grad_inputs[len(dy):])
named_captures = [
(v.op_name, eval_cache.outputs[v.op_name]) for v in captured_inputs
]
captures = _unbox_named_args(named_captures, captured_inputs)
grad_args = dy + captures
dx = self.grad_fn(*grad_args)
assert len(dx) == len(self.unique_inputs)
return dx
return eval_cache.outputs[self.output_node.name], grad_fn
args_map = dict(named_args)
fn_named_args = [
(v.op_name, args_map[v.op_name]) for v in self.unique_inputs
]
unboxed_args = _unbox_named_args(fn_named_args, self.unique_inputs)
outputs = fn(*unboxed_args)
return outputs, {}
def rewrite(
self,
nodes: Sequence[_OpNode]) -> Tuple[Union["_Subgraph", _OpNode], ...]:
"""Remove subgraph from the main graph."""
new_nodes = []
processed = []
subgraph = self.subgraph + (self.output_node,)
subgraph_map = {v.name: v for v in subgraph}
for node in nodes:
if node.name in subgraph_map:
processed.append(node)
if node == self.output_node:
new_nodes.append(self)
else:
# Rewire control inputs to point to the subgraph.
new_control_inputs = tuple(
v._replace(op_name=self.name) if v.op_name in subgraph_map else v
for v in node.control_inputs
)
node.control_inputs = new_control_inputs
new_nodes.append(node)
assert len(processed) == len(subgraph), (len(processed), len(subgraph))
return tuple(new_nodes)
def _contains_custom_gradient(node: tf.compat.v1.NodeDef) -> bool:
# IdentityN are used to override gradients.
return node.op == "IdentityN"
def _get_consumers_and_producers_fns(nodes):
"""Returns functions to get consumers and producers for a node."""
op_map = {n.name: (idx, n) for idx, n in enumerate(nodes)}
# Maps node name to immediate consumers.
consumers_map = {n.name: [] for n in nodes}
for node in nodes:
for inp in node.inputs:
consumers_map[inp.op_name].append(node.name)
# Get all consumers for a node.
@functools.lru_cache(None)
def get_consumers(node_name: str) -> list[str]:
if not consumers_map[node_name]:
return [node_name]
else:
return sum([get_consumers(n) for n in consumers_map[node_name]], [])
# Get all producers for a node.
@functools.lru_cache(None)
def get_producers(node_name: str):
_, node = op_map[node_name]
if node.inputs:
return set.union(
set([node_name]), *[get_producers(x.op_name) for x in node.inputs]
)
else:
return set([node_name])
return get_consumers, get_producers
# Extract subgraphs for custom_gradient.
def _extract_subgraphs(graphdef, nodes, library):
"""Extract all subgraphs with their own custom_gradients."""
op_map = {n.name: (idx, n) for idx, n in enumerate(nodes)}
if _EMPTY_RETURN_OP_NAME in op_map:
logging.info("Skip subgraph extraction for function with no return values.")
return {}
get_consumers, get_producers = _get_consumers_and_producers_fns(nodes)
subgraphs = {}
for node in graphdef.node:
if _contains_custom_gradient(node):
grad_fn_name = str(node.attr["_gradient_op_type"].s, "utf-8")
grad_fn = library[grad_fn_name]
output_node = op_map[node.name][1]
assert len(node.input) == len(output_node.inputs)
# Inputs to the gradient function are fn(*args) + args + captured_args
num_outputs = len(grad_fn.orig_fn_output_specs)
all_specs = [_TensorEdge(x.name) for x in grad_fn.input_specs]
outputs = list(output_node.inputs[:num_outputs])
inputs = list(output_node.inputs[num_outputs:len(node.input)])
captured_inputs = all_specs[len(node.input):]
# Traverse and extract subgraph.
subgraph = set([x.op_name for x in inputs + captured_inputs])
unexpanded = [x.op_name for x in outputs]
while unexpanded:
top_node, *unexpanded = unexpanded
if top_node not in subgraph:
subgraph.add(top_node)
unvisited = [x.op_name for x in op_map[top_node][1].inputs]
unexpanded += unvisited
# Separate internal and external captures. Internal captures are found in
# the subgraph. External captures are found in outer graph.
unused_captures = []
internal_captures = []
external_captures = []
used_inputs = set(
sum([op_map[n][1].inputs for n in subgraph], ()) + output_node.inputs)
for inp in captured_inputs:
is_internal = all(
[x.op_name in subgraph for x in op_map[inp.op_name][1].inputs])
if inp not in used_inputs:
unused_captures.append(inp)
elif is_internal:
internal_captures.append(inp)
else:
external_captures.append(inp)
      # Find side-effects, i.e. nodes that depend on the subgraph but do not
# feed into the subgraph outputs, e.g. shape check asserts from jax2tf.
side_effects = []
subgraph_and_output = subgraph | set([output_node.name])
for node in nodes:
if node.name not in subgraph_and_output:
if any(inp.op_name in subgraph for inp in node.inputs):
side_effects.append(set(get_consumers(node.name)))
# Gather all dependencies of side-effects, assume they are not depended on
# by other nodes outside of the subgraph + side-effects. This may only
      # work for the shape check asserts added by jax2tf.
side_effects = set.union(set(), *side_effects)
side_effect_deps = set()
for x in set.union(set(), *[get_producers(x) for x in side_effects]):
if op_map[x][1].op != "Placeholder":
side_effect_deps.add(x)
# Merge side-effects and dependencies into the subgraph.
subgraph = subgraph | side_effects | side_effect_deps
output_node.control_inputs = output_node.control_inputs + tuple(
_TensorEdge(op_name=x, is_control=True) for x in side_effects
)
excluded = inputs + unused_captures + external_captures
subgraph = subgraph.difference(set([x.op_name for x in excluded]))
sub_nodes = [op_map[x] for x in subgraph]
sub_nodes = [x for _, x in sorted(sub_nodes)]
subgraphs[grad_fn_name] = _Subgraph(
subgraph=tuple(sub_nodes),
captures=tuple(external_captures),
outputs=tuple(outputs),
output_node=output_node,
grad_fn=grad_fn,
)
num_nodes = sum([len(g.subgraph) for g in subgraphs.values()])
num_unique_nodes = len(
set(sum([[x.name for x in g.subgraph] for g in subgraphs.values()], [])))
if num_nodes != num_unique_nodes:
raise ValueError("Overlapping subgraphs are not yet supported.")
return subgraphs
def _infer_relu_from_jax2tf(nodes):
"""Detect max(x, 0) and replace with jax.nn.relu."""
found_jax2tf = any(["jax2tf_out" in n.name for n in nodes])
node_map = {n.name: n for n in nodes}
for node in nodes:
if node.op == "Maximum" and "jax2tf" in node.name and "_relu_" in node.name:
cast_or_const_arg = node_map[node.inputs[1].op_name]
if cast_or_const_arg.op == "Cast":
const_arg = node_map[cast_or_const_arg.inputs[0].op_name]
else:
const_arg = cast_or_const_arg
if const_arg.op == "Const" and const_arg((), rng=None)[0].tolist() == 0:
# Replace the Maximum op with a Relu op, but keep the node name.
# The Cast and Const ops may now be redundant but are kept anyway.
node.op = "Relu"
node.inputs = node.inputs[:1]
node.jax_func = (
ops.get_parser("Relu")(_NodeDef("Relu", node.name, (), {})))
if not found_jax2tf:
logging.warning("Replaced max(x, 0) with jax.nn.relu but did not "
"find jax2tf_out.")
_FunctionDef = Any
def _get_function_protos(
graphdef: tf.compat.v1.GraphDef,) -> Iterator[Tuple[str, _FunctionDef]]:
"""Get all library function protos from a tf.GraphDef."""
func_protos = {
func.signature.name: func for func in graphdef.library.function
}
processed = set()
def process_func(func) -> Iterator[Tuple[str, _FunctionDef]]:
for node in func.node_def:
yield from process_node(node)
def process_node(node) -> Iterator[Tuple[str, _FunctionDef]]:
for attr in node.attr.values():
for func_name in [attr.func.name] + [f.name for f in attr.list.func]:
if func_name and func_name not in processed:
yield from process_func(func_protos[func_name])
yield func_name, func_protos[func_name]
processed.add(func_name)
for node in graphdef.node:
yield from process_node(node)
# Partially mimics tf.compat.v1.NodeDef
class _NodeDef(NamedTuple):
op: str
name: str
input: Tuple[str, ...]
attr: Optional[Mapping[str, tf.compat.v1.AttrValue]] = None
class _GraphDef(NamedTuple):
node: Tuple[Union[tf.compat.v1.NodeDef, _NodeDef], ...]
def _validate_inputs(
signature: inspect.Signature,
structured_specs: Any,
input_map: Mapping[str, Any],
) -> str:
"""Validate inputs against input specs."""
class _ExpectedArg:
def __init__(self, path: Tuple[Any, ...], spec: Any):
self.path = path
self.spec = spec
expected_args = [
_ExpectedArg(p, v) for p, v in tree.flatten_with_path(structured_specs)
]
expected_tree = tree.unflatten_as(structured_specs, expected_args)
def format_spec(spec: tf.TensorSpec) -> str:
return "{}[{}]".format(spec.dtype.name, ",".join(map(str, spec.shape)))
def check_arg(arg: _ExpectedArg):
arg_desc = "UNUSED" if arg.spec is _UNUSED_INPUT else format_spec(arg.spec)
return "." if arg.path in input_map else arg_desc
checked_args, checked_kwargs = tree.map_structure(check_arg, expected_tree)
bound_checked_args = signature.bind(*checked_args, **checked_kwargs)
# TODO(b/282901848) Use a better pretty printer than json which does not
# render namedtuple correctly.
return json.dumps(bound_checked_args.arguments, indent=4)
class MissingInputError(Exception):
...
def _convert(
graphdef: Union[tf.compat.v1.GraphDef, _GraphDef],
signature: inspect.Signature,
structured_inputs,
structured_outputs,
captured_input_names: Optional[Tuple[str, ...]] = None,
variable_map: Optional[Mapping[str, tf.Variable]] = None,
constants: Optional[Mapping[str, jnp.ndarray]] = None,
library: Optional[Mapping[str, _LibraryFunction]] = None,
) -> Tuple[Callable[..., Any], Mapping[str, Variable]]:
"""Convert a GraphDef to a Jax function.
Args:
graphdef: a tf.GraphDef or GraphDef like object.
signature: An inspect.Signature representing a call signature.
structured_inputs: Structured input spec for the function, follows the
format of inspect.BoundArguments, i.e. a tuple of a list of position args
and a dict of keyword-only args.
structured_outputs: Structured output spec for the function.
captured_input_names: Names of other input tensors that are not part of
`structured_inputs`, e.g. variables captured by tf.Function.
variable_map: A mapping from tensor names to tf.Variables. The keys are a
subset of captured_input_names.
constants: A mapping from tensor names to constant values. The keys are a
subset of captured_input_names.
library: A mapping from function names to Callable. This is non-empty on
      recursive calls if the FunctionDefLibrary in the GraphDef is non-empty.
Returns:
    A tuple: the first is a Jax function that takes a flat parameter dict and a
    variable number of arrays as inputs, and returns a nested structure of
    outputs and an updated parameter dict; the second is the flat parameter
    dict corresponding to all TF variables used by the graph.
"""
# Recursively convert function protos in FunctionDefLibrary.
library = dict((library or {}).items())
if hasattr(graphdef, "library"):
if graphdef.library.gradient:
raise ValueError("GradientDef not currently supported, found "
f"{graphdef.library.gradient}")
for func_name, func_proto in _get_function_protos(graphdef):
if func_name not in library:
logging.info("Converting library function %s", func_name)
library[func_name] = _convert_library_function(func_proto, library)
captured_input_names = captured_input_names or ()
variable_map = variable_map or {}
constants = constants or {}
# Canonicalise inputs and outputs.
input_path_to_specs = [(p, v)
for p, v in tree.flatten_with_path(structured_inputs)
if v is not _UNUSED_INPUT]
# TODO(b/266552275) Remove temporary fix for TF-Hub.
replace_fn = (lambda k: k.replace("$", "___") if isinstance(k, str) else k)
input_path_to_specs = [
(tree.map_structure(replace_fn, p), v) for p, v in input_path_to_specs
]
# Extract input and output tensor names.
input_names = []
static_arg_num = 0
for (_, spec) in input_path_to_specs:
if isinstance(spec, tf.TensorSpec):
input_names.append(spec.name)
else:
input_names.append(f"__static_arg_{static_arg_num}")
static_arg_num += 1
input_names = tuple(input_names)
assert len(input_names) == len(set(input_names))
flat_output_specs = tree.flatten(structured_outputs)
output_names = tuple(
v.name for v in flat_output_specs if isinstance(v, tf.TensorSpec)
)
unsupported = ops.get_unsupported_operations(
[node.op for node in graphdef.node])
if unsupported:
err_message = f"Unsupported operations in graph: {list(unsupported)}"
if _XLA_CALL_MODULE in unsupported:
err_message += "\n"
err_message += (
f"`{_XLA_CALL_MODULE}` is generated by jax2tf.convert with native "
"serialization, this is partially supported in tf2jax (check for "
"import error if you see this message)."
)
err_message += "\n"
err_message += (
"Support for additional TensorFlow ops are added on an as-needed "
"basis, please contact the library owner(s)."
)
raise ValueError(err_message)
# Extract variables.
if tf.executing_eagerly():
# Uniqueify variables with identical names.
variables_tf = {}
var_name_by_ref = {}
for v in variable_map.values():
name = _parse_input(v.name)
suffix = 1
var_name = name
while var_name in variables_tf:
var_name = f"{name}_{suffix}"
suffix += 1
variables_tf[var_name] = v
var_name_by_ref[v.ref()] = var_name
variables = {
k: Variable(v.numpy(), v.trainable, v.name)
for k, v in variables_tf.items()
}
else:
variables_tf = {_parse_input(v.name): v for _, v in variable_map.items()}
var_name_by_ref = {
v.ref(): _parse_input(v.name) for v in variable_map.values()
}
if variables_tf:
with tf.compat.v1.Session() as sess:
sess.run(tf.compat.v1.initialize_variables(variables_tf.values()))
variables_np = sess.run(variables_tf)
variables = tree.map_structure(
lambda var, arr: Variable(arr, var.trainable, var.name),
variables_tf, variables_np)
else:
variables = {}
assert len(variable_map) == len(variables_tf)
assert len(variable_map) == len(var_name_by_ref)
assert len(variable_map) == len(variables)
var_by_node = {k: var_name_by_ref[v.ref()] for k, v in variable_map.items()}
node_by_var = {v: k for k, v in var_by_node.items()}
assert len(var_by_node) == len(node_by_var)
node_map = {n.name: n for n in graphdef.node}
if not output_names:
assert _EMPTY_RETURN_OP_NAME not in node_map
assign_nodes = tuple(
f"^{n.name}" for n in graphdef.node if n.op in _TF_ASSIGN_OPS)
node_map[_EMPTY_RETURN_OP_NAME] = _NodeDef(
"NoOp", _EMPTY_RETURN_OP_NAME, assign_nodes, {})
output_names = [_EMPTY_RETURN_OP_NAME]
logging.warning(
"No output nodes found, inserted NoOp to trigger side effects: %s",
assign_nodes)
nodes = _toposort(node_map, output_names)
nodes = [_OpNode(node, library, node_map) for node in nodes]
output_args = [_TensorEdge.from_string(v, node_map) for v in output_names]
num_rng_required = sum([node.require_rng for node in nodes])
if config.get_config("infer_relu_from_jax2tf"):
_infer_relu_from_jax2tf(nodes)
if config.get_config("convert_custom_gradient"):
subgraphs = _extract_subgraphs(graphdef, nodes, library)
for _, subgraph in subgraphs.items():
nodes = subgraph.rewrite(nodes)
def jax_func(
params: Mapping[str, jnp.ndarray],
*func_args,
rng: Optional[jnp.ndarray] = None,
**func_kwargs,
) -> Tuple[Any, Mapping[str, jnp.ndarray]]:
if num_rng_required and rng is None:
raise ValueError(f"PRNG key required for random ops, found rng={rng}.")
bound_args = signature.bind(*func_args, **func_kwargs)
bound_args.apply_defaults()
inputs = dict(
tree.flatten_with_path((bound_args.args, bound_args.kwargs)))
try:
inputs = tuple([inputs[p] for p, _ in input_path_to_specs])
except KeyError as e:
input_err_message = _validate_inputs(signature, structured_inputs, inputs)
err_message = (
"\nSome input(s) are missing (entries that map to dtype[shape]"
f" annotations in the following structure): \n{input_err_message}"
)
raise MissingInputError(err_message) from e
if len(inputs) != len(input_names):
raise ValueError(
f"Expected {len(input_names)} args, found {len(inputs)}.")
inputs = tree.map_structure(
lambda x: x if isinstance(x, jnp.ndarray) else np.array(x), inputs)
all_params = dict(**params, **constants)
for inp, (path, spec) in safe_zip(inputs, input_path_to_specs):
if not isinstance(spec, tf.TensorSpec):
# A `None` spec denotes a Tensor argument that was unused in deepfunc.
is_tracer = isinstance(inp, jax.core.Tracer)
if spec is not None and (is_tracer or spec != inp):
inp_type = "tracer" if is_tracer else "literal value"
raise ValueError(
f"Found unexpected {inp_type} `{inp}`, expected `{spec}` for "
f"argument at {path}. This may be because the original "
"TensorFlow function was traced with a literal value instead of "
"a Tensor or Array.")
else:
continue
if (config.get_config("strict_shape_check") and
not spec.shape.is_compatible_with(_fix_jax_poly_shape(inp.shape))):
raise ValueError(
f"Found incompatible input shape: {inp.shape}, expected "
f"{spec.shape} for argument at {path}.\n"
"This can be a problem if the outputs depend on input shapes, e.g. "
"BatchNorm during training.\n"
"If this is not an issue for you, the error can be disabled using "
"either tf2jax.update_config('strict_shape_check', False) or the "
"context manager "
"tf2jax.override_config('strict_shape_check', False)")
if (config.get_config("strict_dtype_check") and
not spec.dtype.is_compatible_with(inp.dtype)):
raise ValueError(
f"Found incompatible input dtype: {inp.dtype}, expected "
f"{spec.dtype} for argument at {path}.\n"
"If this is not an issue for you, the error can be disabled "
"either tf2jax.update_config('strict_dtype_check', False) or the "
"context manager "
"tf2jax.override_config('strict_dtype_check', False)")
missing_params = [
var_by_node.get(v, v)
for v in captured_input_names
if var_by_node.get(v, v) not in all_params
]
if missing_params:
raise ValueError(f"Some parameters are missing, {missing_params}.")
full_inputs = inputs + tuple(
[all_params[var_by_node.get(v, v)] for v in captured_input_names])
full_inputs = list(
safe_zip(input_names + captured_input_names, full_inputs)
)
if num_rng_required:
rng_keys = list(jax.random.split(rng, num_rng_required))
else:
rng_keys = []
updated_param_names = set()
eval_cache = _EvaluationCache(nodes, full_inputs, output_args)
for node in nodes:
if node.name not in eval_cache.outputs:
# Double-check control inputs.
for inp in node.control_inputs:
if inp.op_name not in eval_cache.outputs:
raise ValueError(
f"Control dependency {inp} not executed for node `{node.name}`")
collected_inputs = [
(v.op_name, eval_cache.outputs[v.op_name]) for v in node.inputs
]
sub_rng = rng_keys.pop() if node.require_rng else None
eval_cache.outputs[node.name], updated_params = node(
collected_inputs, rng=sub_rng)
# Assign variables.
for var_name, var_val in updated_params.items():
eval_cache.outputs[var_name] = var_val
updated_param_names.add(var_name)
eval_cache.free_inputs(node)
tensor_outputs = tuple([eval_cache.outputs[k.op_name] for k in output_args])
tensor_outputs = [v for v in tensor_outputs if v is not _EMPTY_RETURN_VALUE]
# Merge the tensor and non-tensor outputs.
output_idx = 0
flat_outputs = []
for spec in flat_output_specs:
if isinstance(spec, tf.TensorSpec):
flat_outputs.append(tensor_outputs[output_idx])
output_idx += 1
else:
flat_outputs.append(spec)
assert output_idx == len(tensor_outputs)
collected_outputs = tree.unflatten_as(structured_outputs, flat_outputs)
# Parameters after any assignment.
new_params = {
var_name: eval_cache.outputs[node_by_var[var_name]]
for var_name in params.keys()
}
lost_params = [v for v in updated_param_names if v not in var_by_node]
if lost_params:
raise ValueError(f"Some updated parameters are lost, {lost_params}.")
return collected_outputs, new_params
return jax_func, variables
def _convert_library_function(
proto,
library: Optional[Mapping[str, _LibraryFunction]],
) -> _LibraryFunction:
"""Convert a FunctionDef."""
input_nodes = []
for arg in proto.signature.input_arg:
input_nodes.append(
_NodeDef("Placeholder", arg.name, (), {"dtype": tf.as_dtype(arg.type)}))
input_arg_names = [arg.name for arg in proto.signature.input_arg]
output_nodes = []
output_is_input = []
for arg in proto.signature.output_arg:
# TODO(b/233985145) Keep following the Identity ops through the node_def?
output_is_input.append(proto.ret[arg.name] in input_arg_names)
output_nodes.append(
_NodeDef(
"Identity",
arg.name,
tuple([proto.ret[arg.name]] + ["^" + v for v in proto.control_ret]),
{"T": tf.as_dtype(arg.type)},
))
graphdef = _GraphDef(tuple(input_nodes + list(proto.node_def) + output_nodes))
output_is_input = tuple(output_is_input)
  # VarHandleOp nodes correspond to variables in checkpoints.
var_handle_ops = [n for n in proto.node_def if n.op == "VarHandleOp"]
params = [
inspect.Parameter(arg.name, inspect.Parameter.POSITIONAL_ONLY)
for arg in proto.signature.input_arg
] + [
inspect.Parameter(
arg.name.replace("/", "___"), inspect.Parameter.POSITIONAL_ONLY
)
for arg in var_handle_ops
]
signature = inspect.Signature(params)
structured_inputs = tuple([
tf.TensorSpec(None, dtype=tf.as_dtype(arg.type), name=arg.name)
for arg in proto.signature.input_arg
])
structured_outputs = tuple([
tf.TensorSpec(None, dtype=tf.as_dtype(arg.type), name=arg.name)
for arg in proto.signature.output_arg
])
def node_to_spec(v):
return tf.TensorSpec(
shape=[dim.size for dim in v.attr["shape"].shape.dim],
dtype=tf.as_dtype(v.attr["dtype"].type),
name=v.name,
)
var_inputs = tuple(node_to_spec(v) for v in var_handle_ops)
structured_inputs = structured_inputs + var_inputs
jax_func, jax_params = _convert(
graphdef,
signature,
[structured_inputs, {}],
structured_outputs,
library=library)
if jax_params:
raise ValueError(
f"Library function should be stateless, found variables {jax_params}")
# Does any of the ops or inner functions require RNG?
require_rng = any([n.op in _TF_RANDOM_OPS for n in proto.node_def])
for node in proto.node_def:
for attr in node.attr.values():
if attr.func.name:
require_rng = require_rng or library[attr.func.name].require_rng
return _LibraryFunction(
fn=jax_func,
require_rng=require_rng,
params=None,
input_specs=structured_inputs,
output_specs=structured_outputs,
output_is_input=output_is_input,
variable_input_specs=var_inputs,
)
def _filter_nodes(
predicate: Callable[[tf.compat.v1.NodeDef], bool],
graph: Any,
) -> Iterator[Tuple[tf.compat.v1.NodeDef, Any]]:
"""Filter nodes in a tf.Graph with a predicate."""
graph_def = graph.as_graph_def()
for node in graph_def.node:
if predicate(node):
yield node, graph
if hasattr(graph_def, "library"):
for func in graph_def.library.function:
# TODO(b/266553384) Use the public API once it is available.
library_fn = graph._functions[func.signature.name] # pylint: disable=protected-access
for node in func.node_def:
if predicate(node):
yield node, library_fn.graph
def _convert_all_gradient_functions(
graph: Any,
library: Mapping[str, _LibraryFunction],
) -> Mapping[str, _LibraryFunction]:
"""Recursively convert all custom gradients in a tf.Graph."""
grad_lib = {}
for node, graph in _filter_nodes(_contains_custom_gradient, graph):
    # Note that dict(**a, **b) will raise TypeError on duplicates, unlike {}.
grad_lib.update(
_convert_gradient_function(node, graph, dict(**library, **grad_lib)))
return grad_lib
def _convert_gradient_function(
proto: tf.compat.v1.NodeDef,
graph: Any,
library: Mapping[str, _LibraryFunction],
) -> Mapping[str, _LibraryFunction]:
"""Convert a custom_gradient function."""
op = graph.as_graph_element(proto.name)
input_specs = tuple([tf.TensorSpec.from_tensor(v) for v in op.inputs])
grad_fn_name = str(proto.attr["_gradient_op_type"].s, "utf-8")
if grad_fn_name in library:
return {}
@tf.function
def tf_grad_fn(*grad_args, **grad_kwargs):
fn = tf_ops.gradient_registry.lookup(grad_fn_name)
return fn(None, *grad_args, **grad_kwargs)
concrete_tf_grad_fn = tf_grad_fn.get_concrete_function(*input_specs)
grad_lib = _convert_all_gradient_functions(concrete_tf_grad_fn.graph, library)
logging.info("Converting gradient function %s", grad_fn_name)
grad_inputs = concrete_tf_grad_fn.inputs
grad_captured_inputs = concrete_tf_grad_fn.captured_inputs
num_flat_args = len(grad_inputs) - len(grad_captured_inputs)
func_variables = {v.handle.ref(): v for v in concrete_tf_grad_fn.variables}
# Gradient function can capture tensors in the outer function. Move them
# into the arguments of the gradient function for conversion to JAX.
variable_map = {}
constant_map = {}
external_capture_specs = []
internal_capture_names = []
for inp, cap in safe_zip(grad_inputs[num_flat_args:], grad_captured_inputs):
if cap.dtype == tf.resource:
variable_map[inp.op.name] = func_variables[cap.ref()]
internal_capture_names.append(inp.op.name)
elif hasattr(cap, "numpy"):
constant_map[inp.op.name] = cap.numpy()
internal_capture_names.append(inp.op.name)
else:
external_capture_specs.append(tf.TensorSpec.from_tensor(cap))
structured_grad_input_specs = tree.map_structure(tf.TensorSpec.from_tensor,
concrete_tf_grad_fn.inputs)
structured_grad_input_specs = (structured_grad_input_specs, {})
grad_input_specs = input_specs + tuple(external_capture_specs)
grad_structured_outputs = tuple(
itertools.dropwhile(lambda x: x is None,
concrete_tf_grad_fn.structured_outputs))
grad_output_specs = tuple([
tf.TensorSpec.from_tensor(x) for x in grad_structured_outputs
])
# Nones correspond to the outputs of the original function.
num_fn_outputs = (
len(concrete_tf_grad_fn.structured_outputs) -
len(grad_structured_outputs))
signature = inspect.Signature(
(inspect.Parameter("grad_args", inspect.Parameter.VAR_POSITIONAL),))
jax_grad_fn, jax_grad_params = _convert(
concrete_tf_grad_fn.graph.as_graph_def(),
signature,
structured_grad_input_specs,
grad_output_specs,
captured_input_names=tuple(internal_capture_names),
variable_map=variable_map,
constants=constant_map,
      # Note that dict(**a, **b) raises TypeError on duplicate keys,
      # unlike {**a, **b}.
library=dict(**library, **grad_lib),
)
grad_fn = _LibraryFunction(jax_grad_fn, False, jax_grad_params,
grad_input_specs, grad_output_specs,
grad_output_specs[:num_fn_outputs])
return dict(**grad_lib, **{grad_fn_name: grad_fn})
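# ------------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original module). It shows how
# a TF function carrying a `tf.custom_gradient` is expected to round-trip
# through the public `convert` entry point defined earlier in this module; the
# registered gradient is what `_convert_all_gradient_functions` above picks up.
# The call pattern (parameters plus an `rng` keyword) mirrors the library's
# tests; whether the custom gradient is honoured when differentiating the
# converted function may depend on library configuration.
def _custom_gradient_conversion_sketch():
  import jax
  import numpy as np
  @tf.custom_gradient
  def clipped_square(x):
    def grad(dy):
      # Clip the backward signal to [-1, 1] as an arbitrary custom gradient.
      return tf.clip_by_value(dy * 2.0 * x, -1.0, 1.0)
    return tf.square(x), grad
  @tf.function
  def forward(x):
    return clipped_square(x)
  x = np.linspace(-2.0, 2.0, 8).astype(np.float32)
  jax_fn, jax_params = convert(forward, np.zeros_like(x))
  # The converted function is pure: it takes its parameters explicitly and the
  # `rng` is only consumed by random ops, if any.
  y, _ = jax_fn(jax_params, x, rng=jax.random.PRNGKey(0))
  return y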
| tf2jax-main | tf2jax/_src/tf2jax.py |
# Copyright 2019 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration parameters for main.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class Config(dict):
def __getattr__(self, key):
return self[key]
def __setattr__(self, key, value):
self[key] = value
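# Editor's note (illustrative, not part of the original file): Config is a
# plain dict with attribute-style access, so `cfg = Config(); cfg.lr = 0.1`
# is equivalent to `cfg['lr'] = 0.1`, and `cfg.lr` reads back the same entry.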
# Directory for the experiment logs, when running locally.
experiment_dir = '/tmp/tf_logs/'
# The dataset name.
dataset_name = 'breast_cancer'
# Training batch size.
batch_size = 32
# Use the entire dataset for evaluation.
eval_batch_size = None
# Number of batches to be used for evaluation.
num_eval_batches = 1
# The number of posterior samples used for evaluation.
num_eval_posterior_samples = 1000
# Number of posterior samples to be used for training.
num_posterior_samples = 50
# Initial learning rate.
start_learning_rate = 0.001
# Whether or not to use cosine learning rate decay.
cosine_learning_rate_decay = True
# Number of training steps.
training_steps = 5000
# Number of steps between loss logging.
report_interval = 10
# Number of steps between gradient logging.
grad_report_interval = 10
# Number of steps between checkpoints.
checkpoint_interval = 1000
gradient_config = Config()
# Options: 'pathwise', 'score_function', 'measure_valued'.
gradient_config.type = 'pathwise'
gradient_config.control_variate = ''
# Whether or not to use a control variate coefficient chosen
# to minimise the variance of the surrogate estimate.
# Set to False for experiments which control for the number of posterior
# samples used by the estimators.
estimate_cv_coeff = True
num_posterior_samples_cv_coeff = 25
# Whether or not to use the analytical KL computation.
use_analytical_kl = True
| mc_gradients-master | monte_carlo_gradients/config.py |
# Copyright 2019 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import tensorflow as tf
from monte_carlo_gradients import utils
class UtilsTest(tf.test.TestCase):
def testRepeatLastDim(self):
a = tf.constant([[0., 1.,], [2., 3.]])
b = utils.tile_second_to_last_dim(a)
with tf.Session() as sess:
b_val = sess.run(b)
self.assertAllEqual(b_val[0], np.array([[0., 1.], [0., 1.]]))
self.assertAllEqual(b_val[1], np.array([[2., 3.], [2., 3.]]))
if __name__ == '__main__':
tf.test.main()
| mc_gradients-master | monte_carlo_gradients/utils_test.py |
# Copyright 2019 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bayesian logistic regression model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from absl import logging
import tensorflow as tf
import tensorflow_probability as tfp
from monte_carlo_gradients import utils
def _predictions(logits):
return tf.to_float(logits >= 0)
def _accuracy(targets, predictions):
  # `predictions` has rank 2: [batch_size, num_posterior_samples].
  # We expand the targets' dims to compute the accuracy per sample.
targets = tf.expand_dims(targets, axis=1)
return tf.reduce_mean(tf.to_float(tf.equal(targets, predictions)))
def linear_model(data, targets, posterior_samples):
num_posterior_samples = tf.shape(posterior_samples)[0]
logits = tf.matmul(data, posterior_samples, transpose_b=True)
# Make targets [B, 1] to use broadcasting.
targets = tf.expand_dims(targets, axis=1)
targets = targets * tf.ones([1, num_posterior_samples])
log_probs = - tf.nn.sigmoid_cross_entropy_with_logits(
labels=targets, logits=logits)
return log_probs, logits
ModelOutput = collections.namedtuple(
'ModelOutput', ['elbo',
'predictions',
'accuracy',
'logits',
'log_probs',
'stochastic_batched_loss',
'kl',
'data_log_probs'])
class BayesianLogisticRegression(object):
"""Bayesian logistic regression model."""
def __init__(
self, prior, posterior,
dataset_size,
model_fn=linear_model,
use_analytical_kl=True):
self._prior = prior
self._posterior = posterior
self._dataset_size = tf.cast(dataset_size, tf.float32)
self._use_analytical_kl = use_analytical_kl
self._model_fn = model_fn
@property
def prior(self):
return self._prior
@property
def posterior(self):
return self._posterior
def predict(self, data, targets, posterior_samples):
_, logits = self.model_log_probs(
data, targets, posterior_samples=posterior_samples)
return _predictions(logits)
@property
def analytical_kl(self):
"""Computes the analytical KL."""
if self._use_analytical_kl:
try:
kl = tfp.distributions.kl_divergence(self._posterior, self._prior)
except NotImplementedError:
logging.warn('Analytic KLD not available, using sampling KLD instead')
self._use_analytical_kl = False
if not self._use_analytical_kl:
return None
return kl
def compute_kl(self, posterior_samples):
"""Computes the KL between the posterior to the prior."""
if self._use_analytical_kl:
return self.analytical_kl
num_posterior_samples = posterior_samples.shape[0]
    # Compute the log probs of the posterior samples under the prior and
    # posterior. Both have shape [num_posterior_samples].
posterior_log_probs = self._posterior.log_prob(posterior_samples)
posterior_log_probs.shape.assert_is_compatible_with(
[num_posterior_samples])
prior_log_probs = self._prior.log_prob(posterior_samples)
prior_log_probs.shape.assert_is_compatible_with(
[num_posterior_samples])
kl = posterior_log_probs - prior_log_probs
kl.shape.assert_is_compatible_with([num_posterior_samples])
return kl
def apply(self, features, targets, posterior_samples):
"""Applies the model and computes the elbo for the given inputs."""
    # Parameters have already been sampled from the posterior and passed in.
batch_size = utils.get_shape_list(features)[0]
num_posterior_samples = posterior_samples.shape[0]
# Compute the log probs of the data under all the models we have sampled.
log_probs, logits = self._model_fn(features, targets, posterior_samples)
log_probs.shape.assert_is_compatible_with(
[batch_size, num_posterior_samples])
# The likelihood of the data is the average over parameters.
data_log_probs = tf.reduce_mean(log_probs, axis=1) # Reduce over samples.
data_log_probs.shape.assert_is_compatible_with([batch_size])
kl = self.compute_kl(posterior_samples)
# Elbo computation - sum over data instances.
param_log_probs = tf.reduce_mean(log_probs, axis=0) * self._dataset_size
param_log_probs.shape.assert_is_compatible_with([num_posterior_samples])
    # Note: we rely on broadcasting so that the KL is subtracted from each
    # sample's log prob, in case the KL was computed analytically (scalar).
elbo = param_log_probs - kl
elbo.shape.assert_is_compatible_with([num_posterior_samples])
predictions = _predictions(logits)
accuracy = _accuracy(targets=targets, predictions=predictions)
if self._use_analytical_kl:
stochastic_batched_loss = - param_log_probs
else:
stochastic_batched_loss = - elbo
stochastic_batched_loss.shape.assert_is_compatible_with(
[num_posterior_samples])
return ModelOutput(
data_log_probs=data_log_probs,
log_probs=log_probs,
elbo=elbo,
kl=kl,
logits=logits,
predictions=predictions,
accuracy=accuracy,
stochastic_batched_loss=stochastic_batched_loss)
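# ------------------------------------------------------------------------------
# Editor's illustrative usage sketch (not part of the original module). It
# mirrors how the unit tests drive the model: build prior/posterior Normals
# with `dist_utils.multi_normal`, sample parameters from the posterior, and
# call `apply` to obtain the per-sample ELBO and accuracy. The constants
# (dataset size, dimensions) are arbitrary.
def _bayes_lr_usage_sketch(data_dims=10, batch_size=32, num_samples=5):
  from monte_carlo_gradients import dist_utils
  mean = tf.Variable(tf.zeros([data_dims]), name='mean')
  log_scale = tf.Variable(tf.zeros([data_dims]), name='log_scale')
  prior = dist_utils.multi_normal(
      loc=tf.zeros([data_dims]), log_scale=tf.zeros([data_dims]))
  posterior = dist_utils.multi_normal(loc=mean, log_scale=log_scale)
  model = BayesianLogisticRegression(
      prior, posterior, dataset_size=500, use_analytical_kl=True)
  features = tf.random.uniform((batch_size, data_dims))
  targets = tf.ones(batch_size)
  posterior_samples = posterior.sample(num_samples)
  outputs = model.apply(features, targets, posterior_samples)
  # `outputs.elbo` has shape [num_samples]; `outputs.accuracy` is a scalar.
  return outputs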
| mc_gradients-master | monte_carlo_gradients/bayes_lr.py |
# Copyright 2019 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Gradient utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from monte_carlo_gradients import control_variates
from monte_carlo_gradients import utils
def add_kl_to_loss(kl, loss, jacobian_dict):
"""Adds the KL to the optimized loss and updates jacobians accordingly."""
if kl is None:
return loss, jacobian_dict
loss += kl
jacobian_dict = utils.add_grads_to_jacobians(jacobian_dict, kl)
return loss, jacobian_dict
def model_surrogate_loss(
model, features, targets, posterior_samples, grad_loss_fn,
control_variate_fn=None,
estimate_cv_coeff=True,
num_posterior_samples_cv_coeff=None,
jacobian_parallel_iterations=None):
r"""Computes the surrogate loss given the input model_outputs.
It creates the appropriate surrogate function based on `grad_loss_fn`.
The loss returned is of the form: \sum[stop_gradient(grad_var) * var].
Args:
model: An Instance of BayesianLogisticRegression.
features: A tf.Tensor `[Batch size, data_dim]`.
targets: A tf.Tensor `[Batch size]`. Values in `{0, 1}`.
posterior_samples: A tf.Tensor `[Number of samples, data_dim]`.
grad_loss_fn: The gradient estimator loss function.
control_variate_fn: The control variate function.
estimate_cv_coeff: Boolean. Whether or not to use a coefficient
for the control variate to minimize variance of the surrogate loss
estimate. If False, the control variate coefficient is set to 1.
num_posterior_samples_cv_coeff: Integer. The number of samples used to
compute the CV coeff.
jacobian_parallel_iterations: None or Integer. The number of samples for
      which to compute the jacobian in parallel. Trades off memory for speed.
Returns:
A tuple of size 2:
* a tf.Tensor. The surrogate loss to optimize.
* A dict from variable to the jacobians for the variable.
"""
def model_loss_fn(x):
return model.apply(features, targets, x).stochastic_batched_loss
grad_loss_fn = utils.grad_loss_fn_with_jacobians(
grad_loss_fn, jacobian_parallel_iterations=jacobian_parallel_iterations)
if control_variate_fn:
loss, jacobian_dict = control_variates.control_variates_surrogate_loss(
dist=model.posterior,
dist_samples=posterior_samples,
dist_vars=model.posterior.dist_vars,
model_loss_fn=model_loss_fn,
grad_loss_fn=grad_loss_fn,
control_variate_fn=control_variate_fn,
estimate_cv_coeff=estimate_cv_coeff,
num_posterior_samples_cv_coeff=num_posterior_samples_cv_coeff)
else:
loss, jacobian_dict = grad_loss_fn(
function=model_loss_fn,
dist=model.posterior,
dist_samples=posterior_samples)
loss, jacobian_dict = add_kl_to_loss(model.analytical_kl, loss, jacobian_dict)
return loss, jacobian_dict
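# ------------------------------------------------------------------------------
# Editor's illustrative usage sketch (not part of the original module). Note
# that `grad_loss_fn` is passed in raw (e.g. `pathwise_loss`); the wrapping
# into a jacobian-returning estimator happens inside `model_surrogate_loss`.
# The `model`, `features` and `targets` arguments are assumed to come from a
# `bayes_lr.BayesianLogisticRegression` setup as in the tests; the number of
# samples for the CV coefficient (25) matches the default config.
def _surrogate_loss_sketch(model, features, targets, num_samples=50):
  import tensorflow as tf
  from monte_carlo_gradients import gradient_estimators
  posterior_samples = model.posterior.sample(num_samples)
  loss, jacobian_dict = model_surrogate_loss(
      model, features, targets, posterior_samples,
      grad_loss_fn=gradient_estimators.pathwise_loss,
      control_variate_fn=control_variates.control_delta_method,
      num_posterior_samples_cv_coeff=25)
  # `loss` has one entry per posterior sample; its mean is the scalar to
  # minimise, while `jacobian_dict` maps each posterior variable to its
  # per-sample jacobian (useful for variance diagnostics).
  return tf.reduce_mean(loss), jacobian_dict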
| mc_gradients-master | monte_carlo_gradients/blr_model_grad_utils.py |
# Copyright 2019 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from monte_carlo_gradients import dist_utils
from monte_carlo_gradients import gradient_estimators
def _cross_prod(items1, items2):
prod = itertools.product(items1, items2)
return [i1 + (i2,) for i1, i2 in prod]
class ReinforceTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters([0.1, 0.5, 0.9])
def testConstantFunction(self, constant):
data_dims = 3
num_samples = 10**6
effective_mean = 1.5
mean = effective_mean * tf.ones(shape=(data_dims), dtype=tf.float32)
effective_log_scale = 0.0
log_scale = effective_log_scale * tf.ones(
shape=(data_dims), dtype=tf.float32)
dist = dist_utils.multi_normal(loc=mean, log_scale=log_scale)
dist_samples = dist.sample(num_samples)
dist_samples.shape.assert_is_compatible_with([num_samples, data_dims])
function = lambda x: tf.ones_like(x[:, 0])
loss = gradient_estimators.score_function_loss(
function, dist_samples, dist)
# Average over the number of samples.
loss.shape.assert_is_compatible_with([num_samples])
loss = tf.reduce_mean(loss)
mean_grads = tf.gradients(loss, mean)[0]
mean_grads.shape.assert_is_compatible_with(data_dims)
expected_mean_grads = np.zeros(data_dims, dtype=np.float32)
log_scale_grads = tf.gradients(loss, log_scale)[0]
expected_log_scale_grads = np.zeros(data_dims, dtype=np.float32)
with self.test_session() as sess:
init_op = tf.global_variables_initializer()
sess.run(init_op)
self.assertAllClose(
sess.run(mean_grads), expected_mean_grads, rtol=1e-1, atol=5e-3)
self.assertAllClose(
sess.run(log_scale_grads),
expected_log_scale_grads,
rtol=1e-1,
atol=5e-3)
@parameterized.parameters([(0.5, -1.), (0.7, 0.0), (0.8, 0.1)])
def testLinearFunction(self, effective_mean, effective_log_scale):
data_dims = 3
num_samples = 10**6
mean = effective_mean * tf.ones(shape=(data_dims), dtype=tf.float32)
log_scale = effective_log_scale * tf.ones(
shape=(data_dims), dtype=tf.float32)
dist = dist_utils.multi_normal(loc=mean, log_scale=log_scale)
dist_samples = dist.sample(num_samples)
dist_samples.shape.assert_is_compatible_with([num_samples, data_dims])
function = lambda x: tf.reduce_sum(x, axis=1)
loss = gradient_estimators.score_function_loss(
dist=dist, dist_samples=dist_samples, function=function)
loss.shape.assert_is_compatible_with([num_samples])
loss = tf.reduce_mean(loss)
mean_grads = tf.gradients(loss, mean)[0]
mean_grads.shape.assert_is_compatible_with(data_dims)
expected_mean_grads = np.ones(data_dims, dtype=np.float32)
log_scale_grads = tf.gradients(loss, log_scale)[0]
expected_log_scale_grads = np.zeros(data_dims, dtype=np.float32)
with self.test_session() as sess:
init_op = tf.global_variables_initializer()
sess.run(init_op)
self.assertAllClose(
sess.run(mean_grads), expected_mean_grads, rtol=1e-1, atol=1e-2)
self.assertAllClose(
sess.run(log_scale_grads),
expected_log_scale_grads,
rtol=1e-1,
atol=1e-2)
@parameterized.parameters([(1.0, 1.0)])
def testQuadraticFunction(self, effective_mean, effective_log_scale):
data_dims = 3
num_samples = 10**6
mean = effective_mean * tf.ones(shape=(data_dims), dtype=tf.float32)
log_scale = effective_log_scale * tf.ones(
shape=(data_dims), dtype=tf.float32)
dist = dist_utils.multi_normal(loc=mean, log_scale=log_scale)
dist_samples = dist.sample(num_samples)
function = lambda x: tf.reduce_sum(x**2, axis=1)
loss = gradient_estimators.score_function_loss(
dist=dist,
dist_samples=dist_samples,
function=function)
loss.shape.assert_is_compatible_with([num_samples])
loss = tf.reduce_mean(loss)
mean_grads = tf.gradients(loss, mean)[0]
mean_grads.shape.assert_is_compatible_with(data_dims)
expected_mean_grads = 2 * effective_mean * np.ones(
data_dims, dtype=np.float32)
log_scale_grads = tf.gradients(loss, log_scale)[0]
log_scale_grads.shape.assert_is_compatible_with(data_dims)
expected_log_scale_grads = 2 * np.exp(2 * effective_log_scale) * np.ones(
data_dims, dtype=np.float32)
with self.test_session() as sess:
init_op = tf.initialize_all_variables()
sess.run(init_op)
self.assertAllClose(
sess.run(mean_grads), expected_mean_grads, rtol=1e-1, atol=1e-3)
self.assertAllClose(
sess.run(log_scale_grads),
expected_log_scale_grads, rtol=1e-1, atol=1e-3)
class ReparametrizationTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters([1.2, 10.0, 5.])
def testConstantFunction(self, constant):
data_dims = 3
num_samples = 10**6
effective_mean = 1.5
mean = effective_mean * tf.ones(shape=(data_dims), dtype=tf.float32)
effective_log_scale = 0.0
log_scale = effective_log_scale * tf.ones(
shape=(data_dims), dtype=tf.float32)
dist = dist_utils.multi_normal(loc=mean, log_scale=log_scale)
dist_samples = dist.sample(num_samples)
dist_samples.shape.assert_is_compatible_with([num_samples, data_dims])
function = lambda x: tf.ones_like(x[:, 0])
loss = gradient_estimators.pathwise_loss(
function, dist_samples, dist)
loss.shape.assert_is_compatible_with([num_samples])
loss = tf.reduce_mean(loss)
mean_grads = tf.gradients(loss, mean)[0]
self.assertFalse(mean_grads)
log_scale_grads = tf.gradients(loss, log_scale)[0]
self.assertFalse(log_scale_grads)
@parameterized.parameters([(0.5, -1.), (1.0, 0.0), (0.8, 0.1)])
def testLinearFunction(self, effective_mean, effective_log_scale):
data_dims = 3
num_samples = 10**6
mean = effective_mean * tf.ones(shape=(data_dims), dtype=tf.float32)
log_scale = effective_log_scale * tf.ones(
shape=(data_dims), dtype=tf.float32)
dist = dist_utils.multi_normal(loc=mean, log_scale=log_scale)
dist_samples = dist.sample(num_samples)
dist_samples.shape.assert_is_compatible_with([num_samples, data_dims])
function = lambda x: tf.reduce_sum(x, axis=1)
loss = gradient_estimators.pathwise_loss(
function, dist_samples, dist)
loss.shape.assert_is_compatible_with([num_samples])
loss = tf.reduce_mean(loss)
loss.shape.assert_is_compatible_with([])
mean_grads = tf.gradients(loss, mean)[0]
mean_grads.shape.assert_is_compatible_with(data_dims)
expected_mean_grads = np.ones(data_dims, dtype=np.float32)
log_scale_grads = tf.gradients(loss, log_scale)[0]
expected_log_scale_grads = np.zeros(data_dims, dtype=np.float32)
with self.test_session() as sess:
init_op = tf.global_variables_initializer()
sess.run(init_op)
# This should be an analytical computation, the result needs to be
# accurate.
self.assertAllClose(
sess.run(mean_grads), expected_mean_grads, rtol=1e-1, atol=1e-3)
self.assertAllClose(
sess.run(log_scale_grads),
expected_log_scale_grads,
atol=1e-2)
@parameterized.parameters([(1.0, 1.0)])
def testQuadraticFunction(self, effective_mean, effective_log_scale):
data_dims = 1
num_samples = 10**6
mean = effective_mean * tf.ones(shape=(data_dims), dtype=tf.float32)
log_scale = effective_log_scale * tf.ones(
shape=(data_dims), dtype=tf.float32)
dist = dist_utils.multi_normal(loc=mean, log_scale=log_scale)
dist_samples = dist.sample(num_samples)
function = lambda x: tf.reduce_sum(x**2, axis=1)
loss = gradient_estimators.pathwise_loss(
function, dist_samples, dist)
loss.shape.assert_is_compatible_with([num_samples])
loss = tf.reduce_mean(loss)
loss.shape.assert_is_compatible_with([])
mean_grads = tf.gradients(loss, mean)[0]
mean_grads.shape.assert_is_compatible_with(data_dims)
expected_mean_grads = 2 * effective_mean * np.ones(
data_dims, dtype=np.float32)
log_scale_grads = tf.gradients(loss, log_scale)[0]
log_scale_grads.shape.assert_is_compatible_with(data_dims)
expected_log_scale_grads = 2 * np.exp(2 * effective_log_scale) * np.ones(
data_dims, dtype=np.float32)
with self.test_session() as sess:
init_op = tf.initialize_all_variables()
sess.run(init_op)
self.assertAllClose(
sess.run(mean_grads), expected_mean_grads, rtol=1e-1, atol=1e-3)
self.assertAllClose(
sess.run(log_scale_grads),
expected_log_scale_grads, rtol=1e-1, atol=1e-3)
class MeasureValuedDerivativesTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(_cross_prod(
[([1.0, 2.0, 3.], [-1., 0.3, -2.], [1., 1., 1.]),
([1.0, 2.0, 3.], [-1., 0.3, -2.], [4., 2., 3.]),
([1.0, 2.0, 3.], [0.1, 0.2, 0.1], [10., 5., 1.])
], [True, False]))
def testWeightedLinear(
self, effective_mean, effective_log_scale, weights, coupling):
num_samples = 10**5
effective_mean = np.array(effective_mean)
effective_log_scale = np.array(effective_log_scale)
weights = np.array(weights)
data_dims = len(effective_mean)
mean = tf.constant(effective_mean, dtype=tf.float32)
log_scale = tf.constant(effective_log_scale, dtype=tf.float32)
dist = dist_utils.multi_normal(loc=mean, log_scale=log_scale)
dist_samples = dist.sample(num_samples)
    function = lambda x: (tf.reduce_sum(x * weights, axis=1))
loss, _ = gradient_estimators.measure_valued_loss(
function, dist_samples, dist, coupling=coupling)
loss.shape.assert_is_compatible_with([num_samples])
loss = tf.reduce_mean(loss)
mean_grads = tf.gradients(loss, mean)[0]
mean_grads.shape.assert_is_compatible_with(data_dims)
log_scale_grads = tf.gradients(loss, log_scale)[0]
log_scale_grads.shape.assert_is_compatible_with(data_dims)
expected_mean_grads = weights
expected_log_scale_grads = np.zeros(data_dims, dtype=np.float32)
with self.test_session() as sess:
init_op = tf.initialize_all_variables()
sess.run(init_op)
mean_grads_np, log_scale_grads_np = sess.run(
[mean_grads, log_scale_grads])
self.assertAllClose(
expected_mean_grads, mean_grads_np, rtol=1e-1, atol=1e-3)
self.assertAllClose(
expected_log_scale_grads, log_scale_grads_np, rtol=1, atol=1e-3)
@parameterized.parameters(_cross_prod(
[([1.0, 2.0, 3.], [-1., 0.3, -2.], [1., 1., 1.]),
([1.0, 2.0, 3.], [-1., 0.3, -2.], [4., 2., 3.]),
([1.0, 2.0, 3.], [0.1, 0.2, 0.1], [10., 5., 1.])
], [True, False]))
def testWeightedQuadratic(
self, effective_mean, effective_log_scale, weights, coupling):
num_samples = 5 * 10**5
effective_mean = np.array(effective_mean)
effective_log_scale = np.array(effective_log_scale)
weights = np.array(weights)
data_dims = len(effective_mean)
mean = tf.constant(effective_mean, dtype=tf.float32)
log_scale = tf.constant(effective_log_scale, dtype=tf.float32)
dist = dist_utils.multi_normal(loc=mean, log_scale=log_scale)
dist_samples = dist.sample(num_samples)
    function = lambda x: (tf.reduce_sum(x * weights, axis=1)) ** 2
loss, _ = gradient_estimators.measure_valued_loss(
function, dist_samples, dist, coupling=coupling)
loss.shape.assert_is_compatible_with([num_samples])
loss = tf.reduce_mean(loss)
mean_grads = tf.gradients(loss, mean)[0]
mean_grads.shape.assert_is_compatible_with(data_dims)
log_scale_grads = tf.gradients(loss, log_scale)[0]
log_scale_grads.shape.assert_is_compatible_with(data_dims)
expected_mean_grads = 2 * weights * np.sum(weights * effective_mean)
effective_scale = np.exp(effective_log_scale)
expected_scale_grads = 2 * weights ** 2 * effective_scale
expected_log_scale_grads = expected_scale_grads * effective_scale
with self.test_session() as sess:
init_op = tf.initialize_all_variables()
sess.run(init_op)
mean_grads_np, log_scale_grads_np = sess.run(
[mean_grads, log_scale_grads])
self.assertAllClose(
expected_mean_grads, mean_grads_np, rtol=1e-1, atol=1e-1)
self.assertAllClose(
expected_log_scale_grads, log_scale_grads_np, rtol=1e-1, atol=1e-1)
@parameterized.parameters(_cross_prod(
[([1.0], [1.0], lambda x: (tf.reduce_sum(tf.cos(x), axis=1))),
# Need to ensure that the mean is not too close to 0.
([10.0], [0.0], lambda x: (tf.reduce_sum(tf.log(x), axis=1))),
([1.0, 2.0], [1.0, -2],
lambda x: (tf.reduce_sum(tf.cos(2 * x), axis=1))),
([1.0, 2.0], [1.0, -2],
lambda x: (tf.cos(tf.reduce_sum(2 * x, axis=1)))),
], [True, False]))
def testNonPolynomialFunctionConsistencyWithReparam(
self, effective_mean, effective_log_scale, function, coupling):
num_samples = 10**5
effective_mean = np.array(effective_mean)
effective_log_scale = np.array(effective_log_scale)
data_dims = len(effective_mean)
mean = tf.constant(effective_mean, dtype=tf.float32)
log_scale = tf.constant(effective_log_scale, dtype=tf.float32)
dist = dist_utils.multi_normal(loc=mean, log_scale=log_scale)
dist_samples = dist.sample(num_samples)
loss, _ = gradient_estimators.measure_valued_loss(
function, dist_samples, dist, coupling=coupling)
loss.shape.assert_is_compatible_with([num_samples])
loss = tf.reduce_mean(loss)
mean_grads = tf.gradients(loss, mean)[0]
mean_grads.shape.assert_is_compatible_with(data_dims)
log_scale_grads = tf.gradients(loss, log_scale)[0]
log_scale_grads.shape.assert_is_compatible_with(data_dims)
reparam_loss = gradient_estimators.pathwise_loss(
function, dist_samples, dist)
reparam_loss.shape.assert_is_compatible_with([num_samples])
reparam_loss = tf.reduce_mean(reparam_loss)
reparam_mean_grads = tf.gradients(reparam_loss, mean)[0]
reparam_log_scale_grads = tf.gradients(reparam_loss, log_scale)[0]
with self.test_session() as sess:
init_op = tf.initialize_all_variables()
sess.run(init_op)
(mean_grads_np, log_scale_grads_np,
reparam_mean_grads_np, reparam_log_scale_grads_np) = sess.run(
[mean_grads, log_scale_grads,
reparam_mean_grads, reparam_log_scale_grads])
self.assertAllClose(
reparam_mean_grads_np, mean_grads_np, rtol=5e-1, atol=1e-1)
self.assertAllClose(
reparam_log_scale_grads_np, log_scale_grads_np, rtol=5e-1, atol=1e-1)
if __name__ == '__main__':
tf.test.main()
| mc_gradients-master | monte_carlo_gradients/gradient_estimators_test.py |
# Copyright 2019 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Control variate utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sonnet as snt
import tensorflow as tf
from monte_carlo_gradients import utils
def control_delta_method(dist, dist_samples, function, grad_loss_fn=None):
"""Computes the delta method control variate.
For details, see: https://icml.cc/2012/papers/687.pdf
Args:
dist: A tfp.distributions.Distribution instance.
The expected control variate computation assumes this distribution is
from the Normal family.
dist_samples: a tf.Tensor of samples from `dist`.
function: A function for which to compute the second order approximation.
grad_loss_fn: The gradient estimator function or None.
Needs to return both a surrogate loss and a dictionary of jacobians.
If None, only the first two elements in the return tuple are meaningful.
Returns:
A tuple containing four elements:
      * The control variate evaluated at `dist_samples`, a [num_samples]
        tensor.
      * The expected value of the control variate under `dist`, a scalar
        `tf.Tensor`.
* The surrogate CV used for gradient estimation (a `tf.Tensor`).
This tensor is obtained by splitting the control variate into
two parts: one for the stochastic gradient estimation (via
`grad_loss_fn`) and one via analytical gradients. This is
        done via the chain rule. Note: the value of this surrogate is not
        the value of the control variate itself, but approximately twice
        that value.
* A dictionary containing the jacobians of the control variate according
to `grad_fn`. If `grad_fn` is None, then a dictionary with 0 values.
"""
mean_dist = dist.input_mean
# Expand the mean distribution tensor.
data_dim = utils.get_shape_list(dist_samples)[1]
mean_dist = tf.ones((1, 1)) * tf.expand_dims(mean_dist, axis=0)
def _cv(mean_dist, dist_samples, stop_grad_mean):
"""Computes the value of the control variate."""
num_samples = utils.get_shape_list(dist_samples)[0]
if stop_grad_mean:
mean_dist = tf.stop_gradient(mean_dist)
posterior_mean_f = function(mean_dist)
# The loss has been summed over batch, and we only used one parameter
# (the mean) to compute loss.
posterior_mean_f_shape = utils.get_shape_list(posterior_mean_f)
if posterior_mean_f_shape not in [[1], []]:
raise ValueError('Invalid shape for posterior_mean_f {}!'.format(
posterior_mean_f_shape))
# Compute first order gradients.
first_order_gradients = tf.gradients(posterior_mean_f, mean_dist)[0]
first_order_gradients.shape.assert_is_compatible_with([1, data_dim])
# Compute second order gradients.
second_order_gradients = utils.hessians(
posterior_mean_f, mean_dist, no_backprop=stop_grad_mean)
second_order_gradients.shape.assert_is_compatible_with(
[data_dim, data_dim])
centered_dist_samples = dist_samples - mean_dist
centered_dist_samples.shape.assert_is_compatible_with(
[num_samples, data_dim])
first_order_term = tf.reduce_sum(
centered_dist_samples * first_order_gradients, axis=1)
first_order_term.shape.assert_is_compatible_with([num_samples])
second_order_term = tf.matmul(
centered_dist_samples, second_order_gradients)
second_order_term.shape.assert_is_compatible_with(
[num_samples, data_dim])
second_order_term = tf.reduce_sum(
second_order_term * centered_dist_samples, axis=1)
second_order_term = 1./2 * second_order_term
control_variate = posterior_mean_f + first_order_term + second_order_term
control_variate.shape.assert_is_compatible_with([num_samples])
return control_variate, second_order_gradients
# Chain rule: we need to compute the gradients with respect to the mean,
# and the gradients coming from samples.
sample_gradients, _ = _cv(
mean_dist, dist_samples, stop_grad_mean=True)
param_gradients, second_order_gradients = _cv(
mean_dist, tf.stop_gradient(dist_samples), stop_grad_mean=False)
if grad_loss_fn:
surrogate_cv, jacobians = grad_loss_fn(
lambda x: _cv(mean_dist, x, stop_grad_mean=True)[0],
dist_samples, dist)
surrogate_cv += param_gradients
# The second order expansion is done at the mean, so only the mean will
# have jacobians here.
# Note: we do not need to use parallel iterations here, since there is no
# backpropagation through the samples.
jacobians[dist.input_mean] += utils.jacobians(
param_gradients, dist.input_mean)
else:
surrogate_cv = tf.zeros(())
jacobians = {var: 0. for var in dist.dist_vars}
expected_second_order_term = 1./2 * tf.reduce_sum(
dist.variance() * tf.diag_part(second_order_gradients))
posterior_mean_f = function(mean_dist)
expected_control_variate = posterior_mean_f + expected_second_order_term
return sample_gradients, expected_control_variate, surrogate_cv, jacobians
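# ------------------------------------------------------------------------------
# Editor's illustrative usage sketch (not part of the original module),
# mirroring the unit tests: evaluate the second-order (delta method) control
# variate of a function under a diagonal Gaussian, together with the surrogate
# and jacobians produced when pairing it with the pathwise estimator.
def _delta_method_usage_sketch(data_dims=3, num_samples=1000):
  from monte_carlo_gradients import dist_utils
  from monte_carlo_gradients import gradient_estimators
  mean = tf.ones([data_dims])
  log_scale = tf.zeros([data_dims])
  dist = dist_utils.multi_normal(loc=mean, log_scale=log_scale)
  samples = dist.sample(num_samples)
  function = lambda x: tf.reduce_sum(x**2)
  cv, expected_cv, surrogate_cv, jacobians = control_delta_method(
      dist, samples, function,
      grad_loss_fn=utils.grad_loss_fn_with_jacobians(
          gradient_estimators.pathwise_loss))
  # `cv` has shape [num_samples], `expected_cv` is its analytical expectation
  # under `dist`, and `jacobians` maps the distribution variables to
  # per-sample gradients of the control variate.
  return cv, expected_cv, surrogate_cv, jacobians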
def moving_averages_baseline(
dist, dist_samples, function, decay=0.9, grad_loss_fn=None):
loss = tf.reduce_mean(function(dist_samples))
moving_avg = tf.stop_gradient(snt.MovingAverage(decay=decay)(loss))
control_variate = moving_avg
expected_control_variate = moving_avg
surrogate_cv, jacobians = grad_loss_fn(
lambda x: moving_avg, dist_samples, dist)
# Note: this has no effect on the gradient in the pathwise case.
return control_variate, expected_control_variate, surrogate_cv, jacobians
def compute_control_variate_coeff(
dist, dist_var, model_loss_fn, grad_loss_fn, control_variate_fn,
num_samples, moving_averages=False, eps=1e-3):
r"""Computes the control variate coefficients for the given variable.
The coefficient is given by:
\sum_k cov(df/d var_k, dcv/d var_k) / (\sum var(dcv/d var_k) + eps)
Where var_k is the k'th element of the variable dist_var.
The covariance and variance calculations are done from samples obtained
from the distribution `dist`.
Args:
dist: a tfp.distributions.Distribution instance.
dist_var: the variable for which we are interested in computing the
coefficient.
The distribution samples should depend on these variables.
model_loss_fn: A function with signature: lambda samples: f(samples).
The model loss function.
grad_loss_fn: The gradient estimator function.
Needs to return both a surrogate loss and a dictionary of jacobians.
control_variate_fn: The surrogate control variate function. Its gradient
will be used as a control variate.
num_samples: Int. The number of samples to use for the cov/var calculation.
moving_averages: Bool. Whether or not to use moving averages for the
calculation.
eps: Float. Used to stabilize division.
Returns:
a tf.Tensor of rank 0. The coefficient for the input variable.
"""
# Resample to avoid biased gradients.
cv_dist_samples = dist.sample(num_samples)
cv_jacobians = control_variate_fn(
dist, cv_dist_samples, model_loss_fn, grad_loss_fn=grad_loss_fn)[-1]
loss_jacobians = grad_loss_fn(model_loss_fn, cv_dist_samples, dist)[-1]
cv_jacobians = cv_jacobians[dist_var]
loss_jacobians = loss_jacobians[dist_var]
# Num samples x num_variables
utils.assert_rank(loss_jacobians, 2)
# Num samples x num_variables
utils.assert_rank(cv_jacobians, 2)
mean_f = tf.reduce_mean(loss_jacobians, axis=0)
mean_cv, var_cv = tf.nn.moments(cv_jacobians, axes=[0])
cov = tf.reduce_mean(
(loss_jacobians - mean_f) * (cv_jacobians - mean_cv), axis=0)
utils.assert_rank(var_cv, 1)
utils.assert_rank(cov, 1)
# Compute the coefficients which minimize variance.
  # Since we want to minimize the variances across parameter dimensions,
  # the optimal coefficient is given by the sum of covariances per
  # dimension over the sum of variances per dimension.
cv_coeff = tf.reduce_sum(cov) / (tf.reduce_sum(var_cv) + eps)
cv_coeff = tf.stop_gradient(cv_coeff)
utils.assert_rank(cv_coeff, 0)
if moving_averages:
cv_coeff = tf.stop_gradient(snt.MovingAverage(decay=0.9)(cv_coeff))
return cv_coeff
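# ------------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original module): the optimal
# coefficient above is simply a ratio of summed covariances to summed
# variances of per-dimension jacobians. The same statistic in plain numpy,
# given jacobian samples of shape [num_samples, num_variables]:
def _numpy_cv_coeff_sketch(loss_jacobians, cv_jacobians, eps=1e-3):
  import numpy as np
  # Per-dimension covariance between the loss and control variate jacobians.
  cov = np.mean(
      (loss_jacobians - loss_jacobians.mean(axis=0)) *
      (cv_jacobians - cv_jacobians.mean(axis=0)), axis=0)
  # Per-dimension variance of the control variate jacobians.
  var = cv_jacobians.var(axis=0)
  return np.sum(cov) / (np.sum(var) + eps)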
def control_variates_surrogate_loss(
dist, dist_samples, dist_vars,
model_loss_fn,
grad_loss_fn,
control_variate_fn,
estimate_cv_coeff=True,
num_posterior_samples_cv_coeff=20):
r"""Computes a surrogate loss by computing the gradients manually.
The loss function returned is:
\sum_i stop_grad(grad_i) * var_i,
where grad_i was computed from stochastic_loss and control variate.
This function uses `compute_control_variate_coeff` to compute the control
variate coefficients and should be used only in conjunction with control
variates.
Args:
dist: a tfp.distributions.Distribution instance.
dist_samples: samples from dist.
    dist_vars: the variables with respect to which we want gradients.
The distribution samples should depend on these variables.
model_loss_fn: A function with signature: lambda samples: f(samples).
The model loss function.
grad_loss_fn: The gradient estimator function.
Needs to return both a surrogate loss and a dictionary of jacobians.
control_variate_fn: The surrogate control variate function. Its gradient
will be used as a control variate.
estimate_cv_coeff: Boolean. Whether or not to use a coefficient
for the control variate to minimize variance of the surrogate loss
estimate. If False, the control variate coefficient is set to 1.
If True, uses `compute_control_variate_coeff` to compute the coefficient.
num_posterior_samples_cv_coeff: The number of posterior samples used
to compute the cv coeff. Only used if `estimate_cv_coeff`
is True.
Returns:
    A tuple containing two elements:
      * the surrogate loss, a tf.Tensor of shape [num_samples].
      * the jacobians wrt dist_vars.
"""
_, expected_control_variate, _, cv_jacobians = control_variate_fn(
dist, dist_samples, model_loss_fn, grad_loss_fn=grad_loss_fn)
_, loss_jacobians = grad_loss_fn(model_loss_fn, dist_samples, dist)
jacobians = {}
for dist_var in dist_vars:
if estimate_cv_coeff:
cv_coeff = compute_control_variate_coeff(
dist, dist_var,
model_loss_fn=model_loss_fn,
grad_loss_fn=grad_loss_fn,
control_variate_fn=control_variate_fn,
num_samples=num_posterior_samples_cv_coeff)
else:
cv_coeff = 1.
var_jacobians = loss_jacobians[dist_var] - cv_coeff * cv_jacobians[dist_var]
# Num samples x num_variables
utils.assert_rank(var_jacobians, 2)
jacobians[dist_var] = var_jacobians
utils.add_grads_to_jacobians(
jacobians, expected_control_variate * cv_coeff, [dist_var])
surrogate_loss = 0.0
for dist_var in dist_vars:
surrogate_loss += tf.stop_gradient(jacobians[dist_var]) * dist_var
# Sum over variable dimensions.
surrogate_loss = tf.reduce_sum(surrogate_loss, axis=1)
return surrogate_loss, jacobians
| mc_gradients-master | monte_carlo_gradients/control_variates.py |
# Copyright 2019 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from monte_carlo_gradients import control_variates
from monte_carlo_gradients import dist_utils
from monte_carlo_gradients import gradient_estimators
from monte_carlo_gradients import utils
class DeltaControlVariateTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters([(1.0, 0.5)])
def testQuadraticFunction(self, effective_mean, effective_log_scale):
data_dims = 20
num_samples = 10**6
mean = effective_mean * tf.ones(shape=(data_dims), dtype=tf.float32)
log_scale = effective_log_scale * tf.ones(
shape=(data_dims), dtype=tf.float32)
dist = dist_utils.multi_normal(loc=mean, log_scale=log_scale)
dist_samples = dist.sample(num_samples)
function = lambda x: tf.reduce_sum(x**2)
cv, expected_cv, _, _ = control_variates.control_delta_method(
dist, dist_samples, function)
avg_cv = tf.reduce_mean(cv)
expected_cv_value = tf.reduce_sum(dist_samples** 2) / num_samples
with self.test_session() as sess:
init_op = tf.global_variables_initializer()
sess.run(init_op)
# This should be an analytical computation, the result needs to be
# accurate.
self.assertAllClose(
sess.run(avg_cv), sess.run(expected_cv_value), rtol=1e-1, atol=1e-3)
self.assertAllClose(
sess.run(expected_cv),
sess.run(expected_cv_value),
atol=1e-1)
@parameterized.parameters([(1.0, 1.0)])
  def testPolynomialFunction(self, effective_mean, effective_log_scale):
data_dims = 10
num_samples = 10**3
mean = effective_mean * tf.ones(shape=(data_dims), dtype=tf.float32)
log_scale = effective_log_scale * tf.ones(
shape=(data_dims), dtype=tf.float32)
dist = dist_utils.multi_normal(loc=mean, log_scale=log_scale)
dist_samples = dist.sample(num_samples)
function = lambda x: tf.reduce_sum(x**5)
cv, expected_cv, _, _ = control_variates.control_delta_method(
dist, dist_samples, function)
avg_cv = tf.reduce_mean(cv)
with self.test_session() as sess:
init_op = tf.global_variables_initializer()
sess.run(init_op)
# Check that the average value of the control variate is close to the
# expected value.
self.assertAllClose(
sess.run(avg_cv), sess.run(expected_cv), rtol=1e-1, atol=1e-3)
@parameterized.parameters([(1.0, 1.0)])
def testNonPolynomialFunction(self, effective_mean, effective_log_scale):
data_dims = 10
num_samples = 10**3
mean = effective_mean * tf.ones(shape=(data_dims), dtype=tf.float32)
log_scale = effective_log_scale * tf.ones(
shape=(data_dims), dtype=tf.float32)
dist = dist_utils.multi_normal(loc=mean, log_scale=log_scale)
dist_samples = dist.sample(num_samples)
function = lambda x: tf.reduce_sum(tf.log(x**2))
cv, expected_cv, _, _ = control_variates.control_delta_method(
dist, dist_samples, function)
avg_cv = tf.reduce_mean(cv)
self.assertTrue(tf.gradients(expected_cv, mean))
self.assertTrue(tf.gradients(expected_cv, log_scale))
with self.test_session() as sess:
init_op = tf.global_variables_initializer()
sess.run(init_op)
# Check that the average value of the control variate is close to the
# expected value.
self.assertAllClose(
sess.run(avg_cv), sess.run(expected_cv), rtol=1e-1, atol=1e-3)
def testNonPolynomialFunctionWithGradients(self):
data_dims = 1
num_samples = 10**3
effective_mean = 1.
effective_log_scale = 1.
mean = effective_mean * tf.ones(shape=(data_dims), dtype=tf.float32)
log_scale = effective_log_scale * tf.ones(
shape=(data_dims), dtype=tf.float32)
dist = dist_utils.multi_normal(loc=mean, log_scale=log_scale)
dist_samples = dist.sample(num_samples)
function = lambda x: tf.reduce_sum(tf.log(x**2))
(cv, expected_cv,
surrogate_cv, jacobians) = control_variates.control_delta_method(
dist, dist_samples, function,
grad_loss_fn=utils.grad_loss_fn_with_jacobians(
gradient_estimators.pathwise_loss))
surrogate_cv = tf.reduce_mean(surrogate_cv)
mean_cv_grads = tf.gradients(surrogate_cv, mean)[0]
mean_expected_cv_grads = tf.gradients(expected_cv, mean)[0]
log_scale_cv_grads = tf.gradients(surrogate_cv, log_scale)[0]
log_scale_expected_cv_grads = tf.gradients(expected_cv, log_scale)[0]
# Second order expansion is log(\mu**2) + 1/2 * \sigma**2 (-2 / \mu**2)
expected_cv_val = - np.exp(1.) ** 2
# The gradient is 2 / mu + \sigma ** 2 * 2
expected_cv_mean_grad = 2 + 2 * np.exp(1.) ** 2
mean_jacobians = jacobians[mean]
log_scale_jacobians = jacobians[log_scale]
with self.test_session() as sess:
init_op = tf.global_variables_initializer()
sess.run(init_op)
self.assertAllClose(
sess.run(tf.reduce_mean(cv)),
sess.run(expected_cv), rtol=1e-1, atol=1e-3)
self.assertAllClose(
sess.run(expected_cv), expected_cv_val, rtol=1e-1, atol=1e-3)
self.assertAllClose(
sess.run(tf.reduce_mean(cv)), expected_cv_val, rtol=1e-1, atol=1e-3)
self.assertAllClose(
sess.run(mean_expected_cv_grads[0]), expected_cv_mean_grad,
rtol=1e-1, atol=1e-3)
self.assertAllClose(
sess.run(mean_cv_grads),
sess.run(mean_expected_cv_grads), rtol=1e-1, atol=1e-3)
self.assertAllClose(
sess.run(log_scale_cv_grads),
sess.run(log_scale_expected_cv_grads), rtol=1e-1, atol=1e-3)
self.assertAllClose(
sess.run(tf.reduce_mean(mean_jacobians)),
# Strip the leading dimension of 1.
sess.run(mean_cv_grads[0]), rtol=1e-1, atol=1e-3)
self.assertAllClose(
sess.run(tf.reduce_mean(log_scale_jacobians)),
# Strip the leading dimension of 1.
sess.run(log_scale_cv_grads[0]), rtol=1e-1, atol=1e-3)
class SurrogateLossFromGradients(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters([
(1.0, 1.0, gradient_estimators.score_function_loss, True),
(1.0, 1.0, gradient_estimators.pathwise_loss, False),
(1.0, 1.0, gradient_estimators.pathwise_loss, True)])
def testQuadraticFunction(
self, effective_mean, effective_log_scale, grad_loss_fn,
estimate_cv_coeff):
data_dims = 3
num_samples = 10**3
mean = effective_mean * tf.ones(shape=(data_dims), dtype=tf.float32)
log_scale = effective_log_scale * tf.ones(
shape=(data_dims), dtype=tf.float32)
dist_vars = [mean, log_scale]
dist = dist_utils.multi_normal(loc=mean, log_scale=log_scale)
dist_samples = dist.sample(num_samples)
model_loss_fn = lambda x: tf.reduce_sum(x**2, axis=1)
control_variate_fn = control_variates.control_delta_method
loss, jacobians = control_variates.control_variates_surrogate_loss(
dist=dist,
dist_samples=dist_samples,
dist_vars=dist_vars,
model_loss_fn=model_loss_fn,
grad_loss_fn=utils.grad_loss_fn_with_jacobians(grad_loss_fn),
estimate_cv_coeff=estimate_cv_coeff,
control_variate_fn=control_variate_fn)
loss.shape.assert_is_compatible_with([num_samples])
loss = tf.reduce_mean(loss)
expected_mean_grads = 2 * effective_mean * np.ones(
data_dims, dtype=np.float32)
expected_log_scale_grads = 2 * np.exp(2 * effective_log_scale) * np.ones(
data_dims, dtype=np.float32)
mean_jacobians = jacobians[mean]
mean_jacobians.shape.assert_is_compatible_with([num_samples, data_dims])
mean_grads_from_jacobian = tf.reduce_mean(mean_jacobians, axis=0)
log_scale_jacobians = jacobians[log_scale]
log_scale_jacobians.shape.assert_is_compatible_with(
[num_samples, data_dims])
log_scale_grads_from_jacobian = tf.reduce_mean(log_scale_jacobians, axis=0)
mean_grads = tf.gradients(loss, mean)[0]
mean_grads.shape.assert_is_compatible_with(data_dims)
log_scale_grads = tf.gradients(loss, log_scale)[0]
log_scale_grads.shape.assert_is_compatible_with(data_dims)
with self.test_session() as sess:
init_op = tf.initialize_all_variables()
sess.run(init_op)
self.assertAllClose(
sess.run(mean_grads), expected_mean_grads, rtol=1e-1, atol=1e-3)
self.assertAllClose(
sess.run(log_scale_grads),
expected_log_scale_grads, rtol=1e-1, atol=1e-3)
self.assertAllClose(
sess.run(mean_grads_from_jacobian), expected_mean_grads,
rtol=1e-1, atol=1e-3)
self.assertAllClose(
sess.run(log_scale_grads_from_jacobian),
expected_log_scale_grads, rtol=1e-1, atol=1e-3)
@parameterized.parameters([
(1.0, 1.0, gradient_estimators.score_function_loss),
(1.0, 1.0, gradient_estimators.pathwise_loss)])
def testQuadraticFunctionWithAnalyticalLoss(
self, effective_mean, effective_log_scale, grad_loss_fn):
data_dims = 3
num_samples = 10**3
mean = effective_mean * tf.ones(shape=(data_dims), dtype=tf.float32)
log_scale = effective_log_scale * tf.ones(
shape=(data_dims), dtype=tf.float32)
dist_vars = [mean, log_scale]
dist = dist_utils.multi_normal(loc=mean, log_scale=log_scale)
dist_samples = dist.sample(num_samples)
model_loss_fn = lambda x: tf.reduce_sum(x**2, axis=1)
control_variate_fn = control_variates.control_delta_method
loss, jacobians = control_variates.control_variates_surrogate_loss(
dist=dist,
dist_samples=dist_samples,
dist_vars=dist_vars,
model_loss_fn=model_loss_fn,
grad_loss_fn=utils.grad_loss_fn_with_jacobians(grad_loss_fn),
control_variate_fn=control_variate_fn)
loss.shape.assert_is_compatible_with([num_samples])
loss = tf.reduce_mean(loss)
expected_mean_grads = 2 * effective_mean * np.ones(
data_dims, dtype=np.float32)
expected_log_scale_grads = 2 * np.exp(2 * effective_log_scale) * np.ones(
data_dims, dtype=np.float32)
mean_jacobians = jacobians[mean]
mean_jacobians.shape.assert_is_compatible_with([num_samples, data_dims])
mean_grads_from_jacobian = tf.reduce_mean(mean_jacobians, axis=0)
log_scale_jacobians = jacobians[log_scale]
log_scale_jacobians.shape.assert_is_compatible_with(
[num_samples, data_dims])
log_scale_grads_from_jacobian = tf.reduce_mean(log_scale_jacobians, axis=0)
mean_grads = tf.gradients(loss, mean)[0]
mean_grads.shape.assert_is_compatible_with(data_dims)
log_scale_grads = tf.gradients(loss, log_scale)[0]
log_scale_grads.shape.assert_is_compatible_with(data_dims)
with self.test_session() as sess:
init_op = tf.initialize_all_variables()
sess.run(init_op)
self.assertAllClose(
sess.run(mean_grads), expected_mean_grads, rtol=1e-1, atol=1e-3)
self.assertAllClose(
sess.run(log_scale_grads),
expected_log_scale_grads, rtol=1e-1, atol=1e-3)
self.assertAllClose(
sess.run(mean_grads_from_jacobian), expected_mean_grads,
rtol=1e-1, atol=1e-3)
self.assertAllClose(
sess.run(log_scale_grads_from_jacobian),
expected_log_scale_grads, rtol=1e-1, atol=1e-3)
@parameterized.parameters([
(1.0, 1.0, gradient_estimators.score_function_loss, 7 * 10 **3),
(1.0, 1.0, gradient_estimators.measure_valued_loss, 10 **3),
(1.0, 1.0, gradient_estimators.pathwise_loss, 10 **3)])
def testNonPolynomialFunctionConsistency(
self, effective_mean, effective_log_scale, grad_loss_fn, num_samples):
"""Check that the gradients are consistent between estimators."""
data_dims = 3
mean = effective_mean * tf.ones(shape=(data_dims), dtype=tf.float32)
log_scale = effective_log_scale * tf.ones(
shape=(data_dims), dtype=tf.float32)
dist_vars = [mean, log_scale]
dist = dist_utils.multi_normal(loc=mean, log_scale=log_scale)
dist_samples = dist.sample(num_samples)
model_loss_fn = lambda x: tf.log(tf.reduce_sum(x**2, axis=1))
control_variate_fn = control_variates.control_delta_method
grad_loss_fn = utils.grad_loss_fn_with_jacobians(grad_loss_fn)
loss, jacobians = control_variates.control_variates_surrogate_loss(
dist=dist,
dist_samples=dist_samples,
dist_vars=dist_vars,
model_loss_fn=model_loss_fn,
grad_loss_fn=grad_loss_fn,
control_variate_fn=control_variate_fn)
loss.shape.assert_is_compatible_with([num_samples])
loss = tf.reduce_mean(loss)
mean_jacobians = jacobians[mean]
mean_jacobians.shape.assert_is_compatible_with([num_samples, data_dims])
mean_grads_from_jacobian = tf.reduce_mean(mean_jacobians, axis=0)
log_scale_jacobians = jacobians[log_scale]
log_scale_jacobians.shape.assert_is_compatible_with(
[num_samples, data_dims])
log_scale_grads_from_jacobian = tf.reduce_mean(log_scale_jacobians, axis=0)
mean_grads = tf.gradients(loss, mean)[0]
mean_grads.shape.assert_is_compatible_with(data_dims)
log_scale_grads = tf.gradients(loss, log_scale)[0]
log_scale_grads.shape.assert_is_compatible_with(data_dims)
no_cv_loss, _ = grad_loss_fn(model_loss_fn, dist_samples, dist)
no_cv_loss.shape.assert_is_compatible_with([num_samples])
no_cv_loss = tf.reduce_mean(no_cv_loss)
no_cv_mean_grads = tf.gradients(no_cv_loss, mean)[0]
no_cv_mean_grads.shape.assert_is_compatible_with(data_dims)
no_cv_log_scale_grads = tf.gradients(no_cv_loss, log_scale)[0]
no_cv_log_scale_grads.shape.assert_is_compatible_with(data_dims)
with self.test_session() as sess:
init_op = tf.initialize_all_variables()
sess.run(init_op)
(mean_grads_from_jacobian_np, mean_grads_np,
log_scale_grads_from_jacobian_np, log_scale_grads_np,
no_cv_mean_grads_np, no_cv_log_scale_grads_np) = sess.run(
[mean_grads_from_jacobian, mean_grads,
log_scale_grads_from_jacobian, log_scale_grads,
no_cv_mean_grads, no_cv_log_scale_grads])
self.assertAllClose(
mean_grads_from_jacobian_np, mean_grads_np, rtol=1e-1, atol=1e-3)
self.assertAllClose(
log_scale_grads_from_jacobian_np, log_scale_grads_np,
rtol=1e-1, atol=1e-3)
self.assertAllClose(
mean_grads_np, no_cv_mean_grads_np, rtol=1e-1, atol=1e-1)
self.assertAllClose(
log_scale_grads_np, no_cv_log_scale_grads_np, rtol=1e-1, atol=1e-1)
if __name__ == '__main__':
tf.test.main()
| mc_gradients-master | monte_carlo_gradients/control_variates_test.py |
# Copyright 2019 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for utility functions for distributions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import tensorflow_probability as tfp
from monte_carlo_gradients import dist_utils
tfd = tfp.distributions
class DistUtilsTest(tf.test.TestCase):
def testGaussianfromMaxwellShape(self):
sample_shape = (10, 5, 7)
loc = tf.zeros(sample_shape)
scale = tf.ones(sample_shape)
n_samples = int(10e3)
dsmaxwell = tfd.DoublesidedMaxwell(
loc=loc, scale=scale)
samples = dsmaxwell.sample(n_samples, seed=100)
std_gaussian_rvs = dist_utils.std_gaussian_from_std_dsmaxwell(samples)
self.assertEqual(std_gaussian_rvs.shape[1:], sample_shape)
def testGaussianfromMaxwell(self):
shape = (5, 10)
mu = 3. * tf.ones(shape)
sigma = 1.8 * tf.ones(shape)
n_samples = int(10e3)
dsmaxwell = tfd.DoublesidedMaxwell(
loc=tf.zeros(shape), scale=tf.ones(shape))
samples = dsmaxwell.sample(n_samples, seed=100)
std_gaussian_rvs = (
dist_utils.std_gaussian_from_std_dsmaxwell(samples))
gaussian_rvs = std_gaussian_rvs*sigma + mu
sample_mean = tf.reduce_mean(gaussian_rvs, 0)
sample_variance = tf.sqrt(
tf.reduce_mean(tf.square(gaussian_rvs - sample_mean), 0))
self.assertAllClose(sample_mean, mu, rtol=0.05)
self.assertAllClose(sample_variance, sigma, rtol=0.05)
if __name__ == '__main__':
tf.test.main()
| mc_gradients-master | monte_carlo_gradients/dist_utils_test.py |
# Copyright 2019 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from scipy import special
import tensorflow as tf
from monte_carlo_gradients import bayes_lr
from monte_carlo_gradients import dist_utils
def _sigmoid(x):
return special.expit(x)
class BayesianLogisticRegressionTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters([10, 12, 50])
def testApplyZeroSamples(self, batch_size):
data_dims = 10
num_samples = 5
dataset_size = 500
mean = tf.Variable(
tf.zeros(shape=(data_dims), dtype=tf.float32),
name='mean')
log_scale = tf.Variable(
tf.zeros(shape=(data_dims), dtype=tf.float32),
name='log_scale')
# Prior = posterior.
prior = dist_utils.multi_normal(loc=mean, log_scale=log_scale)
posterior = dist_utils.multi_normal(loc=mean, log_scale=log_scale)
model = bayes_lr.BayesianLogisticRegression(
prior, posterior, dataset_size=dataset_size, use_analytical_kl=True)
# Build the data
features = tf.random.uniform((batch_size, data_dims))
targets = tf.ones(batch_size)
posterior_samples = tf.zeros((num_samples, data_dims))
model_output = model.apply(
features, targets, posterior_samples=posterior_samples)
expected_predictions = np.ones((batch_size, num_samples))
expected_accuracy = 1.
expected_data_log_probs = np.log(0.5) * np.ones((batch_size))
expected_elbo = np.log(0.5) * dataset_size * np.ones((num_samples))
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
self.assertEqual(sess.run(model.analytical_kl), 0)
self.assertAllEqual(
sess.run(model_output.predictions), expected_predictions)
self.assertAllEqual(
sess.run(model_output.accuracy), expected_accuracy)
self.assertAllClose(
sess.run(model_output.data_log_probs), expected_data_log_probs)
self.assertAllClose(
sess.run(model_output.elbo), expected_elbo)
def testApply(self):
data_dims = 10
batch_size = 50
num_samples = 6
dataset_size = 500
assert not batch_size % 2
assert not num_samples % 2
mean = tf.Variable(
tf.zeros(shape=(data_dims), dtype=tf.float32),
name='mean')
log_scale = tf.Variable(
tf.zeros(shape=(data_dims), dtype=tf.float32),
name='log_scale')
# Prior = posterior.
prior = dist_utils.multi_normal(loc=mean, log_scale=log_scale)
posterior = dist_utils.multi_normal(loc=mean, log_scale=log_scale)
model = bayes_lr.BayesianLogisticRegression(
prior, posterior, dataset_size=dataset_size, use_analytical_kl=True)
# Build the data
features = 3 * tf.ones((batch_size, data_dims), dtype=tf.float32)
targets = tf.concat(
[tf.zeros(int(batch_size/2), dtype=tf.float32),
tf.ones(int(batch_size/2), dtype=tf.float32)],
axis=0)
posterior_samples = tf.concat(
[tf.ones((int(num_samples/2), data_dims), dtype=tf.float32),
-1 * tf.ones((int(num_samples/2), data_dims), dtype=tf.float32)],
axis=0)
model_output = model.apply(
features, targets, posterior_samples=posterior_samples)
expected_logits = 3 * data_dims * np.concatenate(
[np.ones((batch_size, int(num_samples/2))),
-1 * np.ones((batch_size, int(num_samples/2)))],
axis=1)
quarter_ones = np.ones((int(batch_size/2), int(num_samples/2)))
# Compute log probs for the entire batch, for the first half of samples.
first_half_data_expected_log_probs = np.concatenate(
[np.log(1 - _sigmoid(3 * data_dims)) * quarter_ones,
np.log(_sigmoid(3 * data_dims)) * quarter_ones], axis=0)
# Compute log probs for the entire batch, for the second half of samples.
second_half_data_expected_log_probs = np.concatenate(
[np.log(1 - _sigmoid(- 3 * data_dims)) * quarter_ones,
np.log(_sigmoid(- 3 * data_dims)) * quarter_ones], axis=0)
expected_log_probs = np.concatenate(
[first_half_data_expected_log_probs,
second_half_data_expected_log_probs], axis=1)
first_half_expected_elbo = np.log(1 - _sigmoid(3 * data_dims))
first_half_expected_elbo += np.log(_sigmoid(3 * data_dims))
second_half_expected_elbo = np.log(_sigmoid(-3 * data_dims))
second_half_expected_elbo += np.log(1 - _sigmoid(-3 * data_dims))
expected_elbo = dataset_size/2. * np.concatenate([
first_half_expected_elbo* np.ones((int(num_samples/2))),
second_half_expected_elbo * np.ones((int(num_samples/2)))])
expected_predictions = np.concatenate(
[np.ones((batch_size, int(num_samples/2))),
np.zeros((batch_size, int(num_samples/2)))],
axis=1)
expected_accuracy = 0.5
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
self.assertEqual(sess.run(model_output.kl), 0)
self.assertAllEqual(
sess.run(model_output.logits), expected_logits)
self.assertAllEqual(
sess.run(model_output.predictions), expected_predictions)
self.assertAllEqual(
sess.run(model_output.accuracy), expected_accuracy)
self.assertAllClose(
sess.run(model_output.log_probs),
expected_log_probs, rtol=1e-1, atol=5e-3)
self.assertAllClose(
sess.run(model_output.elbo), expected_elbo, rtol=1e-1, atol=5e-3)
if __name__ == '__main__':
tf.test.main()
| mc_gradients-master | monte_carlo_gradients/bayes_lr_test.py |
# Copyright 2019 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data utilies."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import datasets
import tensorflow as tf
def get_sklearn_data(dataset_name='breast_cancer', normalize=True):
"""Read sklearn datasets as numpy array."""
if dataset_name == 'breast_cancer':
data = datasets.load_breast_cancer()
else:
raise ValueError('Unsupported dataset')
features = np.array(data.data, dtype=np.float32)
targets = np.array(data.target, dtype=np.float32)
if normalize:
# Note: the data dimensions have very different scales.
# We normalize by the mean and scale of the entire dataset.
features = features - features.mean(axis=0)
features = features / features.std(axis=0)
assert targets.min() == 0
assert targets.max() == 1
# Add a dimension of 1 (bias).
features = np.concatenate((features, np.ones((features.shape[0], 1))), axis=1)
features = np.array(features, dtype=np.float32)
return features, targets
def get_sklearn_data_as_tensors(batch_size=None, dataset_name='breast_cancer'):
"""Read sklearn datasets as tf.Tensors.
Args:
batch_size: Integer or None. If None, the entire dataset is used.
dataset_name: A string, the name of the dataset.
Returns:
    A tuple of two tensors: the features, of rank 2 `[B, F]`, and the
    targets, of rank 1 `[B]`.
"""
features, targets = get_sklearn_data(dataset_name=dataset_name)
dataset = tf.data.Dataset.from_tensor_slices((features, targets))
if batch_size:
# Shuffle, repeat, and batch the examples.
batched_dataset = dataset.shuffle(1000).repeat().batch(batch_size)
else:
batch_size = features.shape[0]
batched_dataset = dataset.repeat().batch(batch_size)
iterator = batched_dataset.make_one_shot_iterator()
batch_features, batch_targets = iterator.get_next()
data_dim = features.shape[1]
batch_features.set_shape([batch_size, data_dim])
batch_targets.set_shape([batch_size])
return batch_features, batch_targets
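# Usage sketch (illustrative; `_example_input_pipeline` is a hypothetical
# helper, not part of the original module): build a batched input pipeline
# from the default breast_cancer dataset.
def _example_input_pipeline():
  """Returns a `[32, F]` features tensor and a `[32]` targets tensor."""
  batch_features, batch_targets = get_sklearn_data_as_tensors(
      batch_size=32, dataset_name='breast_cancer')
  return batch_features, batch_targets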
| mc_gradients-master | monte_carlo_gradients/data_utils.py |
# Copyright 2019 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""General utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
import tensorflow as tf
from tensorflow.python.ops.parallel_for import gradients as pfor_gradients # pylint: disable=g-direct-tensorflow-import
def get_shape_list(tensor, expected_rank=None, name=None):
"""Returns a list of the shape of tensor, preferring static dimensions.
Args:
tensor: A tf.Tensor object to find the shape of.
    expected_rank: (optional) int. The expected rank of `tensor`. If this is
      specified and the `tensor` has a different rank, an exception will be
      thrown.
name: Optional name of the tensor for the error message.
Returns:
A list of dimensions of the shape of tensor. All static dimensions will
be returned as python integers, and dynamic dimensions will be returned
as tf.Tensor scalars.
"""
if name is None:
name = tensor.name
if expected_rank is not None:
assert_rank(tensor, expected_rank, name)
shape = tensor.shape.as_list()
non_static_indexes = []
for (index, dim) in enumerate(shape):
if dim is None:
non_static_indexes.append(index)
if not non_static_indexes:
return shape
dyn_shape = tf.shape(tensor)
for index in non_static_indexes:
shape[index] = dyn_shape[index]
return shape
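# Usage sketch (illustrative; `_example_get_shape_list` is a hypothetical
# helper): static dimensions come back as python ints, dynamic ones as
# tf.Tensor scalars.
def _example_get_shape_list():
  x = tf.placeholder(tf.float32, shape=[None, 8])
  # First element is a dynamic tf.Tensor scalar, second is the python int 8.
  return get_shape_list(x, expected_rank=2)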
def assert_rank(tensor, expected_rank, name=None):
"""Raises an exception if the tensor rank is not of the expected rank.
Args:
tensor: A tf.Tensor to check the rank of.
expected_rank: Python integer or list of integers, expected rank.
name: Optional name of the tensor for the error message.
Raises:
ValueError: If the expected shape doesn't match the actual shape.
"""
if name is None:
name = tensor.name
expected_rank_dict = {}
if isinstance(expected_rank, six.integer_types):
expected_rank_dict[expected_rank] = True
else:
for x in expected_rank:
expected_rank_dict[x] = True
actual_rank = tensor.shape.ndims
if actual_rank not in expected_rank_dict:
scope_name = tf.get_variable_scope().name
raise ValueError(
"For the tensor `%s` in scope `%s`, the actual rank "
"`%d` (shape = %s) is not equal to the expected rank `%s`" %
(name, scope_name, actual_rank, str(tensor.shape), str(expected_rank)))
def tile_second_to_last_dim(t):
  """Duplicates `t` along a new second-to-last axis: [..., D] -> [..., D, D]."""
  rank = t.shape.rank
  multiples = [1] * (rank - 1) + [tf.shape(t)[-1]] + [1]
  return tf.tile(tf.expand_dims(t, axis=-2), multiples)
def jacobians(ys, xs, parallel_iterations=None):
"""Compute the jacobians of `ys` with respect to `xs`.
Args:
ys: tf.Tensor.
    xs: tf.Tensor. The variables with respect to which to compute the Jacobian.
    parallel_iterations: The number of iterations to be done in parallel. Used
      to trade off memory consumption for speed: if None, the Jacobian
      computation is done fully in parallel, but requires the most memory.
Returns:
a tf.Tensor of Jacobians.
"""
return pfor_gradients.jacobian(
ys, xs, use_pfor=True, parallel_iterations=parallel_iterations)
def add_grads_to_jacobians(jacobians_dict, y, x_vars=None):
if not x_vars:
x_vars = jacobians_dict.keys()
for var in x_vars:
grads = tf.gradients(y, var)[0]
if grads is not None:
jacobians_dict[var] += grads
return jacobians_dict
def hessians(ys, xs, no_backprop=False):
if not no_backprop:
return tf.squeeze(tf.hessians(ys, xs)[0], axis=[0, 2])
grads = tf.gradients(ys, xs)[0][0]
# Note: it is important to use parallel_iterations=None here, to avoid an
# in graph while loop inside the jacobians computation, which itself is an
# in graph while loop. This is more efficient since we do not have a large
# number of parameters, but can use many samples to compute gradient estimator
# variance.
return tf.squeeze(
jacobians(grads, xs, parallel_iterations=None), axis=1)
def dist_var_jacobians(loss, dist, parallel_iterations=None):
return {p: jacobians(loss, p, parallel_iterations=parallel_iterations)
for p in dist.dist_vars}
def grad_loss_fn_with_jacobians(
gradient_estimator_fn, jacobian_parallel_iterations=None):
"""Wrap a gradient loss function to return a surrogate loss and jacobians."""
def grad_fn(function, dist_samples, dist):
output = gradient_estimator_fn(function, dist_samples, dist)
if isinstance(output, tf.Tensor):
loss = output
jacobian_dict = dist_var_jacobians(
loss, dist, parallel_iterations=jacobian_parallel_iterations)
else:
loss, jacobian_dict = output
return loss, jacobian_dict
return grad_fn
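# Usage sketch (illustrative; `_example_wrapped_estimator` is a hypothetical
# helper): wrap a gradient estimator so that it also returns per-variable
# Jacobians of the surrogate loss. Assumes `dist` exposes `dist_vars`, as
# dist_utils.MultiNormalDiagFromLogScale does.
def _example_wrapped_estimator(gradient_estimator_fn, function, dist):
  grad_fn = grad_loss_fn_with_jacobians(
      gradient_estimator_fn, jacobian_parallel_iterations=None)
  dist_samples = dist.sample(8)  # [num_samples, samples_dim]
  surrogate_loss, jacobian_dict = grad_fn(function, dist_samples, dist)
  return surrogate_loss, jacobian_dict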
| mc_gradients-master | monte_carlo_gradients/utils.py |
# Copyright 2019 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main experimental file."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import logging
import numpy as np
import tensorflow as tf
from monte_carlo_gradients import bayes_lr
from monte_carlo_gradients import blr_model_grad_utils
from monte_carlo_gradients import config
from monte_carlo_gradients import control_variates
from monte_carlo_gradients import data_utils
from monte_carlo_gradients import dist_utils
from monte_carlo_gradients import gradient_estimators
def _get_control_variate_fn():
"""Get control variate."""
control_variate = config.gradient_config.control_variate
if control_variate == 'delta':
return control_variates.control_delta_method
if control_variate == 'moving_avg':
return control_variates.moving_averages_baseline
if control_variate:
raise ValueError('Unsupported control variate')
return None
def _flag_checks():
if (config.gradient_config.type != 'score_function' and
      config.gradient_config.control_variate == 'moving_avg'):
raise ValueError(
'Only the score function estimator supports a moving average baseline')
def _get_grad_loss_fn():
"""Get gradient loss function."""
gradient_type_to_grad_loss_fn = {
'pathwise': gradient_estimators.pathwise_loss,
'score_function': gradient_estimators.score_function_loss,
'measure_valued': gradient_estimators.measure_valued_loss,
}
return gradient_type_to_grad_loss_fn[config.gradient_config.type]
def _variance_reduction():
return (config.gradient_config.control_variate or
config.gradient_config.type == 'measure_valued')
def get_ckpt_dir(output_dir):
ckpt_dir = os.path.join(output_dir, 'ckpt')
if not tf.gfile.IsDirectory(ckpt_dir):
tf.gfile.MakeDirs(ckpt_dir)
return ckpt_dir
def _jacobian_parallel_iterations():
# The pathwise estimator requires more memory since it uses the backward
# pass through the model, so we use less parallel iterations to compute
# jacobians.
return 100 if config.gradient_config.type == 'pathwise' else None
def _configure_hooks(train_loss):
checkpoint_saver_hook = tf.train.CheckpointSaverHook(
checkpoint_dir=get_ckpt_dir(config.experiment_dir),
save_steps=config.checkpoint_interval)
nan_hook = tf.train.NanTensorHook(train_loss)
# Step counter.
  step_counter_hook = tf.train.StepCounterHook()
  hooks = [checkpoint_saver_hook, nan_hook, step_counter_hook]
return hooks
def _add_summaries(summary_writer, metrics):
summary = tf.Summary()
for name, value in metrics.items():
summary.value.add(tag=name, simple_value=value)
summary_writer.add_summary(summary, metrics['step'])
summary_writer.flush()
def get_gradient_stats_for_logs(all_gradient_values):
"""Aggregate the stats for multiple mini batches of gradients."""
# all_gradient_values is a list of dictionaries, containing the
# jacobian values obtained at a particular iteration.
# The keys of the dictionaries are the variables of the model.
all_grads_list = {v: [] for v in all_gradient_values[0].keys()}
for grad_values in all_gradient_values:
for v, grads in grad_values.items():
all_grads_list[v].append(grads)
all_grads_np = {}
for v, grads in all_grads_list.items():
# Stack across batch dimension
all_grads_np[v] = np.concatenate(all_grads_list[v], axis=0)
# num_samples x num_params.
assert len(all_grads_np[v].shape) == 2
grad_stats = {}
logged_keys = []
for v, grads in all_grads_np.items():
    # Prefix the stats by the number of samples used to compute them.
num_eval_samples = grads.shape[0]
key_prefix = str(num_eval_samples) + '_samples_' + v
# Get the mean and variance per dimension, reducing over number of samples.
grad_mean = np.mean(grads, axis=0)
grad_variance = np.var(grads, axis=0)
# Get the mean and variance averaged over all dimensions.
grad_stats[key_prefix + '_grad_global_mean'] = np.mean(grad_mean)
# Max gradient variance across data dimensions.
grad_stats[key_prefix + '_grad_across_dim_max_var'] = np.max(grad_variance)
# Variance averaged across data dimensions.
grad_stats[key_prefix + '_grad_var_mean'] = np.mean(grad_variance)
# Estimate training variance by normalizing using the training number of
# samples.
num_training_samples = config.num_posterior_samples
    training_key_prefix = str(num_training_samples) + '_samples_' + v
    # Var(1/n \sum_{i=1}^{n} X_i) = 1/n Var(X_i) since our estimates are iid.
normalizing_factor = num_training_samples / num_eval_samples
grad_stats[training_key_prefix + '_grad_across_dim_max_var'] = np.max(
grad_variance) * normalizing_factor
grad_stats[training_key_prefix + '_grad_var_mean'] = np.mean(
grad_variance) * normalizing_factor
logged_keys += [
key_prefix + '_grad_across_dim_max_var', key_prefix + '_grad_var_mean']
return grad_stats, logged_keys
def metrics_fetch_dict(model_output):
return {
'elbo': tf.reduce_mean(model_output.elbo),
'kl': tf.reduce_mean(model_output.kl),
'accuracy': model_output.accuracy}
def run_multi_batch_metrics(metrics, sess, num_eval_batches):
stats = {k: 0 for k in metrics}
for _ in range(num_eval_batches):
metrics_values = sess.run(metrics)
for k, v in metrics_values.items():
stats[k] += v / num_eval_batches
return stats
def run_gradient_stats(jacobians, sess, num_eval_batches):
"""Compute metrics on multiple batches."""
all_jacobian_vals = []
for _ in range(num_eval_batches):
jacobian_vals = sess.run(jacobians)
for v in jacobian_vals.values():
# num_samples by num_params
assert len(v.shape) == 2
all_jacobian_vals += [jacobian_vals]
gradient_stats, grad_log_keys = get_gradient_stats_for_logs(all_jacobian_vals)
return gradient_stats, grad_log_keys
def _pretty_jacobians(jacobians):
# Go from variable to jacobian to variable name to jacobian.
return {v.name: j for v, j in jacobians.items()}
def main(argv):
del argv
# Training data.
features, targets = data_utils.get_sklearn_data_as_tensors(
batch_size=config.batch_size,
dataset_name=config.dataset_name)
# Eval data.
eval_features, eval_targets = data_utils.get_sklearn_data_as_tensors(
batch_size=None,
dataset_name=config.dataset_name)
dataset_size = eval_features.get_shape()[0]
data_dims = features.shape[1]
prior = dist_utils.multi_normal(
loc=tf.zeros(data_dims), log_scale=tf.zeros(data_dims))
with tf.variable_scope('posterior'):
posterior = dist_utils.diagonal_gaussian_posterior(data_dims)
model = bayes_lr.BayesianLogisticRegression(
prior=prior, posterior=posterior,
dataset_size=dataset_size,
use_analytical_kl=config.use_analytical_kl)
grad_loss_fn = _get_grad_loss_fn()
control_variate_fn = _get_control_variate_fn()
jacobian_parallel_iterations = _jacobian_parallel_iterations()
def model_loss(features, targets, posterior_samples):
num_posterior_samples_cv_coeff = config.num_posterior_samples_cv_coeff
return blr_model_grad_utils.model_surrogate_loss(
model,
features, targets, posterior_samples,
grad_loss_fn=grad_loss_fn,
control_variate_fn=control_variate_fn,
estimate_cv_coeff=config.estimate_cv_coeff,
num_posterior_samples_cv_coeff=num_posterior_samples_cv_coeff,
jacobian_parallel_iterations=jacobian_parallel_iterations)
posterior_samples = posterior.sample(config.num_posterior_samples)
train_loss, _ = model_loss(features, targets, posterior_samples)
train_loss = tf.reduce_mean(train_loss)
num_eval_posterior_samples = config.num_eval_posterior_samples
eval_posterior_samples = posterior.sample(num_eval_posterior_samples)
eval_model_output = model.apply(
eval_features, eval_targets, posterior_samples=eval_posterior_samples)
_, jacobians = model_loss(
eval_features, eval_targets, eval_posterior_samples)
eval_model_metrics = metrics_fetch_dict(eval_model_output)
jacobians = _pretty_jacobians(jacobians)
# Compute the surrogate loss without any variance reduction.
# Used as a sanity check and for debugging.
if _variance_reduction():
if control_variate_fn:
no_var_reduction_grad_fn = grad_loss_fn
no_var_reducion_prefix = 'no_control_variate'
elif config.gradient_config.type == 'measure_valued':
# Compute the loss and stats when not using coupling.
def no_var_reduction_grad_fn(function, dist_samples, dist):
return gradient_estimators.measure_valued_loss(
function, dist_samples, dist, coupling=False)
_, no_var_reduction_jacobians = blr_model_grad_utils.model_surrogate_loss(
model, eval_features, eval_targets, eval_posterior_samples,
grad_loss_fn=no_var_reduction_grad_fn,
jacobian_parallel_iterations=jacobian_parallel_iterations)
no_var_reduction_jacobians = _pretty_jacobians(no_var_reduction_jacobians)
no_var_reducion_prefix = 'no_coupling'
else:
# No variance reduction used. No reason for additional logging.
no_var_reduction_jacobians = {}
for j in no_var_reduction_jacobians.values():
assert j.get_shape().as_list()[0] == num_eval_posterior_samples
start_learning_rate = config.start_learning_rate
global_step = tf.train.get_or_create_global_step()
if config.cosine_learning_rate_decay:
training_steps = config.training_steps
learning_rate_multiplier = tf.math.cos(
np.pi / 2 * tf.cast(global_step, tf.float32) / training_steps)
else:
learning_rate_multiplier = tf.constant(1.0)
learning_rate = start_learning_rate * learning_rate_multiplier
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
train_op = optimizer.minimize(train_loss, global_step=global_step)
hyper_dict = {
'start_learning_rate': config.start_learning_rate,
'num_posterior_samples': config.num_posterior_samples,
'batch_size': config.batch_size}
summary_writer = tf.summary.FileWriter(
os.path.join(config.experiment_dir, 'logs'))
# Checkpointing.
hooks = _configure_hooks(train_loss)
i = -1
with tf.train.MonitoredSession(hooks=hooks) as sess:
logging.info('starting training')
for i in range(config.training_steps):
sess.run(train_op)
if (i + 1) % config.report_interval == 0:
# Training loss and debug ops.
logging.info('global_step %i', sess.run(global_step))
logging.info('training loss at step %i: %f', i, sess.run(train_loss))
# Compute multi batch eval metrics.
multi_batch_metrics = run_multi_batch_metrics(
eval_model_metrics, sess, config.num_eval_batches)
for key, value in multi_batch_metrics.items():
logging.info('%s at step %i: %f', key, i, value)
posterior_vars_value = sess.run(
{v.name: v for v in model.posterior.dist_vars})
for k, value in posterior_vars_value.items():
logging.info('%s avg at step %i: %f', k, i, np.mean(value))
metrics = multi_batch_metrics
metrics.update({'step': i})
metrics.update({'learning_rate': sess.run(learning_rate)})
metrics.update(hyper_dict)
if (i + 1) % config.grad_report_interval == 0:
gradient_stats, grad_log_keys = run_gradient_stats(
jacobians, sess, config.num_eval_batches)
for key in grad_log_keys:
logging.info(
'%s at step %i: %f', key, i, gradient_stats[key])
metrics.update(gradient_stats)
if no_var_reduction_jacobians:
no_var_reduction_grad_stats, grad_log_keys = run_gradient_stats(
no_var_reduction_jacobians, sess, config.num_eval_batches)
no_var_reduction_grad_stats = {
no_var_reducion_prefix + '_' + k: v
for k, v in no_var_reduction_grad_stats.items()}
metrics.update(no_var_reduction_grad_stats)
_add_summaries(summary_writer, metrics)
if __name__ == '__main__':
app.run(main)
| mc_gradients-master | monte_carlo_gradients/main.py |
# Copyright 2019 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of gradient estimators.
The gradient estimators below have the same API:
```
def grad_estimator(function, dist_samples, dist):
return surrogate_loss, jacobian_dict
```
where `function` is a function that takes in as input a
`[num_samples, samples_dim]` tensor, and returns a tensor of size `num_samples`.
Thus, `function` has to be parallelizable across the first input dimension.
This can be achieved either via linear algebra operations, or by using
`snt.BatchApply`. `dist_samples` is a tensor of size
`[num_samples, samples_dim]` samples obtained from the `tfp.Distributions`
instance `dist`.
A gradient estimator needs to return a surrogate loss (a tensor of shape
`num_samples`), that can be used for optimization via automatic differentiation,
as well as a dictionary from a distribution variable of `dist` to the Jacobians
of the surrogate loss with respect to that variable. The Jacobians are used
to monitor variance and are not used for training.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import sonnet as snt
import tensorflow as tf
from monte_carlo_gradients import dist_utils
from monte_carlo_gradients import utils
def score_function_loss(function, dist_samples, dist):
"""Computes the score_function surrogate loss."""
log_probs = dist.log_prob(tf.stop_gradient(dist_samples))
# log_probs is of the size of the number of samples.
utils.assert_rank(log_probs, 1)
# Broadcast the log_probs to the loss.
loss = tf.stop_gradient(function(dist_samples)) * log_probs
return loss
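# Minimal sketch of the estimator API described in the module docstring
# (illustrative; `_example_score_function_loss` is a hypothetical helper and
# the quadratic function below is an arbitrary test function).
def _example_score_function_loss():
  dist = dist_utils.diagonal_gaussian_posterior(data_dims=4)
  dist_samples = dist.sample(16)  # [num_samples, samples_dim]
  # Per-sample surrogate loss; differentiating its mean wrt the distribution
  # variables yields the score function gradient estimator.
  return score_function_loss(
      lambda x: tf.reduce_sum(tf.square(x), axis=-1), dist_samples, dist)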
def pathwise_loss(function, dist_samples, dist):
"""Computes the pathwise loss."""
del dist
return function(dist_samples)
def _apply_f(f, t):
return snt.BatchApply(f, 2)(t)
def _measure_valued_normal_mean_grad(
model_loss_fn, dist_samples, dist, coupling=True):
"""Computes the measure valued gradient wrt the mean of the Normal `dist`.
For details, see Section 6 of
"Monte Carlo Gradient Estimation in Machine learning".
Args:
model_loss_fn: A function for which to compute stochastic gradient for.
dist_samples: a tf.Tensor of samples from `dist`.
dist: A tfp.distributions.Distribution instance.
The code here assumes this distribution is from the Normal family.
coupling: A boolean. Whether or not to use coupling for the positive and
negative samples. Recommended: True, as this usually reduces variance.
Returns:
    A tf.Tensor of size `num_samples`, where `num_samples` is the number
    of samples (the first dimension) of `dist_samples`. The gradient of
    model_loss_fn(dist_samples) wrt the mean of `dist`.
"""
mean = dist.loc
# We will rely on backprop to compute the right gradient with respect
# to the log scale.
scale = dist.stddev()
utils.assert_rank(mean, 1)
utils.assert_rank(scale, 1)
# Duplicate the D dimension - N x D x D.
base_dist_samples = utils.tile_second_to_last_dim(dist_samples)
shape = dist_samples.shape
# N x D
pos_sample = dist_utils.sample_weibull(
shape, scale=tf.sqrt(2.), concentration=2.)
pos_sample.shape.assert_is_compatible_with(shape)
if coupling:
neg_sample = pos_sample
else:
neg_sample = dist_utils.sample_weibull(
shape, scale=tf.sqrt(2.), concentration=2.)
neg_sample.shape.assert_is_compatible_with(shape)
# N x D
positive_diag = mean + scale * pos_sample
positive_diag.shape.assert_is_compatible_with(shape)
# N x D
negative_diag = mean - scale * neg_sample
negative_diag.shape.assert_is_compatible_with(shape)
# Set the positive and negative - N x D x D
positive = tf.linalg.set_diag(base_dist_samples, positive_diag)
negative = tf.linalg.set_diag(base_dist_samples, negative_diag)
c = np.sqrt(2 * np.pi) * scale # D
f = model_loss_fn
# Broadcast the division.
grads = (_apply_f(f, positive) - _apply_f(f, negative)) / c
# grads - N x D
grads.shape.assert_is_compatible_with(shape)
return grads
def _measure_valued_normal_scale_grad(
function, dist_samples, dist, coupling=True):
"""Computes the measure valued gradient wrt the `scale of the Normal `dist`.
For details, see Section 6 of
"Monte Carlo Gradient Estimation in Machine learning".
Args:
function: A function for which to compute stochastic gradient for.
dist_samples: a tf.Tensor of samples from `dist`.
dist: A tfp.distributions.Distribution instance.
The code here assumes this distribution is from the Normal family.
coupling: A boolean. Whether or not to use coupling for the positive and
negative samples. Recommended: True, as this reduces variance.
Returns:
    A tf.Tensor of size `num_samples`, where `num_samples` is the number
    of samples (the first dimension) of `dist_samples`. The gradient of
    function(dist_samples) wrt the scale of `dist`.
"""
mean = dist.loc
# We will rely on backprop to compute the right gradient with respect
# to the log scale.
scale = dist.stddev()
utils.assert_rank(mean, 1)
utils.assert_rank(scale, 1)
# Duplicate the D dimension - N x D x D
base_dist_samples = utils.tile_second_to_last_dim(dist_samples)
shape = dist_samples.shape
# N x D
pos_sample = dist_utils.sample_ds_maxwell(shape, loc=0., scale=1.0)
if coupling:
neg_sample = dist_utils.std_gaussian_from_std_dsmaxwell(pos_sample)
else:
neg_sample = tf.random.normal(shape)
# N x D
positive_diag = mean + scale * pos_sample
positive_diag.shape.assert_is_compatible_with(shape)
# N x D
negative_diag = mean + scale * neg_sample
negative_diag.shape.assert_is_compatible_with(shape)
# Set the positive and negative values - N x D x D.
positive = tf.linalg.set_diag(base_dist_samples, positive_diag)
negative = tf.linalg.set_diag(base_dist_samples, negative_diag)
c = scale # D
f = function
# Broadcast the division.
grads = (_apply_f(f, positive) - _apply_f(f, negative)) / c
# grads - N x D
grads.shape.assert_is_compatible_with(shape)
return grads
def measure_valued_loss(
function, dist_samples, dist, coupling=True):
"""Computes the surrogate loss via measure valued derivatives.
Assumes `dist` is a Gaussian distribution.
For details, see Section 6 of
"Monte Carlo Gradient Estimation in Machine learning".
Args:
function: A function for which to compute stochastic gradient for.
dist_samples: a tf.Tensor of samples from `dist`.
dist: A tfp.distributions.Distribution instance.
The code here assumes this distribution is from the Normal family.
coupling: A boolean. Whether or not to use coupling for the positive and
negative samples. Recommended: True, as this reduces variance.
Returns:
A tuple of two elements:
      1) tf.Tensor of size `num_samples`, where `num_samples` is the number
of samples (the first dimension) of `dist_samples`. The surrogate loss
that can be used as an input to an optimizer.
2) A dictionary from distribution variable to Jacobians computed for
that variable.
"""
mean = dist.loc
# We will rely on backprop to compute the right gradient with respect
# to the log scale
scale = dist.stddev()
measure_valued_mean_grad = _measure_valued_normal_mean_grad(
function, dist_samples, dist, coupling=coupling)
measure_valued_scale_grad = _measure_valued_normal_scale_grad(
function, dist_samples, dist, coupling=coupling)
# Full surrogate loss which ensures the gradient wrt mean and scale are
# the ones given by the measure valued derivatives.
surrogate_loss = tf.stop_gradient(measure_valued_mean_grad) * mean
surrogate_loss += tf.stop_gradient(measure_valued_scale_grad) * scale
surrogate_loss.shape.assert_is_compatible_with(dist_samples.shape)
# Reduce over the data dimensions.
loss = tf.reduce_sum(surrogate_loss, axis=-1)
# Create the gradients with respect to the log scale, since that is
# what we plot in the other methods.
log_scale_grads = measure_valued_scale_grad * scale
jacobian_dict = {
dist.input_mean: measure_valued_mean_grad,
dist.log_scale: log_scale_grads}
return loss, jacobian_dict
| mc_gradients-master | monte_carlo_gradients/gradient_estimators.py |
# Copyright 2019 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Distribution utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
def multi_normal(loc, log_scale):
return MultiNormalDiagFromLogScale(loc=loc, log_scale=log_scale)
class MultiNormalDiagFromLogScale(tfd.MultivariateNormalDiag):
"""MultiNormalDiag which directly exposes its input parameters."""
def __init__(self, loc, log_scale):
scale = tf.exp(log_scale)
self._log_scale = log_scale
self._input_mean = loc
super(MultiNormalDiagFromLogScale, self).__init__(
loc, scale)
@property
def input_mean(self):
return self._input_mean
@property
def log_scale(self):
return self._log_scale
@property
def dist_vars(self):
return [self.input_mean, self.log_scale]
def diagonal_gaussian_posterior(data_dims):
mean = tf.Variable(
tf.zeros(shape=(data_dims), dtype=tf.float32), name='mean')
log_scale = tf.Variable(
tf.zeros(shape=(data_dims), dtype=tf.float32), name='log_scale')
return multi_normal(loc=mean, log_scale=log_scale)
def std_gaussian_from_std_dsmaxwell(std_dsmaxwell_samples):
"""Generate Gaussian variates from Maxwell variates.
Useful for coupling samples from Gaussian and double_sided Maxwell dist.
1. Generate ds-maxwell variates: dsM ~ dsMaxwell(0,1)
  2. Generate uniform variates: u ~ Unif(0,1)
  3. Multiply: y = u * dsM
  The result is a Gaussian distribution N(0,1) which can be loc-scale adjusted.
Args:
std_dsmaxwell_samples: Samples generated from a zero-mean, unit variance
double-sided Maxwell distribution M(0,1).
Returns:
Tensor of Gaussian variates with shape maxwell_samples.
"""
unif_rvs = tf.random.uniform(std_dsmaxwell_samples.shape)
gaussian_rvs = unif_rvs * std_dsmaxwell_samples
return gaussian_rvs
def sample_weibull(sh, scale, concentration):
distrib = tfp.distributions.TransformedDistribution(
distribution=tfp.distributions.Uniform(low=0., high=1. - 1e-6),
bijector=tfp.bijectors.Invert(
tfp.bijectors.Weibull(scale=scale, concentration=concentration)))
return distrib.sample(sh)
def sample_ds_maxwell(sh, loc, scale):
return tfd.DoublesidedMaxwell(loc=loc, scale=scale).sample(sh)
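# Coupling sketch (illustrative; `_example_coupled_samples` is a hypothetical
# helper): double-sided Maxwell variates and the standard Gaussian variates
# derived from them, as used by the measure valued estimator with coupling.
def _example_coupled_samples():
  dsmaxwell = sample_ds_maxwell((5, 3), loc=0., scale=1.0)
  gaussian = std_gaussian_from_std_dsmaxwell(dsmaxwell)
  return dsmaxwell, gaussian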
| mc_gradients-master | monte_carlo_gradients/dist_utils.py |
# Copyright 2021-2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Package setup."""
import os
import shutil
import subprocess
import setuptools
from setuptools import find_packages
from setuptools import setup
from setuptools.command import build_ext
with open("README.md", "r") as f:
long_description = f.read()
with open("requirements.txt", "r") as f:
dependencies = list(map(lambda x: x.strip(), f.readlines()))
class CMakeExtension(setuptools.Extension):
"""A Python extension that has been prebuilt by CMake.
  We do not want distutils to handle the build process for our extensions, so
  we pass an empty list to the super constructor.
"""
def __init__(self, name):
super().__init__(name, sources=[])
class BuildCMakeExtension(build_ext.build_ext):
"""Uses CMake to build extensions."""
def run(self):
self._build()
for ext in self.extensions:
self.build_extension(ext)
def _build(self):
print("Building C++ extension")
os.makedirs(self.build_temp, exist_ok=True)
subprocess.check_call(
["cmake"]
+ [os.path.join(os.getcwd(), "transformer_grammars/models/masking")],
cwd=self.build_temp,
)
subprocess.check_call(
["cmake", "--build", ".", "--", "-j"], cwd=self.build_temp
)
def build_extension(self, ext):
dest_path = self.get_ext_fullpath(ext.name)
build_path = os.path.join(self.build_temp, os.path.basename(dest_path))
shutil.copyfile(build_path, dest_path)
setup(
name="transformer_grammars",
version="1.0.0",
url="https://github.com/deepmind/transformer_grammars",
author="Laurent Sartran et al.",
author_email="[email protected]",
description="Implementation of Transformer Grammars",
long_description=long_description,
long_description_content_type="text/markdown",
packages=find_packages(),
install_requires=dependencies,
classifiers=[
"Programming Language :: Python :: 3",
],
cmdclass=dict(build_ext=BuildCMakeExtension),
ext_modules=[
CMakeExtension("transformer_grammars.models.masking.cpp_masking"),
],
python_requires=">=3.7",
test_suite="transformer_grammars",
dependency_links=[
"https://storage.googleapis.com/jax-releases/jax_cuda_releases.html"
],
)
| transformer_grammars-main | setup.py |
# Copyright 2021-2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Train a TG / TXL (trees) / TXL (words) model."""
# This needs to be put first -- it prevents TF from allocating GPU memory.
import os
os.environ["TF_ENABLED_DEVICE_TYPES"] = "CPU"
# pylint: disable=g-import-not-at-top,g-bad-import-order
import functools
from absl import app
from absl import flags
from ml_collections import config_flags
from transformer_grammars.training import train
_CONFIG = config_flags.DEFINE_config_file("config")
if __name__ == "__main__":
flags.mark_flag_as_required("config")
app.run(functools.partial(train.main, _CONFIG))
| transformer_grammars-main | train.py |
# Copyright 2021-2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sample from a trained model."""
# This needs to be put first -- it prevents TF from allocating GPU memory.
import os
os.environ["TF_ENABLED_DEVICE_TYPES"] = "CPU"
# pylint: disable=g-import-not-at-top,g-bad-import-order
import functools
from absl import app
from absl import flags
from transformer_grammars import sample
_CHECKPOINT = flags.DEFINE_string("checkpoint", None, "Checkpoint to load.")
_INPUT = flags.DEFINE_string(
"input", None, "File containing sequences to score, as tokenized TSV files."
)
_TOKENIZER = flags.DEFINE_string("tokenizer", None, "Tokenizer.")
_SEED = flags.DEFINE_integer("seed", 42, "Sampling PRNG seed.")
_TEMPERATURE = flags.DEFINE_float("temperature", 0.7, "Sampling temperature.")
_NUM_STEPS = flags.DEFINE_integer("num_steps", 300, "Sampling steps.")
if __name__ == "__main__":
app.run(
functools.partial(
sample.main,
_TOKENIZER,
_CHECKPOINT,
_INPUT,
_SEED,
_TEMPERATURE,
_NUM_STEPS
)
)
| transformer_grammars-main | sample.py |
# Copyright 2021-2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Score tokenized post-processed sequences."""
# This needs to be put first -- it prevents TF from allocating GPU memory.
import os
os.environ["TF_ENABLED_DEVICE_TYPES"] = "CPU"
# pylint: disable=g-import-not-at-top,g-bad-import-order
import functools
from absl import app
from absl import flags
from transformer_grammars import score
_CHECKPOINT = flags.DEFINE_string("checkpoint", None, "Checkpoint to load.")
_INPUT = flags.DEFINE_string(
"input", None, "File containing sequences to score, as tokenized TSV files."
)
_TOKENIZER = flags.DEFINE_string("tokenizer", None, "Tokenizer.")
_ADD_EOS = flags.DEFINE_bool(
"add_eos",
None,
(
"Whether to append EOS to sequences. Must be True for words, False for"
" trees."
),
)
if __name__ == "__main__":
app.run(
functools.partial(
score.main,
_TOKENIZER,
_CHECKPOINT,
_INPUT,
_ADD_EOS,
)
)
| transformer_grammars-main | score.py |
# Copyright 2021-2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts a file from tree representation to Choe-Charniak.
For example:
(S (NP the hungry cat) (VP meows)) (+ possibly pre-terminals)
is converted to
(S (NP the hungry cat NP) (VP meows VP) S)
"""
from typing import Sequence
from absl import app
from absl import flags
from transformer_grammars.data import text_processing
_INPUT = flags.DEFINE_string("input", None, "Input file")
_OUTPUT = flags.DEFINE_string("output", None, "Output file")
_HAS_PRETERMS = flags.DEFINE_bool(
"has_preterms",
True,
"Whether the input file contains preterminals (POS tags)",
)
_USE_UNTYPED_CLOSING_TERMINALS = flags.DEFINE_bool(
"use_untyped_closing_terminals",
False,
(
"Whether the output file should have typed closing non-terminals (e.g. "
"S), NP), etc.) or a single untyped closing non-terminal X)"
),
)
def main(argv: Sequence[str]) -> None:
del argv
text_processing.convert_to_choe_charniak(
_INPUT.value,
_OUTPUT.value,
has_preterms=_HAS_PRETERMS.value,
untyped_closing_terminal=_USE_UNTYPED_CLOSING_TERMINALS.value,
)
if __name__ == "__main__":
app.run(main)
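# Example invocation (illustrative; file names are placeholders):
#   python tools/convert_to_choe_charniak.py \
#     --input trees.txt --output choe_charniak.txt \
#     --has_preterms --nouse_untyped_closing_terminals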
| transformer_grammars-main | tools/convert_to_choe_charniak.py |
# Copyright 2021-2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Transforms the input structures.
This was used for our control experiments, and is not required otherwise.
3 modes are supported:
- reverse:
maps (A (B x) y) to (A x (B y))
- left-branching:
creates a left-branching binary tree with the labels of the input structure,
then attaches the extra terminals to the root node
- right-branching:
same thing, with a right-branching binary tree
NOTE: all of these operations are applied at the sentence-level.
"""
from typing import Sequence
from absl import app
from absl import flags
from absl import logging
import nltk
import tqdm
from transformer_grammars.data import constants
from transformer_grammars.data import transforms
_VALID_MODES = ("reverse", "lb", "rb")
_INPUT = flags.DEFINE_string("input", None, "Input file")
_OUTPUT = flags.DEFINE_string("output", None, "Output file")
_HAS_PRETERMS = flags.DEFINE_bool(
"has_preterms",
True,
"Whether the input file contains preterminals (POS tags)",
)
_DOC_LEVEL = flags.DEFINE_bool(
"document_level",
False,
"Whether the input file contains documents (DOC ...)",
)
_MODE = flags.DEFINE_enum(
"mode",
None,
_VALID_MODES,
"How the trees should be transformed. Must be one of reverse, lb, rb.",
)
def _transform_tree(tree, mode, has_preterms, doc_level):
"""Transforms a tree."""
assert mode in _VALID_MODES
if has_preterms:
tree = transforms.drop_pos_tags(tree)
if doc_level:
if tree.label() != "DOC":
raise RuntimeError(
"The label of the root node is %s, where DOC was expected."
% tree.label()
)
transformed_sentences = [
_transform_tree(sent, mode, False, False) for sent in tree
]
return nltk.Tree("DOC", transformed_sentences)
else:
return transforms.transform_sentence(tree, constants.TreeTransform(mode))
def main(argv: Sequence[str]) -> None:
del argv
input_fname = _INPUT.value
output_fname = _OUTPUT.value
mode = _MODE.value
has_preterms = _HAS_PRETERMS.value
doc_level = _DOC_LEVEL.value
logging.info("Input file: %s", input_fname)
logging.info("Output file: %s", output_fname)
logging.info("Mode: %s", mode)
logging.info("Has preterminals: %s", str(has_preterms))
logging.info("Document level: %s", str(doc_level))
with open(input_fname, "r") as in_f:
with open(output_fname, "w") as out_f:
for line in tqdm.tqdm(in_f):
tree = transforms.tree_from_string(line)
transformed_tree = _transform_tree(tree, mode, has_preterms, doc_level)
transformed_line = transforms.string_from_tree(transformed_tree) + "\n"
out_f.write(transformed_line)
if __name__ == "__main__":
app.run(main)
| transformer_grammars-main | tools/transform_trees.py |
# Copyright 2021-2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Build a dictionary from a list of whitespace-separated tokens."""
import itertools
import json
from absl import app
from absl import flags
from absl import logging
from transformer_grammars.data import constants
from transformer_grammars.data import text_processing
_INPUT_FNAME = flags.DEFINE_string("input", None, "Input filename.")
_OUTPUT_PREFIX = flags.DEFINE_string(
"output", None, "Output prefix for dictionary."
)
_CONVERT_TO_CC = flags.DEFINE_bool(
"convert_to_choe_charniak",
False,
(
"Whether the input should be converted to Choe-Charniak before being"
" processed further."
),
)
_USE_EXTRA_UNTYPED_CLOSING_NON_TERMINAL = flags.DEFINE_bool(
"use_extra_untyped_closing_non_terminal",
False,
(
"Whether the learnt dictionary should include an extra untyped closing"
" non-terminal."
),
)
def main(unused_argv):
# We accumulate terminals and non-terminals separately so that we can assign
# to each group a consecutive range of IDs.
terminals = set()
opening_non_terminals = set()
closing_non_terminals = set()
with open(_INPUT_FNAME.value, "r") as in_f:
for l in in_f:
l = l.strip()
if _CONVERT_TO_CC.value:
l = text_processing.choe_charniak_from_tree(l)
for word in l.split(" "):
if word == constants.PLACEHOLDER_TOKEN:
continue
elif word in constants.RESERVED_WORDS:
raise ValueError(
f"Cannot encode word {word} as it is a reserved word."
)
elif constants.OPENING_NON_TERMINAL_REGEXP.match(word):
if word not in opening_non_terminals:
logging.info("Found ONT: %s", word)
opening_non_terminals.add(word)
elif constants.CLOSING_NON_TERMINAL_REGEXP.match(word):
if word not in closing_non_terminals:
logging.info("Found CNT: %s", word)
closing_non_terminals.add(word)
else:
terminals.add(word)
num_reserved = len(constants.RESERVED_WORDS)
num_terminals = len(terminals)
num_opening_non_terminals = len(opening_non_terminals)
num_closing_non_terminals = len(closing_non_terminals)
start_idx = 0
for name, num in [
("reserved tokens", num_reserved),
("terminals", num_terminals),
("opening non terminals", num_opening_non_terminals),
("closing non terminals", num_closing_non_terminals),
]:
end_idx = start_idx + num
logging.info("%d %s, %d ≤ token_id < %d", num, name, start_idx, end_idx)
start_idx = end_idx
if num_closing_non_terminals != num_opening_non_terminals:
raise RuntimeError(
f"The number of opening non-terminals ({num_opening_non_terminals}) "
"does not match the number of closing non-terminals "
f"({num_closing_non_terminals}).")
if (
num_opening_non_terminals > 0
and _USE_EXTRA_UNTYPED_CLOSING_NON_TERMINAL.value
):
logging.info(
"Input has non-terminals tokens (Choe-Charniak representation)"
' so adding one extra untyped closing non-terminal ")".'
)
extra_untyped_closing_non_terminal = True
untyped_closing_non_terminals = [constants.UNTYPED_CLOSING_NON_TERMINAL]
else:
extra_untyped_closing_non_terminal = False
untyped_closing_non_terminals = []
dic_metadata = dict(
num_reserved=num_reserved,
num_terminals=num_terminals,
# We write the number of opening and closing non-terminals independently,
# to avoid being confusing with a single `num_non_terminals` which can be
# understood as either the number of non-terminals of each type, or the
# total number of non-terminals of both types.
num_opening_non_terminals=num_opening_non_terminals,
num_closing_non_terminals=num_closing_non_terminals,
extra_untyped_closing_non_terminal=extra_untyped_closing_non_terminal,
)
dic_fname = _OUTPUT_PREFIX.value + ".txt"
dic_metadata_fname = _OUTPUT_PREFIX.value + ".json"
with open(dic_fname, "w") as out_f:
for w in itertools.chain(
constants.RESERVED_WORDS,
sorted(terminals),
sorted(opening_non_terminals),
sorted(closing_non_terminals),
untyped_closing_non_terminals,
):
out_f.write(w + "\n")
with open(dic_metadata_fname, "w") as metadata_f:
json.dump(dic_metadata, metadata_f, indent=4)
if __name__ == "__main__":
app.run(main)
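# Example invocation (illustrative; file names are placeholders). This writes
# dict.txt (the token list) and dict.json (the metadata):
#   python tools/build_dictionary.py \
#     --input train_trees.txt --output dict --convert_to_choe_charniak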
| transformer_grammars-main | tools/build_dictionary.py |
# Copyright 2021-2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=line-too-long
r"""Postprocess documents after encoding with spm_encode.
After encoding with SentencePiece, documents represented as Choe-Charniak
strings contain a word piece for whitespace before and after non-terminals, e.g.
(DOC (S (NP the hungry cat NP) (VP meows VP) S) DOC)
is encoded into pieces as
▁ (DOC ▁ (S ▁ (NP ▁the ▁ hungry ▁ cat ▁ NP) ▁ (VP ▁me ow s ▁ VP) ▁
S) ▁ DOC)
59 7 59 19 59 12 62 59 9464 59 6104 59 39 59 25 1207 5209 63 59 52 59
46 59 34
It's not desirable to have such whitespaces after the non-terminals, as they
implicitly separate words. What we want instead is:
(DOC (S (NP ▁the ▁ hungry ▁ cat NP) (VP ▁me ow s VP) S) DOC)
7 19 12 62 59 9464 59 6104 39 25 1207 5209 63 52 46 34
(Or shall we even have (WORD ... WORD) constructs to delineate words?)
This script strips out the redundant whitespace tokens.
Usage:
python postprocess_encoded_docs.py -- \
--vocab model.vocab \
--input foo.txt \
--output foo2.txt
"""
from typing import Sequence
from absl import app
from absl import flags
from transformer_grammars.data import sp_utils
from transformer_grammars.data import text_processing
_VOCAB_FNAME = flags.DEFINE_string("vocab", None, ".vocab file to use")
_INPUT_FNAME = flags.DEFINE_string(
"input", None, "Input file, output of spm_encode"
)
_OUTPUT_FNAME = flags.DEFINE_string("output", None, "Output file")
def process_line(l, vocab):
"""Processes a single line from the input."""
input_ids = [int(x) for x in l.split(" ")]
return ",".join(
str(x) for x in text_processing.postprocess_token_ids(input_ids, vocab)
)
def main(argv: Sequence[str]) -> None:
del argv
with open(_VOCAB_FNAME.value, "r") as f:
vocab = sp_utils.SentencePieceVocab.from_vocab_file(f)
with open(_INPUT_FNAME.value, "r") as inp:
with open(_OUTPUT_FNAME.value, "w") as output:
for l in inp:
output.write(process_line(l, vocab) + "\n")
if __name__ == "__main__":
app.run(main)
| transformer_grammars-main | tools/postprocess_encoded_docs.py |
# Copyright 2021-2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Encode whitespace-separated tokens into integers using a dictionary.
The output is a CSV file, the rows of which contain encoded tokens for a single
sequence.
"""
from absl import app
from absl import flags
from absl import logging
from transformer_grammars.data import constants
from transformer_grammars.data import dictionary
from transformer_grammars.data import text_processing
_INPUT_FNAME = flags.DEFINE_string("input", None, "Input filename.")
_DICTIONARY_PREFIX = flags.DEFINE_string(
"dictionary", None, "Dictionary prefix (i.e. filename w/o extension)."
)
_OUTPUT_FNAME = flags.DEFINE_string("output", None, "Output filename for IDs.")
_CONVERT_TO_CC = flags.DEFINE_bool(
"convert_to_choe_charniak",
False,
(
"Whether the input should be converted to Choe-Charniak before being"
" processed further."
),
)
def _csv_from_list_of_ints(l):
return ",".join(map(str, l))
def main(_):
# Load the dictionary.
dic = dictionary.Dict()
with open(_DICTIONARY_PREFIX.value + ".txt", "r") as f:
dic.load_from_file(f)
dic.freeze()
max_len = 0
with open(_INPUT_FNAME.value, "r") as in_f:
with open(_OUTPUT_FNAME.value, "w") as out_f:
for l in in_f:
l = l.strip()
if _CONVERT_TO_CC.value:
l = text_processing.choe_charniak_from_tree(l)
encoded_l = []
for word in l.split(" "):
if (
word != constants.PLACEHOLDER_TOKEN
and word in constants.RESERVED_WORDS
):
raise ValueError(
"Cannot encode word %s as it is a reserved word." % word
)
id_ = dic[word]
encoded_l.append(id_)
max_len = max(max_len, len(encoded_l))
out_f.write(_csv_from_list_of_ints(encoded_l) + "\n")
logging.info("Maximum sequence length: %d", max_len)
if __name__ == "__main__":
app.run(main)
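# Example invocation (illustrative; file names are placeholders). The
# --dictionary prefix is the one produced by tools/build_dictionary.py:
#   python tools/encode_offline.py \
#     --input train_trees.txt --dictionary dict --output train_encoded.csv \
#     --convert_to_choe_charniak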
| transformer_grammars-main | tools/encode_offline.py |
# Copyright 2021-2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
| transformer_grammars-main | transformer_grammars/__init__.py |
# Copyright 2021-2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common code to training and evaluation."""
import haiku as hk
from transformer_grammars.models import lm
from transformer_grammars.models.masking import utils as masking_utils
def build_forward(model_cfg, maskrules, token_type_ranges, *, is_training):
"""Builds a forward function for the model."""
model_kwargs = dict(**model_cfg)
model_kwargs.pop("extra_attention_mask_name", None)
model_kwargs.pop("extra_attention_mask_kwargs", None)
model_kwargs["num_attns"] = maskrules.num_attention_functions
model_kwargs["vocab_size"] = token_type_ranges.vocab_size
@hk.transform_with_state
def forward(
*,
inputs,
inputs_ttypes,
attn_mask,
attn_relpos,
attn_indicator,
memory_attn_mask,
memory_padding_mask,
smartmem_mem_from_seq,
smartmem_mem_from_mem,
beginning_of_seq,
):
model = lm.GeneralizedTXLLanguageModel(**model_kwargs)
output, unused_layers_outputs = model(
inputs,
beginning_of_seq=beginning_of_seq,
token_type=inputs_ttypes,
attn_mask=attn_mask,
attn_relpos=attn_relpos,
attn_indicator=attn_indicator,
memory_attn_mask=memory_attn_mask,
memory_padding_mask=memory_padding_mask,
smartmem_mem_from_mem=smartmem_mem_from_mem,
smartmem_mem_from_seq=smartmem_mem_from_seq,
is_training=is_training,
)
return output
return forward
def build_maskrules(model_cfg):
maskrules_name = model_cfg.get("extra_attention_mask_name", "txl")
maskrules_kwargs = model_cfg.get("extra_attention_mask_kwargs", {})
return masking_utils.get_masking_rules(
maskrules_name,
sequence_length=model_cfg["sequence_length"],
memory_length=model_cfg["memory_length"],
**maskrules_kwargs,
)
def model_input_from_chunk(chunk, maskrules):
"""Returns model input from masking rules chunk."""
d = chunk._asdict()
for key in [
"memory_pos",
"depth",
"end_of_seq",
"labels",
"labels_ttypes",
"seq_idx",
]:
del d[key]
if not maskrules.use_relative_positions:
d["attn_relpos"] = None
return d
| transformer_grammars-main | transformer_grammars/common.py |
# Copyright 2021-2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sample from a trained model.
Whilst simple in principle, sampling is somewhat involved with our
implementation, which computes the attention mask, the relative positions, and
the memory update tensors outside of the JAX model core. This file shows one
way to do it, at the cost of one full forward pass per sampled token. Just like
for scoring, this is a naive implementation with batch size 1.
Warning: we do not verify here that the samples are correctly parenthesized
before passing them to the model. In our experience, this is not an issue with
a trained model, but might be one for very small or very undertrained ones.
"""
import functools
import jax
import jax.numpy as jnp
import numpy as np
from transformer_grammars import common
from transformer_grammars.data import preprocessing
from transformer_grammars.data import text_dataset
from transformer_grammars.data import tokenizer_utils as utils
from transformer_grammars.training import checkpoint
def _sequences_iterator(fname):
"""Iterator over the pretokenized dataset."""
ds = text_dataset.PreEncodedTextDataset(
filename=fname, num_samples=None, add_bos=True, add_eos=False
)
ds = ds.raw_dataset(
shuffle=False,
shuffle_buffer=None,
sample_without_replacement=False,
num_epochs=1,
)
return ds.as_numpy_iterator()
@functools.partial(jax.jit, static_argnums=(0, 1, 2))
def _call_model(forward, maskrules, temperature, params, state, key, chunk):
"""Calls the model for sampling purposes."""
model_inputs = common.model_input_from_chunk(chunk, maskrules)
logits, state = forward(params, state, rng=None, **model_inputs)
  # Find the last non-padding token, assuming that not all tokens are padding
  # (which should never happen in practice).
non_padding = jnp.greater(model_inputs["inputs"], 0).astype(jnp.int32)
last_non_padding = jnp.max(
jnp.arange(model_inputs["inputs"].shape[1]) * non_padding, axis=1
)
last_logits = jax.vmap(lambda x, idx: x[idx], in_axes=(0, 0))(
logits, last_non_padding
)
last_logits = last_logits.at[:, :2].add(-100.0) # Disallow PAD and BOS.
last_logits /= temperature
# (We could transform logits here.)
next_key, key = jax.random.split(key)
next_token = jax.random.categorical(key, last_logits, axis=1)
output = (next_token,)
# Batch size is 1, so drop the batch dimension inside the jitted call.
output = jax.tree_util.tree_map(lambda x: x[0], output)
return output, state, next_key
def _sample(forward, params, maskrules, ranges, dic, temperature, key, prefix,
num_steps):
"""Generates samples from a prefix.
We first pass the successive chunks corresponding to the prefix to the
model, and we sample a token from the last one. We append it to the prefix,
then call the model again, with the memory state as it was after the last
full chunk, and correspondingly skipping the parts of the input
corresponding to these full chunks.
As an example, assuming a Transformer Grammars model, a sequence length of
4, and a prefix "(S (NP the":
+---> predicted: hungry
|
+----+-----+-----+-------+
| (S | (NP | the | <pad> |
+----+-----+-----+-------+
+---> predicted: cat
|
+----+-----+-----+--------+
| (S | (NP | the | hungry |
+----+-----+-----+--------+
+---> predicted: NP)
|
+----+-----+-----+--------++-----+
| (S | (NP | the | hungry || cat |
+----+-----+-----+--------++-----+
As we are now in the second chunk, we can continue from the memory state
after "hungry", skipping "(S (NP the hungry". (We could have saved the state
one step before, but this requires a careful handling of the case where a
single sampled token extends the current prefix by 2, for closing
non-terminals. We do not do this given the minimal gains, and for
simplicity.)
+---> predicted: (VP
|
+-----+-----+-----+
| cat | NP) | NP) |
+-----+-----+-----+
+---> predicted: meows
|
+-----+-----+-----+-----+
| cat | NP) | NP) | (VP |
+-----+-----+-----+-----+
+---> predicted: VP)
|
+-----+-----+-----+-----++-------+
| cat | NP) | NP) | (VP || meows |
+-----+-----+-----+-----++-------+
+-------+-----+-----+
| meows | VP) | VP) |
+-------+-----+-----+
and so on.
Args:
forward: Forward function to call the model.
params: Model parameters.
maskrules: Masking rules object.
ranges: Token type ranges.
dic: Dictionary.
temperature: Temperature applied on the logits for sampling.
key: PRNG key.
prefix: Prefix to sample from.
num_steps: Number of sampling steps.
Returns:
Next RNG key.
"""
last_idx = None
state = None
seq = prefix
skip_chunks = 0
for _ in range(num_steps):
idx = 0
chunks = preprocessing.get_chunks_from_dataset(
[seq],
maskrules,
ranges,
shape_prefix=(1,),
multithread=False,
use_monitor_thread=False,
)
# Keep the linter happy:
chunk_idx = 0
chunk = None
for chunk_idx, chunk in enumerate(chunks):
# Skip chunks already evaluated.
if chunk_idx < skip_chunks:
idx += chunk.inputs.shape[1]
continue
(next_token,), next_state, key = _call_model(
forward, maskrules, temperature, params, state, key, chunk
)
inputs = chunk.inputs[0]
for inp in inputs:
if inp != 0: # Do not print padding tokens.
idx += 1
if last_idx is None:
# Print the initial prompt.
print(f">>> {dic[inp]}")
elif idx > last_idx:
# And tokens that have been added to the input since
# the previous step.
print(f"+++ {dic[inp]}")
# Do not update the state if this is the final chunk, because it
# may be incomplete.
if not chunk.end_of_seq.item():
state = next_state
assert chunk.end_of_seq.item()
next_token = int(jax.device_get(next_token))
assert next_token not in (0, 1) # PAD and BOS should not be sampled.
seq = np.concatenate([seq, [next_token]])
# Keep track of the last token printed.
last_idx = idx
# And how many chunks are to be skipped.
skip_chunks = chunk_idx
return key
def main(tokenizer, checkpoint_path, input_, seed, temperature, num_steps, _):
"""Sample."""
# Extract values from flag handles.
tokenizer = tokenizer.value
checkpoint_path = checkpoint_path.value
input_ = input_.value
seed = seed.value
temperature = temperature.value
num_steps = num_steps.value
# Get the token type ranges, i.e. which token IDs correspond to terminals,
# to opening non-terminals, to closing non-terminals, etc.
dic, ranges = utils.get_dictionary_and_ranges(tokenizer)
# Load the model checkpoint.
ckpt = checkpoint.load_checkpoint(checkpoint_path)
model_cfg = ckpt.config
params = ckpt.params
# Build the appropriate masking rules object.
maskrules = common.build_maskrules(model_cfg)
# Build the forward function that corresponds to the model config and
# masking rules.
forward = common.build_forward(
model_cfg, maskrules, ranges, is_training=False
).apply
# Get an iterator over the pre-tokenized dataset. This contains unbatched
# sequences of ints.
prefixes_it = _sequences_iterator(input_)
# Initialize the RNG used for sampling.
key = jax.random.PRNGKey(seed)
for prefix in prefixes_it:
key = _sample(
forward,
params,
maskrules,
ranges,
dic,
temperature,
key,
prefix,
num_steps,
)
| transformer_grammars-main | transformer_grammars/sample.py |
# Copyright 2021-2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Score tokenized post-processed sequences.
Note: This is a naive implementation with batch size 1, for simplicity. It can
be extended to support batch sizes greater than 1, or to return activations
from the model.
"""
import functools
import jax
import jax.numpy as jnp
from transformer_grammars import common
from transformer_grammars.data import preprocessing
from transformer_grammars.data import text_dataset
from transformer_grammars.data import tokenizer_utils as utils
from transformer_grammars.training import checkpoint
def _sequences_iterator(fname, add_eos):
"""Iterator over the pretokenized dataset."""
ds = text_dataset.PreEncodedTextDataset(
filename=fname, num_samples=None, add_bos=True, add_eos=add_eos
)
ds = ds.raw_dataset(
shuffle=False,
shuffle_buffer=None,
sample_without_replacement=False,
num_epochs=1,
)
return ds.as_numpy_iterator()
@functools.partial(jax.jit, static_argnums=(0, 1))
def _call_model(forward, maskrules, params, state, chunk):
"""Calls the model for scoring purposes."""
model_inputs = common.model_input_from_chunk(chunk, maskrules)
logits, state = forward(params, state, rng=None, **model_inputs)
log_probs = jax.nn.log_softmax(logits, axis=-1)
mask = jnp.logical_and(
jnp.greater(chunk.labels, 0), jnp.greater(chunk.seq_idx, -1)[:, None]
).astype(jnp.int32)
log_probs *= mask[:, :, None]
labels_log_probs = jax.vmap(jax.vmap(lambda t, idx: t[idx], 0, 0), 0, 0)(
log_probs, chunk.labels
)
chunk_log_prob = jnp.sum(labels_log_probs, axis=1)
output = (log_probs, labels_log_probs, chunk_log_prob)
# Batch size is 1, so drop the batch dimension inside the jitted call.
output = jax.tree_util.tree_map(functools.partial(jnp.squeeze, axis=0),
output)
return output, state
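# Illustrative sketch (not from the original module): the nested vmap in
# `_call_model` above gathers log_probs[b, t, labels[b, t]], i.e. the
# log-probability the model assigns to each label token. A minimal sketch with
# a toy [1, 2, 3] tensor, assuming nothing beyond standard JAX:
def _labels_log_probs_demo():
  import jax
  import jax.numpy as jnp
  log_probs = jnp.log(jnp.array([[[0.2, 0.3, 0.5], [0.1, 0.6, 0.3]]]))  # [B, T, V]
  labels = jnp.array([[2, 1]])                                          # [B, T]
  gathered = jax.vmap(jax.vmap(lambda t, idx: t[idx], 0, 0), 0, 0)(
      log_probs, labels
  )
  # Same gather expressed with take_along_axis.
  expected = jnp.take_along_axis(log_probs, labels[..., None], axis=-1)[..., 0]
  assert jnp.allclose(gathered, expected)  # log(0.5) and log(0.6)
  return gathered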
def main(tokenizer, checkpoint_path, input_, add_eos, _):
"""Score."""
  # Extract values from flag handles.
tokenizer = tokenizer.value
checkpoint_path = checkpoint_path.value
input_ = input_.value
add_eos = add_eos.value
# Get the token type ranges, i.e. which token IDs correspond to terminals,
# to opening non-terminals, to closing non-terminals, etc.
dic, ranges = utils.get_dictionary_and_ranges(tokenizer)
# Load the model checkpoint.
ckpt = checkpoint.load_checkpoint(checkpoint_path)
model_cfg = ckpt.config
params = ckpt.params
# Build the appropriate masking rules object.
maskrules = common.build_maskrules(model_cfg)
# Build the forward function that corresponds to the model config and
# masking rules.
forward = common.build_forward(
model_cfg, maskrules, ranges, is_training=False
).apply
# Get an iterator over the pre-tokenized dataset. This contains unbatched
# sequences of ints.
sequences_it = _sequences_iterator(input_, add_eos)
# Get an iterator over the batches (of chunks). We have batch size == 1 for
# simplicity here. Chunks are fixed-length successive portions of the
# sequence, plus auxiliary quantities that are computed out of the model
# but that the model requires (i.e. what the masking rules compute).
chunks_it = preprocessing.get_chunks_from_dataset(
sequences_it,
maskrules,
ranges,
shape_prefix=(1,),
multithread=False,
use_monitor_thread=False,
)
state = None
seq_log_prob = 0.0
total_log_prob = 0.0
for chunk in chunks_it:
(_, labels_log_probs, chunk_log_prob), state = _call_model(
forward, maskrules, params, state, chunk
)
inputs = chunk.inputs[0]
labels = chunk.labels[0]
seq_log_prob += chunk_log_prob
total_log_prob += chunk_log_prob
if chunk.beginning_of_seq.item():
print("=" * 80)
for inp, lab, lp in zip(inputs, labels, labels_log_probs):
if inp == 0:
continue
if lab != 0:
print(f"Input: {dic[inp]}\tLabel: {dic[lab]}\tLog prob: {lp:.2f}")
else:
print(f"Input: {dic[inp]}\tLabel: (no prediction)")
if chunk.end_of_seq.item():
print(f"Sequence log probability: {seq_log_prob:.2f}")
print("=" * 80)
print("")
seq_log_prob = 0.0
print(f"Total dataset log probability: {total_log_prob:.2f}")
| transformer_grammars-main | transformer_grammars/score.py |
# Copyright 2021-2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Checkpointing."""
import pickle
from typing import Any
from absl import logging
import chex
@chex.dataclass
class Checkpoint:
step: int
params: Any
opt_state: Any
config: Any
class CheckpointLoadingError(Exception):
pass
def load_checkpoint(fname) -> Checkpoint:
"""Loads a checkpoint from a file."""
try:
with open(fname, "rb") as f:
return pickle.load(f)
except Exception as ex:
logging.info("Exception %s raised when loading checkpoint", str(ex))
raise CheckpointLoadingError from ex
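# Illustrative usage sketch (not from the original module): loading a pickled
# checkpoint and reading its fields. The path "checkpoint.pkl" matches the one
# written by the training loop, but is only an assumption here.
def _example_load(path="checkpoint.pkl"):
  try:
    ckpt = load_checkpoint(path)
  except CheckpointLoadingError:
    return None
  # A Checkpoint bundles the training step, the model parameters, the optimizer
  # state, and the model config dict needed to rebuild the forward function.
  return ckpt.step, ckpt.config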
| transformer_grammars-main | transformer_grammars/training/checkpoint.py |
# Copyright 2021-2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Main training code."""
import functools
import json
import pickle
from typing import Any, Mapping
from absl import logging
import chex
import haiku as hk
import jax
import jax.numpy as jnp
import more_itertools
import optax
import tensorflow_datasets as tfds
from transformer_grammars import common
from transformer_grammars.data import preprocessing
from transformer_grammars.data import sp_utils
from transformer_grammars.data import text_dataset
from transformer_grammars.models import lr_schedules
from transformer_grammars.models.masking import utils as masking_utils
from transformer_grammars.training import checkpoint
def _get_first(tree):
return jax.tree_map(lambda arr: jax.device_get(arr[0]), tree)
def _replicate_to_local_devices(tree):
return jax.tree_map(
lambda arr: jax.device_put_replicated(arr, jax.local_devices()), tree
)
def _build_from_cfg(module, cfg):
builder = getattr(module, cfg.name)
return builder(**cfg.kwargs)
def _build_dataset_instance(ctor_name, kwargs):
if ctor_name == "PreEncodedTextDataset":
ctor = text_dataset.PreEncodedTextDataset
else:
raise NotImplementedError
return ctor(**kwargs)
def _build_input(
name,
batch_size,
dataset_cfg,
maskrules,
token_type_ranges,
*,
shuffle,
shuffle_buffer,
num_epochs,
peekable,
multithread,
):
"""Builds an input iterator."""
logging.info("Building %s dataset.", name)
num_devices = jax.device_count()
num_local_devices = jax.local_device_count()
global_batch_size = batch_size
per_device_batch_size, ragged = divmod(global_batch_size, num_devices)
if ragged:
raise ValueError(
f"Global batch size {global_batch_size} must be divisible by "
f"num devices {num_devices}"
)
logging.info(
(
"Global batch size: %d, num devices: %d, num local devices: %d, "
"per-device batch size: %d"
),
global_batch_size,
num_devices,
num_local_devices,
per_device_batch_size,
)
ds = _build_dataset_instance(dataset_cfg.name, dataset_cfg.kwargs)
ds = ds.raw_dataset(
shuffle=shuffle,
shuffle_buffer=shuffle_buffer,
sample_without_replacement=shuffle,
num_epochs=num_epochs,
seed=None,
)
it = tfds.as_numpy(ds)
it = preprocessing.get_chunks_from_dataset(
it,
maskrules,
token_type_ranges,
(num_local_devices, per_device_batch_size),
multithread=multithread,
use_monitor_thread=False,
)
if peekable:
it = more_itertools.peekable(it)
logging.info("Dataset built.")
return it
def _build_train_input(cfg, maskrules, token_type_ranges):
return _build_input(
"training",
cfg.batch_size,
cfg.dataset,
maskrules,
token_type_ranges,
shuffle=True,
shuffle_buffer=int(2e5),
num_epochs=None,
peekable=True,
multithread=True,
)
def _build_eval_input(cfg, maskrules, token_type_ranges):
return _build_input(
"evaluation",
cfg.batch_size,
cfg.dataset,
maskrules,
token_type_ranges,
shuffle=False,
shuffle_buffer=0,
num_epochs=1,
peekable=False,
multithread=False,
)
def _load_dictionary_metadata(config):
metadata_fname = config.dictionary_metadata_filename
with open(metadata_fname, "r") as f:
metadata = json.load(f)
logging.info("Loaded dictionary metadata:\n%s", repr(metadata))
return metadata
def _load_sentencepiece_vocab(config):
sentencepiece_vocab_filename = config.sentencepiece_vocab_filename
with open(sentencepiece_vocab_filename, "r") as f:
vocab = sp_utils.SentencePieceVocab.from_vocab_file(f)
logging.info("Loaded SentencePiece vocab:\n%s", repr(vocab))
return vocab
def _load_token_type_ranges(config):
"""Loads token type ranges info from dictionary metadata or SP .vocab file."""
if config.get("dictionary_metadata_filename", ""):
dic_metadata = _load_dictionary_metadata(config)
token_type_ranges = masking_utils.TokenTypeRanges.from_dictionary_metadata(
**dic_metadata
)
elif config.get("sentencepiece_vocab_filename", ""):
vocab = _load_sentencepiece_vocab(config)
token_type_ranges = masking_utils.TokenTypeRanges.from_sentencepiece_vocab(
vocab
)
else:
token_type_ranges = None
logging.info("Using token ranges:\n%s", repr(token_type_ranges))
return token_type_ranges
def _initialize_model(model_cfg, maskrules, token_type_ranges, init_rng, batch):
init_inputs = common.model_input_from_chunk(batch, maskrules)
forward = common.build_forward(
model_cfg, maskrules, token_type_ranges, is_training=True
)
p_init = jax.pmap(forward.init)
params, state = p_init(init_rng, **init_inputs)
return params, state
################################################################################
# Training
################################################################################
def _loss(apply, maskrules, vocab_size, params, state, rng, batch):
"""Computes the loss."""
inputs = common.model_input_from_chunk(batch, maskrules)
logits, state = apply(params, state, rng=rng, **inputs)
mask = jnp.logical_and(
jnp.greater(batch.labels, 0), jnp.greater(batch.seq_idx, -1)[:, None]
).astype(jnp.int32)
labels_one_hot = hk.one_hot(batch.labels, vocab_size)
loss = optax.softmax_cross_entropy(logits, labels_one_hot)
total_loss = jnp.sum(mask * loss)
total_count = jnp.sum(mask)
# Compute the average loss per-token for the batches received on each device
# independently, then use that to get the per-device gradient, then average
# those. This is fine here, as batches on each device roughly have the same
# number of non-masked tokens.
loss = total_loss / total_count
scaled_loss = loss / jax.device_count() # For gradients, to avoid a pmean.
aux = (state, (loss, total_loss, total_count))
return scaled_loss, aux
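# Illustrative sketch (not from the original module): a minimal numeric example
# of the masked average used in `_loss` above, assuming a toy batch where the
# last label is padding (id 0) so only two positions count.
def _masked_loss_demo():
  import jax
  import jax.numpy as jnp
  import optax
  logits = jnp.zeros((1, 3, 4))                    # [B, T, V], uniform predictions.
  labels = jnp.array([[2, 3, 0]])                  # Last position is padding.
  mask = jnp.greater(labels, 0).astype(jnp.int32)  # [[1, 1, 0]]
  loss = optax.softmax_cross_entropy(logits, jax.nn.one_hot(labels, 4))
  # Average over the 2 non-padding tokens only; each contributes log(4) here.
  return jnp.sum(mask * loss) / jnp.sum(mask)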
def _learning_rate(cfg, step):
return _build_from_cfg(lr_schedules, cfg)(step)
def _optimizer(cfg, learning_rate):
optimizer = getattr(optax, cfg.name)
return optimizer(learning_rate, **cfg.kwargs)
@chex.dataclass
class TrainingState:
rng: jnp.array
step: jnp.array
params: Any
state: Any
opt_state: Any
def _build_update(config, maskrules, token_type_ranges):
"""Builds the training state update function."""
forward = common.build_forward(
config.model, maskrules, token_type_ranges, is_training=True
)
def update(training_state, batch):
"""Updates the training state from a batch of data."""
loss_fn = functools.partial(
_loss, forward.apply, maskrules, token_type_ranges.vocab_size
)
grad_loss_fn = jax.grad(loss_fn, has_aux=True)
scaled_grads, (state, (loss, *_)) = grad_loss_fn(
training_state.params,
training_state.state,
training_state.rng,
batch,
)
grads = jax.lax.psum(scaled_grads, axis_name="i")
# Clip gradients
grad_norm = optax.global_norm(grads)
assert not grad_norm.shape
clip_grad_norm = config.training.get("clip_grad_norm", 0.0)
if clip_grad_norm:
      # Implement our own gradient clipping (by global norm), as optax's
      # doesn't handle gradients with zero norm (which shouldn't happen, but
      # still).
clipping_factor = jnp.minimum(1.0, clip_grad_norm / (grad_norm + 1e-6))
clipped_grads = jax.tree_util.tree_map(
lambda t: t * clipping_factor, grads
)
else:
clipped_grads = grads
clipped_grad_norm = optax.global_norm(clipped_grads)
# Compute and apply updates via our optimizer.
learning_rate = _learning_rate(
config.training.lr_schedule, training_state.step
)
_, opt_update = _optimizer(config.training.optimizer, learning_rate)
updates, opt_state = opt_update(grads, training_state.opt_state)
params = optax.apply_updates(training_state.params, updates)
# Compute norms.
params_norm = optax.global_norm(params)
update_norm = optax.global_norm(updates)
mask = jnp.greater(batch.inputs, 0).astype(jnp.int32)
indic_mean = jnp.sum(batch.attn_indicator * mask) / jnp.sum(mask)
# Scalars to log (note: we log the mean across all hosts/devices).
scalars = {
"loss": loss,
"learning_rate": learning_rate,
"params_norm": params_norm,
"update_norm": update_norm,
"unclipped_grad_norm": grad_norm,
"clipped_grad_norm": clipped_grad_norm,
"attn_indicator_mean": indic_mean,
"tokens_per_batch": jnp.sum(mask),
}
# These should be summed, not averaged, across devices.
scalars["tokens_per_batch"] *= jax.device_count()
scalars = jax.lax.pmean(scalars, axis_name="i")
step = training_state.step + 1
rng, _ = jax.random.split(training_state.rng)
new_training_state = TrainingState(
rng=rng, step=step, params=params, state=state, opt_state=opt_state
)
return new_training_state, scalars
return update
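# Illustrative sketch (not from the original module): the clipping step in
# `update` above rescales all gradients by min(1, clip_norm / global_norm).
# A minimal example with a toy gradient pytree whose global norm is 5:
def _clip_by_global_norm_demo(clip_grad_norm=1.0):
  import jax
  import jax.numpy as jnp
  import optax
  grads = {"w": jnp.array([3.0, 4.0])}  # Global norm sqrt(3^2 + 4^2) = 5.
  grad_norm = optax.global_norm(grads)
  clipping_factor = jnp.minimum(1.0, clip_grad_norm / (grad_norm + 1e-6))
  clipped = jax.tree_util.tree_map(lambda t: t * clipping_factor, grads)
  return optax.global_norm(clipped)  # Approximately clip_grad_norm.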
################################################################################
# Evaluation
################################################################################
def _build_evaluator(eval_cfg, model_cfg, maskrules, token_type_ranges):
"""Builds the evaluator function."""
apply = common.build_forward(
model_cfg, maskrules, token_type_ranges, is_training=False
).apply
def eval_batch(params, state, batch):
rng = None
_, aux = _loss(
apply,
maskrules,
token_type_ranges.vocab_size,
params,
state,
rng,
batch,
)
state, (_, total_loss, total_count) = aux
total_loss = jax.lax.psum(total_loss, axis_name="i")
total_count = jax.lax.psum(total_count, axis_name="i")
return state, (total_loss, total_count)
p_eval_batch = jax.pmap(eval_batch, axis_name="i")
ds = _build_eval_input(eval_cfg, maskrules, token_type_ranges)
ds = more_itertools.seekable(ds)
def eval_epoch(py_step, training_state):
logging.info("Evaluating at step %d.", py_step)
params = training_state.params
state = None
total_loss = 0.0
total_count = 0.0
for batch in ds:
state, batch_metrics = p_eval_batch(params, state, batch)
batch_metrics = _get_first(batch_metrics)
total_loss += batch_metrics[0]
total_count += batch_metrics[1]
logging.info(
"[eval % 10d] total_loss=%s\ttotal_count=%d",
py_step,
total_loss,
total_count,
)
ds.seek(0) # Reset the evaluation dataset without recreating it.
return eval_epoch
################################################################################
# Main loop
################################################################################
def _split_init_and_train_rngs(seed, _):
orig_rng = jax.random.PRNGKey(seed)
init_rng, train_rng = jax.random.split(orig_rng)
train_rng = jax.random.fold_in(train_rng, jax.lax.axis_index("i"))
return init_rng, train_rng
def _should_do(cfg, py_step):
return py_step % cfg.interval_steps == 0
def _log(unused_cfg, py_step, metrics):
metrics_str = "\t".join(
(
f"{k}={_get_first(v)!s}" for (k, v) in metrics.items()
)
)
logging.info("[train % 9d] %s", py_step, metrics_str)
def _save_checkpoint(unused_cfg, py_step, training_state, model_cfg):
logging.info("Saving checkpoint at step %d.", py_step)
params = _get_first(training_state.params)
opt_state = _get_first(training_state.opt_state)
ckpt = checkpoint.Checkpoint(
step=py_step,
params=params,
opt_state=opt_state,
config=model_cfg.to_dict(),
)
with open("checkpoint.pkl", "wb") as f:
pickle.dump(ckpt, f)
def _reload_from_checkpoint(_, current_state):
ckpt = checkpoint.load_checkpoint("checkpoint.pkl")
params = _replicate_to_local_devices(ckpt.params)
opt_state = _replicate_to_local_devices(ckpt.opt_state)
py_step = ckpt.step
jax_step = _replicate_to_local_devices(jnp.array(py_step, dtype=jnp.int32))
training_state = current_state.replace(
step=jax_step, params=params, opt_state=opt_state
)
return training_state, py_step
def _log_shapes(mapping, prefix=""):
for k, v in mapping.items():
key = f"{prefix}/{k}" if prefix else k
if isinstance(v, Mapping):
_log_shapes(v, prefix=key)
elif isinstance(v, tuple):
for i, elem in enumerate(v):
_log_shapes(elem, prefix=key + f"[{i}]")
else:
logging.info("\t%s: %s", key, repr(v.shape[1:]))
def main(config, _):
"""Simultaneous train+eval loop."""
jnp.set_printoptions(precision=4)
# Load the config
config = config.value
# Checks.
if jax.local_device_count() < jax.device_count():
raise RuntimeError("Multiple processes (hosts) training is not supported.")
# Setup the RNGs.
dummy_input = jax.device_put_replicated(jnp.zeros(()), jax.local_devices())
init_rng, train_rng = jax.pmap(
functools.partial(_split_init_and_train_rngs, 0), axis_name="i"
)(
dummy_input # Dummy input, unused.
)
# Load token type ranges.
token_type_ranges = _load_token_type_ranges(config)
# Load masking rules.
# Because these carry properties that the model core needs to know about,
# build them early.
maskrules = common.build_maskrules(config.model)
# Create the training dataset.
ds = _build_train_input(config.training, maskrules, token_type_ranges)
first_batch = ds.peek()
# Create the update function.
p_update = jax.pmap(
_build_update(config, maskrules, token_type_ranges), axis_name="i"
)
# Create the evaluator.
evaluator = _build_evaluator(
config.evaluation, config.model, maskrules, token_type_ranges
)
# Initialize the training state.
params, state = _initialize_model(
config.model, maskrules, token_type_ranges, init_rng, first_batch
)
opt_init, _ = _optimizer(config.training.optimizer, 0.0)
opt_state = jax.pmap(opt_init)(params)
step = _replicate_to_local_devices(jnp.zeros((), dtype=jnp.int32))
training_state = TrainingState(
rng=train_rng,
step=step,
params=params,
state=state,
opt_state=opt_state,
)
# Keep a Python and a JAX (on-device) copy of the current step to avoid
# transfers.
py_step = 0
logging.info("Parameters shapes:")
_log_shapes(training_state.params)
# Possibly overwrite it from a checkpoint (except for the RNG)
try:
training_state, py_step = _reload_from_checkpoint(None, training_state)
except checkpoint.CheckpointLoadingError:
logging.warning(
"No checkpoint found, or unusable -- starting from scratch."
)
else:
logging.warning("Checkpoint found -- restarting from step %d.", py_step)
# Training loop.
logging.info("Starting training.")
while py_step < config.training.num_steps:
training_state, metrics = p_update(training_state, next(ds))
py_step += 1
last = py_step == config.training.num_steps
if last or _should_do(config.logging, py_step):
_log(config.logging, py_step, metrics)
if last or _should_do(config.checkpointing, py_step):
_save_checkpoint(
config.checkpointing, py_step, training_state, config.model
)
if last or _should_do(config.evaluation, py_step):
evaluator(py_step, training_state)
logging.info("Training complete.")
| transformer_grammars-main | transformer_grammars/training/train.py |
# Copyright 2021-2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Embedding layer."""
import haiku as hk
import jax.numpy as jnp
class EmbeddingLayer(hk.Module):
"""Embedding layer that allows weight sharing."""
def __init__(
self,
embedding_size: int,
vocab_size: int,
dtype: jnp.dtype = jnp.float32,
share_weights: bool = True,
output_bias: bool = True,
name: str = "embedding_layer",
):
"""Initialises the module."""
super().__init__(name=name)
self._embedding_size = embedding_size
self._vocab_size = vocab_size
self._dtype = dtype
self._output_bias = output_bias
self._embedding_weights = hk.get_parameter(
name="input_embedding",
shape=[self._vocab_size, self._embedding_size],
dtype=self._dtype,
init=hk.initializers.VarianceScaling(
distribution="uniform", mode="fan_out"
),
)
if share_weights:
self._output_weights = jnp.transpose(self._embedding_weights)
else:
self._output_weights = hk.get_parameter(
name="output_weights",
shape=[self._embedding_size, self._vocab_size],
dtype=self._dtype,
init=hk.initializers.VarianceScaling(
distribution="uniform", mode="fan_out", scale=1.0
),
)
def encode(self, input_tokens: jnp.ndarray) -> jnp.ndarray:
"""Map tokens to embeddings."""
assert jnp.issubdtype(input_tokens.dtype, jnp.integer)
    # If you don't wrap the ids in a singleton tuple, then JAX will try to
    # unpack them along the row dimension and treat each row as a separate
    # index into one of the dimensions of the array. The error only surfaces
    # when indexing with a DeviceArray, while indexing with a numpy.ndarray
    # works fine. See https://github.com/google/jax/issues/620 for details.
    # Cast to a jnp array in case `input_tokens` is a tracer (e.g. inside a
    # dynamic_unroll).
return jnp.asarray(self._embedding_weights)[(input_tokens,)]
def decode(self, embeddings: jnp.ndarray) -> jnp.ndarray:
"""Decode embeddings to token logits."""
out = jnp.matmul(embeddings, self._output_weights)
if self._output_bias:
bias = hk.get_parameter(
"bias",
shape=[self._vocab_size],
dtype=self._dtype,
init=jnp.zeros,
)
out += bias
return out
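# Illustrative usage sketch (not from the original module): with
# share_weights=True, `decode` reuses the transpose of the `encode` table, so
# tying adds no parameters beyond the output bias. A minimal sketch with toy
# sizes (8-dim embeddings, vocab of 16):
def _embedding_layer_demo():
  import haiku as hk
  import jax
  import jax.numpy as jnp
  def forward(tokens):
    layer = EmbeddingLayer(embedding_size=8, vocab_size=16, share_weights=True)
    return layer.decode(layer.encode(tokens))
  transformed = hk.transform(forward)
  tokens = jnp.array([[1, 2, 3]])
  params = transformed.init(jax.random.PRNGKey(0), tokens)
  logits = transformed.apply(params, None, tokens)  # Shape [1, 3, 16].
  return logits.shape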
| transformer_grammars-main | transformer_grammars/models/embedding_layer.py |
# Copyright 2021-2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Learning rate schedule functions."""
import jax.numpy as jnp
import numpy as np
def cosine_anneal(min_lr, max_lr, cosine_cycle_length):
"""Cosine annealing from max_lr to min_lr in cosine_cycle_length steps."""
def schedule(step):
t = jnp.minimum(step / cosine_cycle_length, 1.0)
cosine_decay = 0.5 * (1.0 + jnp.cos(np.pi * t))
return min_lr + (max_lr - min_lr) * cosine_decay
return schedule
def linear_warmup(min_lr, max_lr, num_steps):
"""Linear warmup schedule from min_lr to max_lr in num_steps."""
def schedule(step):
step = jnp.minimum(step, num_steps)
return min_lr + (step / num_steps) * (max_lr - min_lr)
return schedule
def constant_lr(lr):
"""Constant learning rate."""
def schedule(unused_step):
del unused_step
return lr
return schedule
def linear_warmup_then_cosine_anneal(
start_lr, max_lr, min_lr, warmup_steps, cosine_cycle_length
):
"""Linear warmup for warmup_steps steps followed by cosine anneal."""
linear_schedule = linear_warmup(start_lr, max_lr, warmup_steps)
cosine_schedule = cosine_anneal(min_lr, max_lr, cosine_cycle_length)
def schedule(step):
return jnp.where(
step < warmup_steps,
linear_schedule(step),
cosine_schedule(step - warmup_steps),
)
return schedule
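# Illustrative sketch (not from the original module): evaluating the combined
# schedule at a few steps, assuming a 10-step warmup from 0 to 1e-3 followed by
# a cosine anneal down to 1e-5 over 100 steps.
def _lr_schedule_demo():
  schedule = linear_warmup_then_cosine_anneal(
      start_lr=0.0, max_lr=1e-3, min_lr=1e-5, warmup_steps=10,
      cosine_cycle_length=100)
  # Mid-warmup (~5e-4), end of warmup (1e-3), end of the cosine cycle (1e-5).
  return [float(schedule(step)) for step in (5, 10, 110)]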
def inverse_sqrt(warmup_steps):
def schedule(step):
return 1 / jnp.sqrt(jnp.maximum(step, warmup_steps))
return schedule
| transformer_grammars-main | transformer_grammars/models/lr_schedules.py |
# Copyright 2021-2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for transformer_grammars.models.lm."""
import unittest
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
from transformer_grammars.models import lm
class AbstractArray(object):
"""Abstract JAX array."""
def __init__(self, shape, dtype):
self.shape = shape
self.dtype = jnp.dtype(dtype)
def get_inputs():
batch_size = 64
sequence_length = 256
inputs = AbstractArray((batch_size, sequence_length), jnp.int32)
inputs_ttypes = AbstractArray((batch_size, sequence_length), jnp.int32)
attn_mask = AbstractArray(
(batch_size, sequence_length, sequence_length), jnp.int32
)
attn_relpos = AbstractArray(
(batch_size, sequence_length, 2 * sequence_length), jnp.int32
)
attn_indicator = AbstractArray((batch_size, sequence_length), jnp.int32)
memory_attn_mask = AbstractArray(
(batch_size, sequence_length, sequence_length), jnp.int32
)
memory_padding_mask = AbstractArray((batch_size, sequence_length), jnp.int32)
smartmem_mem_from_seq = AbstractArray(
(batch_size, sequence_length, sequence_length), jnp.int32
)
smartmem_mem_from_mem = AbstractArray(
(batch_size, sequence_length, sequence_length), jnp.int32
)
beginning_of_seq = AbstractArray((batch_size,), jnp.int32)
return dict(
seq=inputs,
token_type=inputs_ttypes,
beginning_of_seq=beginning_of_seq,
attn_mask=attn_mask,
attn_relpos=attn_relpos,
attn_indicator=attn_indicator,
memory_attn_mask=memory_attn_mask,
memory_padding_mask=memory_padding_mask,
smartmem_mem_from_mem=smartmem_mem_from_mem,
smartmem_mem_from_seq=smartmem_mem_from_seq,
)
def apply_fn(**kwargs):
model = lm.GeneralizedTXLLanguageModel(
vocab_size=32768,
d_model=1024,
num_layers=16,
num_heads=8,
ffw_hidden_size=4096,
embedding_dropout=0.1,
core_dropout=0.1,
core_output_dropout=0.1,
sequence_length=256,
memory_length=256,
tied_input_output_embeddings=True,
relative_position_embeddings=1,
tied_layer_weights=0,
)
output, _ = model(**kwargs, is_training=True)
return output
class CoreTest(unittest.TestCase):
def test_expected_num_params(self):
inputs_dict = get_inputs()
transformed = hk.transform_with_state(apply_fn)
params, _ = jax.eval_shape(
transformed.init, jax.random.PRNGKey(0), **inputs_dict
)
num_params = sum([np.product(x.shape) for x in jax.tree_flatten(params)[0]])
self.assertEqual(num_params, 251887616)
if __name__ == "__main__":
unittest.main()
| transformer_grammars-main | transformer_grammars/models/lm_test.py |
# Copyright 2021-2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
| transformer_grammars-main | transformer_grammars/models/__init__.py |
# Copyright 2021-2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generalized Transformer-XL implementation.
Extended for TG:
- to accept any mask, relative positions
- to possibly apply different attention functions at different positions
- to tie layers, or not
- to have some layers be restricted, or not
- to have some heads be restricted, or not
"""
# pylint: disable=g-complex-comprehension
from typing import Any, Callable, Mapping, Optional, Tuple
import haiku as hk
import jax
from jax import lax
import jax.numpy as jnp
import numpy as np
# The memory consists of the inputs to every transformer layer, and has
# shape [num_layers, batch_size, memory_length, d_model]
TransformerMemory = jnp.ndarray # pylint: disable=invalid-name
def layer_norm(
name: Optional[str] = None,
) -> Callable[[jnp.ndarray], jnp.ndarray]:
return hk.LayerNorm(axis=-1, create_scale=True, create_offset=True, name=name)
def broadcast_batch(x: jnp.ndarray, batch_size: int) -> jnp.ndarray:
return jnp.broadcast_to(x, (batch_size,) + x.shape)
def make_attention_mask(
input_mask: jnp.ndarray,
memory_mask: Optional[jnp.ndarray] = None,
extra_attention_mask: Optional[jnp.ndarray] = None,
extra_memory_attention_mask: Optional[jnp.ndarray] = None,
memory_len: Optional[int] = None,
dtype: jnp.dtype = jnp.float32,
causal: Optional[bool] = True,
) -> jnp.ndarray:
"""Creates the attention mask out of component masks.
Args:
input_mask: Array of shape [B, T].
memory_mask: Optional array of shape [B, T, M].
extra_attention_mask: Optional array of shape [B, T, T].
extra_memory_attention_mask: Optional array of shape [B, T, M].
memory_len: M.
dtype: Return dtype.
causal: Whether the mask is causal.
Returns:
    A pair of arrays of shape [B, T, T + M]: the attention mask with the extra
    restrictions applied, and the unrestricted attention mask.
"""
batch_size, seq_len = input_mask.shape
if causal:
causal_mask = np.tril(np.ones((seq_len, seq_len), dtype=np.bool_))
# Ensure that the baked-in constant is only of size (seq_len, seq_len).
causal_mask = lax.tie_in(input_mask, causal_mask)
attention_mask = input_mask[:, None, :] * causal_mask[None, :, :]
else:
attention_mask = jnp.broadcast_to(
input_mask[:, np.newaxis, :], [batch_size, seq_len, seq_len]
)
attention_mask = attention_mask.astype(dtype)
unrestricted_attention_mask = attention_mask
if extra_attention_mask is not None:
attention_mask *= extra_attention_mask
# Prepend memory_mask to attention_mask if needed.
if memory_len:
if memory_mask is None:
mask_shape = (seq_len, memory_len)
memory_mask = np.ones(mask_shape, dtype=np.bool_)
# Ensure that the baked-in constant is only of size (seq_len, mem_len).
memory_mask = lax.tie_in(input_mask, memory_mask)
memory_mask = broadcast_batch(memory_mask.astype(dtype), batch_size)
unrestricted_memory_mask = memory_mask
if extra_memory_attention_mask is not None:
memory_mask *= extra_memory_attention_mask
attention_mask = jnp.concatenate((memory_mask, attention_mask), axis=-1)
unrestricted_attention_mask = jnp.concatenate(
(unrestricted_memory_mask, unrestricted_attention_mask), axis=-1
)
# Verify we did it right.
assert attention_mask.dtype == dtype
assert attention_mask.shape == (batch_size, seq_len, seq_len + memory_len)
assert unrestricted_attention_mask.dtype == attention_mask.dtype
assert unrestricted_attention_mask.shape == attention_mask.shape
return attention_mask, unrestricted_attention_mask
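# Illustrative sketch (not from the original module): with a single fully-valid
# input of length 3 and a memory of length 2, the causal mask lets position t
# attend to every memory slot and to input positions <= t, so both returned
# masks have shape [1, 3, 3 + 2]. With no extra restrictions they are equal.
def _make_attention_mask_demo():
  import jax.numpy as jnp
  input_mask = jnp.ones((1, 3), dtype=jnp.float32)
  mask, unrestricted_mask = make_attention_mask(
      input_mask, memory_len=2, causal=True)
  assert mask.shape == (1, 3, 5)
  return mask, unrestricted_mask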
def _apply_mask_to_tensor(x, mask):
num_mask_dims = len(mask.shape)
num_x_dims = len(x.shape)
assert num_x_dims >= num_mask_dims
assert x.shape[:num_mask_dims] == mask.shape
for _ in range(num_x_dims - num_mask_dims):
mask = jnp.expand_dims(mask, -1)
return x * mask
def _apply_mask_to_pytree(x, mask):
f = lambda y: _apply_mask_to_tensor(y, mask)
return jax.tree_map(f, x)
def switch(funcs, indicator, *args, **kwargs):
"""Applies one of the functions, depending on the value of the indicator."""
num_classes = len(funcs)
assert indicator is not None or len(funcs) == 1
if len(funcs) == 1:
f = funcs[0]
return f(*args, **kwargs)
outs = [f(*args, **kwargs) for f in funcs]
masks = [jnp.equal(indicator, i) for i in range(num_classes)]
masked_outs = [
_apply_mask_to_pytree(x, mask) for (x, mask) in zip(outs, masks)
]
  # Sum the masked outputs leaf-wise: exactly one mask is non-zero at each
  # position, so this selects the output of the indicated function.
  return jax.tree_map(lambda *leaves: sum(leaves), *masked_outs)
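# Illustrative sketch (not from the original module): `switch` with two
# elementwise functions and a per-position indicator. Positions where the
# indicator is 0 use the first function, positions where it is 1 the second.
def _switch_demo():
  import jax.numpy as jnp
  funcs = [lambda x: x + 1.0, lambda x: x * 10.0]
  indicator = jnp.array([0, 1, 0])
  x = jnp.array([1.0, 2.0, 3.0])
  return switch(funcs, indicator, x)  # [2., 20., 4.]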
def _rel_shift_inner(logits: jnp.ndarray, attention_length: int) -> jnp.ndarray:
"""Shifts the relative logits.
  This is more general than the original Transformer-XL implementation, as
inputs may also see the future. (The implementation does not rely on a
causal mask removing the upper-right triangle.)
Given attention length 3 and inputs:
[[-3, -2, -1, 0, 1, 2],
[-3, -2, -1, 0, 1, 2],
[-3, -2, -1, 0, 1, 2]]
The shifted output is:
[[0, 1, 2],
[-1, 0, 1],
[-2, -1, 0]]
Args:
logits: input tensor of shape [T_q, T_v + T_q]
attention_length: T_v `int` length of the attention, should be equal to
memory size + sequence length.
Returns:
A shifted version of the input of size [T_q, T_v]. In each row, a window
of size T_v elements is kept. The window starts at the rightmost end, for
the first row. It then shifts left by 1 for each subsequent row.
"""
if logits.ndim != 2:
raise ValueError("`logits` needs to be an array of dimension 2.")
tq, total_len = logits.shape
assert total_len == tq + attention_length
logits = jnp.reshape(logits, [total_len, tq])
logits = lax.slice(logits, (1, 0), logits.shape) # logits[1:]
logits = jnp.reshape(logits, [tq, total_len - 1])
# Equiv to logits[:, :attention_length].
logits = lax.slice(logits, (0, 0), (tq, attention_length))
return logits
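# Illustrative sketch (not from the original module): reproducing the docstring
# example above with concrete numbers (query length 3, attention length 3).
def _rel_shift_demo():
  import numpy as np
  import jax.numpy as jnp
  logits = jnp.asarray(np.tile(np.arange(-3, 3), (3, 1)), dtype=jnp.float32)  # [3, 6]
  shifted = _rel_shift_inner(logits, attention_length=3)
  expected = jnp.asarray([[0.0, 1.0, 2.0], [-1.0, 0.0, 1.0], [-2.0, -1.0, 0.0]])
  assert jnp.array_equal(shifted, expected)
  return shifted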
def relative_shift(logits: jnp.ndarray, attention_length: int) -> jnp.ndarray:
fn = lambda t: _rel_shift_inner(t, attention_length)
return jax.vmap(jax.vmap(fn))(logits)
class MultiHeadAttention(hk.Module):
"""Multihead attention module with relative position encodings and memory.
With TG changes to accept any mask/relative position.
"""
def __init__(
self,
*,
value_size: int,
key_size: int,
num_heads: int,
init_scale: float,
dropout_rate: float,
use_bias: bool,
use_final_bias: bool,
final_init_scale_multiplier: float,
min_relative_position: Optional[int] = None,
max_relative_position: Optional[int] = None,
apply_final_linear: bool = True,
name: str = "multihead_attention",
):
"""Initialises the MultiHeadAttention module."""
super().__init__(name=name)
self._value_size = value_size
self._key_size = key_size
self._num_heads = num_heads
self._dropout_rate = dropout_rate
self._init_scale = init_scale
self._final_init_scale = final_init_scale_multiplier * init_scale
self._use_bias = use_bias
self._use_final_bias = use_final_bias
self._min_relative_position = min_relative_position
self._max_relative_position = max_relative_position
self._apply_final_linear = apply_final_linear
@hk.transparent
def _multihead_linear(self, inputs: jnp.ndarray, hidden_size: int, name: str):
linear = hk.Linear(
self._num_heads * hidden_size,
with_bias=self._use_bias,
w_init=hk.initializers.VarianceScaling(scale=self._init_scale),
name=name,
)
out = linear(inputs)
return jnp.reshape(out, inputs.shape[:-1] + (self._num_heads, hidden_size))
def __call__(
self,
inputs: jnp.ndarray,
attention_mask: jnp.ndarray,
positional_encodings: Optional[jnp.ndarray],
memory: Optional[jnp.ndarray],
relative_positions: Optional[jnp.ndarray],
is_training: bool,
) -> jnp.ndarray:
"""Computes the attention values.
    We use the following shape conventions: `B` for batch size, `T` for chunk
    size, `M` for memory length and `D` for the embedding dimension.
Args:
inputs: Array of shape [B, T, D]
attention_mask: Array of shape [B, T, M + T] indicating which attention
pairs are valid.
positional_encodings: Optional array of shape [B, R, D] where R is the
number of possible relative positions.
memory: Optional array of extra attention values of shape [B, M, D]
relative_positions: Optional relative position indication, of shape [B, T,
T], taking values in the interval [-T, T+M], indicating the relative
position of query[b, i] vs. key[b, j] in relative_positions[b, i, j]. In
a usual TXL, this is i - j.
is_training: Whether to apply dropout
Returns:
An array of shape [B, T, D] the result of applying self-attention to
inputs, unless apply_final_linear is False, in which case the returned
value is an array of shape [B, T, H, V].
"""
batch_size, seq_len, embedding_size = inputs.shape
queries = inputs
if memory is None:
values = inputs
else:
values = jnp.concatenate([memory, inputs], axis=1)
query_heads = self._multihead_linear(queries, self._key_size, "query")
# query_heads has shape [B, T, H, d_keys]
key_heads = self._multihead_linear(values, self._key_size, "key")
# key_heads has shape [B, T + M, H, d_keys]
value_heads = self._multihead_linear(values, self._value_size, "value")
# value_heads has shape [B, T + M, H, d_value]
if positional_encodings is not None:
logits = self._relative_position_embeddings(
query_heads,
key_heads,
positional_encodings,
relative_positions,
is_training,
)
else:
logits = jnp.einsum("bthd,bThd->bhtT", query_heads, key_heads)
scaled_logits = logits * self._key_size ** (-0.5)
# Mask logits by subtracting a large number (1e30). These become 0 after
# the exponentiation in the softmax.
assert attention_mask.dtype == scaled_logits.dtype
masked_logits = scaled_logits - (1 - attention_mask[:, None, :, :]) * 1e30
assert masked_logits.dtype == scaled_logits.dtype
weights = jax.nn.softmax(masked_logits)
if is_training:
weights = hk.dropout(hk.next_rng_key(), self._dropout_rate, weights)
attn_vec = jnp.einsum("bhtT,bThd->bthd", weights, value_heads)
if self._apply_final_linear:
attn_vec = jnp.reshape(
attn_vec,
[batch_size, seq_len, self._num_heads * self._value_size],
)
final_linear = hk.Linear(
embedding_size,
w_init=hk.initializers.VarianceScaling(scale=self._final_init_scale),
with_bias=self._use_final_bias,
)
outputs = final_linear(attn_vec)
else:
outputs = attn_vec # [B, T, H, V]
return outputs
@hk.transparent
def _relative_position_embeddings(
self,
query_heads: jnp.ndarray,
key_heads: jnp.ndarray,
positional_encodings: jnp.ndarray,
relative_positions: Optional[jnp.ndarray],
is_training: bool,
) -> jnp.ndarray:
"""Compute attention using the given encodings."""
r_w_bias = hk.get_parameter(
"r_w_bias",
[self._num_heads * self._key_size],
init=hk.initializers.VarianceScaling(),
)
r_w_bias = jnp.reshape(r_w_bias, [self._num_heads, self._key_size])
content_logits = jnp.einsum(
"bthd,bThd->bhtT",
query_heads + r_w_bias,
key_heads,
)
batch_size = query_heads.shape[0]
if is_training:
positional_encodings = broadcast_batch(positional_encodings, batch_size)
positional_encodings = hk.dropout(
hk.next_rng_key(), self._dropout_rate, positional_encodings
)
relative_keys = self._multihead_linear(
positional_encodings, self._key_size, "relative_keys"
)
# relative_keys has shape [B, R, H, d_keys]
    # The training branch above already broadcast the positional encodings
    # across the batch (before applying dropout); otherwise broadcast the
    # relative keys here.
if not is_training:
relative_keys = broadcast_batch(relative_keys, batch_size)
r_r_bias = hk.get_parameter(
"r_r_bias",
[self._num_heads * self._key_size],
init=hk.initializers.VarianceScaling(),
) # i.e. v in TXL paper
r_r_bias = jnp.reshape(r_r_bias, [self._num_heads, self._key_size])
# Reminder: query_heads has shape [B, T, H, d_keys]
relative_logits = jnp.einsum(
"bthd,bThd->bhtT", query_heads + r_r_bias, relative_keys
)
# relative_logits has shape [B, H, T, R]
if relative_positions is None:
relative_logits = relative_shift(
relative_logits, attention_length=key_heads.shape[1]
)
else:
assert self._max_relative_position is not None
assert self._min_relative_position is not None
relative_positions = jnp.clip(
relative_positions,
self._min_relative_position,
self._max_relative_position,
)
# Here, instead of doing the relative shift, which is justified because
# when we go one token to the right in the queries, the keys all become
# further away by one position, we need to do a gather to pick, given the
# relative positions matrix, the right relative logits.
relative_positions_ = (
self._max_relative_position - relative_positions
).astype(jnp.int32)
      # We index in a TPU-friendly way, via a one-hot contraction rather than
      # a gather:
relative_positions_one_hot = jax.nn.one_hot(
relative_positions_, num_classes=relative_logits.shape[-1]
)
relative_logits = jnp.einsum(
"bhtT,btsT->bhts", relative_logits, relative_positions_one_hot
)
# Instead of doing nested vmaps:
# def h(x, idx):
# return x[idx]
# def g(x, idx):
# return jax.vmap(h, (0, None), 0)(x, idx)
# def f(x, idx):
# return jax.vmap(g, (1, 0), 1)(x, idx)
# relative_logits = jax.vmap(f, (0, 0), 0)(
# relative_logits, relative_positions_)
assert content_logits.shape == relative_logits.shape
return content_logits + relative_logits
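# Illustrative sketch (not from the original module): the one-hot einsum in
# `_relative_position_embeddings` above is a TPU-friendly gather. For a toy
# [B, H, T, R] logits tensor and [B, T, S] positions it selects
# logits[b, h, t, positions[b, t, s]].
def _one_hot_gather_demo():
  import jax
  import jax.numpy as jnp
  logits = jnp.arange(12, dtype=jnp.float32).reshape(2, 1, 2, 3)  # [B, H, T, R]
  positions = jnp.array([[[0, 2], [1, 1]], [[2, 0], [0, 1]]])     # [B, T, S]
  one_hot = jax.nn.one_hot(positions, num_classes=logits.shape[-1])
  gathered = jnp.einsum("bhtT,btsT->bhts", logits, one_hot)
  assert gathered.shape == (2, 1, 2, 2)
  assert gathered[0, 0, 1, 0] == logits[0, 0, 1, positions[0, 1, 0]]
  return gathered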
class DenseBlock(hk.Module):
"""Dense block."""
def __init__(
self,
*,
ffw_hidden_size: int,
dropout_rate: float,
init_scale: float,
final_init_scale_multiplier: float,
use_final_bias: bool,
activation: Callable[[jnp.ndarray], jnp.ndarray],
name: Optional[str] = None,
):
super().__init__(name=name)
self._ffw_hidden_size = ffw_hidden_size
self._dropout_rate = dropout_rate
self._init_scale = init_scale
self._final_init_scale = init_scale * final_init_scale_multiplier
self._use_final_bias = use_final_bias
self._activation = activation
def __call__(self, x: jnp.ndarray, is_training: bool) -> jnp.ndarray:
d_model = x.shape[-1]
x = hk.Linear(
self._ffw_hidden_size,
w_init=hk.initializers.VarianceScaling(self._init_scale),
)(x)
x = self._activation(x)
if is_training:
x = hk.dropout(hk.next_rng_key(), self._dropout_rate, x)
return hk.Linear(
d_model,
w_init=hk.initializers.VarianceScaling(self._final_init_scale),
with_bias=self._use_final_bias,
)(x)
def _suffixes(n):
if n == 1:
yield (0, "")
else:
for i in range(n):
yield (i, str(i))
def _make_block_head_hybrid(
*,
layer: int,
mha_kwargs: Mapping[str, Any],
ffw_kwargs: Mapping[str, Any],
dropout_rate: float,
num_heads: int,
num_unrestricted_heads: int,
embedding_size: int,
num_attns: int = 1,
):
"""Generalized Transformer-XL block, with restricted/unrestricted heads."""
num_restricted_heads = num_heads - num_unrestricted_heads
restricted_attns = [
MultiHeadAttention(
name=f"h{layer}_attn{suffix}",
num_heads=num_restricted_heads,
apply_final_linear=False,
**mha_kwargs,
)
for i, suffix in _suffixes(num_attns)
]
unrestricted_attn = MultiHeadAttention(
name=f"h{layer}_unrestricted_attn",
num_heads=num_unrestricted_heads,
apply_final_linear=False,
**mha_kwargs,
)
# pylint: disable=protected-access
post_attn_linears = [
hk.Linear(
embedding_size,
w_init=hk.initializers.VarianceScaling(
scale=unrestricted_attn._final_init_scale
),
with_bias=unrestricted_attn._use_final_bias,
name=f"h{layer}_attn{suffix}_linear",
)
for i, suffix in _suffixes(num_attns)
]
# pylint: enable=protected-access
dense_block = DenseBlock(name=f"h{layer}_mlp", **ffw_kwargs)
ln1 = layer_norm(name=f"h{layer}_ln1")
ln2 = layer_norm(name=f"h{layer}_ln2")
def f(
*,
is_training: bool,
inputs: jnp.ndarray,
attention_mask: jnp.ndarray,
unrestricted_attention_mask: jnp.ndarray,
positional_encodings: jnp.ndarray,
txl_positional_encodings: jnp.ndarray,
memory: jnp.ndarray,
relative_positions: Optional[jnp.ndarray],
attn_indicator: Optional[jnp.ndarray],
):
if attn_indicator is None:
assert num_attns == 1
batch_size, seq_len, _ = inputs.shape
attn_vecs = []
if num_restricted_heads > 0:
restricted_attn_vec = switch(
restricted_attns,
attn_indicator,
inputs=inputs,
attention_mask=attention_mask,
positional_encodings=positional_encodings,
memory=memory,
is_training=is_training,
relative_positions=relative_positions,
)
attn_vecs.append(restricted_attn_vec)
if num_unrestricted_heads > 0:
unrestricted_attn_vec = unrestricted_attn(
inputs=inputs,
attention_mask=unrestricted_attention_mask,
positional_encodings=txl_positional_encodings,
memory=memory,
relative_positions=None,
is_training=is_training,
)
attn_vecs.append(unrestricted_attn_vec)
attn_vec = jnp.concatenate(attn_vecs, axis=2)
# pylint: disable=protected-access
attn_vec = jnp.reshape(
attn_vec,
[batch_size, seq_len, num_heads * unrestricted_attn._value_size],
)
# pylint: enable=protected-access
h_attention = switch(post_attn_linears, attn_indicator, attn_vec)
if is_training:
h_attention = hk.dropout(hk.next_rng_key(), dropout_rate, h_attention)
h = ln1(inputs + h_attention)
h_ffw = dense_block(h, is_training)
if is_training:
h_ffw = hk.dropout(hk.next_rng_key(), dropout_rate, h_ffw)
h = ln2(h + h_ffw)
return h
return f
def _make_block(
*,
layer: int,
mha_kwargs: Mapping[str, Any],
ffw_kwargs: Mapping[str, Any],
dropout_rate: float,
num_heads: int,
embedding_size: int,
num_attns: int = 1,
):
"""Generalized Transformer-XL block."""
del embedding_size
attns = [
MultiHeadAttention(
name=f"h{layer}_attn{suffix}", num_heads=num_heads, **mha_kwargs
)
for i, suffix in _suffixes(num_attns)
]
dense_block = DenseBlock(name=f"h{layer}_mlp", **ffw_kwargs)
ln1 = layer_norm(name=f"h{layer}_ln1")
ln2 = layer_norm(name=f"h{layer}_ln2")
def f(
*,
is_training: bool,
inputs: jnp.ndarray,
attention_mask: jnp.ndarray,
unrestricted_attention_mask: jnp.ndarray,
positional_encodings: jnp.ndarray,
txl_positional_encodings: jnp.ndarray,
memory: jnp.ndarray,
relative_positions: Optional[jnp.ndarray],
attn_indicator: Optional[jnp.ndarray],
):
# Delete unused, received for compatibility with head-hybrid.
del unrestricted_attention_mask, txl_positional_encodings
if attn_indicator is None:
assert num_attns == 1
h_attention = switch(
attns,
attn_indicator,
inputs=inputs,
attention_mask=attention_mask,
positional_encodings=positional_encodings,
memory=memory,
is_training=is_training,
relative_positions=relative_positions,
)
if is_training:
h_attention = hk.dropout(hk.next_rng_key(), dropout_rate, h_attention)
h = ln1(inputs + h_attention)
h_ffw = dense_block(h, is_training)
if is_training:
h_ffw = hk.dropout(hk.next_rng_key(), dropout_rate, h_ffw)
h = ln2(h + h_ffw)
return h
return f
def _extract_for_layer(x, layer_idx, unrestricted_layer, y=None):
if unrestricted_layer:
return y
if not isinstance(x, tuple):
return x
idx = layer_idx % len(x)
return x[idx]
def _sinusoid_position_encoding(
max_value: int,
min_value: int,
hidden_size: int,
max_timescale: float = 1e4,
min_timescale: float = 2.0,
):
"""Creates sinusoidal encodings.
The time dimension is larger than sequence_length as we need to cover all
cases of looking in either the future or past.
Args:
max_value: `int` max position M (appearing in the first row of the output)
min_value: `int` min position m (appearing in the last row of the output)
hidden_size: `int` dimension of the positional encoding vectors, D
    max_timescale: `float` maximum timescale for the frequency
    min_timescale: `float` minimum timescale for the frequency
Returns:
An array of shape [M - m, D]
"""
assert min_value <= max_value
freqs = np.arange(0, hidden_size, min_timescale)
inv_freq = max_timescale ** (-freqs / hidden_size)
# Since inputs can look into the past and into the future, depending on the
# permutation mask, we need to have relative encodings for both. The furthest
# back an input can see is the final token, up to sequence_length +
# memory_length - 1. The furthest ahead an input can see is for token 0 where
# it can see up to sequence_length - 1 future tokens.
pos_seq = np.arange(max_value, min_value, -1.0)
sinusoid_inp = np.einsum("i,j->ij", pos_seq, inv_freq)
pos_emb = np.concatenate(
[np.sin(sinusoid_inp), np.cos(sinusoid_inp)], axis=-1
)
return pos_emb
class Core(hk.Module):
"""Generalized Transformer-XL-based core."""
def __init__(
self,
d_model: int,
num_layers: int,
num_heads: int,
key_size: int,
value_size: int,
ffw_hidden_size: int,
dropout_rate: float,
memory_length: int,
relative_position_embeddings: bool = True,
use_attn_bias: bool = False,
activation: Callable[[jnp.ndarray], jnp.ndarray] = jax.nn.gelu,
tied_layer_weights: bool = False,
num_attns: int = 1,
num_unrestricted_layers: int = 0,
min_relative_position: Optional[int] = None,
max_relative_position: Optional[int] = None,
num_unrestricted_heads: Optional[int] = None,
name: str = "core",
):
"""Initialises the module.
Args:
d_model: Size of the embeddings.
num_layers: Number of transformer block layers.
num_heads: Number of attention heads to use.
key_size: Size of key (and query) embedding for attention.
value_size: Size of value embedding for attention.
ffw_hidden_size: Hidden size for MLP that follows attention.
dropout_rate: How much dropout to apply to attention and MLP modules.
memory_length: How many tokens to hold in memory.
relative_position_embeddings: Whether to use relative position embeddings.
use_attn_bias: Whether or not to use biases in attention linear layers.
activation: The nonlinearity to use in the DenseBlocks.
tied_layer_weights: If True, all the layers share the same weights.
num_attns: Number of attention functions. 1 by default, can be 2 to use
different weights for stack and compose attention.
num_unrestricted_layers: Number of regular TXL (no Transformer Grammar
tweak) layers (0 means no regular TXL layer, n > 0 means n at the top of
the stack, n < 0 means -n at the bottom of the stack)
min_relative_position: (TG only) Minimum value for the relative positions
that can be passed to the model.
max_relative_position: (TG only) Maximum value for the relative positions
that can be passed to the model.
num_unrestricted_heads: (TG only) For TG layers, number of unrestricted
(TXL) heads.
name: The Haiku name of the module.
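    Example (a minimal sketch mirroring the unit tests; the hyperparameter
    values are illustrative, not recommended settings):
      def forward(input_embeddings, input_mask, memory):
        model = Core(d_model=32, num_layers=2, num_heads=4, key_size=8,
                     value_size=8, ffw_hidden_size=128, dropout_rate=0.1,
                     memory_length=5)
        return model(input_embeddings, input_mask, memory, is_training=False)
      init_fn, apply_fn = hk.without_apply_rng(hk.transform(forward))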
"""
super().__init__(name=name)
if tied_layer_weights and num_unrestricted_layers:
raise ValueError(
"tied_layer_weights and num_unrestricted_layers are incompatible."
)
if (
num_unrestricted_heads is not None
) and not 0 <= num_unrestricted_heads <= num_heads:
raise ValueError(
f"The number of unrestricted heads must be less than the "
f"number of heads: {num_unrestricted_heads} vs. {num_heads}."
)
self._d_model = d_model
self._num_layers = num_layers
self._dropout_rate = dropout_rate
self._memory_length = memory_length
self._relative_position_embeddings = relative_position_embeddings
self._tied_layer_weights = tied_layer_weights
self._num_attns = num_attns
self._num_unrestricted_layers = num_unrestricted_layers
self._min_relative_position = min_relative_position
self._max_relative_position = max_relative_position
self._num_heads = num_heads
self._num_unrestricted_heads = num_unrestricted_heads
self._mha_kwargs = dict(
value_size=value_size,
key_size=key_size,
init_scale=2.0 / self._num_layers,
dropout_rate=self._dropout_rate,
use_bias=use_attn_bias,
use_final_bias=True,
final_init_scale_multiplier=1.0,
)
self._ffw_kwargs = dict(
ffw_hidden_size=ffw_hidden_size,
dropout_rate=self._dropout_rate,
init_scale=2.0 / self._num_layers,
final_init_scale_multiplier=1.0,
use_final_bias=True,
activation=activation,
)
def __call__(
self,
input_embeddings: jnp.ndarray,
input_mask: jnp.ndarray,
memory: Optional[TransformerMemory] = None,
memory_mask: Optional[jnp.ndarray] = None,
extra_attention_mask: Optional[jnp.ndarray] = None,
extra_memory_attention_mask: Optional[jnp.ndarray] = None,
relative_positions: Optional[jnp.ndarray] = None,
attn_indicator: Optional[jnp.ndarray] = None,
smartmem_mem_from_seq: Optional[jnp.ndarray] = None,
smartmem_mem_from_mem: Optional[jnp.ndarray] = None,
is_training: bool = True,
) -> Tuple[jnp.ndarray, Optional[TransformerMemory], jnp.ndarray]:
"""Computes the logits and next memory.
Args:
input_embeddings: array of shape [B, T, d_model]
input_mask: Padding mask of shape [B, T].
memory: Optional memory of shape [N_layers, B, M, d_model]
memory_mask: Optional memory mask of shape [B, T, M].
extra_attention_mask: Optional attention mask of shape [B, T, T]. This
mask will be used in addition to the input_mask and the causal_mask.
extra_memory_attention_mask: Optional attention mask of shape [B, T, M].
This mask will be used in addition to the memory_mask.
relative_positions: Optional relative position indication, of shape [B, T,
T+M], taking values in the interval [-T, T+M], indicating the relative
position of query[b, i] vs. key[b, j] in relative_positions[b, i, j],
with key computed from the concatenated [memory, inputs]. In a usual
TXL, this is i - j + M.
attn_indicator: Optional attention function indicator, i.e. which
attention function to use for each position. Shape [B, T].
      smartmem_mem_from_seq: Smart memory -- indicator array of shape [B, T, M]
        whose entry [b, i, j] is 1 iff the embedding at position i in the
        current sequence should be written to slot j of the memory for the
        next chunk.
      smartmem_mem_from_mem: Smart memory -- indicator array of shape [B, M, M]
        whose entry [b, i, j] is 1 iff the embedding at slot i of the current
        memory should be written to slot j of the memory for the next chunk.
is_training: Whether to use dropout.
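    Example:
      An illustrative way to build the plain-TXL variant of
      `relative_positions` (i - j + M), mirroring the unit tests, assuming
      batch size B, sequence length T and memory length M:
        rp = M + np.arange(T)[:, None] - np.arange(T + M)[None, :]
        relative_positions = np.broadcast_to(rp, (B, T, T + M))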
Returns:
      A tuple containing:
        - The final layer embeddings, of shape [B, T, d_model].
        - The new memory (of shape [N_layers, B, M, d_model]) if `memory` is
          given, else None.
        - The stacked per-layer outputs, of shape [B, T, N_layers + 1,
          d_model] (the first entry along the layer axis is the input to the
          first layer).
"""
assert len(input_embeddings.shape) == 3
assert self._num_attns == 1 or attn_indicator is not None
batch_size = input_embeddings.shape[0]
seq_len = input_embeddings.shape[1]
memory_and_seq_len = seq_len + self._memory_length
expected_shape = (batch_size, seq_len, memory_and_seq_len)
if relative_positions is not None and (
relative_positions.shape != expected_shape
):
raise ValueError(
"Invalid input shape for relative_positions: "
f"{relative_positions.shape!r} vs {expected_shape!r} expected."
)
if (smartmem_mem_from_mem is None) != (smartmem_mem_from_seq is None):
raise ValueError(
"smartmem_mem_from_mem and smartmem_mem_from_seq must be"
" either both None, or none of them should be None."
)
use_smart_memory = smartmem_mem_from_seq is not None
h = input_embeddings
if is_training:
h = hk.dropout(hk.next_rng_key(), self._dropout_rate, h)
# Generate positional encodings.
_, seq_len, embedding_size = input_embeddings.shape
if not self._relative_position_embeddings:
positional_encodings = None
txl_positional_encodings = None
mha_kwargs = self._mha_kwargs
else:
if self._max_relative_position is not None:
max_relative_position = self._max_relative_position
else:
max_relative_position = seq_len + self._memory_length
if self._min_relative_position is not None:
min_relative_position = self._min_relative_position - 1
else:
min_relative_position = -seq_len
positional_encodings = _sinusoid_position_encoding(
max_value=max_relative_position,
min_value=min_relative_position,
hidden_size=embedding_size,
)
positional_encodings = positional_encodings.astype(input_embeddings.dtype)
# Ensure that the baked-in constant is size (2 * seq_len, seq_len).
positional_encodings = lax.tie_in(input_embeddings, positional_encodings)
# TXL-style positional encodings
txl_positional_encodings = _sinusoid_position_encoding(
max_value=seq_len + self._memory_length,
min_value=-seq_len,
hidden_size=embedding_size,
)
txl_positional_encodings = txl_positional_encodings.astype(
input_embeddings.dtype
)
txl_positional_encodings = lax.tie_in(
input_embeddings, txl_positional_encodings
)
mha_kwargs = self._mha_kwargs.copy()
mha_kwargs["max_relative_position"] = max_relative_position
mha_kwargs["min_relative_position"] = min_relative_position
if self._num_unrestricted_heads is not None:
block_fn = _make_block_head_hybrid
block_kwargs = dict(num_unrestricted_heads=self._num_unrestricted_heads)
else:
block_fn = _make_block
block_kwargs = dict()
if self._tied_layer_weights:
assert self._num_unrestricted_layers == 0
# Parameterize function on options.
block = block_fn(
layer=0,
mha_kwargs=mha_kwargs,
ffw_kwargs=self._ffw_kwargs,
dropout_rate=self._dropout_rate,
num_heads=self._num_heads,
num_attns=self._num_attns,
embedding_size=embedding_size,
**block_kwargs,
)
blocks = [(block, False)] * self._num_layers
else:
blocks = []
for i in range(self._num_layers):
if self._num_unrestricted_layers > 0 and (
i >= self._num_layers - self._num_unrestricted_layers
):
unrestricted_layer = True
elif self._num_unrestricted_layers < 0 and (
i < -self._num_unrestricted_layers
):
unrestricted_layer = True
else:
unrestricted_layer = False
if unrestricted_layer:
actual_block_fn = _make_block
actual_block_kwargs = dict()
else:
actual_block_fn, actual_block_kwargs = (
block_fn,
block_kwargs,
)
blocks.append((
# Parameterize function on options.
actual_block_fn(
layer=i,
# We don't need to specify special MHA args in unrestricted
# mode, because the min/max relative positions will be ignored
# in that case.
mha_kwargs=mha_kwargs,
ffw_kwargs=self._ffw_kwargs,
dropout_rate=self._dropout_rate,
num_heads=self._num_heads,
num_attns=self._num_attns if not unrestricted_layer else 1,
embedding_size=embedding_size,
**actual_block_kwargs,
),
unrestricted_layer,
))
new_memory = None if memory is None else []
layers_outputs = [h]
for i, (block, unrestricted_layer) in enumerate(blocks):
# Add embeddings to memory before we go through the layer
if new_memory is not None:
if not use_smart_memory:
new_mem = jnp.concatenate((memory[i], h), axis=1)
new_mem = lax.slice(
new_mem,
start_indices=(
0,
new_mem.shape[1] - self._memory_length,
0,
),
limit_indices=new_mem.shape,
)
else:
# memory has shape [N_layers, B, M, d_model]
old_mem = memory[i] # [B, M, d_model]
new_mem_from_mem = jnp.einsum(
"bmd,bmn->bnd", old_mem, smartmem_mem_from_mem
)
new_mem_from_seq = jnp.einsum(
"btd,btn->bnd", h, smartmem_mem_from_seq
)
new_mem = new_mem_from_mem + new_mem_from_seq
new_memory.append(new_mem)
memory_i = memory[i] if memory is not None else None
# Generate attention mask.
attention_mask, unrestricted_attention_mask = make_attention_mask(
input_mask=input_mask,
memory_mask=memory_mask,
extra_attention_mask=_extract_for_layer(
extra_attention_mask, i, unrestricted_layer
),
extra_memory_attention_mask=_extract_for_layer(
extra_memory_attention_mask, i, unrestricted_layer
),
memory_len=self._memory_length,
dtype=input_embeddings.dtype,
)
h = block( # pylint: disable=missing-kwoa
is_training=is_training,
inputs=h,
attention_mask=attention_mask,
unrestricted_attention_mask=unrestricted_attention_mask,
positional_encodings=_extract_for_layer(
positional_encodings,
i,
unrestricted_layer,
y=txl_positional_encodings,
),
txl_positional_encodings=txl_positional_encodings,
memory=memory_i,
relative_positions=_extract_for_layer(
relative_positions, i, unrestricted_layer
),
attn_indicator=attn_indicator if not unrestricted_layer else None,
)
layers_outputs.append(h)
if new_memory is not None:
new_memory = jnp.stack(new_memory)
return (
h, # [B, T, H]
new_memory,
        jnp.stack(layers_outputs, axis=2),  # [B, T, L+1, H] (incl. inputs)
)
def initial_memory(
self,
batch_size: int,
dtype=jnp.float32,
) -> Optional[TransformerMemory]:
"""Creates the initial memory array, filled with 0s."""
if not self._memory_length:
return
memory_shape = (
self._num_layers,
batch_size,
self._memory_length,
self._d_model,
)
return jnp.zeros(shape=memory_shape, dtype=dtype)
| transformer_grammars-main | transformer_grammars/models/core.py |
# Copyright 2021-2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for transformer_grammars.models.core."""
import functools
import operator
import unittest
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
from transformer_grammars.models import core
import tree
TEST_MODEL_PARAMS = dict(
d_model=32,
num_layers=2,
num_heads=4,
key_size=8,
value_size=8,
ffw_hidden_size=128,
dropout_rate=0.1,
)
SMALL_MODEL_PARAMS = dict(TEST_MODEL_PARAMS, num_layers=1)
def _product(l):
return functools.reduce(operator.mul, l, 1)
def get_input_data(key, batch_size, seq_len, mem_len, d_model, num_layers):
key, key_ = jax.random.split(key)
input_embeddings = jax.random.normal(
key_, shape=(batch_size, seq_len, d_model), dtype=jnp.float32
)
key, key_ = jax.random.split(key)
  # Only the first 6 inputs are valid.
input_mask = jnp.array([[1] * 6 + [0] * 4] * batch_size)
key, key_ = jax.random.split(key)
memory_keys = jax.random.split(key, num=num_layers)
memory = jnp.stack(
[
jax.random.normal(memory_keys[i], (batch_size, mem_len, d_model))
for i in range(num_layers)
]
)
return dict(
input_embeddings=input_embeddings, input_mask=input_mask, memory=memory
)
class CoreTest(unittest.TestCase):
def test_rel_shift(self):
logits = jnp.array([
[-3, -2, -1, 0, 1, 2],
[-3, -2, -1, 0, 1, 2],
[-3, -2, -1, 0, 1, 2],
])
logits = jnp.broadcast_to(logits, (4, 8) + logits.shape)
shifted_logits = core.relative_shift(logits, attention_length=3)
expected_output = jnp.array([[0, 1, 2], [-1, 0, 1], [-2, -1, 0]])
expected_output = jnp.broadcast_to(
expected_output, (4, 8) + expected_output.shape
)
np.testing.assert_array_equal(shifted_logits, expected_output)
def test_output_and_memory_shape(self):
key = jax.random.PRNGKey(42)
batch_size = 2
seq_len = 10
mem_len = 5
d_model = TEST_MODEL_PARAMS["d_model"]
num_layers = TEST_MODEL_PARAMS["num_layers"]
model_inputs = get_input_data(
key, batch_size, seq_len, mem_len, d_model, num_layers
)
def forward(model_inputs, is_training=True):
return core.Core(memory_length=mem_len, **TEST_MODEL_PARAMS)(
is_training=is_training, **model_inputs
)
init_fn, apply_fn = hk.transform(forward)
key, key_ = jax.random.split(key)
params = init_fn(key_, model_inputs)
key, key_ = jax.random.split(key)
outputs, next_memory, _ = apply_fn(params, key_, model_inputs)
self.assertEqual(outputs.shape, (batch_size, seq_len, d_model))
self.assertEqual(len(next_memory), num_layers)
for i in range(num_layers):
self.assertEqual(next_memory[i].shape, (batch_size, mem_len, d_model))
def test_masked_inputs_are_unused(self):
"""Output position i depends exactly on i and non-masked inputs <= i."""
seq_len = 10
mem_len = 5
key_ = jax.random.PRNGKey(42)
key, key_ = jax.random.split(key_)
data = get_input_data(
key=key,
batch_size=1,
seq_len=seq_len,
mem_len=mem_len,
d_model=SMALL_MODEL_PARAMS["d_model"],
num_layers=SMALL_MODEL_PARAMS["num_layers"],
)
def logits(input_embeddings):
model = core.Core(memory_length=mem_len, **SMALL_MODEL_PARAMS)
logits, _, _ = model(
input_embeddings=input_embeddings,
input_mask=data["input_mask"],
memory=data["memory"],
is_training=False,
)
# Use batch 0 only
return logits[0]
init_fn, apply_fn = hk.without_apply_rng(hk.transform(logits))
key, key_ = jax.random.split(key_)
params = init_fn(key, data["input_embeddings"])
inputs_jacobian_fn = jax.jacrev(lambda inputs: apply_fn(params, inputs))
# Jacobian has shape [T, D, B, T, D]
inputs_jacobian = inputs_jacobian_fn(data["input_embeddings"])
# Use input batch 0 only
inputs_jacobian = inputs_jacobian[:, :, 0]
    # Sum over input channel dimension
inputs_jacobian = jnp.sum(inputs_jacobian, axis=-1)
# Take max gradient over output channel dimension
inputs_jacobian = jnp.max(jnp.abs(inputs_jacobian), axis=1)
used_inputs = inputs_jacobian != 0.0
allowed_inputs = jnp.logical_and(
data["input_mask"], jnp.tril(jnp.ones((seq_len, seq_len)))
)
# Each masked input can see itself due to residual connections
allowed_inputs = jnp.logical_or(allowed_inputs, jnp.eye(seq_len))
np.testing.assert_array_equal(used_inputs, allowed_inputs)
def test_memory_mask(self):
"""Tests that masked memory doesn't change the output logits."""
key = jax.random.PRNGKey(42)
batch_size = 1
seq_len = 10
mem_len = 5
d_model = TEST_MODEL_PARAMS["d_model"]
num_layers = TEST_MODEL_PARAMS["num_layers"]
model_inputs = get_input_data(
key, batch_size, seq_len, mem_len, d_model, num_layers
)
input_embeddings = model_inputs["input_embeddings"]
initial_memory = model_inputs["memory"]
input_mask = np.ones((batch_size, seq_len)) # all inputs are valid
key, key_ = jax.random.split(key)
def forward_small_mem(input_embeddings, input_mask, memory, memory_mask):
model = core.Core(memory_length=mem_len, **TEST_MODEL_PARAMS)
return model(
input_embeddings,
input_mask,
memory,
memory_mask,
is_training=False,
)
init_fn, apply_small_fn = hk.without_apply_rng(
hk.transform(forward_small_mem)
)
params = init_fn(
key_, input_embeddings, input_mask, initial_memory, memory_mask=None
)
chunk_small_outputs, _, _ = apply_small_fn(
params, input_embeddings, input_mask, initial_memory, None
)
def forward_large_mem(input_embeddings, input_mask, memory, memory_mask):
model = core.Core(memory_length=mem_len * 2, **TEST_MODEL_PARAMS)
return model(
input_embeddings,
input_mask,
memory,
memory_mask,
is_training=False,
)
_, apply_large_fn = hk.without_apply_rng(hk.transform(forward_large_mem))
# Memory is twice as large, but we only attend to the second half.
# Outputs should be the same if the memory mask works as expected.
large_initial_memory = jnp.concatenate(
(initial_memory, initial_memory), axis=2
)
large_memory_mask = jnp.arange(mem_len * 2) >= mem_len
large_memory_mask = jnp.broadcast_to(
large_memory_mask, (batch_size, seq_len, mem_len * 2)
)
chunk_large_outputs, _, _ = apply_large_fn(
params,
input_embeddings,
input_mask,
large_initial_memory,
large_memory_mask,
)
np.testing.assert_array_almost_equal(
chunk_small_outputs, chunk_large_outputs, decimal=5
)
def test_memory_shifts_over_multiple_steps(self):
key = jax.random.PRNGKey(42)
batch_size = 2
seq_len = 5
mem_len_factor = 4
mem_len = seq_len * mem_len_factor
d_model = TEST_MODEL_PARAMS["d_model"]
num_layers = TEST_MODEL_PARAMS["num_layers"]
def forward(input_embeddings, input_mask, memory):
model = core.Core(memory_length=mem_len, **TEST_MODEL_PARAMS)
return model(input_embeddings, input_mask, memory, is_training=False)
init_fn, apply_fn = hk.without_apply_rng(hk.transform(forward))
apply_fn = jax.jit(apply_fn)
key, key_ = jax.random.split(key)
model_inputs = get_input_data(
key_, batch_size, seq_len, mem_len, d_model, num_layers
)
initial_memory = model_inputs["memory"]
input_mask = jnp.ones((batch_size, seq_len)) # all inputs are valid
key, key_ = jax.random.split(key)
params = init_fn(
key_, model_inputs["input_embeddings"], input_mask, initial_memory
)
# Compute outputs by feeding one token at a time.
memory = initial_memory
for i in range(1, mem_len_factor):
key, key_ = jax.random.split(key)
input_embeddings = jax.random.normal(
key_, shape=(batch_size, seq_len, d_model), dtype=jnp.float32
)
_, new_memory, _ = apply_fn(params, input_embeddings, input_mask, memory)
memory = new_memory
# Memory is shifted i times by seq_len after i steps.
np.testing.assert_array_equal(
initial_memory[:, :, -seq_len:],
memory[:, :, -(i + 1) * seq_len : -i * seq_len],
)
def test_rel_shift_and_explicit_relative_position_give_same_result(self):
key = jax.random.PRNGKey(42)
batch_size = 2
seq_len = 10
mem_len = 5
d_model = TEST_MODEL_PARAMS["d_model"]
num_layers = TEST_MODEL_PARAMS["num_layers"]
model_inputs = get_input_data(
key, batch_size, seq_len, mem_len, d_model, num_layers
)
def forward(model_inputs, is_training=True):
return core.Core(memory_length=mem_len, **TEST_MODEL_PARAMS)(
is_training=is_training, **model_inputs
)
def forward_with_relative_positions(
model_inputs, relative_positions, is_training=True
):
return core.Core(memory_length=mem_len, **TEST_MODEL_PARAMS)(
is_training=is_training,
relative_positions=relative_positions,
**model_inputs,
)
init_fn, apply_fn = hk.transform(forward)
_, apply_with_relative_pos_fn = hk.transform(
forward_with_relative_positions
)
key, key_ = jax.random.split(key)
params = init_fn(key_, model_inputs)
_, key_ = jax.random.split(key)
outputs, next_memory, _ = apply_fn(params, key_, model_inputs)
relative_positions = (
mem_len
+ np.arange(seq_len).reshape((-1, 1))
- np.arange(seq_len + mem_len).reshape((1, -1))
)
    # relative_positions[i, j] = i - j + mem_len,
    # i.e. how many tokens ahead query i is relative to key j.
relative_positions = np.broadcast_to(
relative_positions, (batch_size, seq_len, seq_len + mem_len)
)
outputs2, next_memory2, _ = apply_with_relative_pos_fn(
params, key_, model_inputs, relative_positions
)
np.testing.assert_array_equal(outputs, outputs2)
np.testing.assert_array_equal(next_memory, next_memory2)
def test_shift_and_smartmem_indicator_give_same_result(self):
key = jax.random.PRNGKey(42)
batch_size = 2
seq_len = 10
mem_len = 5
d_model = TEST_MODEL_PARAMS["d_model"]
num_layers = TEST_MODEL_PARAMS["num_layers"]
model_inputs0 = get_input_data(
key, batch_size, seq_len, mem_len, d_model, num_layers
)
model_inputs1 = get_input_data(
key, batch_size, seq_len, mem_len, d_model, num_layers
)
del model_inputs1["memory"]
smartmem_mem_from_mem = jnp.zeros(
(batch_size, mem_len, mem_len), dtype=jnp.float32
)
smartmem_mem_from_seq = jnp.zeros(
(batch_size, seq_len, mem_len), dtype=jnp.float32
)
for i in range(min(mem_len, seq_len)):
smartmem_mem_from_seq = smartmem_mem_from_seq.at[:, -1 - i, -1 - i].set(1)
def forward(model_inputs, is_training=True):
return core.Core(memory_length=mem_len, **TEST_MODEL_PARAMS)(
is_training=is_training, **model_inputs
)
init_fn, apply_fn = hk.transform(forward)
key, key_ = jax.random.split(key)
params = init_fn(key_, model_inputs0)
_, key_ = jax.random.split(key)
# Apply the TXL core as usual, i.e. shifting the current embeddings into the
# memory.
outputs0, next_memory0, _ = apply_fn(params, key_, model_inputs0)
model_inputs1["memory"] = next_memory0
outputs1, next_memory1, _ = apply_fn(params, key_, model_inputs1)
model_inputs0_sm = model_inputs0.copy()
model_inputs0_sm["smartmem_mem_from_mem"] = smartmem_mem_from_mem
model_inputs0_sm["smartmem_mem_from_seq"] = smartmem_mem_from_seq
outputs0_sm, next_memory0_sm, _ = apply_fn(params, key_, model_inputs0_sm)
model_inputs1_sm = model_inputs1.copy()
model_inputs1_sm["smartmem_mem_from_mem"] = smartmem_mem_from_mem
model_inputs1_sm["smartmem_mem_from_seq"] = smartmem_mem_from_seq
model_inputs1_sm["memory"] = next_memory0_sm
outputs1_sm, next_memory1_sm, _ = apply_fn(params, key_, model_inputs1_sm)
np.testing.assert_array_equal(outputs0, outputs0_sm)
np.testing.assert_array_equal(outputs1, outputs1_sm)
np.testing.assert_array_equal(next_memory0, next_memory0_sm)
np.testing.assert_array_equal(next_memory1, next_memory1_sm)
def test_fewer_params_with_tied_layers(self):
key = jax.random.PRNGKey(42)
batch_size = 2
seq_len = 10
d_model = TEST_MODEL_PARAMS["d_model"]
num_layers = TEST_MODEL_PARAMS["num_layers"]
model_inputs = get_input_data(
key, batch_size, seq_len, 0, d_model, num_layers
)
del model_inputs["memory"]
def forward(model_inputs, is_training=True):
return core.Core(
memory_length=0, tied_layer_weights=False, **TEST_MODEL_PARAMS
)(is_training=is_training, **model_inputs)
def forward_tied(model_inputs, is_training=True):
return core.Core(
memory_length=0, tied_layer_weights=True, **TEST_MODEL_PARAMS
)(is_training=is_training, **model_inputs)
init_fn, _ = hk.transform(forward)
params = init_fn(key, model_inputs)
init_tied_fn, _ = hk.transform(forward_tied)
params_tied = init_tied_fn(key, model_inputs)
num_params = sum(map(lambda x: _product(x.shape), tree.flatten(params)))
num_params_tied = sum(
map(lambda x: _product(x.shape), tree.flatten(params_tied))
)
self.assertLess(num_params_tied, num_params)
def test_more_params_with_several_attn_functions(self):
key = jax.random.PRNGKey(42)
batch_size = 2
seq_len = 10
d_model = TEST_MODEL_PARAMS["d_model"]
num_layers = TEST_MODEL_PARAMS["num_layers"]
model_inputs = get_input_data(
key, batch_size, seq_len, 0, d_model, num_layers
)
model_inputs["attn_indicator"] = model_inputs["input_mask"] * 0
del model_inputs["memory"]
def forward(model_inputs, is_training=True):
return core.Core(memory_length=0, num_attns=1, **TEST_MODEL_PARAMS)(
is_training=is_training, **model_inputs
)
def forward_multiple_attns(model_inputs, is_training=True):
return core.Core(memory_length=0, num_attns=2, **TEST_MODEL_PARAMS)(
is_training=is_training, **model_inputs
)
init_fn, _ = hk.transform(forward)
params = init_fn(key, model_inputs)
init_multiple_attns_fn, _ = hk.transform(forward_multiple_attns)
params_multiple_attns = init_multiple_attns_fn(key, model_inputs)
num_params = sum(map(lambda x: _product(x.shape), tree.flatten(params)))
num_params_multiple_attns = sum(
map(
lambda x: _product(x.shape),
tree.flatten(params_multiple_attns),
)
)
self.assertGreater(num_params_multiple_attns, num_params)
def test_hybrid_with_only_txl_layers_is_a_txl(self):
key = jax.random.PRNGKey(42)
batch_size = 2
seq_len = 10
mem_len = 5
d_model = TEST_MODEL_PARAMS["d_model"]
num_layers = TEST_MODEL_PARAMS["num_layers"]
keys = hk.PRNGSequence(42)
model_inputs = get_input_data(
key, batch_size, seq_len, mem_len, d_model, num_layers
)
hybrid_model_inputs = model_inputs.copy()
hybrid_model_inputs["extra_attention_mask"] = np.broadcast_to(
np.eye(seq_len).reshape((1, seq_len, seq_len)),
(batch_size, seq_len, seq_len),
)
hybrid_model_inputs["relative_positions"] = np.random.randint(
-4, 4, (batch_size, seq_len, seq_len + mem_len)
)
hybrid_model_inputs["attn_indicator"] = model_inputs["input_mask"] * 0
def forward_txl(model_inputs, is_training=False):
model = core.Core(memory_length=mem_len, **TEST_MODEL_PARAMS)
return model(is_training=is_training, **model_inputs)
def forward_hybrid_tg_with_only_txl_layers(model_inputs, is_training=False):
model = core.Core(
num_unrestricted_layers=num_layers,
memory_length=mem_len,
num_attns=2,
min_relative_position=-1,
max_relative_position=1,
**TEST_MODEL_PARAMS,
)
return model(is_training=is_training, **model_inputs)
init_fn, apply_fn = hk.transform(forward_txl)
params = init_fn(next(keys), model_inputs)
_, apply_hybrid_fn = hk.transform(forward_hybrid_tg_with_only_txl_layers)
key = next(keys)
outputs, next_memory, _ = apply_fn(params, key, model_inputs)
outputs_hybrid, next_memory_hybrid, _ = apply_hybrid_fn(
params, key, hybrid_model_inputs
)
np.testing.assert_array_equal(outputs, outputs_hybrid)
np.testing.assert_array_equal(next_memory, next_memory_hybrid)
| transformer_grammars-main | transformer_grammars/models/core_test.py |
# Copyright 2021-2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generalized Transformer-XL-based language model.
It is generalized to support relative positions other than the difference in
linear order, custom attention masks, and custom memory updates.
Embeds, applies the core, projects to get logits.
See the documentation for the core about TG specific changes.
"""
from typing import Callable, Optional
from einshape import jax_einshape as einshape
import haiku as hk
import jax
import jax.numpy as jnp
from transformer_grammars.models import core
from transformer_grammars.models import embedding_layer
def _apply_mask(m, a, b, *, axis):
"""Returns an array composed of elements from a or b depending on the mask.
Args:
m: Mask used to select elements from `a` when equal to 1, from `b`
otherwise, of shape [m]
a: Array of arbitrary shape [d_0, d_1, ..., d_{n-1}]
b: Array of same shape as `a`
axis: Axis, in `a` and `b`, such that d_{axis} == m.
Returns:
Array `c` such that slices, on axis `axis`, for which m == 1, are
    extracted from `a`, and for which m == 0, are extracted from `b`.
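  Example (illustrative sketch):
    m = jnp.array([1, 0])
    a = jnp.zeros((2, 3))
    b = jnp.ones((2, 3))
    _apply_mask(m, a, b, axis=0)
    # -> [[0., 0., 0.], [1., 1., 1.]]  (row 0 taken from a, row 1 from b)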
"""
assert a.shape == b.shape, (a.shape, b.shape)
assert m.shape[0] == a.shape[axis], (m.shape, a.shape, axis)
new_shape = [1 if i != axis else -1 for (i, _) in enumerate(a.shape)]
m = m.reshape(new_shape)
return a * m + b * (1 - m)
class GeneralizedTXLLanguageModel(hk.Module):
"""Generalized Transformer-XL-based language model."""
def __init__(
self,
*,
vocab_size: int,
d_model: int,
num_layers: int,
num_heads: int,
ffw_hidden_size: int,
core_dropout: float,
core_output_dropout: float,
embedding_dropout: float,
sequence_length: int,
memory_length: int,
tied_input_output_embeddings: bool,
use_output_bias: bool = True,
key_size: Optional[int] = None,
value_size: Optional[int] = None,
relative_position_embeddings: bool = True,
use_attn_bias: bool = False,
activation: Callable[[jnp.ndarray], jnp.ndarray] = jax.nn.gelu,
num_attns: int = 1,
tied_layer_weights: bool = False,
num_unrestricted_layers: int = 0,
min_relative_position: Optional[int] = None,
max_relative_position: Optional[int] = None,
num_unrestricted_heads: Optional[int] = None,
name: str = "lm",
):
"""Initialises the module.
Args:
vocab_size: Vocabulary size.
d_model: Size of the embeddings.
num_layers: Number of transformer block layers.
num_heads: Number of attention heads to use.
ffw_hidden_size: Hidden size for MLP that follows attention.
core_dropout: Dropout rate for the TXL core, applied to the embeddings,
attention and MLP modules.
core_output_dropout: Dropout rate applied to the output of the core,
before projection.
      embedding_dropout: Dropout rate for the token embeddings, applied before
        they are passed to the core.
sequence_length: (Unused) Input sequence length.
memory_length: How many tokens to hold in Core memory.
tied_input_output_embeddings: Use the same embedding matrix for the input
and for the output.
use_output_bias: Apply a learned bias to the output logits.
key_size: Size of key (and query) embedding for attention. If not passed,
defaults to d_model / num_heads.
value_size: Size of value embedding for attention. If not passed, defaults
to d_model / num_heads.
relative_position_embeddings: Whether to use relative position embeddings.
use_attn_bias: Whether or not to use biases in attention linear layers.
activation: The nonlinearity to use in the DenseBlocks.
num_attns: (TG only) Number of attention functions (e.g. 2 if different
attns for STACK and COMPOSE)
tied_layer_weights: If True, all the layers share the same weights.
num_unrestricted_layers: (TG only) Number of regular TXL (no Transformer
Grammar restriction) layers (0 means no regular TXL layer, n > 0 means
n at the top of the stack, n < 0 means -n at the bottom of the stack)
min_relative_position: (TG only) Minimum value for the relative positions
that can be passed to the model.
max_relative_position: (TG only) Maximum value for the relative positions
that can be passed to the model.
num_unrestricted_heads: (TG only) For TG layers, number of unrestricted
(TXL) heads.
name: The Haiku name of the module.
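    Example (a minimal sketch; the hyperparameter values are illustrative and
    the masking inputs -- attn_mask, attn_relpos, etc. -- are assumed to come
    from the masking rules):
      def forward(seq, beginning_of_seq, **masking_inputs):
        lm = GeneralizedTXLLanguageModel(
            vocab_size=100, d_model=64, num_layers=2, num_heads=4,
            ffw_hidden_size=256, core_dropout=0.1, core_output_dropout=0.1,
            embedding_dropout=0.1, sequence_length=16, memory_length=16,
            tied_input_output_embeddings=True)
        return lm(seq, beginning_of_seq, is_training=False, **masking_inputs)
      # The module keeps the TXL memory in Haiku state, so use
      # hk.transform_with_state rather than hk.transform.
      forward_fn = hk.transform_with_state(forward)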
"""
super().__init__(name=name)
del sequence_length
if key_size is None:
key_size = d_model // num_heads
if value_size is None:
value_size = d_model // num_heads
if ffw_hidden_size < 0:
# Negative ffw_hidden_size is used to express a ratio between d_model
# and FFW d_hidden. Useful for sweeps.
ffw_hidden_size = d_model * (-ffw_hidden_size)
self._core = core.Core(
d_model=d_model,
num_layers=num_layers,
num_heads=num_heads,
key_size=key_size,
value_size=value_size,
ffw_hidden_size=ffw_hidden_size,
dropout_rate=core_dropout,
memory_length=memory_length,
relative_position_embeddings=relative_position_embeddings,
use_attn_bias=use_attn_bias,
activation=activation,
tied_layer_weights=tied_layer_weights,
num_attns=num_attns,
num_unrestricted_layers=num_unrestricted_layers,
min_relative_position=min_relative_position,
max_relative_position=max_relative_position,
num_unrestricted_heads=num_unrestricted_heads,
name="core",
)
self._num_layers = num_layers
self._d_model = d_model
self._memory_length = memory_length
self._embed = embedding_layer.EmbeddingLayer(
embedding_size=d_model,
vocab_size=vocab_size,
share_weights=tied_input_output_embeddings,
output_bias=use_output_bias,
)
self._embedding_dropout = embedding_dropout
self._core_output_dropout = core_output_dropout
def __call__(
self,
seq,
beginning_of_seq,
*,
attn_mask,
attn_relpos,
attn_indicator,
memory_attn_mask,
memory_padding_mask,
smartmem_mem_from_seq,
smartmem_mem_from_mem,
is_training: bool,
**kwargs,
):
"""Applies the model to a sequence."""
bs, seqlen = seq.shape
use_memory = self._memory_length > 0
mask = jnp.greater(seq, 0)
emb = self._embed.encode(seq)
if is_training:
# WARNING: The core applies dropout to the token embeddings as well, so
# the effective dropout rate for the embeddings is
# embedding_dropout + core_dropout - embedding_dropout * core_dropout.
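      # For example, with embedding_dropout=0.1 and core_dropout=0.1 (purely
      # illustrative values), the effective rate on the embeddings is
      # 0.1 + 0.1 - 0.1 * 0.1 = 0.19.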
emb = hk.dropout(hk.next_rng_key(), self._embedding_dropout, emb)
# Get the memory from Haiku state
if use_memory:
# `memory` is the saved activations from each layer
memory = hk.get_state(
"memory",
shape=[
self._num_layers,
bs,
self._memory_length,
self._d_model,
],
dtype=emb.dtype,
init=jnp.zeros,
)
empty_memory = jnp.zeros_like(memory)
memory = _apply_mask(beginning_of_seq, empty_memory, memory, axis=1)
memory_padding_mask = jnp.tile(
einshape("bt->b1t", memory_padding_mask), (1, seqlen, 1)
)
else:
memory = None
memory_padding_mask = None
core_output, new_memory, layers_outputs = self._core(
input_embeddings=emb,
input_mask=mask,
memory=memory,
memory_mask=memory_padding_mask,
is_training=is_training,
extra_attention_mask=attn_mask,
extra_memory_attention_mask=memory_attn_mask,
relative_positions=attn_relpos,
attn_indicator=attn_indicator,
smartmem_mem_from_seq=smartmem_mem_from_seq,
smartmem_mem_from_mem=smartmem_mem_from_mem,
)
# Set the memory into Haiku state.
if memory is not None:
hk.set_state("memory", jax.lax.stop_gradient(new_memory))
if is_training:
core_output = hk.dropout(
hk.next_rng_key(), self._core_output_dropout, core_output
)
output = self._embed.decode(core_output)
return output, layers_outputs
| transformer_grammars-main | transformer_grammars/models/lm.py |
# Copyright 2021-2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for transformer_grammars.models.masking.cpp_masking."""
import unittest
from absl import logging
import numpy as np
from parameterized import parameterized
from transformer_grammars.models.masking import constants as mc
from transformer_grammars.models.masking import cpp_masking
from transformer_grammars.models.masking import masking_types as types
# pylint: disable=g-generic-assert
# Sets of kwargs for test_ctor_does_not_raise
_DEFAULT_KWARGS = dict(sequence_length=4, memory_length=4)
_DELTA_DEPTH_KWARGS = dict(
sequence_length=4,
memory_length=4,
relative_pos="delta_depth",
use_different_attn_fns=True,
)
class FastMaskingTest(unittest.TestCase):
def assertAllEqual(self, actual, expected):
self.assertEqual(
np.all(expected == actual), True, f"{expected} != {actual}"
)
def assertLen(self, container, length):
self.assertEqual(len(container), length)
def assertContainsSubsequence(self, seq, subseq):
self.assertTrue(subseq in seq)
def test_stack_compose_init_docstring(self):
"""Tests that a docstring is set on __init__ for stack/compose."""
self.assertContainsSubsequence(
cpp_masking.StackComposeDoubleClosingNT.__init__.__doc__,
"Initialises the stack/compose masking rule.",
)
def test_txl_init_docstring(self):
"""Tests that a docstring is set on __init__ for TXL-style masking."""
self.assertContainsSubsequence(
cpp_masking.TXLCausalMasking.__init__.__doc__,
"Initialises the TXL-style causal masking rule.",
)
@parameterized.expand([
("default", _DEFAULT_KWARGS),
("delta_depth_different_attn_fns", _DELTA_DEPTH_KWARGS),
])
def test_ctor_does_not_raise(self, _, kwargs):
"""Tests that construction of the rules succeeds for correct kwargs."""
_ = cpp_masking.StackComposeDoubleClosingNT(**kwargs)
def test_ctor_raises_on_invalid_relative_position_type(self):
"""Tests that construction of the rules fails on invalid relpos type."""
with self.assertRaises(RuntimeError):
_ = cpp_masking.StackComposeDoubleClosingNT(
sequence_length=4,
memory_length=4,
relative_pos="foo",
use_different_attn_fns=True,
)
@parameterized.expand([
("different_attn_fns", True, 2),
("single_attn_fn", False, 1),
])
def test_num_attention_functions(
self, _, use_different_attn_fns, expected_num_attention_functions
):
"""Tests the `num_attention_functions` property of the masking rule."""
maskrules = cpp_masking.StackComposeDoubleClosingNT(
sequence_length=4,
memory_length=4,
relative_pos="delta_depth",
use_different_attn_fns=use_different_attn_fns,
)
self.assertEqual(
maskrules.num_attention_functions, expected_num_attention_functions
)
@parameterized.expand([
("delta_depth", "delta_depth", True),
("no_relative_positions", "", False),
])
def test_use_relative_positions_stack_compose(
self, _, relative_pos, expected_use_relative_positions
):
"""Tests the `use_relative_positions` property of the masking rule."""
maskrules = cpp_masking.StackComposeDoubleClosingNT(
sequence_length=4,
memory_length=4,
relative_pos=relative_pos,
use_different_attn_fns=True,
)
self.assertEqual(
maskrules.use_relative_positions, expected_use_relative_positions
)
def test_use_relative_positions_txl(self):
"""Tests the `use_relative_positions` property in the TXL case."""
maskrules = cpp_masking.TXLCausalMasking(sequence_length=4, memory_length=4)
self.assertFalse(maskrules.use_relative_positions)
def _data(self):
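    # Token ids 1..11 below encode the linearised tree
    #   <s> (S (NP the hungry cat NP) (VP meows VP) S)
    # followed by a single <pad>.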
seq = np.array(
[
1, # <s>
2, # (S
3, # (NP
4, # the
5, # hungry
6, # cat
7, # NP)
8, # (VP
9, # meows
            10, # VP)
11, # S)
0, # <pad>
],
dtype=np.int32,
)
ttypes = np.array(
[
mc.OPENING_NT, # <s>
mc.OPENING_NT, # (S
mc.OPENING_NT, # (NP
mc.TERMINAL, # the
mc.TERMINAL, # hungry
mc.TERMINAL, # cat
mc.CLOSING_NT, # NP)
mc.OPENING_NT, # (VP
mc.TERMINAL, # meows
mc.CLOSING_NT, # VP)
mc.CLOSING_NT, # S)
mc.PAD, # <pad>
],
dtype=np.int32,
)
inputs = seq[:-1]
labels = seq[1:]
inputs_ttypes = ttypes[:-1]
labels_ttypes = ttypes[1:]
return inputs, labels, inputs_ttypes, labels_ttypes
def test_stack_compose_double_closing_nt(self):
"""Runs stack/compose code on known inputs, compares with gold outputs."""
inputs, labels, inputs_ttypes, labels_ttypes = self._data()
maskrules = cpp_masking.StackComposeDoubleClosingNT(
sequence_length=4, memory_length=4, use_different_attn_fns=True
)
chunks = maskrules.chunks_for_sequence(
inputs, inputs_ttypes, labels, labels_ttypes
)
chunks = [types.Chunk(None, *chunk) for chunk in chunks]
for chunk in chunks:
logging.info("Got chunk: %s", repr(chunk))
actual_t_inputs = np.concatenate([chunk.inputs for chunk in chunks], axis=0)
expected_t_inputs = np.array([
1, # <s>
2, # (S
3, # (NP
4, # the
5, # hungry
6, # cat
7, # NP)
7, # NP)
8, # (VP
9, # meows
        10, # VP)
        10, # VP)
11, # S)
11, # S)
0, # <pad>
0, # <pad>
])
logging.info("Actual transformed inputs: %s", repr(actual_t_inputs))
self.assertAllEqual(expected_t_inputs, actual_t_inputs)
actual_t_labels = np.concatenate([chunk.labels for chunk in chunks], axis=0)
expected_t_labels = np.array([
2, # (S
3, # (NP
4, # the
5, # hungry
6, # cat
7, # NP)
0, # <pad> !
8, # (VP
9, # meows
        10, # VP)
0, # <pad> !
11, # S)
0, # <pad> !
0, # <pad>
0, # <pad>
0, # <pad>
])
logging.info("Actual transformed labels: %s", repr(actual_t_labels))
self.assertAllEqual(expected_t_labels, actual_t_labels)
# Sequence padded to length 16, so 4 chunks of size 4
self.assertLen(chunks, 4)
self.assertAllEqual(chunks[0].inputs, np.array([1, 2, 3, 4]))
self.assertAllEqual(
chunks[0].inputs_ttypes,
np.array([mc.OPENING_NT, mc.OPENING_NT, mc.OPENING_NT, mc.TERMINAL]),
)
self.assertAllEqual(chunks[0].labels, np.array([2, 3, 4, 5]))
self.assertAllEqual(
chunks[0].labels_ttypes,
np.array([mc.OPENING_NT, mc.OPENING_NT, mc.TERMINAL, mc.TERMINAL]),
)
self.assertAllEqual(chunks[0].attn_indicator, np.array([0, 0, 0, 0]))
self.assertAllEqual(chunks[0].memory_padding_mask, np.array([0, 0, 0, 0]))
self.assertAllEqual(chunks[0].memory_pos, np.array([-1, -1, -1, -1]))
self.assertAllEqual(chunks[0].depth, np.array([0, 1, 2, 3]))
self.assertAllEqual(chunks[0].beginning_of_seq, np.array(1))
self.assertAllEqual(chunks[0].end_of_seq, np.array(0))
self.assertAllEqual(
chunks[0].smartmem_mem_from_seq, np.eye(4, dtype=np.int32)
)
self.assertAllEqual(
chunks[0].smartmem_mem_from_mem, np.zeros((4, 4), dtype=np.int32)
)
# Stack attention: <s> -> <s>
self.assertAllEqual(chunks[0].attn_mask[0], np.array([1, 0, 0, 0]))
self.assertAllEqual(
chunks[0].attn_relpos[0], np.array([0, 0, 0, 0, 0, 0, 0, 0])
)
self.assertAllEqual(chunks[0].memory_attn_mask[0], np.array([0, 0, 0, 0]))
# Stack attention: (S -> <s> (S
self.assertAllEqual(chunks[0].attn_mask[1], np.array([1, 1, 0, 0]))
self.assertAllEqual(
chunks[0].attn_relpos[1], np.array([0, 0, 0, 0, 1, 0, 0, 0])
)
self.assertAllEqual(chunks[0].memory_attn_mask[1], np.array([0, 0, 0, 0]))
# Stack attention: (NP -> <s> (S (NP
self.assertAllEqual(chunks[0].attn_mask[2], np.array([1, 1, 1, 0]))
self.assertAllEqual(
chunks[0].attn_relpos[2], np.array([0, 0, 0, 0, 2, 1, 0, 0])
)
self.assertAllEqual(chunks[0].memory_attn_mask[2], np.array([0, 0, 0, 0]))
# Stack attention: the -> <s> (S (NP the
self.assertAllEqual(chunks[0].attn_mask[3], np.array([1, 1, 1, 1]))
self.assertAllEqual(
chunks[0].attn_relpos[3], np.array([0, 0, 0, 0, 3, 2, 1, 0])
)
self.assertAllEqual(chunks[0].memory_attn_mask[3], np.array([0, 0, 0, 0]))
self.assertAllEqual(chunks[1].inputs, np.array([5, 6, 7, 7]))
self.assertAllEqual(
chunks[1].inputs_ttypes,
np.array([mc.TERMINAL, mc.TERMINAL, mc.CLOSING_NT, mc.CLOSING_NT_2]),
)
self.assertAllEqual(chunks[1].labels, np.array([6, 7, 0, 8]))
self.assertAllEqual(
chunks[1].labels_ttypes,
np.array([mc.TERMINAL, mc.CLOSING_NT, mc.PAD, mc.OPENING_NT]),
)
self.assertAllEqual(chunks[1].attn_indicator, np.array([0, 0, 1, 0]))
self.assertAllEqual(chunks[1].memory_padding_mask, np.array([1, 1, 1, 1]))
self.assertAllEqual(chunks[1].memory_pos, np.array([0, 1, 2, 3]))
self.assertAllEqual(chunks[1].depth, np.array([3, 3, 2, 2]))
self.assertAllEqual(chunks[1].beginning_of_seq, np.array(0))
self.assertAllEqual(chunks[1].end_of_seq, np.array(0))
self.assertAllEqual(
chunks[1].smartmem_mem_from_seq, np.eye(4, dtype=np.int32)
)
self.assertAllEqual(
chunks[1].smartmem_mem_from_mem, np.zeros((4, 4), dtype=np.int32)
)
# Stack attention: hungry -> [<s> (S (NP the] hungry
self.assertAllEqual(chunks[1].attn_mask[0], np.array([1, 0, 0, 0]))
self.assertAllEqual(
chunks[1].attn_relpos[0], np.array([3, 2, 1, 0, 0, 0, 0, 0])
)
self.assertAllEqual(chunks[1].memory_attn_mask[0], np.array([1, 1, 1, 1]))
# Stack attention: cat -> [<s> (S (NP the] hungry cat
self.assertAllEqual(chunks[1].attn_mask[1], np.array([1, 1, 0, 0]))
self.assertAllEqual(
chunks[1].attn_relpos[1], np.array([3, 2, 1, 0, 0, 0, 0, 0])
)
self.assertAllEqual(chunks[1].memory_attn_mask[1], np.array([1, 1, 1, 1]))
# COMPOSE attention: NP) -> [(NP the] hungry cat NP)
self.assertAllEqual(chunks[1].attn_mask[2], np.array([1, 1, 1, 0]))
self.assertAllEqual(
chunks[1].attn_relpos[2], np.array([0, 0, 0, -1, -1, -1, 0, 0])
)
self.assertAllEqual(chunks[1].memory_attn_mask[2], np.array([0, 0, 1, 1]))
    # Stack attention: NP) -> [<s> (S] NP) NP)
self.assertAllEqual(chunks[1].attn_mask[3], np.array([0, 0, 1, 1]))
self.assertAllEqual(
chunks[1].attn_relpos[3], np.array([2, 1, 0, 0, 0, 0, 0, 0])
)
self.assertAllEqual(chunks[1].memory_attn_mask[3], np.array([1, 1, 0, 0]))
self.assertAllEqual(chunks[2].inputs, np.array([8, 9, 10, 10]))
self.assertAllEqual(
chunks[2].inputs_ttypes,
np.array([mc.OPENING_NT, mc.TERMINAL, mc.CLOSING_NT, mc.CLOSING_NT_2]),
)
self.assertAllEqual(chunks[2].labels, np.array([9, 10, 0, 11]))
self.assertAllEqual(
chunks[2].labels_ttypes,
np.array([mc.TERMINAL, mc.CLOSING_NT, mc.PAD, mc.CLOSING_NT]),
)
self.assertAllEqual(chunks[2].attn_indicator, np.array([0, 0, 1, 0]))
self.assertAllEqual(chunks[2].memory_padding_mask, np.array([1, 1, 1, 1]))
self.assertAllEqual(chunks[2].memory_pos, np.array([4, 5, 6, 7]))
self.assertAllEqual(chunks[2].depth, np.array([2, 3, 2, 2]))
self.assertAllEqual(chunks[2].beginning_of_seq, np.array(0))
self.assertAllEqual(chunks[2].end_of_seq, np.array(0))
self.assertAllEqual(
chunks[2].smartmem_mem_from_seq, np.eye(4, dtype=np.int32)
)
self.assertAllEqual(
chunks[2].smartmem_mem_from_mem, np.zeros((4, 4), dtype=np.int32)
)
# Stack attention: (VP -> [[<s> (S]] [NP)] (VP
self.assertAllEqual(chunks[2].attn_mask[0], np.array([1, 0, 0, 0]))
self.assertAllEqual(
chunks[2].attn_relpos[0], np.array([0, 0, 0, 0, 0, 0, 0, 0])
)
self.assertAllEqual(chunks[2].memory_attn_mask[0], np.array([0, 0, 1, 0]))
# Stack attention: meows -> [[<s> (S]] [NP)] (VP
self.assertAllEqual(chunks[2].attn_mask[1], np.array([1, 1, 0, 0]))
self.assertAllEqual(
chunks[2].attn_relpos[1], np.array([0, 0, 1, 0, 1, 0, 0, 0])
)
self.assertAllEqual(chunks[2].memory_attn_mask[1], np.array([0, 0, 1, 0]))
# COMPOSE attention: VP) -> (VP meows VP)
self.assertAllEqual(chunks[2].attn_mask[2], np.array([1, 1, 1, 0]))
self.assertAllEqual(
chunks[2].attn_relpos[2], np.array([0, 0, 0, 0, 0, -1, 0, 0])
)
self.assertAllEqual(chunks[2].memory_attn_mask[2], np.array([0, 0, 0, 0]))
# Stack attention: VP) -> [[<s> (S]] [NP)] (VP VP)
self.assertAllEqual(chunks[2].attn_mask[3], np.array([0, 0, 1, 1]))
self.assertAllEqual(
chunks[2].attn_relpos[3], np.array([0, 0, 0, 0, 0, 0, 0, 0])
)
self.assertAllEqual(chunks[2].memory_attn_mask[3], np.array([0, 0, 1, 0]))
self.assertAllEqual(chunks[3].inputs, np.array([11, 11, 0, 0]))
self.assertAllEqual(
chunks[3].inputs_ttypes,
np.array([mc.CLOSING_NT, mc.CLOSING_NT_2, mc.PAD, mc.PAD]),
)
self.assertAllEqual(chunks[3].labels, np.array([0, 0, 0, 0]))
self.assertAllEqual(
chunks[3].labels_ttypes, np.array([mc.PAD, mc.PAD, mc.PAD, mc.PAD])
)
self.assertAllEqual(chunks[3].attn_indicator, np.array([1, 0, 0, 0]))
self.assertAllEqual(chunks[3].memory_padding_mask, np.array([1, 1, 1, 1]))
self.assertAllEqual(chunks[3].memory_pos, np.array([8, 9, 10, 11]))
self.assertAllEqual(chunks[3].depth, np.array([1, 1, 0, 0]))
self.assertAllEqual(chunks[3].beginning_of_seq, np.array(0))
self.assertAllEqual(chunks[3].end_of_seq, np.array(1))
self.assertAllEqual(
chunks[3].smartmem_mem_from_seq,
np.array(
[[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
dtype=np.int32,
),
)
self.assertAllEqual(
chunks[3].smartmem_mem_from_mem, np.zeros((4, 4), dtype=np.int32)
)
# COMPOSE attention: S) -> [[(S NP)]] [VP)] S)
self.assertAllEqual(chunks[3].attn_mask[0], np.array([1, 0, 0, 0]))
self.assertAllEqual(
chunks[3].attn_relpos[0], np.array([0, 0, -1, 0, 0, 0, 0, 0])
)
self.assertAllEqual(chunks[3].memory_attn_mask[0], np.array([0, 0, 1, 0]))
# Stack attention: S) -> [[<s>]] S) S)
self.assertAllEqual(chunks[3].attn_mask[1], np.array([1, 1, 0, 0]))
self.assertAllEqual(
chunks[3].attn_relpos[1], np.array([0, 0, 0, 0, 0, 0, 0, 0])
)
self.assertAllEqual(chunks[3].memory_attn_mask[1], np.array([0, 0, 0, 0]))
# Attention: <pad> -> nothing
self.assertAllEqual(chunks[3].attn_mask[2], np.array([0, 0, 0, 0]))
self.assertAllEqual(chunks[3].memory_attn_mask[2], np.array([0, 0, 0, 0]))
self.assertAllEqual(
chunks[3].attn_relpos[2], np.array([0, 0, 0, 0, 0, 0, 0, 0])
)
# Attention: <pad> -> nothing
self.assertAllEqual(chunks[3].attn_mask[3], np.array([0, 0, 0, 0]))
self.assertAllEqual(
chunks[3].attn_relpos[3], np.array([0, 0, 0, 0, 0, 0, 0, 0])
)
self.assertAllEqual(chunks[3].memory_attn_mask[3], np.array([0, 0, 0, 0]))
def test_stack_compose_double_closing_nt_smartmem(self):
"""Runs stack/compose code on known inputs, compares with gold outputs.
With "smart" memory, i.e. updating the memory so that tokens that won't
be attended to in the future are not added to the memory at all.
"""
inputs, labels, inputs_ttypes, labels_ttypes = self._data()
maskrules = cpp_masking.StackComposeDoubleClosingNT(
sequence_length=4,
memory_length=4,
use_different_attn_fns=True,
gather_into_new_memory=True,
)
chunks = maskrules.chunks_for_sequence(
inputs, inputs_ttypes, labels, labels_ttypes
)
chunks = [types.Chunk(None, *chunk) for chunk in chunks]
for chunk in chunks:
logging.info("Got chunk: %s", repr(chunk))
actual_t_inputs = np.concatenate([chunk.inputs for chunk in chunks], axis=0)
expected_t_inputs = np.array([
1, # <s>
2, # (S
3, # (NP
4, # the
5, # hungry
6, # cat
7, # NP)
7, # NP)
8, # (VP
9, # meows
        10, # VP)
        10, # VP)
11, # S)
11, # S)
0, # <pad>
0, # <pad>
])
logging.info("Actual transformed inputs: %s", repr(actual_t_inputs))
self.assertAllEqual(expected_t_inputs, actual_t_inputs)
actual_t_labels = np.concatenate([chunk.labels for chunk in chunks], axis=0)
expected_t_labels = np.array([
2, # (S
3, # (NP
4, # the
5, # hungry
6, # cat
7, # NP)
0, # <pad> !
8, # (VP
9, # meows
        10, # VP)
0, # <pad> !
11, # S)
0, # <pad> !
0, # <pad>
0, # <pad>
0, # <pad>
])
logging.info("Actual transformed labels: %s", repr(actual_t_labels))
self.assertAllEqual(expected_t_labels, actual_t_labels)
# Sequence padded to length 16, so 4 chunks of size 4
self.assertLen(chunks, 4)
self.assertAllEqual(chunks[0].inputs, np.array([1, 2, 3, 4]))
self.assertAllEqual(
chunks[0].inputs_ttypes,
np.array([mc.OPENING_NT, mc.OPENING_NT, mc.OPENING_NT, mc.TERMINAL]),
)
self.assertAllEqual(chunks[0].labels, np.array([2, 3, 4, 5]))
self.assertAllEqual(
chunks[0].labels_ttypes,
np.array([mc.OPENING_NT, mc.OPENING_NT, mc.TERMINAL, mc.TERMINAL]),
)
self.assertAllEqual(chunks[0].attn_indicator, np.array([0, 0, 0, 0]))
self.assertAllEqual(chunks[0].memory_padding_mask, np.array([0, 0, 0, 0]))
self.assertAllEqual(chunks[0].memory_pos, np.array([-1, -1, -1, -1]))
self.assertAllEqual(chunks[0].depth, np.array([0, 1, 2, 3]))
self.assertAllEqual(chunks[0].beginning_of_seq, np.array(1))
self.assertAllEqual(chunks[0].end_of_seq, np.array(0))
self.assertAllEqual(
chunks[0].smartmem_mem_from_seq, np.eye(4, dtype=np.int32)
)
self.assertAllEqual(
chunks[0].smartmem_mem_from_mem, np.zeros((4, 4), dtype=np.int32)
)
# Stack attention: <s> -> <s>
self.assertAllEqual(chunks[0].attn_mask[0], np.array([1, 0, 0, 0]))
self.assertAllEqual(
chunks[0].attn_relpos[0], np.array([0, 0, 0, 0, 0, 0, 0, 0])
)
self.assertAllEqual(chunks[0].memory_attn_mask[0], np.array([0, 0, 0, 0]))
# Stack attention: (S -> <s> (S
self.assertAllEqual(chunks[0].attn_mask[1], np.array([1, 1, 0, 0]))
self.assertAllEqual(
chunks[0].attn_relpos[1], np.array([0, 0, 0, 0, 1, 0, 0, 0])
)
self.assertAllEqual(chunks[0].memory_attn_mask[1], np.array([0, 0, 0, 0]))
# Stack attention: (NP -> <s> (S (NP
self.assertAllEqual(chunks[0].attn_mask[2], np.array([1, 1, 1, 0]))
self.assertAllEqual(
chunks[0].attn_relpos[2], np.array([0, 0, 0, 0, 2, 1, 0, 0])
)
self.assertAllEqual(chunks[0].memory_attn_mask[2], np.array([0, 0, 0, 0]))
# Stack attention: the -> <s> (S (NP the
self.assertAllEqual(chunks[0].attn_mask[3], np.array([1, 1, 1, 1]))
self.assertAllEqual(
chunks[0].attn_relpos[3], np.array([0, 0, 0, 0, 3, 2, 1, 0])
)
self.assertAllEqual(chunks[0].memory_attn_mask[3], np.array([0, 0, 0, 0]))
self.assertAllEqual(chunks[1].inputs, np.array([5, 6, 7, 7]))
self.assertAllEqual(
chunks[1].inputs_ttypes,
np.array([mc.TERMINAL, mc.TERMINAL, mc.CLOSING_NT, mc.CLOSING_NT_2]),
)
self.assertAllEqual(chunks[1].labels, np.array([6, 7, 0, 8]))
self.assertAllEqual(
chunks[1].labels_ttypes,
np.array([mc.TERMINAL, mc.CLOSING_NT, mc.PAD, mc.OPENING_NT]),
)
self.assertAllEqual(chunks[1].attn_indicator, np.array([0, 0, 1, 0]))
self.assertAllEqual(chunks[1].memory_padding_mask, np.array([1, 1, 1, 1]))
self.assertAllEqual(chunks[1].memory_pos, np.array([0, 1, 2, 3]))
self.assertAllEqual(chunks[1].depth, np.array([3, 3, 2, 2]))
self.assertAllEqual(chunks[1].beginning_of_seq, np.array(0))
self.assertAllEqual(chunks[1].end_of_seq, np.array(0))
self.assertAllEqual(
chunks[1].smartmem_mem_from_seq,
np.array(
[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 1], [0, 0, 0, 0]],
dtype=np.int32,
),
)
self.assertAllEqual(
chunks[1].smartmem_mem_from_mem,
np.array(
[[0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
dtype=np.int32,
),
)
# Stack attention: hungry -> [<s> (S (NP the] hungry
self.assertAllEqual(chunks[1].attn_mask[0], np.array([1, 0, 0, 0]))
self.assertAllEqual(
chunks[1].attn_relpos[0], np.array([3, 2, 1, 0, 0, 0, 0, 0])
)
self.assertAllEqual(chunks[1].memory_attn_mask[0], np.array([1, 1, 1, 1]))
# Stack attention: cat -> [<s> (S (NP the] hungry cat
self.assertAllEqual(chunks[1].attn_mask[1], np.array([1, 1, 0, 0]))
self.assertAllEqual(
chunks[1].attn_relpos[1], np.array([3, 2, 1, 0, 0, 0, 0, 0])
)
self.assertAllEqual(chunks[1].memory_attn_mask[1], np.array([1, 1, 1, 1]))
# COMPOSE attention: NP) -> [(NP the] hungry cat NP)
self.assertAllEqual(chunks[1].attn_mask[2], np.array([1, 1, 1, 0]))
self.assertAllEqual(
chunks[1].attn_relpos[2], np.array([0, 0, 0, -1, -1, -1, 0, 0])
)
self.assertAllEqual(chunks[1].memory_attn_mask[2], np.array([0, 0, 1, 1]))
    # Stack attention: NP) -> [<s> (S] NP) NP)
self.assertAllEqual(chunks[1].attn_mask[3], np.array([0, 0, 1, 1]))
self.assertAllEqual(
chunks[1].attn_relpos[3], np.array([2, 1, 0, 0, 0, 0, 0, 0])
)
self.assertAllEqual(chunks[1].memory_attn_mask[3], np.array([1, 1, 0, 0]))
self.assertAllEqual(chunks[2].inputs, np.array([8, 9, 10, 10]))
self.assertAllEqual(
chunks[2].inputs_ttypes,
np.array([mc.OPENING_NT, mc.TERMINAL, mc.CLOSING_NT, mc.CLOSING_NT_2]),
)
self.assertAllEqual(chunks[2].labels, np.array([9, 10, 0, 11]))
self.assertAllEqual(
chunks[2].labels_ttypes,
np.array([mc.TERMINAL, mc.CLOSING_NT, mc.PAD, mc.CLOSING_NT]),
)
self.assertAllEqual(chunks[2].attn_indicator, np.array([0, 0, 1, 0]))
self.assertAllEqual(chunks[2].memory_padding_mask, np.array([0, 1, 1, 1]))
self.assertAllEqual(chunks[2].memory_pos, np.array([-1, 0, 1, 6]))
self.assertAllEqual(chunks[2].depth, np.array([2, 3, 2, 2]))
self.assertAllEqual(chunks[2].beginning_of_seq, np.array(0))
self.assertAllEqual(chunks[2].end_of_seq, np.array(0))
self.assertAllEqual(
chunks[2].smartmem_mem_from_seq,
np.array(
[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 1], [0, 0, 0, 0]],
dtype=np.int32,
),
)
self.assertAllEqual(
chunks[2].smartmem_mem_from_mem,
np.array(
[[0, 0, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]],
dtype=np.int32,
),
)
# Stack attention: (VP -> [<s> (S NP)] (VP
self.assertAllEqual(chunks[2].attn_mask[0], np.array([1, 0, 0, 0]))
self.assertAllEqual(
chunks[2].attn_relpos[0], np.array([0, 2, 1, 0, 0, 0, 0, 0])
)
self.assertAllEqual(chunks[2].memory_attn_mask[0], np.array([0, 1, 1, 1]))
# Stack attention: meows -> [<s> (S NP)] (VP
self.assertAllEqual(chunks[2].attn_mask[1], np.array([1, 1, 0, 0]))
self.assertAllEqual(
chunks[2].attn_relpos[1], np.array([0, 3, 2, 1, 1, 0, 0, 0])
)
self.assertAllEqual(chunks[2].memory_attn_mask[1], np.array([0, 1, 1, 1]))
# COMPOSE attention: VP) -> (VP meows VP)
self.assertAllEqual(chunks[2].attn_mask[2], np.array([1, 1, 1, 0]))
self.assertAllEqual(
chunks[2].attn_relpos[2], np.array([0, 0, 0, 0, 0, -1, 0, 0])
)
self.assertAllEqual(chunks[2].memory_attn_mask[2], np.array([0, 0, 0, 0]))
# Stack attention: VP) -> [<s> (S NP)] (VP VP)
self.assertAllEqual(chunks[2].attn_mask[3], np.array([0, 0, 1, 1]))
self.assertAllEqual(
chunks[2].attn_relpos[3], np.array([0, 2, 1, 0, 0, 0, 0, 0])
)
self.assertAllEqual(chunks[2].memory_attn_mask[3], np.array([0, 1, 1, 1]))
self.assertAllEqual(chunks[3].inputs, np.array([11, 11, 0, 0]))
self.assertAllEqual(
chunks[3].inputs_ttypes,
np.array([mc.CLOSING_NT, mc.CLOSING_NT_2, mc.PAD, mc.PAD]),
)
self.assertAllEqual(chunks[3].labels, np.array([0, 0, 0, 0]))
self.assertAllEqual(
chunks[3].labels_ttypes, np.array([mc.PAD, mc.PAD, mc.PAD, mc.PAD])
)
self.assertAllEqual(chunks[3].attn_indicator, np.array([1, 0, 0, 0]))
self.assertAllEqual(chunks[3].memory_padding_mask, np.array([1, 1, 1, 1]))
self.assertAllEqual(chunks[3].memory_pos, np.array([0, 1, 6, 10]))
self.assertAllEqual(chunks[3].depth, np.array([1, 1, 0, 0]))
self.assertAllEqual(chunks[3].beginning_of_seq, np.array(0))
self.assertAllEqual(chunks[3].end_of_seq, np.array(1))
self.assertAllEqual(
chunks[3].smartmem_mem_from_seq,
np.array(
[[0, 0, 0, 1], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
dtype=np.int32,
),
)
self.assertAllEqual(
chunks[3].smartmem_mem_from_mem,
np.array(
[[0, 0, 1, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
dtype=np.int32,
),
)
# COMPOSE attention: S) -> [(S NP) VP)] S)
self.assertAllEqual(chunks[3].attn_mask[0], np.array([1, 0, 0, 0]))
self.assertAllEqual(
chunks[3].attn_relpos[0], np.array([0, 0, -1, -1, 0, 0, 0, 0])
)
self.assertAllEqual(chunks[3].memory_attn_mask[0], np.array([0, 1, 1, 1]))
# Stack attention: S) -> [<s>] S) S)
self.assertAllEqual(chunks[3].attn_mask[1], np.array([1, 1, 0, 0]))
self.assertAllEqual(
chunks[3].attn_relpos[1], np.array([1, 0, 0, 0, 0, 0, 0, 0])
)
self.assertAllEqual(chunks[3].memory_attn_mask[1], np.array([1, 0, 0, 0]))
# Attention: <pad> -> nothing
self.assertAllEqual(chunks[3].attn_mask[2], np.array([0, 0, 0, 0]))
self.assertAllEqual(chunks[3].memory_attn_mask[2], np.array([0, 0, 0, 0]))
self.assertAllEqual(
chunks[3].attn_relpos[2], np.array([0, 0, 0, 0, 0, 0, 0, 0])
)
# Attention: <pad> -> nothing
self.assertAllEqual(chunks[3].attn_mask[3], np.array([0, 0, 0, 0]))
self.assertAllEqual(
chunks[3].attn_relpos[3], np.array([0, 0, 0, 0, 0, 0, 0, 0])
)
self.assertAllEqual(chunks[3].memory_attn_mask[3], np.array([0, 0, 0, 0]))
  def test_txl(self):
    """Runs TXL masking code on known inputs, compares with gold outputs."""
inputs, labels, inputs_ttypes, labels_ttypes = self._data()
maskrules = cpp_masking.TXLCausalMasking(sequence_length=4, memory_length=4)
chunks = maskrules.chunks_for_sequence(
inputs, inputs_ttypes, labels, labels_ttypes
)
chunks = [types.Chunk(None, *chunk) for chunk in chunks]
# Sequence padded to length 12, so 3 chunks of size 4
self.assertLen(chunks, 3)
for chunk in chunks:
logging.info("Got chunk: %s", repr(chunk))
actual_inputs = np.concatenate([chunk.inputs for chunk in chunks], axis=0)
expected_inputs = np.array([
1, # <s>
2, # (S
3, # (NP
4, # the
5, # hungry
6, # cat
7, # NP)
8, # (VP
9, # meows
      10,  # VP)
11, # S)
0, # <pad>
])
logging.info("Actual inputs: %s", repr(actual_inputs))
self.assertAllEqual(expected_inputs, actual_inputs)
actual_labels = np.concatenate([chunk.labels for chunk in chunks], axis=0)
expected_labels = np.array([
2, # (S
3, # (NP
4, # the
5, # hungry
6, # cat
7, # NP)
8, # (VP
9, # meows
      10,  # VP)
11, # S)
0, # <pad>
0, # <pad>
])
logging.info("Actual labels: %s", repr(actual_labels))
self.assertAllEqual(expected_labels, actual_labels)
self.assertAllEqual(chunks[0].inputs, np.array([1, 2, 3, 4]))
self.assertAllEqual(
chunks[0].inputs_ttypes,
np.array([mc.OPENING_NT, mc.OPENING_NT, mc.OPENING_NT, mc.TERMINAL]),
)
self.assertAllEqual(chunks[0].labels, np.array([2, 3, 4, 5]))
self.assertAllEqual(
chunks[0].labels_ttypes,
np.array([mc.OPENING_NT, mc.OPENING_NT, mc.TERMINAL, mc.TERMINAL]),
)
self.assertAllEqual(chunks[0].attn_indicator, np.array([0, 0, 0, 0]))
self.assertAllEqual(chunks[0].memory_padding_mask, np.array([0, 0, 0, 0]))
self.assertAllEqual(chunks[0].memory_pos, np.array([-1, -1, -1, -1]))
self.assertAllEqual(chunks[0].depth, np.array([0, 0, 0, 0]))
self.assertAllEqual(chunks[0].beginning_of_seq, np.array(1))
self.assertAllEqual(chunks[0].end_of_seq, np.array(0))
self.assertAllEqual(
chunks[0].smartmem_mem_from_seq, np.eye(4, dtype=np.int32)
)
self.assertAllEqual(
chunks[0].smartmem_mem_from_mem, np.zeros((4, 4), dtype=np.int32)
)
self.assertAllEqual(chunks[0].attn_mask[0], np.array([1, 0, 0, 0]))
self.assertAllEqual(
chunks[0].attn_relpos[0], np.array([0, 0, 0, 0, 0, 0, 0, 0])
)
self.assertAllEqual(chunks[0].memory_attn_mask[0], np.array([0, 0, 0, 0]))
self.assertAllEqual(chunks[0].attn_mask[1], np.array([1, 1, 0, 0]))
self.assertAllEqual(
chunks[0].attn_relpos[1], np.array([0, 0, 0, 0, 1, 0, 0, 0])
)
self.assertAllEqual(chunks[0].memory_attn_mask[1], np.array([0, 0, 0, 0]))
self.assertAllEqual(chunks[0].attn_mask[2], np.array([1, 1, 1, 0]))
self.assertAllEqual(
chunks[0].attn_relpos[2], np.array([0, 0, 0, 0, 2, 1, 0, 0])
)
self.assertAllEqual(chunks[0].memory_attn_mask[2], np.array([0, 0, 0, 0]))
self.assertAllEqual(chunks[0].attn_mask[3], np.array([1, 1, 1, 1]))
self.assertAllEqual(
chunks[0].attn_relpos[3], np.array([0, 0, 0, 0, 3, 2, 1, 0])
)
self.assertAllEqual(chunks[0].memory_attn_mask[3], np.array([0, 0, 0, 0]))
self.assertAllEqual(chunks[1].inputs, np.array([5, 6, 7, 8]))
self.assertAllEqual(
chunks[1].inputs_ttypes,
np.array([mc.TERMINAL, mc.TERMINAL, mc.CLOSING_NT, mc.OPENING_NT]),
)
self.assertAllEqual(chunks[1].labels, np.array([6, 7, 8, 9]))
self.assertAllEqual(
chunks[1].labels_ttypes,
np.array([mc.TERMINAL, mc.CLOSING_NT, mc.OPENING_NT, mc.TERMINAL]),
)
self.assertAllEqual(chunks[1].attn_indicator, np.array([0, 0, 0, 0]))
self.assertAllEqual(chunks[1].memory_padding_mask, np.array([1, 1, 1, 1]))
self.assertAllEqual(chunks[1].memory_pos, np.array([0, 1, 2, 3]))
self.assertAllEqual(chunks[1].depth, np.array([0, 0, 0, 0]))
self.assertAllEqual(chunks[1].beginning_of_seq, np.array(0))
self.assertAllEqual(chunks[1].end_of_seq, np.array(0))
self.assertAllEqual(
chunks[1].smartmem_mem_from_seq, np.eye(4, dtype=np.int32)
)
self.assertAllEqual(
chunks[1].smartmem_mem_from_mem, np.zeros((4, 4), dtype=np.int32)
)
self.assertAllEqual(chunks[1].attn_mask[0], np.array([1, 0, 0, 0]))
self.assertAllEqual(
chunks[1].attn_relpos[0], np.array([4, 3, 2, 1, 0, 0, 0, 0])
)
self.assertAllEqual(chunks[1].memory_attn_mask[0], np.array([1, 1, 1, 1]))
self.assertAllEqual(chunks[1].attn_mask[1], np.array([1, 1, 0, 0]))
self.assertAllEqual(
chunks[1].attn_relpos[1], np.array([5, 4, 3, 2, 1, 0, 0, 0])
)
self.assertAllEqual(chunks[1].memory_attn_mask[1], np.array([1, 1, 1, 1]))
self.assertAllEqual(chunks[1].attn_mask[2], np.array([1, 1, 1, 0]))
self.assertAllEqual(
chunks[1].attn_relpos[2], np.array([6, 5, 4, 3, 2, 1, 0, 0])
)
self.assertAllEqual(chunks[1].memory_attn_mask[2], np.array([1, 1, 1, 1]))
self.assertAllEqual(chunks[1].attn_mask[3], np.array([1, 1, 1, 1]))
self.assertAllEqual(
chunks[1].attn_relpos[3], np.array([7, 6, 5, 4, 3, 2, 1, 0])
)
self.assertAllEqual(chunks[1].memory_attn_mask[3], np.array([1, 1, 1, 1]))
self.assertAllEqual(chunks[2].inputs, np.array([9, 10, 11, 0]))
self.assertAllEqual(
chunks[2].inputs_ttypes,
np.array([mc.TERMINAL, mc.CLOSING_NT, mc.CLOSING_NT, mc.PAD]),
)
self.assertAllEqual(chunks[2].labels, np.array([10, 11, 0, 0]))
self.assertAllEqual(
chunks[2].labels_ttypes,
np.array([mc.CLOSING_NT, mc.CLOSING_NT, mc.PAD, mc.PAD]),
)
self.assertAllEqual(chunks[2].attn_indicator, np.array([0, 0, 0, 0]))
self.assertAllEqual(chunks[2].memory_padding_mask, np.array([1, 1, 1, 1]))
self.assertAllEqual(chunks[2].memory_pos, np.array([4, 5, 6, 7]))
self.assertAllEqual(chunks[2].depth, np.array([0, 0, 0, 0]))
self.assertAllEqual(chunks[2].beginning_of_seq, np.array(0))
self.assertAllEqual(chunks[2].end_of_seq, np.array(1))
self.assertAllEqual(
chunks[2].smartmem_mem_from_seq, np.eye(4, dtype=np.int32)
)
self.assertAllEqual(
chunks[2].smartmem_mem_from_mem, np.zeros((4, 4), dtype=np.int32)
)
self.assertAllEqual(chunks[2].attn_mask[0], np.array([1, 0, 0, 0]))
self.assertAllEqual(
chunks[2].attn_relpos[0], np.array([4, 3, 2, 1, 0, 0, 0, 0])
)
self.assertAllEqual(chunks[2].memory_attn_mask[0], np.array([1, 1, 1, 1]))
self.assertAllEqual(chunks[2].attn_mask[1], np.array([1, 1, 0, 0]))
self.assertAllEqual(
chunks[2].attn_relpos[1], np.array([5, 4, 3, 2, 1, 0, 0, 0])
)
self.assertAllEqual(chunks[2].memory_attn_mask[1], np.array([1, 1, 1, 1]))
self.assertAllEqual(chunks[2].attn_mask[2], np.array([1, 1, 1, 0]))
self.assertAllEqual(
chunks[2].attn_relpos[2], np.array([6, 5, 4, 3, 2, 1, 0, 0])
)
self.assertAllEqual(chunks[2].memory_attn_mask[2], np.array([1, 1, 1, 1]))
self.assertAllEqual(chunks[2].attn_mask[3], np.array([1, 1, 1, 1]))
self.assertAllEqual(
chunks[2].attn_relpos[3], np.array([7, 6, 5, 4, 3, 2, 1, 0])
)
self.assertAllEqual(chunks[2].memory_attn_mask[3], np.array([1, 1, 1, 1]))
| transformer_grammars-main | transformer_grammars/models/masking/cpp_masking_test.py |
# Copyright 2021-2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Types used by the masking rules."""
import collections
Chunk = collections.namedtuple(
"Chunk",
[
"seq_idx",
"inputs",
"inputs_ttypes",
"labels",
"labels_ttypes",
"attn_mask",
"attn_relpos",
"attn_indicator",
"memory_attn_mask",
"memory_padding_mask",
"memory_pos",
"depth",
"beginning_of_seq",
"end_of_seq",
"smartmem_mem_from_seq",
"smartmem_mem_from_mem",
],
)
| transformer_grammars-main | transformer_grammars/models/masking/masking_types.py |
# Copyright 2021-2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Constants for masking code."""
import enum
PAD = 0
SOS = 1
OPENING_NT = 2
TERMINAL = 3
CLOSING_NT = 4
PLACEHOLDER = 5
TOKEN_TYPES = [PAD, SOS, OPENING_NT, TERMINAL, CLOSING_NT]
class TokenTypesEnum(enum.IntEnum):
PAD = PAD
SOS = SOS
OPENING_NT = OPENING_NT
TERMINAL = TERMINAL
CLOSING_NT = CLOSING_NT
PLACEHOLDER = PLACEHOLDER
# For proposals involving duplicating tokens.
CLOSING_NT_2 = 14
| transformer_grammars-main | transformer_grammars/models/masking/constants.py |
# Copyright 2021-2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
| transformer_grammars-main | transformer_grammars/models/masking/__init__.py |
# Copyright 2021-2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Masks for Transformer Grammars models."""
import dataclasses
from typing import Optional, Tuple, Union
from absl import logging
import jax.numpy as jnp
import numpy as np
from transformer_grammars.data import constants
from transformer_grammars.models.masking import constants as mc
from transformer_grammars.models.masking import cpp_masking as mcpp
def _in_range(range_, arr, np_=jnp):
min_, max_ = range_
return np_.logical_and(np_.greater_equal(arr, min_), np_.less(arr, max_))
def _interval_from_list(l):
minval = min(l)
maxval = max(l)
if len(set(l)) != len(l):
raise ValueError("The list contains duplicated elements.")
# No duplicated elements.
if set(range(minval, maxval + 1)) == set(l):
return (minval, maxval + 1)
raise ValueError(
"The values in the list do not exactly correspond to an interval."
)
@dataclasses.dataclass(frozen=True)
class TokenTypeRanges:
"""Mapping between token IDs ranges to token types."""
start_token: int
pad_token: int
end_token: int
placeholder_token: Optional[int]
opening_non_terminals: Tuple[int, int]
closing_non_terminals: Tuple[int, int]
terminals: Tuple[int, int]
has_extra_untyped_closing_non_terminal: bool
vocab_size: int
@classmethod
def from_dictionary_metadata(
cls,
*,
num_reserved,
num_terminals,
num_opening_non_terminals,
num_closing_non_terminals,
extra_untyped_closing_non_terminal,
):
"""Returns ranges from dictionary metadata."""
if num_reserved < 4:
raise ValueError("At least 4 reserved tokens are required.")
terminals_start = num_reserved
terminals_end = num_reserved + num_terminals
opening_nt_start = terminals_end
opening_nt_end = opening_nt_start + num_opening_non_terminals
closing_nt_start = opening_nt_end
closing_nt_end = closing_nt_start + num_closing_non_terminals
vocab_size = closing_nt_end + (
1 if extra_untyped_closing_non_terminal else 0
)
return cls(
start_token=constants.BOS,
pad_token=constants.PAD,
end_token=constants.EOS,
terminals=(terminals_start, terminals_end),
opening_non_terminals=(opening_nt_start, opening_nt_end),
closing_non_terminals=(closing_nt_start, closing_nt_end),
placeholder_token=constants.PLACEHOLDER,
has_extra_untyped_closing_non_terminal=extra_untyped_closing_non_terminal,
vocab_size=vocab_size,
)
@classmethod
def from_sentencepiece_vocab(cls, vocab):
return cls(
start_token=vocab.bos,
pad_token=vocab.pad,
end_token=vocab.eos,
placeholder_token=vocab.unk,
opening_non_terminals=_interval_from_list(vocab.opening_nts),
closing_non_terminals=_interval_from_list(vocab.closing_nts),
terminals=_interval_from_list(vocab.terminals + [vocab.whitespace]),
has_extra_untyped_closing_non_terminal=False,
vocab_size=len(vocab.dictionary),
)
def token_type_from_token(
      self, seq: Union[jnp.ndarray, np.ndarray], *, use_jax=True
):
"""Returns an array of token types from an array of token IDs."""
if use_jax:
np_ = jnp
else:
np_ = np
start_token_mask = np_.equal(seq, self.start_token)
pad_token_mask = np_.equal(seq, self.pad_token)
if self.placeholder_token is not None:
placeholder_mask = np_.equal(seq, self.placeholder_token)
else:
placeholder_mask = np_.zeros_like(start_token_mask)
opening_nt_mask = _in_range(self.opening_non_terminals, seq, np_)
closing_nt_mask = _in_range(self.closing_non_terminals, seq, np_)
if self.has_extra_untyped_closing_non_terminal:
closing_nt_mask = np_.logical_or(
closing_nt_mask, np_.equal(self.closing_non_terminals[1], seq)
)
terminal_mask = _in_range(self.terminals, seq, np_)
result = 0
for mask, id_ in zip(
[
start_token_mask,
pad_token_mask,
placeholder_mask,
opening_nt_mask,
closing_nt_mask,
terminal_mask,
],
[
mc.SOS,
mc.PAD,
mc.PLACEHOLDER,
mc.OPENING_NT,
mc.CLOSING_NT,
mc.TERMINAL,
],
):
result += mask.astype(np_.int32) * id_
return result
def token_type_from_token(ranges: TokenTypeRanges, seq: jnp.ndarray):
"""Returns an array of token types from an array of token IDs."""
return ranges.token_type_from_token(seq, use_jax=True)
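# Illustrative usage sketch (added for exposition, not part of the original
# module). The dictionary metadata values below are made up for the example.
def _example_token_types_from_metadata():
  ranges = TokenTypeRanges.from_dictionary_metadata(
      num_reserved=4,
      num_terminals=10,
      num_opening_non_terminals=2,
      num_closing_non_terminals=2,
      extra_untyped_closing_non_terminal=False,
  )
  # Terminals are IDs [4, 14), opening NTs [14, 16), closing NTs [16, 18).
  seq = np.array([1, 14, 4, 16, 0])
  # Expected token types: [mc.SOS, mc.OPENING_NT, mc.TERMINAL, mc.CLOSING_NT,
  # mc.PAD].
  return ranges.token_type_from_token(seq, use_jax=False)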
def get_masking_rules(name, **kwargs):
"""Returns the masking rules instance."""
logging.info("Creating masking rules %s with kwargs=%s", name, repr(kwargs))
if name == "stack_compose_double_closing_nt":
cls = mcpp.StackComposeDoubleClosingNT
elif name == "txl":
cls = mcpp.TXLCausalMasking
else:
raise NotImplementedError
if kwargs is None:
kwargs = dict()
maskrules = cls(**kwargs)
return maskrules
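# Illustrative usage sketch (added for exposition, not part of the original
# module). The constructor kwargs are assumptions based on the accompanying
# tests, and the C++ masking extension must be built for this to run.
def _example_get_masking_rules():
  maskrules = get_masking_rules("txl", sequence_length=4, memory_length=4)
  return maskrules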
| transformer_grammars-main | transformer_grammars/models/masking/utils.py |
# Copyright 2021-2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tree transformations."""
import nltk
from transformer_grammars.data import constants
def tree_from_string(s):
return nltk.Tree.fromstring(s)
def string_from_tree(tree):
return tree._pformat_flat("", "()", False) # pylint: disable=protected-access
def get_terminals(tree):
"""Returns the terminals in a tree."""
for node in tree:
if isinstance(node, str):
yield node
else:
yield from get_terminals(node)
def get_inode_labels(tree):
"""Get labels of non-terminals."""
if isinstance(tree, str):
pass
else:
yield tree.label()
for node in tree:
yield from get_inode_labels(node)
def reverse(tree):
if isinstance(tree, str):
return tree
else:
nodes = [reverse(node) for node in reversed(list(tree))]
return nltk.Tree(tree.label(), nodes)
def replace_leaves(tree, leaves):
it = iter(leaves)
if isinstance(tree, str):
return next(it)
else:
new_nodes = [replace_leaves(node, it) for node in tree]
return nltk.Tree(tree.label(), new_nodes)
def reverse_structure(tree):
rev_tree = reverse(tree)
terminals = get_terminals(tree)
return replace_leaves(rev_tree, terminals)
def drop_pos_tags(tree):
if isinstance(tree, str):
return tree
if len(tree) == 1 and isinstance(tree[0], str):
return tree[0]
return nltk.Tree(tree.label(), [drop_pos_tags(node) for node in tree])
def anonymize_pos_tags(tree):
if isinstance(tree, str):
return tree
if len(tree) == 1 and isinstance(tree[0], str):
return nltk.Tree("XX", [tree[0]])
return nltk.Tree(tree.label(), [anonymize_pos_tags(node) for node in tree])
def _make_left_or_right_branching_binary(labels, leaves_it, leftwards=True):
"""Makes a left/right-branching binary tree with given labels and leaves."""
if not labels:
return next(leaves_it)
if len(labels) == 1:
return nltk.Tree(labels[0], [next(leaves_it), next(leaves_it)])
if leftwards:
subtree = _make_left_or_right_branching_binary(
labels[1:], leaves_it, leftwards
)
right_leaf = next(leaves_it)
return nltk.Tree(labels[0], [subtree, right_leaf])
else:
left_leaf = next(leaves_it) # Get the left leaf before recursing.
subtree = _make_left_or_right_branching_binary(
labels[1:], leaves_it, leftwards
)
return nltk.Tree(labels[0], [left_leaf, subtree])
def make_left_or_right_branching(tree, leftwards):
"""Converts a tree to left-branching (or right-) + trail of words."""
labels = list(get_inode_labels(tree))
leaves = list(get_terminals(tree))
if len(labels) + 1 > len(leaves):
# Set the extra labels, which can't be used for the binary tree, aside.
if len(leaves) == 1:
# Then labels[:-0] doesn't do what we want, which is selecting the whole
# string.
extra_labels = labels
binary_tree_labels = []
else:
extra_labels = labels[: -len(leaves) + 1]
binary_tree_labels = labels[-len(leaves) + 1 :]
else:
extra_labels = []
binary_tree_labels = labels
num_leaves_in_binary_tree = len(binary_tree_labels) + 1
# Some leaves of the tree are going to be part of the binary tree proper, some
# are going to be part of the trail of leaves either on the left or on the
# right, attached to the root.
if leftwards:
binary_tree_leaves_it = iter(leaves[:num_leaves_in_binary_tree])
remaining_leaves = leaves[num_leaves_in_binary_tree:]
else:
binary_tree_leaves_it = iter(leaves[-num_leaves_in_binary_tree:])
remaining_leaves = leaves[:-num_leaves_in_binary_tree]
new_tree = _make_left_or_right_branching_binary(
binary_tree_labels, binary_tree_leaves_it, leftwards
)
if leftwards:
for leaf in remaining_leaves:
new_tree.append(leaf)
else:
for leaf in reversed(remaining_leaves):
new_tree.insert(0, leaf)
for label in reversed(extra_labels):
new_tree = nltk.Tree(label, [new_tree])
assert list(get_terminals(new_tree)) == leaves
assert list(get_inode_labels(new_tree)) == labels
return new_tree
def make_left_branching(tree):
return make_left_or_right_branching(tree, leftwards=True)
def make_right_branching(tree):
return make_left_or_right_branching(tree, leftwards=False)
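# Illustrative usage sketch (added for exposition, not part of the original
# module); the expected outputs are taken from the accompanying unit tests.
def _example_branching_transforms():
  tree = nltk.Tree.fromstring("(S (NP the hungry cat) (VP meows))")
  left = make_left_branching(tree)
  # left == (S (NP (VP the hungry) cat) meows)
  right = make_right_branching(tree)
  # right == (S the (NP hungry (VP cat meows)))
  return left, right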
def transform_sentence(tree, mode):
"""Transforms a tree corresponding to a sentence."""
if mode == constants.TreeTransform.NONE:
return tree
elif mode == constants.TreeTransform.REVERSE:
return reverse_structure(tree)
elif mode == constants.TreeTransform.LEFT_BRANCHING:
return make_left_branching(tree)
elif mode == constants.TreeTransform.RIGHT_BRANCHING:
return make_right_branching(tree)
else:
raise NotImplementedError
| transformer_grammars-main | transformer_grammars/data/transforms.py |
# Copyright 2021-2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SentencePiece utils."""
import collections
import dataclasses
import re
from typing import List
from absl import logging
import numpy as np
TokenID = int
class Dict(object):
"""Dictionary class to convert word types to embedding indices."""
def __init__(self):
"""Initialize the dictionary object.
Args:
Returns:
None.
"""
# Map string to indices.
self.map = collections.defaultdict(lambda: len(self.map))
# Map indices to string using a list of words in the dictionary.
self.map_rev = []
    # Boolean indicating whether the dictionary is frozen.
self.frozen = False
def __len__(self):
"""Obtain the size (number of words) in the current dictionary.
Args:
Returns:
An integer >= 0.
"""
return len(self.map)
def __contains__(self, word):
"""Check whether the dictionary contains a particular word.
Args:
word: A string that may or may not exist in the dictionary.
Returns:
A boolean that specifies whether the word exists in the dictionary.
"""
return word in self.map
def freeze(self):
"""Freeze the dictionary to prevent conversion of new word types.
Args:
Returns:
None.
"""
self.frozen = True
def __getitem__(self, item):
"""Convert a word to its index, or an index to its string word form.
Args:
item: either a string word type or an integer embedding index.
Returns:
either the corresponding embedding index or the string form of the
item.
Raises:
IndexError: converting an out-of-bounds embedding index.
ValueError: wrong argument type or converting a new type when frozen.
"""
if isinstance(item, str):
if self.frozen and item not in self.map:
raise ValueError(f"Converting a new type: {item} when frozen.")
# Retrieve item's embedding index (if existent) or create a new one.
emb_idx = self.map[item]
# Populate the reverse dictionary if necessary.
if emb_idx >= len(self.map_rev):
self.map_rev.append(item)
      # Assert that we can retrieve the correct word given the index.
assert self.map_rev[emb_idx] == item
return emb_idx
elif isinstance(item, (int, np.integer)):
# item is an int, retrieve its string word form.
if item < 0:
raise IndexError("Indices in the dictionary are >= 0")
return self.map_rev[item]
else:
raise ValueError("The passed argument is neither string nor integer.")
def clear(self):
"""Clear the internal dictionary elements.
Args:
Returns:
None.
"""
self.map.clear()
self.map = collections.defaultdict(lambda: len(self.map))
def items(self):
"""Get the iterator over the (key, value) pairs in the Dict object.
Args:
Returns:
An iterator over (key, value) pairs.
"""
return self.map.items()
def values(self):
"""Get the iterator over the (non-unique) values in the Dict object.
Args:
Returns:
An iterator over each value entry in the Dict object.
"""
return self.map.values()
def load_from_file(self, file_obj):
"""Load vocabulary from file.
Args:
file_obj: A file object that represents the vocabulary file.
Returns:
None (the dictionary is populated).
"""
lines_ctr = 0
for line in file_obj:
lines_ctr += 1
word = line.rstrip()
_ = self[word]
logging.info("Read %s lines from the file", lines_ctr)
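# Illustrative usage sketch (added for exposition, not part of the original
# module): round-tripping between word types and embedding indices.
def _example_dict_roundtrip():
  dic = Dict()
  idx = dic["cat"]  # A new type gets the next free index while not frozen.
  assert dic[idx] == "cat"
  dic.freeze()  # After freezing, converting an unseen type raises ValueError.
  return idx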
def _repr_list(l):
min_ = min(l)
max_ = max(l)
if l == list(range(min_, max_ + 1)):
return f"[{min_}, ..., {max_}]"
else:
return repr(l)
@dataclasses.dataclass()
class SentencePieceVocab:
"""SentencePiece vocabulary."""
pad: TokenID
bos: TokenID
eos: TokenID
unk: TokenID
whitespace: TokenID
terminals: List[TokenID]
whitespace_prefixed_terminals: List[TokenID]
opening_nts: List[TokenID]
closing_nts: List[TokenID]
dictionary: Dict
@classmethod
def from_vocab_file(cls, f):
"""Initialises from a SentencePiece .vocab file."""
pad, bos, eos, unk = None, None, None, None
whitespace = None
opening_nts = []
closing_nts = []
terminals = []
whitespace_prefixed_terminals = []
dic = Dict()
for idx, l in enumerate(f):
token, _ = l.rstrip().split("\t")
_ = dic[token]
assert dic[token] == idx
if token == "<pad>":
pad = idx
elif token == "<s>":
bos = idx
elif token == "</s>":
eos = idx
elif token == "<unk>":
unk = idx
elif re.fullmatch(r"\([A-Z]+", token):
opening_nts.append(idx)
elif re.fullmatch(r"[A-Z]+\)", token):
closing_nts.append(idx)
else:
# Terminal, or whitespace.
# NOTE: This is brittle, and valid only with SP models built with the
# default options.
if token == "▁":
whitespace = idx
else:
terminals.append(idx)
if token[0] == "▁":
whitespace_prefixed_terminals.append(idx)
if pad is None:
raise ValueError("Could not find <pad> in the vocab.")
if bos is None:
raise ValueError("Could not find <s> in the vocab.")
if eos is None:
raise ValueError("Could not find </s> in the vocab.")
if unk is None:
raise ValueError("Could not find <unk> in the vocab.")
if whitespace is None:
raise ValueError("Could not find ▁ (whitespace) in the vocab.")
dic.freeze()
return cls(
pad=pad,
bos=bos,
eos=eos,
unk=unk,
whitespace=whitespace,
terminals=terminals,
whitespace_prefixed_terminals=whitespace_prefixed_terminals,
opening_nts=opening_nts,
closing_nts=closing_nts,
dictionary=dic)
def __repr__(self):
return (
f"SentencePieceVocab(pad={self.pad!r}, bos={self.bos!r},"
f" eos={self.eos!r}, unk={self.unk!r},"
f" whitespace={self.whitespace!r},"
f" terminals={_repr_list(self.terminals)},"
f" opening_nts={_repr_list(self.opening_nts)},"
f" closing_nts={_repr_list(self.closing_nts)},"
f" dictionary={self.dictionary!r})"
)
def is_whitespace(self, id_: TokenID) -> bool:
return self.whitespace == id_
def is_terminal(self, id_: TokenID) -> bool:
return id_ in self.terminals
def is_whitespace_prefixed_terminal(self, id_: TokenID) -> bool:
return id_ in self.whitespace_prefixed_terminals
def is_non_terminal(self, id_: TokenID) -> bool:
return id_ in self.opening_nts or id_ in self.closing_nts
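# Illustrative usage sketch (added for exposition, not part of the original
# module). The .vocab path below is hypothetical; each line of such a file is
# "<piece>\t<score>", and token IDs are the 0-based line numbers.
def _example_load_sentencepiece_vocab(vocab_path="/tmp/spm.vocab"):
  with open(vocab_path, "r") as f:
    vocab = SentencePieceVocab.from_vocab_file(f)
  return vocab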
| transformer_grammars-main | transformer_grammars/data/sp_utils.py |
# Copyright 2021-2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils to load word-based or SentencePiece vocabs."""
import json
import os.path
from absl import logging
from transformer_grammars.data import dictionary
from transformer_grammars.data import sp_utils
from transformer_grammars.models.masking import utils as masking_utils
def _read_dictionary(dictionary_fname: str):
dic = dictionary.Dict()
with open(dictionary_fname, "r") as f:
dic.load_from_file(f)
dic.freeze()
return dic
def get_dictionary_and_ranges(fname):
"""Returns dictionary and token ranges from a dictionary (or SPM) file."""
def read_token_types(vocab_fname):
with open(vocab_fname, "r") as f:
vocab = sp_utils.SentencePieceVocab.from_vocab_file(f)
token_type_ranges = masking_utils.TokenTypeRanges.from_sentencepiece_vocab(
vocab
)
return vocab.dictionary, token_type_ranges
if fname.endswith(".model"):
vocab_fname = os.path.splitext(fname)[0] + ".vocab"
return read_token_types(vocab_fname)
elif fname.endswith(".vocab"):
logging.warning(
"get_dictionary_and_ranges should be called with the .model file"
)
return read_token_types(fname)
else:
dic = _read_dictionary(fname)
# Load dictionary metadata
dic_metadata_path = os.path.splitext(fname)[0] + ".json"
with open(dic_metadata_path) as f:
dic_metadata = json.load(f)
token_type_ranges = masking_utils.TokenTypeRanges.from_dictionary_metadata(
**dic_metadata
)
logging.info("Using token ranges:\n%s", repr(token_type_ranges))
return dic, token_type_ranges
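# Illustrative usage sketch (added for exposition, not part of the original
# module). The file names are hypothetical: for "spm.model" the ranges come
# from the sibling "spm.vocab" file, while for a word-level "dict.txt" they
# come from the sibling "dict.json" metadata.
def _example_get_dictionary_and_ranges():
  dic, ranges = get_dictionary_and_ranges("spm.model")
  return dic, ranges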
| transformer_grammars-main | transformer_grammars/data/tokenizer_utils.py |
# Copyright 2021-2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Text processing functions."""
from typing import Iterable, Sequence
from absl import logging
from transformer_grammars.data import sentence
from transformer_grammars.data import sp_utils
def postprocess_token_ids(
ids: Sequence[int], vocab: sp_utils.SentencePieceVocab
) -> Iterable[int]:
"""Removes extra-whitespace from token IDs output by SentencePiece."""
new_ids = []
  # We behave differently depending on whether we are between two words, or
  # within a word.
  between_words = True
for id_ in ids:
if between_words:
if vocab.is_whitespace(id_):
# Do nothing, stay in the same state.
pass
elif vocab.is_non_terminal(id_):
# Emit the token, stay in the same state.
new_ids.append(id_)
elif vocab.is_terminal(id_):
# Emit the token, but add the potentially missing whitespace.
if vocab.is_whitespace_prefixed_terminal(id_):
new_ids.append(id_)
else:
new_ids.append(vocab.whitespace)
new_ids.append(id_)
between_words = False
else:
        logging.warning("Encountered token %d which is neither a terminal nor a "
                        "non-terminal. UNK? Skipping.", id_)
else:
if vocab.is_whitespace(id_):
new_ids.append(id_)
elif vocab.is_terminal(id_):
new_ids.append(id_)
elif vocab.is_non_terminal(id_):
if new_ids[-1] == vocab.whitespace:
# We've already left the previous word, so emit the current token, but
# retrospectively remove the whitespace we left.
new_ids.pop()
new_ids.append(id_)
between_words = True
else:
        logging.warning("Encountered token %d which is neither a terminal nor a "
                        "non-terminal. UNK? Skipping.", id_)
return new_ids
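# Illustrative sketch (added for exposition, not part of the original module)
# of the whitespace normalisation above, using a small hypothetical vocab:
# 4 = "▁" (whitespace), 5 = "▁the", 6 = "cat", 7 = "(NP", 8 = "NP)".
def _example_postprocess_token_ids():
  vocab = sp_utils.SentencePieceVocab(
      pad=0, bos=1, eos=2, unk=3, whitespace=4,
      terminals=[5, 6], whitespace_prefixed_terminals=[5],
      opening_nts=[7], closing_nts=[8], dictionary=None)
  ids = [7, 4, 5, 6, 4, 8]  # "(NP ▁ ▁the cat ▁ NP)"
  # Redundant whitespace pieces around the non-terminals are dropped, giving
  # [7, 5, 6, 8].
  return postprocess_token_ids(ids, vocab)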
def choe_charniak_from_tree(
s: str, has_preterms: bool = True, untyped_closing_terminal: bool = False
):
"""Converts a tree (as a string) to its Choe-Charniak representation."""
sent = sentence.PhraseStructureSentence(s, has_preterms=has_preterms)
return sent.convert_to_choe_charniak(untyped_closing_terminal)
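# Illustrative usage sketch (added for exposition, not part of the original
# module): linearising a tree without preterminals.
def _example_choe_charniak_from_tree():
  tree = "(S (NP the hungry cat) (VP meows))"
  # Returns "(S (NP the hungry cat NP) (VP meows VP) S)".
  return choe_charniak_from_tree(tree, has_preterms=False)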
def convert_to_choe_charniak(
input_fname: str,
output_fname: str,
has_preterms: bool = True,
untyped_closing_terminal: bool = False,
):
"""Given a PTB-style input file, linearise trees to a Choe & Charniak format.
If the input line is a tab-separated sequence of values, the tree is assumed
to be the last one, and the preceding values are copied unchanged to the
output.
Args:
input_fname: string for the PTB-style input file name.
output_fname: string for the PTB-style output file name.
has_preterms: whether the input file has preterminals (POS tags).
untyped_closing_terminal: whether the output should have untyped closing NTs
or not.
Returns:
None.
"""
with open(input_fname, "r") as input_file:
with open(output_fname, "w+") as output_cc:
sent_num = 0
for line in input_file:
line = line.rstrip()
if "\t" in line:
*prefix, line = line.split("\t")
else:
prefix = []
choe_charniak = choe_charniak_from_tree(
line,
has_preterms=has_preterms,
untyped_closing_terminal=untyped_closing_terminal,
)
output_line = "\t".join(prefix + [choe_charniak])
output_cc.write(output_line + "\n")
sent_num += 1
logging.info("Processed %d lines from %s", sent_num, input_fname)
| transformer_grammars-main | transformer_grammars/data/text_processing.py |
# Copyright 2021-2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Shared constants."""
import enum
import re
PLACEHOLDER_TOKEN = "<XXX>"
RESERVED_WORDS = ("<PAD>", "<s>", "</s>", PLACEHOLDER_TOKEN)
OPENING_NON_TERMINAL_REGEXP = re.compile(r"^\([^ ]+$")
CLOSING_NON_TERMINAL_REGEXP = re.compile(r"^[^ ]+\)$")
UNTYPED_CLOSING_NON_TERMINAL = ")"
PAD = 0
BOS = 1
EOS = 2
PLACEHOLDER = 3
class TreeTransform(enum.Enum):
NONE = "none"
REVERSE = "reverse"
LEFT_BRANCHING = "lb"
RIGHT_BRANCHING = "rb"
| transformer_grammars-main | transformer_grammars/data/constants.py |
# Copyright 2021-2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for transformer_grammars.data.transforms."""
import unittest
import nltk
from transformer_grammars.data import transforms
_HUNGRY_CAT = nltk.Tree.fromstring("(S (NP the hungry cat) (VP meows))")
_LOUD_HUNGRY_CAT = nltk.Tree.fromstring(
"(S (NP the hungry cat) (VP meows loudly))"
)
class TransformsTest(unittest.TestCase):
def test_reverse_structure(self):
self.assertEqual(
transforms.reverse_structure(nltk.Tree.fromstring("(A x (B y))")),
nltk.Tree.fromstring("(A (B x) y)"),
)
self.assertEqual(
transforms.reverse_structure(nltk.Tree.fromstring("(A x (B y (C z)))")),
nltk.Tree.fromstring("(A (B (C x) y) z)"),
)
def test_drop_pos_tags(self):
self.assertEqual(
transforms.drop_pos_tags(nltk.Tree.fromstring("(A (B x))")),
nltk.Tree.fromstring("(A x)"),
)
def test_get_terminals(self):
self.assertEqual(
list(transforms.get_terminals(_HUNGRY_CAT)),
["the", "hungry", "cat", "meows"],
)
def test_make_left_branching(self):
self.assertEqual(
transforms.make_left_branching(_HUNGRY_CAT),
nltk.Tree.fromstring("(S (NP (VP the hungry) cat) meows)"),
)
self.assertEqual(
transforms.make_left_branching(_LOUD_HUNGRY_CAT),
nltk.Tree.fromstring("(S (NP (VP the hungry) cat) meows loudly)"),
)
def test_make_right_branching(self):
self.assertEqual(
transforms.make_right_branching(_HUNGRY_CAT),
nltk.Tree.fromstring("(S the (NP hungry (VP cat meows)))"),
)
self.assertEqual(
transforms.make_right_branching(_LOUD_HUNGRY_CAT),
nltk.Tree.fromstring("(S the hungry (NP cat (VP meows loudly)))"),
)
| transformer_grammars-main | transformer_grammars/data/transforms_test.py |
# Copyright 2021-2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
| transformer_grammars-main | transformer_grammars/data/__init__.py |
# Copyright 2021-2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Simple text-based dataset."""
import functools
from typing import Dict, List, Optional, Tuple
import tensorflow.compat.v1 as tf
import tree
BOS_ID = 1
EOS_ID = 2
PREFETCH_COUNT = 32768
def _ints_from_string(add_bos, add_eos, s):
seq = tf.strings.to_number(tf.strings.split(s, ",").values, tf.int32)
parts = []
if add_bos:
parts.append(tf.constant([BOS_ID], shape=(1,), dtype=tf.int32))
parts.append(seq)
if add_eos:
parts.append(tf.constant([EOS_ID], shape=(1,), dtype=tf.int32))
return tf.concat(parts, axis=0)
def _parts_from_tsv_string(fields_spec, s):
d = {}
for name, idx in fields_spec.items():
values = tf.strings.split(s, "\t").values
d[name] = values[idx]
return d
def _repeat_and_shuffle(
ds,
*,
num_epochs,
shuffle,
shuffle_buffer,
sample_without_replacement,
seed,
prefetch_count,
):
"""Apply shuffling to a dataset."""
if shuffle and sample_without_replacement:
ds = ds.shuffle(shuffle_buffer, reshuffle_each_iteration=True, seed=seed)
ds = ds.repeat(num_epochs)
if shuffle and not sample_without_replacement:
ds = ds.shuffle(shuffle_buffer, seed=seed)
ds = ds.prefetch(prefetch_count)
return ds
def _get_shard_from_list(
l: List[str], shard_idx: int, num_shards: int
) -> List[str]:
"""Returns a strided slice from a list."""
if not 0 <= shard_idx < num_shards:
raise ValueError(
f"The shard index {shard_idx:d} is not compatible with the number "
f"of shards {num_shards:d}."
)
if num_shards <= 0:
raise ValueError(
f"The number of shards {num_shards:d} must be positive."
)
if num_shards > len(l):
raise ValueError(
f"Cannot have more shards ({num_shards:d}) than items to shard "
f"({len(l):d})."
)
return sorted(l)[shard_idx::num_shards]
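# Illustrative usage sketch (added for exposition, not part of the original
# module): the file list is sorted, then sliced with a stride of num_shards.
def _example_sharding():
  files = ["c.txt", "a.txt", "b.txt"]
  first = _get_shard_from_list(files, shard_idx=0, num_shards=2)
  # first == ["a.txt", "c.txt"]
  second = _get_shard_from_list(files, shard_idx=1, num_shards=2)
  # second == ["b.txt"]
  return first, second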
def text_dataset(
*,
filenames: List[str],
shuffle: bool,
shuffle_buffer: Optional[int],
return_key: bool,
seed: Optional[int] = None,
shard: Optional[Tuple[int, int]] = None,
) -> tf.data.Dataset:
"""Returns a raw text tf.data.Dataset, with the usual options."""
if len(filenames) >= 2 and return_key and (shuffle or shuffle_buffer):
raise RuntimeError(
"return_key=True with shuffling is not supported for "
"text datasets with more than one input file."
)
if return_key:
num_parallel_reads = 1
else:
num_parallel_reads = 8
if shard is not None:
(shard_idx, num_shards) = shard
filenames = _get_shard_from_list(filenames, shard_idx, num_shards)
filenames_ds = tf.data.Dataset.from_tensor_slices(filenames)
if shuffle and len(filenames) >= 2 and not return_key:
# Shuffle filenames.
filenames_ds = filenames_ds.shuffle(
buffer_size=len(filenames), reshuffle_each_iteration=True, seed=seed
)
return tf.data.TextLineDataset(
filenames=filenames_ds,
buffer_size=(8 * 1024 * 1024), # 8 MB
num_parallel_reads=num_parallel_reads,
)
class PreEncodedTextDataset:
"""Dataset of pre-encoded tokens.
In the single field case, each line is a sequence of comma-separated
integers:
3,4,5
8,19,38
In the multiple fields cases, each line is a tab-separated sequence of
sequences of comma-separated integers:
3,4,5<TAB>3,4,6
8,19,38<TAB>8,20,38
"""
def __init__(
self,
filename: str,
num_samples: Optional[int],
add_bos: bool,
add_eos: bool,
multiple_fields: Optional[Dict[str, int]] = None,
prefetch_count: int = PREFETCH_COUNT,
max_seqlen: Optional[int] = None,
  ):
    """Initialises the PreEncodedTextDataset.
Args:
filename: Name (or sharded filename, or globbing pattern) of the file
        constituting the dataset, i.e. the following are accepted: "foo.txt",
        "foo@10.txt", "/path/to/*.txt"
num_samples: Total number of samples (pairs) in the dataset. Only required
when the dataset is used for validation.
add_bos: Prepend a beginning-of-sentence token (1) to each sequence.
add_eos: Append an end-of-sentence token (2) to each sequence.
multiple_fields: When not None, dict of field names in the output, mapping
to field numbers in the input.
prefetch_count: Number of items to pre-fetch from dataset.
max_seqlen: Optional maximum sequence length. When set, only sequences
strictly shorter than the maximum are kept.
"""
self._num_samples = num_samples
self._add_bos = add_bos
self._add_eos = add_eos
self._multiple_fields = multiple_fields
self._prefetch_count = prefetch_count
self._max_seqlen = max_seqlen
filenames = filename.split(",")
self._filenames = filenames
if not self._filenames:
raise ValueError(f"No filenames corresponding to {filename!s}.")
@property
def num_examples(self):
"""Number of examples."""
return self._num_samples
def raw_dataset(
self,
*,
shuffle: bool,
shuffle_buffer: Optional[int],
sample_without_replacement: bool,
num_epochs: Optional[int] = None,
seed: Optional[int] = None,
) -> tf.data.Dataset:
"""Returns a raw tf.data.Dataset.
In the single field case (self._multiple_fields is None), the dataset
returned contains unbatched, unpadded, sequences of integers of dynamic
shapes.
    In the multiple field case (self._multiple_fields is not None), the
    dataset returned contains dicts with unbatched, unpadded sequences of
    integers of dynamic shapes as values, keyed by the keys of
    self._multiple_fields, such that if output is a dict in the returned
    dataset, output[name] is the sequence at the field position
    self._multiple_fields[name] in the input line.
Args:
shuffle: Whether the dataset is shuffled (True) or not (False).
shuffle_buffer: If applicable, size of the shuffle buffer.
sample_without_replacement: Whether the dataset is shuffled without
replacement (True) or not (False).
num_epochs: If not None, number of epochs to repeat the dataset for.
Otherwise, repeat infinitely.
seed: If not None, seed to use for shuffling.
Returns:
Dataset described previously.
"""
ds = text_dataset(
filenames=self._filenames,
shuffle=shuffle,
shuffle_buffer=shuffle_buffer,
return_key=False,
seed=seed,
)
if self._multiple_fields:
ds = ds.map(
functools.partial(_parts_from_tsv_string, self._multiple_fields)
)
ds = ds.map(
functools.partial(
tree.map_structure,
functools.partial(_ints_from_string, self._add_bos, self._add_eos),
)
)
if self._max_seqlen:
def _keep(item):
return functools.reduce(
tf.logical_and,
[tf.shape(x)[0] < self._max_seqlen for x in tree.flatten(item)],
)
ds = ds.filter(_keep)
ds = ds.cache()
ds = _repeat_and_shuffle(
ds,
num_epochs=num_epochs,
shuffle=shuffle,
shuffle_buffer=shuffle_buffer,
sample_without_replacement=sample_without_replacement,
seed=seed,
prefetch_count=self._prefetch_count,
)
return ds
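# Illustrative usage sketch (added for exposition, not part of the original
# module). The file name and the shuffling options below are made up.
def _example_pre_encoded_text_dataset():
  ds = PreEncodedTextDataset(
      filename="/tmp/train_encoded.txt",
      num_samples=None,
      add_bos=True,
      add_eos=False,
  )
  # Returns a tf.data.Dataset of unbatched, unpadded int32 sequences.
  return ds.raw_dataset(
      shuffle=True,
      shuffle_buffer=1024,
      sample_without_replacement=True,
      seed=0,
  )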
| transformer_grammars-main | transformer_grammars/data/text_dataset.py |
# Copyright 2021-2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Defines a data structure for a sentence.
This class encapsulates various aspects of a sentence:
i. The original string sequence,
ii. The lowercased string sequence, and
iii. The POS tag of each token in the sentence, and
iv. the depth-first traversal of the tree.
UNK-ification is handled separately by the Unkifier class.
"""
from nltk.tree import Tree
class TaggedSentence(object):
"""A list of words with their POS tags."""
def __init__(self, word_tag_pairs, words_dict, unkifier):
"""Initialize from a list of word/tag pairs.
Args:
word_tag_pairs: see docs for nltk.tree.pos()
words_dict: instance of Dict
unkifier: instance of Unkifier
Returns:
None
Raises:
No exceptions.
"""
self.wtp = word_tag_pairs
self._length = len(self.wtp)
# is_test is True, because we don't want to mutate the unkifier's vocabulary
self.unk_toks_str = [unkifier.unkify(w, True) for (w, _) in self.wtp]
self.unk_toks = [words_dict[tok] for tok in self.unk_toks_str]
def __len__(self):
return self._length
def __str__(self):
return " ".join([w + "/" + t for (w, t) in self.wtp])
class PhraseStructureSentence(object):
  """Encapsulates various useful aspects of a phrase-structure sentence."""
def __init__(self, sent_line, has_preterms=True):
"""Initialize the Sentence instance from its phrase-structure trees.
UNK-ification is handled at the TopDownRnngOracle class and not here.
Args:
sent_line: tree string for this sentence, e.g. "(S (NP x) (VP y))".
has_preterms: whether or not preterminal symbols exist on the tree.
Returns:
None
Raises:
ValueError: failure to parse the string to a tree data structure.
"""
self._sent_tree = Tree.fromstring(sent_line.strip())
self.has_preterms = has_preterms
# Cached DFS traversal
self._dfs_traversal = None
# Get the tags and token strings from the constituency tree.
self.tags, self.raw_tokens = self.get_tags_tokens()
# Lowercased tokens are used for pretrained word embedding lookup.
self.lower_tokens = [token.lower() for token in self.raw_tokens]
# Nonterminals are used to obtain the stack representation in the RNNG.
self.nonterminals = self.get_nonterminals()
def get_tags_tokens(self):
"""Given a constituency tree, get all tags (preterminals) and terminals.
Args:
Returns:
A tuple of (tags, tokens), where "tags" is a list of tags and "tokens"
is a list of tokens, and by definition len(tags) == len(tokens)
        For instance, if self.sent_tree =
        "(S (NP (DET The) (JJ hungry) (NN cat)) (VP (VBZ meows)))",
        then the function would return (['DET', 'JJ', 'NN', 'VBZ'],
        ['The', 'hungry', 'cat', 'meows'])
Raises:
AssertionError: The number of terminals and preterminals do not match.
"""
tags = []
tokens = []
for sym_type, symbol in self.dfs_traverse():
if "TERM" not in sym_type:
continue
curr_list = tags if sym_type == "PRETERM" else tokens
curr_list.append(symbol)
if self.has_preterms and len(tags) != len(tokens):
raise AssertionError("Different number of tags and tokens.")
return (tags, tokens) if self.has_preterms else (None, tokens)
def get_nonterminals(self):
"""Given a constituency tree, get all nonterminal symbols.
Args:
Returns:
A list of nonterminal symbols that occur in the sentence.
If self.sent_tree = "(S (NP The hungry cat) (VP meows))",
then the function would return ['S', 'NP', 'VP'].
"""
nonterminals = []
for sym_type, symbol in self.dfs_traverse():
if sym_type != "NT":
continue
nonterminals.append(symbol)
return nonterminals
def _dfs_traverse_recursive(self, tree, output):
"""A recursive function that actually does the depth-first traversal.
Args:
tree: the nltk tree object of the sentence.
output: the temporary output (reused in the recursive call).
Returns:
The list of symbols in the current subtree.
"""
    if isinstance(tree, str):  # Terminal symbol
output.append(("TERM", tree))
else: # Nonterminal or preterminal.
sym_type = "NT"
if tree.height() == 2 and self.has_preterms:
sym_type = "PRETERM"
output.append((sym_type, tree.label()))
if sym_type == "PRETERM": # Preterminals can only have one child.
assert len(tree) == 1
for subtree in tree:
self._dfs_traverse_recursive(subtree, output)
if sym_type == "NT":
output.append(("REDUCE", tree.label()))
return output
def dfs_traverse(self):
"""A generator function that does a depth-first traversal over the tree.
Args:
Yields:
A generator that contains the next symbols in the traversal.
Given "(S (NP (DET The) (JJ hungry) (NN cat)) (VP (VBZ meows)))",
and has_preterms=True, this generator function would return:
-------------------------------------------------------------------------
("NT", "S"),
("NT", "NP"),
("PRETERM", "DET"),
("TERM", "The"),
("PRETERM", "JJ"),
("TERM", "hungry"),
("PRETERM", "NN"),
("TERM", "cat"),
("REDUCE", "NP"),
("NT", "VP"),
("PRETERM", "VBZ"),
("TERM", "meows"),
("REDUCE", "VP"),
("REDUCE", "S")
-------------------------------------------------------------------------
If has_preterms=False, then all the "PRETERM" entries will be ignored.
"""
# If dfs_traverse() has already been called, the result is available in
# self._dfs_traversal.
if self._dfs_traversal is not None:
yield from self._dfs_traversal
else:
# Split the sentence to iterate over the symbols.
output = []
self._dfs_traverse_recursive(self._sent_tree, output)
self._dfs_traversal = output # Cache the value
for sym in output:
yield sym
def _lc_traverse_recursive(self, tree, output):
"""A recursive function that actually does the left-corner traversal.
Args:
tree: the nltk tree object of the sentence.
output: the temporary output (reused in the recursive call).
Returns:
The list of symbols in the current subtree.
"""
    if isinstance(tree, str):  # Terminal symbol
output.append(("TERM", tree))
else: # Nonterminal or preterminal.
sym_type = "NT"
if tree.height() == 2 and self.has_preterms:
sym_type = "PRETERM"
if sym_type == "NT":
self._lc_traverse_recursive(tree[0], output)
output.append((sym_type, tree.label()))
if sym_type == "PRETERM": # Preterminals can only have one child.
assert len(tree) == 1
self._lc_traverse_recursive(tree[0], output)
else:
for subtree in tree[1:]:
self._lc_traverse_recursive(subtree, output)
output.append(("REDUCE", tree.label()))
return output
def lc_traverse(self):
"""A generator function that does a left-corner traversal over the tree.
Args:
Yields:
A generator that contains the next symbols in the traversal.
Given "(S (NP (DET The) (JJ hungry) (NN cat)) (VP (VBZ meows)))",
and has_preterms=True, this generator function would return:
-------------------------------------------------------------------------
("PRETERM", "DET"),
("TERM", "The"),
("NT", "NP"),
("PRETERM", "JJ"),
("TERM", "hungry"),
("PRETERM", "NN"),
("TERM", "cat"),
("REDUCE", "NP"),
("NT", "S"),
("PRETERM", "VBZ"),
("TERM", "meows"),
("NT", "VP"),
("REDUCE", "VP"),
("REDUCE", "S")
-------------------------------------------------------------------------
If has_preterms=False, then all the "PRETERM" entries will be ignored.
"""
# Split the sentence to iterate over the symbols.
output = []
self._lc_traverse_recursive(self._sent_tree, output)
for sym in output:
yield sym
def _bu_traverse_recursive(self, tree, output):
"""A recursive function that actually does the bottom-up traversal.
Args:
tree: the nltk tree object of the sentence.
output: the temporary output (reused in the recursive call).
Returns:
The list of symbols in the current subtree.
"""
    if isinstance(tree, str):  # Terminal symbol
output.append(("TERM", tree))
else: # Nonterminal or preterminal.
sym_type = "NT"
if tree.height() == 2 and self.has_preterms:
sym_type = "PRETERM"
if sym_type == "NT":
for subtree in tree:
self._bu_traverse_recursive(subtree, output)
output.append(("REDUCE", (len(tree), tree.label())))
else: # Preterminals can only have one child.
assert len(tree) == 1 and sym_type == "PRETERM"
output.append((sym_type, tree.label()))
self._bu_traverse_recursive(tree[0], output)
return output
def bu_traverse(self):
"""A generator function that does a bottom-up traversal over the tree.
Args:
Yields:
A generator that contains the next symbols in the traversal.
Given "(S (NP (DET The) (JJ hungry) (NN cat)) (VP (VBZ meows)))",
and has_preterms=True, this generator function would return:
-------------------------------------------------------------------------
("PRETERM", "DET"),
("TERM", "The"),
("PRETERM", "JJ"),
("TERM", "hungry"),
("PRETERM", "NN"),
("TERM", "cat"),
("REDUCE", (3, "NP")),
("PRETERM", "VBZ"),
("TERM", "meows"),
("REDUCE", (1, "VP")),
("REDUCE", (2, "S"))
("STOP", None)
-------------------------------------------------------------------------
If has_preterms=False, then all the "PRETERM" entries will be ignored.
"""
# Split the sentence to iterate over the symbols.
output = []
self._bu_traverse_recursive(self._sent_tree, output)
output.append(("STOP", None))
for sym in output:
yield sym
def convert_to_choe_charniak(self, untyped_closing_terminal=False):
"""Given a tree, convert to a Choe & Charniak format.
Ignores preterminals.
Args:
untyped_closing_terminal: Use untyped closing terminals.
Returns:
A string with the tree in the Choe & Charniak format.
Raises:
ValueError: Unrecognised symbol type beyond NT, REDUCE, TERM, and
PRETERM.
"""
output = []
for sym_type, symbol in self.dfs_traverse():
if sym_type == "PRETERM":
continue
if sym_type == "NT":
output.append("(%s" % symbol)
elif sym_type == "REDUCE" and not untyped_closing_terminal:
output.append("%s)" % symbol)
elif sym_type == "REDUCE" and untyped_closing_terminal:
output.append("X)")
elif sym_type == "TERM":
output.append(symbol)
else:
raise ValueError("Unrecognised symbol type.")
return " ".join(output)
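# Illustrative usage sketch (added for exposition, not part of the original
# module): linearising a sentence with preterminals.
def _example_phrase_structure_sentence():
  sent = PhraseStructureSentence(
      "(S (NP (DET The) (JJ hungry) (NN cat)) (VP (VBZ meows)))")
  assert sent.raw_tokens == ["The", "hungry", "cat", "meows"]
  assert sent.tags == ["DET", "JJ", "NN", "VBZ"]
  # Returns "(S (NP The hungry cat NP) (VP meows VP) S)".
  return sent.convert_to_choe_charniak()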
| transformer_grammars-main | transformer_grammars/data/sentence.py |
# Copyright 2021-2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Data preprocessing between the dataset and the C++ masking rules."""
import functools
import itertools
import operator
import queue
import threading
import time
from typing import Callable, Dict, Generator, Sequence
from absl import logging
import numpy as np
from transformer_grammars.models.masking import masking_types as types
from transformer_grammars.models.masking import utils as masking_utils
import tree
Chunk = types.Chunk
def lshift(arr):
assert len(arr.shape) == 1
arr = arr[1:]
return np.pad(arr, [(0, 1)], mode="constant")
def compute_inputs_and_labels(
inp: Dict[str, np.ndarray], use_untyped_closing_nt_for_labels: bool = False
) -> Dict[str, np.ndarray]:
"""Computes a sequence of observations and a sequence of labels.
Args:
inp: Input dict, containing a NumPy sequence of shape [T] associated to the
key 'seq', and possibly other keys/values.
use_untyped_closing_nt_for_labels: Whether typed (False) or untyped (True)
closing non-terminals are to appear in the labels.
Returns:
Dict with keys 'inputs' and 'labels'.
"""
if use_untyped_closing_nt_for_labels:
raise NotImplementedError
# Be careful not to mutate the input, as the same objects may belong to an
# iterator on which itertools.tee is applied.
inp_ = inp.copy()
for_observation = inp_.pop("for_observation")
for_target = inp_.pop("for_target")
return dict(inputs=for_observation, labels=lshift(for_target), **inp_)
def pad_to_multiple(inp: np.ndarray, seqlen: int) -> np.ndarray:
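  """Zero-pads the leading (time) axis of `inp` up to a multiple of `seqlen`.
  For example (illustrative), an input of shape [5] with seqlen=4 is padded
  to shape [8]; rank-0 inputs are returned unchanged.
  """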
if len(inp.shape) >= 1:
# inp has shape [T, ...]
current_len = inp.shape[0]
    # In Python, a % b is the remainder of the Euclidean division of a by b,
    # even when a is negative, so the result is always non-negative.
padding = -current_len % seqlen
return np.pad(
inp, [(0, padding)] + [(0, 0)] * (len(inp.shape) - 1), "constant"
)
else:
return inp
def compute_token_types(
inp: Dict[str, np.ndarray], ranges: masking_utils.TokenTypeRanges
) -> Dict[str, np.ndarray]:
"""Computes token types using a dictionary."""
for key in ("inputs", "labels"):
if ranges is not None:
# Only ever happens for terminals on PTB
# For CC, we have explicit ranges available, for datasets tokenised with
# SentencePiece, we derive ranges from the .vocab file, so this is very
# much a corner case.
inp[f"{key}_ttypes"] = ranges.token_type_from_token(
inp[key], use_jax=False
)
else:
inp[f"{key}_ttypes"] = np.zeros_like(inp[key])
return inp
def chunks_generator(it, ranges, maskrules) -> Generator[Chunk, None, None]:
"""Yields chunks to be passed to model from enumerated sequences iterator."""
for tpl in it:
item_idx, item = tpl
if not isinstance(item, dict):
item = dict(for_observation=item, for_target=item)
item = compute_inputs_and_labels(item)
item = compute_token_types(item, ranges)
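    # The C++ masking rules split each (inputs, labels) sequence into chunks;
    # every yielded Chunk is tagged with the index of the sequence it came
    # from, so that downstream code can tell which chunks belong together.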
for chunk in maskrules.chunks_for_sequence(
item["inputs"],
item["inputs_ttypes"],
item["labels"],
item["labels_ttypes"],
):
yield Chunk(np.array(item_idx, dtype=np.int32), *chunk)
def _batches_generator(
chunks_gen_callable: Callable[[], Generator[Chunk, None, None]],
shape_prefix: Sequence[int]
) -> Generator[Chunk, None, None]:
"""Yields batches (batched chunks)."""
batch_size = functools.reduce(operator.mul, shape_prefix, 1)
zipped_it = itertools.zip_longest(
*[chunks_gen_callable() for _ in range(batch_size)], fillvalue=None
)
def safe_zipped_gen():
zeroed_example = None
for arrays in zipped_it:
      # Worker threads may pass an exception instead of a batch element so
      # that it can be re-raised in the main thread.
for arr in arrays:
if isinstance(arr, Exception):
raise arr
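      # itertools.zip_longest yields None for exhausted per-element
      # generators; substitute an all-zero chunk whose seq_idx is -1 so that
      # padding chunks can be told apart from real data.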
if zeroed_example is None:
not_none_indices = [
i for (i, arr) in enumerate(arrays) if arr is not None
]
assert not_none_indices
not_none_idx = not_none_indices[0]
not_none_example = arrays[not_none_idx]
zeroed_example = tree.map_structure(np.zeros_like, not_none_example)
zeroed_example = zeroed_example._replace(
seq_idx=zeroed_example.seq_idx - 1
)
arrays = [arr if arr is not None else zeroed_example for arr in arrays]
assert all(arr is not None for arr in arrays)
yield arrays
def stack_and_reshape(arrays):
arr = np.stack(arrays)
    return arr.reshape(tuple(shape_prefix) + tuple(arr.shape[1:]))
for zipped in safe_zipped_gen():
yield tree.map_structure(lambda *args: stack_and_reshape(args), *zipped)
def get_chunks_from_dataset(
it, maskrules, ranges, shape_prefix, multithread, use_monitor_thread=False
) -> Generator[Chunk, None, None]:
"""Generates batches of chunks from sequences from a dataset.
  In multithreaded mode, this creates one producer thread plus as many worker
  threads as the batch size. A batch is composed of elements, each preprocessed
  by its own worker thread.
Args:
it: Iterator over raw dataset elements (either unbatched, unpadded sequences
of ints, or dicts thereof).
maskrules: Masking rules to use.
ranges: Token types ranges.
shape_prefix: Prefix of the shape that comes before the time dimension.
multithread: Whether to use multithreaded mode or not.
use_monitor_thread: Whether to use a monitor thread or not (periodically
logging the number of elements in the queues, useful for debugging).
Yields:
Chunks.
"""
it = enumerate(it)
if not multithread:
chunks_gen_callable = lambda: chunks_generator(it, ranges, maskrules)
yield from _batches_generator(chunks_gen_callable, shape_prefix)
else:
batch_size = functools.reduce(operator.mul, shape_prefix, 1)
in_queue = queue.Queue(maxsize=100)
queues = [queue.Queue(maxsize=5) for _ in range(batch_size)]
def producer():
for x in it:
in_queue.put(x)
def worker(i):
try:
subit = (in_queue.get() for _ in itertools.count())
for chunk in chunks_generator(subit, ranges, maskrules):
queues[i].put(chunk)
except Exception as exc: # pylint: disable=broad-except
queues[i].put(exc)
threading.Thread(target=producer, daemon=True).start()
for i in range(batch_size):
threading.Thread(target=worker, args=(i,), daemon=True).start()
def reader_gen(i):
while True:
yield queues[i].get()
if use_monitor_thread:
def monitor():
while True:
logging.info("in_queue: %d", in_queue.qsize())
for i, q in enumerate(queues):
logging.info("queue[%d]: %d", i, q.qsize())
time.sleep(5)
threading.Thread(target=monitor, daemon=True).start()
reader_gens = [reader_gen(i) for i in range(batch_size)]
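    # list.pop is used as the callable handed to _batches_generator: each of
    # its batch_size calls removes and returns one queue-backed reader
    # generator, so every batch position gets its own reader.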
reader_callables = reader_gens.pop
yield from _batches_generator(reader_callables, shape_prefix)
| transformer_grammars-main | transformer_grammars/data/preprocessing.py |
# Copyright 2021-2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Defines a dictionary data structure.
The dictionary data structure is a bidirectional one that converts:
i. Word types to embedding indices, and
ii. Embedding indices to word types.
The dictionary can be indexed like a native Python dictionary, e.g.
d['cat'] returns 5 and d[5] returns 'cat'.
If a word type does not exist in the dictionary, a new index will be created.
Functions like UNK-ification and freezing are handled
separately by the Unkifier class.
"""
import collections
from absl import logging
import numpy as np
class Dict(object):
"""Dictionary class to convert word types to embedding indices."""
def __init__(self):
"""Initialize the dictionary object.
Args:
Returns:
None.
"""
# Map string to indices.
self.map = collections.defaultdict(lambda: len(self.map))
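    # The defaultdict factory assigns the next unused index (the current map
    # size) to any word type seen for the first time.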
# Map indices to string using a list of words in the dictionary.
self.map_rev = []
    # Boolean to indicate whether the dictionary is frozen.
self.frozen = False
def __len__(self):
"""Obtain the size (number of words) in the current dictionary.
Returns:
An integer >= 0.
"""
return len(self.map)
def __contains__(self, word):
"""Check whether the dictionary contains a particular word.
Args:
word: A string that may or may not exist in the dictionary.
Returns:
A boolean that specifies whether the word exists in the dictionary.
"""
return word in self.map
def freeze(self):
"""Freeze the dictionary to prevent conversion of new word types.
Args:
Returns:
None.
"""
self.frozen = True
def __getitem__(self, item):
"""Convert a word to its index, or an index to its string word form.
Args:
item: either a string word type or an integer embedding index.
Returns:
either the corresponding embedding index or the string form of the
item.
Raises:
IndexError: converting an out-of-bounds embedding index.
ValueError: wrong argument type or converting a new type when frozen.
"""
if isinstance(item, str):
if self.frozen and item not in self.map:
raise ValueError(f"Converting a new type: {item} when frozen.")
# Retrieve item's embedding index (if existent) or create a new one.
emb_idx = self.map[item]
# Populate the reverse dictionary if necessary.
if emb_idx >= len(self.map_rev):
self.map_rev.append(item)
      # Assert that we can retrieve the correct word given the index.
assert self.map_rev[emb_idx] == item
return emb_idx
elif isinstance(item, (int, np.integer)):
# item is an int, retrieve its string word form.
if item < 0:
raise IndexError("Indices in the dictionary are >= 0")
return self.map_rev[item]
else:
raise ValueError("The passed argument is neither string nor integer.")
def clear(self):
"""Clear the internal dictionary elements.
Args:
Returns:
None.
"""
self.map.clear()
self.map = collections.defaultdict(lambda: len(self.map))
def items(self):
"""Get the iterator over the (key, value) pairs in the Dict object.
Returns:
An iterator over (key, value) pairs.
"""
return self.map.items()
def values(self):
"""Get the iterator over the (non-unique) values in the Dict object.
Returns:
An iterator over each value entry in the Dict object.
"""
return self.map.values()
def load_from_file(self, file_obj):
"""Load vocabulary from file.
Args:
file_obj: A file object that represents the vocabulary file.
Returns:
None (the dictionary is populated).
"""
lines_ctr = 0
for line in file_obj:
lines_ctr += 1
word = line.rstrip()
_ = self[word]
logging.info("Read %s lines from the file", lines_ctr)
| transformer_grammars-main | transformer_grammars/data/dictionary.py |
# Copyright 2021-2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Config for language modelling on example data.
This is an example to quickly try the training code -- it does not train the
model to convergence.
"""
import ml_collections as dm_collections
def get_config():
"""Config for training."""
return dm_collections.ConfigDict(
dict(
sentencepiece_vocab_filename="spm/spm.vocab",
# dictionary_metadata_filename='word_based.json',
training=dict(
num_steps=1_000, # Only 1k steps for the example.
clip_grad_norm=3.0,
batch_size=64,
dataset=dict(
name="PreEncodedTextDataset",
kwargs=dict(
filename="data/train.csv",
num_samples=None,
add_bos=True,
add_eos=False, # Not needed for Choe-Charniak.
),
),
optimizer=dict(
name="adam",
kwargs=dict(
b1=0.9,
b2=0.999,
),
),
lr_schedule=dict(
name="linear_warmup_then_cosine_anneal",
kwargs=dict(
start_lr=1e-7,
min_lr=3e-7,
max_lr=1.5e-4,
warmup_steps=8000,
cosine_cycle_length=100_000,
),
),
),
model=dict(
d_model=512,
num_layers=6,
num_heads=8,
ffw_hidden_size=2048,
embedding_dropout=0.1,
core_dropout=0.1,
core_output_dropout=0.1,
sequence_length=256,
memory_length=256,
tied_input_output_embeddings=True,
relative_position_embeddings=1,
tied_layer_weights=0,
# TG settings
extra_attention_mask_name="stack_compose_double_closing_nt",
extra_attention_mask_kwargs=dict(
relative_pos="delta_depth",
# Do not use different STACK/COMPOSE attention weights.
use_different_attn_fns=0,
# Transparency probability
transparency_prob=0.0,
# Smart memory
gather_into_new_memory=1,
# Depth below or at which the node is transparent
# -1 means that it's never transparent.
# <s> has depth 0, (DOC depth 1, so for the top level (S
# to be transparent, we need this to be set to 2
transparency_depth_threshold=-1,
),
min_relative_position=-1,
max_relative_position=62, # So 64 positions possible
# Layer-hybrid
num_unrestricted_layers=0,
# Head-hybrid
num_unrestricted_heads=None,
),
evaluation=dict(
interval_steps=500,
batch_size=8,
sequence_length=256,
dataset=dict(
name="PreEncodedTextDataset",
kwargs=dict(
filename="data/valid.csv",
num_samples=None,
add_bos=True,
add_eos=False, # Not needed for Choe-Charniak.
),
),
),
logging=dict(
interval_steps=10,
),
checkpointing=dict(
interval_steps=500,
),
)
)
| transformer_grammars-main | example/config.py |
# Copyright 2021-2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Example config. Copy into a new file for your training run.
In this config, no TG-specific option is enabled, and the model is effectively
a Transformer-XL.
Remember to set the correct training and validation filenames in the config as
required (search for <<<).
"""
import ml_collections as dm_collections
def get_config(debug=False):
"""Return config object for training."""
def m(default_value, debug_value):
"""Switcher that returns the default or debug value based on debug flag."""
return debug_value if debug else default_value
return dm_collections.ConfigDict(
dict(
sentencepiece_vocab_filename="<<< SP .vocab filename >>>",
training=dict(
# Number of training steps (i.e. number of gradient updates,
# i.e. number of training batches drawn)
num_steps=100_000,
# Gradient clipping
clip_grad_norm=3.0,
# Global (not per-device) batch size
batch_size=64,
dataset=dict(
name="PreEncodedTextDataset",
kwargs=dict(
filename="<<< training .csv filename >>>",
num_samples=None,
add_bos=True,
add_eos=False, # Not needed for Choe-Charniak.
),
),
optimizer=dict(
name="adam",
kwargs=dict(
b1=0.9,
b2=0.999,
),
),
# Learning rate schedule.
lr_schedule=dict(
name="linear_warmup_then_cosine_anneal",
kwargs=dict(
start_lr=1e-7,
min_lr=3e-7,
max_lr=1.5e-4,
warmup_steps=8000,
cosine_cycle_length=100_000,
),
),
),
model=dict(
vocab_size=32768,
d_model=m(1024, 8),
num_layers=m(16, 1),
num_heads=m(8, 8),
ffw_hidden_size=m(4096, 8),
embedding_dropout=0.1,
core_dropout=0.1,
core_output_dropout=0.1,
sequence_length=m(256, 128),
memory_length=m(256, 128),
tied_input_output_embeddings=True,
relative_position_embeddings=1,
tied_layer_weights=0,
),
evaluation=dict(
interval_steps=500,
batch_size=8,
sequence_length=256,
dataset=dict(
name="PreEncodedTextDataset",
kwargs=dict(
filename="<<< validation .csv filename >>>",
num_samples=None,
add_bos=True,
add_eos=False, # Not needed for Choe-Charniak.
),
),
),
logging=dict(
interval_steps=10,
),
checkpointing=dict(
interval_steps=500,
),
)
)
| transformer_grammars-main | configs/config_txl.py |
# Copyright 2021-2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Example config. Copy into a new file for your training run.
In this config, TG-specific mode of operation is enabled, see the
extra_attention_mask_kwargs section.
Remember to set the correct training and validation filenames in the config as
required (search for <<<).
"""
import ml_collections as dm_collections
def get_config(debug=False):
"""Return config object for training."""
def m(default_value, debug_value):
"""Switcher that returns the default or debug value based on debug flag."""
return debug_value if debug else default_value
return dm_collections.ConfigDict(
dict(
sentencepiece_vocab_filename="<<< SP .vocab filename >>>",
training=dict(
# Number of training steps (i.e. number of gradient updates,
# i.e. number of training batches drawn)
num_steps=100_000,
# Gradient clipping
clip_grad_norm=3.0,
# Global (not per-device) batch size
batch_size=64,
dataset=dict(
name="PreEncodedTextDataset",
kwargs=dict(
filename="<<< training .csv filename >>>",
num_samples=None,
add_bos=True,
add_eos=False, # Not needed for Choe-Charniak.
),
),
optimizer=dict(
name="adam",
kwargs=dict(
b1=0.9,
b2=0.999,
),
),
# Learning rate schedule.
lr_schedule=dict(
name="linear_warmup_then_cosine_anneal",
kwargs=dict(
start_lr=1e-7,
min_lr=3e-7,
max_lr=1.5e-4,
warmup_steps=8000,
cosine_cycle_length=100_000,
),
),
),
model=dict(
vocab_size=32768,
d_model=m(1024, 8),
num_layers=m(16, 1),
num_heads=m(8, 8),
ffw_hidden_size=m(4096, 8),
embedding_dropout=0.1,
core_dropout=0.1,
core_output_dropout=0.1,
sequence_length=m(256, 128),
memory_length=m(256, 128),
tied_input_output_embeddings=True,
relative_position_embeddings=1,
tied_layer_weights=0,
# TG settings
extra_attention_mask_name="stack_compose_double_closing_nt",
extra_attention_mask_kwargs=dict(
relative_pos="delta_depth",
# Do not use different STACK/COMPOSE attention weights.
use_different_attn_fns=0,
# Transparency probability
transparency_prob=0.0,
# Smart memory
gather_into_new_memory=1,
# Depth below or at which the node is transparent
# -1 means that it's never transparent.
# <s> has depth 0, (DOC depth 1, so for the top level (S
# to be transparent, we need this to be set to 2
transparency_depth_threshold=-1,
),
min_relative_position=-1,
max_relative_position=62, # So 64 positions possible
# Layer-hybrid
num_unrestricted_layers=0,
# Head-hybrid
num_unrestricted_heads=None,
),
evaluation=dict(
interval_steps=500,
batch_size=8,
sequence_length=256,
dataset=dict(
name="PreEncodedTextDataset",
kwargs=dict(
filename="<<< validation .csv filename >>>",
num_samples=None,
add_bos=True,
add_eos=False, # Not needed for Choe-Charniak.
),
),
),
logging=dict(
interval_steps=10,
),
checkpointing=dict(
interval_steps=500,
),
)
)
| transformer_grammars-main | configs/config_tg.py |
# Copyright 2018 The Interval Bound Propagation Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Setup for pip package."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
from setuptools import find_packages
from setuptools import setup
REQUIRED_PACKAGES = ['six', 'absl-py', 'numpy']
EXTRA_PACKAGES = {
'tensorflow': ['tensorflow>=1.8.0'],
'tensorflow with gpu': ['tensorflow-gpu>=1.8.0'],
'sonnet': ['dm-sonnet>=1.26'],
'sonnet with gpu': ['dm-sonnet-gpu>=1.26'],
}
def ibp_test_suite():
test_loader = unittest.TestLoader()
test_suite = test_loader.discover('interval_bound_propagation/tests',
pattern='*_test.py')
return test_suite
setup(
name='interval_bound_propagation',
version='1.1',
description='A library to train verifiably robust neural networks.',
url='https://github.com/deepmind/interval_bound_propagation',
author='DeepMind',
author_email='[email protected]',
# Contained modules and scripts.
packages=find_packages(),
install_requires=REQUIRED_PACKAGES,
extras_require=EXTRA_PACKAGES,
platforms=['any'],
license='Apache 2.0',
test_suite='setup.ibp_test_suite',
)
| interval-bound-propagation-master | setup.py |
# coding=utf-8
# Copyright 2019 The Interval Bound Propagation Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Trains a verifiable model on Mnist or CIFAR-10."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import flags
from absl import logging
import interval_bound_propagation as ibp
import tensorflow.compat.v1 as tf
FLAGS = flags.FLAGS
flags.DEFINE_enum('dataset', 'mnist', ['mnist', 'cifar10'],
'Dataset (either "mnist" or "cifar10").')
flags.DEFINE_enum('model', 'tiny', ['tiny', 'small', 'medium', 'large'],
'Model size.')
flags.DEFINE_string('output_dir', '/tmp/ibp_model', 'Output directory.')
# Options.
flags.DEFINE_integer('steps', 60001, 'Number of steps in total.')
flags.DEFINE_integer('test_every_n', 2000,
'Number of steps between testing iterations.')
flags.DEFINE_integer('warmup_steps', 2000, 'Number of warm-up steps.')
flags.DEFINE_integer('rampup_steps', 10000, 'Number of ramp-up steps.')
flags.DEFINE_integer('batch_size', 200, 'Batch size.')
flags.DEFINE_float('epsilon', .3, 'Target epsilon.')
flags.DEFINE_float('epsilon_train', .33, 'Train epsilon.')
flags.DEFINE_string('learning_rate', '1e-3,1e-4@15000,1e-5@25000',
'Learning rate schedule of the form: '
                    'initial_learning_rate[,learning@steps]*. E.g., "1e-3" or '
'"1e-3,1e-4@15000,1e-5@25000".')
flags.DEFINE_float('nominal_xent_init', 1.,
'Initial weight for the nominal cross-entropy.')
flags.DEFINE_float('nominal_xent_final', .5,
'Final weight for the nominal cross-entropy.')
flags.DEFINE_float('verified_xent_init', 0.,
'Initial weight for the verified cross-entropy.')
flags.DEFINE_float('verified_xent_final', .5,
'Final weight for the verified cross-entropy.')
flags.DEFINE_float('crown_bound_init', 0.,
'Initial weight for mixing the CROWN bound with the IBP '
'bound in the verified cross-entropy.')
flags.DEFINE_float('crown_bound_final', 0.,
'Final weight for mixing the CROWN bound with the IBP '
'bound in the verified cross-entropy.')
flags.DEFINE_float('attack_xent_init', 0.,
'Initial weight for the attack cross-entropy.')
flags.DEFINE_float('attack_xent_final', 0.,
                   'Final weight for the attack cross-entropy.')
def show_metrics(step_value, metric_values, loss_value=None):
print('{}: {}nominal accuracy = {:.2f}%, '
'verified = {:.2f}%, attack = {:.2f}%'.format(
step_value,
'loss = {}, '.format(loss_value) if loss_value is not None else '',
metric_values.nominal_accuracy * 100.,
metric_values.verified_accuracy * 100.,
metric_values.attack_accuracy * 100.))
def layers(model_size):
"""Returns the layer specification for a given model name."""
if model_size == 'tiny':
return (
('linear', 100),
('activation', 'relu'))
elif model_size == 'small':
return (
('conv2d', (4, 4), 16, 'VALID', 2),
('activation', 'relu'),
('conv2d', (4, 4), 32, 'VALID', 1),
('activation', 'relu'),
('linear', 100),
('activation', 'relu'))
elif model_size == 'medium':
return (
('conv2d', (3, 3), 32, 'VALID', 1),
('activation', 'relu'),
('conv2d', (4, 4), 32, 'VALID', 2),
('activation', 'relu'),
('conv2d', (3, 3), 64, 'VALID', 1),
('activation', 'relu'),
('conv2d', (4, 4), 64, 'VALID', 2),
('activation', 'relu'),
('linear', 512),
('activation', 'relu'),
('linear', 512),
('activation', 'relu'))
elif model_size == 'large':
return (
('conv2d', (3, 3), 64, 'SAME', 1),
('activation', 'relu'),
('conv2d', (3, 3), 64, 'SAME', 1),
('activation', 'relu'),
('conv2d', (3, 3), 128, 'SAME', 2),
('activation', 'relu'),
('conv2d', (3, 3), 128, 'SAME', 1),
('activation', 'relu'),
('conv2d', (3, 3), 128, 'SAME', 1),
('activation', 'relu'),
('linear', 512),
('activation', 'relu'))
else:
raise ValueError('Unknown model: "{}"'.format(model_size))
def main(unused_args):
logging.info('Training IBP on %s...', FLAGS.dataset.upper())
step = tf.train.get_or_create_global_step()
# Learning rate.
learning_rate = ibp.parse_learning_rate(step, FLAGS.learning_rate)
# Dataset.
input_bounds = (0., 1.)
num_classes = 10
if FLAGS.dataset == 'mnist':
data_train, data_test = tf.keras.datasets.mnist.load_data()
else:
assert FLAGS.dataset == 'cifar10', (
'Unknown dataset "{}"'.format(FLAGS.dataset))
data_train, data_test = tf.keras.datasets.cifar10.load_data()
data_train = (data_train[0], data_train[1].flatten())
data_test = (data_test[0], data_test[1].flatten())
data = ibp.build_dataset(data_train, batch_size=FLAGS.batch_size,
sequential=False)
if FLAGS.dataset == 'cifar10':
data = data._replace(image=ibp.randomize(
data.image, (32, 32, 3), expand_shape=(40, 40, 3),
crop_shape=(32, 32, 3), vertical_flip=True))
# Base predictor network.
original_predictor = ibp.DNN(num_classes, layers(FLAGS.model))
predictor = original_predictor
if FLAGS.dataset == 'cifar10':
mean = (0.4914, 0.4822, 0.4465)
std = (0.2023, 0.1994, 0.2010)
predictor = ibp.add_image_normalization(original_predictor, mean, std)
if FLAGS.crown_bound_init > 0 or FLAGS.crown_bound_final > 0:
logging.info('Using CROWN-IBP loss.')
model_wrapper = ibp.crown.VerifiableModelWrapper
loss_helper = ibp.crown.create_classification_losses
else:
model_wrapper = ibp.VerifiableModelWrapper
loss_helper = ibp.create_classification_losses
predictor = model_wrapper(predictor)
# Training.
train_losses, train_loss, _ = loss_helper(
step,
data.image,
data.label,
predictor,
FLAGS.epsilon_train,
loss_weights={
'nominal': {
'init': FLAGS.nominal_xent_init,
'final': FLAGS.nominal_xent_final,
'warmup': FLAGS.verified_xent_init + FLAGS.nominal_xent_init
},
'attack': {
'init': FLAGS.attack_xent_init,
'final': FLAGS.attack_xent_final
},
'verified': {
'init': FLAGS.verified_xent_init,
'final': FLAGS.verified_xent_final,
'warmup': 0.
},
'crown_bound': {
'init': FLAGS.crown_bound_init,
'final': FLAGS.crown_bound_final,
'warmup': 0.
},
},
warmup_steps=FLAGS.warmup_steps,
rampup_steps=FLAGS.rampup_steps,
input_bounds=input_bounds)
saver = tf.train.Saver(original_predictor.get_variables())
optimizer = tf.train.AdamOptimizer(learning_rate)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op = optimizer.minimize(train_loss, step)
# Test using while loop.
def get_test_metrics(batch_size, attack_builder=ibp.UntargetedPGDAttack):
"""Returns the test metrics."""
num_test_batches = len(data_test[0]) // batch_size
assert len(data_test[0]) % batch_size == 0, (
'Test data is not a multiple of batch size.')
def cond(i, *unused_args):
return i < num_test_batches
def body(i, metrics):
"""Compute the sum of all metrics."""
test_data = ibp.build_dataset(data_test, batch_size=batch_size,
sequential=True)
predictor(test_data.image, override=True, is_training=False)
input_interval_bounds = ibp.IntervalBounds(
tf.maximum(test_data.image - FLAGS.epsilon, input_bounds[0]),
tf.minimum(test_data.image + FLAGS.epsilon, input_bounds[1]))
predictor.propagate_bounds(input_interval_bounds)
test_specification = ibp.ClassificationSpecification(
test_data.label, num_classes)
test_attack = attack_builder(predictor, test_specification, FLAGS.epsilon,
input_bounds=input_bounds,
optimizer_builder=ibp.UnrolledAdam)
test_losses = ibp.Losses(predictor, test_specification, test_attack)
test_losses(test_data.label)
new_metrics = []
for m, n in zip(metrics, test_losses.scalar_metrics):
new_metrics.append(m + n)
return i + 1, new_metrics
total_count = tf.constant(0, dtype=tf.int32)
total_metrics = [tf.constant(0, dtype=tf.float32)
for _ in range(len(ibp.ScalarMetrics._fields))]
total_count, total_metrics = tf.while_loop(
cond,
body,
loop_vars=[total_count, total_metrics],
back_prop=False,
parallel_iterations=1)
total_count = tf.cast(total_count, tf.float32)
test_metrics = []
for m in total_metrics:
test_metrics.append(m / total_count)
return ibp.ScalarMetrics(*test_metrics)
test_metrics = get_test_metrics(
FLAGS.batch_size, ibp.UntargetedPGDAttack)
summaries = []
for f in test_metrics._fields:
summaries.append(
tf.summary.scalar(f, getattr(test_metrics, f)))
test_summaries = tf.summary.merge(summaries)
test_writer = tf.summary.FileWriter(os.path.join(FLAGS.output_dir, 'test'))
# Run everything.
tf_config = tf.ConfigProto()
tf_config.gpu_options.allow_growth = True
with tf.train.SingularMonitoredSession(config=tf_config) as sess:
for _ in range(FLAGS.steps):
iteration, loss_value, _ = sess.run(
[step, train_losses.scalar_losses.nominal_cross_entropy, train_op])
if iteration % FLAGS.test_every_n == 0:
metric_values, summary = sess.run([test_metrics, test_summaries])
test_writer.add_summary(summary, iteration)
show_metrics(iteration, metric_values, loss_value=loss_value)
saver.save(sess._tf_sess(), # pylint: disable=protected-access
os.path.join(FLAGS.output_dir, 'model'),
global_step=FLAGS.steps - 1)
if __name__ == '__main__':
app.run(main)
| interval-bound-propagation-master | examples/train.py |
# coding=utf-8
# Copyright 2019 The Interval Bound Propagation Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluates a verifiable model on Mnist or CIFAR-10."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
from absl import logging
import interval_bound_propagation as ibp
import tensorflow.compat.v1 as tf
FLAGS = flags.FLAGS
flags.DEFINE_enum('dataset', 'auto', ['auto', 'mnist', 'cifar10'], 'Dataset '
'("auto", "mnist" or "cifar10"). When set to "auto", '
'the dataset is inferred from the model directory path.')
flags.DEFINE_enum('model', 'auto', ['auto', 'tiny', 'small', 'medium',
'large_200', 'large'], 'Model size. '
'When set to "auto", the model name is inferred from the '
'model directory path.')
flags.DEFINE_string('model_dir', None, 'Model checkpoint directory.')
flags.DEFINE_enum('bound_method', 'ibp', ['ibp', 'crown-ibp'],
                  'Bound propagation method. For models trained with CROWN-IBP '
'and beta_final=1 (e.g., CIFAR 2/255), use "crown-ibp". '
'Otherwise use "ibp".')
flags.DEFINE_integer('batch_size', 200, 'Batch size.')
flags.DEFINE_float('epsilon', .3, 'Target epsilon.')
def layers(model_size):
"""Returns the layer specification for a given model name."""
if model_size == 'tiny':
return (
('linear', 100),
('activation', 'relu'))
elif model_size == 'small':
return (
('conv2d', (4, 4), 16, 'VALID', 2),
('activation', 'relu'),
('conv2d', (4, 4), 32, 'VALID', 1),
('activation', 'relu'),
('linear', 100),
('activation', 'relu'))
elif model_size == 'medium':
return (
('conv2d', (3, 3), 32, 'VALID', 1),
('activation', 'relu'),
('conv2d', (4, 4), 32, 'VALID', 2),
('activation', 'relu'),
('conv2d', (3, 3), 64, 'VALID', 1),
('activation', 'relu'),
('conv2d', (4, 4), 64, 'VALID', 2),
('activation', 'relu'),
('linear', 512),
('activation', 'relu'),
('linear', 512),
('activation', 'relu'))
elif model_size == 'large_200':
# Some old large checkpoints have 200 hidden neurons in the last linear
# layer.
return (
('conv2d', (3, 3), 64, 'SAME', 1),
('activation', 'relu'),
('conv2d', (3, 3), 64, 'SAME', 1),
('activation', 'relu'),
('conv2d', (3, 3), 128, 'SAME', 2),
('activation', 'relu'),
('conv2d', (3, 3), 128, 'SAME', 1),
('activation', 'relu'),
('conv2d', (3, 3), 128, 'SAME', 1),
('activation', 'relu'),
('linear', 200),
('activation', 'relu'))
elif model_size == 'large':
return (
('conv2d', (3, 3), 64, 'SAME', 1),
('activation', 'relu'),
('conv2d', (3, 3), 64, 'SAME', 1),
('activation', 'relu'),
('conv2d', (3, 3), 128, 'SAME', 2),
('activation', 'relu'),
('conv2d', (3, 3), 128, 'SAME', 1),
('activation', 'relu'),
('conv2d', (3, 3), 128, 'SAME', 1),
('activation', 'relu'),
('linear', 512),
('activation', 'relu'))
else:
raise ValueError('Unknown model: "{}"'.format(model_size))
def show_metrics(metric_values, bound_method='ibp'):
if bound_method == 'crown-ibp':
verified_accuracy = metric_values.crown_ibp_verified_accuracy
else:
verified_accuracy = metric_values.verified_accuracy
print('nominal accuracy = {:.2f}%, '
'verified accuracy = {:.2f}%, '
'accuracy under PGD attack = {:.2f}%'.format(
metric_values.nominal_accuracy * 100.,
            verified_accuracy * 100.,
metric_values.attack_accuracy * 100.))
def main(unused_args):
dataset = FLAGS.dataset
if FLAGS.dataset == 'auto':
if 'mnist' in FLAGS.model_dir:
dataset = 'mnist'
elif 'cifar' in FLAGS.model_dir:
dataset = 'cifar10'
else:
raise ValueError('Cannot guess the dataset name. Please specify '
'--dataset manually.')
model_name = FLAGS.model
if FLAGS.model == 'auto':
model_names = ['large_200', 'large', 'medium', 'small', 'tiny']
for name in model_names:
if name in FLAGS.model_dir:
model_name = name
logging.info('Using guessed model name "%s".', model_name)
break
if model_name == 'auto':
raise ValueError('Cannot guess the model name. Please specify --model '
'manually.')
checkpoint_path = tf.train.latest_checkpoint(FLAGS.model_dir)
if checkpoint_path is None:
raise OSError('Cannot find a valid checkpoint in {}.'.format(
FLAGS.model_dir))
# Dataset.
input_bounds = (0., 1.)
num_classes = 10
if dataset == 'mnist':
data_train, data_test = tf.keras.datasets.mnist.load_data()
else:
assert dataset == 'cifar10', (
'Unknown dataset "{}"'.format(dataset))
data_train, data_test = tf.keras.datasets.cifar10.load_data()
data_train = (data_train[0], data_train[1].flatten())
data_test = (data_test[0], data_test[1].flatten())
# Base predictor network.
original_predictor = ibp.DNN(num_classes, layers(model_name))
predictor = original_predictor
if dataset == 'cifar10':
mean = (0.4914, 0.4822, 0.4465)
std = (0.2023, 0.1994, 0.2010)
predictor = ibp.add_image_normalization(original_predictor, mean, std)
if FLAGS.bound_method == 'crown-ibp':
predictor = ibp.crown.VerifiableModelWrapper(predictor)
else:
predictor = ibp.VerifiableModelWrapper(predictor)
# Test using while loop.
def get_test_metrics(batch_size, attack_builder=ibp.UntargetedPGDAttack):
"""Returns the test metrics."""
num_test_batches = len(data_test[0]) // batch_size
assert len(data_test[0]) % batch_size == 0, (
'Test data is not a multiple of batch size.')
def cond(i, *unused_args):
return i < num_test_batches
def body(i, metrics):
"""Compute the sum of all metrics."""
test_data = ibp.build_dataset(data_test, batch_size=batch_size,
sequential=True)
predictor(test_data.image, override=True, is_training=False)
input_interval_bounds = ibp.IntervalBounds(
tf.maximum(test_data.image - FLAGS.epsilon, input_bounds[0]),
tf.minimum(test_data.image + FLAGS.epsilon, input_bounds[1]))
predictor.propagate_bounds(input_interval_bounds)
test_specification = ibp.ClassificationSpecification(
test_data.label, num_classes)
test_attack = attack_builder(predictor, test_specification, FLAGS.epsilon,
input_bounds=input_bounds,
optimizer_builder=ibp.UnrolledAdam)
# Use CROWN-IBP bound or IBP bound.
if FLAGS.bound_method == 'crown-ibp':
test_losses = ibp.crown.Losses(predictor, test_specification,
test_attack, use_crown_ibp=True,
crown_bound_schedule=tf.constant(1.))
else:
test_losses = ibp.Losses(predictor, test_specification, test_attack)
test_losses(test_data.label)
new_metrics = []
for m, n in zip(metrics, test_losses.scalar_metrics):
new_metrics.append(m + n)
return i + 1, new_metrics
if FLAGS.bound_method == 'crown-ibp':
metrics = ibp.crown.ScalarMetrics
else:
metrics = ibp.ScalarMetrics
total_count = tf.constant(0, dtype=tf.int32)
total_metrics = [tf.constant(0, dtype=tf.float32)
for _ in range(len(metrics._fields))]
total_count, total_metrics = tf.while_loop(
cond,
body,
loop_vars=[total_count, total_metrics],
back_prop=False,
parallel_iterations=1)
total_count = tf.cast(total_count, tf.float32)
test_metrics = []
for m in total_metrics:
test_metrics.append(m / total_count)
return metrics(*test_metrics)
test_metrics = get_test_metrics(
FLAGS.batch_size, ibp.UntargetedPGDAttack)
# Prepare to load the pretrained-model.
saver = tf.compat.v1.train.Saver(original_predictor.get_variables())
# Run everything.
tf_config = tf.ConfigProto()
tf_config.gpu_options.allow_growth = True
with tf.train.SingularMonitoredSession(config=tf_config) as sess:
logging.info('Restoring from checkpoint "%s".', checkpoint_path)
saver.restore(sess, checkpoint_path)
logging.info('Evaluating at epsilon = %f.', FLAGS.epsilon)
metric_values = sess.run(test_metrics)
show_metrics(metric_values, FLAGS.bound_method)
if __name__ == '__main__':
flags.mark_flag_as_required('model_dir')
app.run(main)
| interval-bound-propagation-master | examples/eval.py |
# coding=utf-8
# Copyright 2019 The Interval Bound Propagation Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Train verifiable robust models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from absl import logging
import interval_bound_propagation as ibp
import numpy as np
import six
import sonnet as snt
import tensorflow.compat.v1 as tf
import tensorflow_datasets as tfds
import tensorflow_probability as tfp
from tensorflow.contrib import lookup as contrib_lookup
import models
import utils
EmbeddedDataset = collections.namedtuple(
'EmbeddedDataset',
['embedded_inputs', 'length', 'input_tokens', 'sentiment'])
Dataset = collections.namedtuple(
'Dataset',
['tokens', 'num_tokens', 'sentiment'])
Perturbation = collections.namedtuple(
'Perturbation',
['positions', 'tokens'])
def _pad_fixed(x, axis, padded_length):
"""Pads a tensor to a fixed size (rather than batch-specific)."""
pad_shape = x.shape.as_list()
pad_shape[axis] = tf.maximum(padded_length - tf.shape(x)[axis], 0)
# Pad zero as in utils.get_padded_indexes.
padded = tf.concat([x, tf.zeros(dtype=x.dtype, shape=pad_shape)], axis=axis)
assert axis == 1
padded = padded[:, :padded_length]
padded_shape = padded.shape.as_list()
padded_shape[axis] = padded_length
padded.set_shape(padded_shape)
return padded
class GeneratedDataset(snt.AbstractModule):
"""A dataset wrapper for data_gen such that it behaves like sst_binary."""
def __init__(self, data_gen, batch_size, mode='train',
num_examples=0,
dataset_name='glue/sst2',
name='generated_dataset'):
super(GeneratedDataset, self).__init__(name=name)
self._data_gen = data_gen
self._batch_size = batch_size
self._mode = mode
    self._shuffle = (mode == 'train')
self._num_examples = num_examples
self._dataset_name = dataset_name
def get_row_lengths(self, sparse_tensor_input):
# sparse_tensor_input is a tf.SparseTensor
# In RaggedTensor, row_lengths is a vector with shape `[nrows]`,
# which specifies the length of each row.
rt = tf.RaggedTensor.from_sparse(sparse_tensor_input)
return rt.row_lengths()
def _build(self):
dataset = tfds.load(name=self._dataset_name, split=self._mode)
minibatch = dataset.map(parse).repeat()
if self._shuffle:
minibatch = minibatch.shuffle(self._batch_size*100)
minibatch = minibatch.batch(
self._batch_size).make_one_shot_iterator().get_next()
minibatch['sentiment'].set_shape([self._batch_size])
minibatch['sentence'] = tf.SparseTensor(
indices=minibatch['sentence'].indices,
values=minibatch['sentence'].values,
dense_shape=[self._batch_size, minibatch['sentence'].dense_shape[1]])
# minibatch.sentence sparse tensor with dense shape
# [batch_size x seq_length], length: [batch_size]
return Dataset(
tokens=minibatch['sentence'],
num_tokens=self.get_row_lengths(minibatch['sentence']),
sentiment=minibatch['sentiment'],
)
@property
def num_examples(self):
return self._num_examples
def parse(data_dict):
"""Parse dataset from _data_gen into the same format as sst_binary."""
sentiment = data_dict['label']
sentence = data_dict['sentence']
dense_chars = tf.decode_raw(sentence, tf.uint8)
dense_chars.set_shape((None,))
chars = tfp.math.dense_to_sparse(dense_chars)
if six.PY3:
safe_chr = lambda c: '?' if c >= 128 else chr(c)
else:
safe_chr = chr
to_char = np.vectorize(safe_chr)
chars = tf.SparseTensor(indices=chars.indices,
values=tf.py_func(to_char, [chars.values], tf.string),
dense_shape=chars.dense_shape)
return {'sentiment': sentiment,
'sentence': chars}
class RobustModel(snt.AbstractModule):
"""Model for applying sentence representations for different tasks."""
def __init__(self,
task,
batch_size,
pooling,
learning_rate,
config,
embedding_dim,
fine_tune_embeddings=False,
num_oov_buckets=1000,
max_grad_norm=5.0,
name='robust_model'):
super(RobustModel, self).__init__(name=name)
self.config = config
self.task = task
self.batch_size = batch_size
self.pooling = pooling
self.learning_rate = learning_rate
self.embedding_dim = embedding_dim
self.fine_tune_embeddings = fine_tune_embeddings
self.num_oov_buckets = num_oov_buckets
self.max_grad_norm = max_grad_norm
self.linear_classifier = None
def add_representer(self, vocab_filename, padded_token=None):
"""Add sentence representer to the computation graph.
Args:
      vocab_filename: the name of the vocabulary file.
padded_token: padded_token to the vocabulary.
"""
self.embed_pad = utils.EmbedAndPad(
self.batch_size,
[self._lines_from_file(vocab_filename)],
embedding_dim=self.embedding_dim,
num_oov_buckets=self.num_oov_buckets,
fine_tune_embeddings=self.fine_tune_embeddings,
padded_token=padded_token)
self.keep_prob = tf.placeholder(tf.float32, shape=None, name='keep_prob')
# Model to get a sentence representation from embeddings.
self.sentence_representer = models.SentenceRepresenterConv(
self.config, keep_prob=self.keep_prob, pooling=self.pooling)
def add_dataset(self):
"""Add datasets.
Returns:
train_data, dev_data, test_data, num_classes
"""
if self.config.get('dataset', '') == 'sst':
train_data = GeneratedDataset(None, self.batch_size, mode='train',
num_examples=67349)
dev_data = GeneratedDataset(None, self.batch_size, mode='validation',
num_examples=872)
test_data = GeneratedDataset(None, self.batch_size, mode='validation',
num_examples=872)
num_classes = 2
return train_data, dev_data, test_data, num_classes
else:
raise ValueError('Not supported dataset')
def get_representation(self, tokens, num_tokens):
if tokens.dtype == tf.float32:
return self.sentence_representer(tokens, num_tokens)
else: # dtype == tf.string
return self.sentence_representer(self.embed_pad(tokens), num_tokens)
def add_representation(self, minibatch):
"""Compute sentence representations.
Args:
minibatch: a minibatch of sequences of embeddings.
Returns:
joint_rep: representation of sentences or concatenation of
sentence vectors.
"""
joint_rep = self.get_representation(minibatch.tokens, minibatch.num_tokens)
result = {'representation1': joint_rep}
return joint_rep, result
def add_train_ops(self,
num_classes,
joint_rep,
minibatch):
"""Add ops for training in the computation graph.
Args:
num_classes: number of classes to predict in the task.
joint_rep: the joint sentence representation if the input is sentence
pairs or the representation for the sentence if the input is a single
sentence.
minibatch: a minibatch of sequences of embeddings.
Returns:
train_accuracy: the accuracy on the training dataset
loss: training loss.
opt_step: training op.
"""
if self.linear_classifier is None:
classifier_layers = []
classifier_layers.append(snt.Linear(num_classes))
self.linear_classifier = snt.Sequential(classifier_layers)
logits = self.linear_classifier(joint_rep)
# Losses and optimizer.
def get_loss(logits, labels):
return tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits))
loss = get_loss(logits, minibatch.sentiment)
train_accuracy = utils.get_accuracy(logits, minibatch.sentiment)
opt_step = self._add_optimize_op(loss)
return train_accuracy, loss, opt_step
def create_perturbation_ops(self, minibatch, synonym_values, vocab_table):
"""Perturb data_batch using synonym_values."""
data_batch = _pad_fixed(
utils.get_padded_indexes(vocab_table, minibatch.tokens,
self.batch_size), axis=1,
padded_length=self.config['max_padded_length'])
# synonym_values: [vocab_size x max_num_synonyms]
# data_batch: [batch_size x seq_length]
# [batch_size x seq_length x max_num_synonyms] - synonyms for each token.
# Defaults to same word in case of no other synonyms.
synonym_ids = tf.gather(synonym_values, data_batch, axis=0)
# Split along batchsize. Elements shape: [seq_length x max_num_synonyms].
synonym_ids_per_example = tf.unstack(synonym_ids, axis=0)
# Loop across batch.
# synonym_ids_this_example shape: [seq_length x max_num_synonyms]
sequence_positions_across_batch, values_across_batch = [], []
for i_sample, synonym_ids_this_example in enumerate(
synonym_ids_per_example):
# [num_nonzero, 2]. The rows are pairs of (t,s), where t is an index for
# a time step, and s is an index into the max_num_synonyms dimension.
nonzero_indices = tf.where(synonym_ids_this_example)
# shape [num_nonzero]. Corresponding to the entries at nonzero_indices
synonym_tokens = tf.gather_nd(params=synonym_ids_this_example,
indices=nonzero_indices)
# [num_nonzero] - Of the (t,s) pairs in nonzero_indices, pick only the
# time dimension (t), corresponding to perturbation positions in the
# sequence.
perturbation_positions_this_example = nonzero_indices[:, 0]
# The main logic is done. Now follows padding to a fixed length of
# num_perturbations. However, this cannot be done with 0-padding, as it
# would introduce a new (zero) vertex. Instead, we duplicate existing
# tokens as perturbations (which have no effect), until we have reached a
# total of num_perturbations perturbations. In this case, the padded
# tokens are the original tokens from the data_batch. The padded positions
# are all the positions (using range) corresponding to the padded tokens.
# How often seq-length fits into maximum num perturbations
padding_multiplier = tf.floordiv(self.config['num_perturbations'],
tf.cast(minibatch.num_tokens[i_sample],
tf.int32)) + 1
# original tokens # [seq_length]
original_tokens = data_batch[i_sample, :minibatch.num_tokens[i_sample]]
# [padding_multiplier * seq_length]. Repeat several times, use as padding.
padding_tokens = tf.tile(original_tokens, multiples=[padding_multiplier])
synonym_tokens_padded = tf.concat([synonym_tokens, tf.cast(padding_tokens,
dtype=tf.int64)
], axis=0)
# Crop at exact num_perturbations size.
synonym_tokens_padded = synonym_tokens_padded[
:self.config['num_perturbations']]
# [seq_length] padding sequence positions with tiles of range()
pad_positions = tf.range(minibatch.num_tokens[i_sample], delta=1)
# [padding_multiplier*seq_length]
padding_positions = tf.tile(pad_positions, multiples=[padding_multiplier])
perturbation_positions_this_example_padded = tf.concat(
[perturbation_positions_this_example, tf.cast(padding_positions,
dtype=tf.int64)],
axis=0)
# Crop at exact size num_perturbations.
sequence_positions_padded = perturbation_positions_this_example_padded[
:self.config['num_perturbations']]
# Collect across the batch for tf.stack later.
sequence_positions_across_batch.append(sequence_positions_padded)
values_across_batch.append(synonym_tokens_padded)
# Both [batch_size x max_n_perturbations]
perturbation_positions = tf.stack(sequence_positions_across_batch, axis=0)
perturbation_tokens = tf.stack(values_across_batch, axis=0)
# Explicitly setting the shape to self.config['num_perturbations']
perturbation_positions_shape = perturbation_positions.shape.as_list()
perturbation_positions_shape[1] = self.config['num_perturbations']
perturbation_positions.set_shape(perturbation_positions_shape)
perturbation_tokens_shape = perturbation_tokens.shape.as_list()
perturbation_tokens_shape[1] = self.config['num_perturbations']
perturbation_tokens.set_shape(perturbation_tokens_shape)
return Perturbation(
positions=perturbation_positions,
tokens=perturbation_tokens)
def _add_optimize_op(self, loss):
"""Add ops for training."""
global_step = tf.Variable(0, trainable=False)
learning_rate = tf.Variable(self.learning_rate, trainable=False)
tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(loss, tvars),
self.max_grad_norm)
opt = tf.train.AdamOptimizer(learning_rate)
opt_step = opt.apply_gradients(zip(grads, tvars),
global_step=global_step)
return opt_step
def embed_dataset(self, minibatch, vocab_table):
return EmbeddedDataset(
embedded_inputs=_pad_fixed(
self.embed_pad(minibatch.tokens),
axis=1,
padded_length=self.config['max_padded_length']),
input_tokens=_pad_fixed(
utils.get_padded_indexes(vocab_table, minibatch.tokens,
self.batch_size),
axis=1,
padded_length=self.config['max_padded_length']),
length=tf.minimum(self.config['max_padded_length'],
tf.cast(minibatch.num_tokens, tf.int32)),
sentiment=minibatch.sentiment)
def compute_mask_vertices(self, data_batch, perturbation):
"""Compute perturbation masks and perbuted vertices.
Args:
data_batch: EmbeddedDataset object.
perturbation: Perturbation object.
Returns:
masks: Positions where there are perturbations.
vertices: The resulting embeddings of the perturbed inputs.
"""
# The following are all shaped (after broadcasting) as:
# (batch_size, num_perturbations, seq_length, embedding_size).
embedding = self.embed_pad._embeddings # pylint: disable=protected-access
# (batch_size, 1, seq_length, emb_dim)
original_vertices = tf.expand_dims(data_batch.embedded_inputs, axis=1)
# (batch_size, num_perturbation, 1, emb_dim])
perturbation_vertices = tf.gather(
embedding, tf.expand_dims(perturbation.tokens, axis=2))
# (batch_size, num_perturbations, seq_length, 1)
mask = tf.expand_dims(
tf.one_hot(perturbation.positions,
depth=self.config['max_padded_length']), axis=3)
# (batch_size, num_perturbations, seq_length, embedding_size)
vertices = (1 - mask) * original_vertices + mask * perturbation_vertices
return mask, vertices
def preprocess_databatch(self, minibatch, vocab_table, perturbation):
data_batch = self.embed_dataset(minibatch, vocab_table)
mask, vertices = self.compute_mask_vertices(data_batch, perturbation)
return data_batch, mask, vertices
def add_verifiable_objective(self,
minibatch,
vocab_table,
perturbation,
stop_gradient=False):
# pylint: disable=g-missing-docstring
data_batch = self.embed_dataset(minibatch, vocab_table)
_, vertices = self.compute_mask_vertices(data_batch, perturbation)
def classifier(embedded_inputs):
representation = self.sentence_representer(embedded_inputs,
data_batch.length)
return self.linear_classifier(representation)
# Verification graph.
network = ibp.VerifiableModelWrapper(classifier)
network(data_batch.embedded_inputs)
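    # The allowed input set is (roughly) the simplex spanned by the nominal
    # embeddings and the perturbed vertices (single-word synonym
    # substitutions), scaled by the radius delta.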
input_bounds = ibp.SimplexBounds(
vertices=vertices,
nominal=data_batch.embedded_inputs,
r=(self.delta if not stop_gradient else self.config['delta']))
network.propagate_bounds(input_bounds)
# Calculate the verifiable objective.
verifiable_obj = verifiable_objective(
network, data_batch.sentiment, margin=1.)
return verifiable_obj
def run_classification(self, inputs, labels, length):
prediction = self.run_prediction(inputs, length)
correct = tf.cast(tf.equal(labels, tf.argmax(prediction, 1)),
dtype=tf.float32)
return correct
def compute_verifiable_loss(self, verifiable_obj, labels):
"""Compute verifiable training objective.
Args:
verifiable_obj: Verifiable training objective.
labels: Ground truth labels.
Returns:
      verifiable_loss: Aggregated loss of the verifiable training objective.
"""
# Three options: reduce max, reduce mean, and softmax.
if self.config['verifiable_training_aggregation'] == 'mean':
verifiable_loss = tf.reduce_mean(
verifiable_obj) # average across all target labels
elif self.config['verifiable_training_aggregation'] == 'max':
# Worst target label only.
verifiable_loss = tf.reduce_mean(tf.reduce_max(verifiable_obj, axis=0))
elif self.config['verifiable_training_aggregation'] == 'softmax':
# This assumes that entries in verifiable_obj belonging to the true class
# are set to a (large) negative value, so to not affect the softmax much.
# [batch_size]. Compute x-entropy against one-hot distrib. for true label.
verifiable_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=tf.transpose(verifiable_obj), labels=labels)
verifiable_loss = tf.reduce_mean(
verifiable_loss) # aggregation across batch
else:
logging.info(self.config['verifiable_training_aggregation'])
raise ValueError(
'Bad input argument for verifiable_training_aggregation used.')
return verifiable_loss
def compute_verifiable_verified(self, verifiable_obj):
# Overall upper bound is maximum over all incorrect target classes.
bound = tf.reduce_max(verifiable_obj, axis=0)
verified = tf.cast(bound <= 0, dtype=tf.float32)
return bound, verified
def run_prediction(self, inputs, length):
representation = self.sentence_representer(inputs, length)
prediction = self.linear_classifier(representation)
return prediction
def sentiment_accuracy_op(self, minibatch):
"""Compute accuracy of dev/test set on the task of sentiment analysis.
Args:
minibatch: a batch of sequences of embeddings.
Returns:
num_correct: the number of examples that are predicted correctly on the
given dataset.
"""
rep = self.get_representation(minibatch.tokens, minibatch.num_tokens)
logits = self.linear_classifier(rep)
num_correct = utils.get_num_correct_predictions(logits,
minibatch.sentiment)
return num_correct
def add_dev_eval_ops(self, minibatch):
"""Add ops for evaluating on the dev/test set.
Args:
      minibatch: a batch of sequences of embeddings.
Returns:
num_correct: the number of examples that are predicted correctly.
"""
num_correct = self.sentiment_accuracy_op(minibatch)
return num_correct
def _build(self):
"""Build the computation graph.
Returns:
graph_tensors: list of ops that are to be executed during
training/evaluation.
"""
train_data, dev_data, test_data, num_classes = self.add_dataset()
train_minibatch = train_data()
dev_minibatch = dev_data()
test_minibatch = test_data()
# Load the vocab without padded_token and add it to the add_representer
# later. Otherwise, it will be sorted.
vocab_filename = self.config['vocab_filename']
self.add_representer(vocab_filename, padded_token=b'<PAD>')
graph_tensors = self._build_graph_with_datasets(
train_minibatch, dev_minibatch, test_minibatch, num_classes)
graph_tensors['dev_num_examples'] = dev_data.num_examples
graph_tensors['test_num_examples'] = test_data.num_examples
return graph_tensors
def _build_graph_with_datasets(self,
train_minibatch,
dev_minibatch,
test_minibatch,
num_classes):
"""Returns the training/evaluation ops."""
self.keep_prob = 1. # Using literal 1 (not placeholder) skips dropout op.
self.sentence_representer._keep_prob = 1. # pylint:disable=protected-access
# Build the graph as per the base class.
(train_joint_rep, _) = self.add_representation(train_minibatch)
(train_accuracy,
loss,
opt_step) = self.add_train_ops(num_classes, train_joint_rep,
train_minibatch)
dev_num_correct = self.add_dev_eval_ops(dev_minibatch)
test_num_correct = self.add_dev_eval_ops(test_minibatch)
graph_tensors = {
'loss': loss,
'train_op': opt_step,
'train_accuracy': train_accuracy,
'dev_num_correct': dev_num_correct,
'test_num_correct': test_num_correct,
'keep_prob': self.keep_prob
}
vocab_table = self.embed_pad.vocab_table
vocab_size = self.embed_pad.vocab_size
verifiable_loss_ratio = tf.constant(
self.config['verifiable_loss_ratio'],
dtype=tf.float32,
name='verifiable_loss_ratio')
self.delta = tf.constant(self.config['delta'],
dtype=tf.float32, name='delta')
lookup_token = tf.placeholder(tf.string, shape=None, name='lookup_token')
indices = vocab_table.lookup(lookup_token)
self.vocab_list = contrib_lookup.index_to_string_table_from_file(
self.config['vocab_filename_pad'])
lookup_token_index = tf.placeholder(tf.int64, shape=None,
name='lookup_token_index')
lookup_token_string = self.vocab_list.lookup(lookup_token_index)
synonym_values = tf.placeholder(tf.int64, shape=[None, None],
name='synonym_values')
synonym_counts = tf.placeholder(tf.int64, shape=[None],
name='synonym_counts')
train_perturbation = self.create_perturbation_ops(
train_minibatch, synonym_values, vocab_table)
train_data_batch, _, _ = self.preprocess_databatch(
train_minibatch, vocab_table, train_perturbation)
train_words = self.vocab_list.lookup(train_data_batch.input_tokens)
# [num_targets x batchsize]
verifiable_obj = self.add_verifiable_objective(
train_minibatch, vocab_table, train_perturbation, stop_gradient=False)
train_nominal = self.run_classification(train_data_batch.embedded_inputs,
train_data_batch.sentiment,
train_data_batch.length)
train_bound, train_verified = self.compute_verifiable_verified(
verifiable_obj)
verifiable_loss = self.compute_verifiable_loss(verifiable_obj,
train_minibatch.sentiment)
    if self.config['verifiable_loss_ratio'] > 1.0:
      raise ValueError('verifiable_loss_ratio must not exceed 1.0.')
total_loss = (1 - verifiable_loss_ratio) * graph_tensors['loss']
if self.config['verifiable_loss_ratio'] != 0:
total_loss += verifiable_loss_ratio * verifiable_loss
# Attack on dev/test set.
dev_perturbation = self.create_perturbation_ops(
dev_minibatch, synonym_values, vocab_table)
# [num_targets x batchsize]
dev_verifiable_obj = self.add_verifiable_objective(
dev_minibatch, vocab_table, dev_perturbation, stop_gradient=True)
dev_bound, dev_verified = self.compute_verifiable_verified(
dev_verifiable_obj)
dev_data_batch, _, _ = self.preprocess_databatch(
dev_minibatch, vocab_table, dev_perturbation)
test_perturbation = self.create_perturbation_ops(
test_minibatch, synonym_values, vocab_table)
# [num_targets x batchsize]
test_verifiable_obj = self.add_verifiable_objective(
test_minibatch, vocab_table, test_perturbation, stop_gradient=True)
test_bound, test_verified = self.compute_verifiable_verified(
test_verifiable_obj)
test_data_batch, _, _ = self.preprocess_databatch(
test_minibatch, vocab_table, test_perturbation)
dev_words = self.vocab_list.lookup(dev_data_batch.input_tokens)
test_words = self.vocab_list.lookup(test_data_batch.input_tokens)
dev_nominal = self.run_classification(dev_data_batch.embedded_inputs,
dev_data_batch.sentiment,
dev_data_batch.length)
test_nominal = self.run_classification(test_data_batch.embedded_inputs,
test_data_batch.sentiment,
test_data_batch.length)
dev_predictions = self.run_prediction(dev_data_batch.embedded_inputs,
dev_data_batch.length)
test_predictions = self.run_prediction(test_data_batch.embedded_inputs,
test_data_batch.length)
with tf.control_dependencies([train_verified, test_verified, dev_verified]):
opt_step = self._add_optimize_op(total_loss)
graph_tensors['total_loss'] = total_loss
graph_tensors['verifiable_loss'] = verifiable_loss
graph_tensors['train_op'] = opt_step
graph_tensors['indices'] = indices
graph_tensors['lookup_token_index'] = lookup_token_index
graph_tensors['lookup_token_string'] = lookup_token_string
graph_tensors['lookup_token'] = lookup_token
graph_tensors['vocab_size'] = vocab_size
graph_tensors['synonym_values'] = synonym_values
graph_tensors['synonym_counts'] = synonym_counts
graph_tensors['verifiable_loss_ratio'] = verifiable_loss_ratio
graph_tensors['delta'] = self.delta
graph_tensors['train'] = {
'bound': train_bound,
'verified': train_verified,
'words': train_words,
'sentiment': train_minibatch.sentiment,
'correct': train_nominal,
}
graph_tensors['dev'] = {
'predictions': dev_predictions,
'data_batch': dev_data_batch,
'tokens': dev_minibatch.tokens,
'num_tokens': dev_minibatch.num_tokens,
'minibatch': dev_minibatch,
'bound': dev_bound,
'verified': dev_verified,
'words': dev_words,
'sentiment': dev_minibatch.sentiment,
'correct': dev_nominal,
}
graph_tensors['test'] = {
'predictions': test_predictions,
'data_batch': test_data_batch,
'tokens': test_minibatch.tokens,
'num_tokens': test_minibatch.num_tokens,
'minibatch': test_minibatch,
'bound': test_bound,
'verified': test_verified,
'words': test_words,
'sentiment': test_minibatch.sentiment,
'correct': test_nominal,
}
return graph_tensors
def _lines_from_file(self, filename):
with open(filename, 'rb') as f:
return f.read().splitlines()
def verifiable_objective(network, labels, margin=0.):
"""Computes the verifiable objective.
Args:
network: `ibp.VerifiableModelWrapper` for the network to verify.
labels: 1D integer tensor of shape (batch_size) of labels for each
input example.
margin: Verifiable objective values for correct class will be forced to
`-margin`, thus disregarding large negative bounds when maximising. By
default this is set to 0.
Returns:
2D tensor of shape (num_classes, batch_size) containing verifiable objective
for each target class, for each example.
"""
last_layer = network.output_module
# Objective, elided with final linear layer.
obj_w, obj_b = targeted_objective(
last_layer.module.w, last_layer.module.b, labels)
# Relative bounds on the objective.
per_neuron_objective = tf.maximum(
obj_w * last_layer.input_bounds.lower_offset,
obj_w * last_layer.input_bounds.upper_offset)
verifiable_obj = tf.reduce_sum(
per_neuron_objective,
axis=list(range(2, per_neuron_objective.shape.ndims)))
# Constant term (objective layer bias).
verifiable_obj += tf.reduce_sum(
obj_w * last_layer.input_bounds.nominal,
axis=list(range(2, obj_w.shape.ndims)))
verifiable_obj += obj_b
# Filter out cases in which the target class is the correct class.
# Using `margin` makes the irrelevant cases of target=correct return
# a large negative value, which will be ignored by the reduce_max.
num_classes = last_layer.output_bounds.shape[-1]
verifiable_obj = filter_correct_class(
verifiable_obj, num_classes, labels, margin=margin)
return verifiable_obj
def targeted_objective(final_w, final_b, labels):
"""Determines final layer weights for attacks targeting each class.
Args:
final_w: 2D tensor of shape (last_hidden_layer_size, num_classes)
containing the weights for the final linear layer.
final_b: 1D tensor of shape (num_classes) containing the biases for the
      final linear layer.
labels: 1D integer tensor of shape (batch_size) of labels for each
input example.
Returns:
obj_w: Tensor of shape (num_classes, batch_size, last_hidden_layer_size)
containing weights (to use in place of final linear layer weights)
for targeted attacks.
obj_b: Tensor of shape (num_classes, batch_size) containing bias
(to use in place of final linear layer biases) for targeted attacks.
"""
# Elide objective with final linear layer.
final_wt = tf.transpose(final_w)
obj_w = tf.expand_dims(final_wt, axis=1) - tf.gather(final_wt, labels, axis=0)
obj_b = tf.expand_dims(final_b, axis=1) - tf.gather(final_b, labels, axis=0)
return obj_w, obj_b
def filter_correct_class(verifiable_obj, num_classes, labels, margin):
"""Filters out the objective when the target class contains the true label.
Args:
verifiable_obj: 2D tensor of shape (num_classes, batch_size) containing
verifiable objectives.
num_classes: number of target classes.
labels: 1D tensor of shape (batch_size) containing the labels for each
example in the batch.
margin: Verifiable objective values for correct class will be forced to
`-margin`, thus disregarding large negative bounds when maximising.
Returns:
2D tensor of shape (num_classes, batch_size) containing the corrected
verifiable objective values for each (class, example).
"""
targets_to_filter = tf.expand_dims(
tf.range(num_classes, dtype=labels.dtype), axis=1)
neq = tf.not_equal(targets_to_filter, labels)
verifiable_obj = tf.where(neq, verifiable_obj, -margin *
tf.ones_like(verifiable_obj))
return verifiable_obj
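# Illustrative sketch only (not called by the training or analysis code):
# exercises `targeted_objective` and `filter_correct_class` on assumed toy
# shapes to document the elided-objective tensor layout.
def _example_elided_objective_shapes():
  """Toy elided objectives for hidden size 4, 3 classes and a batch of 2."""
  final_w = tf.ones([4, 3])  # (last_hidden_layer_size, num_classes)
  final_b = tf.ones([3])  # (num_classes,)
  labels = tf.constant([0, 2], dtype=tf.int64)
  obj_w, obj_b = targeted_objective(final_w, final_b, labels)
  # obj_w: (num_classes=3, batch_size=2, last_hidden_layer_size=4).
  # obj_b: (num_classes=3, batch_size=2).
  toy_obj = tf.zeros([3, 2]) + obj_b
  # Entries where the target class equals the true label are set to -margin.
  filtered = filter_correct_class(toy_obj, num_classes=3, labels=labels,
                                  margin=1.0)
  return obj_w, obj_b, filtered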
| interval-bound-propagation-master | examples/language/robust_model.py |
# coding=utf-8
# Copyright 2019 The Interval Bound Propagation Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration parameters for sentence representation models."""
def get_config():
"""Returns the default configuration as a dict."""
config = {}
config['dataset'] = 'sst'
# Convolutional architecture.
# Format: Tuple/List for a Conv layer (filters, kernel_size, pooling_size)
# Otherwise, nonlinearity.
config['conv_architecture'] = ((100, 5, 1), 'relu')
# Fully connected layer 1 hidden sizes (0 means no layer).
config['conv_fc1'] = 0
# Fully connected layer 2 hidden sizes (0 means no layer).
config['conv_fc2'] = 0
# Number of allowable perturbations.
# (delta specifies the budget, i.e., how many may be used at once.)
config['delta'] = 3.0
# Allow each character to be changed to another character.
config['synonym_filepath'] = 'data/character_substitution_enkey_sub1.json'
config['max_padded_length'] = 268
  # Max num_perturbations = seqlen * max_number_synonyms, i.e. the total
  # number of elementary perturbations (here ~1 * 268).
config['num_perturbations'] = 268
config['vocab_filename'] = 'data/sst_binary_character_vocabulary_sorted.txt'
# Need to add pad for analysis (which is what is used after
# utils.get_merged_vocabulary_file).
config['vocab_filename_pad'] = (
'data/sst_binary_character_vocabulary_sorted_pad.txt')
config['embedding_dim'] = 150
config['delta_schedule'] = True
config['verifiable_loss_schedule'] = True
# Ratio between the task loss and verifiable loss.
config['verifiable_loss_ratio'] = 0.75
  # Aggregated loss of the verifiable training objective
# (among softmax, mean, max).
config['verifiable_training_aggregation'] = 'softmax'
config['data_id'] = 1
config['model_location'] = '/tmp/robust_model/checkpoint/final'
return config
| interval-bound-propagation-master | examples/language/config.py |
# coding=utf-8
# Copyright 2019 The Interval Bound Propagation Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Models for sentence representation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sonnet as snt
import tensorflow.compat.v1 as tf
def _max_pool_1d(x, pool_size=2, name='max_pool_1d'):
with tf.name_scope(name, 'MaxPool1D', [x, pool_size]):
return tf.squeeze(
tf.nn.max_pool(tf.expand_dims(x, 1),
[1, 1, pool_size, 1],
[1, 1, pool_size, 1],
'VALID'),
axis=1)
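# Illustrative sketch only (not used by the models): documents the shape
# contract of `_max_pool_1d` under assumed toy dimensions.
def _example_max_pool_1d_shapes():
  """A [2, 8, 3] input pooled with pool_size=2 yields a [2, 4, 3] output."""
  x = tf.zeros([2, 8, 3])  # [batch, length, channels].
  return _max_pool_1d(x, pool_size=2)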
class SentenceRepresenterConv(snt.AbstractModule):
"""Use stacks of 1D Convolutions to build a sentence representation."""
def __init__(self,
config,
keep_prob=1.,
pooling='max',
name='sentence_rep_conv'):
super(SentenceRepresenterConv, self).__init__(name=name)
self._config = config
self._pooling = pooling
self._keep_prob = keep_prob
def _build(self, padded_word_embeddings, length):
x = padded_word_embeddings
for layer in self._config['conv_architecture']:
if isinstance(layer, tuple) or isinstance(layer, list):
filters, kernel_size, pooling_size = layer
conv = snt.Conv1D(
output_channels=filters,
kernel_shape=kernel_size)
x = conv(x)
if pooling_size and pooling_size > 1:
x = _max_pool_1d(x, pooling_size)
elif layer == 'relu':
x = tf.nn.relu(x)
if self._keep_prob < 1:
x = tf.nn.dropout(x, keep_prob=self._keep_prob)
else:
raise RuntimeError('Bad layer type {} in conv'.format(layer))
# Final layer pools over the remaining sequence length to get a
# fixed sized vector.
if self._pooling == 'max':
x = tf.reduce_max(x, axis=1)
elif self._pooling == 'average':
x = tf.reduce_sum(x, axis=1)
lengths = tf.expand_dims(tf.cast(length, tf.float32), axis=1)
x = x / lengths
if self._config['conv_fc1']:
fc1_layer = snt.Linear(output_size=self._config['conv_fc1'])
x = tf.nn.relu(fc1_layer(x))
if self._keep_prob < 1:
x = tf.nn.dropout(x, keep_prob=self._keep_prob)
if self._config['conv_fc2']:
fc2_layer = snt.Linear(output_size=self._config['conv_fc2'])
x = tf.nn.relu(fc2_layer(x))
if self._keep_prob < 1:
x = tf.nn.dropout(x, keep_prob=self._keep_prob)
return x
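# Illustrative usage sketch only, with an assumed toy config; it is not part
# of the original training pipeline.
def _example_sentence_representer_conv():
  """Applies the conv representer to dummy [2, 8, 16] embeddings."""
  toy_config = {
      'conv_architecture': ((4, 3, 1), 'relu'),
      'conv_fc1': 0,
      'conv_fc2': 0,
  }
  representer = SentenceRepresenterConv(toy_config, pooling='max')
  embeddings = tf.zeros([2, 8, 16])  # [batch, length, embedding_dim].
  lengths = tf.constant([8, 8])
  return representer(embeddings, lengths)  # [2, 4] pooled representation.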
| interval-bound-propagation-master | examples/language/models.py |
# coding=utf-8
# Copyright 2019 The Interval Bound Propagation Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Train verifiably robust models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import imp
import json
import os
from absl import app
from absl import flags
from absl import logging
import numpy as np
from six.moves import range
import tensorflow.compat.v1 as tf
import robust_model
flags.DEFINE_string('config_path', 'config.py',
'Path to training configuration file.')
flags.DEFINE_integer('batch_size', 40, 'Batch size.')
flags.DEFINE_integer('num_train_steps', 150000, 'Number of training steps.')
flags.DEFINE_integer('num_oov_buckets', 1,
'Number of out of vocabulary buckets.')
flags.DEFINE_integer('report_every', 100,
'Report test loss every N batches.')
flags.DEFINE_float('schedule_ratio', 0.8,
'The final delta and verifiable_loss_ratio are reached when '
'the number of steps equals schedule_ratio * '
'num_train_steps.')
flags.DEFINE_float('learning_rate', 0.001, 'Learning rate.')
flags.DEFINE_float('max_grad_norm', 5.0, 'Maximum norm of gradients.')
flags.DEFINE_boolean('fine_tune_embeddings', True, 'Finetune embeddings.')
flags.DEFINE_string('task', 'sst', 'One of snli, mnli, sick, sst.')
flags.DEFINE_string('pooling', 'average', 'One of average, sum, max, last.')
flags.DEFINE_boolean('analysis', False, 'Analysis mode.')
flags.DEFINE_string('analysis_split', 'test', 'Analysis dataset split.')
flags.DEFINE_string('experiment_root',
'/tmp/robust_model/',
'Path to save trained models.')
flags.DEFINE_string(
'tensorboard_dir', None,
'Tensorboard folder. If not specified, set under experiment_root')
FLAGS = flags.FLAGS
def load_synonyms(synonym_filepath=None):
synonyms = None
with open(synonym_filepath) as f:
synonyms = json.load(f)
return synonyms
def construct_synonyms(synonym_filepath):
synonyms = load_synonyms(synonym_filepath)
synonym_keys = list(synonyms.keys())
synonym_values = [synonyms[k] for k in synonym_keys]
max_synoynm_counts = max([len(s) for s in synonym_values])
synonym_value_lens = [len(x) for x in synonym_values]
# Add 0 for the first starting point.
synonym_value_lens_cum = np.cumsum([0] + synonym_value_lens)
synonym_values_list = [word for val in synonym_values for word in val] # pylint: disable=g-complex-comprehension
return synonym_keys, max_synoynm_counts, synonym_value_lens_cum, synonym_values_list
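# Illustrative sketch only (assumed toy data): documents the flattened layout
# produced by `construct_synonyms`, where the synonyms of key i live in
# values_list[cum[i]:cum[i + 1]].
def _example_synonym_layout():
  """Checks the cumulative-offset slicing on a tiny hand-built example."""
  synonym_values = [['cooled', 'chilled'], ['pipes']]
  lens_cum = np.cumsum([0] + [len(v) for v in synonym_values])  # [0, 2, 3]
  values_list = [word for val in synonym_values for word in val]
  assert values_list[lens_cum[0]:lens_cum[1]] == ['cooled', 'chilled']
  assert values_list[lens_cum[1]:lens_cum[2]] == ['pipes']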
def linear_schedule(step, init_step, final_step, init_value, final_value):
"""Linear schedule."""
assert final_step >= init_step
if init_step == final_step:
return final_value
rate = np.float32(step - init_step) / float(final_step - init_step)
linear_value = rate * (final_value - init_value) + init_value
return np.clip(linear_value, min(init_value, final_value),
max(init_value, final_value))
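# Illustrative sketch only: documents the ramp-then-clip behaviour of
# `linear_schedule`, which is used for delta and the verifiable loss ratio.
def _example_linear_schedule():
  """Ramps a value from 0 to 3 over steps 0..100, then clips."""
  assert linear_schedule(0, 0, 100, 0., 3.) == 0.
  assert abs(linear_schedule(50, 0, 100, 0., 3.) - 1.5) < 1e-6
  assert linear_schedule(200, 0, 100, 0., 3.) == 3.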
def config_train_summary(task, train_accuracy, loss):
"""Add ops for summary in the computation graph.
Args:
task: string name of task being trained for.
train_accuracy: training accuracy.
loss: training loss.
Returns:
train_summary: summary for training.
"""
train_acc_summ = tf.summary.scalar(('%s_train_accuracy' % task),
train_accuracy)
loss_summ = tf.summary.scalar('loss', loss)
train_summary = tf.summary.merge([train_acc_summ, loss_summ])
return train_summary
def write_tf_summary(writer, step, tag, value):
summary = tf.Summary()
summary.value.add(tag=tag, simple_value=value)
writer.add_summary(summary, step)
def train(config_dict, synonym_filepath,
batch_size, num_train_steps, schedule_ratio, report_every,
checkpoint_path, tensorboard_dir):
"""Model training."""
graph_tensor_producer = robust_model.RobustModel(**config_dict)
graph_tensors = graph_tensor_producer()
synonym_keys, max_synoynm_counts, synonym_value_lens_cum, \
synonym_values_list = construct_synonyms(synonym_filepath)
train_summary = config_train_summary(config_dict['task'],
graph_tensors['train_accuracy'],
graph_tensors['loss'])
tf.gfile.MakeDirs(checkpoint_path)
best_dev_accuracy = 0.0
best_test_accuracy = 0.0
best_verified_dev_accuracy = 0.0
best_verified_test_accuracy = 0.0
network_saver = tf.train.Saver(graph_tensor_producer.variables)
with tf.train.SingularMonitoredSession() as session:
logging.info('Initialize parameters...')
writer = tf.summary.FileWriter(tensorboard_dir, session.graph)
input_feed = {}
# Tokenize synonyms.
tokenize_synonyms = [[] for _ in range(graph_tensors['vocab_size'])]
lookup_indices_keys = session.run(graph_tensors['indices'],
feed_dict={graph_tensors['lookup_token']:
synonym_keys})
lookup_indices_values = session.run(graph_tensors['indices'],
feed_dict={
graph_tensors['lookup_token']:
synonym_values_list})
for i, key_index in enumerate(lookup_indices_keys):
tokenize_synonyms[key_index] = lookup_indices_values[
synonym_value_lens_cum[i]:synonym_value_lens_cum[i+1]].tolist()
synonym_values_np = np.zeros([graph_tensors['vocab_size'],
max_synoynm_counts])
for i in range(graph_tensors['vocab_size']):
      # Fail-safe case: no perturbations, so the token maps to itself.
synonym_values_np[i][0] = i
for j in range(len(tokenize_synonyms[i])):
synonym_values_np[i][j] = tokenize_synonyms[i][j]
synonym_counts_np = [len(s) for s in tokenize_synonyms]
input_feed[graph_tensors['synonym_values']] = synonym_values_np
input_feed[graph_tensors['synonym_counts']] = synonym_counts_np
warmup_steps = 0
for step in range(num_train_steps):
config = config_dict['config']
if config['delta'] > 0.0 and config['delta_schedule']:
delta = linear_schedule(
step, 0., schedule_ratio * num_train_steps,
0., config['delta'])
input_feed[graph_tensors['delta']] = delta
if (config['verifiable_loss_ratio'] > 0.0 and
config['verifiable_loss_schedule']):
if delta > 0.0 and warmup_steps == 0:
warmup_steps = step
if delta > 0.0:
verifiable_loss_ratio = linear_schedule(
step, warmup_steps, schedule_ratio * num_train_steps,
0., config['verifiable_loss_ratio'])
else:
verifiable_loss_ratio = 0.0
input_feed[
graph_tensors['verifiable_loss_ratio']] = verifiable_loss_ratio
total_loss_np, loss_np, verifiable_loss_np, train_accuracy_np, \
train_bound, train_verified, \
verifiable_loss_ratio_val, delta_val, \
train_summary_py, _ = session.run(
[graph_tensors['total_loss'],
graph_tensors['loss'],
graph_tensors['verifiable_loss'],
graph_tensors['train_accuracy'],
graph_tensors['train']['bound'],
graph_tensors['train']['verified'],
graph_tensors['verifiable_loss_ratio'],
graph_tensors['delta'],
train_summary,
graph_tensors['train_op']], input_feed)
writer.add_summary(train_summary_py, step)
      if step % report_every == 0 or step == num_train_steps - 1:
dev_total_num_correct = 0.0
test_total_num_correct = 0.0
dev_verified_count = 0.0
test_verified_count = 0.0
dev_num_batches = graph_tensors['dev_num_examples'] // batch_size
test_num_batches = graph_tensors['test_num_examples'] // batch_size
dev_total_num_examples = dev_num_batches * batch_size
test_total_num_examples = test_num_batches * batch_size
for _ in range(dev_num_batches):
correct, verified = session.run(
[graph_tensors['dev_num_correct'],
graph_tensors['dev']['verified']], input_feed)
dev_total_num_correct += correct
dev_verified_count += np.sum(verified)
for _ in range(test_num_batches):
correct, verified = session.run(
[graph_tensors['test_num_correct'],
graph_tensors['test']['verified']], input_feed)
test_total_num_correct += correct
test_verified_count += np.sum(verified)
dev_accuracy = dev_total_num_correct / dev_total_num_examples
test_accuracy = test_total_num_correct / test_total_num_examples
dev_verified_accuracy = dev_verified_count / dev_total_num_examples
test_verified_accuracy = test_verified_count / test_total_num_examples
write_tf_summary(writer, step, tag='dev_accuracy', value=dev_accuracy)
write_tf_summary(writer, step, tag='test_accuracy', value=test_accuracy)
write_tf_summary(writer, step, tag='train_bound_summary',
value=np.mean(train_bound))
write_tf_summary(writer, step, tag='train_verified_summary',
value=np.mean(train_verified))
write_tf_summary(writer, step, tag='dev_verified_summary',
value=np.mean(dev_verified_accuracy))
write_tf_summary(writer, step, tag='test_verified_summary',
value=np.mean(test_verified_accuracy))
write_tf_summary(writer, step, tag='total_loss_summary',
value=total_loss_np)
write_tf_summary(writer, step, tag='verifiable_train_loss_summary',
value=verifiable_loss_np)
logging.info('verifiable_loss_ratio: %f, delta: %f',
verifiable_loss_ratio_val, delta_val)
logging.info('step: %d, '
'train loss: %f, '
'verifiable train loss: %f, '
'train accuracy: %f, '
'dev accuracy: %f, '
'test accuracy: %f, ', step, loss_np,
verifiable_loss_np, train_accuracy_np,
dev_accuracy, test_accuracy)
dev_verified_accuracy_mean = np.mean(dev_verified_accuracy)
test_verified_accuracy_mean = np.mean(test_verified_accuracy)
logging.info('Train Bound = %.05f, train verified: %.03f, '
'dev verified: %.03f, test verified: %.03f',
np.mean(train_bound),
np.mean(train_verified), dev_verified_accuracy_mean,
test_verified_accuracy_mean)
if dev_accuracy > best_dev_accuracy:
# Store most accurate model so far.
network_saver.save(session.raw_session(),
os.path.join(checkpoint_path, 'best'))
best_dev_accuracy = dev_accuracy
best_test_accuracy = test_accuracy
logging.info('best dev acc\t%f\tbest test acc\t%f',
best_dev_accuracy, best_test_accuracy)
if dev_verified_accuracy_mean > best_verified_dev_accuracy:
# Store model with best verified accuracy so far.
network_saver.save(session.raw_session(),
os.path.join(checkpoint_path, 'best_verified'))
best_verified_dev_accuracy = dev_verified_accuracy_mean
best_verified_test_accuracy = test_verified_accuracy_mean
logging.info('best verified dev acc\t%f\tbest verified test acc\t%f',
best_verified_dev_accuracy, best_verified_test_accuracy)
network_saver.save(session.raw_session(),
os.path.join(checkpoint_path, 'model'))
writer.flush()
# Store model at end of training.
network_saver.save(session.raw_session(),
os.path.join(checkpoint_path, 'final'))
def analysis(config_dict, synonym_filepath,
model_location, batch_size, batch_offset=0,
total_num_batches=0, datasplit='test', delta=3.0,
num_perturbations=5, max_padded_length=0):
"""Run analysis."""
tf.reset_default_graph()
if datasplit not in ['train', 'dev', 'test']:
raise ValueError('Invalid datasplit: %s' % datasplit)
logging.info('model_location: %s', model_location)
logging.info('num_perturbations: %d', num_perturbations)
logging.info('delta: %f', delta)
logging.info('Run analysis, datasplit: %s, batch %d', datasplit, batch_offset)
synonym_keys, max_synoynm_counts, synonym_value_lens_cum, \
synonym_values_list = construct_synonyms(synonym_filepath)
graph_tensor_producer = robust_model.RobustModel(**config_dict)
# Use new batch size.
graph_tensor_producer.batch_size = batch_size
# Overwrite the config originally in the saved checkpoint.
logging.info('old delta %f, old num_perturbations: %d',
graph_tensor_producer.config['delta'],
graph_tensor_producer.config['num_perturbations'])
graph_tensor_producer.config['delta'] = delta
graph_tensor_producer.config['num_perturbations'] = num_perturbations
if max_padded_length > 0:
graph_tensor_producer.config['max_padded_length'] = max_padded_length
logging.info('new delta %f, num_perturbations: %d, max_padded_length: %d',
graph_tensor_producer.config['delta'],
graph_tensor_producer.config['num_perturbations'],
graph_tensor_producer.config['max_padded_length'])
logging.info('graph_tensors.config: %s', graph_tensor_producer.config)
graph_tensors = graph_tensor_producer()
network_saver = tf.train.Saver(graph_tensor_producer.variables)
with tf.train.SingularMonitoredSession() as session:
network_saver.restore(session.raw_session(), model_location)
for _ in range(batch_offset):
# Seek to the correct batch.
session.run(graph_tensors[datasplit]['sentiment'])
input_feed = {}
# Tokenize synonyms.
tokenize_synonyms = [[] for _ in range(graph_tensors['vocab_size'])]
lookup_indices_keys = session.run(graph_tensors['indices'],
feed_dict={graph_tensors['lookup_token']:
synonym_keys})
lookup_indices_values = session.run(graph_tensors['indices'],
feed_dict={
graph_tensors['lookup_token']:
synonym_values_list})
for i, key_index in enumerate(lookup_indices_keys):
tokenize_synonyms[key_index] = lookup_indices_values[
synonym_value_lens_cum[i]:synonym_value_lens_cum[i+1]].tolist()
synonym_values_np = np.zeros([graph_tensors['vocab_size'],
max_synoynm_counts])
for i in range(graph_tensors['vocab_size']):
      # Fail-safe case: no perturbations, so the token maps to itself.
synonym_values_np[i][0] = i
for j in range(len(tokenize_synonyms[i])):
synonym_values_np[i][j] = tokenize_synonyms[i][j]
synonym_counts_np = [len(s) for s in tokenize_synonyms]
input_feed[graph_tensors['synonym_values']] = synonym_values_np
input_feed[graph_tensors['synonym_counts']] = synonym_counts_np
total_num_batches = (
graph_tensors['%s_num_examples' % datasplit] //
batch_size) if total_num_batches == 0 else total_num_batches
total_num_examples = total_num_batches * batch_size
logging.info('total number of examples %d', total_num_examples)
logging.info('total number of batches %d', total_num_batches)
total_correct, total_verified = 0.0, 0.0
for ibatch in range(total_num_batches):
results = session.run(graph_tensors[datasplit], input_feed)
logging.info('batch: %d, %s bound = %.05f, verified: %.03f,'
' nominally correct: %.03f',
ibatch, datasplit, np.mean(results['bound']),
np.mean(results['verified']),
np.mean(results['correct']))
total_correct += sum(results['correct'])
total_verified += sum(results['verified'])
total_correct /= total_num_examples
total_verified /= total_num_examples
logging.info('%s final correct: %.03f, verified: %.03f',
datasplit, total_correct, total_verified)
logging.info({
'datasplit': datasplit,
'nominal': total_correct,
'verify': total_verified,
'delta': delta,
'num_perturbations': num_perturbations,
'model_location': model_location,
'final': True
})
def main(_):
# Read the config file into a new ad-hoc module.
with open(FLAGS.config_path, 'r') as config_file:
config_code = config_file.read()
config_module = imp.new_module('config')
exec(config_code, config_module.__dict__) # pylint: disable=exec-used
config = config_module.get_config()
config_dict = {'task': FLAGS.task,
'batch_size': FLAGS.batch_size,
'pooling': FLAGS.pooling,
'learning_rate': FLAGS.learning_rate,
'config': config,
'embedding_dim': config['embedding_dim'],
'fine_tune_embeddings': FLAGS.fine_tune_embeddings,
'num_oov_buckets': FLAGS.num_oov_buckets,
'max_grad_norm': FLAGS.max_grad_norm}
if FLAGS.analysis:
logging.info('Analyze model location: %s', config['model_location'])
base_batch_offset = 0
analysis(config_dict, config['synonym_filepath'], config['model_location'],
FLAGS.batch_size, base_batch_offset,
0, datasplit=FLAGS.analysis_split,
delta=config['delta'],
num_perturbations=config['num_perturbations'],
max_padded_length=config['max_padded_length'])
else:
checkpoint_path = os.path.join(FLAGS.experiment_root, 'checkpoint')
if FLAGS.tensorboard_dir is None:
tensorboard_dir = os.path.join(FLAGS.experiment_root, 'tensorboard')
else:
tensorboard_dir = FLAGS.tensorboard_dir
train(config_dict, config['synonym_filepath'],
FLAGS.batch_size,
num_train_steps=FLAGS.num_train_steps,
schedule_ratio=FLAGS.schedule_ratio,
report_every=FLAGS.report_every,
checkpoint_path=checkpoint_path,
tensorboard_dir=tensorboard_dir)
if __name__ == '__main__':
app.run(main)
| interval-bound-propagation-master | examples/language/robust_train.py |
# coding=utf-8
# Copyright 2019 The Interval Bound Propagation Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for sentence representation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
from absl import logging
import sonnet as snt
import tensorflow as tf
from tensorflow.contrib import lookup as contrib_lookup
def get_padded_embeddings(embeddings,
vocabulary_table,
tokens, batch_size,
token_indexes=None):
"""Reshapes and pads 'raw' word embeddings.
Say we have batch of B tokenized sentences, of variable length, with a total
of W tokens. For example, B = 2 and W = 3 + 4 = 7:
[['The', 'cat', 'eats'],
[ 'A', 'black', 'cat', 'jumps']]
Since rows have variable length, this cannot be represented as a tf.Tensor.
It is represented as a tf.SparseTensor, with 7 values & indexes:
indices: [[0,0], [0,1], [0,2], [1,0], [1,1], [1,2], [1,3]]
values: ['The', 'cat', 'eats', 'A', 'black', 'cat', 'jumps']
We have also built a vocabulary table:
vocabulary table: ['cat', 'The', 'A', 'black', 'eats', 'jumps']
  We also have the embeddings, a [V, D] matrix of floats (V = vocabulary size)
  representing each word in the vocabulary table as a normal tf.Tensor.
For example, with D=3, embeddings could be:
[[0.4, 0.5, -0.6], # This is the embedding for word 0 = 'cat'
[0.1, -0.3, 0.6], # This is the embedding for word 1 = 'The''
[0.7, 0.8, -0.9], # This is the embedding for word 2 = 'A'
[-0.1, 0.9, 0.7], # This is the embedding for word 3 = 'black'
[-0.2, 0.4, 0.7], # This is the embedding for word 4 = 'eats
[0.3, -0.5, 0.2]] # This is the embedding for word 5 = 'jumps'
This function builds a normal tf.Tensor containing the embeddings for the
tokens provided, in the correct order, with appropriate 0 padding.
In our example, the returned tensor would be:
[[[0.1, -0.3, 0.6], [0.4, 0.5, -0.6], [-0.2, 0.4, 0.7], [0.0, 0.0, 0.0]],
[[0.7, 0.8, -0.9], [-0.1, 0.9, 0.7], [0.4, 0.5, -0.6], [0.3, -0.5, 0.2]]]
Note that since the first sentence has only 3 words, the 4th embedding gets
replaced by a D-dimensional vector of 0.
Args:
    embeddings: [V, D] Tensor of floats containing the embeddings, where V is
      the vocabulary size, initialized with the same vocabulary file as
      vocabulary_table.
vocabulary_table: a tf.contrib.lookup.LookupInterface,
containing the vocabulary, initialized with the same vocabulary file as
embeddings.
tokens: [B, ?] SparseTensor of strings, the tokens.
batch_size: Python integer.
    token_indexes: Optional tensor of precomputed token indices. If provided,
      it is used instead of looking up `tokens.values` in vocabulary_table.
Returns:
[B, L, D] Tensor of floats: the embeddings in the correct order,
appropriately padded with 0.0, where L = max(num_tokens) and B = batch_size
"""
embedding_dim = embeddings.get_shape()[1].value # D in docstring above.
num_tokens_in_batch = tf.shape(tokens.indices)[0] # W in the docstring above.
max_length = tokens.dense_shape[1] # This is L in the docstring above.
# Get indices of tokens in vocabulary_table.
if token_indexes is not None:
indexes = token_indexes
else:
indexes = vocabulary_table.lookup(tokens.values)
# Get word embeddings.
tokens_embeddings = tf.gather(embeddings, indexes)
# Shape of the return tensor.
new_shape = tf.cast(
tf.stack([batch_size, max_length, embedding_dim], axis=0), tf.int32)
# Build the vector of indices for the return Tensor.
# In the example above, indices_final would be:
# [[[0,0,0], [0,0,1], [0,0,2]],
# [[0,1,0], [0,1,1], [0,1,2]],
# [[0,2,0], [0,2,1], [0,2,2]],
# [[1,0,0], [1,0,1], [1,0,2]],
# [[1,1,0], [1,1,1], [1,1,2]],
# [[1,2,0], [1,2,1], [1,2,2]],
# [[1,3,0], [1,3,1], [1,3,2]]]
tiled = tf.tile(tokens.indices, [1, embedding_dim])
indices_tiled = tf.cast(
tf.reshape(tiled, [num_tokens_in_batch * embedding_dim, 2]), tf.int32)
indices_linear = tf.expand_dims(
tf.tile(tf.range(0, embedding_dim), [num_tokens_in_batch]), axis=1)
indices_final = tf.concat([indices_tiled, indices_linear], axis=1)
# Build the dense Tensor.
embeddings_padded = tf.sparse_to_dense(
sparse_indices=indices_final,
output_shape=new_shape,
sparse_values=tf.reshape(tokens_embeddings,
[num_tokens_in_batch * embedding_dim]))
embeddings_padded.set_shape((batch_size, None, embedding_dim))
return embeddings_padded
def get_padded_indexes(vocabulary_table,
tokens, batch_size,
token_indexes=None):
"""Get the indices of tokens from vocabulary table.
Args:
vocabulary_table: a tf.contrib.lookup.LookupInterface,
containing the vocabulary, initialized with the same vocabulary file as
embeddings.
tokens: [B, ?] SparseTensor of strings, the tokens.
batch_size: Python integer.
    token_indexes: Optional tensor of precomputed token indices. If provided,
      it is used instead of looking up `tokens.values` in vocabulary_table.
Returns:
[B, L] Tensor of integers: indices of tokens in the correct order,
appropriately padded with 0, where L = max(num_tokens) and B = batch_size
"""
num_tokens_in_batch = tf.shape(tokens.indices)[0]
max_length = tokens.dense_shape[1]
# Get indices of tokens in vocabulary_table.
if token_indexes is not None:
indexes = token_indexes
else:
indexes = vocabulary_table.lookup(tokens.values)
# Build the dense Tensor.
indexes_padded = tf.sparse_to_dense(
sparse_indices=tokens.indices,
output_shape=[batch_size, max_length],
sparse_values=tf.reshape(indexes,
[num_tokens_in_batch]))
indexes_padded.set_shape((batch_size, None))
return indexes_padded
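# Illustrative sketch only (assumed toy tokens): shows the [B, L] padded
# layout produced by get_padded_indexes when precomputed token_indexes are
# supplied, so no vocabulary table is needed.
def _example_get_padded_indexes():
  """Returns [[1, 0], [2, 0]]: sentences of lengths 2 and 1, padded with 0."""
  tokens = tf.SparseTensor(
      indices=[[0, 0], [0, 1], [1, 0]],
      values=tf.constant([b'the', b'cat', b'a']),
      dense_shape=[2, 2])
  token_indexes = tf.constant([1, 0, 2], dtype=tf.int64)
  return get_padded_indexes(None, tokens, batch_size=2,
                            token_indexes=token_indexes)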
class EmbedAndPad(snt.AbstractModule):
"""Embed and pad tokenized words.
  This class's primary functionality is similar to get_padded_embeddings.
It stores references to the embeddings and vocabulary table for convenience,
so that the user does not have to keep and pass them around.
"""
def __init__(self,
batch_size,
vocabularies,
embedding_dim,
num_oov_buckets=1000,
fine_tune_embeddings=False,
padded_token=None,
name='embed_and_pad'):
super(EmbedAndPad, self).__init__(name=name)
self._batch_size = batch_size
vocab_file, vocab_size = get_merged_vocabulary_file(vocabularies,
padded_token)
self._vocab_size = vocab_size
self._num_oov_buckets = num_oov_buckets
# Load vocabulary table for index lookup.
self._vocabulary_table = contrib_lookup.index_table_from_file(
vocabulary_file=vocab_file,
num_oov_buckets=num_oov_buckets,
vocab_size=self._vocab_size)
def create_initializer(initializer_range=0.02):
"""Creates a `truncated_normal_initializer` with the given range."""
# The default value is chosen from language/bert/modeling.py.
return tf.truncated_normal_initializer(stddev=initializer_range)
self._embeddings = tf.get_variable('embeddings_matrix',
[self._vocab_size + num_oov_buckets,
embedding_dim],
trainable=fine_tune_embeddings,
initializer=create_initializer())
def _build(self, tokens):
padded_embeddings = get_padded_embeddings(
self._embeddings, self._vocabulary_table, tokens, self._batch_size)
return padded_embeddings
@property
def vocab_table(self):
return self._vocabulary_table
@property
def vocab_size(self):
return self._vocab_size + self._num_oov_buckets
def get_accuracy(logits, labels):
"""Top 1 accuracy from logits and labels."""
return tf.reduce_mean(tf.cast(tf.nn.in_top_k(logits, labels, 1), tf.float32))
def get_num_correct_predictions(logits, labels):
"""Get the number of correct predictions over a batch."""
predictions = tf.cast(tf.argmax(logits, axis=1), tf.int64)
evals = tf.equal(predictions, labels)
num_correct = tf.reduce_sum(tf.cast(evals, tf.float64))
return num_correct
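# Illustrative sketch only (assumed toy logits), documenting the counting
# behaviour of get_num_correct_predictions.
def _example_get_num_correct_predictions():
  """Two of the three argmax predictions match the labels, so returns 2.0."""
  logits = tf.constant([[2.0, 1.0], [0.0, 3.0], [5.0, 4.0]])
  labels = tf.constant([0, 1, 1], dtype=tf.int64)
  return get_num_correct_predictions(logits, labels)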
def get_merged_vocabulary_file(vocabularies, padded_token=None):
"""Merges several vocabulary files into one temporary file.
The TF object that loads the embedding expects a vocabulary file, to know
which embeddings it should load.
See tf.contrib.embedding.load_embedding_initializer.
When we want to train/test on several datasets simultaneously we need to merge
their vocabulary files into a single file.
Args:
vocabularies: Iterable of vocabularies. Each vocabulary should be
a list of tokens.
padded_token: If not None, add the padded_token to the first index.
Returns:
outfilename: Name of the merged file. Contains the union of all tokens in
filenames, without duplicates, one token per line.
vocabulary_size: Count of tokens in the merged file.
"""
uniques = [set(vocabulary) for vocabulary in vocabularies]
unique_merged = frozenset().union(*uniques)
unique_merged_sorted = sorted(unique_merged)
if padded_token is not None:
# Add padded token as 0 index.
unique_merged_sorted = [padded_token] + unique_merged_sorted
vocabulary_size = len(unique_merged_sorted)
outfile = tempfile.NamedTemporaryFile(delete=False)
outfile.write(b'\n'.join(unique_merged_sorted))
outfilename = outfile.name
logging.info('Merged vocabulary file with %d tokens: %s', vocabulary_size,
outfilename)
outfile.close()
return outfilename, vocabulary_size
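# Illustrative usage sketch only, with assumed toy vocabularies.
def _example_get_merged_vocabulary_file():
  """Merges two vocabularies; the padded token is prepended at index 0."""
  filename, vocabulary_size = get_merged_vocabulary_file(
      [[b'cat', b'the'], [b'the', b'a']], padded_token=b'<PAD>')
  # vocabulary_size == 4: <PAD>, a, cat, the (sorted, de-duplicated).
  with open(filename, 'rb') as f:
    tokens = f.read().splitlines()
  assert tokens[0] == b'<PAD>' and vocabulary_size == 4
  return tokens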
| interval-bound-propagation-master | examples/language/utils.py |
# coding=utf-8
# Copyright 2019 The Interval Bound Propagation Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Minimum code to interact with a pretrained Stanford Sentiment Treebank model.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
from six.moves import range
import tensorflow.compat.v1 as tf
import robust_model
SparseTensorValue = collections.namedtuple(
'SparseTensorValue', ['indices', 'values', 'dense_shape'])
class InteractiveSentimentPredictor(object):
"""Can be used to interact with a trained sentiment analysis model."""
def __init__(self, config_dict, model_location, max_padded_length=0,
num_perturbations=0):
self.graph_tensor_producer = robust_model.RobustModel(**config_dict)
self.batch_size = self.graph_tensor_producer.batch_size
    if max_padded_length:
      self.graph_tensor_producer.config['max_padded_length'] = (
          max_padded_length)
    if num_perturbations:
      self.graph_tensor_producer.config['num_perturbations'] = (
          num_perturbations)
self.graph_tensors = self.graph_tensor_producer()
network_saver = tf.train.Saver(self.graph_tensor_producer.variables)
self.open_session = tf.Session()
self.open_session.run(tf.tables_initializer())
network_saver.restore(self.open_session, model_location)
def batch_predict_sentiment(self, list_of_sentences, is_tokenised=True):
"""Computes sentiment predictions for a batch of sentences.
Note: the model batch size is usually hard-coded in the model (e.g. at 64).
We require that len(list_of_sentences)==self.batch_size.
    If padding is necessary to reach that many sentences, it should be done
    outside of this function.
Important: we assume that each sentence has the same number of tokens.
Args:
list_of_sentences: List[str] in case is_tokenised is False, or
List[List[str]] in case is_tokenised is True. Holds inputs whose
sentiment is to be classified.
is_tokenised: bool. Whether sentences are already tokenised. If not,
naive whitespace splitting tokenisation is applied.
Returns:
batch_label_predictions: np.array of shape [self.batch_size] holding
integers, representing model predictions for each input.
"""
# Prepare inputs.
tokenised_sentence_list = []
for sentence in list_of_sentences:
if not is_tokenised:
tokenised_sentence = sentence.lower().split(' ')
else:
tokenised_sentence = sentence
tokenised_sentence_list.append(tokenised_sentence)
length = len(tokenised_sentence_list[0])
assert all([len(x) == length for x in tokenised_sentence_list])
assert len(tokenised_sentence_list) == self.batch_size
# Construct sparse tensor holding token information.
indices = np.zeros([self.batch_size*length, 2])
dense_shape = [self.batch_size, length]
# Loop over words. All sentences have the same length.
for j, _ in enumerate(tokenised_sentence_list[0]):
for i in range(self.batch_size): # Loop over samples.
offset = i*length + j
indices[offset, 0] = i
indices[offset, 1] = j
# Define sparse tensor values.
tokenised_sentence_list = [word for sentence in tokenised_sentence_list # pylint:disable=g-complex-comprehension
for word in sentence]
values = np.array(tokenised_sentence_list)
mb_tokens = SparseTensorValue(indices=indices, values=values,
dense_shape=dense_shape)
mb_num_tokens = np.array([length]*self.batch_size)
# Fill feed_dict with input token information.
feed_dict = {}
feed_dict[self.graph_tensors['dev']['tokens']] = mb_tokens
feed_dict[self.graph_tensors['dev']['num_tokens']] = mb_num_tokens
# Generate model predictions [batch_size x n_labels].
logits = self.open_session.run(self.graph_tensors['dev']['predictions'],
feed_dict)
batch_label_predictions = np.argmax(logits, axis=1)
return batch_label_predictions, logits
def predict_sentiment(self, sentence, tokenised=False):
"""Computes sentiment of a sentence."""
# Create inputs to tensorflow graph.
if tokenised:
inputstring_tokenised = sentence
else:
assert isinstance(sentence, str)
# Simple tokenisation.
inputstring_tokenised = sentence.lower().split(' ')
length = len(inputstring_tokenised)
# Construct inputs to sparse tensor holding token information.
indices = np.zeros([self.batch_size*length, 2])
dense_shape = [self.batch_size, length]
for j, _ in enumerate(inputstring_tokenised):
for i in range(self.batch_size):
offset = i*length + j
indices[offset, 0] = i
indices[offset, 1] = j
values = inputstring_tokenised*self.batch_size
mb_tokens = SparseTensorValue(indices=indices, values=np.array(values),
dense_shape=dense_shape)
mb_num_tokens = np.array([length]*self.batch_size)
# Fill feeddict with input token information.
feed_dict = {}
feed_dict[self.graph_tensors['dev']['tokens']] = mb_tokens
feed_dict[self.graph_tensors['dev']['num_tokens']] = mb_num_tokens
# Generate predictions.
logits = self.open_session.run(self.graph_tensors['dev']['predictions'],
feed_dict)
predicted_label = np.argmax(logits, axis=1)
final_prediction = predicted_label[0]
    # Check that the prediction is the same everywhere (the batch repeats one
    # input).
assert np.all(predicted_label == final_prediction)
return final_prediction, logits
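# Illustrative usage sketch only. It assumes a trained checkpoint exists at
# `model_location` and that `config_dict` matches the configuration used in
# robust_train.py; it is therefore left as a comment rather than executable
# code:
#
#   predictor = InteractiveSentimentPredictor(
#       config_dict, model_location='/tmp/robust_model/checkpoint/final')
#   label, logits = predictor.predict_sentiment('a gorgeous film')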
| interval-bound-propagation-master | examples/language/interactive_example.py |
# coding=utf-8
# Copyright 2019 The Interval Bound Propagation Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functionality for exhaustive adversarial attacks on synonym perturbations.
Models restored from checkpoint can be tested w.r.t their robustness to
exhaustive-search adversaries, which have a fixed perturbation budget with which
they can flip words to synonyms.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import imp
import json
import pprint
from absl import app
from absl import flags
from absl import logging
import numpy as np
import tensorflow.compat.v1 as tf
import tensorflow_datasets as tfds
import tqdm
import interactive_example
flags.DEFINE_boolean('character_level', True, 'Character level model.')
flags.DEFINE_boolean('debug_mode', False, 'Debug mode.')
flags.DEFINE_string('checkpoint_path', '/tmp/robust_model/checkpoint/final',
'Checkpoint path.')
flags.DEFINE_string('dataset', 'sst', 'Dataset name.')
flags.DEFINE_string('mode', 'validation', 'Dataset part. train, dev, or test.')
flags.DEFINE_string('config_path', './config.py',
'Path to training configuration file.')
flags.DEFINE_string('task', 'sst', 'One of snli, mnli, sick, sst.')
flags.DEFINE_integer('batch_size', 30, 'Batch size.')
flags.DEFINE_string('pooling', 'average', 'One of average, sum, max, last.')
flags.DEFINE_boolean('fine_tune_embeddings', True, 'Finetune embeddings.')
flags.DEFINE_integer('num_oov_buckets', 1, 'Number of out-of-vocab buckets.')
flags.DEFINE_integer('delta', 1, 'Maximum perturbation radius')
flags.DEFINE_integer('skip_batches', 0, 'Skip this number of batches'
' for analysis.')
flags.DEFINE_integer('num_examples', 100, 'Analyze this number of examples. '
                     '0 means the whole dataset.')
flags.DEFINE_integer('truncated_len', 0, 'Truncated sentence length. '
                     '0 means the whole sentence.')
flags.DEFINE_integer('max_padded_length', 0, 'max_padded_length. '
                     '0 means no change.')
flags.DEFINE_integer('num_perturbations', 0, 'num_perturbations. '
                     '0 means no change.')
FLAGS = flags.FLAGS
def load_synonyms(synonym_filepath=None):
"""Loads synonym dictionary. Returns as defaultdict(list)."""
with tf.gfile.Open(synonym_filepath) as f:
synonyms = json.load(f)
synonyms_ = collections.defaultdict(list)
synonyms_.update(synonyms)
return synonyms_
def load_dataset(mode='validation', character_level=False):
"""Loads SST dataset.
Takes data from disk/cns if it exists, otherwise out of tensorflow graph.
Args:
mode: string. Either train, dev, or test.
character_level: bool. Whether to return character-level, or token level
inputs.
Returns:
List of (input, output) pairs, where input is a list of strings (tokens),
and output is an integer (categorical label in [0,1]).
"""
message = 'Loading SST {}, character_level {}'.format(mode,
str(character_level))
logging.info(message)
dataset = tfds.load(name='glue/sst2', split=mode)
minibatch = dataset.batch(1).make_one_shot_iterator().get_next()
label_list, input_list = [], []
with tf.train.SingularMonitoredSession() as session:
while True:
output_nodes = (minibatch['label'], minibatch['sentence'])
label, sentence = session.run(output_nodes)
label_list.append(label[0])
input_list.append([chr(i) for i in sentence[0]])
# zip together.
dataset = [(in_, out_) for (in_, out_) in zip(input_list, label_list)]
return dataset
def expand_by_one_perturbation(original_tokenized_sentence,
tokenized_sentence, synonym_dict):
"""Expands given sentence by all possible synonyms.
  Note that each returned sentence contains a single synonym replacement,
  applied at one particular mention of the perturbed word.
Args:
original_tokenized_sentence: List[str]. List of tokens.
tokenized_sentence: List[str]. List of tokens.
synonym_dict: dict, mapping words (str) to lists of synonyms (list of str)
Returns:
new_sentences_list: List[List[str]]. Outer list is across different synonym
replacements. Inner list is over (str) tokens.
"""
new_sentences_list = []
for i_outer, (original_token, _) in enumerate(zip(
original_tokenized_sentence, tokenized_sentence)):
synonyms = synonym_dict[original_token]
for synonym in synonyms: # replace only one particular mention
new_sentence = copy.copy(tokenized_sentence)
new_sentence[i_outer] = synonym
new_sentences_list.append(new_sentence)
return new_sentences_list
def find_up_to_depth_k_perturbations(
original_tokenized_sentence, tokenized_sentence, synonym_dict, k):
"""Takes sentence, finds all sentences reachable using k token perturbations.
Args:
original_tokenized_sentence: List[str]. List of tokens.
tokenized_sentence: List[str]. List of tokens.
synonym_dict: dict, mapping words (str) to lists of synonyms (list of str)
k: int. perturbation depth parameter.
Returns:
output_sentences: List[List[str]]. List of tokenised sentences.
"""
# Case: recursion ends - no further perturbations.
if k == 0:
return [tokenized_sentence]
else:
# Expand by one level.
expanded_sentences = expand_by_one_perturbation(original_tokenized_sentence,
tokenized_sentence,
synonym_dict)
# Call recursive function one level deeper for each expanded sentence.
expanded_sentences_deeper = []
for sentence in expanded_sentences:
new_sentences = find_up_to_depth_k_perturbations(
original_tokenized_sentence, sentence, synonym_dict, k-1)
expanded_sentences_deeper.extend(new_sentences)
output_sentences = expanded_sentences + expanded_sentences_deeper
output_sentences = remove_duplicates(output_sentences)
return output_sentences
def remove_duplicates(list_of_list_of_tokens):
# Convert list of str to str.
sentences = ['|'.join(s) for s in list_of_list_of_tokens]
sentences = set(sentences) # Now hashable -> remove duplicates.
sentences = [s.split('|') for s in sentences] # Convert to original format.
return sentences
def verify_exhaustively(sample, synonym_dict, sst_model, delta,
truncated_len=0):
"""Returns True if a sample can be verified, False otherwise.
Args:
sample: a 2-tuple (x,y), where x is a tokenised sentence (List[str]), and y
is a label (int).
synonym_dict: str -> List[str]. Keys are words, values are word lists with
synonyms for the key word.
sst_model: InteractiveSentimentPredictor instance. Used to make predictions.
delta: int. How many synonym perturbations to maximally allow.
truncated_len: int. Truncate sentence to truncated_len. 0 for unchanged.
Returns:
verified: bool. Whether all possible perturbed version of input sentence x
up to perturbation radius delta have the correct prediction.
"""
(x, y) = sample
counter_example = None
counter_prediction = None
# Create (potentially long) list of perturbed sentences from x.
if truncated_len > 0:
x = x[: truncated_len]
altered_sentences = find_up_to_depth_k_perturbations(x, x, synonym_dict,
delta)
  # Also include the original (unperturbed) sentence.
  altered_sentences = altered_sentences + [x]
# Form batches of these altered sentences.
batch = []
num_forward_passes = len(altered_sentences)
for sentence in altered_sentences:
any_prediction_wrong = False
batch.append(sentence)
# When batch_size is reached, make predictions, break if any label flip
if len(batch) == sst_model.batch_size:
# np array of size [batch_size]
predictions, _ = sst_model.batch_predict_sentiment(
batch, is_tokenised=True)
# Check any prediction that is different from the true label.
any_prediction_wrong = np.any(predictions != y)
if any_prediction_wrong:
wrong_index = np.where(predictions != y)[0].tolist()[0]
counter_example = ' '.join([str(c) for c in batch[wrong_index]])
if FLAGS.debug_mode:
logging.info('\nOriginal example: %s, prediction: %d',
' '.join([str(c) for c in sentence]), y)
logging.info('\ncounter example: %s, prediction: %s',
counter_example, predictions[wrong_index].tolist())
counter_prediction = predictions[wrong_index]
# Break. No need to evaluate further.
return False, counter_example, counter_prediction, num_forward_passes
# Start filling up the next batch.
batch = []
if not batch:
    # No remainder, and the loop was not broken early.
return True, None, None, num_forward_passes
else:
# Remainder -- what didn't fit into a full batch of size batch_size.
# We use the first altered_sentence to pad.
batch += [altered_sentences[0]]*(sst_model.batch_size-len(batch))
assert len(batch) == sst_model.batch_size
predictions, _ = sst_model.batch_predict_sentiment(batch, is_tokenised=True)
any_prediction_wrong = np.any(predictions != y)
if any_prediction_wrong:
wrong_index = np.where(predictions != y)[0].tolist()[0]
counter_example = ' '.join([str(c) for c in batch[wrong_index]])
if FLAGS.debug_mode:
logging.info('\nOriginal example: %s, prediction: %d',
' '.join([str(c) for c in sentence]), y) # pylint: disable=undefined-loop-variable
logging.info('\ncounter example: %s, prediction: %s', counter_example,
predictions[wrong_index].tolist())
counter_prediction = predictions[wrong_index]
return (not any_prediction_wrong, counter_example,
counter_prediction, num_forward_passes)
def verify_dataset(dataset, config_dict, model_location, synonym_dict, delta):
"""Tries to verify against perturbation attacks up to delta."""
sst_model = interactive_example.InteractiveSentimentPredictor(
config_dict, model_location,
max_padded_length=FLAGS.max_padded_length,
num_perturbations=FLAGS.num_perturbations)
verified_list = [] # Holds boolean entries, across dataset.
samples = []
labels = []
counter_examples = []
counter_predictions = []
total_num_forward_passes = []
logging.info('dataset size: %d', len(dataset))
num_examples = FLAGS.num_examples if FLAGS.num_examples else len(dataset)
logging.info('skip_batches: %d', FLAGS.skip_batches)
logging.info('num_examples: %d', num_examples)
logging.info('new dataset size: %d',
len(dataset[FLAGS.skip_batches:FLAGS.skip_batches+num_examples]))
for i, sample in tqdm.tqdm(enumerate(
dataset[FLAGS.skip_batches:FLAGS.skip_batches+num_examples])):
if FLAGS.debug_mode:
logging.info('index: %d', i)
(verified_bool, counter_example, counter_prediction, num_forward_passes
) = verify_exhaustively(
sample, synonym_dict, sst_model, delta, FLAGS.truncated_len)
samples.append(''.join(sample[0]))
labels.append(sample[1])
counter_examples.append(counter_example)
counter_predictions.append(counter_prediction)
total_num_forward_passes.append(num_forward_passes)
else:
verified_bool, _, _, num_forward_passes = verify_exhaustively(
sample, synonym_dict, sst_model, delta, FLAGS.truncated_len)
verified_list.append(verified_bool)
verified_proportion = np.mean(verified_list)
assert len(verified_list) == len(
dataset[FLAGS.skip_batches:FLAGS.skip_batches+num_examples])
return (verified_proportion, verified_list, samples, counter_examples,
counter_predictions, total_num_forward_passes)
def example(synonym_dict, dataset, k=2):
"""Example usage of functions above."""
# The below example x has these synonyms.
# 'decree' --> [edict, order],
# 'tubes' --> 'pipes';
# 'refrigerated' --> ['cooled', 'chilled']
x = ['the', 'refrigerated', 'decree', 'tubes']
# Example: 1 perturbation.
new_x = expand_by_one_perturbation(x, x, synonym_dict)
pprint.pprint(sorted(new_x))
# Example: up to k perturbations.
new_x = find_up_to_depth_k_perturbations(x, x, synonym_dict, k)
pprint.pprint(sorted(new_x))
# Statistics: how large is the combinatorial space of perturbations?
total_x = []
size_counter = collections.Counter()
for (x, _) in tqdm.tqdm(dataset):
new_x = find_up_to_depth_k_perturbations(x, x, synonym_dict, k)
size_counter[len(new_x)] += 1
total_x.extend(new_x)
# Histogram for perturbation space size, computed across dataset.
pprint.pprint([x for x in sorted(size_counter.items(), key=lambda xx: xx[0])])
# Total number of inputs for forward pass if comprehensively evaluated.
pprint.pprint(len(total_x))
def main(args):
del args
# Read the config file into a new ad-hoc module.
with open(FLAGS.config_path, 'r') as config_file:
config_code = config_file.read()
config_module = imp.new_module('config')
exec(config_code, config_module.__dict__) # pylint: disable=exec-used
config = config_module.get_config()
config_dict = {'task': FLAGS.task,
'batch_size': FLAGS.batch_size,
'pooling': FLAGS.pooling,
'learning_rate': 0.,
'config': config,
'embedding_dim': config['embedding_dim'],
'fine_tune_embeddings': FLAGS.fine_tune_embeddings,
'num_oov_buckets': FLAGS.num_oov_buckets,
'max_grad_norm': 0.}
# Maximum verification range.
delta = FLAGS.delta
character_level = FLAGS.character_level
mode = FLAGS.mode
model_location = FLAGS.checkpoint_path
# Load synonyms.
synonym_filepath = config['synonym_filepath']
synonym_dict = load_synonyms(synonym_filepath)
# Load data.
dataset = load_dataset(mode, character_level)
# Compute verifiable accuracy on dataset.
(verified_proportion, _, _, _, _, _) = verify_dataset(dataset, config_dict,
model_location,
synonym_dict, delta)
logging.info('verified_proportion:')
logging.info(str(verified_proportion))
logging.info({
'delta': FLAGS.delta,
'character_level': FLAGS.character_level,
'mode': FLAGS.mode,
'checkpoint_path': FLAGS.checkpoint_path,
'verified_proportion': verified_proportion
})
if __name__ == '__main__':
logging.set_stderrthreshold('info')
app.run(main)
| interval-bound-propagation-master | examples/language/exhaustive_verification.py |
# coding=utf-8
# Copyright 2019 The Interval Bound Propagation Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library to train verifiably robust neural networks.
For more details see paper: On the Effectiveness of Interval Bound Propagation
for Training Verifiably Robust Models.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from interval_bound_propagation.src.attacks import MemoryEfficientMultiTargetedPGDAttack
from interval_bound_propagation.src.attacks import MultiTargetedPGDAttack
from interval_bound_propagation.src.attacks import pgd_attack
from interval_bound_propagation.src.attacks import RestartedAttack
from interval_bound_propagation.src.attacks import UnrolledAdam
from interval_bound_propagation.src.attacks import UnrolledFGSMDescent
from interval_bound_propagation.src.attacks import UnrolledGradientDescent
from interval_bound_propagation.src.attacks import UnrolledSPSAAdam
from interval_bound_propagation.src.attacks import UnrolledSPSAFGSMDescent
from interval_bound_propagation.src.attacks import UnrolledSPSAGradientDescent
from interval_bound_propagation.src.attacks import UntargetedAdaptivePGDAttack
from interval_bound_propagation.src.attacks import UntargetedPGDAttack
from interval_bound_propagation.src.attacks import UntargetedTop5PGDAttack
from interval_bound_propagation.src.bounds import AbstractBounds
from interval_bound_propagation.src.bounds import IntervalBounds
import interval_bound_propagation.src.crown as crown
from interval_bound_propagation.src.fastlin import RelativeSymbolicBounds
from interval_bound_propagation.src.fastlin import SymbolicBounds
import interval_bound_propagation.src.layer_utils as layer_utils
from interval_bound_propagation.src.layers import BatchNorm
from interval_bound_propagation.src.layers import ImageNorm
from interval_bound_propagation.src.loss import Losses
from interval_bound_propagation.src.loss import ScalarLosses
from interval_bound_propagation.src.loss import ScalarMetrics
from interval_bound_propagation.src.model import DNN
from interval_bound_propagation.src.model import StandardModelWrapper
from interval_bound_propagation.src.model import VerifiableModelWrapper
from interval_bound_propagation.src.relative_bounds import RelativeIntervalBounds
from interval_bound_propagation.src.simplex_bounds import SimplexBounds
from interval_bound_propagation.src.specification import ClassificationSpecification
from interval_bound_propagation.src.specification import LeastLikelyClassificationSpecification
from interval_bound_propagation.src.specification import LinearSpecification
from interval_bound_propagation.src.specification import RandomClassificationSpecification
from interval_bound_propagation.src.specification import Specification
from interval_bound_propagation.src.specification import TargetedClassificationSpecification
from interval_bound_propagation.src.utils import add_image_normalization
from interval_bound_propagation.src.utils import build_dataset
from interval_bound_propagation.src.utils import create_attack
from interval_bound_propagation.src.utils import create_classification_losses
from interval_bound_propagation.src.utils import create_specification
from interval_bound_propagation.src.utils import get_attack_builder
from interval_bound_propagation.src.utils import linear_schedule
from interval_bound_propagation.src.utils import parse_learning_rate
from interval_bound_propagation.src.utils import randomize
from interval_bound_propagation.src.utils import smooth_schedule
from interval_bound_propagation.src.verifiable_wrapper import BatchFlattenWrapper
from interval_bound_propagation.src.verifiable_wrapper import BatchNormWrapper
from interval_bound_propagation.src.verifiable_wrapper import BatchReshapeWrapper
from interval_bound_propagation.src.verifiable_wrapper import ConstWrapper
from interval_bound_propagation.src.verifiable_wrapper import ImageNormWrapper
from interval_bound_propagation.src.verifiable_wrapper import IncreasingMonotonicWrapper
from interval_bound_propagation.src.verifiable_wrapper import LinearConv1dWrapper
from interval_bound_propagation.src.verifiable_wrapper import LinearConv2dWrapper
from interval_bound_propagation.src.verifiable_wrapper import LinearConvWrapper
from interval_bound_propagation.src.verifiable_wrapper import LinearFCWrapper
from interval_bound_propagation.src.verifiable_wrapper import ModelInputWrapper
from interval_bound_propagation.src.verifiable_wrapper import PiecewiseMonotonicWrapper
from interval_bound_propagation.src.verifiable_wrapper import VerifiableWrapper
__version__ = '1.10'
| interval-bound-propagation-master | interval_bound_propagation/__init__.py |
# coding=utf-8
# Copyright 2019 The Interval Bound Propagation Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for CROWN bounds."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import interval_bound_propagation as ibp
import numpy as np
import sonnet as snt
import tensorflow.compat.v1 as tf
def _generate_identity_spec(modules, shape, dimension=1):
spec = ibp.LinearSpecification(tf.reshape(tf.eye(dimension), shape),
prune_irrelevant=False)
initial_bound = ibp.crown.create_initial_backward_bounds(spec, modules)
return initial_bound
class CROWNBoundsTest(tf.test.TestCase):
def testFCBackwardBounds(self):
m = snt.Linear(1, initializers={
'w': tf.constant_initializer(1.),
'b': tf.constant_initializer(2.),
})
z = tf.constant([[1, 2, 3]], dtype=tf.float32)
m(z) # Connect to create weights.
m = ibp.LinearFCWrapper(m)
input_bounds = ibp.IntervalBounds(z - 1., z + 1.)
m.propagate_bounds(input_bounds) # Create IBP bounds.
crown_init_bounds = _generate_identity_spec([m], shape=(1, 1, 1))
output_bounds = m.propagate_bounds(crown_init_bounds)
concrete_bounds = output_bounds.concretize()
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
lw, uw, lb, ub, cl, cu = sess.run([output_bounds.lower.w,
output_bounds.upper.w,
output_bounds.lower.b,
output_bounds.upper.b,
concrete_bounds.lower,
concrete_bounds.upper])
self.assertTrue(np.all(lw == 1.))
self.assertTrue(np.all(lb == 2.))
self.assertTrue(np.all(uw == 1.))
self.assertTrue(np.all(ub == 2.))
cl = cl.item()
cu = cu.item()
self.assertAlmostEqual(5., cl)
self.assertAlmostEqual(11., cu)
def testConv2dBackwardBounds(self):
m = snt.Conv2D(
output_channels=1,
kernel_shape=(2, 2),
padding='VALID',
stride=1,
use_bias=True,
initializers={
'w': tf.constant_initializer(1.),
'b': tf.constant_initializer(2.),
})
z = tf.constant([1, 2, 3, 4], dtype=tf.float32)
z = tf.reshape(z, [1, 2, 2, 1])
m(z) # Connect to create weights.
m = ibp.LinearConv2dWrapper(m)
input_bounds = ibp.IntervalBounds(z - 1., z + 1.)
m.propagate_bounds(input_bounds) # Create IBP bounds.
crown_init_bounds = _generate_identity_spec([m], shape=(1, 1, 1, 1, 1))
output_bounds = m.propagate_bounds(crown_init_bounds)
concrete_bounds = output_bounds.concretize()
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
l, u = sess.run([concrete_bounds.lower, concrete_bounds.upper])
l = l.item()
u = u.item()
self.assertAlmostEqual(8., l)
self.assertAlmostEqual(16., u)
def testReluBackwardBounds(self):
m = tf.nn.relu
z = tf.constant([[-2, 3]], dtype=tf.float32)
m = ibp.IncreasingMonotonicWrapper(m)
input_bounds = ibp.IntervalBounds(z - 1., z + 1.)
m.propagate_bounds(input_bounds) # Create IBP bounds.
crown_init_bounds = _generate_identity_spec([m], shape=(1, 2, 2),
dimension=2)
output_bounds = m.propagate_bounds(crown_init_bounds)
concrete_bounds = output_bounds.concretize()
with self.test_session() as sess:
l, u = sess.run([concrete_bounds.lower, concrete_bounds.upper])
self.assertAlmostEqual([[0., 2.]], l.tolist())
self.assertAlmostEqual([[0., 4.]], u.tolist())
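# Illustrative sketch of the arithmetic checked in testFCBackwardBounds above:
# with an identity specification, the backward (CROWN-style) bound of a single
# linear layer is just the affine function w . x + b, and concretizing it over
# the interval x in [centre - radius, centre + radius] gives
# b + w . centre -/+ |w| . radius. Plain numpy only; not part of the library.
def _concretized_linear_bound_sketch():
  import numpy as np
  w = np.ones(3)                     # Weights are all 1 in the test.
  b = 2.                             # Bias.
  centre = np.array([1., 2., 3.])    # Nominal input.
  radius = np.ones(3)                # Interval half-width.
  lower = b + w.dot(centre) - np.abs(w).dot(radius)
  upper = b + w.dot(centre) + np.abs(w).dot(radius)
  return lower, upper                # (5.0, 11.0), matching cl and cu above.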
if __name__ == '__main__':
tf.test.main()
| interval-bound-propagation-master | interval_bound_propagation/tests/crown_test.py |
# coding=utf-8
# Copyright 2019 The Interval Bound Propagation Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import interval_bound_propagation as ibp
import numpy as np
import tensorflow.compat.v1 as tf
def _get_inputs(dtype=tf.float32):
v = np.array(range(6), dtype=dtype.as_numpy_dtype)
input_v = np.array([v] * 7)
inputs = tf.constant(input_v)
return v, input_v, inputs
class LayersTest(tf.test.TestCase):
def assertBetween(self, value, minv, maxv):
"""Asserts that value is between minv and maxv (inclusive)."""
self.assertLessEqual(minv, value)
self.assertGreaterEqual(maxv, value)
# Subset of the tests in sonnet/python/modules/batch_norm_test.py.
def testBatchNormUpdateImproveStatistics(self):
"""Test that updating the moving_mean improves statistics."""
_, _, inputs = _get_inputs()
# Use small decay_rate to update faster.
bn = ibp.BatchNorm(offset=False, scale=False, decay_rate=0.1,
update_ops_collection=tf.GraphKeys.UPDATE_OPS)
out1 = bn(inputs, is_training=False)
# Build the update ops.
bn(inputs, is_training=True)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
out_v = sess.run(out1)
# Before updating the moving_mean the results are off.
self.assertBetween(np.max(np.abs(np.zeros([7, 6]) - out_v)), 2, 5)
sess.run(tuple(tf.get_collection(tf.GraphKeys.UPDATE_OPS)))
# After updating the moving_mean the results are better.
out_v = sess.run(out1)
self.assertBetween(np.max(np.abs(np.zeros([7, 6]) - out_v)), 1, 2)
def testImageNorm(self):
mean = [4, 0, -4]
std = [1., 2., 4.]
image = tf.constant(4., shape=[10, 2, 2, 3])
normalized_image = ibp.ImageNorm(mean, std)(image)
with self.test_session() as sess:
out_image = sess.run(normalized_image)
self.assertTrue(np.all(np.isclose(out_image[:, :, :, 0], 0.)))
self.assertTrue(np.all(np.isclose(out_image[:, :, :, 1], 2.)))
self.assertTrue(np.all(np.isclose(out_image[:, :, :, 2], 2.)))
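# Illustrative sketch of the per-channel normalization that testImageNorm
# exercises: ImageNorm is expected to compute (image - mean) / std, which with
# the constants above yields channel values (0., 2., 2.). Plain numpy only.
def _image_norm_sketch():
  import numpy as np
  mean = np.array([4., 0., -4.])
  std = np.array([1., 2., 4.])
  image = np.full((10, 2, 2, 3), 4.)
  normalized = (image - mean) / std  # Broadcasts over the channel axis.
  return normalized[0, 0, 0]         # array([0., 2., 2.])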
if __name__ == '__main__':
tf.test.main()
| interval-bound-propagation-master | interval_bound_propagation/tests/layers_test.py |
# coding=utf-8
# Copyright 2019 The Interval Bound Propagation Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import interval_bound_propagation as ibp
import numpy as np
import sonnet as snt
import tensorflow.compat.v1 as tf
def _build_model():
num_classes = 3
layer_types = (
('conv2d', (2, 2), 4, 'VALID', 1),
('activation', 'relu'),
('linear', 10),
('activation', 'relu'))
return ibp.DNN(num_classes, layer_types)
class ModelTest(parameterized.TestCase, tf.test.TestCase):
def testDNN(self):
predictor = _build_model()
# Input.
z = tf.constant([1, 2, 3, 4], dtype=tf.float32)
z = tf.reshape(z, [1, 2, 2, 1])
predictor(z)
# Verify the variables that are created.
expected_shapes = {
'predictor/conv2d_0/w:0': (2, 2, 1, 4),
'predictor/conv2d_0/b:0': (4,),
'predictor/linear_0/w:0': (4, 10),
'predictor/linear_0/b:0': (10,),
'predictor/linear_1/w:0': (10, 3),
'predictor/linear_1/b:0': (3,),
}
for v in predictor.get_variables():
self.assertEqual(expected_shapes[v.name], v.shape)
def _propagation_test(self, wrapper, inputs, outputs):
input_bounds = ibp.IntervalBounds(inputs, inputs)
output_bounds = wrapper.propagate_bounds(input_bounds)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
o, l, u = sess.run([outputs, output_bounds.lower, output_bounds.upper])
self.assertAlmostEqual(o.tolist(), l.tolist())
self.assertAlmostEqual(o.tolist(), u.tolist())
def testVerifiableModelWrapperDNN(self):
predictor = _build_model()
# Input.
z = tf.constant([1, 2, 3, 4], dtype=tf.float32)
z = tf.reshape(z, [1, 2, 2, 1])
wrapper = ibp.VerifiableModelWrapper(predictor)
wrapper(z)
# Verify basic wrapping.
self.assertEqual(predictor, wrapper.wrapped_network)
self.assertEqual(3, wrapper.output_size)
self.assertEqual((1, 3), tuple(wrapper.logits.shape.as_list()))
self.assertEqual(z, wrapper.inputs)
# Build another input and test reuse.
z2 = tf.constant([1, 2, 3, 4], dtype=tf.float32)
    z2 = tf.reshape(z2, [1, 2, 2, 1])
logits = wrapper(z2, reuse=True)
self.assertEqual(z, wrapper.inputs)
self.assertNotEqual(z2, wrapper.inputs)
# Check that the verifiable modules are constructed.
self.assertLen(wrapper.input_wrappers, 1)
self.assertLen(wrapper.modules, 6)
self.assertIsInstance(wrapper.modules[0].module, snt.Conv2D)
self.assertEqual(wrapper.modules[1].module, tf.nn.relu)
self.assertIsInstance(wrapper.modules[2].module, snt.BatchFlatten)
self.assertIsInstance(wrapper.modules[3].module, snt.Linear)
self.assertEqual(wrapper.modules[4].module, tf.nn.relu)
self.assertIsInstance(wrapper.modules[5].module, snt.Linear)
# It's a sequential network, so all nodes (including input) have fanout 1.
self.assertEqual(wrapper.fanout_of(wrapper.input_wrappers[0]), 1)
for module in wrapper.modules:
self.assertEqual(wrapper.fanout_of(module), 1)
# Check propagation.
self._propagation_test(wrapper, z2, logits)
def testVerifiableModelWrapperResnet(self):
def _build(z0, is_training=False): # pylint: disable=unused-argument
input_size = np.prod(z0.shape[1:])
# We make a resnet-like structure.
z = snt.Linear(input_size)(z0)
z_left = tf.nn.relu(z)
z_left = snt.Linear(input_size)(z_left)
z = z_left + z0
return snt.Linear(2)(z)
z = tf.constant([[1, 2, 3, 4]], dtype=tf.float32)
wrapper = ibp.VerifiableModelWrapper(_build)
logits = wrapper(z)
self.assertLen(wrapper.input_wrappers, 1)
self.assertLen(wrapper.modules, 5)
# Check input has fanout 2, as it is the start of the resnet block.
self.assertEqual(wrapper.fanout_of(wrapper.input_wrappers[0]), 2)
for module in wrapper.modules:
self.assertEqual(wrapper.fanout_of(module), 1)
# Check propagation.
self._propagation_test(wrapper, z, logits)
def testVerifiableModelWrapperPool(self):
def _build(z0):
z = tf.reduce_mean(z0, axis=1, keep_dims=True)
z = tf.reduce_max(z, axis=2, keep_dims=False)
return snt.Linear(2)(z)
z = tf.constant([[1, 2, 3, 4]], dtype=tf.float32)
z = tf.reshape(z, [1, 2, 2])
wrapper = ibp.VerifiableModelWrapper(_build)
logits = wrapper(z)
self.assertLen(wrapper.modules, 3)
# Check propagation.
self._propagation_test(wrapper, z, logits)
def testVerifiableModelWrapperConcat(self):
def _build(z0):
z = snt.Linear(10)(z0)
z = tf.concat([z, z0], axis=1)
return snt.Linear(2)(z)
z = tf.constant([[1, 2, 3, 4]], dtype=tf.float32)
wrapper = ibp.VerifiableModelWrapper(_build)
logits = wrapper(z)
self.assertLen(wrapper.modules, 3)
# Check propagation.
self._propagation_test(wrapper, z, logits)
def testVerifiableModelWrapperExpandAndSqueeze(self):
def _build(z0):
z = snt.Linear(10)(z0)
z = tf.expand_dims(z, axis=-1)
z = tf.squeeze(z, axis=-1)
return snt.Linear(2)(z)
z = tf.constant([[1, 2, 3, 4]], dtype=tf.float32)
wrapper = ibp.VerifiableModelWrapper(_build)
logits = wrapper(z)
self.assertLen(wrapper.modules, 4)
# Check propagation.
self._propagation_test(wrapper, z, logits)
@parameterized.named_parameters(
('Add', lambda z: z + z, 3),
('Sub', lambda z: z - z, 3),
('Identity', tf.identity, 3),
('Mul', lambda z: z * z, 3),
('Slice', lambda z: tf.slice(z, [0, 0], [-1, 5]), 3),
('StridedSlice', lambda z: z[:, :5], 3),
('Reshape', lambda z: tf.reshape(z, [2, 5]), 3),
('Const', lambda z: z + tf.ones_like(z), 5))
def testVerifiableModelWrapperSimple(self, fn, expected_modules):
def _build(z0):
z = snt.Linear(10)(z0)
z = fn(z)
return snt.Linear(2)(z)
z = tf.constant([[1, 2, 3, 4]], dtype=tf.float32)
wrapper = ibp.VerifiableModelWrapper(_build)
logits = wrapper(z)
self.assertLen(wrapper.modules, expected_modules)
# Check propagation.
self._propagation_test(wrapper, z, logits)
def testPointlessReshape(self):
def _build(z0):
z = snt.Linear(10)(z0)
z = snt.BatchFlatten()(z) # This is a no-op; no graph nodes created.
return snt.Linear(2)(z)
z = tf.constant([[1, 2, 3, 4]], dtype=tf.float32)
wrapper = ibp.VerifiableModelWrapper(_build)
logits = wrapper(z)
# Expect the batch flatten to have been skipped.
self.assertLen(wrapper.modules, 2)
self.assertIsInstance(wrapper.modules[0], ibp.LinearFCWrapper)
self.assertIsInstance(wrapper.modules[1], ibp.LinearFCWrapper)
# Check propagation.
self._propagation_test(wrapper, z, logits)
def testLeakyRelu(self):
def _build(z0):
z = snt.Linear(10)(z0)
      z = tf.nn.leaky_relu(z, alpha=0.375)
return snt.Linear(2)(z)
z = tf.constant([[1, 2, 3, 4]], dtype=tf.float32)
wrapper = ibp.VerifiableModelWrapper(_build)
logits = wrapper(z)
self.assertLen(wrapper.modules, 3)
self.assertEqual(wrapper.modules[1].module.__name__, 'leaky_relu')
self.assertEqual(wrapper.modules[1].parameters['alpha'], 0.375)
# Check propagation.
self._propagation_test(wrapper, z, logits)
def testMultipleInputs(self):
    # Simple network that adds its two inputs.
def _build(z0, z1):
return z0 + z1
z0 = tf.constant([[1, 2, 3, 4]], dtype=tf.float32)
z1 = tf.constant([[2, 2, 4, 4]], dtype=tf.float32)
wrapper = ibp.VerifiableModelWrapper(_build)
logits = wrapper(z0, z1)
input_bounds0 = ibp.IntervalBounds(z0 - 2, z0 + 1)
input_bounds1 = ibp.IntervalBounds(z1, z1 + 10)
output_bounds = wrapper.propagate_bounds(input_bounds0, input_bounds1)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
o, l, u = sess.run([logits, output_bounds.lower, output_bounds.upper])
print(o, l, u)
self.assertAlmostEqual([[3., 4., 7., 8.]], o.tolist())
self.assertAlmostEqual([[1., 2., 5., 6.]], l.tolist())
self.assertAlmostEqual([[14., 15., 18., 19.]], u.tolist())
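# Illustrative sketch of the interval arithmetic behind testMultipleInputs:
# adding two independently bounded inputs simply adds their endpoints,
# [l0 + l1, u0 + u1]. Plain numpy only; reproduces the asserted values.
def _interval_add_sketch():
  import numpy as np
  z0 = np.array([[1., 2., 3., 4.]])
  z1 = np.array([[2., 2., 4., 4.]])
  lower = (z0 - 2.) + z1             # [[ 1.,  2.,  5.,  6.]]
  upper = (z0 + 1.) + (z1 + 10.)     # [[14., 15., 18., 19.]]
  return lower, upper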
if __name__ == '__main__':
tf.test.main()
| interval-bound-propagation-master | interval_bound_propagation/tests/model_test.py |
# coding=utf-8
# Copyright 2019 The Interval Bound Propagation Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for bounds."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import interval_bound_propagation as ibp
import numpy as np
import sonnet as snt
import tensorflow.compat.v1 as tf
class IntervalBoundsTest(parameterized.TestCase, tf.test.TestCase):
def testFCIntervalBounds(self):
m = snt.Linear(1, initializers={
'w': tf.constant_initializer(1.),
'b': tf.constant_initializer(2.),
})
z = tf.constant([[1, 2, 3]], dtype=tf.float32)
m(z) # Connect to create weights.
m = ibp.LinearFCWrapper(m)
input_bounds = ibp.IntervalBounds(z - 1., z + 1.)
output_bounds = m.propagate_bounds(input_bounds)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
l, u = sess.run([output_bounds.lower, output_bounds.upper])
l = l.item()
u = u.item()
self.assertAlmostEqual(5., l)
self.assertAlmostEqual(11., u)
def testConv1dIntervalBounds(self):
m = snt.Conv1D(
output_channels=1,
kernel_shape=2,
padding='VALID',
stride=1,
use_bias=True,
initializers={
'w': tf.constant_initializer(1.),
'b': tf.constant_initializer(2.),
})
z = tf.constant([3, 4], dtype=tf.float32)
z = tf.reshape(z, [1, 2, 1])
m(z) # Connect to create weights.
m = ibp.LinearConv1dWrapper(m)
input_bounds = ibp.IntervalBounds(z - 1., z + 1.)
output_bounds = m.propagate_bounds(input_bounds)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
l, u = sess.run([output_bounds.lower, output_bounds.upper])
l = l.item()
u = u.item()
self.assertAlmostEqual(7., l)
self.assertAlmostEqual(11., u)
def testConv2dIntervalBounds(self):
m = snt.Conv2D(
output_channels=1,
kernel_shape=(2, 2),
padding='VALID',
stride=1,
use_bias=True,
initializers={
'w': tf.constant_initializer(1.),
'b': tf.constant_initializer(2.),
})
z = tf.constant([1, 2, 3, 4], dtype=tf.float32)
z = tf.reshape(z, [1, 2, 2, 1])
m(z) # Connect to create weights.
m = ibp.LinearConv2dWrapper(m)
input_bounds = ibp.IntervalBounds(z - 1., z + 1.)
output_bounds = m.propagate_bounds(input_bounds)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
l, u = sess.run([output_bounds.lower, output_bounds.upper])
l = l.item()
u = u.item()
self.assertAlmostEqual(8., l)
self.assertAlmostEqual(16., u)
def testReluIntervalBounds(self):
m = tf.nn.relu
z = tf.constant([[-2, 3]], dtype=tf.float32)
m = ibp.IncreasingMonotonicWrapper(m)
input_bounds = ibp.IntervalBounds(z - 1., z + 1.)
output_bounds = m.propagate_bounds(input_bounds)
with self.test_session() as sess:
l, u = sess.run([output_bounds.lower, output_bounds.upper])
self.assertAlmostEqual([[0., 2.]], l.tolist())
self.assertAlmostEqual([[0., 4.]], u.tolist())
def testMulIntervalBounds(self):
m = tf.multiply
z = tf.constant([[-2, 3, 0]], dtype=tf.float32)
m = ibp.PiecewiseMonotonicWrapper(m, (0,))
input_bounds = ibp.IntervalBounds(z - 1., z + 1.)
output_bounds = m.propagate_bounds(input_bounds, input_bounds)
with self.test_session() as sess:
l, u = sess.run([output_bounds.lower, output_bounds.upper])
self.assertAlmostEqual([[1., 4., -1.]], l.tolist())
self.assertAlmostEqual([[9., 16., 1.]], u.tolist())
def testSubIntervalBounds(self):
m = tf.subtract
z = tf.constant([[-2, 3, 0]], dtype=tf.float32)
m = ibp.PiecewiseMonotonicWrapper(m)
input_bounds = ibp.IntervalBounds(z - 1., z + 1.)
output_bounds = m.propagate_bounds(input_bounds, input_bounds)
with self.test_session() as sess:
l, u = sess.run([output_bounds.lower, output_bounds.upper])
self.assertAlmostEqual([[-2., -2., -2.]], l.tolist())
self.assertAlmostEqual([[2., 2., 2.]], u.tolist())
@parameterized.named_parameters(
('DefaultAxis', -1, [[[1., 0.5, 0.5], [1., 0.5, 0.5]],
[[1. / 3, 0., 0.], [1. / 3, 0., 0.]]]),
('NonDefaultAxis', 0, [[[1., 1., 1.], [1., 1., 1.]],
[[0., 0., 0.], [0., 0., 0.]]]))
def testSoftmaxIntervalBounds(self, axis, expected_outputs):
z = tf.constant([[1., -10., -10.], [1., -10., -10.]])
input_bounds = ibp.IntervalBounds(z - 1.0, z + 10.0)
softmax_fn = lambda x: tf.nn.softmax(x, axis=axis)
softmax_fn = ibp.VerifiableModelWrapper(softmax_fn)
softmax_fn(z)
output_bounds = softmax_fn.propagate_bounds(input_bounds)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
l, u = sess.run([output_bounds.lower, output_bounds.upper])
self.assertTrue(np.all(np.abs(expected_outputs[0] - u) < 1e-3))
self.assertTrue(np.all(np.abs(expected_outputs[1] - l) < 1e-3))
def testBatchNormIntervalBounds(self):
z = tf.constant([[1, 2, 3]], dtype=tf.float32)
input_bounds = ibp.IntervalBounds(z - 1., z + 1.)
g = tf.reshape(tf.range(-1, 2, dtype=tf.float32), [1, 3])
b = tf.reshape(tf.range(3, dtype=tf.float32), [1, 3])
batch_norm = ibp.BatchNorm(scale=True, offset=True, eps=0., initializers={
'gamma': lambda *args, **kwargs: g,
'beta': lambda *args, **kwargs: b,
'moving_mean': tf.constant_initializer(1.),
'moving_variance': tf.constant_initializer(4.),
})
batch_norm(z, is_training=False)
batch_norm = ibp.BatchNormWrapper(batch_norm)
# Test propagation.
output_bounds = batch_norm.propagate_bounds(input_bounds)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
l, u = sess.run([output_bounds.lower, output_bounds.upper])
self.assertAlmostEqual([[-.5, 1., 2.5]], l.tolist())
self.assertAlmostEqual([[.5, 1., 3.5]], u.tolist())
def testCaching(self):
m = snt.Linear(1, initializers={
'w': tf.constant_initializer(1.),
'b': tf.constant_initializer(2.),
})
z = tf.placeholder(shape=(1, 3), dtype=tf.float32)
m(z) # Connect to create weights.
m = ibp.LinearFCWrapper(m)
input_bounds = ibp.IntervalBounds(z - 1., z + 1.)
output_bounds = m.propagate_bounds(input_bounds)
input_bounds.enable_caching()
output_bounds.enable_caching()
update_all_caches_op = tf.group([input_bounds.update_cache_op,
output_bounds.update_cache_op])
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
# Initialise the caches based on the model inputs.
sess.run(update_all_caches_op, feed_dict={z: [[1., 2., 3.]]})
l, u = sess.run([output_bounds.lower, output_bounds.upper])
l = l.item()
u = u.item()
self.assertAlmostEqual(5., l)
self.assertAlmostEqual(11., u)
# Update the cache based on a different set of inputs.
sess.run([output_bounds.update_cache_op], feed_dict={z: [[2., 3., 7.]]})
# We only updated the output bounds' cache.
# This asserts that the computation depends on the underlying
# input bounds tensor, not on cached version of it.
# (Thus it doesn't matter what order the caches are updated.)
l, u = sess.run([output_bounds.lower, output_bounds.upper])
l = l.item()
u = u.item()
self.assertAlmostEqual(11., l)
self.assertAlmostEqual(17., u)
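# Illustrative sketch of the interval product used in testMulIntervalBounds:
# with the two factors treated as independent intervals (which is what the
# expected values in that test assume), the output bounds are the elementwise
# min and max over the four corner products. Plain numpy only.
def _interval_multiply_sketch():
  import numpy as np
  l = np.array([[-3., 2., -1.]])     # z - 1 for z = [-2, 3, 0].
  u = np.array([[-1., 4., 1.]])      # z + 1.
  corners = np.stack([l * l, l * u, u * l, u * u])
  return corners.min(axis=0), corners.max(axis=0)
  # ([[1., 4., -1.]], [[9., 16., 1.]])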
if __name__ == '__main__':
tf.test.main()
| interval-bound-propagation-master | interval_bound_propagation/tests/bounds_test.py |
# coding=utf-8
# Copyright 2019 The Interval Bound Propagation Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for loss."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import interval_bound_propagation as ibp
import sonnet as snt
import tensorflow.compat.v1 as tf
class FixedNN(snt.AbstractModule):
def _build(self, z0, is_training=False):
self._m = snt.Linear(2, initializers={
'w': tf.constant_initializer(1.),
        'b': lambda *unused_args, **unused_kwargs: tf.constant([0., 1.]),
})
return self._m(z0)
class LossTest(tf.test.TestCase):
def testEndToEnd(self):
predictor = FixedNN()
predictor = ibp.VerifiableModelWrapper(predictor)
# Labels.
labels = tf.constant([1], dtype=tf.int64)
# Connect to input.
z = tf.constant([[1, 2, 3]], dtype=tf.float32)
predictor(z, is_training=True)
# Input bounds.
eps = 1.
input_bounds = ibp.IntervalBounds(z - eps, z + eps)
predictor.propagate_bounds(input_bounds)
# Create output specification (that forces the first logits to be greater).
c = tf.constant([[[1, -1]]], dtype=tf.float32)
d = tf.constant([[0]], dtype=tf.float32)
# Turn elision off for more interesting results.
spec = ibp.LinearSpecification(c, d, collapse=False)
# Create an attack.
attack = ibp.UntargetedPGDAttack(
predictor, spec, eps, num_steps=1, input_bounds=(-100., 100))
# Build loss.
losses = ibp.Losses(predictor, spec, attack,
interval_bounds_loss_type='hinge',
interval_bounds_hinge_margin=0.)
losses(labels)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
# We expect the worst-case logits from IBP to be [9, 4].
# The adversarial attack should fail since logits are always [l, l + 1].
# Similarly, the nominal predictions are correct.
accuracy_values, loss_values = sess.run(
[losses.scalar_metrics, losses.scalar_losses])
self.assertAlmostEqual(1., accuracy_values.nominal_accuracy)
self.assertAlmostEqual(0., accuracy_values.verified_accuracy)
self.assertAlmostEqual(1., accuracy_values.attack_accuracy)
expected_xent = 0.31326168751822947
self.assertAlmostEqual(expected_xent, loss_values.nominal_cross_entropy,
places=5)
self.assertAlmostEqual(expected_xent, loss_values.attack_cross_entropy,
places=5)
expected_hinge = 5.
self.assertAlmostEqual(expected_hinge, loss_values.verified_loss)
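# Illustrative sketch of where the expected verified hinge loss of 5 comes
# from: with unit weights and biases [0, 1], the logits are [s, s + 1] where
# s = sum(x); for x in [z - 1, z + 1] the IBP logit intervals are [3, 9] and
# [4, 10], and the worst case of the specification logit_0 - logit_1 (with
# elision off) is upper_0 - lower_1 = 9 - 4 = 5. Plain numpy arithmetic only;
# this is not the library's loss implementation.
def _expected_verified_hinge_sketch():
  import numpy as np
  z = np.array([1., 2., 3.])
  eps = 1.
  s_lower, s_upper = np.sum(z - eps), np.sum(z + eps)   # 3., 9.
  logit0 = (s_lower, s_upper)                           # Interval [3, 9].
  logit1 = (s_lower + 1., s_upper + 1.)                 # Interval [4, 10].
  return logit0[1] - logit1[0]                          # 5.0, expected_hinge.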
if __name__ == '__main__':
tf.test.main()
| interval-bound-propagation-master | interval_bound_propagation/tests/loss_test.py |
# coding=utf-8
# Copyright 2019 The Interval Bound Propagation Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for naive_bounds."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import interval_bound_propagation as ibp
from interval_bound_propagation import layer_utils
import numpy as np
import tensorflow.compat.v1 as tf
class SimplexBoundsTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(('float32', tf.float32),
('float64', tf.float64))
def test_linear_simplex_bounds_shape(self, dtype):
vocab_size = 103
batch_size = 11
input_size = 7
output_size = 5
w = tf.placeholder(dtype=dtype, shape=(input_size, output_size))
b = tf.placeholder(dtype=dtype, shape=(output_size,))
embedding = tf.placeholder(dtype=dtype, shape=(vocab_size, input_size))
centres = tf.placeholder(dtype=dtype, shape=(batch_size, input_size))
r = .2
bounds_in = ibp.SimplexBounds(embedding, centres, r)
bounds_out = bounds_in.apply_linear(None, w, b)
lb_out, ub_out = bounds_out.lower, bounds_out.upper
self.assertEqual(dtype, lb_out.dtype)
self.assertEqual(dtype, ub_out.dtype)
self.assertEqual((batch_size, output_size), lb_out.shape)
self.assertEqual((batch_size, output_size), ub_out.shape)
@parameterized.named_parameters(('float32', tf.float32, 1.e-6),
('float64', tf.float64, 1.e-8))
def test_linear_bounds_on_embedding_layer(self, dtype, tol):
w = tf.constant([[1.0, 2.0, 3.0], [4.0, -5.0, 6.0]], dtype=dtype)
b = tf.constant([0.01, -0.02, 0.03], dtype=dtype)
embedding = tf.constant([[0.0, 0.0], [10.0, 10.0], [0.0, -20.0]],
dtype=dtype)
centres = tf.constant([[7.0, 6.0]], dtype=dtype)
r = .1
# Simplex vertices: [6.3, 5.4], [7.3, 6.4], and [6.3, 3.4].
# They map to: [27.91, -14.42, 51.33], [32.91, -17.42, 60.33],
# and [19.91, -4.42, 39.33].
bounds_in = ibp.SimplexBounds(embedding, centres, r)
bounds_out = bounds_in.apply_linear(None, w, b)
lb_out, ub_out = bounds_out.lower, bounds_out.upper
lb_out_exp = np.array([[19.91, -17.42, 39.33]])
ub_out_exp = np.array([[32.91, -4.42, 60.33]])
with self.test_session() as session:
lb_out_act, ub_out_act = session.run((lb_out, ub_out))
self.assertAllClose(lb_out_exp, lb_out_act, atol=tol, rtol=tol)
self.assertAllClose(ub_out_exp, ub_out_act, atol=tol, rtol=tol)
@parameterized.named_parameters(('float32', tf.float32),
('float64', tf.float64))
def test_conv1d_simplex_bounds_shape(self, dtype):
num_vertices = 41
batch_size = 11
input_length = 13
kernel_length = 5
input_channels = 3
output_channels = 2
padding = 'VALID'
strides = (2,)
# Expected output dimensions, based on convolution settings.
output_length = 5
w = tf.placeholder(dtype=dtype, shape=(
kernel_length, input_channels, output_channels))
b = tf.placeholder(dtype=dtype, shape=(output_channels,))
vertices = tf.placeholder(dtype=dtype, shape=(
batch_size, num_vertices, input_length, input_channels))
centres = tf.placeholder(dtype=dtype, shape=(
batch_size, input_length, input_channels))
r = .2
bounds_in = ibp.SimplexBounds(vertices, centres, r)
bounds_out = bounds_in.apply_conv1d(None, w, b, padding, strides)
lb_out, ub_out = bounds_out.lower, bounds_out.upper
self.assertEqual(dtype, lb_out.dtype)
self.assertEqual(dtype, ub_out.dtype)
self.assertEqual((batch_size, output_length, output_channels),
lb_out.shape)
self.assertEqual((batch_size, output_length, output_channels),
ub_out.shape)
@parameterized.named_parameters(('float32', tf.float32, 2.e-6),
('float64', tf.float64, 1.e-8))
def test_conv1d_simplex_bounds(self, dtype, tol):
num_vertices = 37
batch_size = 53
input_length = 17
kernel_length = 7
input_channels = 3
output_channels = 2
padding = 'VALID'
strides = (2,)
w = tf.random_normal(dtype=dtype, shape=(
kernel_length, input_channels, output_channels))
b = tf.random_normal(dtype=dtype, shape=(output_channels,))
vertices = tf.random_normal(dtype=dtype, shape=(
batch_size, num_vertices, input_length, input_channels))
centres = tf.random_normal(dtype=dtype, shape=(
batch_size, input_length, input_channels))
r = .2
bounds_in = ibp.SimplexBounds(vertices, centres, r)
bounds_out = bounds_in.apply_conv1d(None, w, b, padding, strides[0])
lb_out, ub_out = bounds_out.lower, bounds_out.upper
# Compare against equivalent linear layer.
bounds_out_lin = _materialised_conv_simplex_bounds(
w, b, padding, strides, bounds_in)
lb_out_lin, ub_out_lin = bounds_out_lin.lower, bounds_out_lin.upper
with self.test_session() as session:
(lb_out_val, ub_out_val,
lb_out_lin_val, ub_out_lin_val) = session.run((lb_out, ub_out,
lb_out_lin, ub_out_lin))
self.assertAllClose(lb_out_val, lb_out_lin_val, atol=tol, rtol=tol)
self.assertAllClose(ub_out_val, ub_out_lin_val, atol=tol, rtol=tol)
def _materialised_conv_simplex_bounds(w, b, padding, strides, bounds_in):
"""Calculates naive bounds on output of an N-D convolution layer.
The calculation is performed by first materialising the convolution as a
(sparse) fully-connected linear layer. Doing so will affect performance, but
may be useful for investigating numerical stability issues.
The layer inputs and the vertices are assumed to be (N-D) sequences in an
embedding space. The input domain is taken to be the simplex of perturbations
of the centres (true inputs) towards the given vertices.
Specifically, the input domain is the convex hull of this set of vertices::
{ (1-r)*centres + r*vertices[j] : j<num_vertices }
Args:
w: (N+2)D tensor of shape (kernel_length, input_channels, output_channels)
containing weights for the convolution.
b: 1D tensor of shape (output_channels) containing biases for the
convolution, or `None` if no bias.
padding: `"VALID"` or `"SAME"`, the convolution's padding algorithm.
    strides: Integer list of length N giving the stride in each spatial
      dimension (e.g. `[vertical_stride, horizontal_stride]` for 2D).
bounds_in: bounds of shape (batch_size, input_length, input_channels)
containing bounds on the inputs to the convolution layer.
Returns:
bounds of shape (batch_size, output_length, output_channels)
with bounds on the outputs of the convolution layer.
Raises:
ValueError: if an unsupported convolution dimensionality is encountered.
"""
# Flatten the inputs, as the materialised convolution will have no
# spatial structure.
bounds_in_flat = bounds_in.apply_batch_reshape(None, [-1])
# Materialise the convolution as a (sparse) fully connected linear layer.
input_shape = bounds_in.shape[1:]
w_lin, b_lin = layer_utils.materialise_conv(w, b, input_shape,
padding=padding, strides=strides)
bounds_out_flat = bounds_in_flat.apply_linear(None, w_lin, b_lin)
# Unflatten the output bounds.
output_shape = layer_utils.conv_output_shape(input_shape, w, padding, strides)
return bounds_out_flat.apply_batch_reshape(None, output_shape)
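# Illustrative sketch of the bound construction that
# test_linear_bounds_on_embedding_layer checks: the input domain is the convex
# hull of the simplex corners (1 - r) * centres + r * vertices[j], and because
# the layer is affine the output bounds are the elementwise min/max over the
# mapped corners. Plain numpy only.
def _simplex_linear_bounds_sketch():
  import numpy as np
  w = np.array([[1., 2., 3.], [4., -5., 6.]])
  b = np.array([0.01, -0.02, 0.03])
  embedding = np.array([[0., 0.], [10., 10.], [0., -20.]])
  centre = np.array([7., 6.])
  r = 0.1
  corners = (1. - r) * centre + r * embedding   # The three simplex vertices.
  mapped = corners.dot(w) + b                   # Affine image of each corner.
  return mapped.min(axis=0), mapped.max(axis=0)
  # ([19.91, -17.42, 39.33], [32.91, -4.42, 60.33])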
if __name__ == '__main__':
tf.test.main()
| interval-bound-propagation-master | interval_bound_propagation/tests/simplex_bounds_test.py |
# coding=utf-8
# Copyright 2019 The Interval Bound Propagation Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for attacks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import interval_bound_propagation as ibp
import sonnet as snt
import tensorflow.compat.v1 as tf
class MockWithIsTraining(object):
"""Mock wrapper around the predictor network."""
def __init__(self, module, test):
self._module = module
self._test = test
def __call__(self, z0, is_training=False):
# is_training should be False.
self._test.assertFalse(is_training)
return self._module(z0)
class MockWithoutIsTraining(object):
"""Mock wrapper around the predictor network."""
def __init__(self, module, test):
self._module = module
self._test = test
def __call__(self, z0):
return self._module(z0)
class AttacksTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.named_parameters(
('UntargetedWithGradientDescent', MockWithIsTraining,
ibp.UntargetedPGDAttack, ibp.UnrolledGradientDescent, 1.),
('UntargetedWithAdam', MockWithIsTraining,
ibp.UntargetedPGDAttack, ibp.UnrolledAdam, 1.),
('MultiTargetedWithGradientDescent', MockWithIsTraining,
ibp.MultiTargetedPGDAttack, ibp.UnrolledGradientDescent, 1.),
('MultiTargetedWithAdam', MockWithIsTraining,
ibp.MultiTargetedPGDAttack, ibp.UnrolledAdam, 1.),
('DiverseEpsilon', MockWithIsTraining,
ibp.MultiTargetedPGDAttack, ibp.UnrolledAdam, [1., 1.]),
('WithoutIsTraining', MockWithoutIsTraining,
ibp.UntargetedPGDAttack, ibp.UnrolledGradientDescent, 1.),
('Restarted', MockWithIsTraining,
ibp.UntargetedPGDAttack, ibp.UnrolledGradientDescent, 1., True),
('SPSA', MockWithIsTraining,
ibp.UntargetedPGDAttack, ibp.UnrolledSPSAAdam, 1.))
def testEndToEnd(self, predictor_cls, attack_cls, optimizer_cls, epsilon,
restarted=False):
# l-\infty norm of perturbation ball.
if isinstance(epsilon, list):
# We test the ability to have different epsilons across dimensions.
epsilon = tf.constant([epsilon], dtype=tf.float32)
bounds = (-.5, 2.5)
# Create a simple network.
m = snt.Linear(1, initializers={
'w': tf.constant_initializer(1.),
'b': tf.constant_initializer(1.),
})
z = tf.constant([[1, 2]], dtype=tf.float32)
predictor = predictor_cls(m, self)
# Not important for the test but needed.
labels = tf.constant([1], dtype=tf.int64)
# We create two attacks to maximize and then minimize the output.
max_spec = ibp.LinearSpecification(tf.constant([[[1.]]]))
max_attack = attack_cls(predictor, max_spec, epsilon, input_bounds=bounds,
optimizer_builder=optimizer_cls)
if restarted:
max_attack = ibp.RestartedAttack(max_attack, num_restarts=10)
z_max = max_attack(z, labels)
min_spec = ibp.LinearSpecification(tf.constant([[[-1.]]]))
min_attack = attack_cls(predictor, min_spec, epsilon, input_bounds=bounds,
optimizer_builder=optimizer_cls)
if restarted:
min_attack = ibp.RestartedAttack(min_attack, num_restarts=10)
z_min = min_attack(z, labels)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
z_max_values, z_min_values = sess.run([z_max, z_min])
z_max_values = z_max_values[0]
z_min_values = z_min_values[0]
self.assertAlmostEqual(2., z_max_values[0])
self.assertAlmostEqual(2.5, z_max_values[1])
self.assertAlmostEqual(0., z_min_values[0])
self.assertAlmostEqual(1., z_min_values[1])
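# Illustrative sketch of the analytic optimum the attacks above should reach:
# for a linear model, maximising (or minimising) the output over an l-infinity
# ball of radius epsilon intersected with the input box is solved by moving
# each coordinate by +/- epsilon along the sign of its weight and clipping to
# the box. Plain numpy only; reproduces the asserted values.
def _linear_attack_optimum_sketch():
  import numpy as np
  z = np.array([1., 2.])
  w = np.array([1., 1.])              # The snt.Linear weights are all 1.
  eps, lo, hi = 1., -.5, 2.5
  z_max = np.clip(z + eps * np.sign(w), lo, hi)   # [2. , 2.5]
  z_min = np.clip(z - eps * np.sign(w), lo, hi)   # [0. , 1. ]
  return z_max, z_min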
if __name__ == '__main__':
tf.test.main()
| interval-bound-propagation-master | interval_bound_propagation/tests/attacks_test.py |
# coding=utf-8
# Copyright 2019 The Interval Bound Propagation Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for symbolic bounds."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import interval_bound_propagation as ibp
import numpy as np
import sonnet as snt
import tensorflow.compat.v1 as tf
class SymbolicBoundsTest(parameterized.TestCase, tf.test.TestCase):
def testConvertSymbolicBounds(self):
z = tf.constant([[1, 2, 3, 4]], dtype=tf.float32)
z = tf.reshape(z, [1, 2, 2])
b = ibp.SymbolicBounds.convert(z)
for l in (b.lower, b.upper):
self.assertEqual([1, 4, 2, 2], l.w.shape.as_list())
self.assertEqual([1, 2, 2], l.b.shape.as_list())
self.assertEqual([1, 4], l.lower.shape.as_list())
self.assertEqual([1, 4], l.upper.shape.as_list())
def testFCSymbolicBounds(self):
m = snt.Linear(1, initializers={
'w': tf.constant_initializer(1.),
'b': tf.constant_initializer(2.),
})
z = tf.constant([[1, 2, 3]], dtype=tf.float32)
m(z) # Connect to create weights.
m = ibp.LinearFCWrapper(m)
input_bounds = ibp.IntervalBounds(z - 1., z + 1.)
input_bounds = ibp.SymbolicBounds.convert(input_bounds)
output_bounds = m.propagate_bounds(input_bounds)
concrete_bounds = ibp.IntervalBounds.convert(output_bounds)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
l, u, cl, cu = sess.run([output_bounds.lower, output_bounds.upper,
concrete_bounds.lower, concrete_bounds.upper])
self.assertTrue(np.all(l.w == 1.))
self.assertTrue(np.all(l.b == 2.))
self.assertAlmostEqual([[0, 1, 2]], l.lower.tolist())
self.assertAlmostEqual([[2, 3, 4]], l.upper.tolist())
self.assertTrue(np.all(u.w == 1.))
self.assertTrue(np.all(u.b == 2.))
self.assertAlmostEqual([[0, 1, 2]], u.lower.tolist())
self.assertAlmostEqual([[2, 3, 4]], u.upper.tolist())
cl = cl.item()
cu = cu.item()
self.assertAlmostEqual(5., cl)
self.assertAlmostEqual(11., cu)
def testConv2dSymbolicBounds(self):
m = snt.Conv2D(
output_channels=1,
kernel_shape=(2, 2),
padding='VALID',
stride=1,
use_bias=True,
initializers={
'w': tf.constant_initializer(1.),
'b': tf.constant_initializer(2.),
})
z = tf.constant([1, 2, 3, 4], dtype=tf.float32)
z = tf.reshape(z, [1, 2, 2, 1])
m(z) # Connect to create weights.
m = ibp.LinearConv2dWrapper(m)
input_bounds = ibp.IntervalBounds(z - 1., z + 1.)
input_bounds = ibp.SymbolicBounds.convert(input_bounds)
output_bounds = m.propagate_bounds(input_bounds)
output_bounds = ibp.IntervalBounds.convert(output_bounds)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
l, u = sess.run([output_bounds.lower, output_bounds.upper])
l = l.item()
u = u.item()
self.assertAlmostEqual(8., l)
self.assertAlmostEqual(16., u)
def testConv1dSymbolicBounds(self):
m = snt.Conv1D(
output_channels=1,
kernel_shape=(2),
padding='VALID',
stride=1,
use_bias=True,
initializers={
'w': tf.constant_initializer(1.),
'b': tf.constant_initializer(3.),
})
z = tf.constant([3, 4], dtype=tf.float32)
z = tf.reshape(z, [1, 2, 1])
m(z) # Connect to create weights.
m = ibp.LinearConv1dWrapper(m)
input_bounds = ibp.IntervalBounds(z - 1., z + 1.)
input_bounds = ibp.SymbolicBounds.convert(input_bounds)
output_bounds = m.propagate_bounds(input_bounds)
output_bounds = ibp.IntervalBounds.convert(output_bounds)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
l, u = sess.run([output_bounds.lower, output_bounds.upper])
l = l.item()
u = u.item()
self.assertAlmostEqual(8., l)
self.assertAlmostEqual(12., u)
def testReluSymbolicBounds(self):
m = tf.nn.relu
z = tf.constant([[-2, 3]], dtype=tf.float32)
m = ibp.IncreasingMonotonicWrapper(m)
input_bounds = ibp.IntervalBounds(z - 1., z + 1.)
input_bounds = ibp.SymbolicBounds.convert(input_bounds)
output_bounds = m.propagate_bounds(input_bounds)
output_bounds = ibp.IntervalBounds.convert(output_bounds)
with self.test_session() as sess:
l, u = sess.run([output_bounds.lower, output_bounds.upper])
self.assertAlmostEqual([[0., 2.]], l.tolist())
self.assertAlmostEqual([[0., 4.]], u.tolist())
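# Illustrative sketch of how a symbolic bound of the form w . x + b is
# concretized over an interval [x_lower, x_upper] (what IntervalBounds.convert
# does after testFCSymbolicBounds): positive coefficients take the lower end
# for the minimum and the upper end for the maximum, and vice versa for
# negative coefficients. Plain numpy only.
def _concretize_symbolic_bound_sketch():
  import numpy as np
  w = np.ones(3)                      # Coefficients from the FC test above.
  b = 2.
  x_lower = np.array([0., 1., 2.])
  x_upper = np.array([2., 3., 4.])
  w_pos, w_neg = np.maximum(w, 0.), np.minimum(w, 0.)
  lower = b + w_pos.dot(x_lower) + w_neg.dot(x_upper)   # 5.0
  upper = b + w_pos.dot(x_upper) + w_neg.dot(x_lower)   # 11.0
  return lower, upper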
if __name__ == '__main__':
tf.test.main()
| interval-bound-propagation-master | interval_bound_propagation/tests/fastlin_test.py |
# coding=utf-8
# Copyright 2019 The Interval Bound Propagation Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for specification."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import interval_bound_propagation as ibp
import numpy as np
import tensorflow.compat.v1 as tf
MockLinearModule = collections.namedtuple('MockLinearModule', ['w', 'b'])
MockModule = collections.namedtuple(
'MockModule', ['input_bounds', 'output_bounds', 'module'])
def _build_spec_input():
# Specifications expects a list of objects with output_bounds or input_bounds
# attributes.
w = np.identity(2, dtype=np.float32)
b = np.ones(2, dtype=np.float32)
snt_module = MockLinearModule(tf.constant(w), tf.constant(b))
z_lower = np.array([[1, 2]], dtype=np.float32)
z_upper = np.array([[3, 4]], dtype=np.float32)
input_bounds = ibp.IntervalBounds(tf.constant(z_lower), tf.constant(z_upper))
z_lower += b
z_upper += b
output_bounds = ibp.IntervalBounds(tf.constant(z_lower), tf.constant(z_upper))
return [MockModule(input_bounds, output_bounds, snt_module)]
def _build_classification_specification(label, num_classes, collapse):
"""Returns a LinearSpecification for adversarial classification."""
# Pre-construct the specifications of the different classes.
eye = np.eye(num_classes - 1)
specifications = []
for i in range(num_classes):
specifications.append(np.concatenate(
[eye[:, :i], -np.ones((num_classes - 1, 1)), eye[:, i:]], axis=1))
specifications = np.array(specifications, dtype=np.float32)
specifications = tf.constant(specifications)
# We can then use gather.
c = tf.gather(specifications, label)
# By construction all specifications are relevant.
d = tf.zeros(shape=(tf.shape(label)[0], num_classes - 1))
return ibp.LinearSpecification(c, d, prune_irrelevant=False,
collapse=collapse)
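# Illustrative sketch of the specification matrix built above: for true class
# y, each of the num_classes - 1 rows of c is e_j - e_y (j != y), so c . logits
# gives logits_j - logits_y, which must stay non-positive for the prediction
# to be verifiably correct. Plain numpy only.
def _classification_spec_rows_sketch(label=1, num_classes=3):
  import numpy as np
  eye = np.eye(num_classes - 1)
  c = np.concatenate(
      [eye[:, :label], -np.ones((num_classes - 1, 1)), eye[:, label:]], axis=1)
  return c  # [[1., -1., 0.], [0., -1., 1.]] for label=1, num_classes=3.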
class SpecificationTest(tf.test.TestCase):
def testLinearSpecification(self):
# c has shape [batch_size, num_specifications, num_outputs]
# d has shape [batch_size, num_specifications]
c = tf.constant([[[1, 2]]], dtype=tf.float32)
d = tf.constant([[3]], dtype=tf.float32)
# The above is equivalent to z_{K,1} + 2 * z_{K,2} + 3 <= 0
spec = ibp.LinearSpecification(c, d, collapse=False)
spec_collapse = ibp.LinearSpecification(c, d, collapse=True)
modules = _build_spec_input()
values = spec(modules)
values_collapse = spec_collapse(modules)
with self.test_session() as sess:
self.assertAlmostEqual(17., sess.run(values).item())
self.assertAlmostEqual(17., sess.run(values_collapse).item())
def testEquivalenceLinearClassification(self):
num_classes = 3
def _build_model():
layer_types = (
('conv2d', (2, 2), 4, 'VALID', 1),
('activation', 'relu'),
('linear', 10),
('activation', 'relu'))
return ibp.DNN(num_classes, layer_types)
# Input.
batch_size = 100
width = height = 2
channels = 3
num_restarts = 10
z = tf.random.uniform((batch_size, height, width, channels),
minval=-1., maxval=1., dtype=tf.float32)
y = tf.random.uniform((batch_size,), minval=0, maxval=num_classes,
dtype=tf.int64)
predictor = _build_model()
predictor = ibp.VerifiableModelWrapper(predictor)
logits = predictor(z)
random_logits1 = tf.random.uniform((num_restarts, batch_size, num_classes))
random_logits2 = tf.random.uniform((num_restarts, num_classes - 1,
batch_size, num_classes))
input_bounds = ibp.IntervalBounds(z - 2., z + 4.)
predictor.propagate_bounds(input_bounds)
# Specifications.
s1 = ibp.ClassificationSpecification(y, num_classes, collapse=False)
s1_collapse = ibp.ClassificationSpecification(y, num_classes, collapse=True)
s2 = _build_classification_specification(y, num_classes, collapse=False)
s2_collapse = _build_classification_specification(y, num_classes,
collapse=True)
def _build_values(s, s_collapse):
return [
s(predictor.modules),
s_collapse(predictor.modules),
s.evaluate(logits),
s.evaluate(random_logits1),
s.evaluate(random_logits2)
]
v1 = _build_values(s1, s1_collapse)
v2 = _build_values(s2, s2_collapse)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output1, output2 = sess.run([v1, v2])
for a, b in zip(output1, output2):
self.assertTrue(np.all(np.abs(a - b) < 1e-5))
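# Illustrative sketch of the value 17 asserted in testLinearSpecification: the
# specification c . z_K + d is maximised over the output box [2, 4] x [3, 5]
# (the mock module adds a bias of 1 to the input bounds) by taking the upper
# end for every positive coefficient: 1 * 4 + 2 * 5 + 3 = 17. Plain numpy only.
def _linear_specification_value_sketch():
  import numpy as np
  c = np.array([1., 2.])
  d = 3.
  z_lower = np.array([2., 3.])        # Lower output bounds of the mock module.
  z_upper = np.array([4., 5.])        # Upper output bounds.
  worst = np.where(c >= 0., z_upper, z_lower)
  return c.dot(worst) + d             # 17.0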
if __name__ == '__main__':
tf.test.main()
| interval-bound-propagation-master | interval_bound_propagation/tests/specification_test.py |
# coding=utf-8
# Copyright 2019 The Interval Bound Propagation Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for relative_bounds."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import interval_bound_propagation as ibp
from interval_bound_propagation import layer_utils
import numpy as np
import sonnet as snt
import tensorflow.compat.v1 as tf
class RelativeIntervalBoundsTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(('float32', tf.float32),
('float64', tf.float64))
def test_linear_bounds_shape(self, dtype):
batch_size = 11
input_size = 7
output_size = 5
w = tf.placeholder(dtype=dtype, shape=(input_size, output_size))
b = tf.placeholder(dtype=dtype, shape=(output_size,))
lb_rel_in = tf.placeholder(dtype=dtype, shape=(batch_size, input_size))
ub_rel_in = tf.placeholder(dtype=dtype, shape=(batch_size, input_size))
nominal = tf.placeholder(dtype=dtype, shape=(batch_size, input_size))
bounds_in = ibp.RelativeIntervalBounds(lb_rel_in, ub_rel_in, nominal)
bounds_out = bounds_in.apply_linear(None, w, b)
lb_out, ub_out = bounds_out.lower, bounds_out.upper
self.assertEqual(dtype, lb_out.dtype)
self.assertEqual(dtype, ub_out.dtype)
self.assertEqual((batch_size, output_size), lb_out.shape)
self.assertEqual((batch_size, output_size), ub_out.shape)
@parameterized.named_parameters(('float32', tf.float32, 1.e-6),
('float64', tf.float64, 1.e-8))
def test_linear_bounds(self, dtype, tol):
w = tf.constant([[1.0, 2.0, 3.0], [4.0, -5.0, 6.0]], dtype=dtype)
b = tf.constant([0.1, 0.2, 0.3], dtype=dtype)
lb_in = tf.constant([[-1.0, -1.0]], dtype=dtype)
ub_in = tf.constant([[2.0, 2.0]], dtype=dtype)
nominal = tf.constant([[3.1, 4.2]], dtype=dtype)
bounds_in = ibp.RelativeIntervalBounds(lb_in - nominal,
ub_in - nominal, nominal)
bounds_out = bounds_in.apply_linear(None, w, b)
lb_out, ub_out = bounds_out.lower, bounds_out.upper
lb_out_exp = np.array([[-4.9, -11.8, -8.7]])
ub_out_exp = np.array([[10.1, 9.2, 18.3]])
with self.test_session() as session:
lb_out_act, ub_out_act = session.run((lb_out, ub_out))
self.assertAllClose(lb_out_exp, lb_out_act, atol=tol, rtol=tol)
self.assertAllClose(ub_out_exp, ub_out_act, atol=tol, rtol=tol)
@parameterized.named_parameters(('float32', tf.float32),
('float64', tf.float64))
def test_conv2d_bounds_shape(self, dtype):
batch_size = 23
input_height = 17
input_width = 7
kernel_height = 3
kernel_width = 4
input_channels = 3
output_channels = 5
padding = 'VALID'
strides = (2, 1)
# Expected output dimensions, based on convolution settings.
output_height = 8
output_width = 4
w = tf.placeholder(dtype=dtype, shape=(
kernel_height, kernel_width, input_channels, output_channels))
b = tf.placeholder(dtype=dtype, shape=(output_channels,))
lb_rel_in = tf.placeholder(dtype=dtype, shape=(
batch_size, input_height, input_width, input_channels))
ub_rel_in = tf.placeholder(dtype=dtype, shape=(
batch_size, input_height, input_width, input_channels))
nominal = tf.placeholder(dtype=dtype, shape=(
batch_size, input_height, input_width, input_channels))
bounds_in = ibp.RelativeIntervalBounds(lb_rel_in, ub_rel_in, nominal)
bounds_out = bounds_in.apply_conv2d(None, w, b, padding, strides)
lb_out, ub_out = bounds_out.lower, bounds_out.upper
self.assertEqual(dtype, lb_out.dtype)
self.assertEqual(dtype, ub_out.dtype)
self.assertEqual((batch_size, output_height, output_width, output_channels),
lb_out.shape)
self.assertEqual((batch_size, output_height, output_width, output_channels),
ub_out.shape)
@parameterized.named_parameters(('float32', tf.float32, 1.e-5),
('float64', tf.float64, 1.e-8))
def test_conv2d_bounds(self, dtype, tol):
batch_size = 53
input_height = 17
input_width = 7
kernel_height = 3
kernel_width = 4
input_channels = 3
output_channels = 2
padding = 'VALID'
strides = (2, 1)
w = tf.random_normal(dtype=dtype, shape=(
kernel_height, kernel_width, input_channels, output_channels))
b = tf.random_normal(dtype=dtype, shape=(output_channels,))
lb_in = tf.random_normal(dtype=dtype, shape=(
batch_size, input_height, input_width, input_channels))
ub_in = tf.random_normal(dtype=dtype, shape=(
batch_size, input_height, input_width, input_channels))
lb_in, ub_in = tf.minimum(lb_in, ub_in), tf.maximum(lb_in, ub_in)
nominal = tf.random_normal(dtype=dtype, shape=(
batch_size, input_height, input_width, input_channels))
bounds_in = ibp.RelativeIntervalBounds(lb_in - nominal,
ub_in - nominal, nominal)
bounds_out = bounds_in.apply_conv2d(None, w, b, padding, strides)
lb_out, ub_out = bounds_out.lower, bounds_out.upper
# Compare against equivalent linear layer.
bounds_out_lin = _materialised_conv_bounds(
w, b, padding, strides, bounds_in)
lb_out_lin, ub_out_lin = bounds_out_lin.lower, bounds_out_lin.upper
with self.test_session() as session:
(lb_out_val, ub_out_val,
lb_out_lin_val, ub_out_lin_val) = session.run((lb_out, ub_out,
lb_out_lin, ub_out_lin))
self.assertAllClose(lb_out_val, lb_out_lin_val, atol=tol, rtol=tol)
self.assertAllClose(ub_out_val, ub_out_lin_val, atol=tol, rtol=tol)
@parameterized.named_parameters(('float32', tf.float32),
('float64', tf.float64))
def test_conv1d_bounds_shape(self, dtype):
batch_size = 23
input_length = 13
kernel_length = 3
input_channels = 3
output_channels = 5
padding = 'VALID'
strides = (2,)
# Expected output dimensions, based on convolution settings.
output_length = 6
w = tf.placeholder(dtype=dtype, shape=(
kernel_length, input_channels, output_channels))
b = tf.placeholder(dtype=dtype, shape=(output_channels,))
lb_rel_in = tf.placeholder(dtype=dtype, shape=(
batch_size, input_length, input_channels))
ub_rel_in = tf.placeholder(dtype=dtype, shape=(
batch_size, input_length, input_channels))
nominal = tf.placeholder(dtype=dtype, shape=(
batch_size, input_length, input_channels))
bounds_in = ibp.RelativeIntervalBounds(lb_rel_in, ub_rel_in, nominal)
bounds_out = bounds_in.apply_conv1d(None, w, b, padding, strides[0])
lb_out, ub_out = bounds_out.lower, bounds_out.upper
self.assertEqual(dtype, lb_out.dtype)
self.assertEqual(dtype, ub_out.dtype)
self.assertEqual((batch_size, output_length, output_channels),
lb_out.shape)
self.assertEqual((batch_size, output_length, output_channels),
ub_out.shape)
@parameterized.named_parameters(('float32', tf.float32, 1.e-5),
('float64', tf.float64, 1.e-8))
def test_conv1d_bounds(self, dtype, tol):
batch_size = 53
input_length = 13
kernel_length = 5
input_channels = 3
output_channels = 2
padding = 'VALID'
strides = (2,)
w = tf.random_normal(dtype=dtype, shape=(
kernel_length, input_channels, output_channels))
b = tf.random_normal(dtype=dtype, shape=(output_channels,))
lb_in = tf.random_normal(dtype=dtype, shape=(
batch_size, input_length, input_channels))
ub_in = tf.random_normal(dtype=dtype, shape=(
batch_size, input_length, input_channels))
lb_in, ub_in = tf.minimum(lb_in, ub_in), tf.maximum(lb_in, ub_in)
nominal = tf.random_normal(dtype=dtype, shape=(
batch_size, input_length, input_channels))
bounds_in = ibp.RelativeIntervalBounds(lb_in - nominal,
ub_in - nominal, nominal)
bounds_out = bounds_in.apply_conv1d(None, w, b, padding, strides[0])
lb_out, ub_out = bounds_out.lower, bounds_out.upper
# Compare against equivalent linear layer.
bounds_out_lin = _materialised_conv_bounds(
w, b, padding, strides, bounds_in)
lb_out_lin, ub_out_lin = bounds_out_lin.lower, bounds_out_lin.upper
with self.test_session() as session:
(lb_out_val, ub_out_val,
lb_out_lin_val, ub_out_lin_val) = session.run((lb_out, ub_out,
lb_out_lin, ub_out_lin))
self.assertAllClose(lb_out_val, lb_out_lin_val, atol=tol, rtol=tol)
self.assertAllClose(ub_out_val, ub_out_lin_val, atol=tol, rtol=tol)
@parameterized.named_parameters(
('float32_snt', snt.BatchNorm, tf.float32, 1.e-5, False),
('float64_snt', snt.BatchNorm, tf.float64, 1.e-8, False),
('float32', ibp.BatchNorm, tf.float32, 1.e-5, False),
('float64', ibp.BatchNorm, tf.float64, 1.e-8, False),
('float32_train', ibp.BatchNorm, tf.float32, 1.e-5, True),
('float64_train', ibp.BatchNorm, tf.float64, 1.e-8, True))
def test_batchnorm_bounds(self, batchnorm_class, dtype, tol, is_training):
batch_size = 11
input_size = 7
output_size = 5
lb_in = tf.random_normal(dtype=dtype, shape=(batch_size, input_size))
ub_in = tf.random_normal(dtype=dtype, shape=(batch_size, input_size))
lb_in, ub_in = tf.minimum(lb_in, ub_in), tf.maximum(lb_in, ub_in)
nominal = tf.random_normal(dtype=dtype, shape=(batch_size, input_size))
# Linear layer.
w = tf.random_normal(dtype=dtype, shape=(input_size, output_size))
b = tf.random_normal(dtype=dtype, shape=(output_size,))
# Batch norm layer.
epsilon = 1.e-2
bn_initializers = {
'beta': tf.random_normal_initializer(),
'gamma': tf.random_uniform_initializer(.1, 3.),
'moving_mean': tf.random_normal_initializer(),
'moving_variance': tf.random_uniform_initializer(.1, 3.)
}
batchnorm_module = batchnorm_class(offset=True, scale=True, eps=epsilon,
initializers=bn_initializers)
# Connect the batchnorm module to the graph.
batchnorm_module(tf.random_normal(dtype=dtype,
shape=(batch_size, output_size)),
is_training=is_training)
bounds_in = ibp.RelativeIntervalBounds(lb_in - nominal,
ub_in - nominal, nominal)
bounds_out = bounds_in.apply_linear(None, w, b)
bounds_out = bounds_out.apply_batch_norm(
batchnorm_module,
batchnorm_module.mean if is_training else batchnorm_module.moving_mean,
batchnorm_module.variance if is_training
else batchnorm_module.moving_variance,
batchnorm_module.gamma,
batchnorm_module.beta,
epsilon)
lb_out, ub_out = bounds_out.lower, bounds_out.upper
# Separately, calculate the expected bounds by combining the linear layer
# with the batch norm into a single linear layer.
wn, bn = layer_utils.combine_with_batchnorm(w, b, batchnorm_module)
bounds_out_lin = bounds_in.apply_linear(None, wn, bn)
lb_out_lin, ub_out_lin = bounds_out_lin.lower, bounds_out_lin.upper
init_op = tf.global_variables_initializer()
with self.test_session() as session:
session.run(init_op)
(lb_out_val, ub_out_val,
lb_out_lin_val, ub_out_lin_val) = session.run((lb_out, ub_out,
lb_out_lin, ub_out_lin))
self.assertAllClose(lb_out_val, lb_out_lin_val, atol=tol, rtol=tol)
self.assertAllClose(ub_out_val, ub_out_lin_val, atol=tol, rtol=tol)
def _materialised_conv_bounds(w, b, padding, strides, bounds_in):
"""Calculates bounds on output of an N-D convolution layer.
The calculation is performed by first materialising the convolution as a
(sparse) fully-connected linear layer. Doing so is inefficient, but provides
a simple reference implementation against which the convolution bounds can
be checked.
Args:
w: (N+2)D tensor of shape (kernel_height, kernel_width, input_channels,
output_channels) containing weights for the convolution.
b: 1D tensor of shape (output_channels) containing biases for the
convolution, or `None` if no bias.
padding: `"VALID"` or `"SAME"`, the convolution's padding algorithm.
strides: Integer list of length N: `[vertical_stride, horizontal_stride]`.
bounds_in: bounds of shape (batch_size, input_height, input_width,
input_channels) containing bounds on the inputs to the
convolution layer.
Returns:
bounds of shape (batch_size, output_height, output_width,
output_channels) with bounds on the outputs of the
convolution layer.
Raises:
ValueError: if an unsupported convolution dimensionality is encountered.
"""
# Flatten the inputs, as the materialised convolution will have no
# spatial structure.
bounds_in_flat = bounds_in.apply_batch_reshape(None, [-1])
# Materialise the convolution as a (sparse) fully connected linear layer.
input_shape = bounds_in.shape[1:]
w_lin, b_lin = layer_utils.materialise_conv(w, b, input_shape,
padding=padding, strides=strides)
bounds_out_flat = bounds_in_flat.apply_linear(None, w_lin, b_lin)
# Unflatten the output bounds.
output_shape = layer_utils.conv_output_shape(input_shape, w, padding, strides)
return bounds_out_flat.apply_batch_reshape(None, output_shape)
if __name__ == '__main__':
tf.test.main()
| interval-bound-propagation-master | interval_bound_propagation/tests/relative_bounds_test.py |
# coding=utf-8
# Copyright 2019 The Interval Bound Propagation Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""CROWN-IBP implementation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from absl import logging
from interval_bound_propagation.src import bounds
from interval_bound_propagation.src import fastlin
from interval_bound_propagation.src import loss
from interval_bound_propagation.src import model
from interval_bound_propagation.src import specification as specification_lib
from interval_bound_propagation.src import utils
from interval_bound_propagation.src import verifiable_wrapper
import tensorflow.compat.v1 as tf
class BackwardBounds(bounds.AbstractBounds):
"""Implementation of backward bound propagation used by CROWN."""
def __init__(self, lower, upper):
super(BackwardBounds, self).__init__()
# Setting "lower" or "upper" to None will avoid creating the computation
# graph for CROWN lower or upper bounds. For verifiable training, only the
# upper bound is necessary.
self._lower = lower
self._upper = upper
@property
def lower(self):
return self._lower
@property
def upper(self):
return self._upper
@property
def shape(self):
return self.lower.shape.as_list()
def concretize(self):
"""Returns lower and upper interval bounds."""
lb = ub = None
if self.lower is not None:
lb = (
tf.einsum('nsi,ni->ns',
self._reshape_to_rank(tf.maximum(self.lower.w, 0), 3),
self._reshape_to_rank(self.lower.lower, 2)) +
tf.einsum('nsi,ni->ns',
self._reshape_to_rank(tf.minimum(self.lower.w, 0), 3),
self._reshape_to_rank(self.lower.upper, 2)))
lb += self.lower.b
if self.upper is not None:
ub = (
tf.einsum('nsi,ni->ns',
self._reshape_to_rank(tf.maximum(self.upper.w, 0), 3),
self._reshape_to_rank(self.upper.upper, 2)) +
tf.einsum('nsi,ni->ns',
self._reshape_to_rank(tf.minimum(self.upper.w, 0), 3),
self._reshape_to_rank(self.upper.lower, 2)))
ub += self.upper.b
return bounds.IntervalBounds(lb, ub)
@classmethod
def convert(cls, other_bounds):
if isinstance(other_bounds, cls):
return other_bounds
raise RuntimeError('BackwardBounds does not support conversion from any '
'other bound type.')
def apply_linear(self, wrapper, w, b):
"""Propagate CROWN bounds backward through a linear layer."""
def _linear_propagate(bound):
"""Propagate one side of the bound."""
new_bound_w = tf.einsum('nsk,lk->nsl', bound.w, w)
if b is not None:
bias = tf.tensordot(bound.w, b, axes=1)
return fastlin.LinearExpression(w=new_bound_w, b=bias + bound.b,
lower=wrapper.input_bounds.lower,
upper=wrapper.input_bounds.upper)
ub_expr = _linear_propagate(self.upper) if self.upper else None
lb_expr = _linear_propagate(self.lower) if self.lower else None
return BackwardBounds(lb_expr, ub_expr)
def apply_conv2d(self, wrapper, w, b, padding, strides):
"""Propagate CROWN bounds backward through a convolution layer."""
def _conv2d_propagate(bound):
"""Propagate one side of the bound."""
s = tf.shape(bound.w)
# Variable bound.w has shape (batch_size, num_specs, H, W, C);
# reshape it to (batch_size * num_specs, H, W, C) for batched processing.
effective_batch_size = tf.reshape(s[0] * s[1], [1])
batched_shape = tf.concat([effective_batch_size, s[2:]], 0)
# The output of a deconvolution is the input shape of the corresponding
# convolution.
output_shape = wrapper.input_bounds.lower.shape
batched_output_shape = tf.concat([effective_batch_size, output_shape[1:]],
0)
# Batched transpose convolution for efficiency.
bound_batch = tf.nn.conv2d_transpose(tf.reshape(bound.w, batched_shape),
filter=w,
output_shape=batched_output_shape,
strides=[1] + list(strides) + [1],
padding=padding)
# Reshape results to (batch_size, num_specs, new_H, new_W, new_C).
new_shape = tf.concat(
[tf.reshape(s[0], [1]), tf.reshape(s[1], [1]), output_shape[1:]], 0)
new_bound_w = tf.reshape(bound_batch, new_shape)
# If this convolution has a bias, multiply it with the current w.
bias = 0
if b is not None:
# Variable bound.w has shape (batch_size, num_specs, H, W, C);
# sum over H and W, then take a dot product along the channel dimension C.
bias = tf.tensordot(tf.reduce_sum(bound.w, [2, 3]), b, axes=1)
return fastlin.LinearExpression(w=new_bound_w, b=bias + bound.b,
lower=wrapper.input_bounds.lower,
upper=wrapper.input_bounds.upper)
ub_expr = _conv2d_propagate(self.upper) if self.upper else None
lb_expr = _conv2d_propagate(self.lower) if self.lower else None
return BackwardBounds(lb_expr, ub_expr)
def _get_monotonic_fn_bound(self, wrapper, fn):
"""Compute CROWN upper and lower linear bounds for a given function fn."""
# Get lower and upper bounds from forward IBP pass.
lb, ub = wrapper.input_bounds.lower, wrapper.input_bounds.upper
if fn.__name__ == 'relu':
# CROWN upper and lower linear bounds for ReLU.
f_lb = tf.minimum(lb, 0)
f_ub = tf.maximum(ub, 0)
# When both ub and lb are very close to 0, the ratio below can produce NaNs,
# so keep the denominator strictly positive.
f_ub = tf.maximum(f_ub, f_lb + 1e-8)
# CROWN upper/lower scaling matrices and biases.
ub_scaling_matrix = f_ub / (f_ub - f_lb)
ub_bias = -f_lb * ub_scaling_matrix
# Expand dimension for using broadcast later.
ub_scaling_matrix = tf.expand_dims(ub_scaling_matrix, 1)
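# Adaptive lower bound for ReLU: use the identity (slope 1) where the
# upper-bound slope exceeds 0.5, i.e. where |f_ub| > |f_lb|, and the zero
# function otherwise. In both cases the lower-bound bias is 0.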
lb_scaling_matrix = tf.cast(tf.greater(ub_scaling_matrix, .5),
dtype=tf.float32)
lb_bias = 0.
# For 'apply' functions, we distinguish them by their wrapper type instead.
elif isinstance(wrapper, verifiable_wrapper.ImageNormWrapper):
inner_module = wrapper.inner_module
ub_scaling_matrix = lb_scaling_matrix = inner_module.scale
ub_bias = - inner_module.offset * inner_module.scale
lb_bias = ub_bias
else:
raise NotImplementedError('monotonic fn {} is not supported '
'by BackwardBounds'.format(fn.__name__))
return ub_scaling_matrix, lb_scaling_matrix, ub_bias, lb_bias
def apply_increasing_monotonic_fn(self, wrapper, fn, *args):
"""Propagate CROWN bounds backward through a increasing monotonic fn."""
# Function _get_monotonic_fn_bound returns matrix and bias term for linear
# relaxation.
(ub_scaling_matrix, lb_scaling_matrix,
ub_bias, lb_bias) = self._get_monotonic_fn_bound(wrapper, fn)
def _propagate_monotonic_fn(bound, ub_mult, lb_mult):
# Matrix multiplication by a diagonal matrix.
new_bound_w = ub_mult * ub_scaling_matrix + lb_mult * lb_scaling_matrix
# Matrix-vector product for the bias term. ub_bias or lb_bias may be 0,
# a constant, or may require broadcasting; _matvec handles each case
# efficiently.
b = self._matvec(ub_mult, ub_bias) + self._matvec(lb_mult, lb_bias)
return fastlin.LinearExpression(w=new_bound_w, b=bound.b + b,
lower=wrapper.input_bounds.lower,
upper=wrapper.input_bounds.upper)
# Multiply w by the upper or lower scaling terms according to its sign.
ub_expr = _propagate_monotonic_fn(
self.upper, tf.maximum(self.upper.w, 0),
tf.minimum(self.upper.w, 0)) if self.upper else None
lb_expr = _propagate_monotonic_fn(
self.lower, tf.minimum(self.lower.w, 0),
tf.maximum(self.lower.w, 0)) if self.lower else None
return BackwardBounds(lb_expr, ub_expr)
def apply_batch_reshape(self, wrapper, shape):
"""Propagate CROWN bounds backward through a reshape layer."""
input_shape = wrapper.input_bounds.lower.shape[1:]
def _propagate_batch_flatten(bound):
new_bound_w = tf.reshape(
bound.w, tf.concat([tf.shape(bound.w)[:2], input_shape], 0))
return fastlin.LinearExpression(w=new_bound_w, b=bound.b,
lower=wrapper.input_bounds.lower,
upper=wrapper.input_bounds.upper)
ub_expr = _propagate_batch_flatten(self.upper) if self.upper else None
lb_expr = _propagate_batch_flatten(self.lower) if self.lower else None
return BackwardBounds(lb_expr, ub_expr)
@staticmethod
def _reshape_to_rank(a, rank):
"""Reshapes to the given rank while keeping the first (rank-1) dims."""
shape = tf.concat([tf.shape(a)[0:(rank - 1)], [-1]], axis=-1)
return tf.reshape(a, shape)
@staticmethod
def _matvec(a, b):
"""Specialized matvec detecting the case where b is 0 or constant."""
if isinstance(b, int) or isinstance(b, float):
if b == 0:
# For efficiency, return the constant 0 directly; no graph ops are generated.
return 0
else:
# Broadcasting a constant.
return a * b
elif len(b.shape) == 1:
# Need to broadcast against all examples in the batch. This can be done
# using an einsum "tf.einsum('ns...c,c->ns', a, b)" but it currently
# triggers a compiler bug on TPUs, thus we use the following instead.
return tf.einsum('nsc,c->ns', tf.reduce_sum(a, [2, 3]), b)
else:
# General case: flatten both operands and compute a batched mat-vec product.
return tf.einsum('nsi,ni->ns',
BackwardBounds._reshape_to_rank(a, 3),
BackwardBounds._reshape_to_rank(b, 2))
ScalarMetrics = collections.namedtuple('ScalarMetrics', [
'nominal_accuracy',
# Verified accuracy using pure IBP bounds.
'verified_accuracy',
# Verified accuracy using CROWN and IBP mixture.
'crown_ibp_verified_accuracy',
'attack_accuracy',
'attack_success'])
ScalarLosses = collections.namedtuple('ScalarLosses', [
'nominal_cross_entropy',
'attack_cross_entropy',
'verified_loss'])
class Losses(loss.Losses):
"""Helper to compute CROWN-IBP losses."""
def __init__(self, predictor, specification=None, pgd_attack=None,
interval_bounds_loss_type='xent',
interval_bounds_hinge_margin=10.,
label_smoothing=0.,
use_crown_ibp=False,
crown_bound_schedule=None):
super(Losses, self).__init__(predictor, specification, pgd_attack,
interval_bounds_loss_type,
interval_bounds_hinge_margin,
label_smoothing)
self._use_crown_ibp = use_crown_ibp
self._crown_bound_schedule = crown_bound_schedule
def _get_specification_bounds(self):
"""Get upper bounds on specification. Used for building verified loss."""
ibp_bounds = self._specification(self._predictor.modules)
# Compute verified accuracy using IBP bounds.
v = tf.reduce_max(ibp_bounds, axis=1)
self._interval_bounds_accuracy = tf.reduce_mean(
tf.cast(v <= 0., tf.float32))
# CROWN-IBP bounds.
if self._use_crown_ibp:
logging.info('CROWN-IBP active')
def _build_crown_ibp_bounds():
"""Create the computationally expensive CROWN bounds for tf.cond."""
predictor = self._predictor
# CROWN is computed backwards, so we need to start with an
# initial bound derived from the specification.
init_crown_bounds = create_initial_backward_bounds(self._specification,
predictor.modules)
# Now propagate the specification matrix layer by layer;
# we only need the CROWN upper bound, not the lower bound.
crown_bound = predictor.propagate_bound_backward(init_crown_bounds,
compute_upper=True,
compute_lower=False)
# A linear mixture of the two bounds with a schedule.
return self._crown_bound_schedule * crown_bound.upper + \
(1. - self._crown_bound_schedule) * ibp_bounds
# If the coefficient for CROWN bound is close to 0, compute IBP only.
mixture_bounds = tf.cond(self._crown_bound_schedule < 1e-6,
lambda: ibp_bounds, _build_crown_ibp_bounds)
v = tf.reduce_max(mixture_bounds, axis=1)
self._crown_ibp_accuracy = tf.reduce_mean(tf.cast(v <= 0., tf.float32))
else:
mixture_bounds = ibp_bounds
self._crown_ibp_accuracy = tf.constant(0.)
return mixture_bounds
@property
def scalar_metrics(self):
self._ensure_is_connected()
return ScalarMetrics(self._nominal_accuracy,
self._interval_bounds_accuracy,
self._crown_ibp_accuracy,
self._attack_accuracy,
self._attack_success)
@property
def scalar_losses(self):
self._ensure_is_connected()
return ScalarLosses(self._cross_entropy,
self._attack_cross_entropy,
self._verified_loss)
class VerifiableModelWrapper(model.VerifiableModelWrapper):
"""Model wrapper with CROWN-IBP backward bound propagation."""
def _propagate(self, current_module, current_bounds):
"""Propagate CROWN bounds in a backwards manner."""
# Construct bounds for this layer.
if isinstance(current_module, verifiable_wrapper.ModelInputWrapper):
if current_module.index != 0:
raise NotImplementedError('CROWN backpropagation does not support '
'multiple inputs.')
return current_bounds
# Propagate the bounds through the current layer.
new_bounds = current_module.propagate_bounds(current_bounds)
prev_modules = self._module_depends_on[current_module]
# We assume that each module only depends on one module.
if len(prev_modules) != 1:
raise NotImplementedError('CROWN for non-sequential networks is not '
'implemented.')
return self._propagate(prev_modules[0], new_bounds)
def propagate_bound_backward(self, initial_bound,
compute_upper=True, compute_lower=False):
"""Propagates CROWN bounds backward through the network.
This function assumes that we have obtained bounds for all intermediate
layers using IBP. Currently only sequential networks are implemented.
Args:
initial_bound: A BackwardBounds object containing the initial matrices
and biases to start bound propagation.
compute_upper: Set to True to construct the computation graph for the
CROWN upper bound. For verified training, only the upper bound is
needed. Default is True.
compute_lower: Set to True to construct the computation graph for the
CROWN lower bound. Default is False.
Returns:
IntervalBounds instance corresponding to bounds on the specification.
"""
if (not compute_upper) and (not compute_lower):
raise ValueError('At least one of "compute_upper" or "compute_lower" '
'needs to be True')
self._ensure_is_connected()
# We start bound propagation from the logit layer.
logit_layer = self._produced_by[self._logits.name]
# If only one of ub or lb is needed, we set the unnecessary one to None.
ub = initial_bound.upper if compute_upper else None
lb = initial_bound.lower if compute_lower else None
bound = BackwardBounds(lb, ub)
crown_bound = self._propagate(logit_layer, bound)
return crown_bound.concretize()
def create_initial_backward_bounds(spec, modules):
"""Create the initial BackwardBounds according to specification."""
last_bounds = bounds.IntervalBounds.convert(modules[-1].input_bounds)
if isinstance(spec, specification_lib.ClassificationSpecification):
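# Build one specification row per (correct, wrong) class pair:
# c = e_wrong - e_correct, so that c . logits is the margin by which the
# wrong class's logit exceeds the correct class's logit.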
c_correct = tf.expand_dims(
tf.one_hot(spec.correct_idx[:, 1], spec.num_specifications + 1), 1)
c_wrong = tf.one_hot(spec.wrong_idx[:, :, 1], spec.num_specifications + 1)
c = c_wrong - c_correct
b = tf.zeros(spec.num_specifications)
lb = ub = fastlin.LinearExpression(w=c, b=b, lower=last_bounds.lower,
upper=last_bounds.upper)
elif isinstance(spec, specification_lib.LinearSpecification):
b = spec.d if spec.d is not None else tf.zeros(spec.num_specifications)
lb = ub = fastlin.LinearExpression(w=spec.c, b=b, lower=last_bounds.lower,
upper=last_bounds.upper)
else:
raise ValueError('Unknown specification class type "{}"'.format(str(spec)))
return BackwardBounds(lb, ub)
def create_classification_losses(
global_step, inputs, label, predictor_network, epsilon, loss_weights,
warmup_steps=0, rampup_steps=-1, input_bounds=(0., 1.), options=None):
"""Create the training loss for CROWN-IBP."""
def _is_loss_active(init, final, warmup=None):
return init > 0. or final > 0. or (warmup is not None and warmup > 0.)
if 'crown_bound' in loss_weights:
schedule = utils.build_loss_schedule(global_step, warmup_steps,
rampup_steps,
**loss_weights.get('crown_bound'))
use_crown_ibp = _is_loss_active(**loss_weights.get('crown_bound'))
else:
schedule = None
use_crown_ibp = False
# Use the loss builder for CROWN-IBP with additional kwargs.
def _loss_builder(*args, **kwargs):
kwargs.update(dict(use_crown_ibp=use_crown_ibp,
crown_bound_schedule=schedule))
return Losses(*args, **kwargs)
return utils.create_classification_losses(
global_step, inputs, label, predictor_network, epsilon,
loss_weights, warmup_steps, rampup_steps, input_bounds,
loss_builder=_loss_builder, options=options)
| interval-bound-propagation-master | interval_bound_propagation/src/crown.py |
# coding=utf-8
# Copyright 2019 The Interval Bound Propagation Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Graph construction for dual verification."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from interval_bound_propagation.src import layers
import sonnet as snt
import tensorflow.compat.v1 as tf
def conv_output_shape(input_shape, w, padding, strides):
"""Calculates the output shape of the given N-D convolution.
Args:
input_shape: Integer list of length N+1 specifying the non-batch dimensions
of the inputs: [input_height, input_width, input_channels].
w: (N+2)D tensor of shape (kernel_height, kernel_width, input_channels,
output_channels) containing weights for the convolution.
padding: `"VALID"` or `"SAME"`, the convolution's padding algorithm.
strides: Integer list of length N: `[vertical_stride, horizontal_stride]`.
Returns:
Integer list of length N+1 specifying the non-batch dimensions
of the outputs: [output_height, output_width, output_channels].
Raises:
ValueError: if an unsupported convolution dimensionality is encountered.
"""
# Connect a convolution (never to be run) to infer the output's
# spatial structure.
dummy_inputs = tf.zeros(dtype=w.dtype, shape=([1] + input_shape))
if len(w.shape) == 4:
dummy_outputs = tf.nn.convolution(dummy_inputs,
w, padding=padding, strides=strides)
elif len(w.shape) == 3:
dummy_outputs = tf.nn.conv1d(dummy_inputs,
w, padding=padding, stride=strides[0])
else:
raise ValueError('Unsupported convolution dimensionality.')
return dummy_outputs.shape.as_list()[1:]
def materialise_conv(w, b, input_shape, padding, strides):
"""Converts an N-D convolution to an equivalent linear layer.
Args:
w: (N+2)D tensor of shape (kernel_height, kernel_width, input_channels,
output_channels) containing the convolution weights.
b: 1D tensor of shape (output_channels) containing the convolution biases,
or `None` if no biases.
input_shape: Integer list of length N+1 specifying the non-batch dimensions
of the inputs: [input_height, input_width, input_channels].
padding: `"VALID"` or `"SAME"`, the convolution's padding algorithm.
strides: Integer list of length N: `[vertical_stride, horizontal_stride]`.
Returns:
w: 2D tensor of shape (input_height * input_width * input_channels,
output_height * output_width * output_channels) containing weights.
b: 1D tensor of shape (output_height * output_width * output_channels)
containing biases, or `None` if no biases.
Raises:
ValueError: if an unsupported convolution dimensionality is encountered.
"""
if len(input_shape) == 3:
return _materialise_conv2d(w, b, input_shape[0], input_shape[1],
padding, strides)
elif len(input_shape) == 2:
return _materialise_conv1d(w, b, input_shape[0], padding, strides[0])
else:
raise ValueError('Unsupported convolution dimensionality.')
def _materialise_conv2d(w, b, input_height, input_width, padding, strides):
"""Converts a convolution to an equivalent linear layer.
Args:
w: 4D tensor of shape (kernel_height, kernel_width, input_channels,
output_channels) containing the convolution weights.
b: 1D tensor of shape (output_channels) containing the convolution biases,
or `None` if no biases.
input_height: height of the input tensor.
input_width: width of the input tensor.
padding: `"VALID"` or `"SAME"`, the convolution's padding algorithm.
strides: Integer list of `[vertical_stride, horizontal_stride]`.
Returns:
w: 2D tensor of shape (input_height * input_width * input_channels,
output_height * output_width * output_channels) containing weights.
b: 1D tensor of shape (output_height * output_width * output_channels)
containing biases, or `None` if no biases.
"""
kernel_height = w.shape[0].value
kernel_width = w.shape[1].value
input_channels = w.shape[2].value
output_channels = w.shape[3].value
# Temporarily move the input_channels dimension to output_channels.
w = tf.reshape(w, shape=(kernel_height, kernel_width, 1,
input_channels * output_channels))
# Apply the convolution to elementary (i.e. one-hot) inputs.
diagonal_input = tf.reshape(
tf.eye(input_height * input_width, dtype=w.dtype),
shape=[input_height * input_width, input_height, input_width, 1])
conv = tf.nn.convolution(
diagonal_input, w,
padding=padding, strides=strides)
output_height = conv.shape[1].value
output_width = conv.shape[2].value
# conv is of shape (input_height * input_width, output_height, output_width,
# input_channels * output_channels).
# Reshape it to (input_height * input_width * input_channels,
# output_height * output_width * output_channels).
w = tf.reshape(conv, shape=(
[input_height * input_width,
output_height, output_width,
input_channels, output_channels]))
w = tf.transpose(w, perm=[0, 3, 1, 2, 4])
w = tf.reshape(w, shape=(
[input_height * input_width * input_channels,
output_height * output_width * output_channels]))
# Broadcast b over spatial dimensions.
b = tf.tile(b, [output_height * output_width]) if b is not None else None
return w, b
def _materialise_conv1d(w, b, input_length, padding, stride):
"""Converts a convolution to an equivalent linear layer.
Args:
w: 3D tensor of shape (kernel_length, input_channels,
output_channels) containing the convolution weights.
b: 1D tensor of shape (output_channels) containing the convolution biases,
or `None` if no biases.
input_length: length of the input tensor.
padding: `"VALID"` or `"SAME"`, the convolution's padding algorithm.
stride: Integer stride.
Returns:
w: 2D tensor of shape (input_length * input_channels,
output_length * output_channels) containing weights.
b: 1D tensor of shape (output_length * output_channels)
containing biases, or `None` if no biases.
"""
kernel_length = w.shape[0].value
input_channels = w.shape[1].value
output_channels = w.shape[2].value
# Temporarily move the input_channels dimension to output_channels.
w = tf.reshape(w, shape=(kernel_length, 1,
input_channels * output_channels))
# Apply the convolution to elementary (i.e. one-hot) inputs.
diagonal_input = tf.reshape(
tf.eye(input_length, dtype=w.dtype),
shape=[input_length, input_length, 1])
conv = tf.nn.conv1d(
diagonal_input, w,
padding=padding, stride=stride)
output_length = conv.shape[1].value
# conv is of shape (input_length, output_length,
# input_channels * output_channels).
# Reshape it to (input_length * input_channels,
# output_length * output_channels).
w = tf.reshape(conv, shape=(
[input_length,
output_length,
input_channels, output_channels]))
w = tf.transpose(w, perm=[0, 2, 1, 3])
w = tf.reshape(w, shape=(
[input_length * input_channels,
output_length * output_channels]))
# Broadcast b over spatial dimensions.
b = tf.tile(b, [output_length]) if b is not None else None
return w, b
def decode_batchnorm(batchnorm_module):
"""Calculates the neuron-wise multipliers and biases of the batch norm layer.
Note that, in the case of a convolution, the returned bias will have
spatial dimensions.
Args:
batchnorm_module: `snt.BatchNorm` module.
Returns:
w: 1D tensor of shape (output_size) or 3D tensor of shape
(output_height, output_width, output_channels) containing
neuron-wise multipliers for the batch norm layer.
b: 1D tensor of shape (output_size) or 3D tensor of shape
(output_height, output_width, output_channels) containing
neuron-wise biases for the batch norm layer.
"""
if isinstance(batchnorm_module, layers.BatchNorm):
mean = batchnorm_module.mean
variance = batchnorm_module.variance
variance_epsilon = batchnorm_module.epsilon
scale = batchnorm_module.scale
offset = batchnorm_module.bias
else:
assert isinstance(batchnorm_module, snt.BatchNorm)
mean = batchnorm_module.moving_mean
variance = batchnorm_module.moving_variance
variance_epsilon = batchnorm_module._eps # pylint: disable=protected-access
try:
scale = batchnorm_module.gamma
except snt.Error:
scale = None
try:
offset = batchnorm_module.beta
except snt.Error:
offset = None
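# Batch norm computes scale * (x - mean) / sqrt(variance + epsilon) + offset,
# which is the affine map x -> w * x + b with w and b as below.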
w = tf.rsqrt(variance + variance_epsilon)
if scale is not None:
w *= scale
b = -w * mean
if offset is not None:
b += offset
# Batchnorm vars have a redundant leading dim.
w = tf.squeeze(w, axis=0)
b = tf.squeeze(b, axis=0)
return w, b
def combine_with_batchnorm(w, b, batchnorm_module):
"""Combines a linear layer and a batch norm into a single linear layer.
Calculates the weights and biases of the linear layer formed by
applying the specified linear layer followed by the batch norm.
Note that, in the case of a convolution, the returned bias will have
spatial dimensions.
Args:
w: 2D tensor of shape (input_size, output_size) or 4D tensor of shape
(kernel_height, kernel_width, input_channels, output_channels) containing
weights for the linear layer.
b: 1D tensor of shape (output_size) or (output_channels) containing biases
for the linear layer, or `None` if no bias.
batchnorm_module: `snt.BatchNorm` module.
Returns:
w: 2D tensor of shape (input_size, output_size) or 4D tensor of shape
(kernel_height, kernel_width, input_channels, output_channels) containing
weights for the combined layer.
b: 1D tensor of shape (output_size) or 3D tensor of shape
(output_height, output_width, output_channels) containing
biases for the combined layer.
"""
if b is None:
b = tf.zeros(dtype=w.dtype, shape=())
w_bn, b_bn = decode_batchnorm(batchnorm_module)
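# Composing y = x.W + b with the batch norm's affine map y -> y * w_bn + b_bn
# gives weights W * w_bn and biases b * w_bn + b_bn.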
return w * w_bn, b * w_bn + b_bn
| interval-bound-propagation-master | interval_bound_propagation/src/layer_utils.py |
# coding=utf-8
# Copyright 2019 The Interval Bound Propagation Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Naive bound calculation for common neural network layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from interval_bound_propagation.src import bounds as basic_bounds
from interval_bound_propagation.src import relative_bounds
import sonnet as snt
import tensorflow.compat.v1 as tf
class SimplexBounds(basic_bounds.AbstractBounds):
"""Specifies a bounding simplex within an embedding space."""
def __init__(self, vertices, nominal, r):
"""Initialises the simplex bounds.
Args:
vertices: Tensor of shape (num_vertices, *input_shape)
or of shape (batch_size, num_vertices, *input_shape)
containing the vertices in embedding space.
nominal: Tensor of shape (batch_size, *input_shape) specifying
the unperturbed inputs in embedding space, where `*input_shape`
denotes either (embedding_size,) for flat input (e.g. bag-of-words)
or (input_length, embedding_channels) for sequence input.
r: Scalar specifying the dilation factor of the simplex. The dilated
simplex will have vertices `nominal + r * (vertices-nominal)`.
"""
super(SimplexBounds, self).__init__()
self._vertices = vertices
self._nominal = nominal
self._r = r
@property
def vertices(self):
return self._vertices
@property
def nominal(self):
return self._nominal
@property
def r(self):
return self._r
@property
def shape(self):
return self.nominal.shape.as_list()
@classmethod
def convert(cls, bounds):
if not isinstance(bounds, cls):
raise ValueError('Cannot convert "{}" to "{}"'.format(bounds,
cls.__name__))
return bounds
def apply_batch_reshape(self, wrapper, shape):
reshape = snt.BatchReshape(shape)
if self.vertices.shape.ndims == self.nominal.shape.ndims:
reshape_vertices = reshape
else:
reshape_vertices = snt.BatchReshape(shape, preserve_dims=2)
return SimplexBounds(reshape_vertices(self.vertices),
reshape(self.nominal),
self.r)
def apply_linear(self, wrapper, w, b):
mapped_centres = tf.matmul(self.nominal, w)
mapped_vertices = tf.tensordot(self.vertices, w, axes=1)
lb, ub = _simplex_bounds(mapped_vertices, mapped_centres, self.r, -2)
nominal_out = tf.matmul(self.nominal, w)
if b is not None:
nominal_out += b
return relative_bounds.RelativeIntervalBounds(lb, ub, nominal_out)
def apply_conv1d(self, wrapper, w, b, padding, stride):
mapped_centres = tf.nn.conv1d(self.nominal, w,
padding=padding, stride=stride)
if self.vertices.shape.ndims == 3:
# `self.vertices` has no batch dimension; its shape is
# (num_vertices, input_length, embedding_channels).
mapped_vertices = tf.nn.conv1d(self.vertices, w,
padding=padding, stride=stride)
elif self.vertices.shape.ndims == 4:
# `self.vertices` has shape
# (batch_size, num_vertices, input_length, embedding_channels).
# Vertices are different for each example in the batch,
# e.g. for word perturbations.
mapped_vertices = snt.BatchApply(
lambda x: tf.nn.conv1d(x, w, padding=padding, stride=stride))(
self.vertices)
else:
raise ValueError('"vertices" must have either 3 or 4 dimensions.')
lb, ub = _simplex_bounds(mapped_vertices, mapped_centres, self.r, -3)
nominal_out = tf.nn.conv1d(self.nominal, w,
padding=padding, stride=stride)
if b is not None:
nominal_out += b
return relative_bounds.RelativeIntervalBounds(lb, ub, nominal_out)
def apply_conv2d(self, wrapper, w, b, padding, strides):
mapped_centres = tf.nn.convolution(self.nominal, w,
padding=padding, strides=strides)
if self.vertices.shape.ndims == 4:
# `self.vertices` has no batch dimension; its shape is
# (num_vertices, input_height, input_width, input_channels).
mapped_vertices = tf.nn.convolution(self.vertices, w,
padding=padding, strides=strides)
elif self.vertices.shape.ndims == 5:
# `self.vertices` has shape
# (batch_size, num_vertices, input_height, input_width, input_channels).
# Vertices are different for each example in the batch.
mapped_vertices = snt.BatchApply(
lambda x: tf.nn.convolution(x, w, padding=padding, strides=strides))(
self.vertices)
else:
raise ValueError('"vertices" must have either 4 or 5 dimensions.')
lb, ub = _simplex_bounds(mapped_vertices, mapped_centres, self.r, -4)
nominal_out = tf.nn.convolution(self.nominal, w,
padding=padding, strides=strides)
if b is not None:
nominal_out += b
return relative_bounds.RelativeIntervalBounds(lb, ub, nominal_out)
def apply_increasing_monotonic_fn(self, wrapper, fn, *args, **parameters):
if fn.__name__ in ('add', 'reduce_mean', 'reduce_sum', 'avg_pool'):
if self.vertices.shape.ndims == self.nominal.shape.ndims:
vertices_fn = fn
else:
vertices_fn = snt.BatchApply(fn, n_dims=2)
return SimplexBounds(
vertices_fn(self.vertices, *[bounds.vertices for bounds in args]),
fn(self.nominal, *[bounds.nominal for bounds in args]),
self.r)
elif fn.__name__ == 'quotient':
return SimplexBounds(
self.vertices / tf.expand_dims(parameters['denom'], axis=1),
fn(self.nominal),
self.r)
else:
return super(SimplexBounds, self).apply_increasing_monotonic_fn(
wrapper, fn, *args, **parameters)
def _simplex_bounds(mapped_vertices, mapped_centres, r, axis):
"""Calculates naive bounds on the given layer-mapped vertices.
Args:
mapped_vertices: Tensor of shape (num_vertices, *output_shape)
or of shape (batch_size, num_vertices, *output_shape)
containing the vertices in the layer's output space.
mapped_centres: Tensor of shape (batch_size, *output_shape)
containing the layer's nominal outputs.
r: Scalar in [0, 1) specifying the radius (in vocab space) of the simplex.
axis: Index of the `num_vertices` dimension of `mapped_vertices`.
Returns:
lb_out: Tensor of shape (batch_size, *output_shape) with lower bounds
on the outputs of the affine layer.
ub_out: Tensor of shape (batch_size, *output_shape) with upper bounds
on the outputs of the affine layer.
"""
# Use the negative of r, instead of the complement of r, as
# we're shifting the input domain to be centred at the origin.
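# A dilated vertex maps (affinely) to nominal + r * (vertex - nominal), so,
# relative to the nominal output, the extreme values over vertices are
# r * (min/max over vertices of mapped_vertices - mapped_centres).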
lb_out = -r * mapped_centres + r * tf.reduce_min(mapped_vertices, axis=axis)
ub_out = -r * mapped_centres + r * tf.reduce_max(mapped_vertices, axis=axis)
return lb_out, ub_out
| interval-bound-propagation-master | interval_bound_propagation/src/simplex_bounds.py |
# coding=utf-8
# Copyright 2019 The Interval Bound Propagation Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper around modules that provides additional facilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import types
from absl import logging
from interval_bound_propagation.src import layers
import six
import sonnet as snt
import tensorflow.compat.v1 as tf
@six.add_metaclass(abc.ABCMeta)
class VerifiableWrapper(object):
"""Abstract wrapper class."""
def __init__(self, module):
self._module = module
self._input_bounds = None
self._output_bounds = None
@property
def input_bounds(self):
assert self._input_bounds is not None
return self._input_bounds
@property
def output_bounds(self):
return self._output_bounds
@property
def module(self):
return self._module
def __str__(self):
if isinstance(self._module, tf.Tensor):
return str(self._module)
if isinstance(self._module, types.LambdaType):
return self._module.__name__
if isinstance(self._module, snt.AbstractModule):
return self._module.module_name
if hasattr(self._module, '__class__'):
return self._module.__class__.__name__
return str(self._module)
def propagate_bounds(self, *input_bounds):
"""Propagates bounds and saves input and output bounds."""
output_bounds = self._propagate_through(self.module, *input_bounds)
if len(input_bounds) == 1:
self._input_bounds = input_bounds[0]
else:
self._input_bounds = tuple(input_bounds)
self._output_bounds = output_bounds
return output_bounds
@abc.abstractmethod
def _propagate_through(self, module, *input_bounds):
"""Propagates bounds through a verifiable wrapper.
Args:
module: This wrapped module, through which bounds are to be propagated.
*input_bounds: Bounds on the node's input(s).
Returns:
New bounds on the node's output.
"""
class ModelInputWrapper(object):
"""Virtual node representing the network's inputs."""
def __init__(self, index):
super(ModelInputWrapper, self).__init__()
self._index = index
self._output_bounds = None
@property
def index(self):
return self._index
@property
def output_bounds(self):
return self._output_bounds
@output_bounds.setter
def output_bounds(self, bounds):
self._output_bounds = bounds
def __str__(self):
return 'Model input {}'.format(self.index)
class ConstWrapper(VerifiableWrapper):
"""Wraps a constant tensor."""
def _propagate_through(self, module):
# Make sure that the constant value can be converted to a tensor.
return tf.convert_to_tensor(module)
class LinearFCWrapper(VerifiableWrapper):
"""Wraps fully-connected layers."""
def __init__(self, module):
if not isinstance(module, snt.Linear):
raise ValueError('Cannot wrap {} with a LinearFCWrapper.'.format(module))
super(LinearFCWrapper, self).__init__(module)
def _propagate_through(self, module, input_bounds):
w = module.w
b = module.b if module.has_bias else None
return input_bounds.apply_linear(self, w, b)
class LinearConvWrapper(VerifiableWrapper):
"""Wraps convolutional layers."""
class LinearConv1dWrapper(LinearConvWrapper):
"""Wraps 1-D convolutional layers."""
def __init__(self, module):
if not isinstance(module, snt.Conv1D):
raise ValueError('Cannot wrap {} with a LinearConv1dWrapper.'.format(
module))
super(LinearConv1dWrapper, self).__init__(module)
def _propagate_through(self, module, input_bounds):
w = module.w
b = module.b if module.has_bias else None
padding = module.padding
stride = module.stride[1]
return input_bounds.apply_conv1d(self, w, b, padding, stride)
class LinearConv2dWrapper(LinearConvWrapper):
"""Wraps 2-D convolutional layers."""
def __init__(self, module):
if not isinstance(module, snt.Conv2D):
raise ValueError('Cannot wrap {} with a LinearConv2dWrapper.'.format(
module))
super(LinearConv2dWrapper, self).__init__(module)
def _propagate_through(self, module, input_bounds):
w = module.w
b = module.b if module.has_bias else None
padding = module.padding
strides = module.stride[1:-1]
return input_bounds.apply_conv2d(self, w, b, padding, strides)
class IncreasingMonotonicWrapper(VerifiableWrapper):
"""Wraps monotonically increasing functions of the inputs."""
def __init__(self, module, **parameters):
super(IncreasingMonotonicWrapper, self).__init__(module)
self._parameters = parameters
@property
def parameters(self):
return self._parameters
def _propagate_through(self, module, main_bounds, *other_input_bounds):
return main_bounds.apply_increasing_monotonic_fn(self, module,
*other_input_bounds,
**self.parameters)
class SoftmaxWrapper(VerifiableWrapper):
"""Wraps softmax layers."""
def __init__(self):
super(SoftmaxWrapper, self).__init__(None)
def _propagate_through(self, module, input_bounds):
return input_bounds.apply_softmax(self)
class PiecewiseMonotonicWrapper(VerifiableWrapper):
"""Wraps a piecewise (not necessarily increasing) monotonic function."""
def __init__(self, module, boundaries=()):
super(PiecewiseMonotonicWrapper, self).__init__(module)
self._boundaries = boundaries
@property
def boundaries(self):
return self._boundaries
def _propagate_through(self, module, main_bounds, *other_input_bounds):
return main_bounds.apply_piecewise_monotonic_fn(self, module,
self.boundaries,
*other_input_bounds)
class ImageNormWrapper(IncreasingMonotonicWrapper):
"""Convenience wrapper for getting track of the ImageNorm layer."""
def __init__(self, module):
if not isinstance(module, layers.ImageNorm):
raise ValueError('Cannot wrap {} with a ImageNormWrapper.'.format(module))
super(ImageNormWrapper, self).__init__(module.apply)
self._inner_module = module
@property
def inner_module(self):
return self._inner_module
class BatchNormWrapper(VerifiableWrapper):
"""Wraps batch normalization."""
def __init__(self, module):
if not isinstance(module, snt.BatchNorm):
raise ValueError('Cannot wrap {} with a BatchNormWrapper.'.format(
module))
super(BatchNormWrapper, self).__init__(module)
def _propagate_through(self, module, input_bounds):
if isinstance(module, layers.BatchNorm):
# This IBP-specific batch-norm implementation exposes stats recorded
# the most recent time the BatchNorm module was connected.
# These will be either the batch stats (e.g. if training) or the moving
# averages, depending on how the module was called.
mean = module.mean
variance = module.variance
epsilon = module.epsilon
scale = module.scale
bias = module.bias
else:
# This plain Sonnet batch-norm implementation only exposes the
# moving averages.
logging.warning('Sonnet BatchNorm module encountered: %s. '
'IBP will always use its moving averages, not the local '
'batch stats, even in training mode.', str(module))
mean = module.moving_mean
variance = module.moving_variance
epsilon = module._eps # pylint: disable=protected-access
try:
bias = module.beta
except snt.Error:
bias = None
try:
scale = module.gamma
except snt.Error:
scale = None
return input_bounds.apply_batch_norm(self, mean, variance,
scale, bias, epsilon)
class BatchReshapeWrapper(VerifiableWrapper):
"""Wraps batch reshape."""
def __init__(self, module, shape):
if not isinstance(module, snt.BatchReshape):
raise ValueError('Cannot wrap {} with a BatchReshapeWrapper.'.format(
module))
super(BatchReshapeWrapper, self).__init__(module)
self._shape = shape
@property
def shape(self):
return self._shape
def _propagate_through(self, module, input_bounds):
return input_bounds.apply_batch_reshape(self, self.shape)
class BatchFlattenWrapper(BatchReshapeWrapper):
"""Wraps batch flatten."""
def __init__(self, module):
if not isinstance(module, snt.BatchFlatten):
raise ValueError('Cannot wrap {} with a BatchFlattenWrapper.'.format(
module))
super(BatchFlattenWrapper, self).__init__(module, [-1])
| interval-bound-propagation-master | interval_bound_propagation/src/verifiable_wrapper.py |
# coding=utf-8
# Copyright 2019 The Interval Bound Propagation Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library to train verifiably robust neural networks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| interval-bound-propagation-master | interval_bound_propagation/src/__init__.py |
# coding=utf-8
# Copyright 2019 The Interval Bound Propagation Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interval bounds expressed relative to a nominal value."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from interval_bound_propagation.src import bounds as basic_bounds
import sonnet as snt
import tensorflow.compat.v1 as tf
class RelativeIntervalBounds(basic_bounds.AbstractBounds):
"""Upper and lower bounds, as a delta relative to nominal values."""
def __init__(self, lower_offset, upper_offset, nominal):
super(RelativeIntervalBounds, self).__init__()
self._lower_offset = lower_offset
self._upper_offset = upper_offset
self._nominal = nominal
@property
def lower_offset(self):
"""Returns lower bounds, expressed relative to nominal values."""
return self._lower_offset
@property
def upper_offset(self):
"""Returns upper bounds, expressed relative to nominal values."""
return self._upper_offset
@property
def nominal(self):
return self._nominal
@property
def lower(self):
"""Returns absolute lower bounds."""
return self.nominal + self.lower_offset
@property
def upper(self):
"""Returns absolute upper bounds."""
return self.nominal + self.upper_offset
@property
def shape(self):
return self.lower_offset.shape.as_list()
@classmethod
def convert(cls, bounds):
if isinstance(bounds, tf.Tensor):
return cls(tf.zeros_like(bounds), tf.zeros_like(bounds), bounds)
bounds = bounds.concretize()
if not isinstance(bounds, cls):
raise ValueError('Cannot convert "{}" to "{}"'.format(bounds,
cls.__name__))
return bounds
def apply_batch_reshape(self, wrapper, shape):
"""Propagates the bounds through a reshape.
Args:
wrapper: Contains prior bounds from a previous iteration.
shape: output shape, excluding the batch dimension.
Returns:
Output bounds.
"""
reshape = snt.BatchReshape(shape)
return RelativeIntervalBounds(
reshape(self.lower_offset),
reshape(self.upper_offset),
reshape(self.nominal))
def apply_linear(self, wrapper, w, b):
"""Propagates the bounds through a linear layer.
Args:
wrapper: Contains prior bounds from a previous iteration.
w: 2D tensor of shape (input_size, output_size) containing
weights for the linear layer.
b: 1D tensor of shape (output_size) containing biases for the linear
layer, or `None` if no bias.
Returns:
Output bounds.
"""
w_pos = tf.maximum(w, 0)
w_neg = tf.minimum(w, 0)
lb = (tf.matmul(self.lower_offset, w_pos) +
tf.matmul(self.upper_offset, w_neg))
ub = (tf.matmul(self.upper_offset, w_pos) +
tf.matmul(self.lower_offset, w_neg))
nominal_out = tf.matmul(self.nominal, w)
if b is not None:
nominal_out += b
return RelativeIntervalBounds(lb, ub, nominal_out)
def apply_conv1d(self, wrapper, w, b, padding, stride):
"""Propagates the bounds through a 1D convolution layer.
Args:
wrapper: Contains prior bounds from a previous iteration.
w: 3D tensor of shape (kernel_length, input_channels, output_channels)
containing weights for the convolution.
b: 1D tensor of shape (output_channels) containing biases for the
convolution, or `None` if no bias.
padding: `"VALID"` or `"SAME"`, the convolution's padding algorithm.
stride: Integer stride.
Returns:
Output bounds.
"""
w_pos = tf.maximum(w, 0)
w_neg = tf.minimum(w, 0)
lb = (tf.nn.conv1d(self.lower_offset, w_pos,
padding=padding, stride=stride) +
tf.nn.conv1d(self.upper_offset, w_neg,
padding=padding, stride=stride))
ub = (tf.nn.conv1d(self.upper_offset, w_pos,
padding=padding, stride=stride) +
tf.nn.conv1d(self.lower_offset, w_neg,
padding=padding, stride=stride))
nominal_out = tf.nn.conv1d(self.nominal, w,
padding=padding, stride=stride)
if b is not None:
nominal_out += b
return RelativeIntervalBounds(lb, ub, nominal_out)
def apply_conv2d(self, wrapper, w, b, padding, strides):
"""Propagates the bounds through a 2D convolution layer.
Args:
wrapper: Contains prior bounds from a previous iteration.
w: 4D tensor of shape (kernel_height, kernel_width, input_channels,
output_channels) containing weights for the convolution.
b: 1D tensor of shape (output_channels) containing biases for the
convolution, or `None` if no bias.
padding: `"VALID"` or `"SAME"`, the convolution's padding algorithm.
strides: Integer list of length N: `[vertical_stride, horizontal_stride]`.
Returns:
Output bounds.
"""
w_pos = tf.maximum(w, 0)
w_neg = tf.minimum(w, 0)
lb = (tf.nn.convolution(self.lower_offset, w_pos,
padding=padding, strides=strides) +
tf.nn.convolution(self.upper_offset, w_neg,
padding=padding, strides=strides))
ub = (tf.nn.convolution(self.upper_offset, w_pos,
padding=padding, strides=strides) +
tf.nn.convolution(self.lower_offset, w_neg,
padding=padding, strides=strides))
nominal_out = tf.nn.convolution(self.nominal, w,
padding=padding, strides=strides)
if b is not None:
nominal_out += b
return RelativeIntervalBounds(lb, ub, nominal_out)
def apply_increasing_monotonic_fn(self, wrapper, fn, *args, **parameters):
"""Propagates the bounds through a non-linear activation layer or `add` op.
Args:
wrapper: Contains prior bounds from a previous iteration.
fn: String specifying non-linear activation function.
May be one of: sig, relu, tanh, elu, leaky_relu.
Anything else denotes identity.
*args: Other inputs' bounds, for a multi-input node (e.g. Add).
**parameters: Optional parameters if activation is parameterised, e.g.
`{'alpha': 0.2}` for leaky ReLu.
Returns:
Output bounds.
"""
if fn.__name__ in ('add', 'reduce_mean', 'reduce_sum', 'avg_pool'):
return RelativeIntervalBounds(
fn(self.lower_offset, *[bounds.lower_offset for bounds in args]),
fn(self.upper_offset, *[bounds.upper_offset for bounds in args]),
fn(self.nominal, *[bounds.nominal for bounds in args]))
else:
assert not args, 'unary function expected'
nominal_out = fn(self.nominal)
if fn.__name__ == 'reduce_max':
lb, ub = _maxpool_bounds(fn, None, None,
self.lower_offset, self.upper_offset,
nominal_in=self.nominal,
nominal_out=nominal_out)
elif fn.__name__ == 'max_pool':
lb, ub = _maxpool_bounds(fn,
parameters['ksize'][1:-1],
parameters['strides'][1:-1],
self.lower_offset, self.upper_offset,
nominal_in=self.nominal,
nominal_out=nominal_out)
else:
lb, ub = _activation_bounds(fn, self.lower_offset, self.upper_offset,
nominal_in=self.nominal,
parameters=parameters)
return RelativeIntervalBounds(lb, ub, nominal_out)
def apply_batch_norm(self, wrapper, mean, variance, scale, bias, epsilon):
"""Propagates the bounds through a batch norm layer.
Args:
wrapper: Contains prior bounds from a previous iteration.
mean: Learnt batch mean.
variance: Learnt batch variance.
scale: Trained component-wise scale variable.
bias: Trained component-wise bias variable.
epsilon: Epsilon for avoiding instability when `variance` is very small.
Returns:
Output bounds.
"""
lb = tf.nn.batch_normalization(self.lower_offset,
tf.zeros_like(mean), variance,
None, scale, epsilon)
ub = tf.nn.batch_normalization(self.upper_offset,
tf.zeros_like(mean), variance,
None, scale, epsilon)
# It's just possible that the batchnorm's scale is negative.
lb, ub = tf.minimum(lb, ub), tf.maximum(lb, ub)
nominal_out = tf.nn.batch_normalization(self.nominal,
mean, variance,
bias, scale, epsilon)
return RelativeIntervalBounds(lb, ub, nominal_out)
def _set_up_cache(self):
self._lower_offset, update_lower = self._cache_with_update_op(
self._lower_offset)
self._upper_offset, update_upper = self._cache_with_update_op(
self._upper_offset)
return tf.group([update_lower, update_upper])
def _maxpool_bounds(module, kernel_shape, strides, lb_in, ub_in,
nominal_in, nominal_out):
"""Calculates naive bounds on output of an N-D max pool layer.
Args:
module: Callable for max-pool operation.
kernel_shape: Integer list of `[kernel_height, kernel_width]`,
or `None` to aggregate over the layer's entire spatial extent.
strides: Integer list of `[vertical_stride, horizontal_stride]`.
lb_in: (N+2)D tensor of shape (batch_size, input_height, input_width,
layer_channels) containing lower bounds on the inputs to the
max pool layer.
ub_in: (N+2)D tensor of shape (batch_size, input_height, input_width,
layer_channels) containing upper bounds on the inputs to the
max pool layer.
nominal_in: (N+2)D tensor of shape (batch_size, input_height, input_width,
layer_channels) containing nominal input values.
      Input bounds are interpreted relative to this.
    nominal_out: (N+2)D tensor of shape (batch_size, output_height,
      output_width, layer_channels) containing nominal output values.
The returned output bounds are expressed relative to this.
Returns:
lb_out: (N+2)D tensor of shape (batch_size, output_height, output_width,
layer_channels) with lower bounds on the outputs of the max pool layer.
ub_out: (N+2)D tensor of shape (batch_size, output_height, output_width,
layer_channels) with upper bounds on the outputs of the max pool layer.
"""
if kernel_shape is None:
nominal_out = tf.reduce_max(nominal_in,
axis=list(range(1, nominal_in.shape.ndims-1)),
keepdims=True)
return (module((nominal_in - nominal_out) + lb_in),
module((nominal_in - nominal_out) + ub_in))
else:
# Must perform the max on absolute bounds, as the kernels may overlap.
# TODO(stanforth) investigate a more numerically stable implementation
del strides
return (module(nominal_in + lb_in) - nominal_out,
module(nominal_in + ub_in) - nominal_out)
def _activation_bounds(nl_fun, lb_in, ub_in, nominal_in, parameters=None):
"""Calculates naive bounds on output of an activation layer.
  Input bounds are interpreted relative to `nominal_in`, and the returned
  output bounds are expressed relative to `nominal_out = nl_fun(nominal_in)`.
Args:
nl_fun: Callable implementing the activation function itself.
lb_in: (N+2)D tensor of shape (batch_size, layer_height, layer_width,
layer_channels) containing lower bounds on the pre-activations.
ub_in: (N+2)D tensor of shape (batch_size, layer_height, layer_width,
layer_channels) containing upper bounds on the pre-activations.
nominal_in: (N+2)D tensor of shape (batch_size, input_height, input_width,
layer_channels) containing nominal input values.
parameters: Optional parameter dict if activation is parameterised, e.g.
      `{'alpha': 0.2}` for leaky ReLU.
Returns:
lb_out: 2D tensor of shape (batch_size, layer_size) or
4D tensor of shape (batch_size, layer_height, layer_width, layer_channels)
with lower bounds on the activations.
ub_out: 2D tensor of shape (batch_size, layer_size) or
4D tensor of shape (batch_size, layer_height, layer_width, layer_channels)
with upper bounds on the activations.
"""
if nl_fun.__name__ == 'relu':
return (
tf.maximum(tf.minimum(nominal_in, 0.) + lb_in,
tf.minimum(-nominal_in, 0.)), # pylint:disable=invalid-unary-operand-type
tf.maximum(tf.minimum(nominal_in, 0.) + ub_in,
tf.minimum(-nominal_in, 0.))) # pylint:disable=invalid-unary-operand-type
elif nl_fun.__name__ == 'leaky_relu':
alpha = parameters['alpha']
return (
tf.maximum(
lb_in + tf.minimum(nominal_in, 0.) * (1. - alpha),
alpha * lb_in + tf.minimum(-nominal_in, 0.) * (1. - alpha)), # pylint:disable=invalid-unary-operand-type
tf.maximum(
ub_in + tf.minimum(nominal_in, 0.) * (1. - alpha),
alpha * ub_in + tf.minimum(-nominal_in, 0.) * (1. - alpha))) # pylint:disable=invalid-unary-operand-type
else:
nominal_out = nl_fun(nominal_in)
return (nl_fun(nominal_in + lb_in) - nominal_out,
nl_fun(nominal_in + ub_in) - nominal_out)
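# Illustrative sketch (not part of the library): numerically relates the
# closed-form relative ReLU bounds above to the exact expression
# relu(nominal + offset) - relu(nominal). The shapes and values below are
# arbitrary assumptions chosen for this demonstration only.
def _example_relative_relu_bounds():
  nominal = tf.constant([[-2., -0.5, 0.5, 2.]])
  lb_in = tf.constant([[-1., -1., -1., -1.]])  # Non-positive lower offsets.
  ub_in = tf.constant([[1., 1., 1., 1.]])      # Non-negative upper offsets.
  lb_out, ub_out = _activation_bounds(tf.nn.relu, lb_in, ub_in, nominal)
  # For an elementwise monotonic function, the exact relative bounds are
  # f(nominal + offset) - f(nominal); for ReLU both forms should agree.
  lb_ref = tf.nn.relu(nominal + lb_in) - tf.nn.relu(nominal)
  ub_ref = tf.nn.relu(nominal + ub_in) - tf.nn.relu(nominal)
  return lb_out - lb_ref, ub_out - ub_ref  # Expected to be (near-)zero.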
| interval-bound-propagation-master | interval_bound_propagation/src/relative_bounds.py |
# coding=utf-8
# Copyright 2019 The Interval Bound Propagation Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast-Lin symbolic bound calculation for common neural network layers.
The Fast-Lin algorithm expresses lower and upper bounds of each layer of
a neural network as a symbolic linear expression in the input neurons,
relaxing the ReLU layers to retain linearity at the expense of tightness.
Reference: "Towards Fast Computation of Certified Robustness for ReLU Networks",
https://arxiv.org/pdf/1804.09699.pdf.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from absl import logging
from interval_bound_propagation.src import bounds as basic_bounds
from interval_bound_propagation.src import relative_bounds
import sonnet as snt
import tensorflow.compat.v1 as tf
# Holds the linear expressions serving as bounds.
# w: [batch_size, input_size, output_shape] storing the weights.
# b: [batch_size, output_shape] storing the bias.
# lower: [batch_size, input_size] storing the lower bounds on inputs.
# upper: [batch_size, input_size] storing the upper bounds on inputs.
# `lower` and `upper` tensors are always flattened representations of the
# original inputs.
LinearExpression = collections.namedtuple(
'LinearExpression', ['w', 'b', 'lower', 'upper'])
class SymbolicBounds(basic_bounds.AbstractBounds):
"""Fast-Lin bounds (https://arxiv.org/abs/1804.09699)."""
def __init__(self, lower, upper):
super(SymbolicBounds, self).__init__()
self._lower = lower
self._upper = upper
self._prior_bounds = None
self._concretized = None
@property
def lower(self):
return self._lower
@property
def upper(self):
return self._upper
@property
def shape(self):
return self.lower.b.shape.as_list()
def concretize(self):
"""Returns lower and upper interval bounds."""
if self._concretized is None:
# Construct once and cache.
lb, ub = self._concretize_bounds(self.lower, self.upper)
# Apply intersections with prior runs.
if self._prior_bounds is not None:
lb = tf.maximum(lb, self._prior_bounds.lower)
ub = tf.minimum(ub, self._prior_bounds.upper)
self._concretized = basic_bounds.IntervalBounds(lb, ub)
return self._concretized
def with_priors(self, existing_bounds):
if existing_bounds is not None:
self._prior_bounds = existing_bounds.concretize()
# These priors are applied the next time concretize() is called.
self._concretized = None
return self
@classmethod
def convert(cls, bounds):
if isinstance(bounds, cls):
return bounds
if isinstance(bounds, tf.Tensor):
bounds = basic_bounds.IntervalBounds(bounds, bounds)
bounds = bounds.concretize()
if not isinstance(bounds, basic_bounds.IntervalBounds):
raise ValueError('Cannot convert "{}" to "SymbolicBounds"'.format(bounds))
lower, upper = cls._initial_symbolic_bounds(bounds.lower, bounds.upper)
return cls(lower, upper)
def apply_linear(self, wrapper, w, b):
w_pos = tf.maximum(w, 0)
w_neg = tf.minimum(w, 0)
lb = self._add_expression(
self._scale_expression(self.lower, w_pos),
self._scale_expression(self.upper, w_neg)
)
lb = self._add_bias(lb, b)
ub = self._add_expression(
self._scale_expression(self.lower, w_neg),
self._scale_expression(self.upper, w_pos)
)
ub = self._add_bias(ub, b)
return SymbolicBounds(lb, ub).with_priors(wrapper.output_bounds)
def apply_conv1d(self, wrapper, w, b, padding, stride):
w_pos = tf.maximum(w, 0)
w_neg = tf.minimum(w, 0)
lb = self._add_expression(
self._conv1d_expression(self.lower, w_pos, padding, stride),
self._conv1d_expression(self.upper, w_neg, padding, stride))
lb = self._add_bias(lb, b)
ub = self._add_expression(
self._conv1d_expression(self.upper, w_pos, padding, stride),
self._conv1d_expression(self.lower, w_neg, padding, stride))
ub = self._add_bias(ub, b)
return SymbolicBounds(lb, ub).with_priors(wrapper.output_bounds)
def apply_conv2d(self, wrapper, w, b, padding, strides):
w_pos = tf.maximum(w, 0)
w_neg = tf.minimum(w, 0)
lb = self._add_expression(
self._conv2d_expression(self.lower, w_pos, padding, strides),
self._conv2d_expression(self.upper, w_neg, padding, strides))
lb = self._add_bias(lb, b)
ub = self._add_expression(
self._conv2d_expression(self.upper, w_pos, padding, strides),
self._conv2d_expression(self.lower, w_neg, padding, strides))
ub = self._add_bias(ub, b)
return SymbolicBounds(lb, ub).with_priors(wrapper.output_bounds)
def apply_increasing_monotonic_fn(self, wrapper, fn, *args, **parameters):
if fn.__name__ != 'relu':
# Fallback to regular interval bound propagation for unsupported
# operations.
logging.warn('"%s" is not supported by SymbolicBounds. '
'Fallback on IntervalBounds.', fn.__name__)
interval_bounds = basic_bounds.IntervalBounds.convert(self)
converted_args = [basic_bounds.IntervalBounds.convert(b) for b in args]
interval_bounds = interval_bounds._increasing_monotonic_fn( # pylint: disable=protected-access
fn, *converted_args)
return self.convert(interval_bounds)
concrete = self.concretize()
lb, ub = concrete.lower, concrete.upper
is_ambiguous = tf.logical_and(ub > 0, lb < 0)
# Ensure denominator is always positive, even when not needed.
ambiguous_denom = tf.where(is_ambiguous, ub - lb, tf.ones_like(ub))
scale = tf.where(
is_ambiguous, ub / ambiguous_denom,
tf.where(lb >= 0, tf.ones_like(lb), tf.zeros_like(lb)))
bias = tf.where(is_ambiguous, -lb, tf.zeros_like(lb))
lb_out = LinearExpression(
w=tf.expand_dims(scale, 1) * self.lower.w,
b=scale * self.lower.b,
lower=self.lower.lower, upper=self.lower.upper)
ub_out = LinearExpression(
w=tf.expand_dims(scale, 1) * self.upper.w,
b=scale * (self.upper.b + bias),
lower=self.upper.lower, upper=self.upper.upper)
return SymbolicBounds(lb_out, ub_out).with_priors(wrapper.output_bounds)
def apply_batch_reshape(self, wrapper, shape):
return SymbolicBounds(self._batch_reshape_expression(self.lower, shape),
self._batch_reshape_expression(self.upper, shape)
).with_priors(wrapper.output_bounds)
# Helper methods.
@staticmethod
def _add_bias(expr, b):
"""Add bias b to a linear expression."""
if b is None:
return expr
return LinearExpression(w=expr.w, b=expr.b + b,
lower=expr.lower, upper=expr.upper)
@staticmethod
def _add_expression(expr_a, expr_b):
"""Add two expression together."""
return LinearExpression(w=expr_a.w + expr_b.w, b=expr_a.b + expr_b.b,
lower=expr_a.lower, upper=expr_b.upper)
@staticmethod
def _scale_expression(expr, w):
"""Scale a linear expression by w."""
b = tf.matmul(expr.b, w)
w = tf.tensordot(expr.w, w, axes=1)
return LinearExpression(w=w, b=b, lower=expr.lower, upper=expr.upper)
@staticmethod
def _conv1d_expression(expr, w, padding, stride):
"""Scale a linear expression by w (through a convolutional layer)."""
b = tf.nn.conv1d(expr.b, w, padding=padding, stride=stride)
shape = tf.concat([[tf.reduce_prod(tf.shape(expr.w)[:2])],
tf.shape(expr.w)[2:]], axis=0)
w = tf.nn.conv1d(tf.reshape(expr.w, shape), w, padding=padding,
stride=stride)
shape = tf.concat([tf.shape(expr.w)[:2], tf.shape(w)[1:]], axis=0)
w = tf.reshape(w, shape)
return LinearExpression(w=w, b=b, lower=expr.lower, upper=expr.upper)
@staticmethod
def _conv2d_expression(expr, w, padding, strides):
"""Scale a linear expression by w (through a convolutional layer)."""
b = tf.nn.convolution(expr.b, w, padding=padding, strides=strides)
shape = tf.concat([[tf.reduce_prod(tf.shape(expr.w)[:2])],
tf.shape(expr.w)[2:]], axis=0)
w = tf.nn.convolution(tf.reshape(expr.w, shape), w, padding=padding,
strides=strides)
shape = tf.concat([tf.shape(expr.w)[:2], tf.shape(w)[1:]], axis=0)
w = tf.reshape(w, shape)
return LinearExpression(w=w, b=b, lower=expr.lower, upper=expr.upper)
@staticmethod
def _batch_reshape_expression(expr, shape):
w = snt.BatchReshape(shape, preserve_dims=2)(expr.w)
b = snt.BatchReshape(shape)(expr.b)
return LinearExpression(w=w, b=b, lower=expr.lower, upper=expr.upper)
@staticmethod
def _concretize_bounds(lower, upper):
"""Returns lower and upper interval bounds."""
if len(lower.b.shape) == 2:
equation = 'ijk,ij->ik'
elif len(lower.b.shape) == 3:
equation = 'ijnc,ij->inc'
elif len(lower.b.shape) == 4:
equation = 'ijhwc,ij->ihwc'
else:
raise NotImplementedError('Shape unsupported: {}'.format(lower.b.shape))
lb = (tf.einsum(equation, tf.maximum(lower.w, 0), lower.lower) +
tf.einsum(equation, tf.minimum(lower.w, 0), lower.upper) +
lower.b)
ub = (tf.einsum(equation, tf.maximum(upper.w, 0), upper.upper) +
tf.einsum(equation, tf.minimum(upper.w, 0), upper.lower) +
upper.b)
return lb, ub
@staticmethod
def _initial_symbolic_bounds(lb, ub):
"""Returns symbolic bounds for the given interval bounds."""
batch_size = tf.shape(lb)[0]
input_shape = lb.shape[1:]
zero = tf.zeros_like(lb)
lb = snt.BatchFlatten()(lb)
ub = snt.BatchFlatten()(ub)
input_size = tf.shape(lb)[1]
output_shape = tf.concat([[input_size], input_shape], axis=0)
identity = tf.reshape(tf.eye(input_size), output_shape)
identity = tf.expand_dims(identity, 0)
identity = tf.tile(identity, [batch_size] + [1] * (len(input_shape) + 1))
expr = LinearExpression(w=identity, b=zero,
lower=lb, upper=ub)
return expr, expr
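# Illustrative sketch (not part of the library): converting an interval to the
# initial (identity) symbolic bounds and concretizing recovers the original
# box. The input shape and radius are arbitrary assumptions for this demo.
def _example_symbolic_bounds_roundtrip():
  x0 = tf.zeros([1, 4])
  epsilon = 0.1
  interval_in = basic_bounds.IntervalBounds(x0 - epsilon, x0 + epsilon)
  symbolic = SymbolicBounds.convert(interval_in)
  interval_out = symbolic.concretize()
  return interval_out.lower, interval_out.upper  # ~ (x0 - 0.1, x0 + 0.1)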
class RelativeSymbolicBounds(SymbolicBounds):
"""Relative-to-nominal variant of Fast-Lin bounds."""
def __init__(self, lower_offset, upper_offset, nominal):
super(RelativeSymbolicBounds, self).__init__(lower_offset, upper_offset)
self._nominal = nominal
def concretize(self):
"""Returns lower and upper interval bounds."""
if self._concretized is None:
# Construct once and cache.
lb_offset, ub_offset = self._concretize_bounds(self.lower, self.upper)
# Apply intersections with prior runs.
if self._prior_bounds is not None:
lb_offset = tf.maximum(lb_offset, self._prior_bounds.lower_offset)
ub_offset = tf.minimum(ub_offset, self._prior_bounds.upper_offset)
self._concretized = relative_bounds.RelativeIntervalBounds(
lb_offset, ub_offset, self._nominal)
return self._concretized
@classmethod
def convert(cls, bounds):
if isinstance(bounds, cls):
return bounds
if isinstance(bounds, tf.Tensor):
bounds = relative_bounds.RelativeIntervalBounds(
tf.zeros_like(bounds), tf.zeros_like(bounds), bounds)
bounds = bounds.concretize()
if not isinstance(bounds, relative_bounds.RelativeIntervalBounds):
raise ValueError(
'Cannot convert "{}" to "RelativeSymbolicBounds"'.format(bounds))
lower, upper = cls._initial_symbolic_bounds(bounds.lower_offset,
bounds.upper_offset)
return cls(lower, upper, bounds.nominal)
def apply_linear(self, wrapper, w, b):
bounds_out = super(RelativeSymbolicBounds, self).apply_linear(
wrapper, w, b=None)
nominal_out = tf.matmul(self._nominal, w)
if b is not None:
nominal_out += b
return RelativeSymbolicBounds(
bounds_out.lower, bounds_out.upper, nominal_out).with_priors(
wrapper.output_bounds)
def apply_conv1d(self, wrapper, w, b, padding, stride):
bounds_out = super(RelativeSymbolicBounds, self).apply_conv1d(
wrapper, w, b=None, padding=padding, stride=stride)
nominal_out = tf.nn.conv1d(self._nominal, w,
padding=padding, stride=stride)
if b is not None:
nominal_out += b
return RelativeSymbolicBounds(
bounds_out.lower, bounds_out.upper, nominal_out).with_priors(
wrapper.output_bounds)
def apply_conv2d(self, wrapper, w, b, padding, strides):
bounds_out = super(RelativeSymbolicBounds, self).apply_conv2d(
wrapper, w, b=None, padding=padding, strides=strides)
nominal_out = tf.nn.convolution(self._nominal, w,
padding=padding, strides=strides)
if b is not None:
nominal_out += b
return RelativeSymbolicBounds(
bounds_out.lower, bounds_out.upper, nominal_out).with_priors(
wrapper.output_bounds)
def apply_increasing_monotonic_fn(self, wrapper, fn, *args, **parameters):
if fn.__name__ != 'relu':
# Fallback to regular interval bound propagation for unsupported
# operations.
logging.warn('"%s" is not supported by RelativeSymbolicBounds. '
'Fallback on RelativeIntervalBounds.', fn.__name__)
interval_bounds = relative_bounds.RelativeIntervalBounds.convert(self)
converted_args = [relative_bounds.RelativeIntervalBounds.convert(b)
for b in args]
interval_bounds = interval_bounds._increasing_monotonic_fn( # pylint: disable=protected-access
fn, *converted_args)
return self.convert(interval_bounds)
concrete = self.concretize()
lb, ub = concrete.lower_offset, concrete.upper_offset
is_ambiguous = tf.logical_and(ub > -self._nominal, lb < -self._nominal)
# Ensure denominator is always positive, even when not needed.
ambiguous_denom = tf.where(is_ambiguous, ub - lb, tf.ones_like(ub))
scale = tf.where(
is_ambiguous, (self._nominal + ub) / ambiguous_denom,
tf.where(lb >= -self._nominal, tf.ones_like(lb), tf.zeros_like(lb)))
scale_complement = tf.where(
is_ambiguous, -(self._nominal + lb) / ambiguous_denom,
tf.where(lb >= -self._nominal, tf.zeros_like(lb), tf.ones_like(lb)))
# Need lb_out.b = scale * (nom_in + lb_in.b) - nom_out
# and ub_out.b = scale * (nom_in + ub_in.b - min(nom_in + lb, 0)) - nom_out
lower_bias = (scale * (tf.minimum(self._nominal, 0.)) +
scale_complement * tf.minimum(-self._nominal, 0.))
upper_bias = (scale * tf.maximum(tf.minimum(-self._nominal, 0.) - lb,
tf.minimum(self._nominal, 0.)) +
scale_complement * tf.minimum(-self._nominal, 0.))
lb_out = LinearExpression(
w=tf.expand_dims(scale, 1) * self.lower.w,
b=scale * self.lower.b + lower_bias,
lower=self.lower.lower, upper=self.lower.upper)
ub_out = LinearExpression(
w=tf.expand_dims(scale, 1) * self.upper.w,
b=scale * self.upper.b + upper_bias,
lower=self.upper.lower, upper=self.upper.upper)
nominal_out = tf.nn.relu(self._nominal)
return RelativeSymbolicBounds(
lb_out, ub_out, nominal_out).with_priors(wrapper.output_bounds)
def apply_batch_reshape(self, wrapper, shape):
bounds_out = super(RelativeSymbolicBounds, self).apply_batch_reshape(
wrapper, shape)
nominal_out = snt.BatchReshape(shape)(self._nominal)
return RelativeSymbolicBounds(
bounds_out.lower, bounds_out.upper, nominal_out).with_priors(
wrapper.output_bounds)
| interval-bound-propagation-master | interval_bound_propagation/src/fastlin.py |
# coding=utf-8
# Copyright 2019 The Interval Bound Propagation Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sonnet modules that represent the predictor network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from absl import logging
from interval_bound_propagation.src import layers
from interval_bound_propagation.src import verifiable_wrapper
import numpy as np
import sonnet as snt
import tensorflow.compat.v1 as tf
# Set of supported activations. Must be monotonic and attributes of `tf.nn`.
_ALLOWED_ACTIVATIONS = set([
'elu',
'leaky_relu',
'relu',
'relu6',
'selu',
'sigmoid',
'softplus',
'softsign',
'tanh',
])
# Mapping between graph node ops and their TensorFlow function.
_MONOTONIC_NODE_OPS = {
'Elu': tf.nn.elu,
'LeakyRelu': tf.nn.leaky_relu,
'Relu': tf.nn.relu,
'Relu6': tf.nn.relu6,
'Selu': tf.nn.selu,
'Sigmoid': tf.nn.sigmoid,
'Softplus': tf.nn.softplus,
'Softsign': tf.nn.softsign,
'Tanh': tf.nn.tanh,
}
class VerifiableModelWrapper(snt.AbstractModule):
"""Wraps a predictor network."""
def __init__(self, net_builder, name='verifiable_predictor'):
"""Constructor for the verifiable model.
Args:
net_builder: A callable that returns output logits from an input.
net_builder must accept two arguments: the input (as the first
argument) and is_training (as the second).
name: Sonnet module name.
"""
super(VerifiableModelWrapper, self).__init__(name=name)
self._net_builder = net_builder
@property
def wrapped_network(self):
return self._net_builder
@property
def output_size(self):
self._ensure_is_connected()
return self._num_classes
@property
def logits(self):
self._ensure_is_connected()
return self._logits
@property
def inputs(self):
self._ensure_is_connected()
return self._inputs
@property
def input_wrappers(self):
self._ensure_is_connected()
return self._model_inputs
@property
def modules(self):
self._ensure_is_connected()
return self._modules
def dependencies(self, module):
self._ensure_is_connected()
return self._module_depends_on[module]
@property
def output_module(self):
self._ensure_is_connected()
return self._produced_by[self._logits.name]
def fanout_of(self, node):
"""Looks up fan-out for a given node.
Args:
node: `ibp.VerifiableWrapper` occurring in the network either as an
operation, or as the initial input.
Returns:
Number of times `node` occurs as the input of another operation within
the network, or 1 if `node` is the overall output.
"""
return self._fanouts[node]
def _build(self, *z0, **kwargs):
"""Outputs logits from input z0.
Args:
*z0: inputs as `Tensor`.
      **kwargs: Other arguments passed directly to the _build() function of the
        wrapped model. May include `override` (defaults to False). If
        `override` is False, this function does not update any internal state
        and reuses any components computed by a previous call to _build(). If
        there were no previous calls to _build(), it behaves as if `override`
        were True.
Returns:
logits resulting from using z0 as inputs.
"""
override = not self.is_connected
if 'override' in kwargs:
override = kwargs['override'] or override
del kwargs['override']
if override:
self._inputs = z0[0] if len(z0) == 1 else z0
# Build underlying verifiable modules.
self._model_inputs = []
self._modules = []
self._produced_by = {} # Connection graph.
self._fanouts = collections.Counter()
for i, z in enumerate(z0):
self._model_inputs.append(verifiable_wrapper.ModelInputWrapper(i))
self._produced_by[z.name] = self._model_inputs[-1]
self._module_depends_on = collections.defaultdict(list)
self._output_by_module = {}
with snt.observe_connections(self._observer):
logits = self._net_builder(*z0, **kwargs)
# Logits might be produced by a non-Sonnet module.
self._backtrack(logits, max_depth=100)
# Log analysis.
for m in self._modules:
logging.info('Found: %s', m)
output_shape = self._output_by_module[m].shape.as_list()[1:]
logging.info(' Output shape: %s => %d units', output_shape,
np.prod(output_shape))
for depends in self._module_depends_on[m]:
logging.info(' Depends on: %s', depends)
logging.info('Final logits produced by: %s',
self._produced_by[logits.name])
self._logits = logits
self._num_classes = logits.shape[-1].value
else:
# Must have been connected once before.
self._ensure_is_connected()
logits = self._net_builder(*z0, **kwargs)
return logits
def _observer(self, subgraph):
input_nodes = self._inputs_for_observed_module(subgraph)
if input_nodes is None:
# We do not fail as we want to allow higher-level Sonnet components.
# In practice, the rest of the logic will fail if we are unable to
# connect all low-level modules.
logging.warn('Unprocessed module "%s"', str(subgraph.module))
return
if subgraph.outputs in input_nodes:
# The Sonnet module is just returning its input as its output.
# This may happen with a reshape in which the shape does not change.
return
self._add_module(self._wrapper_for_observed_module(subgraph),
subgraph.outputs, *input_nodes)
def _inputs_for_observed_module(self, subgraph):
"""Extracts input tensors from a connected Sonnet module.
This default implementation supports common layer types, but should be
overridden if custom layer types are to be supported.
Args:
subgraph: `snt.ConnectedSubGraph` specifying the Sonnet module being
connected, and its inputs and outputs.
Returns:
List of input tensors, or None if not a supported Sonnet module.
"""
m = subgraph.module
# Only support a few operations for now.
if not (isinstance(m, snt.BatchReshape) or
isinstance(m, snt.Linear) or
isinstance(m, snt.Conv1D) or
isinstance(m, snt.Conv2D) or
isinstance(m, snt.BatchNorm) or
isinstance(m, layers.ImageNorm)):
return None
if isinstance(m, snt.BatchNorm):
return subgraph.inputs['input_batch'],
else:
return subgraph.inputs['inputs'],
def _wrapper_for_observed_module(self, subgraph):
"""Creates a wrapper for a connected Sonnet module.
This default implementation supports common layer types, but should be
overridden if custom layer types are to be supported.
Args:
subgraph: `snt.ConnectedSubGraph` specifying the Sonnet module being
connected, and its inputs and outputs.
Returns:
`ibp.VerifiableWrapper` for the Sonnet module.
"""
m = subgraph.module
if isinstance(m, snt.BatchReshape):
shape = subgraph.outputs.get_shape()[1:].as_list()
return verifiable_wrapper.BatchReshapeWrapper(m, shape)
elif isinstance(m, snt.Linear):
return verifiable_wrapper.LinearFCWrapper(m)
elif isinstance(m, snt.Conv1D):
return verifiable_wrapper.LinearConv1dWrapper(m)
elif isinstance(m, snt.Conv2D):
return verifiable_wrapper.LinearConv2dWrapper(m)
elif isinstance(m, layers.ImageNorm):
return verifiable_wrapper.ImageNormWrapper(m)
else:
assert isinstance(m, snt.BatchNorm)
return verifiable_wrapper.BatchNormWrapper(m)
def _backtrack(self, node, max_depth=100):
if node.name not in self._produced_by:
if max_depth <= 0:
raise ValueError('Unable to backtrack through the graph. '
'Consider using more basic Sonnet modules.')
self._wrap_node(node, max_depth=(max_depth - 1))
self._fanouts[self._produced_by[node.name]] += 1
def _wrap_node(self, node, **kwargs):
"""Adds an IBP wrapper for the node, and backtracks through its inputs.
This default implementation supports common layer types, but should be
overridden if custom layer types are to be supported.
Implementations should create a `ibp.VerifiableWrapper` and then invoke
`self._add_module(wrapper, node, *input_node, **kwargs)`.
Args:
node: TensorFlow graph node to wrap for IBP.
**kwargs: Context to pass to `self._add_module`.
"""
# Group all unary monotonic ops at the end.
if node.op.type in ('Add', 'AddV2', 'Mul', 'Sub', 'Maximum', 'Minimum'):
input_node0 = node.op.inputs[0]
input_node1 = node.op.inputs[1]
if node.op.type in ('Add', 'AddV2'):
w = verifiable_wrapper.IncreasingMonotonicWrapper(tf.add)
elif node.op.type == 'Mul':
w = verifiable_wrapper.PiecewiseMonotonicWrapper(tf.multiply)
elif node.op.type == 'Sub':
w = verifiable_wrapper.PiecewiseMonotonicWrapper(tf.subtract)
elif node.op.type == 'Maximum':
w = verifiable_wrapper.IncreasingMonotonicWrapper(tf.maximum)
elif node.op.type == 'Minimum':
w = verifiable_wrapper.IncreasingMonotonicWrapper(tf.minimum)
self._add_module(w, node, input_node0, input_node1, **kwargs)
return
elif node.op.type == 'ConcatV2':
num_inputs = node.op.get_attr('N')
assert num_inputs == len(node.op.inputs) - 1
inputs = node.op.inputs[:num_inputs]
axis = node.op.inputs[num_inputs]
def concat(*args):
return tf.concat(args, axis=axis)
self._add_module(
verifiable_wrapper.IncreasingMonotonicWrapper(concat, axis=axis),
node, *inputs, **kwargs)
return
elif node.op.type == 'Softmax':
input_node = node.op.inputs[0]
self._add_module(verifiable_wrapper.SoftmaxWrapper(), node, input_node,
**kwargs)
return
elif node.op.type == 'Const':
self._add_module(verifiable_wrapper.ConstWrapper(node), node, **kwargs)
return
# The rest are all unary monotonic ops.
parameters = dict()
if node.op.type in _MONOTONIC_NODE_OPS:
input_node = node.op.inputs[0]
# Leaky ReLUs are a special case since they have a second argument.
if node.op.type == 'LeakyRelu':
parameters = dict(alpha=node.op.get_attr('alpha'))
# Use function definition instead of lambda for clarity.
def leaky_relu(x):
return tf.nn.leaky_relu(x, **parameters)
fn = leaky_relu
else:
fn = _MONOTONIC_NODE_OPS[node.op.type]
elif node.op.type in ('Mean', 'Max', 'Sum', 'Min'):
      # Reduce ops (mean/max/sum/min) have two inputs. The first one should be
      # produced by an upstream node, while the second one represents the
      # reduction axis.
input_node = node.op.inputs[0]
parameters = dict(axis=node.op.inputs[1],
keep_dims=node.op.get_attr('keep_dims'))
# Use function definition instead of lambda for clarity.
def reduce_max(x):
return tf.reduce_max(x, **parameters)
def reduce_mean(x):
return tf.reduce_mean(x, **parameters)
def reduce_min(x):
return tf.reduce_min(x, **parameters)
def reduce_sum(x):
return tf.reduce_sum(x, **parameters)
fn = dict(
Max=reduce_max, Mean=reduce_mean, Sum=reduce_sum,
Min=reduce_min)[node.op.type]
elif node.op.type == 'ExpandDims':
input_node = node.op.inputs[0]
parameters = dict(axis=node.op.inputs[1])
def expand_dims(x):
return tf.expand_dims(x, **parameters)
fn = expand_dims
elif node.op.type == 'Transpose':
input_node = node.op.inputs[0]
parameters = dict(perm=node.op.inputs[1])
def transpose(x):
return tf.transpose(x, **parameters)
fn = transpose
elif node.op.type == 'Squeeze':
input_node = node.op.inputs[0]
parameters = dict(axis=node.op.get_attr('squeeze_dims'))
def squeeze(x):
return tf.squeeze(x, **parameters)
fn = squeeze
elif node.op.type == 'Pad':
input_node = node.op.inputs[0]
parameters = dict(paddings=node.op.inputs[1])
def pad(x):
return tf.pad(x, **parameters)
fn = pad
elif node.op.type in ('MaxPool', 'AvgPool'):
input_node = node.op.inputs[0]
parameters = dict(
ksize=node.op.get_attr('ksize'),
strides=node.op.get_attr('strides'),
padding=node.op.get_attr('padding'),
data_format=node.op.get_attr('data_format'),
)
if node.op.type == 'MaxPool':
def max_pool(x):
return tf.nn.max_pool(x, **parameters)
fn = max_pool
elif node.op.type == 'AvgPool':
def avg_pool(x):
return tf.nn.avg_pool(x, **parameters)
fn = avg_pool
elif node.op.type == 'Reshape':
input_node = node.op.inputs[0]
parameters = dict(shape=node.op.inputs[1])
def reshape(x):
return tf.reshape(x, **parameters)
fn = reshape
elif node.op.type == 'Identity':
input_node = node.op.inputs[0]
def identity(x):
return tf.identity(x)
fn = identity
elif node.op.type == 'MatrixDiag':
input_node = node.op.inputs[0]
def matrix_diag(x):
return tf.matrix_diag(x)
fn = matrix_diag
elif node.op.type == 'Slice':
input_node = node.op.inputs[0]
parameters = dict(
begin=node.op.inputs[1],
size=node.op.inputs[2],
)
def regular_slice(x):
return tf.slice(x, **parameters)
fn = regular_slice
elif node.op.type == 'StridedSlice':
input_node = node.op.inputs[0]
parameters = dict(
begin=node.op.inputs[1],
end=node.op.inputs[2],
strides=node.op.inputs[3],
begin_mask=node.op.get_attr('begin_mask'),
end_mask=node.op.get_attr('end_mask'),
ellipsis_mask=node.op.get_attr('ellipsis_mask'),
new_axis_mask=node.op.get_attr('new_axis_mask'),
shrink_axis_mask=node.op.get_attr('shrink_axis_mask'),
)
def strided_slice(x):
return tf.strided_slice(x, **parameters)
fn = strided_slice
elif node.op.type == 'Fill':
input_node = node.op.inputs[1] # Shape is the first argument.
dims = node.op.inputs[0]
parameters = dict(dims=dims)
def fill(x):
return tf.fill(dims, x)
fn = fill
elif node.op.type == 'RealDiv':
# The denominator is assumed to be constant but is permitted to be
# example-dependent, for example a sequence's length prior to padding.
input_node = node.op.inputs[0]
denom = node.op.inputs[1]
parameters = dict(denom=denom)
def quotient(x):
return x / denom
fn = quotient
else:
raise NotImplementedError(
'Unsupported operation: "{}" with\n{}.'.format(node.op.type, node.op))
self._add_module(
verifiable_wrapper.IncreasingMonotonicWrapper(fn, **parameters),
node, input_node, **kwargs)
def _add_module(self, wrapper, node, *input_nodes, **kwargs):
"""Adds the given node wrapper, first backtracking through its inputs.
Args:
wrapper: `ibp.VerifiableWrapper` for the node.
node: TensorFlow graph node.
*input_nodes: Input nodes for `node`.
**kwargs: Contains the `max_depth` argument for recursive _backtrack call.
"""
for input_node in input_nodes:
self._backtrack(input_node, **kwargs)
self._modules.append(wrapper)
self._produced_by[node.name] = self._modules[-1]
self._module_depends_on[self._modules[-1]].extend(
[self._produced_by[input_node.name] for input_node in input_nodes])
self._output_by_module[self._modules[-1]] = node
def propagate_bounds(self, *input_bounds):
"""Propagates input bounds through the network.
Args:
*input_bounds: `AbstractBounds` instance corresponding to z0.
Returns:
The final output bounds corresponding to the output logits.
"""
self._ensure_is_connected()
def _get_bounds(input_module):
"""Retrieves the bounds corresponding to a module."""
# All bounds need to be canonicalized to the same type. In particular, we
# need to handle the case of constant bounds specially. We convert them
# to the same type as input_bounds.
if isinstance(input_module, verifiable_wrapper.ConstWrapper):
return input_bounds[0].convert(input_module.output_bounds)
return input_module.output_bounds
# Initialise inputs' bounds.
for model_input in self._model_inputs:
model_input.output_bounds = input_bounds[model_input.index]
# By construction, this list is topologically sorted.
for m in self._modules:
# Construct combined input bounds.
upstream_bounds = [_get_bounds(b) for b in self._module_depends_on[m]]
m.propagate_bounds(*upstream_bounds)
# We assume that the last module is the final output layer.
return self._produced_by[self._logits.name].output_bounds
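# Illustrative usage sketch (not part of the library): wraps a small MLP and
# propagates interval bounds for an L-infinity ball of radius `epsilon` around
# the inputs. The architecture, layer sizes and epsilon are arbitrary
# assumptions made for this demonstration.
def _example_verifiable_wrapper(inputs, epsilon=0.1):
  # Imported locally to keep this sketch self-contained.
  from interval_bound_propagation.src import bounds as basic_bounds  # pylint: disable=g-import-not-at-top
  def _net_builder(x, is_training=False):  # pylint: disable=unused-argument
    h = tf.nn.relu(snt.Linear(32)(snt.BatchFlatten()(x)))
    return snt.Linear(10)(h)
  predictor = VerifiableModelWrapper(_net_builder)
  logits = predictor(inputs)
  input_bounds = basic_bounds.IntervalBounds(inputs - epsilon,
                                             inputs + epsilon)
  output_bounds = predictor.propagate_bounds(input_bounds)
  return logits, output_bounds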
class StandardModelWrapper(snt.AbstractModule):
"""Wraps a predictor network that keeps track of inputs and logits."""
def __init__(self, net_builder, name='verifiable_predictor'):
"""Constructor for a non-verifiable model.
This wrapper can be used to seamlessly use loss.py and utils.py without
IBP verification.
Args:
net_builder: A callable that returns output logits from an input.
net_builder must accept two arguments: the input (as the first
argument) and is_training (as the second).
name: Sonnet module name.
"""
super(StandardModelWrapper, self).__init__(name=name)
self._net_builder = net_builder
@property
def wrapped_network(self):
return self._net_builder
@property
def output_size(self):
self._ensure_is_connected()
return self._num_classes
@property
def logits(self):
self._ensure_is_connected()
return self._logits
@property
def inputs(self):
self._ensure_is_connected()
return self._inputs
@property
def modules(self):
raise RuntimeError('Model is not wrapped by a VerifiableModelWrapper. '
'Bounds cannot be propagated.')
def propagate_bounds(self, *input_bounds):
raise RuntimeError('Model is not wrapped by a VerifiableModelWrapper. '
'Bounds cannot be propagated.')
def _build(self, *z0, **kwargs):
"""Outputs logits from input z0.
Args:
*z0: inputs as `Tensor`.
      **kwargs: Other arguments passed directly to the _build() function of the
        wrapped model. May include `override` (defaults to False). If
        `override` is False, this function does not update any internal state
        and reuses any components computed by a previous call to _build(). If
        there were no previous calls to _build(), it behaves as if `override`
        were True.
Returns:
logits resulting from using z0 as inputs.
"""
override = not self.is_connected
if 'override' in kwargs:
override = kwargs['override'] or override
del kwargs['override']
if override:
self._inputs = z0[0] if len(z0) == 1 else z0
logits = self._net_builder(*z0, **kwargs)
self._logits = logits
self._num_classes = logits.shape[-1].value
else:
# Must have been connected once before.
self._ensure_is_connected()
logits = self._net_builder(*z0, **kwargs)
return logits
class DNN(snt.AbstractModule):
"""Simple feed-forward neural network."""
def __init__(self, num_classes, layer_types, l2_regularization_scale=0.,
name='predictor'):
"""Constructor for the DNN.
Args:
num_classes: Output size.
layer_types: Iterable of tuples. Each tuple must be one of the following:
        * ('conv2d', (kernel_height, kernel_width), channels, padding, stride)
* ('linear', output_size)
* ('batch_normalization',)
* ('activation', activation)
Convolutional layers must precede all linear layers.
l2_regularization_scale: Scale of the L2 regularization on the weights
of each layer.
name: Sonnet module name.
"""
super(DNN, self).__init__(name=name)
self._layer_types = list(layer_types)
self._layer_types.append(('linear', num_classes))
if l2_regularization_scale > 0.:
regularizer = tf.keras.regularizers.l2(l=0.5*l2_regularization_scale)
self._regularizers = {'w': regularizer}
else:
self._regularizers = None
    # The following allows reuse of previous batch norm statistics.
self._batch_norms = {}
def _build(self, z0, is_training=True, test_local_stats=False, reuse=False):
"""Outputs logits."""
zk = z0
conv2d_id = 0
linear_id = 0
name = None
for spec in self._layer_types:
if spec[0] == 'conv2d':
if linear_id > 0:
raise ValueError('Convolutional layers must precede fully connected '
'layers.')
name = 'conv2d_{}'.format(conv2d_id)
conv2d_id += 1
(_, (kernel_height, kernel_width), channels, padding, stride) = spec
m = snt.Conv2D(output_channels=channels,
kernel_shape=(kernel_height, kernel_width),
padding=padding, stride=stride, use_bias=True,
regularizers=self._regularizers,
initializers=_create_conv2d_initializer(
zk.get_shape().as_list()[1:], channels,
(kernel_height, kernel_width)),
name=name)
zk = m(zk)
elif spec[0] == 'linear':
must_flatten = (linear_id == 0 and len(zk.shape) > 2)
if must_flatten:
zk = snt.BatchFlatten()(zk)
name = 'linear_{}'.format(linear_id)
linear_id += 1
output_size = spec[1]
m = snt.Linear(output_size,
regularizers=self._regularizers,
initializers=_create_linear_initializer(
np.prod(zk.get_shape().as_list()[1:]), output_size),
name=name)
zk = m(zk)
elif spec[0] == 'batch_normalization':
if name is None:
          raise ValueError('Batch normalization is only supported after '
                           'convolutional or linear layers.')
name += '_batch_norm'
m = layers.BatchNorm(name=name)
if reuse:
if m.scope_name not in self._batch_norms:
raise ValueError('Cannot set reuse to True without connecting the '
'module once before.')
m = self._batch_norms[m.scope_name]
else:
self._batch_norms[m.scope_name] = m
zk = m(zk, is_training=is_training, test_local_stats=test_local_stats,
reuse=reuse)
elif spec[0] == 'activation':
if spec[1] not in _ALLOWED_ACTIVATIONS:
raise NotImplementedError(
'Only the following activations are supported {}'.format(
list(_ALLOWED_ACTIVATIONS)))
name = None
m = getattr(tf.nn, spec[1])
zk = m(zk)
return zk
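# Illustrative sketch (not part of the library): a small predictor built with
# the layer_types tuples documented in the DNN constructor. The architecture
# and input shape are arbitrary assumptions for this demonstration.
def _example_dnn_predictor():
  layer_types = (
      ('conv2d', (3, 3), 16, 'VALID', 1),
      ('activation', 'relu'),
      ('linear', 64),
      ('batch_normalization',),
      ('activation', 'relu'),
  )
  predictor = DNN(num_classes=10, layer_types=layer_types)
  images = tf.zeros([1, 28, 28, 1])  # Dummy batch of MNIST-sized images.
  return predictor(images, is_training=True)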
def _create_conv2d_initializer(
input_shape, output_channels, kernel_shape, dtype=tf.float32): # pylint: disable=unused-argument
"""Returns a default initializer for the weights of a convolutional module."""
return {
'w': tf.orthogonal_initializer(),
'b': tf.zeros_initializer(dtype=dtype),
}
def _create_linear_initializer(input_size, output_size, dtype=tf.float32): # pylint: disable=unused-argument
"""Returns a default initializer for the weights of a linear module."""
return {
'w': tf.orthogonal_initializer(),
'b': tf.zeros_initializer(dtype=dtype),
}
| interval-bound-propagation-master | interval_bound_propagation/src/model.py |
# coding=utf-8
# Copyright 2019 The Interval Bound Propagation Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper to keep track of the different losses."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import sonnet as snt
import tensorflow.compat.v1 as tf
# Used to pick the least violated specification.
_BIG_NUMBER = 1e25
ScalarMetrics = collections.namedtuple('ScalarMetrics', [
'nominal_accuracy',
'verified_accuracy',
'attack_accuracy',
'attack_success'])
ScalarLosses = collections.namedtuple('ScalarLosses', [
'nominal_cross_entropy',
'attack_cross_entropy',
'verified_loss'])
class Losses(snt.AbstractModule):
"""Helper to compute our losses."""
def __init__(self, predictor, specification=None, pgd_attack=None,
interval_bounds_loss_type='xent',
interval_bounds_hinge_margin=10.,
label_smoothing=0.):
super(Losses, self).__init__(name='losses')
self._predictor = predictor
self._specification = specification
self._attack = pgd_attack
# Loss type can be any combination of:
# xent: cross-entropy loss
# hinge: hinge loss
# softplus: softplus loss
# with
# all: using all specifications.
# most: using only the specification that is the most violated.
# least: using only the specification that is the least violated.
# random_n: using a random subset of the specifications.
# E.g.: "xent_max" or "hinge_random_3".
tokens = interval_bounds_loss_type.split('_', 1)
if len(tokens) == 1:
loss_type, loss_mode = tokens[0], 'all'
else:
loss_type, loss_mode = tokens
if loss_mode.startswith('random'):
loss_mode, num_samples = loss_mode.split('_', 1)
self._interval_bounds_loss_n = int(num_samples)
if loss_type not in ('xent', 'hinge', 'softplus'):
raise ValueError('interval_bounds_loss_type must be either "xent", '
'"hinge" or "softplus".')
if loss_mode not in ('all', 'most', 'random', 'least'):
raise ValueError('interval_bounds_loss_type must be followed by either '
'"all", "most", "random_N" or "least".')
self._interval_bounds_loss_type = loss_type
self._interval_bounds_loss_mode = loss_mode
self._interval_bounds_hinge_margin = interval_bounds_hinge_margin
self._label_smoothing = label_smoothing
def _build(self, labels):
self._build_nominal_loss(labels)
self._build_verified_loss(labels)
self._build_attack_loss(labels)
def _build_nominal_loss(self, labels):
"""Build natural cross-entropy loss on clean data."""
# Cross-entropy.
nominal_logits = self._predictor.logits
if self._label_smoothing > 0:
num_classes = nominal_logits.shape[1].value
one_hot_labels = tf.one_hot(labels, num_classes)
smooth_positives = 1. - self._label_smoothing
smooth_negatives = self._label_smoothing / num_classes
one_hot_labels = one_hot_labels * smooth_positives + smooth_negatives
nominal_cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(
labels=one_hot_labels, logits=nominal_logits)
self._one_hot_labels = one_hot_labels
else:
nominal_cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=nominal_logits)
self._cross_entropy = tf.reduce_mean(nominal_cross_entropy)
# Accuracy.
nominal_correct_examples = tf.equal(labels, tf.argmax(nominal_logits, 1))
self._nominal_accuracy = tf.reduce_mean(
tf.cast(nominal_correct_examples, tf.float32))
def _get_specification_bounds(self):
"""Get upper bounds on specification. Used for building verified loss."""
ibp_bounds = self._specification(self._predictor.modules)
# Compute verified accuracy using IBP bounds.
v = tf.reduce_max(ibp_bounds, axis=1)
self._interval_bounds_accuracy = tf.reduce_mean(
tf.cast(v <= 0., tf.float32))
return ibp_bounds
def _build_verified_loss(self, labels):
"""Build verified loss using an upper bound on specification."""
if not self._specification:
self._verified_loss = tf.constant(0.)
self._interval_bounds_accuracy = tf.constant(0.)
return
# Interval bounds.
bounds = self._get_specification_bounds()
# Select specifications.
if self._interval_bounds_loss_mode == 'all':
pass # Keep bounds the way it is.
elif self._interval_bounds_loss_mode == 'most':
bounds = tf.reduce_max(bounds, axis=1, keepdims=True)
elif self._interval_bounds_loss_mode == 'random':
idx = tf.random.uniform(
[tf.shape(bounds)[0], self._interval_bounds_loss_n],
0, tf.shape(bounds)[1], dtype=tf.int32)
bounds = tf.batch_gather(bounds, idx)
else:
assert self._interval_bounds_loss_mode == 'least'
      # This picks the least violated constraint.
mask = tf.cast(bounds < 0., tf.float32)
smallest_violation = tf.reduce_min(
bounds + mask * _BIG_NUMBER, axis=1, keepdims=True)
has_violations = tf.less(
tf.reduce_sum(mask, axis=1, keepdims=True) + .5,
tf.cast(tf.shape(bounds)[1], tf.float32))
largest_bounds = tf.reduce_max(bounds, axis=1, keepdims=True)
bounds = tf.where(has_violations, smallest_violation, largest_bounds)
if self._interval_bounds_loss_type == 'xent':
v = tf.concat(
[bounds, tf.zeros([tf.shape(bounds)[0], 1], dtype=bounds.dtype)],
axis=1)
l = tf.concat(
[tf.zeros_like(bounds),
tf.ones([tf.shape(bounds)[0], 1], dtype=bounds.dtype)],
axis=1)
self._verified_loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits_v2(
labels=tf.stop_gradient(l), logits=v))
elif self._interval_bounds_loss_type == 'softplus':
self._verified_loss = tf.reduce_mean(
tf.nn.softplus(bounds + self._interval_bounds_hinge_margin))
else:
assert self._interval_bounds_loss_type == 'hinge'
self._verified_loss = tf.reduce_mean(
tf.maximum(bounds, -self._interval_bounds_hinge_margin))
def _build_attack_loss(self, labels):
"""Build adversarial loss using PGD attack."""
# PGD attack.
if not self._attack:
self._attack_accuracy = tf.constant(0.)
self._attack_success = tf.constant(1.)
self._attack_cross_entropy = tf.constant(0.)
return
if not isinstance(self._predictor.inputs, tf.Tensor):
raise ValueError('Multiple inputs is not supported.')
self._attack(self._predictor.inputs, labels)
correct_examples = tf.equal(labels, tf.argmax(self._attack.logits, 1))
self._attack_accuracy = tf.reduce_mean(
tf.cast(correct_examples, tf.float32))
self._attack_success = tf.reduce_mean(
tf.cast(self._attack.success, tf.float32))
if self._label_smoothing > 0:
attack_cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(
labels=self._one_hot_labels, logits=self._attack.logits)
else:
attack_cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=self._attack.logits)
self._attack_cross_entropy = tf.reduce_mean(attack_cross_entropy)
@property
def scalar_metrics(self):
self._ensure_is_connected()
return ScalarMetrics(self._nominal_accuracy,
self._interval_bounds_accuracy,
self._attack_accuracy,
self._attack_success)
@property
def scalar_losses(self):
self._ensure_is_connected()
return ScalarLosses(self._cross_entropy,
self._attack_cross_entropy,
self._verified_loss)
| interval-bound-propagation-master | interval_bound_propagation/src/loss.py |
# coding=utf-8
# Copyright 2019 The Interval Bound Propagation Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
from absl import logging
from interval_bound_propagation.src import attacks
from interval_bound_propagation.src import bounds
from interval_bound_propagation.src import layers
from interval_bound_propagation.src import loss
from interval_bound_propagation.src import specification
import numpy as np
import tensorflow.compat.v1 as tf
# Defines a dataset sample.
Sample = collections.namedtuple('Sample', ['image', 'label'])
def build_dataset(raw_data, batch_size=50, sequential=True):
"""Builds a dataset from raw NumPy tensors."""
images, labels = raw_data
# We need width, height and channel.
if len(images.shape) == 3:
images = np.expand_dims(images, -1)
samples = Sample(images.astype(np.float32) / 255., labels.astype(np.int64))
data = tf.data.Dataset.from_tensor_slices(samples)
if not sequential:
data = data.shuffle(1000)
return data.repeat().batch(batch_size).make_one_shot_iterator().get_next()
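# Illustrative sketch (not part of the library): feeding the raw MNIST arrays
# returned by tf.keras.datasets through build_dataset. The batch size is an
# arbitrary assumption for this demonstration.
def _example_mnist_batches():
  (train_images, train_labels), _ = tf.keras.datasets.mnist.load_data()
  return build_dataset((train_images, train_labels), batch_size=100,
                       sequential=False)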
def randomize(images, init_shape, expand_shape=None, crop_shape=None,
vertical_flip=False):
"""Returns a function that randomly translates and flips images."""
def random_image(image):
"""Randmly translates and flips images."""
image = tf.reshape(image, init_shape)
current_shape = init_shape
if expand_shape is not None and expand_shape != current_shape:
if expand_shape[-1] != current_shape[-1]:
        raise ValueError('Number of channels is not specified correctly.')
image = tf.image.resize_image_with_crop_or_pad(
image, expand_shape[0], expand_shape[1])
current_shape = expand_shape
if crop_shape is not None and crop_shape != current_shape:
image = tf.random_crop(image, crop_shape)
    if vertical_flip:
      # Note: despite the argument name, this applies a random left-right
      # (horizontal) flip.
      image = tf.image.random_flip_left_right(image)
return image
return tf.map_fn(random_image, images)
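# Illustrative sketch (not part of the library): CIFAR-10-style augmentation
# that pads 32x32x3 images to 36x36x3, randomly crops back to 32x32x3 and
# applies a random flip. The shapes are assumptions made for this demo.
def _example_randomize(images):
  return randomize(images, init_shape=(32, 32, 3), expand_shape=(36, 36, 3),
                   crop_shape=(32, 32, 3), vertical_flip=True)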
def linear_schedule(step, init_step, final_step, init_value, final_value):
"""Linear schedule."""
assert final_step >= init_step
if init_step == final_step:
return final_value
rate = tf.cast(step - init_step, tf.float32) / float(final_step - init_step)
linear_value = rate * (final_value - init_value) + init_value
return tf.clip_by_value(linear_value, min(init_value, final_value),
max(init_value, final_value))
def smooth_schedule(step, init_step, final_step, init_value, final_value,
mid_point=.25, beta=4.):
"""Smooth schedule that slowly morphs into a linear schedule."""
assert final_value > init_value
assert final_step >= init_step
assert beta >= 2.
assert mid_point >= 0. and mid_point <= 1.
mid_step = int((final_step - init_step) * mid_point) + init_step
if mid_step <= init_step:
alpha = 1.
else:
t = (mid_step - init_step) ** (beta - 1.)
alpha = (final_value - init_value) / ((final_step - mid_step) * beta * t +
(mid_step - init_step) * t)
mid_value = alpha * (mid_step - init_step) ** beta + init_value
# Tensorflow operation.
is_ramp = tf.cast(step > init_step, tf.float32)
is_linear = tf.cast(step >= mid_step, tf.float32)
return (is_ramp * (
(1. - is_linear) * (
init_value +
alpha * tf.pow(tf.cast(step - init_step, tf.float32), beta)) +
is_linear * linear_schedule(
step, mid_step, final_step, mid_value, final_value)) +
(1. - is_ramp) * init_value)
def build_loss_schedule(step, warmup_steps, rampup_steps, init, final,
warmup=None):
"""Linear schedule builder.
Args:
step: Current step number.
warmup_steps: When step < warmup_steps, set value to warmup.
    rampup_steps: Ramp up the schedule value from init to final in
      rampup_steps steps.
init: Initial schedule value after warmup_steps.
final: Final schedule value after warmup_steps + rampup_steps.
warmup: Schedule value before warmup_steps. When set to None, the warmup
period value is set to init.
Returns:
A schedule tensor.
"""
if warmup is None and init == final:
return init
if rampup_steps < 0:
if warmup is not None:
return tf.cond(step < warmup_steps, lambda: tf.constant(warmup),
lambda: tf.constant(final))
return final
schedule = linear_schedule(
step, warmup_steps, warmup_steps + rampup_steps, init, final)
if warmup is not None:
# Set the value to warmup during warmup process.
return tf.cond(step < warmup_steps,
lambda: tf.constant(warmup), lambda: schedule)
return schedule
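# Illustrative sketch (not part of the library): a schedule that is zero for
# the first 2000 steps, then ramps linearly from 0 to 1 over the next 10000
# steps. The step counts are arbitrary assumptions for this demonstration.
def _example_loss_schedule():
  global_step = tf.train.get_or_create_global_step()
  return build_loss_schedule(global_step, warmup_steps=2000,
                             rampup_steps=10000, init=0., final=1., warmup=0.)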
def add_image_normalization(model, mean, std):
def _model(x, *args, **kwargs):
return model(layers.ImageNorm(mean, std)(x), *args, **kwargs)
return _model
def create_specification(label, num_classes, logits,
specification_type='one_vs_all', collapse=True):
"""Creates a specification of the desired type."""
def _num_targets(name):
tokens = name.rsplit('_', 1)
return int(tokens[1]) if len(tokens) > 1 else 1
if specification_type == 'one_vs_all':
return specification.ClassificationSpecification(label, num_classes,
collapse=collapse)
elif specification_type.startswith('random'):
return specification.RandomClassificationSpecification(
label, num_classes, _num_targets(specification_type), collapse=collapse)
elif specification_type.startswith('least_likely'):
return specification.LeastLikelyClassificationSpecification(
label, num_classes, logits, _num_targets(specification_type),
collapse=collapse)
else:
raise ValueError('Unknown specification type: "{}"'.format(
specification_type))
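# Illustrative sketch (not part of the library): builds a specification that
# targets three randomly chosen incorrect classes per example. The shapes of
# `label` and `logits` are assumed to follow the conventions above.
def _example_random_specification(label, logits):
  return create_specification(label, num_classes=logits.shape[-1].value,
                              logits=logits, specification_type='random_3')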
def create_classification_losses(
global_step, inputs, label, predictor_network, epsilon, loss_weights,
warmup_steps=0, rampup_steps=-1, input_bounds=(0., 1.),
loss_builder=loss.Losses, options=None):
"""Create the training loss."""
# Whether to elide the last linear layer with the specification.
elide = True
# Which loss to use for the IBP loss.
loss_type = 'xent'
# If the loss_type is 'hinge', which margin to use.
loss_margin = 10.
# Amount of label smoothing.
label_smoothing = 0.
  # If non-negative, the global step after which batch normalization stops
  # training.
is_training_off_after = -1
# If True, epsilon changes more smoothly.
smooth_epsilon_schedule = False
# Either 'one_vs_all', 'random_n', 'least_likely_n' or 'none'.
verified_specification = 'one_vs_all'
# Attack options.
attack_specification = 'UntargetedPGDAttack_7x1x1_UnrolledAdam_.1'
attack_scheduled = False
attack_random_init = 1.
# Model arguments.
nominal_args = dict(is_training=True, test_local_stats=False, reuse=False)
attack_args = {
'intermediate': dict(is_training=False, test_local_stats=False,
reuse=True),
'final': dict(is_training=False, test_local_stats=False, reuse=True),
}
if options is not None:
elide = options.get('elide_last_layer', elide)
loss_type = options.get('verified_loss_type', loss_type)
    loss_margin = options.get('verified_loss_margin', loss_margin)
label_smoothing = options.get('label_smoothing', label_smoothing)
is_training_off_after = options.get(
'is_training_off_after', is_training_off_after)
smooth_epsilon_schedule = options.get(
'smooth_epsilon_schedule', smooth_epsilon_schedule)
verified_specification = options.get(
'verified_specification', verified_specification)
attack_specification = options.get(
'attack_specification', attack_specification)
attack_scheduled = options.get('attack_scheduled', attack_scheduled)
attack_random_init = options.get('attack_random_init', attack_random_init)
nominal_args = dict(options.get('nominal_args', nominal_args))
attack_args = dict(options.get('attack_args', attack_args))
def _get_schedule(init, final, warmup=None):
return build_loss_schedule(global_step, warmup_steps, rampup_steps, init,
final, warmup)
def _is_loss_active(init, final, warmup=None):
return init > 0. or final > 0. or (warmup is not None and warmup > 0.)
nominal_xent = _get_schedule(**loss_weights.get('nominal'))
attack_xent = _get_schedule(**loss_weights.get('attack'))
use_attack = _is_loss_active(**loss_weights.get('attack'))
verified_loss = _get_schedule(**loss_weights.get('verified'))
use_verification = _is_loss_active(**loss_weights.get('verified'))
if verified_specification == 'none':
use_verification = False
weight_mixture = loss.ScalarLosses(
nominal_cross_entropy=nominal_xent,
attack_cross_entropy=attack_xent,
verified_loss=verified_loss)
# Ramp-up.
if rampup_steps < 0:
train_epsilon = tf.constant(epsilon)
else:
if smooth_epsilon_schedule:
train_epsilon = smooth_schedule(
global_step, warmup_steps, warmup_steps + rampup_steps, 0., epsilon)
else:
train_epsilon = linear_schedule(
global_step, warmup_steps, warmup_steps + rampup_steps, 0., epsilon)
# Set is_training according to options.
if is_training_off_after >= 0:
is_training = global_step < is_training_off_after
else:
is_training = True
# If the build arguments want training off, we set is_training to False.
# Otherwise, we respect the is_training_off_after option.
def _update_is_training(kwargs):
if 'is_training' in kwargs:
kwargs['is_training'] &= is_training
_update_is_training(nominal_args)
_update_is_training(attack_args['intermediate'])
_update_is_training(attack_args['final'])
logits = predictor_network(inputs, override=True, **nominal_args)
num_classes = predictor_network.output_size
if use_verification:
logging.info('Verification active.')
input_interval_bounds = bounds.IntervalBounds(
tf.maximum(inputs - train_epsilon, input_bounds[0]),
tf.minimum(inputs + train_epsilon, input_bounds[1]))
predictor_network.propagate_bounds(input_interval_bounds)
spec = create_specification(label, num_classes, logits,
verified_specification, collapse=elide)
else:
logging.info('Verification disabled.')
spec = None
if use_attack:
logging.info('Attack active.')
pgd_attack = create_attack(
attack_specification, predictor_network, label,
train_epsilon if attack_scheduled else epsilon,
input_bounds=input_bounds, random_init=attack_random_init,
predictor_kwargs=attack_args)
else:
logging.info('Attack disabled.')
pgd_attack = None
losses = loss_builder(predictor_network, spec, pgd_attack,
interval_bounds_loss_type=loss_type,
interval_bounds_hinge_margin=loss_margin,
label_smoothing=label_smoothing)
losses(label)
train_loss = sum(l * w for l, w in zip(losses.scalar_losses,
weight_mixture))
# Add a regularization loss.
regularizers = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
train_loss = train_loss + tf.reduce_sum(regularizers)
return losses, train_loss, train_epsilon
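# Illustrative sketch (not part of the library): typical wiring of the training
# losses, assuming `predictor` is a VerifiableModelWrapper whose net_builder
# accepts is_training, test_local_stats and reuse keyword arguments (as the
# DNN module does), and that `images` and `labels` come from build_dataset.
# The loss weights, epsilon and step counts are arbitrary assumptions.
def _example_classification_losses(predictor, images, labels):
  global_step = tf.train.get_or_create_global_step()
  loss_weights = {
      'nominal': {'init': 1., 'final': .5},
      'attack': {'init': 0., 'final': 0.},    # PGD attack disabled.
      'verified': {'init': 0., 'final': .5},  # IBP verified loss ramped in.
  }
  return create_classification_losses(
      global_step, images, labels, predictor, epsilon=0.1,
      loss_weights=loss_weights, warmup_steps=2000, rampup_steps=10000)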
# Additional helper code to build specific PGD attacks.
def get_attack_builder(logits, label, name='UntargetedPGDAttack',
random_seed=None, manual_target_class=None):
"""Returns a callable with the same arguments as PGDAttack.
In addition to the callable, this function also returns the targeted class
indices as a Tensor with the same shape as label.
Usage is as follows:
logits = model(inputs)
attack_cls, specification, target_class = get_attack_builder(logits, labels)
# target_class is None, if attack_cls is not a targeted attack.
attack_instance = attack_cls(model, specification, epsilon)
perturbed_inputs = attack_instance(inputs, labels)
Args:
logits: Tensor of nominal logits of shape [batch_size, num_classes].
label: Tensor of labels of shape [batch_size].
name: Name of a PGDAttack class or any of "RandomMoreLikelyPGDAttack",
"RandomMostLikelyPGDAttack", "LeastLikelyMoreLikelyPGDAttack",
"LeastLikelyMostLikelyPGDAttack", "ManualMoreLikelyPGDAttack",
"ManualMostLikelyPGDAttack". Any attack name can be postfixed by
"Xent" to use the cross-entropy loss rather than margin loss.
random_seed: Sets the random seed for "Random*" attacks.
manual_target_class: For "Manual*" attacks, Tensor of target class indices
of shape [batch_size].
Returns:
A callable, a Specification and a Tensor of target label (or None if the
attack is not targeted).
"""
if name.endswith('Xent'):
use_xent = True
name = name[:-4]
else:
use_xent = False
if name.endswith('Linf'):
use_l2 = False
name = name[:-4] # Just for syntactic sugar.
elif name.endswith('L2'):
use_l2 = True
name = name[:-2]
else:
use_l2 = False
num_classes = logits.shape[1].value
if num_classes is None:
raise ValueError('Cannot determine the number of classes from logits.')
# Special case for multi-targeted attacks.
m = re.match(r'((?:MemoryEfficient)?MultiTargetedPGDAttack)'
r'(?:(Top|Random)(\d)*)?', name)
if m is not None:
# Request for a multi-targeted attack.
is_multitargeted = True
name = m.group(1)
is_random = (m.group(2) == 'Random')
max_specs = m.group(3)
max_specs = int(max_specs) if max_specs is not None else 0
else:
is_multitargeted = False
# Any of the readily available attack classes use the standard classification
# specification (one-vs-all) and are untargeted.
if hasattr(attacks, name):
attack_cls = getattr(attacks, name)
parameters = {}
if use_xent:
parameters['objective_fn'] = _maximize_cross_entropy
if use_l2:
parameters['project_perturbation'] = _get_projection(2)
if is_multitargeted:
parameters['max_specifications'] = max_specs
parameters['random_specifications'] = is_random
if parameters:
attack_cls = _change_parameters(attack_cls, **parameters)
attack_specification = specification.ClassificationSpecification(
label, num_classes)
return attack_cls, attack_specification, None
# Attacks can use an adaptive scheme.
if name.endswith('AdaptivePGDAttack'):
name = name[:-len('AdaptivePGDAttack')] + 'PGDAttack'
is_adaptive = True
else:
is_adaptive = False
# Attacks can be preceded by a number to indicate the number of target
# classes. For efficiency, this is only available for *MoreLikely attacks.
m = re.match(r'(\d*)(.*MoreLikelyPGDAttack)', name)
if m is not None:
num_targets = int(m.group(1))
name = m.group(2)
else:
num_targets = 1
# All attacks that are not directly listed in the attacks library are
# targeted attacks that need to be manually constructed.
if name not in ('RandomMoreLikelyPGDAttack', 'RandomMostLikelyPGDAttack',
'LeastLikelyMoreLikelyPGDAttack',
'LeastLikelyMostLikelyPGDAttack',
'ManualMoreLikelyPGDAttack', 'ManualMostLikelyPGDAttack'):
raise ValueError('Unknown attack "{}".'.format(name))
base_attack_cls = (attacks.AdaptiveUntargetedPGDAttack if is_adaptive else
attacks.UntargetedPGDAttack)
if 'More' in name:
if use_xent:
raise ValueError('Using cross-entropy is not supported by '
'"*MoreLikelyPGDAttack".')
attack_cls = base_attack_cls
else:
# We need to reverse the attack direction w.r.t. the specifications.
attack_cls = _change_parameters(
base_attack_cls,
objective_fn=(_minimize_cross_entropy if use_xent else
_minimize_margin),
success_fn=_all_smaller)
if use_l2:
attack_cls = _change_parameters(
attack_cls, project_perturbation=_get_projection(2))
# Set attack specification and target class.
if name == 'RandomMoreLikelyPGDAttack':
# A random target class should become more likely than the true class.
attack_specification = specification.RandomClassificationSpecification(
label, num_classes, num_targets=num_targets, seed=random_seed)
target_class = (tf.squeeze(attack_specification.target_class, 1)
if num_targets == 1 else None)
elif name == 'LeastLikelyMoreLikelyPGDAttack':
attack_specification = specification.LeastLikelyClassificationSpecification(
label, num_classes, logits, num_targets=num_targets)
target_class = (tf.squeeze(attack_specification.target_class, 1)
if num_targets == 1 else None)
elif name == 'ManualMoreLikelyPGDAttack':
attack_specification = specification.TargetedClassificationSpecification(
label, num_classes, manual_target_class)
target_class = (tf.squeeze(attack_specification.target_class, 1)
if num_targets == 1 else None)
elif name == 'RandomMostLikelyPGDAttack':
    # This attack needs to make the random target class the highest logit
    # for it to be successful.
target_class = _get_random_class(label, num_classes, seed=random_seed)
attack_specification = specification.ClassificationSpecification(
target_class, num_classes)
elif name == 'LeastLikelyMostLikelyPGDAttack':
    # This attack needs to make the least likely target class the highest
    # logit for it to be successful.
target_class = _get_least_likely_class(label, num_classes, logits)
attack_specification = specification.ClassificationSpecification(
target_class, num_classes)
else:
assert name == 'ManualMostLikelyPGDAttack'
target_class = manual_target_class
attack_specification = specification.ClassificationSpecification(
target_class, num_classes)
return attack_cls, attack_specification, target_class
def create_attack(attack_config, predictor, label, epsilon,
input_bounds=(0., 1.), random_init=1., random_seed=None,
predictor_kwargs=None, logits=None):
"""Creates an attack from a textual configuration.
Args:
attack_config: String with format "[AttackClass]_[steps]x
[inner_restarts]x[outer_restarts]_[OptimizerClass]_[step_size]". Inner
restarts involve tiling the input (they are more runtime efficient but
use more memory), while outer restarts use a tf.while_loop.
predictor: A VerifiableModelWrapper or StandardModelWrapper instance.
label: A Tensor of labels.
epsilon: Perturbation radius.
input_bounds: Tuple with minimum and maximum value allowed on inputs.
    random_init: Probability of starting from a random location rather than
      the nominal input image.
random_seed: Sets the random seed for "Random*" attacks.
predictor_kwargs: Dict of arguments passed to the predictor network.
logits: Logits corresponding to the nominal inputs. If None, it assumes that
predictor has a property named `logits`.
Returns:
An Attack instance.
"""
if attack_config:
name, steps_and_restarts, optimizer, step_size = re.split(
r'_\s*(?![^()]*\))', attack_config, maxsplit=3)
    # Optimizers can specify constructor arguments using
# (arg1=value1;arg2=value2) syntax.
m = re.match(r'([^\(]*)\(([^\)]*)\)', optimizer)
if m is not None:
optimizer = m.group(1)
kwargs = 'dict(' + m.group(2).replace(';', ',') + ')'
kwargs = eval(kwargs) # pylint: disable=eval-used
else:
kwargs = {}
optimizer = getattr(attacks, optimizer)
# Wrap optimizer if needed.
if kwargs:
optimizer = attacks.wrap_optimizer(optimizer, **kwargs)
num_steps, inner_restarts, outer_restarts = (
int(i) for i in steps_and_restarts.split('x', 3))
step_size = step_size.replace(':', ',')
else:
name = 'UntargetedPGDAttack'
num_steps = 200
inner_restarts = 1
outer_restarts = 1
optimizer = attacks.UnrolledAdam
step_size = .1
def attack_learning_rate_fn(t):
return parse_learning_rate(t, step_size)
if logits is None:
logits = predictor.logits
attack_cls, attack_specification, target_class = get_attack_builder(
logits, label, name=name, random_seed=random_seed)
attack_strategy = attack_cls(
predictor, attack_specification, epsilon, num_steps=num_steps,
num_restarts=inner_restarts, input_bounds=input_bounds,
optimizer_builder=optimizer, lr_fn=attack_learning_rate_fn,
random_init=random_init, predictor_kwargs=predictor_kwargs)
attack_strategy.target_class = target_class
if outer_restarts > 1:
attack_strategy = attacks.RestartedAttack(
attack_strategy, num_restarts=outer_restarts)
return attack_strategy
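# Illustrative sketch (added for clarity, not part of the original file): a
# hypothetical `attack_config` string for `create_attack`, following the
# "[AttackClass]_[steps]x[inner_restarts]x[outer_restarts]_[OptimizerClass]_
# [step_size]" format documented above (':' inside the step size is turned
# into ',' before being parsed by `parse_learning_rate`):
#
#   attack = create_attack(
#       'UntargetedPGDAttack_200x2x1_UnrolledAdam_0.1:0.01@100',
#       predictor, label, epsilon=.1)
#
# This would build a 200-step untargeted PGD attack with 2 tiled restarts,
# Adam updates, and a step size of 0.1 dropping to 0.01 at step 100.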
def parse_learning_rate(step, learning_rate):
"""Returns the learning rate as a tensor."""
if isinstance(learning_rate, float):
return learning_rate
# Learning rate schedule of the form:
  # initial_learning_rate[,learning_rate@step]*. E.g., "1e-3" or
  # "1e-3,1e-4@15000,1e-5@25000". We use eval to allow learning rates specified
  # as fractions (e.g., 2/255).
tokens = learning_rate.split(',')
first_lr = float(eval(tokens[0])) # pylint: disable=eval-used
if len(tokens) == 1:
return tf.constant(first_lr, dtype=tf.float32)
# Parse steps.
init_values = [first_lr]
final_values = []
init_step = [0]
final_step = []
for t in tokens[1:]:
if '@' in t:
lr, boundary = t.split('@', 1)
is_linear = False
elif 'S' in t: # Syntactic sugar to indicate a step.
lr, boundary = t.split('S', 1)
is_linear = False
elif 'L' in t:
lr, boundary = t.split('L', 1)
is_linear = True
else:
raise ValueError('Unknown specification.')
lr = float(eval(lr)) # pylint: disable=eval-used
init_values.append(lr)
if is_linear:
final_values.append(lr)
else:
final_values.append(init_values[-2])
boundary = int(boundary)
init_step.append(boundary)
final_step.append(boundary)
large_step = max(final_step) + 1
final_step.append(large_step)
final_values.append(lr)
# Find current index.
boundaries = list(final_step) + [large_step + 2]
boundaries = tf.convert_to_tensor(boundaries, dtype=tf.int64)
b = boundaries - tf.minimum(step + 1, large_step + 1)
large_step = tf.constant(
large_step, shape=boundaries.shape, dtype=step.dtype)
b = tf.where(b < 0, large_step, b)
idx = tf.minimum(tf.argmin(b), len(init_values) - 1)
init_step = tf.convert_to_tensor(init_step, dtype=tf.float32)
final_step = tf.convert_to_tensor(final_step, dtype=tf.float32)
init_values = tf.convert_to_tensor(init_values, dtype=tf.float32)
final_values = tf.convert_to_tensor(final_values, dtype=tf.float32)
x1 = tf.gather(init_step, idx)
x2 = tf.gather(final_step, idx)
y1 = tf.gather(init_values, idx)
y2 = tf.gather(final_values, idx)
return (tf.cast(step, tf.float32) - x1) / (x2 - x1) * (y2 - y1) + y1
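# Illustrative sketch (added for clarity, not part of the original file),
# tracing the schedule parser above on an assumed input:
#
#   lr = parse_learning_rate(step, '1e-3,1e-4@15000,1e-5L25000')
#
# yields 1e-3 up to step 15000, jumps to 1e-4 at step 15000 ('@' or 'S' marks a
# step change), then decays linearly from 1e-4 to 1e-5 between steps 15000 and
# 25000 ('L' marks a linear ramp), and stays at 1e-5 afterwards.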
def _change_parameters(attack_cls, **updated_kwargs):
def _build_new_attack(*args, **kwargs):
kwargs.update(updated_kwargs)
return attack_cls(*args, **kwargs)
return _build_new_attack
def _get_random_class(label, num_classes, seed=None):
batch_size = tf.shape(label)[0]
target_label = tf.random.uniform(
shape=(batch_size,), minval=1, maxval=num_classes, dtype=tf.int64,
seed=seed)
return tf.mod(tf.cast(label, tf.int64) + target_label, num_classes)
def _get_least_likely_class(label, num_classes, logits):
target_label = tf.argmin(logits, axis=1, output_type=tf.int64)
# In the off-chance that the least likely class is the true class, the target
  # class is changed to be the next index.
return tf.mod(target_label + tf.cast(
tf.equal(target_label, tf.cast(label, tf.int64)), tf.int64), num_classes)
def _maximize_cross_entropy(specification_bounds):
"""Used to maximize the cross entropy loss."""
# Bounds has shape [num_restarts, batch_size, num_specs].
shape = tf.shape(specification_bounds)
added_shape = [shape[0], shape[1], 1]
v = tf.concat([
specification_bounds,
tf.zeros(added_shape, dtype=specification_bounds.dtype)], axis=2)
l = tf.concat([
tf.zeros_like(specification_bounds),
tf.ones(added_shape, dtype=specification_bounds.dtype)], axis=2)
  # Cross-entropy w.r.t. the appended dummy target; the attack maximizes it.
return tf.nn.softmax_cross_entropy_with_logits_v2(
labels=tf.stop_gradient(l), logits=v)
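# Note (added for clarity, not part of the original file): with the appended
# zero logit and the one-hot target on that logit, the value above equals
# log(1 + sum_j exp(bounds_j)), so maximizing it pushes all specification
# bounds upwards (a smooth surrogate for maximizing the margin).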
def _minimize_cross_entropy(specification_bounds):
return -_maximize_cross_entropy(specification_bounds)
def _maximize_margin(specification_bounds):
# Bounds has shape [num_restarts, batch_size, num_specs].
return tf.reduce_max(specification_bounds, axis=-1)
def _minimize_margin(specification_bounds):
return -_maximize_margin(specification_bounds)
def _all_smaller(specification_bounds):
specification_bounds = tf.reduce_max(specification_bounds, axis=-1)
return specification_bounds < 0
def _get_projection(p):
"""Returns a projection function."""
if p == np.inf:
def _projection(perturbation, epsilon, input_image, image_bounds):
clipped_perturbation = tf.clip_by_value(perturbation, -epsilon, epsilon)
new_image = tf.clip_by_value(input_image + clipped_perturbation,
image_bounds[0], image_bounds[1])
return new_image - input_image
return _projection
elif p == 2:
def _projection(perturbation, epsilon, input_image, image_bounds):
axes = list(range(1, len(perturbation.get_shape())))
clipped_perturbation = tf.clip_by_norm(perturbation, epsilon, axes=axes)
new_image = tf.clip_by_value(input_image + clipped_perturbation,
image_bounds[0], image_bounds[1])
return new_image - input_image
return _projection
else:
raise ValueError('p must be np.inf or 2.')
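# Illustrative note (added for clarity, not part of the original file): the
# projections returned by `_get_projection` keep the perturbed image inside the
# epsilon-ball (L-infinity or L2) and inside the valid image range, e.g.:
#
#   project = _get_projection(np.inf)
#   perturbation = project(perturbation, epsilon=8. / 255,
#                          input_image=image, image_bounds=(0., 1.))
#
# where `image` is a hypothetical batch of images in [0, 1].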
| interval-bound-propagation-master | interval_bound_propagation/src/utils.py |
# coding=utf-8
# Copyright 2019 The Interval Bound Propagation Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines the output specifications."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from absl import logging
from interval_bound_propagation.src import bounds as bounds_lib
from interval_bound_propagation.src import verifiable_wrapper
import six
import sonnet as snt
import tensorflow.compat.v1 as tf
@six.add_metaclass(abc.ABCMeta)
class Specification(snt.AbstractModule):
"""Defines a specification."""
def __init__(self, name, collapse=True):
super(Specification, self).__init__(name=name)
self._collapse = collapse
@abc.abstractmethod
def _build(self, modules):
"""Computes the worst-case specification value."""
@abc.abstractmethod
def evaluate(self, logits):
"""Computes the specification value.
Args:
logits: The logits Tensor can have different shapes, i.e.,
[batch_size, num_classes]: The output should be [batch_size, num_specs].
[num_restarts, batch_size, num_classes]: The output should be
[num_restarts, batch_size, num_specs]. Used by UntargetedPGDAttack.
[num_restarts, num_specs, batch_size, num_classes]: The output should
be [num_restarts, batch_size, num_specs]. For this case, the
specifications must be evaluated individually for each column
(axis = 1). Used by MultiTargetedPGDAttack.
Returns:
The specification values evaluated at the network output.
"""
@abc.abstractproperty
def num_specifications(self):
"""Returns the number of specifications."""
@property
def collapse(self):
return self._collapse
class LinearSpecification(Specification):
"""Linear specifications: c^T * z_K + d <= 0."""
def __init__(self, c, d=None, prune_irrelevant=True, collapse=True):
"""Builds a linear specification module."""
super(LinearSpecification, self).__init__(name='specs', collapse=collapse)
# c has shape [batch_size, num_specifications, num_outputs]
# d has shape [batch_size, num_specifications]
# Some specifications may be irrelevant (not a function of the output).
# We automatically remove them for clarity. We expect the number of
# irrelevant specs to be equal for all elements of a batch.
# Shape is [batch_size, num_specifications]
if prune_irrelevant:
irrelevant = tf.equal(tf.reduce_sum(
tf.cast(tf.abs(c) > 1e-6, tf.int32), axis=-1, keepdims=True), 0)
batch_size = tf.shape(c)[0]
num_outputs = tf.shape(c)[2]
irrelevant = tf.tile(irrelevant, [1, 1, num_outputs])
self._c = tf.reshape(
tf.boolean_mask(c, tf.logical_not(irrelevant)),
[batch_size, -1, num_outputs])
else:
self._c = c
self._d = d
def _build(self, modules):
"""Outputs specification value."""
# inputs have shape [batch_size, num_outputs].
if not (self.collapse and
isinstance(modules[-1], verifiable_wrapper.LinearFCWrapper)):
logging.info('Elision of last layer disabled.')
bounds = modules[-1].output_bounds
w = self._c
b = self._d
else:
logging.info('Elision of last layer active.')
# Collapse the last layer.
bounds = modules[-1].input_bounds
w = modules[-1].module.w
b = modules[-1].module.b
w = tf.einsum('ijk,lk->ijl', self._c, w)
b = tf.einsum('ijk,k->ij', self._c, b)
if self._d is not None:
b += self._d
# Maximize z * w + b s.t. lower <= z <= upper.
bounds = bounds_lib.IntervalBounds.convert(bounds)
c = (bounds.lower + bounds.upper) / 2.
r = (bounds.upper - bounds.lower) / 2.
c = tf.einsum('ij,ikj->ik', c, w)
if b is not None:
c += b
r = tf.einsum('ij,ikj->ik', r, tf.abs(w))
# output has shape [batch_size, num_specifications].
return c + r
def evaluate(self, logits):
if len(logits.shape) == 2:
output = tf.einsum('ij,ikj->ik', logits, self._c)
elif len(logits.shape) == 3:
output = tf.einsum('rij,ikj->rik', logits, self._c)
else:
assert len(logits.shape) == 4
output = tf.einsum('rsbo,bso->rbs', logits, self._c)
if self._d is not None:
output += self._d
return output
@property
def num_specifications(self):
return tf.shape(self._c)[1]
@property
def c(self):
return self._c
@property
def d(self):
return self._d
class ClassificationSpecification(Specification):
"""Creates a linear specification that corresponds to a classification.
This class is not a standard LinearSpecification as it does not materialize
the c and d tensors.
"""
def __init__(self, label, num_classes, collapse=True):
super(ClassificationSpecification, self).__init__(name='specs',
collapse=collapse)
self._label = label
self._num_classes = num_classes
# Precompute indices.
with self._enter_variable_scope():
indices = []
for i in range(self._num_classes):
indices.append(list(range(i)) + list(range(i + 1, self._num_classes)))
indices = tf.constant(indices, dtype=tf.int32)
self._correct_idx, self._wrong_idx = self._build_indices(label, indices)
def _build(self, modules):
if not (self.collapse and
isinstance(modules[-1], verifiable_wrapper.LinearFCWrapper)):
logging.info('Elision of last layer disabled.')
bounds = modules[-1].output_bounds
bounds = bounds_lib.IntervalBounds.convert(bounds)
correct_class_logit = tf.gather_nd(bounds.lower, self._correct_idx)
wrong_class_logits = tf.gather_nd(bounds.upper, self._wrong_idx)
return wrong_class_logits - tf.expand_dims(correct_class_logit, 1)
logging.info('Elision of last layer active.')
bounds = modules[-1].input_bounds
bounds = bounds_lib.IntervalBounds.convert(bounds)
batch_size = tf.shape(bounds.lower)[0]
w = modules[-1].module.w
b = modules[-1].module.b
w_t = tf.tile(tf.expand_dims(tf.transpose(w), 0), [batch_size, 1, 1])
b_t = tf.tile(tf.expand_dims(b, 0), [batch_size, 1])
w_correct = tf.expand_dims(tf.gather_nd(w_t, self._correct_idx), -1)
b_correct = tf.expand_dims(tf.gather_nd(b_t, self._correct_idx), 1)
w_wrong = tf.transpose(tf.gather_nd(w_t, self._wrong_idx), [0, 2, 1])
b_wrong = tf.gather_nd(b_t, self._wrong_idx)
w = w_wrong - w_correct
b = b_wrong - b_correct
# Maximize z * w + b s.t. lower <= z <= upper.
c = (bounds.lower + bounds.upper) / 2.
r = (bounds.upper - bounds.lower) / 2.
c = tf.einsum('ij,ijk->ik', c, w)
if b is not None:
c += b
r = tf.einsum('ij,ijk->ik', r, tf.abs(w))
return c + r
def evaluate(self, logits):
if len(logits.shape) == 2:
correct_class_logit = tf.gather_nd(logits, self._correct_idx)
correct_class_logit = tf.expand_dims(correct_class_logit, -1)
wrong_class_logits = tf.gather_nd(logits, self._wrong_idx)
elif len(logits.shape) == 3:
# [num_restarts, batch_size, num_classes] to
# [num_restarts, batch_size, num_specs]
logits = tf.transpose(logits, [1, 2, 0]) # Put restart dimension last.
correct_class_logit = tf.gather_nd(logits, self._correct_idx)
correct_class_logit = tf.transpose(correct_class_logit)
correct_class_logit = tf.expand_dims(correct_class_logit, -1)
wrong_class_logits = tf.gather_nd(logits, self._wrong_idx)
wrong_class_logits = tf.transpose(wrong_class_logits, [2, 0, 1])
else:
assert len(logits.shape) == 4
# [num_restarts, num_specs, batch_size, num_classes] to
# [num_restarts, batch_size, num_specs].
logits = tf.transpose(logits, [2, 3, 1, 0])
correct_class_logit = tf.gather_nd(logits, self._correct_idx)
correct_class_logit = tf.transpose(correct_class_logit, [2, 0, 1])
batch_size = tf.shape(logits)[0]
wrong_idx = tf.concat([
self._wrong_idx,
tf.tile(tf.reshape(tf.range(self.num_specifications, dtype=tf.int32),
[1, self.num_specifications, 1]),
[batch_size, 1, 1])], axis=-1)
wrong_class_logits = tf.gather_nd(logits, wrong_idx)
wrong_class_logits = tf.transpose(wrong_class_logits, [2, 0, 1])
return wrong_class_logits - correct_class_logit
@property
def num_specifications(self):
return self._num_classes - 1
@property
def correct_idx(self):
return self._correct_idx
@property
def wrong_idx(self):
return self._wrong_idx
def _build_indices(self, label, indices):
batch_size = tf.shape(label)[0]
i = tf.range(batch_size, dtype=tf.int32)
correct_idx = tf.stack([i, tf.cast(label, tf.int32)], axis=1)
wrong_idx = tf.stack([
tf.tile(tf.reshape(i, [batch_size, 1]), [1, self._num_classes - 1]),
tf.gather(indices, label),
], axis=2)
return correct_idx, wrong_idx
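# Illustrative sketch (added for clarity, not part of the original file): for
# num_classes=3 and label=[1], `_build_indices` yields correct_idx=[[0, 1]] and
# wrong_idx=[[[0, 0], [0, 2]]], so `evaluate` on 2D logits returns
# logits[:, [0, 2]] - logits[:, [1]], i.e. one margin per incorrect class.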
class TargetedClassificationSpecification(ClassificationSpecification):
"""Defines a specification that compares the true class with another."""
def __init__(self, label, num_classes, target_class, collapse=True):
super(TargetedClassificationSpecification, self).__init__(
label, num_classes, collapse=collapse)
batch_size = tf.shape(label)[0]
if len(target_class.shape) == 1:
target_class = tf.reshape(target_class, [batch_size, 1])
self._num_specifications = target_class.shape[1].value
if self._num_specifications is None:
raise ValueError('Cannot retrieve the number of target classes')
self._target_class = target_class
i = tf.range(batch_size, dtype=tf.int32)
self._wrong_idx = tf.stack([
tf.tile(tf.reshape(i, [batch_size, 1]), [1, self.num_specifications]),
target_class
], axis=2)
@property
def target_class(self):
"""Returns the target class index."""
return self._target_class
@property
def num_specifications(self):
return self._num_specifications
class RandomClassificationSpecification(TargetedClassificationSpecification):
"""Creates a single random specification that targets a random class."""
def __init__(self, label, num_classes, num_targets=1, seed=None,
collapse=True):
# Overwrite the target indices. Each session.run() call gets new target
    # indices, but the indices should remain the same across restarts.
batch_size = tf.shape(label)[0]
j = tf.random.uniform(shape=(batch_size, num_targets), minval=1,
maxval=num_classes, dtype=tf.int32, seed=seed)
target_class = tf.mod(tf.cast(tf.expand_dims(label, -1), tf.int32) + j,
num_classes)
super(RandomClassificationSpecification, self).__init__(
label, num_classes, target_class, collapse=collapse)
class LeastLikelyClassificationSpecification(
TargetedClassificationSpecification):
"""Creates a single specification that targets the least likely class."""
def __init__(self, label, num_classes, logits, num_targets=1, collapse=True):
# Do not target the true class. If the true class is the least likely to
# be predicted, it is fine to target any other class as the attack will
    # be successful anyway.
j = tf.nn.top_k(-logits, k=num_targets, sorted=False).indices
l = tf.expand_dims(label, 1)
target_class = tf.mod(
j + tf.cast(tf.equal(j, tf.cast(l, tf.int32)), tf.int32), num_classes)
super(LeastLikelyClassificationSpecification, self).__init__(
label, num_classes, target_class, collapse=collapse)
| interval-bound-propagation-master | interval_bound_propagation/src/specification.py |
# coding=utf-8
# Copyright 2019 The Interval Bound Propagation Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Additional Sonnet modules."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sonnet as snt
import tensorflow.compat.v1 as tf
# Slightly altered version of snt.BatchNorm that makes it easy to grab which
# mean and variance are currently in use (whether the last _build was
# invoked with is_training=True or False).
# Modifications include:
# - Removing the fused option (which we do not support).
# - Removing test_local_stats (which we do not support).
# - Providing mean and variance properties.
# - Providing scale and bias properties that return None if there are none.
class BatchNorm(snt.BatchNorm):
"""Batch normalization module, including optional affine transformation."""
def __init__(self, axis=None, offset=True, scale=False,
decay_rate=0.999, eps=1e-3, initializers=None,
partitioners=None, regularizers=None,
update_ops_collection=None, name='batch_norm'):
"""Constructs a BatchNorm module. See original code for more details."""
super(BatchNorm, self).__init__(
axis=axis, offset=offset, scale=scale, decay_rate=decay_rate, eps=eps,
initializers=initializers, partitioners=partitioners,
regularizers=regularizers, fused=False,
update_ops_collection=update_ops_collection, name=name)
def _build_statistics(self, input_batch, axis, use_batch_stats, stat_dtype):
"""Builds the statistics part of the graph when using moving variance."""
self._mean, self._variance = super(BatchNorm, self)._build_statistics(
input_batch, axis, use_batch_stats, stat_dtype)
return self._mean, self._variance
def _build(self, input_batch, is_training=True, test_local_stats=False,
reuse=False):
"""Connects the BatchNorm module into the graph.
Args:
input_batch: A Tensor of arbitrary dimension. By default, the final
dimension is not reduced over when computing the minibatch statistics.
is_training: A boolean to indicate if the module should be connected in
training mode, meaning the moving averages are updated. Can be a Tensor.
test_local_stats: A boolean to indicate if the statistics should be from
the local batch. When is_training is True, test_local_stats is not used.
reuse: If True, the statistics computed by previous call to _build
are used and is_training is ignored. Otherwise, behaves like a normal
batch normalization layer.
Returns:
A tensor with the same shape as `input_batch`.
Raises:
ValueError: If `axis` is not valid for the
input shape or has negative entries.
"""
if reuse:
self._ensure_is_connected()
return tf.nn.batch_normalization(
input_batch, self._mean, self._variance, self._beta, self._gamma,
self._eps, name='batch_norm')
else:
return super(BatchNorm, self)._build(input_batch, is_training,
test_local_stats=test_local_stats)
@property
def scale(self):
self._ensure_is_connected()
return tf.stop_gradient(self._gamma) if self._gamma is not None else None
@property
def bias(self):
self._ensure_is_connected()
return tf.stop_gradient(self._beta) if self._beta is not None else None
@property
def mean(self):
self._ensure_is_connected()
return tf.stop_gradient(self._mean)
@property
def variance(self):
self._ensure_is_connected()
return tf.stop_gradient(self._variance)
@property
def epsilon(self):
self._ensure_is_connected()
return self._eps
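# Illustrative sketch (added for clarity, not part of the original file):
# calling the module again with reuse=True re-applies the statistics captured
# by the most recent regular call, e.g.
#
#   bn = BatchNorm(scale=True)
#   y = bn(x, is_training=True)        # Computes and stores mean/variance.
#   y_other = bn(x_other, reuse=True)  # Re-uses the stored statistics.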
class ImageNorm(snt.AbstractModule):
"""Module that does per channel normalization."""
def __init__(self, mean, std, name='image_norm'):
"""Constructs a module that does (x[:, :, c] - mean[c]) / std[c]."""
super(ImageNorm, self).__init__(name=name)
if isinstance(mean, float):
mean = [mean]
if isinstance(std, float):
std = [std]
scale = []
for s in std:
if s <= 0.:
        raise ValueError('Cannot use non-positive standard deviations.')
scale.append(1. / s)
with self._enter_variable_scope():
# Using broadcasting.
self._scale = tf.constant(scale, dtype=tf.float32)
self._offset = tf.constant(mean, dtype=tf.float32)
def _build(self, inputs):
return self.apply(inputs)
@property
def scale(self):
return self._scale
@property
def offset(self):
return self._offset
# Provide a function that allows to use the IncreasingMonotonicWrapper.
def apply(self, inputs):
return (inputs - self._offset) * self._scale
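# Illustrative sketch (added for clarity, not part of the original file):
# per-channel normalization with assumed (hypothetical) statistics.
#
#   normalize = ImageNorm(mean=[0.485, 0.456, 0.406],
#                         std=[0.229, 0.224, 0.225])
#   normalized = normalize(images)  # (images[..., c] - mean[c]) / std[c]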
| interval-bound-propagation-master | interval_bound_propagation/src/layers.py |
# coding=utf-8
# Copyright 2019 The Interval Bound Propagation Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities to define attacks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import six
import sonnet as snt
import tensorflow.compat.v1 as tf
nest = tf.nest
@six.add_metaclass(abc.ABCMeta)
class UnrolledOptimizer(object):
"""In graph optimizer to be used in tf.while_loop."""
def __init__(self, colocate_gradients_with_ops=False):
self._colocate_gradients_with_ops = colocate_gradients_with_ops
@abc.abstractmethod
def minimize(self, loss, x, optim_state):
"""Compute a new value of `x` to minimize `loss`.
Args:
loss: A scalar Tensor, the value to be minimized. `loss` should be a
continuous function of `x` which supports gradients, `loss = f(x)`.
x: A list of Tensors, the values to be updated. This is analogous to the
`var_list` argument in standard TF Optimizer.
optim_state: A (possibly nested) dict, containing any state info needed
for the optimizer.
Returns:
      new_x: A list of Tensors, the same length as `x`, containing the updated
        values.
      new_optim_state: A new state, with the same structure as `optim_state`,
        containing the updated optimizer state.
"""
@abc.abstractmethod
def init_state(self, x):
"""Returns the initial state of the optimizer.
Args:
x: A list of Tensors, which will be optimized.
Returns:
Any structured output.
"""
class UnrolledGradientDescent(UnrolledOptimizer):
"""Vanilla gradient descent optimizer."""
_State = collections.namedtuple('State', ['iteration']) # pylint: disable=invalid-name
def __init__(self, lr=.1, lr_fn=None, fgsm=False,
colocate_gradients_with_ops=False):
super(UnrolledGradientDescent, self).__init__(
colocate_gradients_with_ops=colocate_gradients_with_ops)
self._lr_fn = (lambda i: lr) if lr_fn is None else lr_fn
self._fgsm = fgsm
def init_state(self, unused_x):
return self._State(tf.constant(0, dtype=tf.int64))
def minimize(self, loss, x, optim_state):
"""Refer to parent class documentation."""
lr = self._lr_fn(optim_state.iteration)
grads = self.gradients(loss, x)
if self._fgsm:
grads = [tf.sign(g) for g in grads]
new_x = [None] * len(x)
for i in range(len(x)):
new_x[i] = x[i] - lr * grads[i]
new_optim_state = self._State(optim_state.iteration + 1)
return new_x, new_optim_state
def gradients(self, loss, x):
return tf.gradients(
loss, x, colocate_gradients_with_ops=self._colocate_gradients_with_ops)
# Syntactic sugar.
class UnrolledFGSMDescent(UnrolledGradientDescent):
"""Identical to UnrolledGradientDescent but forces FGM steps."""
def __init__(self, lr=.1, lr_fn=None,
colocate_gradients_with_ops=False):
super(UnrolledFGSMDescent, self).__init__(
lr, lr_fn, True, colocate_gradients_with_ops)
class UnrolledAdam(UnrolledOptimizer):
"""The Adam optimizer defined in https://arxiv.org/abs/1412.6980."""
_State = collections.namedtuple('State', ['t', 'm', 'u']) # pylint: disable=invalid-name
def __init__(self, lr=0.1, lr_fn=None, beta1=0.9, beta2=0.999, epsilon=1e-9,
colocate_gradients_with_ops=False):
super(UnrolledAdam, self).__init__(
colocate_gradients_with_ops=colocate_gradients_with_ops)
self._lr_fn = (lambda i: lr) if lr_fn is None else lr_fn
self._beta1 = beta1
self._beta2 = beta2
self._epsilon = epsilon
def init_state(self, x):
return self._State(
t=tf.constant(0, dtype=tf.int64),
m=[tf.zeros_like(v) for v in x],
u=[tf.zeros_like(v) for v in x])
def _apply_gradients(self, grads, x, optim_state):
"""Applies gradients."""
lr = self._lr_fn(optim_state.t)
new_optim_state = self._State(
t=optim_state.t + 1,
m=[None] * len(x),
u=[None] * len(x))
t = tf.cast(new_optim_state.t, tf.float32)
new_x = [None] * len(x)
for i in range(len(x)):
g = grads[i]
m_old = optim_state.m[i]
u_old = optim_state.u[i]
new_optim_state.m[i] = self._beta1 * m_old + (1. - self._beta1) * g
new_optim_state.u[i] = self._beta2 * u_old + (1. - self._beta2) * g * g
m_hat = new_optim_state.m[i] / (1. - tf.pow(self._beta1, t))
u_hat = new_optim_state.u[i] / (1. - tf.pow(self._beta2, t))
new_x[i] = x[i] - lr * m_hat / (tf.sqrt(u_hat) + self._epsilon)
return new_x, new_optim_state
def minimize(self, loss, x, optim_state):
grads = self.gradients(loss, x)
return self._apply_gradients(grads, x, optim_state)
def gradients(self, loss, x):
return tf.gradients(
loss, x, colocate_gradients_with_ops=self._colocate_gradients_with_ops)
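# Note (added for clarity, not part of the original file): `_apply_gradients`
# above implements the standard Adam update,
#   m_t = beta1 * m_{t-1} + (1 - beta1) * g
#   u_t = beta2 * u_{t-1} + (1 - beta2) * g**2
#   x_t = x_{t-1}
#         - lr * (m_t / (1 - beta1**t)) / (sqrt(u_t / (1 - beta2**t)) + epsilon)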
def _spsa_gradients(loss_fn, x, delta=0.01, num_samples=16, num_iterations=4):
"""Compute gradient estimates using SPSA.
Args:
loss_fn: Callable that takes a single argument of shape [batch_size, ...]
and returns the loss contribution of each element of the batch as a
tensor of shape [batch_size].
x: List of tensors with a single element. We only support computation of
the gradient of the loss with respect to x[0]. We take a list as input to
keep the same API call as tf.gradients.
    delta: The gradients are estimated by evaluating the loss at x - delta and
      x + delta.
num_samples: The total number of random samples used to compute the gradient
is `num_samples` times `num_iterations`. `num_samples` contributes to the
gradient by tiling `x` `num_samples` times.
num_iterations: The total number of random samples used to compute the
gradient is `num_samples` times `num_iterations`. `num_iterations`
contributes to the gradient by iterating using a `tf.while_loop`.
Returns:
List of tensors with a single element corresponding to the gradient of
loss_fn(x[0]) with respect to x[0].
"""
if len(x) != 1:
raise NotImplementedError('SPSA gradients with respect to multiple '
'variables is not supported.')
# loss_fn takes a single argument.
tensor = x[0]
def _get_delta(x):
return delta * tf.sign(
tf.random_uniform(tf.shape(x), minval=-1., maxval=1., dtype=x.dtype))
# Process batch_size samples at a time.
def cond(i, *_):
return tf.less(i, num_iterations)
def loop_body(i, total_grad):
"""Compute gradient estimate."""
batch_size = tf.shape(tensor)[0]
# The tiled tensor has shape [num_samples, batch_size, ...]
tiled_tensor = tf.expand_dims(tensor, axis=0)
tiled_tensor = tf.tile(tiled_tensor,
[num_samples] + [1] * len(tensor.shape))
# The tiled tensor has now shape [2, num_samples, batch_size, ...].
delta = _get_delta(tiled_tensor)
tiled_tensor = tf.stack(
[tiled_tensor + delta, tiled_tensor - delta], axis=0)
# Compute loss with shape [2, num_samples, batch_size].
losses = loss_fn(
tf.reshape(tiled_tensor,
[2 * num_samples, batch_size] + tensor.shape.as_list()[1:]))
losses = tf.reshape(losses, [2, num_samples, batch_size])
# Compute approximate gradient using broadcasting.
shape = losses.shape.as_list() + [1] * (len(tensor.shape) - 1)
shape = [(s or -1) for s in shape] # Remove None.
losses = tf.reshape(losses, shape)
g = tf.reduce_mean((losses[0] - losses[1]) / (2. * delta), axis=0)
return [i + 1, g / num_iterations + total_grad]
_, g = tf.while_loop(
cond,
loop_body,
loop_vars=[tf.constant(0.), tf.zeros_like(tensor)],
parallel_iterations=1,
back_prop=False)
return [g]
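# Note (added for clarity, not part of the original file): each SPSA sample in
# `loop_body` above draws a random sign perturbation `delta` and forms the
# two-sided estimate (loss(x + delta) - loss(x - delta)) / (2 * delta); the
# estimates are averaged over the `num_samples` tiled copies and over the
# `num_iterations` iterations of the tf.while_loop.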
@six.add_metaclass(abc.ABCMeta)
class UnrolledSPSA(object):
"""Abstract class that represents an optimizer based on SPSA."""
class UnrolledSPSAGradientDescent(UnrolledGradientDescent, UnrolledSPSA):
"""Optimizer for gradient-free attacks in https://arxiv.org/abs/1802.05666.
Gradients estimates are computed using Simultaneous Perturbation Stochastic
Approximation (SPSA).
"""
def __init__(self, lr=0.1, lr_fn=None, fgsm=False,
colocate_gradients_with_ops=False, delta=0.01, num_samples=32,
num_iterations=4, loss_fn=None):
super(UnrolledSPSAGradientDescent, self).__init__(
lr, lr_fn, fgsm, colocate_gradients_with_ops)
assert num_samples % 2 == 0, 'Number of samples must be even'
self._delta = delta
self._num_samples = num_samples // 2 # Since we mirror +/- delta later.
self._num_iterations = num_iterations
assert loss_fn is not None, 'loss_fn must be specified.'
self._loss_fn = loss_fn
def gradients(self, loss, x):
return _spsa_gradients(self._loss_fn, x, self._delta, self._num_samples,
self._num_iterations)
# Syntactic sugar.
class UnrolledSPSAFGSMDescent(UnrolledSPSAGradientDescent):
"""Identical to UnrolledSPSAGradientDescent but forces FGSM steps."""
def __init__(self, lr=.1, lr_fn=None,
colocate_gradients_with_ops=False, delta=0.01, num_samples=32,
num_iterations=4, loss_fn=None):
super(UnrolledSPSAFGSMDescent, self).__init__(
lr, lr_fn, True, colocate_gradients_with_ops, delta, num_samples,
num_iterations, loss_fn)
class UnrolledSPSAAdam(UnrolledAdam, UnrolledSPSA):
"""Optimizer for gradient-free attacks in https://arxiv.org/abs/1802.05666.
Gradients estimates are computed using Simultaneous Perturbation Stochastic
Approximation (SPSA), combined with the ADAM update rule.
"""
def __init__(self, lr=0.1, lr_fn=None, beta1=0.9, beta2=0.999, epsilon=1e-9,
colocate_gradients_with_ops=False, delta=0.01, num_samples=32,
num_iterations=4, loss_fn=None):
super(UnrolledSPSAAdam, self).__init__(lr, lr_fn, beta1, beta2, epsilon,
colocate_gradients_with_ops)
assert num_samples % 2 == 0, 'Number of samples must be even'
self._delta = delta
self._num_samples = num_samples // 2 # Since we mirror +/- delta later.
self._num_iterations = num_iterations
assert loss_fn is not None, 'loss_fn must be specified.'
self._loss_fn = loss_fn
def gradients(self, loss, x):
return _spsa_gradients(self._loss_fn, x, self._delta, self._num_samples,
self._num_iterations)
def _is_spsa_optimizer(cls):
return issubclass(cls, UnrolledSPSA)
def wrap_optimizer(cls, **default_kwargs):
"""Wraps an optimizer such that __init__ uses the specified kwargs."""
class WrapperUnrolledOptimizer(cls):
def __init__(self, *args, **kwargs):
new_kwargs = default_kwargs.copy()
new_kwargs.update(kwargs)
super(WrapperUnrolledOptimizer, self).__init__(*args, **new_kwargs)
return WrapperUnrolledOptimizer
def _project_perturbation(perturbation, epsilon, input_image, image_bounds):
"""Project `perturbation` onto L-infinity ball of radius `epsilon`."""
clipped_perturbation = tf.clip_by_value(perturbation, -epsilon, epsilon)
new_image = tf.clip_by_value(input_image + clipped_perturbation,
image_bounds[0], image_bounds[1])
return new_image - input_image
def pgd_attack(loss_fn, input_image, epsilon, num_steps,
optimizer=UnrolledGradientDescent(),
project_perturbation=_project_perturbation,
image_bounds=None, random_init=1.):
"""Projected gradient descent for generating adversarial images.
Args:
    loss_fn: A callable which takes the (perturbed) `input_image` as argument
      and returns the loss, a scalar Tensor, which will be minimized.
input_image: Tensor, a batch of images
epsilon: float, the L-infinity norm of the maximum allowable perturbation
num_steps: int, the number of steps of gradient descent
optimizer: An `UnrolledOptimizer` object
project_perturbation: A function, which will be used to enforce some
constraint. It should have the same signature as `_project_perturbation`.
Note that if you use a custom projection function, you should double-check
your implementation, since an incorrect implementation will not error,
and will appear to work fine.
image_bounds: A pair of floats: minimum and maximum pixel value. If None
(default), the bounds are assumed to be 0 and 1.
    random_init: Probability of starting from a random location rather than
      the nominal input image.
Returns:
adversarial version of `input_image`, with L-infinity difference less than
epsilon, which tries to minimize loss_fn.
"""
image_bounds = image_bounds or (0., 1.)
random_shape = [tf.shape(input_image)[0]] + [1] * (len(input_image.shape) - 1)
use_random_init = tf.cast(
tf.random_uniform(random_shape) < float(random_init), tf.float32)
init_perturbation = use_random_init * tf.random_uniform(
tf.shape(input_image), minval=-epsilon, maxval=epsilon)
init_perturbation = project_perturbation(init_perturbation,
epsilon, input_image, image_bounds)
init_optim_state = optimizer.init_state([init_perturbation])
def loop_body(i, perturbation, flat_optim_state):
"""Update perturbation to input image."""
optim_state = nest.pack_sequence_as(structure=init_optim_state,
flat_sequence=flat_optim_state)
loss = loss_fn(input_image + perturbation)
new_perturbation_list, new_optim_state = optimizer.minimize(
loss, [perturbation], optim_state)
projected_perturbation = project_perturbation(
new_perturbation_list[0], epsilon, input_image, image_bounds)
return i + 1, projected_perturbation, nest.flatten(new_optim_state)
def cond(i, *_):
return tf.less(i, num_steps)
flat_init_optim_state = nest.flatten(init_optim_state)
_, final_perturbation, _ = tf.while_loop(
cond,
loop_body,
loop_vars=[tf.constant(0.), init_perturbation, flat_init_optim_state],
parallel_iterations=1,
back_prop=False)
adversarial_image = input_image + final_perturbation
return tf.stop_gradient(adversarial_image)
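# Illustrative sketch (added for clarity, not part of the original file): a
# hypothetical direct call to `pgd_attack`, where `loss_fn` maps a batch of
# perturbed images to the scalar loss being minimized:
#
#   adversarial = pgd_attack(
#       loss_fn, images, epsilon=8. / 255, num_steps=20,
#       optimizer=UnrolledAdam(lr=.1), image_bounds=(0., 1.), random_init=1.)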
@six.add_metaclass(abc.ABCMeta)
class Attack(snt.AbstractModule):
"""Defines an attack as a Sonnet module."""
def __init__(self, predictor, specification, name, predictor_kwargs=None):
super(Attack, self).__init__(name=name)
self._predictor = predictor
self._specification = specification
if predictor_kwargs is None:
self._kwargs = {'intermediate': {}, 'final': {}}
else:
self._kwargs = predictor_kwargs
self._forced_mode = None
self._target_class = None
def _eval_fn(self, x, mode='intermediate'):
"""Runs the logits corresponding to `x`.
Args:
x: input to the predictor network.
mode: Either "intermediate" or "final". Selects the desired predictor
arguments.
Returns:
Tensor of logits.
"""
if self._forced_mode is not None:
mode = self._forced_mode
return self._predictor(x, **self._kwargs[mode])
@abc.abstractmethod
def _build(self, inputs, labels):
"""Returns the adversarial attack around inputs."""
@abc.abstractproperty
def logits(self):
"""Returns the logits corresponding to the best attack."""
@abc.abstractproperty
def attack(self):
"""Returns the best attack."""
@abc.abstractproperty
def success(self):
"""Returns whether the attack was successful."""
def force_mode(self, mode):
"""Only used by RestartedAttack to force the evaluation mode."""
self._forced_mode = mode
@property
def target_class(self):
"""Returns the target class if this attack is a targeted attacks."""
return self._target_class
@target_class.setter
def target_class(self, t):
self._target_class = t
@six.add_metaclass(abc.ABCMeta)
class PGDAttack(Attack):
"""Defines a PGD attack."""
def __init__(self, predictor, specification, epsilon, lr=.1, lr_fn=None,
num_steps=20, num_restarts=1, input_bounds=(0., 1.),
random_init=1., optimizer_builder=UnrolledGradientDescent,
project_perturbation=_project_perturbation,
predictor_kwargs=None):
super(PGDAttack, self).__init__(predictor, specification, name='pgd',
predictor_kwargs=predictor_kwargs)
self._num_steps = num_steps
self._num_restarts = num_restarts
self._epsilon = epsilon
self._lr = lr
self._lr_fn = lr_fn
self._input_bounds = input_bounds
self._random_init = random_init
self._optimizer_builder = optimizer_builder
self._project_perturbation = project_perturbation
# Helper functions.
def prepare_inputs(self, inputs):
"""Tiles inputs according to number of restarts."""
batch_size = tf.shape(inputs)[0]
input_shape = list(inputs.shape.as_list()[1:])
duplicated_inputs = tf.expand_dims(inputs, axis=0)
# Shape is [num_restarts, batch_size, ...]
duplicated_inputs = tf.tile(
duplicated_inputs,
[self._num_restarts, 1] + [1] * len(input_shape))
# Shape is [num_restarts * batch_size, ...]
duplicated_inputs = tf.reshape(
duplicated_inputs, [self._num_restarts * batch_size] + input_shape)
return batch_size, input_shape, duplicated_inputs
def prepare_labels(self, labels):
"""Tiles labels according to number of restarts."""
return tf.tile(labels, [self._num_restarts])
def find_worst_attack(self, objective_fn, adversarial_input, batch_size,
input_shape):
"""Returns the attack that maximizes objective_fn."""
adversarial_objective = objective_fn(adversarial_input)
adversarial_objective = tf.reshape(adversarial_objective, [-1, batch_size])
adversarial_input = tf.reshape(adversarial_input,
[-1, batch_size] + input_shape)
i = tf.argmax(adversarial_objective, axis=0)
j = tf.cast(tf.range(tf.shape(adversarial_objective)[1]), i.dtype)
ij = tf.stack([i, j], axis=1)
return tf.gather_nd(adversarial_input, ij)
def _maximize_margin(bounds):
# Bounds has shape [num_restarts, batch_size, num_specs].
return tf.reduce_max(bounds, axis=-1)
def _any_greater(bounds):
# Bounds has shape [batch_size, num_specs].
bounds = tf.reduce_max(bounds, axis=-1)
return bounds > 0.
def _maximize_topk_hinge_margin(bounds, k=5, margin=.1):
# Bounds has shape [num_restarts, batch_size, num_specs].
b = tf.nn.top_k(bounds, k=k, sorted=False).values
return tf.reduce_sum(tf.minimum(b, margin), axis=-1)
def _topk_greater(bounds, k=5):
# Bounds has shape [batch_size, num_specs].
b = tf.nn.top_k(bounds, k=k, sorted=False).values
return tf.reduce_min(b, axis=-1) > 0.
class UntargetedPGDAttack(PGDAttack):
"""Defines an untargeted PGD attack."""
def __init__(self, predictor, specification, epsilon, lr=.1, lr_fn=None,
num_steps=20, num_restarts=1, input_bounds=(0., 1.),
random_init=1., optimizer_builder=UnrolledGradientDescent,
project_perturbation=_project_perturbation,
objective_fn=_maximize_margin, success_fn=_any_greater,
predictor_kwargs=None):
super(UntargetedPGDAttack, self).__init__(
predictor, specification, epsilon, lr, lr_fn, num_steps, num_restarts,
input_bounds, random_init, optimizer_builder, project_perturbation,
predictor_kwargs)
self._objective_fn = objective_fn
self._success_fn = success_fn
def _build(self, inputs, labels):
batch_size, input_shape, duplicated_inputs = self.prepare_inputs(inputs)
duplicated_labels = self.prepare_labels(labels)
# Define objectives.
def objective_fn(x):
model_logits = self._eval_fn(x) # [restarts * batch_size, output].
model_logits = tf.reshape(
model_logits, [self._num_restarts, batch_size, -1])
bounds = self._specification.evaluate(model_logits)
# Output has dimension [num_restarts, batch_size].
return self._objective_fn(bounds)
# Only used for SPSA.
# The input to this loss is the perturbation (not the image).
# The first dimension corresponds to the number of SPSA samples.
# Shape of perturbations is [num_samples, restarts * batch_size, ...]
def spsa_loss_fn(perturbation):
"""Computes the loss per SPSA sample."""
x = tf.reshape(
perturbation + tf.expand_dims(duplicated_inputs, axis=0),
[-1] + duplicated_inputs.shape.as_list()[1:])
model_logits = self._eval_fn(x)
num_outputs = tf.shape(model_logits)[1]
model_logits = tf.reshape(
model_logits, [-1, batch_size, num_outputs])
bounds = self._specification.evaluate(model_logits)
losses = -self._objective_fn(bounds)
return tf.reshape(losses, [-1])
def reduced_loss_fn(x):
      # Objectives have shape [num_restarts, batch_size]; negate as we minimize.
return -tf.reduce_sum(objective_fn(x))
# Use targeted attacks as specified by the specification.
if _is_spsa_optimizer(self._optimizer_builder):
optimizer = self._optimizer_builder(lr=self._lr, lr_fn=self._lr_fn,
loss_fn=spsa_loss_fn)
else:
optimizer = self._optimizer_builder(lr=self._lr, lr_fn=self._lr_fn)
adversarial_input = pgd_attack(
reduced_loss_fn, duplicated_inputs, epsilon=self._epsilon,
num_steps=self._num_steps, image_bounds=self._input_bounds,
random_init=self._random_init, optimizer=optimizer,
project_perturbation=self._project_perturbation)
adversarial_input = self.adapt(duplicated_inputs, adversarial_input,
duplicated_labels)
self._attack = self.find_worst_attack(objective_fn, adversarial_input,
batch_size, input_shape)
self._logits = self._eval_fn(self._attack, mode='final')
self._success = self._success_fn(self._specification.evaluate(self._logits))
return self._attack
@property
def logits(self):
self._ensure_is_connected()
return self._logits
@property
def attack(self):
self._ensure_is_connected()
return self._attack
@property
def success(self):
self._ensure_is_connected()
return self._success
def adapt(self, original_inputs, adversarial_inputs, labels):
"""Function called after PGD to adapt adversarial examples."""
return adversarial_inputs
class UntargetedTop5PGDAttack(UntargetedPGDAttack):
"""Defines an untargeted PGD attack on top-5."""
def __init__(self, predictor, specification, epsilon, lr=.1, lr_fn=None,
num_steps=20, num_restarts=1, input_bounds=(0., 1.),
random_init=1., optimizer_builder=UnrolledGradientDescent,
project_perturbation=_project_perturbation,
objective_fn=_maximize_topk_hinge_margin, predictor_kwargs=None):
super(UntargetedTop5PGDAttack, self).__init__(
predictor, specification, epsilon, lr=lr, lr_fn=lr_fn,
num_steps=num_steps, num_restarts=num_restarts,
input_bounds=input_bounds, random_init=random_init,
optimizer_builder=optimizer_builder,
project_perturbation=project_perturbation, objective_fn=objective_fn,
success_fn=_topk_greater, predictor_kwargs=predictor_kwargs)
class UntargetedAdaptivePGDAttack(UntargetedPGDAttack):
"""Uses an adaptive scheme to pick attacks that are just strong enough."""
def adapt(self, original_inputs, adversarial_inputs, labels):
"""Runs binary search to find the first misclassified input."""
batch_size = tf.shape(original_inputs)[0]
binary_search_iterations = 10
def cond(i, *_):
return tf.less(i, binary_search_iterations)
def get(m):
m = tf.reshape(m, [batch_size] + [1] * (len(original_inputs.shape) - 1))
return (adversarial_inputs - original_inputs) * m + original_inputs
def is_attack_successful(m):
logits = self._eval_fn(get(m))
return self._success_fn(self._specification.evaluate(logits))
def loop_body(i, lower, upper):
m = (lower + upper) * .5
success = is_attack_successful(m)
new_lower = tf.where(success, lower, m)
new_upper = tf.where(success, m, upper)
return i + 1, new_lower, new_upper
lower = tf.zeros(shape=[batch_size])
upper = tf.ones(shape=[batch_size])
_, lower, upper = tf.while_loop(
cond,
loop_body,
loop_vars=[tf.constant(0.), lower, upper],
parallel_iterations=1,
back_prop=False)
# If lower is incorrectly classified, pick lower; otherwise pick upper.
success = is_attack_successful(lower)
return get(tf.where(success, lower, upper))
class MultiTargetedPGDAttack(PGDAttack):
"""Runs targeted attacks for each specification."""
def __init__(self, predictor, specification, epsilon, lr=.1, lr_fn=None,
num_steps=20, num_restarts=1, input_bounds=(0., 1.),
random_init=1., optimizer_builder=UnrolledGradientDescent,
project_perturbation=_project_perturbation,
max_specifications=0, random_specifications=False,
predictor_kwargs=None):
super(MultiTargetedPGDAttack, self).__init__(
predictor, specification, epsilon, lr=lr, lr_fn=lr_fn,
num_steps=num_steps, num_restarts=num_restarts,
input_bounds=input_bounds, random_init=random_init,
optimizer_builder=optimizer_builder,
project_perturbation=project_perturbation,
predictor_kwargs=predictor_kwargs)
self._max_specifications = max_specifications
self._random_specifications = random_specifications
def _build(self, inputs, labels):
batch_size = tf.shape(inputs)[0]
num_specs = self._specification.num_specifications
if self._max_specifications > 0 and self._max_specifications < num_specs:
model_logits = self._eval_fn(inputs)
bounds = self._specification.evaluate(model_logits)
_, idx = tf.math.top_k(bounds, k=self._max_specifications, sorted=False)
if self._random_specifications:
idx = tf.random.uniform(shape=tf.shape(idx),
maxval=self._specification.num_specifications,
dtype=idx.dtype)
idx = tf.tile(tf.expand_dims(idx, 0), [self._num_restarts, 1, 1])
select_fn = lambda x: tf.gather(x, idx, batch_dims=len(idx.shape) - 1)
else:
select_fn = lambda x: x
input_shape = list(inputs.shape.as_list()[1:])
duplicated_inputs = tf.expand_dims(inputs, axis=0)
# Shape is [num_restarts * num_specifications, batch_size, ...]
duplicated_inputs = tf.tile(
duplicated_inputs,
[self._num_restarts * num_specs, 1] + [1] * len(input_shape))
# Shape is [num_restarts * num_specifications * batch_size, ...]
duplicated_inputs = tf.reshape(duplicated_inputs, [-1] + input_shape)
def objective_fn(x):
# Output has shape [restarts * num_specs * batch_size, output].
model_logits = self._eval_fn(x)
model_logits = tf.reshape(
model_logits, [self._num_restarts, num_specs, batch_size, -1])
# Output has shape [num_restarts, batch_size, num_specs].
return self._specification.evaluate(model_logits)
def reduced_loss_fn(x):
# Negate as we minimize.
return -tf.reduce_sum(select_fn(objective_fn(x)))
# Use targeted attacks as specified by the specification.
if _is_spsa_optimizer(self._optimizer_builder):
raise ValueError('"UnrolledSPSA*" unsupported in '
'MultiTargetedPGDAttack')
optimizer = self._optimizer_builder(lr=self._lr, lr_fn=self._lr_fn)
adversarial_input = pgd_attack(
reduced_loss_fn, duplicated_inputs,
epsilon=self._epsilon, num_steps=self._num_steps,
image_bounds=self._input_bounds, random_init=self._random_init,
optimizer=optimizer, project_perturbation=self._project_perturbation)
# Get best attack.
adversarial_objective = objective_fn(adversarial_input)
adversarial_objective = tf.transpose(adversarial_objective, [0, 2, 1])
adversarial_objective = tf.reshape(adversarial_objective, [-1, batch_size])
adversarial_input = tf.reshape(adversarial_input,
[-1, batch_size] + input_shape)
i = tf.argmax(adversarial_objective, axis=0)
j = tf.cast(tf.range(tf.shape(adversarial_objective)[1]), i.dtype)
ij = tf.stack([i, j], axis=1)
self._attack = tf.gather_nd(adversarial_input, ij)
self._logits = self._eval_fn(self._attack, mode='final')
    # Record which samples violate any specification.
bounds = tf.reduce_max(self._specification.evaluate(self._logits), axis=1)
self._success = (bounds > 0.)
return self._attack
@property
def logits(self):
self._ensure_is_connected()
return self._logits
@property
def attack(self):
self._ensure_is_connected()
return self._attack
@property
def success(self):
self._ensure_is_connected()
return self._success
class MemoryEfficientMultiTargetedPGDAttack(PGDAttack):
"""Defines a targeted PGD attack for each specification using while_loop."""
def __init__(self, predictor, specification, epsilon, lr=.1, lr_fn=None,
num_steps=20, num_restarts=1, input_bounds=(0., 1.),
random_init=1., optimizer_builder=UnrolledGradientDescent,
project_perturbation=_project_perturbation,
max_specifications=0, random_specifications=False,
predictor_kwargs=None):
super(MemoryEfficientMultiTargetedPGDAttack, self).__init__(
predictor, specification, epsilon, lr=lr, lr_fn=lr_fn,
num_steps=num_steps, num_restarts=num_restarts,
input_bounds=input_bounds, random_init=random_init,
optimizer_builder=optimizer_builder,
project_perturbation=project_perturbation,
predictor_kwargs=predictor_kwargs)
self._max_specifications = max_specifications
self._random_specifications = random_specifications
def _build(self, inputs, labels):
batch_size, input_shape, duplicated_inputs = self.prepare_inputs(inputs)
if (self._max_specifications > 0 and
self._max_specifications < self._specification.num_specifications):
num_specs = self._max_specifications
model_logits = self._eval_fn(inputs)
bounds = self._specification.evaluate(model_logits)
_, idx = tf.math.top_k(bounds, k=num_specs, sorted=False)
if self._random_specifications:
idx = tf.random.uniform(shape=tf.shape(idx),
maxval=self._specification.num_specifications,
dtype=idx.dtype)
idx = tf.tile(tf.expand_dims(idx, 0), [self._num_restarts, 1, 1])
def select_fn(x, i):
return tf.squeeze(
tf.gather(x, tf.expand_dims(idx[:, :, i], -1),
batch_dims=len(idx.shape) - 1),
axis=-1)
else:
num_specs = self._specification.num_specifications
select_fn = lambda x, i: x[:, :, i]
def objective_fn(x):
model_logits = self._eval_fn(x) # [restarts * batch_size, output].
model_logits = tf.reshape(
model_logits, [self._num_restarts, batch_size, -1])
# Output has dimension [num_restarts, batch_size, num_specifications].
return self._specification.evaluate(model_logits)
def flat_objective_fn(x):
return _maximize_margin(objective_fn(x))
def build_loss_fn(idx):
def _reduced_loss_fn(x):
        # Pick the worst attack; output has shape [num_restarts, batch_size].
return -tf.reduce_sum(select_fn(objective_fn(x), idx))
return _reduced_loss_fn
if _is_spsa_optimizer(self._optimizer_builder):
      raise ValueError('"UnrolledSPSA*" unsupported in '
                       'MemoryEfficientMultiTargetedPGDAttack')
optimizer = self._optimizer_builder(lr=self._lr, lr_fn=self._lr_fn)
# Run a separate PGD attack for each specification.
def cond(spec_idx, unused_attack, success):
# If we are already successful, we break.
return tf.logical_and(spec_idx < num_specs,
tf.logical_not(tf.reduce_all(success)))
def body(spec_idx, attack, success):
"""Runs a separate PGD attack for each specification."""
adversarial_input = pgd_attack(
build_loss_fn(spec_idx), duplicated_inputs,
epsilon=self._epsilon, num_steps=self._num_steps,
image_bounds=self._input_bounds, random_init=self._random_init,
optimizer=optimizer, project_perturbation=self._project_perturbation)
new_attack = self.find_worst_attack(flat_objective_fn, adversarial_input,
batch_size, input_shape)
new_logits = self._eval_fn(new_attack)
      # Mark samples that violate any specification.
new_success = _any_greater(self._specification.evaluate(new_logits))
# The first iteration always sets the attack and logits.
use_new_values = tf.logical_or(tf.equal(spec_idx, 0), new_success)
print_op = tf.print('Processed specification #', spec_idx)
with tf.control_dependencies([print_op]):
new_spec_idx = spec_idx + 1
return (new_spec_idx,
tf.where(use_new_values, new_attack, attack),
tf.logical_or(success, new_success))
_, self._attack, self._success = tf.while_loop(
cond, body, back_prop=False, parallel_iterations=1,
loop_vars=[
tf.constant(0, dtype=tf.int32),
inputs,
tf.zeros([tf.shape(inputs)[0]], dtype=tf.bool),
])
self._logits = self._eval_fn(self._attack, mode='final')
return self._attack
@property
def logits(self):
self._ensure_is_connected()
return self._logits
@property
def attack(self):
self._ensure_is_connected()
return self._attack
@property
def success(self):
self._ensure_is_connected()
return self._success
class RestartedAttack(Attack):
"""Wraps an attack to run it multiple times using a tf.while_loop."""
def __init__(self, inner_attack, num_restarts=1):
super(RestartedAttack, self).__init__(
inner_attack._predictor, # pylint: disable=protected-access
inner_attack._specification, # pylint: disable=protected-access
name='restarted_' + inner_attack.module_name,
predictor_kwargs=inner_attack._kwargs) # pylint: disable=protected-access
self._inner_attack = inner_attack
self._num_restarts = num_restarts
# Prevent the inner attack from updating batch normalization statistics.
self._inner_attack.force_mode('intermediate')
def _build(self, inputs, labels):
def cond(i, unused_attack, success):
# If we are already successful, we break.
return tf.logical_and(i < self._num_restarts,
tf.logical_not(tf.reduce_all(success)))
def body(i, attack, success):
new_attack = self._inner_attack(inputs, labels)
new_success = self._inner_attack.success
# The first iteration always sets the attack.
use_new_values = tf.logical_or(tf.equal(i, 0), new_success)
return (i + 1,
tf.where(use_new_values, new_attack, attack),
tf.logical_or(success, new_success))
_, self._attack, self._success = tf.while_loop(
cond, body, back_prop=False, parallel_iterations=1,
loop_vars=[
tf.constant(0, dtype=tf.int32),
inputs,
tf.zeros([tf.shape(inputs)[0]], dtype=tf.bool),
])
self._logits = self._eval_fn(self._attack, mode='final')
return self._attack
@property
def logits(self):
self._ensure_is_connected()
return self._logits
@property
def attack(self):
self._ensure_is_connected()
return self._attack
@property
def success(self):
self._ensure_is_connected()
return self._success
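# Illustrative sketch (editorial addition, not part of the original library):
# the per-example book-keeping performed by the tf.while_loop in RestartedAttack,
# written as a plain Python loop. `run_inner_attack` is a hypothetical stand-in
# for the inner attack, and inputs are assumed to be a [batch, features] array.
def _restart_loop_sketch(run_inner_attack, inputs, num_restarts):
  import numpy as np
  attack = inputs
  success = np.zeros(inputs.shape[0], dtype=bool)
  for i in range(num_restarts):
    if success.all():   # Early exit once every example is attacked successfully.
      break
    new_attack, new_success = run_inner_attack(inputs)
    use_new = (i == 0) | new_success      # The first restart is always kept.
    attack = np.where(use_new[:, None], new_attack, attack)
    success = success | new_success
  return attack, success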
| interval-bound-propagation-master | interval_bound_propagation/src/attacks.py |
# coding=utf-8
# Copyright 2019 The Interval Bound Propagation Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Definition of input bounds to each layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import itertools
import six
import sonnet as snt
import tensorflow.compat.v1 as tf
@six.add_metaclass(abc.ABCMeta)
class AbstractBounds(object):
"""Abstract bounds class."""
def __init__(self):
self._update_cache_op = None
@classmethod
@abc.abstractmethod
def convert(cls, bounds):
"""Converts another bound type to this type."""
@abc.abstractproperty
def shape(self):
"""Returns shape (as list) of the tensor, including batch dimension."""
def concretize(self):
return self
def _raise_not_implemented(self, name):
raise NotImplementedError(
'{} modules are not supported by "{}".'.format(
name, self.__class__.__name__))
def apply_linear(self, wrapper, w, b): # pylint: disable=unused-argument
self._raise_not_implemented('snt.Linear')
def apply_conv1d(self, wrapper, w, b, padding, stride): # pylint: disable=unused-argument
self._raise_not_implemented('snt.Conv1D')
def apply_conv2d(self, wrapper, w, b, padding, strides): # pylint: disable=unused-argument
self._raise_not_implemented('snt.Conv2D')
def apply_increasing_monotonic_fn(self, wrapper, fn, *args, **parameters): # pylint: disable=unused-argument
self._raise_not_implemented(fn.__name__)
def apply_piecewise_monotonic_fn(self, wrapper, fn, boundaries, *args): # pylint: disable=unused-argument
self._raise_not_implemented(fn.__name__)
def apply_batch_norm(self, wrapper, mean, variance, scale, bias, epsilon): # pylint: disable=unused-argument
self._raise_not_implemented('ibp.BatchNorm')
def apply_batch_reshape(self, wrapper, shape): # pylint: disable=unused-argument
self._raise_not_implemented('snt.BatchReshape')
def apply_softmax(self, wrapper): # pylint: disable=unused-argument
self._raise_not_implemented('tf.nn.softmax')
@property
def update_cache_op(self):
"""TF op to update cached bounds for re-use across session.run calls."""
if self._update_cache_op is None:
raise ValueError('Bounds not cached: enable_caching() not called.')
return self._update_cache_op
def enable_caching(self):
"""Enables caching the bounds for re-use across session.run calls."""
if self._update_cache_op is not None:
raise ValueError('Bounds already cached: enable_caching() called twice.')
self._update_cache_op = self._set_up_cache()
def _set_up_cache(self):
"""Replace fields with cached versions.
Returns:
TensorFlow op to update the cache.
"""
return tf.no_op() # By default, don't cache.
def _cache_with_update_op(self, tensor):
"""Creates non-trainable variable to cache the tensor across sess.run calls.
Args:
tensor: Tensor to cache.
Returns:
cached_tensor: Non-trainable variable to contain the cached value
of `tensor`.
update_op: TensorFlow op to re-evaluate `tensor` and assign the result
to `cached_tensor`.
"""
cached_tensor = tf.get_variable(
tensor.name.replace(':', '__') + '_ibp_cache',
shape=tensor.shape, dtype=tensor.dtype, trainable=False)
update_op = tf.assign(cached_tensor, tensor)
return cached_tensor, update_op
class IntervalBounds(AbstractBounds):
"""Axis-aligned bounding box."""
def __init__(self, lower, upper):
super(IntervalBounds, self).__init__()
self._lower = lower
self._upper = upper
@property
def lower(self):
return self._lower
@property
def upper(self):
return self._upper
@property
def shape(self):
return self.lower.shape.as_list()
def __iter__(self):
yield self.lower
yield self.upper
@classmethod
def convert(cls, bounds):
if isinstance(bounds, tf.Tensor):
return cls(bounds, bounds)
bounds = bounds.concretize()
if not isinstance(bounds, cls):
raise ValueError('Cannot convert "{}" to "{}"'.format(bounds,
cls.__name__))
return bounds
def apply_linear(self, wrapper, w, b):
return self._affine(w, b, tf.matmul)
def apply_conv1d(self, wrapper, w, b, padding, stride):
return self._affine(w, b, tf.nn.conv1d, padding=padding, stride=stride)
def apply_conv2d(self, wrapper, w, b, padding, strides):
return self._affine(w, b, tf.nn.convolution,
padding=padding, strides=strides)
def _affine(self, w, b, fn, **kwargs):
c = (self.lower + self.upper) / 2.
r = (self.upper - self.lower) / 2.
c = fn(c, w, **kwargs)
if b is not None:
c = c + b
r = fn(r, tf.abs(w), **kwargs)
return IntervalBounds(c - r, c + r)
def apply_increasing_monotonic_fn(self, wrapper, fn, *args, **parameters):
args_lower = [self.lower] + [a.lower for a in args]
args_upper = [self.upper] + [a.upper for a in args]
return IntervalBounds(fn(*args_lower), fn(*args_upper))
def apply_piecewise_monotonic_fn(self, wrapper, fn, boundaries, *args):
valid_values = []
for a in [self] + list(args):
vs = []
vs.append(a.lower)
vs.append(a.upper)
for b in boundaries:
vs.append(
tf.maximum(a.lower, tf.minimum(a.upper, b * tf.ones_like(a.lower))))
valid_values.append(vs)
outputs = []
for inputs in itertools.product(*valid_values):
outputs.append(fn(*inputs))
outputs = tf.stack(outputs, axis=-1)
return IntervalBounds(tf.reduce_min(outputs, axis=-1),
tf.reduce_max(outputs, axis=-1))
def apply_batch_norm(self, wrapper, mean, variance, scale, bias, epsilon):
# Element-wise multiplier.
multiplier = tf.rsqrt(variance + epsilon)
if scale is not None:
multiplier *= scale
w = multiplier
# Element-wise bias.
b = -multiplier * mean
if bias is not None:
b += bias
b = tf.squeeze(b, axis=0)
# Because the scale might be negative, we need to apply a strategy similar
# to linear.
c = (self.lower + self.upper) / 2.
r = (self.upper - self.lower) / 2.
c = tf.multiply(c, w) + b
r = tf.multiply(r, tf.abs(w))
return IntervalBounds(c - r, c + r)
def apply_batch_reshape(self, wrapper, shape):
return IntervalBounds(snt.BatchReshape(shape)(self.lower),
snt.BatchReshape(shape)(self.upper))
def apply_softmax(self, wrapper):
ub = self.upper
lb = self.lower
# Keep diagonal and take opposite bound for non-diagonals.
lbs = tf.matrix_diag(lb) + tf.expand_dims(ub, axis=-2) - tf.matrix_diag(ub)
ubs = tf.matrix_diag(ub) + tf.expand_dims(lb, axis=-2) - tf.matrix_diag(lb)
# Get diagonal entries after softmax operation.
ubs = tf.matrix_diag_part(tf.nn.softmax(ubs))
lbs = tf.matrix_diag_part(tf.nn.softmax(lbs))
return IntervalBounds(lbs, ubs)
def _set_up_cache(self):
self._lower, update_lower_op = self._cache_with_update_op(self._lower)
self._upper, update_upper_op = self._cache_with_update_op(self._upper)
return tf.group([update_lower_op, update_upper_op])
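# Illustrative sketch (editorial addition, not part of the original library):
# the centre/radius interval arithmetic used by IntervalBounds._affine above,
# written in NumPy. For y = x @ w + b with x in [lower, upper] elementwise, the
# output interval is centre @ w + b +/- radius @ |w|.
def _interval_affine_sketch(lower, upper, w, b):
  import numpy as np
  c = (lower + upper) / 2.
  r = (upper - lower) / 2.
  c = c @ w + b
  r = r @ np.abs(w)
  return c - r, c + r   # Valid elementwise bounds on x @ w + b.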
| interval-bound-propagation-master | interval_bound_propagation/src/bounds.py |
# Copyright 2021 the Ithaca Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup module for Ithaca.
Only installs the inference components.
See README.md for more details.
"""
import pathlib
import setuptools
here = pathlib.Path(__file__).parent.resolve()
long_description = (here / 'README.md').read_text(encoding='utf-8')
setuptools.setup(
name='ithaca',
author='Ithaca team',
author_email='[email protected]',
version='0.1.0',
license='Apache License, Version 2.0',
description='Ithaca library for ancient text restoration and attribution.',
long_description=long_description,
long_description_content_type='text/markdown',
packages=setuptools.find_packages(exclude=('train',)),
package_data={'': ['*.txt']},
install_requires=(here / 'requirements.txt').read_text().splitlines(),
extras_require={
'train': [
'optax',
'jaxline==0.0.5',
'tensorflow-datasets',
]
},
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
)
| ithaca-main | setup.py |
# Copyright 2021 the Ithaca Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example for running inference. See also colab."""
import functools
import pickle
from absl import app
from absl import flags
from ithaca.eval import inference
from ithaca.models.model import Model
from ithaca.util.alphabet import GreekAlphabet
import jax
FLAGS = flags.FLAGS
flags.DEFINE_string(
'input', '', 'Text to directly pass to the model. Only one of --input and '
'--input_file can be specified.')
flags.DEFINE_string(
'input_file', '', 'File containing text to pass to the model. Only one of '
'--input and --input_file can be specified.')
flags.DEFINE_string('checkpoint_path', 'checkpoint.pkl',
'Path to model checkpoint pickle.')
flags.DEFINE_string('attribute_json', '', 'Path to save attribution JSON to.')
flags.DEFINE_string('restore_json', '', 'Path to save restoration JSON to.')
def load_checkpoint(path):
"""Loads a checkpoint pickle.
Args:
path: path to checkpoint pickle
Returns:
a model config dictionary (arguments to the model's constructor), a dict of
dicts containing region mapping information, a GreekAlphabet instance with
indices and words populated from the checkpoint, a dict of Jax arrays
`params`, and a `forward` function.
"""
# Pickled checkpoint dict containing params and various config:
with open(path, 'rb') as f:
checkpoint = pickle.load(f)
# We reconstruct the model using the same arguments as during training, which
# are saved as a dict in the "model_config" key, and construct a `forward`
# function of the form required by attribute() and restore().
params = jax.device_put(checkpoint['params'])
model = Model(**checkpoint['model_config'])
forward = functools.partial(model.apply, params)
# Contains the mapping between region IDs and names:
region_map = checkpoint['region_map']
  # Use the vocabulary mapping from the checkpoint; the rest of the values in
  # the class are fixed constants, e.g. the padding symbol.
alphabet = GreekAlphabet()
alphabet.idx2word = checkpoint['alphabet']['idx2word']
alphabet.word2idx = checkpoint['alphabet']['word2idx']
return checkpoint['model_config'], region_map, alphabet, params, forward
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
if FLAGS.input and not FLAGS.input_file:
input_text = FLAGS.input
elif not FLAGS.input and FLAGS.input_file:
with open(FLAGS.input_file, 'r', encoding='utf8') as f:
input_text = f.read()
else:
raise app.UsageError('Specify exactly one of --input and --input_file.')
if not 50 <= len(input_text) <= 750:
raise app.UsageError(
f'Text should be between 50 and 750 chars long, but the input was '
f'{len(input_text)} characters')
# Load the checkpoint pickle and extract from it the pieces needed for calling
# the attribute() and restore() functions:
(model_config, region_map, alphabet, params,
forward) = load_checkpoint(FLAGS.checkpoint_path)
vocab_char_size = model_config['vocab_char_size']
vocab_word_size = model_config['vocab_word_size']
attribution = inference.attribute(
input_text,
forward=forward,
params=params,
alphabet=alphabet,
region_map=region_map,
vocab_char_size=vocab_char_size,
vocab_word_size=vocab_word_size)
if FLAGS.attribute_json:
with open(FLAGS.attribute_json, 'w') as f:
f.write(attribution.json(indent=2))
else:
print('Attribution:', attribution.json())
restoration = inference.restore(
input_text,
forward=forward,
params=params,
alphabet=alphabet,
vocab_char_size=vocab_char_size,
vocab_word_size=vocab_word_size)
if FLAGS.restore_json:
with open(FLAGS.restore_json, 'w') as f:
f.write(restoration.json(indent=2))
else:
print('Restoration:', restoration.json())
if __name__ == '__main__':
app.run(main)
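# Example invocation (editorial addition; the file names below are placeholders):
#   python inference_example.py --input_file=inscription.txt \
#     --checkpoint_path=checkpoint.pkl --attribute_json=attribution.json \
#     --restore_json=restoration.json
# Exactly one of --input and --input_file must be given, and the text must be
# between 50 and 750 characters long, as checked in main().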
| ithaca-main | inference_example.py |
# Copyright 2021 the Ithaca Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| ithaca-main | ithaca/__init__.py |
# Copyright 2021 the Ithaca Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Subregion mapping used to train the model.
The subregion IDs originate from the I.PHI generator and may be subject to
change in future versions of the PHI dataset.
"""
def load_region_maps(region_file):
"""Extracts creates a map from PHI region id to a continuous region id."""
region_ids = [] # Used mainly for eval
region_ids_inv = {} # Used in data loader
region_names_inv = {} # Used in eval
for l in region_file.read().strip().split('\n'):
tok_name_id, _ = l.strip().split(';') # second field is frequency, unused
region_name, region_id = tok_name_id.split('_')
region_name = region_name.strip()
region_id = int(region_id)
# Ignore unknown regions:
if ((region_name == 'Unknown Provenances' and region_id == 884) or
(region_name == 'unspecified subregion' and region_id == 885) or
(region_name == 'unspecified subregion' and region_id == 1439)):
continue
region_ids.append(region_id)
region_ids_inv[region_id] = len(region_ids_inv)
region_names_inv[len(region_names_inv)] = region_name
return {
'ids': region_ids,
'ids_inv': region_ids_inv,
'names_inv': region_names_inv
}
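# Illustrative sketch (editorial addition, not part of the original library):
# load_region_maps expects one "<region name>_<PHI id>;<frequency>" entry per
# line. The region names and ids below are made up for demonstration.
def _load_region_maps_example():
  import io
  region_file = io.StringIO('Attica_1;100\nCrete_2;50\n')
  maps = load_region_maps(region_file)
  # maps['ids'] == [1, 2]; maps['ids_inv'] == {1: 0, 2: 1}
  # maps['names_inv'] == {0: 'Attica', 1: 'Crete'}
  return maps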
| ithaca-main | ithaca/util/region_names.py |
# Copyright 2021 the Ithaca Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| ithaca-main | ithaca/util/__init__.py |
# Copyright 2021 the Ithaca Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Loss functions."""
import chex
from flax.deprecated import nn
import jax
import jax.numpy as jnp
def smooth_labels(labels, num_classes, label_smoothing):
if not 0 <= label_smoothing < 1:
raise ValueError(
f"'label_smoothing is {label_smoothing} and should be in [0, 1)")
one = jax.lax.convert_element_type(1, labels.dtype)
label_smoothing = jax.lax.convert_element_type(label_smoothing,
labels.dtype)
num_classes = jax.lax.convert_element_type(num_classes, labels.dtype)
return (one - label_smoothing) * labels + (label_smoothing / num_classes)
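# Worked example (editorial addition, not from the original file): with
# num_classes=4 and label_smoothing=0.1, the one-hot row [0, 1, 0, 0] becomes
# 0.9 * [0, 1, 0, 0] + 0.1 / 4 = [0.025, 0.925, 0.025, 0.025].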
def categorical_kl_divergence(p_logits, q_logits, temperature=1.):
"""Compute the KL between two categorical distributions from their logits.
Args:
p_logits: unnormalized logits for the first distribution.
q_logits: unnormalized logits for the second distribution.
temperature: the temperature for the softmax distribution, defaults at 1.
Returns:
the kl divergence between the distributions.
"""
chex.assert_type([p_logits, q_logits], float)
p_logits /= temperature
q_logits /= temperature
p = jax.nn.softmax(p_logits)
log_p = jax.nn.log_softmax(p_logits)
log_q = jax.nn.log_softmax(q_logits)
kl = jnp.sum(p * (log_p - log_q), axis=-1)
return jax.nn.relu(kl) # Guard against numerical issues giving negative KL.
def cross_entropy_label_smoothing_loss(logits,
labels,
mask=None,
label_smoothing=0.1):
"""Cross entropy loss with label smoothing."""
num_classes = logits.shape[-1]
labels_onehot = jax.nn.one_hot(labels, num_classes, dtype=logits.dtype)
if label_smoothing > 0:
labels_onehot = smooth_labels(labels_onehot, num_classes, label_smoothing)
loss = -jnp.sum(labels_onehot * jax.nn.log_softmax(logits), axis=-1)
if mask is not None:
loss = jnp.multiply(loss, mask.astype(logits.dtype))
return loss
@jax.vmap
def cross_entropy_loss(logits, label):
logits = nn.log_softmax(logits)
return -logits[label]
def cross_entropy_mask_loss(logits, label, mask):
nll = -nn.log_softmax(logits)[label]
loss = jnp.multiply(nll, mask.astype(logits.dtype))
return loss
def date_loss_l2(pred,
target_min,
target_max,
mask):
"""L2 loss function for dates."""
pred = jnp.squeeze(pred, 0)
loss = 0.
loss += (pred - target_min)**2 * jnp.less(pred, target_min).astype(
pred.dtype)
loss += (pred - target_max)**2 * jnp.greater(pred, target_max).astype(
pred.dtype)
# Mask loss
loss = jnp.multiply(loss, mask.astype(loss.dtype))
return loss
def date_loss_l1(pred,
target_min,
target_max,
mask):
"""L1 loss function for dates."""
pred = jnp.squeeze(pred, 0)
loss = 0.
loss += jnp.abs(pred - target_min) * jnp.less(pred, target_min).astype(
pred.dtype)
loss += jnp.abs(pred - target_max) * jnp.greater(pred, target_max).astype(
pred.dtype)
# Mask loss
loss = jnp.multiply(loss, mask.astype(loss.dtype))
return loss
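# Illustrative sketch (editorial addition, not part of the original library):
# date_loss_l1 only penalises predictions outside [target_min, target_max].
# The dates below are arbitrary examples.
def _date_loss_example():
  import jax.numpy as jnp
  inside = date_loss_l1(jnp.array([250.]), 200., 300., jnp.array(1.))
  outside = date_loss_l1(jnp.array([350.]), 200., 300., jnp.array(1.))
  # inside == 0.0 and outside == 50.0 (distance to the nearest interval bound).
  return inside, outside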
| ithaca-main | ithaca/util/loss.py |
# Copyright 2021 the Ithaca Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Text processing functions."""
import random
import re
import unicodedata
import numpy as np
def idx_to_text(idxs, alphabet, strip_sos=True, strip_pad=True):
"""Converts a list of indices to a string."""
idxs = np.array(idxs)
out = ''
for i in range(idxs.size):
idx = idxs[i]
if strip_pad and idx == alphabet.pad_idx:
break
elif strip_sos and idx == alphabet.sos_idx:
pass
else:
out += alphabet.idx2char[idx]
return out
def idx_to_text_batch(idxs, alphabet, lengths=None):
"""Converts batched lists of indices to strings."""
b = []
for i in range(idxs.shape[0]):
idxs_i = idxs[i]
if lengths:
idxs_i = idxs_i[:lengths[i]]
b.append(idx_to_text(idxs_i, alphabet))
return b
def random_mask_span(t, geometric_p=0.2, limit_chars=None):
"""Masks a span of sequential words."""
  # Obtain span indices (start inclusive, end exclusive).
span_idx = [(ele.start(), ele.end()) for ele in re.finditer(r'[\w\s]+', t)]
if not span_idx:
return []
# Select a span to mask
span_start, span_end = random.choice(span_idx)
  # Sample a random span length using a geometric distribution.
if geometric_p and limit_chars:
span_len = np.clip(
np.random.geometric(geometric_p),
1, min(limit_chars, span_end - span_start))
elif geometric_p:
span_len = np.clip(
np.random.geometric(geometric_p),
1, span_end - span_start)
elif limit_chars:
span_len = min(limit_chars, span_end - span_start)
else:
raise ValueError('geometric_p or limit_chars should be set.')
# Pick a random start index
span_start = np.random.randint(span_start, span_end - span_len + 1)
assert span_start + span_len <= span_end
# Clip to limit chars
if limit_chars is not None and span_len >= limit_chars:
span_len = limit_chars
# Create mask indices
mask_idx = list(range(span_start, span_start + span_len))
return mask_idx
def random_sentence_swap(sentences, p):
"""Swaps sentences with probability p."""
def swap_sentence(s):
idx_1 = random.randint(0, len(s) - 1)
idx_2 = idx_1
counter = 0
while idx_2 == idx_1:
idx_2 = random.randint(0, len(s) - 1)
counter += 1
if counter > 3:
return s
s[idx_1], s[idx_2] = s[idx_2], s[idx_1]
return s
new_sentences = sentences.copy()
n = int(p * len(sentences))
for _ in range(n):
new_sentences = swap_sentence(new_sentences)
return new_sentences
def random_word_delete(sentence, p):
"""Deletes a word from a sentence with probability p."""
words = sentence.split(' ')
# Return if one word.
if len(words) == 1:
return words[0]
# Randomly delete words.
new_words = []
for word in words:
if random.uniform(0, 1) > p:
new_words.append(word)
  # If all words were removed, return a random one.
if not new_words:
rand_int = random.randint(0, len(words) - 1)
return words[rand_int]
sentence = ' '.join(new_words)
return sentence
def random_word_swap(sentence, p):
"""Swaps words from a sentence with probability p."""
def swap_word(new_words):
idx_1 = random.randint(0, len(new_words) - 1)
idx_2 = idx_1
counter = 0
while idx_2 == idx_1:
idx_2 = random.randint(0, len(new_words) - 1)
counter += 1
if counter > 3:
return new_words
new_words[idx_1], new_words[idx_2] = new_words[idx_2], new_words[idx_1]
return new_words
words = sentence.split(' ')
new_words = words.copy()
n = int(p * len(words))
for _ in range(n):
new_words = swap_word(new_words)
sentence = ' '.join(new_words)
return sentence
def strip_accents(s):
return ''.join(
c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn')
def text_to_idx(t, alphabet):
"""Converts a string to character indices."""
return np.array([alphabet.char2idx[c] for c in t], dtype=np.int32)
def text_to_word_idx(t, alphabet):
"""Converts a string to word indices."""
out = np.full(len(t), alphabet.word2idx[alphabet.unk], dtype=np.int32)
for m in re.finditer(r'\w+', t):
if m.group() in alphabet.word2idx:
out[m.start():m.end()] = alphabet.word2idx[m.group()]
return out
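# Illustrative sketch (editorial addition, not part of the original library):
# text_to_word_idx fills every character position of a known word with that
# word's index and leaves the rest at the unknown-word index. The tiny alphabet
# stand-in below is hypothetical; the real GreekAlphabet provides the same
# attributes.
def _text_to_word_idx_example():
  import types
  alphabet = types.SimpleNamespace(unk='<unk>', word2idx={'<unk>': 0, 'kai': 1})
  return text_to_word_idx('kai de', alphabet)
  # -> array([1, 1, 1, 0, 0, 0], dtype=int32)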
| ithaca-main | ithaca/util/text.py |