python_code | repo_name | file_path
---|---|---|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow Lite Python Interface: Sanity check."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.lite.python import convert
from tensorflow.lite.python import lite_constants
from tensorflow.lite.python import op_hint
from tensorflow.lite.python.interpreter import Interpreter
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.framework.graph_util_impl import _bfs_for_reachable_nodes
from tensorflow.python.framework.graph_util_impl import _extract_graph_summary
from tensorflow.python.framework.graph_util_impl import _node_name
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
@test_util.run_v1_only("Incompatible with 2.0.")
class ConvertTest(test_util.TensorFlowTestCase):
def testBasic(self):
in_tensor = array_ops.placeholder(shape=[1, 16, 16, 3],
dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Try running on valid graph
tflite_model = convert.toco_convert(sess.graph_def, [in_tensor],
[out_tensor])
self.assertTrue(tflite_model)
# TODO(aselle): remove tests that fail (we must get TOCO to stop failing
# fatally all the time).
# Try running on identity graph (known fail)
# with self.assertRaisesRegexp(RuntimeError, "!model->operators.empty()"):
# result = convert.toco_convert(sess.graph_def, [in_tensor], [in_tensor])
def testQuantization(self):
in_tensor = array_ops.placeholder(shape=[1, 16, 16, 3],
dtype=dtypes.float32)
out_tensor = array_ops.fake_quant_with_min_max_args(in_tensor + in_tensor,
min=0., max=1.)
sess = session.Session()
tflite_model = convert.toco_convert(
sess.graph_def, [in_tensor], [out_tensor],
inference_type=lite_constants.QUANTIZED_UINT8,
quantized_input_stats=[(0., 1.)])
self.assertTrue(tflite_model)
def testQuantizationInvalid(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = array_ops.fake_quant_with_min_max_args(
in_tensor + in_tensor, min=0., max=1.)
sess = session.Session()
with self.assertRaises(ValueError) as error:
convert.toco_convert(
sess.graph_def, [in_tensor], [out_tensor],
inference_type=lite_constants.QUANTIZED_UINT8)
self.assertEqual(
"std_dev and mean must be defined when inference_input_type is "
"QUANTIZED_UINT8.", str(error.exception))
def testGraphDefBasic(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name="input")
_ = in_tensor + in_tensor
sess = session.Session()
tflite_model = convert.toco_convert_graph_def(
sess.graph_def, [("input", [1, 16, 16, 3])], ["add"],
inference_type=lite_constants.FLOAT)
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual("input", input_details[0]["name"])
self.assertEqual(np.float32, input_details[0]["dtype"])
self.assertTrue(([1, 16, 16, 3] == input_details[0]["shape"]).all())
self.assertEqual((0., 0.), input_details[0]["quantization"])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual("add", output_details[0]["name"])
self.assertEqual(np.float32, output_details[0]["dtype"])
self.assertTrue(([1, 16, 16, 3] == output_details[0]["shape"]).all())
self.assertEqual((0., 0.), output_details[0]["quantization"])
def testGraphDefQuantization(self):
in_tensor_1 = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name="inputA")
in_tensor_2 = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name="inputB")
_ = array_ops.fake_quant_with_min_max_args(
in_tensor_1 + in_tensor_2, min=0., max=1., name="output")
sess = session.Session()
input_arrays_map = [("inputA", [1, 16, 16, 3]), ("inputB", [1, 16, 16, 3])]
output_arrays = ["output"]
tflite_model = convert.toco_convert_graph_def(
sess.graph_def,
input_arrays_map,
output_arrays,
inference_type=lite_constants.QUANTIZED_UINT8,
quantized_input_stats=[(0., 1.), (0., 1.)])
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(2, len(input_details))
self.assertEqual("inputA", input_details[0]["name"])
self.assertEqual(np.uint8, input_details[0]["dtype"])
self.assertTrue(([1, 16, 16, 3] == input_details[0]["shape"]).all())
self.assertEqual((1., 0.),
input_details[0]["quantization"]) # scale, zero_point
self.assertEqual("inputB", input_details[1]["name"])
self.assertEqual(np.uint8, input_details[1]["dtype"])
self.assertTrue(([1, 16, 16, 3] == input_details[1]["shape"]).all())
self.assertEqual((1., 0.),
input_details[1]["quantization"]) # scale, zero_point
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual("output", output_details[0]["name"])
self.assertEqual(np.uint8, output_details[0]["dtype"])
self.assertTrue(([1, 16, 16, 3] == output_details[0]["shape"]).all())
self.assertTrue(output_details[0]["quantization"][0] > 0) # scale
def testGraphDefQuantizationInvalid(self):
in_tensor_1 = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name="inputA")
in_tensor_2 = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name="inputB")
_ = array_ops.fake_quant_with_min_max_args(
in_tensor_1 + in_tensor_2, min=0., max=1., name="output")
sess = session.Session()
input_arrays_map = [("inputA", [1, 16, 16, 3]), ("inputB", [1, 16, 16, 3])]
output_arrays = ["output"]
with self.assertRaises(ValueError) as error:
convert.toco_convert_graph_def(
sess.graph_def,
input_arrays_map,
output_arrays,
inference_type=lite_constants.QUANTIZED_UINT8)
self.assertEqual(
"std_dev and mean must be defined when inference_input_type is "
"QUANTIZED_UINT8.", str(error.exception))
@test_util.run_v1_only("Incompatible with 2.0.")
class ConvertTestOpHint(test_util.TensorFlowTestCase):
"""Test the hint to stub functionality."""
def _getGraphOpTypes(self, graphdef, output_nodes):
"""Returns used op types in `graphdef` reachable from `output_nodes`.
This is used to check that after the stub transformation the expected
nodes are there.
NOTE: this is not an exact test that the graph is the correct output, but
it balances compact expressibility of the test with sanity checking.
Args:
graphdef: TensorFlow proto graphdef.
output_nodes: A list of output node names that we need to reach.
Returns:
A set of node types reachable from `output_nodes`.
"""
name_to_input_name, name_to_node, _ = (
_extract_graph_summary(graphdef))
# Find all nodes that are needed by the outputs
used_node_names = _bfs_for_reachable_nodes(output_nodes, name_to_input_name)
return set([name_to_node[node_name].op for node_name in used_node_names])
def _countIdentities(self, nodes):
"""Count the number of "Identity" op types in the list of proto nodes.
Args:
nodes: NodeDefs of the graph.
Returns:
The number of nodes with op type "Identity" found.
"""
return len([x for x in nodes if x.op == "Identity"])
def testSwishLiteHint(self):
"""Makes a custom op swish and makes sure it gets converted as a unit."""
image = array_ops.constant([1., 2., 3., 4.])
swish_scale = array_ops.constant(1.0)
def _swish(input_tensor, scale):
custom = op_hint.OpHint("cool_activation")
input_tensor, scale = custom.add_inputs(input_tensor, scale)
output = math_ops.sigmoid(input_tensor) * input_tensor * scale
output, = custom.add_outputs(output)
return output
output = array_ops.identity(_swish(image, swish_scale), name="ModelOutput")
with self.cached_session() as sess:
# check if identities have been put into the graph (2 input, 1 output,
# and 1 final output).
self.assertEqual(self._countIdentities(sess.graph_def.node), 4)
stubbed_graphdef = op_hint.convert_op_hints_to_stubs(
graph_def=sess.graph_def)
self.assertEqual(
self._getGraphOpTypes(
stubbed_graphdef,
output_nodes=[op_hint._tensor_name_base(output.name)]),
set(["cool_activation", "Const", "Identity"]))
def testScaleAndBiasAndIdentity(self):
"""This tests a scaled add which has 3 inputs and 2 outputs."""
a = array_ops.constant(1.)
x = array_ops.constant([2., 3.])
b = array_ops.constant([4., 5.])
def _scaled_and_bias_and_identity(a, x, b):
custom = op_hint.OpHint("scale_and_bias_and_identity")
a, x, b = custom.add_inputs(a, x, b)
return custom.add_outputs(a * x + b, x)
output = array_ops.identity(_scaled_and_bias_and_identity(a, x, b),
name="ModelOutput")
with self.cached_session() as sess:
# make sure one identity for each input (3) and output (2) => 3 + 2 = 5
# +1 for the final output
self.assertEqual(self._countIdentities(sess.graph_def.node), 6)
stubbed_graphdef = op_hint.convert_op_hints_to_stubs(
graph_def=sess.graph_def)
self.assertEqual(
self._getGraphOpTypes(
stubbed_graphdef,
output_nodes=[op_hint._tensor_name_base(output.name)]),
set(["scale_and_bias_and_identity", "Const", "Identity", "Pack"]))
def testTwoFunctions(self):
"""Tests if two functions are converted correctly."""
a = array_ops.constant([1.])
b = array_ops.constant([1.])
def _double_values(x):
custom = op_hint.OpHint("add_test")
x, = custom.add_inputs(x)
output = math_ops.multiply(x, x)
output, = custom.add_outputs(output)
return output
output = array_ops.identity(
math_ops.add(_double_values(a), _double_values(b)), name="ModelOutput")
with self.cached_session() as sess:
# make sure one identity for each input (2) and output (2) => 2 + 2
# +1 for the final output
self.assertEqual(self._countIdentities(sess.graph_def.node), 5)
stubbed_graphdef = op_hint.convert_op_hints_to_stubs(
graph_def=sess.graph_def)
self.assertEqual(
self._getGraphOpTypes(
stubbed_graphdef,
output_nodes=[op_hint._tensor_name_base(output.name)]),
set(["add_test", "Const", "Identity", "Add"]))
def _get_input_index(self, x):
return x.op.node_def.attr[op_hint.OpHint.FUNCTION_INPUT_INDEX_ATTR].i
def _get_output_index(self, x):
return x.op.node_def.attr[op_hint.OpHint.FUNCTION_OUTPUT_INDEX_ATTR].i
def _get_sort_index(self, x):
return x.op.node_def.attr[op_hint.OpHint.FUNCTION_SORT_INDEX_ATTR].i
def testTags(self):
"""Test if multiple args with the same tag are grouped."""
a = array_ops.constant([1.])
b = array_ops.constant([2.])
c = array_ops.constant([3.])
d = array_ops.constant([4.])
custom = op_hint.OpHint("test_tag")
a = custom.add_input(a, tag="mytag",
aggregate=op_hint.OpHint.AGGREGATE_STACK)
b, = custom.add_inputs(b)
c = custom.add_input(c, tag="mytag",
aggregate=op_hint.OpHint.AGGREGATE_STACK)
d = custom.add_input(d, tag="mytag2",
aggregate=op_hint.OpHint.AGGREGATE_STACK)
res = math_ops.add(math_ops.mul(a, b), math_ops.mul(c, b))
custom.add_outputs([res])
with self.cached_session():
self.assertEqual(self._get_input_index(a), 0)
self.assertEqual(self._get_sort_index(a), 0)
self.assertEqual(self._get_input_index(b), 1)
self.assertEqual(self._get_sort_index(b), 0)
self.assertEqual(self._get_input_index(c), 0)
self.assertEqual(self._get_sort_index(c), 1)
def testOverrideIndex(self):
a = array_ops.constant([1.])
b = array_ops.constant([2.])
c = array_ops.constant([3.])
custom = op_hint.OpHint("test_override")
b = custom.add_input(b) # should auto assign 0
a = custom.add_input(a, index_override=1)
c = custom.add_input(c) # should auto assign 2
with self.cached_session():
self.assertEqual(self._get_input_index(a), 1)
self.assertEqual(self._get_input_index(b), 0)
self.assertEqual(self._get_input_index(c), 2)
def testAggregate(self):
a = array_ops.constant([3., 4.])
b = array_ops.constant([5., 6.])
hint = op_hint.OpHint("agg")
a0, a1 = array_ops.unstack(a)
b0, b1 = array_ops.unstack(b)
a0 = hint.add_input(a0, tag="c", aggregate=op_hint.OpHint.AGGREGATE_STACK)
b0 = hint.add_input(b0, tag="n", aggregate=op_hint.OpHint.AGGREGATE_STACK)
a1 = hint.add_input(a1, tag="c", aggregate=op_hint.OpHint.AGGREGATE_STACK)
b1 = hint.add_input(b1, tag="n", aggregate=op_hint.OpHint.AGGREGATE_STACK)
c0 = math_ops.add(a0, b0, name="addleft")
c1 = math_ops.add(a1, b1, name="addright")
c0 = hint.add_output(
c0, tag="out", aggregate=op_hint.OpHint.AGGREGATE_STACK)
c1 = hint.add_output(
c1, tag="out", aggregate=op_hint.OpHint.AGGREGATE_STACK)
curr = array_ops.stack([c0, c1])
output = array_ops.identity(curr, name="FINAL_OUTPUT")
with self.cached_session() as sess:
stubbed_graphdef = op_hint.convert_op_hints_to_stubs(
graph_def=sess.graph_def)
self.assertEqual(
self._getGraphOpTypes(
stubbed_graphdef,
output_nodes=[op_hint._tensor_name_base(output.name)]),
set(["agg", "Const", "Identity"]))
def testFindHintedOutputNodes(self):
"""Test if all hinted output nodes are correctly found."""
def _build_ophinted_op(name, input1, input2):
custom_op = op_hint.OpHint(name)
input1 = custom_op.add_input(input1)
input2 = custom_op.add_input(input2)
output = math_ops.mul(input1, input2)
return custom_op.add_output(output)
output_1 = _build_ophinted_op("custom_op_1", array_ops.constant([1.]),
array_ops.constant([2.]))
output_2 = _build_ophinted_op("custom_op_2", array_ops.constant([3.]),
array_ops.constant([4.]))
with self.cached_session() as sess:
hinted_outputs_nodes = op_hint.find_all_hinted_output_nodes(sess)
expected_hinted_output_nodes = [
_node_name(output_1.name),
_node_name(output_2.name)
]
self.assertEqual(
len(hinted_outputs_nodes), len(expected_hinted_output_nodes))
if __name__ == "__main__":
test.main()
| tensorflow-master | tensorflow/lite/python/convert_test.py |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Replaces a subgraph of a TensorFlow GraphDef with a single node.
In conjunction with TOCO's --allow_custom_ops flag, this script allows selected
portions of a TensorFlow GraphDef to be executed by custom code.
Example:
bazel run tensorflow/lite/python:create_custom_op -- \
--input_graph=/tmp/input.pb \
--output_graph=/tmp/output.pb \
--inputs=concat,concat_1 \
--outputs=detection_classes \
--op_definition='op:"PostProcessing" attr{key:"num" value:{i:10}}'
The above will identify a subgraph starting at nodes 'concat' and 'concat_1',
and ending at 'detection_classes'. All nodes in between will be removed and
replaced by a new op called 'PostProcessing'.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import uuid as _uuid
from absl import app
from absl import flags
from google.protobuf import text_format
from tensorflow.contrib.framework.python.framework.graph_util import fuse_op
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.core.framework import types_pb2
from tensorflow.python.platform import gfile
FLAGS = flags.FLAGS
flags.DEFINE_string("input_graph", "", "Binary graphdef to load.")
flags.DEFINE_string("output_graph", "", "Resulting binary graphdef.")
flags.DEFINE_string("inputs", "",
"Comma-separated list of inputs to the subgraph.")
flags.DEFINE_string("outputs", "",
"Comma-separated list of outputs of the subgraph.")
flags.DEFINE_string("op_definition", "",
"A text NodeDef defining the contents of the custom op.")
def _read_graph_def(filename):
if not gfile.Exists(filename):
raise ValueError("Input graph file '" + filename + "' does not exist!")
graph_def = graph_pb2.GraphDef()
with gfile.GFile(filename, "rb") as f:
graph_def.ParseFromString(f.read())
return graph_def
def _write_graph_def(graph_def, filename):
if not filename:
raise ValueError("Output graph file not specified")
with gfile.Open(filename, "wb") as f:
f.write(graph_def.SerializeToString())
def _collapse_subgraph(graph_def, inputs, outputs, op_definition):
"""Substitute a custom op for the subgraph delimited by inputs and outputs."""
name = _uuid.uuid1().hex
# We need a default type, but it can be changed using 'op_definition'.
default_type = types_pb2.DT_FLOAT
new_graph = fuse_op(
graph_def=graph_def,
input_nodes=inputs,
output_nodes=outputs,
output_dtypes=[default_type for _ in outputs],
output_quantized=False,
op_name=name,
op_type="CustomTfLiteOp")
node_def = node_def_pb2.NodeDef()
text_format.Parse(op_definition, node_def)
for node in new_graph.node:
if node.name == name:
node.MergeFrom(node_def)
return new_graph
def main(argv):
del argv # unused
graph = _read_graph_def(filename=flags.FLAGS.input_graph)
graph = _collapse_subgraph(
graph_def=graph,
inputs=flags.FLAGS.inputs.split(","),
outputs=flags.FLAGS.outputs.split(","),
op_definition=flags.FLAGS.op_definition)
_write_graph_def(graph_def=graph, filename=flags.FLAGS.output_graph)
if __name__ == "__main__":
app.run(main)
| tensorflow-master | tensorflow/lite/python/create_custom_op.py |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Constants for TFLite."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.lite.toco import toco_flags_pb2 as _toco_flags_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.util.all_util import remove_undocumented
from tensorflow.python.util.tf_export import tf_export as _tf_export
FLOAT = dtypes.float32
INT32 = dtypes.int32
INT64 = dtypes.int64
STRING = dtypes.string
QUANTIZED_UINT8 = dtypes.uint8
INT8 = dtypes.int8
COMPLEX64 = dtypes.complex64
TENSORFLOW_GRAPHDEF = _toco_flags_pb2.TENSORFLOW_GRAPHDEF
TFLITE = _toco_flags_pb2.TFLITE
GRAPHVIZ_DOT = _toco_flags_pb2.GRAPHVIZ_DOT
_tf_export(v1=["lite.constants.FLOAT"]).export_constant(__name__, "FLOAT")
_tf_export(v1=["lite.constants.INT32"]).export_constant(__name__, "INT32")
_tf_export(v1=["lite.constants.INT64"]).export_constant(__name__, "INT64")
_tf_export(v1=["lite.constants.STRING"]).export_constant(__name__, "STRING")
_tf_export(v1=["lite.constants.QUANTIZED_UINT8"]).export_constant(
__name__, "QUANTIZED_UINT8")
_tf_export(v1=["lite.constants.INT8"]).export_constant(__name__, "INT8")
_tf_export(v1=["lite.constants.TFLITE"]).export_constant(__name__, "TFLITE")
_tf_export(v1=["lite.constants.GRAPHVIZ_DOT"]).export_constant(
__name__, "GRAPHVIZ_DOT")
# Currently the default mode of operation is to shell to another python process
# to protect against crashes. However, it breaks some dependent targets because
# it forces us to depend on an external py_binary. The experimental API doesn't
# have that drawback.
EXPERIMENTAL_USE_TOCO_API_DIRECTLY = False
_allowed_symbols = [
"FLOAT",
"INT32",
"INT64",
"STRING",
"QUANTIZED_UINT8",
"INT8",
"COMPLEX64",
"TENSORFLOW_GRAPHDEF",
"TFLITE",
"GRAPHVIZ_DOT",
"EXPERIMENTAL_USE_TOCO_API_DIRECTLY",
]
remove_undocumented(__name__, _allowed_symbols)
| tensorflow-master | tensorflow/lite/python/lite_constants.py |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrapper for post training quantization with calibration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.util.lazy_loader import LazyLoader
# Lazy load since some of the performance benchmark skylark rules
# break dependencies. Must use double quotes to match code internal rewrite
# rule.
_calibration_wrapper = LazyLoader(
"_calibration_wrapper", globals(),
"tensorflow.lite.python.optimize."
"tensorflow_lite_wrap_calibration_wrapper")
class Calibrator(object):
"""Calibrates a floating point model and then quantizes it.
This is an internal class, not a public interface.
"""
def __init__(self, model_content):
"""Constructor.
Args:
model_content: Content of a TF-Lite Flatbuffer file.
Raises:
ValueError: If the calibrator was unable to open the model.
"""
if not model_content:
raise ValueError("`model_content` must be specified.")
try:
self._calibrator = (_calibration_wrapper.CalibrationWrapper
.CreateWrapperCPPFromBuffer(model_content))
except Exception as e:
raise ValueError("Failed to parse the model: %s." % e)
if not self._calibrator:
raise ValueError("Failed to parse the model.")
def calibrate_and_quantize(self, dataset_gen, input_type, output_type,
allow_float):
"""Calibrates the model with specified generator and then quantizes it.
Returns:
A quantized model.
Args:
dataset_gen: A generator that generates calibration samples.
input_type: A tf.dtype representing the desired real-value input type.
output_type: A tf.dtype representing the desired real-value output type.
allow_float: A boolean. False if the resulting model cannot perform float
computation, useful when targeting an integer-only backend.
If False, an error will be thrown if an operation cannot be
quantized, otherwise the model will fallback to float ops.
"""
self._calibrator.Prepare()
for calibration_sample in dataset_gen():
self._calibrator.FeedTensor(calibration_sample)
return self._calibrator.QuantizeModel(
np.dtype(input_type.as_numpy_dtype()).num,
np.dtype(output_type.as_numpy_dtype()).num, allow_float)
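# Minimal usage sketch (illustrative only, not part of the original module);
# the model path and input shape below are hypothetical:
#
#   from tensorflow.lite.python import lite_constants as constants
#   float_model = open("/path/to/model.tflite", "rb").read()
#   calibrator = Calibrator(float_model)
#   def dataset_gen():
#     for _ in range(10):
#       yield [np.ones(shape=(1, 5, 5, 3), dtype=np.float32)]
#   quantized_model = calibrator.calibrate_and_quantize(
#       dataset_gen, constants.FLOAT, constants.FLOAT, allow_float=False)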
| tensorflow-master | tensorflow/lite/python/optimize/calibrator.py |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Calibrator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.lite.python import lite_constants as constants
from tensorflow.lite.python.optimize import calibrator as _calibrator
from tensorflow.python.framework import test_util
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import test
class CalibratorTest(test_util.TensorFlowTestCase):
def test_calibration_with_quantization(self):
model_path = resource_loader.get_path_to_datafile(
'test_data/mobilenet_like_model.bin')
float_model = open(model_path, 'rb').read()
quantizer = _calibrator.Calibrator(float_model)
# Input generator for the model.
def input_gen():
for _ in range(10):
yield [np.ones(shape=(1, 5, 5, 3), dtype=np.float32)]
quantized_model = quantizer.calibrate_and_quantize(input_gen,
constants.FLOAT,
constants.FLOAT, False)
self.assertIsNotNone(quantized_model)
def test_calibration_with_quantization_allow_float(self):
model_path = resource_loader.get_path_to_datafile(
'test_data/mobilenet_like_model.bin')
float_model = open(model_path, 'rb').read()
quantizer = _calibrator.Calibrator(float_model)
# Input generator for the model.
def input_gen():
for _ in range(10):
yield [np.ones(shape=(1, 5, 5, 3), dtype=np.float32)]
quantized_model = quantizer.calibrate_and_quantize(input_gen,
constants.FLOAT,
constants.FLOAT, True)
self.assertIsNotNone(quantized_model)
def test_calibration_with_quantization_multiple_inputs(self):
# Load multi add model from test data.
# This model has 4 inputs of size (1, 8, 8, 3).
model_path = resource_loader.get_path_to_datafile(
'../../testdata/multi_add.bin')
float_model = open(model_path, 'rb').read()
quantizer = _calibrator.Calibrator(float_model)
# Input generator for the model.
def input_gen():
for _ in range(10):
yield [np.ones(shape=(1, 8, 8, 3), dtype=np.float32) for _ in range(4)]
quantized_model = quantizer.calibrate_and_quantize(input_gen,
constants.FLOAT,
constants.FLOAT, False)
self.assertIsNotNone(quantized_model)
def test_invalid_model_buffer(self):
float_model = b'\0' * 100
with self.assertRaisesWithRegexpMatch(ValueError,
'Failed to parse the model'):
_calibrator.Calibrator(float_model)
def test_empty_calibrator_gen(self):
model_path = resource_loader.get_path_to_datafile(
'test_data/mobilenet_like_model.bin')
float_model = open(model_path, 'rb').read()
quantizer = _calibrator.Calibrator(float_model)
def empty_input_gen():
for i in ():
yield i
with self.assertRaises(RuntimeError):
quantizer.calibrate_and_quantize(empty_input_gen, constants.FLOAT,
constants.FLOAT, False)
def test_invalid_shape_calibrator_gen(self):
model_path = resource_loader.get_path_to_datafile(
'test_data/mobilenet_like_model.bin')
float_model = open(model_path, 'rb').read()
quantizer = _calibrator.Calibrator(float_model)
# Input generator with incorrect shape.
def input_gen():
for _ in range(10):
yield [np.ones(shape=(1, 2, 2, 3), dtype=np.float32)]
with self.assertRaisesWithRegexpMatch(ValueError, 'Dimension mismatch'):
quantizer.calibrate_and_quantize(input_gen, constants.FLOAT,
constants.FLOAT, False)
def test_invalid_type_calibrator_gen(self):
model_path = resource_loader.get_path_to_datafile(
'test_data/mobilenet_like_model.bin')
float_model = open(model_path, 'rb').read()
quantizer = _calibrator.Calibrator(float_model)
# Input generator with incorrect dtype.
def input_gen():
for _ in range(10):
yield np.ones(shape=(1, 5, 5, 3), dtype=np.int32)
with self.assertRaises(ValueError):
quantizer.calibrate_and_quantize(input_gen, constants.FLOAT,
constants.FLOAT, False)
if __name__ == '__main__':
test.main()
| tensorflow-master | tensorflow/lite/python/optimize/calibrator_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Make HTML tables that report where TF and TOCO failed to convert models.
This is primarily used by generate_examples.py. See it or
`make_report_table` for more details on usage.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cgi
import json
FAILED = "FAILED"
SUCCESS = "SUCCESS"
NOTRUN = "NOTRUN"
def make_report_table(fp, title, reports):
"""Make an HTML report of the success/failure reports.
Args:
fp: File-like object in which to put the html.
title: "Title of the zip file this pertains to."
reports: a list of conversion attempts. (report_args, report_vals) i.e.
({"shape": [1,2,3], "type": "tf.float32"},
{"tf": "SUCCESS", "toco": "FAILURE", "toco_log": "Unsupported type.",
"tf_log": ""})
"""
# Sort reports by TOCO failure and then by TF failure (reversed).
reports.sort(key=lambda x: x[1]["toco"], reverse=False)
reports.sort(key=lambda x: x[1]["tf"], reverse=True)
def result_cell(x, row, col):
"""Produce a cell with the condition string `x`."""
s = cgi.escape(repr(x), quote=True)
color = "#44ff44" if x == SUCCESS else (
"#ff4444" if x == FAILED else "#eeeeee")
handler = "ShowLog(%d, %d)" % (row, col)
fp.write("<td style='background-color: %s' onclick='%s'>%s</td>\n" % (
color, handler, s))
fp.write("""<html>
<head>
<title>tflite report</title>
<style>
body { font-family: Arial; }
th { background-color: #555555; color: #eeeeee; }
td { vertical-align: top; }
td.horiz {width: 50%;}
pre { white-space: pre-wrap; word-break: keep-all; }
table {width: 100%;}
</style>
</head>
""")
# Write the log data to a javascript variable and also make a function
# in javascript to show the log when an item is clicked.
fp.write("<script> \n")
fp.write("""
function ShowLog(row, col) {
var log = document.getElementById("log");
log.innerHTML = "<pre>" + data[row][col] + "</pre>";
}
""")
fp.write("var data = \n")
fp.write(json.dumps([[cgi.escape(x[1]["tf_log"], quote=True),
cgi.escape(x[1]["toco_log"], quote=True)]
for x in reports]))
fp.write(";</script>\n")
# Write the main table and use onclick on the items that have log items.
fp.write("""
<body>
<h1>TOCO Conversion</h1>
<h2>%s</h2>
""" % title)
# Get a list of keys that are in any of the records.
param_keys = {}
for params, _ in reports:
for k in params.keys():
param_keys[k] = True
fp.write("<table>\n")
fp.write("<tr><td class='horiz'>\n")
fp.write("<div style='height:1000px; overflow:auto'>\n")
fp.write("<table>\n")
fp.write("<tr>\n")
for p in param_keys:
fp.write("<th>%s</th>\n" % cgi.escape(p, quote=True))
fp.write("<th>TensorFlow</th>\n")
fp.write("<th>TOCO</th>\n")
fp.write("</tr>\n")
for idx, (params, vals) in enumerate(reports):
fp.write("<tr>\n")
for p in param_keys:
fp.write(" <td>%s</td>\n" % cgi.escape(repr(params[p]), quote=True))
result_cell(vals["tf"], idx, 0)
result_cell(vals["toco"], idx, 1)
fp.write("</tr>\n")
fp.write("</table>\n")
fp.write("</div>\n")
fp.write("</td>\n")
fp.write("<td class='horiz' id='log'></td></tr>\n")
fp.write("</table>\n")
fp.write("<script>\n")
fp.write("</script>\n")
fp.write("""
</body>
</html>
""")
| tensorflow-master | tensorflow/lite/testing/generate_examples_report.py |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generate a series of TensorFlow graphs that become tflite test cases.
Usage:
generate_examples <output directory>
bazel run //tensorflow/lite/testing:generate_examples
To more easily debug failures use (or override) the --save_graphdefs flag to
place text proto graphdefs into the generated zip files.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import itertools
import operator
import os
import random
import re
import string
import tempfile
import traceback
import zipfile
import numpy as np
from six import StringIO
from six.moves import xrange
# TODO(aselle): Disable GPU for now
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
# pylint: disable=g-import-not-at-top
import tensorflow as tf
from google.protobuf import text_format
# TODO(aselle): switch to TensorFlow's resource_loader
from tensorflow.lite.testing import generate_examples_report as report_lib
from tensorflow.lite.testing import string_util_wrapper
from tensorflow.python.framework import test_util
from tensorflow.python.framework import graph_util as tf_graph_util
from tensorflow.python.ops import rnn
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import spectral_ops_test_util
RANDOM_SEED = 342
TEST_INPUT_DEPTH = 3
# A map from regular expression to bug number. Any test failure with label
# matching the expression will be considered due to the corresponding bug.
KNOWN_BUGS = {
# TOCO doesn't support scalars as input.
# Concat doesn't work with a single input tensor
r"concat.*num_tensors=1": "67378344",
# Transposition in MatMul is not fully supported.
"fully_connected.*transpose_a=True": "67586970",
# Softmax graphs are too complex.
r"softmax.*dim=0": "67749831",
# BatchToSpaceND only supports 4D tensors.
r"batch_to_space_nd.*input_shape=\[8,2,2,2,1,1\]": "70594733",
# Div will use floordiv.
r"div.*int32": "72051395",
}
class Options(object):
"""All options for example generation."""
def __init__(self):
# Directory where the outputs will go.
self.output_path = None
# Particular zip to output.
self.zip_to_output = None
# Path to toco tool.
self.toco = None
# If a particular model is affected by a known bug, count it as a Toco
# error.
self.known_bugs_are_errors = False
# Whether to ignore converter errors; if False, an exception is raised
# when any converter error is encountered.
self.ignore_converter_errors = False
# Include intermediate graphdefs in the output zip files.
self.save_graphdefs = False
# Whether the TFLite Flex converter is being used.
self.run_with_flex = False
# The function to convert a TensorFlow model to a TFLite model.
# See the document for `toco_convert` function for its required signature.
# TODO(ycling): Decouple `toco_convert` function from this module, and
# remove the `toco` attribute in this class.
self.tflite_convert_function = toco_convert
# A map from regular expression to bug number. Any test failure with label
# matching the expression will be considered due to the corresponding bug.
self.known_bugs = KNOWN_BUGS
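# Illustrative sketch only (not part of the original file): how an Options
# instance might be configured before invoking one of the registered
# make_*_tests generators through _MAKE_TEST_FUNCTIONS_MAP (defined below).
# The paths and zip name are hypothetical.
#
#   options = Options()
#   options.output_path = "/tmp/tflite_zips"  # hypothetical output directory
#   options.zip_to_output = "relu.zip"        # hypothetical zip file name
#   options.toco = "/path/to/toco"            # hypothetical toco binary path
#   _MAKE_TEST_FUNCTIONS_MAP["make_relu_tests"](options)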
# A map from names to functions which make test cases.
_MAKE_TEST_FUNCTIONS_MAP = {}
# A decorator to register the make test functions.
# Usage:
# All the make_*_test should be registered. Example:
# @register_make_test_function()
# def make_conv_tests(options):
# # ...
# If a function is decorated by other decorators, it's required to specify the
# name explicitly. Example:
# @register_make_test_function(name="make_unidirectional_sequence_lstm_tests")
# @test_util.enable_control_flow_v2
# def make_unidirectional_sequence_lstm_tests(options):
# # ...
def register_make_test_function(name=None):
def decorate(function, name=name):
if name is None:
name = function.__name__
_MAKE_TEST_FUNCTIONS_MAP[name] = function
return decorate
class ExtraTocoOptions(object):
"""Additional toco options besides input, output, shape."""
def __init__(self):
# Whether to ignore control dependency nodes.
self.drop_control_dependency = False
# Allow custom ops in the toco conversion.
self.allow_custom_ops = False
# Rnn states that are used to support rnn / lstm cells.
self.rnn_states = None
# Split the LSTM inputs from 5 inputs to 18 inputs for TFLite.
self.split_tflite_lstm_inputs = None
def toco_options(data_types,
input_arrays,
output_arrays,
shapes,
extra_toco_options=ExtraTocoOptions()):
"""Create TOCO options to process a model.
Args:
data_types: input and inference types used by TOCO.
input_arrays: names of the input tensors
output_arrays: name of the output tensors
shapes: shapes of the input tensors
extra_toco_options: additional toco options
Returns:
the options in a string.
"""
shape_str = ":".join([",".join(str(y) for y in x) for x in shapes if x])
inference_type = "FLOAT"
# TODO(ahentz): if we get multi-input quantization to work we need this
# to change
if data_types[0] == "QUANTIZED_UINT8":
inference_type = "QUANTIZED_UINT8"
s = (" --input_data_types=%s" % ",".join(data_types) +
" --inference_type=%s" % inference_type +
" --input_format=TENSORFLOW_GRAPHDEF" + " --output_format=TFLITE" +
" --input_arrays=%s" % ",".join(input_arrays) +
" --output_arrays=%s" % ",".join(output_arrays))
if shape_str:
s += (" --input_shapes=%s" % shape_str)
if extra_toco_options.drop_control_dependency:
s += " --drop_control_dependency"
if extra_toco_options.allow_custom_ops:
s += " --allow_custom_ops"
if extra_toco_options.rnn_states:
s += (" --rnn_states='" + extra_toco_options.rnn_states + "'")
if extra_toco_options.split_tflite_lstm_inputs is not None:
if extra_toco_options.split_tflite_lstm_inputs:
s += " --split_tflite_lstm_inputs=true"
else:
s += " --split_tflite_lstm_inputs=false"
return s
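# Illustrative example (not in the original file): for a single float input,
#   toco_options(["FLOAT"], ["input"], ["output"], [[1, 16, 16, 3]])
# returns a flag string along the lines of
#   " --input_data_types=FLOAT --inference_type=FLOAT"
#   " --input_format=TENSORFLOW_GRAPHDEF --output_format=TFLITE"
#   " --input_arrays=input --output_arrays=output --input_shapes=1,16,16,3"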
def format_result(t):
"""Convert a tensor to a format that can be used in test specs."""
if t.dtype.kind not in [np.dtype(np.string_).kind, np.dtype(np.object_).kind]:
# Output 9 digits after the point to ensure the precision is good enough.
values = ["{:.9f}".format(value) for value in list(t.flatten())]
return ",".join(values)
else:
return string_util_wrapper.SerializeAsHexString(t.flatten())
def write_examples(fp, examples):
"""Given a list `examples`, write a text format representation.
The file format is CSV-like with a simple repeated pattern. We would like
to use proto here, but we can't yet due to interfacing with the Android
team using this format.
Args:
fp: File-like object to write to.
examples: Example dictionary consisting of keys "inputs" and "outputs".
"""
def write_tensor(fp, x):
"""Write tensor in file format supported by TFLITE example."""
fp.write("dtype,%s\n" % x.dtype)
fp.write("shape," + ",".join(map(str, x.shape)) + "\n")
fp.write("values," + format_result(x) + "\n")
fp.write("test_cases,%d\n" % len(examples))
for example in examples:
fp.write("inputs,%d\n" % len(example["inputs"]))
for i in example["inputs"]:
write_tensor(fp, i)
fp.write("outputs,%d\n" % len(example["outputs"]))
for i in example["outputs"]:
write_tensor(fp, i)
def write_test_cases(fp, model_name, examples):
"""Given a dictionary of `examples`, write a text format representation.
The file format is protocol-buffer-like, even though we don't use proto due
to the needs of the Android team.
Args:
fp: File-like object to write to.
model_name: Filename where the model was written to, relative to filename.
examples: Example dictionary consisting of keys "inputs" and "outputs".
"""
fp.write("load_model: %s\n" % os.path.basename(model_name))
for example in examples:
fp.write("reshape {\n")
for t in example["inputs"]:
fp.write(" input: \"" + ",".join(map(str, t.shape)) + "\"\n")
fp.write("}\n")
fp.write("invoke {\n")
for t in example["inputs"]:
fp.write(" input: \"" + format_result(t) + "\"\n")
for t in example["outputs"]:
fp.write(" output: \"" + format_result(t) + "\"\n")
fp.write(" output_shape: \"" + ",".join([str(dim) for dim in t.shape]) +
"\"\n")
fp.write("}\n")
_TF_TYPE_INFO = {
tf.float32: (np.float32, "FLOAT"),
tf.float16: (np.float16, "FLOAT"),
tf.int32: (np.int32, "INT32"),
tf.uint8: (np.uint8, "QUANTIZED_UINT8"),
tf.int16: (np.int16, "QUANTIZED_INT16"),
tf.int64: (np.int64, "INT64"),
tf.bool: (np.bool, "BOOL"),
tf.string: (np.string_, "STRING"),
}
def create_tensor_data(dtype, shape, min_value=-100, max_value=100):
"""Build tensor data spreading the range [min_value, max_value)."""
if dtype in _TF_TYPE_INFO:
dtype = _TF_TYPE_INFO[dtype][0]
if dtype in (tf.float32, tf.float16):
value = (max_value-min_value)*np.random.random_sample(shape)+min_value
elif dtype in (tf.int32, tf.uint8, tf.int64, tf.int16):
value = np.random.randint(min_value, max_value+1, shape)
elif dtype == tf.bool:
value = np.random.choice([True, False], size=shape)
elif dtype == np.string_:
# Not the best strings, but they will do for some basic testing.
letters = list(string.ascii_uppercase)
return np.random.choice(letters, size=shape).astype(dtype)
return np.dtype(dtype).type(value) if np.isscalar(value) else value.astype(
dtype)
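# Illustrative only: create_tensor_data(tf.float32, [2, 3]) returns a float32
# numpy array of shape (2, 3) with values drawn uniformly from [-100, 100).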
def create_scalar_data(dtype, min_value=-100, max_value=100):
"""Build scalar tensor data range from min_value to max_value exclusively."""
if dtype in _TF_TYPE_INFO:
dtype = _TF_TYPE_INFO[dtype][0]
if dtype in (tf.float32, tf.float16):
value = (max_value - min_value) * np.random.random() + min_value
elif dtype in (tf.int32, tf.uint8, tf.int64, tf.int16):
value = np.random.randint(min_value, max_value + 1)
return np.array(value, dtype=dtype)
def freeze_graph(session, outputs):
"""Freeze the current graph.
Args:
session: TensorFlow session containing the graph.
outputs: List of output tensors
Returns:
The frozen graph_def.
"""
return tf_graph_util.convert_variables_to_constants(
session, session.graph.as_graph_def(), [x.op.name for x in outputs])
@register_make_test_function()
def make_control_dep_tests(options):
"""Make a set of tests that use control dependencies."""
test_parameters = [{
"input_shape": [[], [1, 1, 1, 1], [1, 15, 14, 1], [3, 15, 14, 3]],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
filter_value = tf.zeros((3, 3, TEST_INPUT_DEPTH, 8), tf.float32)
assert_op = tf.assert_greater_equal(input_tensor, input_tensor - 1)
with tf.control_dependencies([assert_op]):
out = tf.nn.conv2d(input_tensor, filter_value,
strides=(1, 1, 1, 1), padding="SAME")
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(tf.float32, parameters["input_shape"])
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
extra_toco_options = ExtraTocoOptions()
extra_toco_options.drop_control_dependency = True
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
extra_toco_options,
expected_tf_failures=3)
def toco_convert(options, graph_def, input_tensors, output_tensors, **kwargs):
"""Convert a model's graph def into a tflite model.
NOTE: this currently shells out to the toco binary, but we would like to
convert to Python API tooling in the future.
Args:
options: An Options instance.
graph_def: A GraphDef object.
input_tensors: List of input tensor tuples `(name, shape, type)`.
output_tensors: List of output tensors (names).
**kwargs: Extra options to be passed.
Returns:
output tflite model, log_txt from conversion
or None, log_txt if it did not convert properly.
"""
# Convert ophint ops if present.
graph_def = tf.lite.experimental.convert_op_hints_to_stubs(
graph_def=graph_def)
graph_def_str = graph_def.SerializeToString()
extra_toco_options = kwargs.get("extra_toco_options", ExtraTocoOptions())
test_params = kwargs.get("test_params", {})
input_arrays = [x[0] for x in input_tensors]
data_types = [_TF_TYPE_INFO[x[2]][1] for x in input_tensors]
if test_params.get("fully_quantize", False):
with tempfile.NamedTemporaryFile() as graphdef_file:
graphdef_file.write(graph_def_str)
graphdef_file.flush()
input_shapes = get_input_shapes_map(input_tensors)
converter = tf.lite.TocoConverter.from_frozen_graph(
graphdef_file.name, input_arrays, output_tensors, input_shapes)
def representative_dataset(input_tensors):
calibration_inputs = []
for _, shape, _ in input_tensors:
if shape:
dims = [dim.value for dim in shape.dims]
calibration_inputs.append(
np.random.uniform(-1, 1, tuple(dims)).astype(np.float32))
return calibration_inputs
def representative_dataset_gen():
for _ in range(100):
yield representative_dataset(input_tensors)
converter.target_spec.supported_ops = [
tf.lite.OpsSet.TFLITE_BUILTINS_INT8
]
converter.representative_dataset = representative_dataset_gen
try:
tflite_model = converter.convert()
return tflite_model, ""
except Exception as e:
log = "{0}\n{1}".format(str(e), traceback.format_exc())
return None, log
else:
# `bin_path` is the path to the toco binary taken from the Options instance
# (see the `toco` attribute of Options); it is used below to build the
# conversion command.
bin_path = options.toco
opts = toco_options(
data_types=data_types,
input_arrays=input_arrays,
shapes=[x[1] for x in input_tensors],
output_arrays=output_tensors,
extra_toco_options=extra_toco_options)
with tempfile.NamedTemporaryFile() as graphdef_file, \
tempfile.NamedTemporaryFile() as output_file, \
tempfile.NamedTemporaryFile("w+") as stdout_file:
graphdef_file.write(graph_def_str)
graphdef_file.flush()
# TODO(aselle): Switch this to subprocess at some point.
if "pb2lite" in bin_path and options.run_with_flex:
opts = ("--input_arrays={0} --output_arrays={1}".format(
",".join(input_arrays), ",".join(output_tensors)))
elif options.run_with_flex:
opts += " --enable_select_tf_ops --force_select_tf_ops"
cmd = ("%s --input_file=%s --output_file=%s %s > %s 2>&1" %
(bin_path, graphdef_file.name, output_file.name, opts,
stdout_file.name))
exit_code = os.system(cmd)
log = (
cmd + "exited with code %d" % exit_code + "\n------------------\n" +
stdout_file.read())
return (None if exit_code != 0 else output_file.read()), log
def get_input_shapes_map(input_tensors):
"""Gets a map of input names to shapes.
Args:
input_tensors: List of input tensor tuples `(name, shape, type)`.
Returns:
{string : list of integers}.
"""
input_arrays = [tensor[0] for tensor in input_tensors]
input_shapes_list = []
for _, shape, _ in input_tensors:
dims = None
if shape:
dims = [dim.value for dim in shape.dims]
input_shapes_list.append(dims)
input_shapes = {
name: shape
for name, shape in zip(input_arrays, input_shapes_list)
if shape
}
return input_shapes
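# Illustrative example (not in the original file): given
#   input_tensors = [("input", tf.TensorShape([1, 16, 16, 3]), tf.float32)]
# this returns {"input": [1, 16, 16, 3]}; entries without a known, non-empty
# shape are skipped.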
def normalize_output_name(output_name):
"""Remove :0 suffix from tensor names."""
return output_name.split(":")[0] if output_name.endswith(
":0") else output_name
# How many test cases we may have in a zip file. Too many test cases will
# slow down the test data generation process.
_MAX_TESTS_PER_ZIP = 500
def make_zip_of_tests(options,
test_parameters,
make_graph,
make_test_inputs,
extra_toco_options=ExtraTocoOptions(),
use_frozen_graph=False,
expected_tf_failures=0):
"""Helper to make a zip file of a bunch of TensorFlow models.
This does a cartesian product of the dictionary of test_parameters and
calls make_graph() for each item in the cartesian product set.
If the graph is built successfully, then make_test_inputs() is called to
build expected input/output value pairs. The model is then converted to tflite
with toco, and the examples are serialized with the tflite model into a zip
file (2 files per item in the cartesian product set).
Args:
options: An Options instance.
test_parameters: Dictionary mapping to lists for each parameter.
e.g. `{"strides": [[1,3,3,1], [1,2,2,1]], "foo": [1.2, 1.3]}`
make_graph: function that takes current parameters and returns tuple
`[input1, input2, ...], [output1, output2, ...]`
make_test_inputs: function taking `curr_params`, `session`, `input_tensors`,
`output_tensors` and returns tuple `(input_values, output_values)`.
extra_toco_options: Additional toco options.
use_frozen_graph: Whether or not freeze graph before toco converter.
expected_tf_failures: Number of times tensorflow is expected to fail in
executing the input graphs. In some cases it is OK for TensorFlow to
fail because one or more combinations of parameters are invalid.
Raises:
RuntimeError: if there are converter errors that can't be ignored.
"""
zip_path = os.path.join(options.output_path, options.zip_to_output)
parameter_count = 0
for parameters in test_parameters:
parameter_count += functools.reduce(
operator.mul, [len(values) for values in parameters.values()])
if parameter_count > _MAX_TESTS_PER_ZIP:
raise RuntimeError(
"Too many parameter combinations for generating '%s'.\n"
"There are %d combinations while the upper limit is %d.\n"
"Having too many combinations will slow down the tests.\n"
"Please consider splitting the test into multiple functions.\n"
% (zip_path, parameter_count, _MAX_TESTS_PER_ZIP))
# TODO(aselle): Make this allow multiple inputs outputs.
archive = zipfile.PyZipFile(zip_path, "w")
zip_manifest = []
convert_report = []
toco_errors = 0
processed_labels = set()
for parameters in test_parameters:
keys = parameters.keys()
for curr in itertools.product(*parameters.values()):
label = zip_path.replace(".zip", "_") + (",".join(
"%s=%r" % z for z in sorted(zip(keys, curr))).replace(" ", ""))
if label[0] == "/":
label = label[1:]
if label in processed_labels:
# Do not populate data for the same label more than once. It will cause
# errors when unzipping.
continue
processed_labels.add(label)
param_dict = dict(zip(keys, curr))
def build_example(label, param_dict_real):
"""Build the model with parameter values set in param_dict_real.
Args:
label: Label of the model (i.e. the filename in the zip).
param_dict_real: Parameter dictionary (arguments to the factories
make_graph and make_test_inputs)
Returns:
(tflite_model_binary, report) where tflite_model_binary is the
serialized flatbuffer as a string and report is a dictionary with
keys `toco_log` (log of toco conversion), `tf_log` (log of tf
conversion), `toco` (a string of success status of the conversion),
`tf` (a string success status of the conversion).
"""
np.random.seed(RANDOM_SEED)
report = {"toco": report_lib.NOTRUN, "tf": report_lib.FAILED}
# Build graph
report["tf_log"] = ""
report["toco_log"] = ""
tf.reset_default_graph()
with tf.device("/cpu:0"):
try:
inputs, outputs = make_graph(param_dict_real)
except (tf.errors.UnimplementedError, tf.errors.InvalidArgumentError,
ValueError):
report["tf_log"] += traceback.format_exc()
return None, report
sess = tf.Session()
try:
baseline_inputs, baseline_outputs = (make_test_inputs(
param_dict_real, sess, inputs, outputs))
except (tf.errors.UnimplementedError, tf.errors.InvalidArgumentError,
ValueError):
report["tf_log"] += traceback.format_exc()
return None, report
report["toco"] = report_lib.FAILED
report["tf"] = report_lib.SUCCESS
# Convert graph to toco
input_tensors = [(input_tensor.name.split(":")[0], input_tensor.shape,
input_tensor.dtype) for input_tensor in inputs]
output_tensors = [normalize_output_name(out.name) for out in outputs]
graph_def = freeze_graph(
sess,
tf.global_variables() + inputs +
outputs) if use_frozen_graph else sess.graph_def
if "split_tflite_lstm_inputs" in param_dict_real:
extra_toco_options.split_tflite_lstm_inputs = param_dict_real[
"split_tflite_lstm_inputs"]
tflite_model_binary, toco_log = options.tflite_convert_function(
options,
graph_def,
input_tensors,
output_tensors,
extra_toco_options=extra_toco_options,
test_params=param_dict_real)
report["toco"] = (report_lib.SUCCESS if tflite_model_binary is not None
else report_lib.FAILED)
report["toco_log"] = toco_log
if True or options.save_graphdefs:
archive.writestr(label + ".pbtxt",
text_format.MessageToString(graph_def),
zipfile.ZIP_DEFLATED)
if tflite_model_binary:
archive.writestr(label + ".bin", tflite_model_binary,
zipfile.ZIP_DEFLATED)
example = {"inputs": baseline_inputs, "outputs": baseline_outputs}
example_fp = StringIO()
write_examples(example_fp, [example])
archive.writestr(label + ".inputs",
example_fp.getvalue(), zipfile.ZIP_DEFLATED)
example_fp2 = StringIO()
write_test_cases(example_fp2, label + ".bin", [example])
archive.writestr(label + "_tests.txt",
example_fp2.getvalue(), zipfile.ZIP_DEFLATED)
zip_manifest.append(label + "\n")
return tflite_model_binary, report
_, report = build_example(label, param_dict)
if report["toco"] == report_lib.FAILED:
ignore_error = False
if not options.known_bugs_are_errors:
for pattern, bug_number in options.known_bugs.items():
if re.search(pattern, label):
print("Ignored converter error due to bug %s" % bug_number)
ignore_error = True
if not ignore_error:
toco_errors += 1
print("-----------------\nconverter error!\n%s\n-----------------\n" %
report["toco_log"])
convert_report.append((param_dict, report))
report_io = StringIO()
report_lib.make_report_table(report_io, zip_path, convert_report)
archive.writestr("report.html", report_io.getvalue())
archive.writestr("manifest.txt", "".join(zip_manifest), zipfile.ZIP_DEFLATED)
# Log statistics of what succeeded
total_conversions = len(convert_report)
tf_success = sum(1 for x in convert_report
if x[1]["tf"] == report_lib.SUCCESS)
toco_success = sum(1 for x in convert_report
if x[1]["toco"] == report_lib.SUCCESS)
percent = 0
if tf_success > 0:
percent = float(toco_success) / float(tf_success) * 100.
tf.logging.info(("Archive %s considered %d graphs, %d TF evaluated graphs "
"and %d TOCO converted graphs (%.1f%%)"), zip_path,
total_conversions, tf_success, toco_success, percent)
tf_failures = parameter_count - tf_success
if tf_failures / parameter_count > 0.8:
raise RuntimeError(("Test for '%s' is not very useful. "
"TensorFlow fails in %d percent of the cases.") %
(zip_path, int(100 * tf_failures / parameter_count)))
if tf_failures != expected_tf_failures:
raise RuntimeError(("Expected TF to fail %d times while generating '%s', "
"but that happened %d times") % (expected_tf_failures,
zip_path, tf_failures))
if not options.ignore_converter_errors and toco_errors > 0:
raise RuntimeError(
"Found %d errors while generating toco models" % toco_errors)
def make_pool_tests(pool_op_in):
"""Make a set of tests to do average pooling.
Args:
pool_op_in: TensorFlow pooling operation to test i.e. `tf.nn.avg_pool2d`.
Returns:
A function representing the true generator (after curried pool_op_in).
"""
pool_op = pool_op_in
def f(options, expected_tf_failures=0):
"""Actual function that generates examples.
Args:
options: An Options instance.
expected_tf_failures: number of expected tensorflow failures.
"""
# Choose a set of parameters
test_parameters = [{
"ksize": [[2, 1, 1, 2], [1, 1, 1, 1], [1, 1, 2, 1], [1, 10, 11, 1]],
"strides": [[2, 1, 1, 2], [1, 1, 1, 1], [1, 1, 2, 1], [1, 10, 11, 1]],
# TODO(aselle): should add in a degenerate shape (e.g. [1, 0, 1, 1]).
"input_shape": [[], [1, 1, 1, 1], [1, 15, 14, 1], [3, 15, 14, 3]],
"padding": ["SAME", "VALID"],
"data_format": ["NHWC"], # TODO(aselle): NCHW would be good
}]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
out = pool_op(
input_tensor,
ksize=parameters["ksize"],
strides=parameters["strides"],
data_format=parameters["data_format"],
padding=parameters["padding"])
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(tf.float32, parameters["input_shape"])
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=expected_tf_failures)
return f
@register_make_test_function()
def make_l2_pool_tests(options):
make_pool_tests(make_l2_pool)(options, expected_tf_failures=80)
@register_make_test_function()
def make_avg_pool_tests(options):
make_pool_tests(tf.nn.avg_pool)(options, expected_tf_failures=80)
@register_make_test_function()
def make_max_pool_tests(options):
make_pool_tests(tf.nn.max_pool)(options, expected_tf_failures=80)
@register_make_test_function()
def make_abs_tests(options):
"""Make a set of tests to do relu."""
# Chose a set of parameters
test_parameters = [{
"input_shape": [[], [1], [2, 3], [1, 1, 1, 1], [1, 3, 4, 3],
[3, 15, 14, 3], [3, 1, 2, 4, 6], [2, 2, 3, 4, 5, 6]],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
out = tf.abs(input_tensor)
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(
np.float32, parameters["input_shape"], min_value=-10, max_value=10)
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_elu_tests(options):
"""Make a set of tests to do (float) tf.nn.elu."""
test_parameters = [
{
"input_shape": [[], [1], [2, 3], [1, 1, 1, 1], [1, 3, 4, 3],
[3, 15, 14, 3], [3, 1, 2, 4, 6], [2, 2, 3, 4, 5, 6]],
},
]
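  # Reference: tf.nn.elu(x) is x for x > 0 and exp(x) - 1 otherwise.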
def build_graph(parameters):
"""Build the graph for the test case."""
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
out = tf.nn.elu(input_tensor)
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
"""Build the inputs for the test case."""
input_values = create_tensor_data(
np.float32, parameters["input_shape"], min_value=-4, max_value=10)
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_identity_tests(options):
"""Make a set of tests to do identity."""
  # Choose a set of parameters
test_parameters = [{
"input_shape": [[], [1], [3, 3]],
"use_snapshot": [False, True],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
    # We add the Multiply before Identity as a workaround to make the test
    # pass when input_shape is scalar.
    # During graph transformation, TOCO will replace the Identity op with
    # Reshape when the input has a shape. However, currently TOCO can't
    # distinguish between a missing shape and a scalar shape. As a result, when
    # the input has a scalar shape, this conversion still fails.
    # TODO(b/129197312): remove the workaround once the bug is fixed.
input_doubled = input_tensor * 2.0
if parameters["use_snapshot"]:
identity_output = array_ops.snapshot(input_doubled)
else:
identity_output = tf.identity(input_doubled)
return [input_tensor], [identity_output]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(
np.float32, parameters["input_shape"], min_value=-4, max_value=10)
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_relu_tests(options):
"""Make a set of tests to do relu."""
  # Choose a set of parameters
test_parameters = [{
"input_shape": [[], [1], [2, 3], [1, 1, 1, 1], [1, 3, 4, 3],
[3, 15, 14, 3], [3, 1, 2, 4, 6], [2, 2, 3, 4, 5, 6]],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
out = tf.nn.relu(input_tensor)
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(
np.float32, parameters["input_shape"], min_value=-4, max_value=10)
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_relu1_tests(options):
"""Make a set of tests to do relu1."""
  # Choose a set of parameters
test_parameters = [{
"input_shape": [[], [1, 1, 1, 1], [1, 3, 4, 3], [3, 15, 14, 3],
[3, 1, 2, 4, 6], [2, 2, 3, 4, 5, 6]],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
# Note that the following is not supported:
# out = tf.maximum(-1.0, tf.minimum(input_tensor, 1.0))
out = tf.minimum(1.0, tf.maximum(input_tensor, -1.0))
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(
np.float32, parameters["input_shape"], min_value=-3, max_value=10)
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_relu6_tests(options):
"""Make a set of tests to do relu6."""
  # Choose a set of parameters
test_parameters = [{
"input_shape": [[], [1, 1, 1, 1], [1, 3, 4, 3], [3, 15, 14, 3],
[3, 1, 2, 4, 6], [2, 2, 3, 4, 5, 6]],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
    out = tf.nn.relu6(input_tensor)
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(
np.float32, parameters["input_shape"], min_value=-3, max_value=10)
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_prelu_tests(options):
"""Make a set of tests to do PReLU."""
test_parameters = [
{
          # The canonical case for image processing is having a 4D `input`
          # (NHWC) and `shared_axes`=[1, 2], so the alpha parameter is per
          # channel.
"input_shape": [[1, 10, 10, 3], [3, 3, 3, 3]],
"shared_axes": [[1, 2], [1]],
},
{
# 2D-3D example. Share the 2nd axis.
"input_shape": [[20, 20], [20, 20, 20]],
"shared_axes": [[1]],
}
]
def build_graph(parameters):
"""Build the graph for the test case."""
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
prelu = tf.keras.layers.PReLU(shared_axes=parameters["shared_axes"])
out = prelu(input_tensor)
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
"""Build the inputs for the test case."""
input_shape = parameters["input_shape"]
input_values = create_tensor_data(
np.float32, input_shape, min_value=-10, max_value=10)
shared_axes = parameters["shared_axes"]
alpha_shape = []
for dim in range(1, len(input_shape)):
alpha_shape.append(1 if dim in shared_axes else input_shape[dim])
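    # For example (illustrative): input_shape [1, 10, 10, 3] with shared_axes
    # [1, 2] gives alpha_shape [1, 1, 3], i.e. one alpha per channel.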
alpha_values = create_tensor_data(np.float32, alpha_shape)
# There should be only 1 trainable variable tensor.
variables = tf.all_variables()
assert len(variables) == 1
sess.run(variables[0].assign(alpha_values))
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
use_frozen_graph=True)
@register_make_test_function()
def make_leaky_relu_tests(options):
"""Make a set of tests to do LeakyRelu."""
test_parameters = [
{
"input_shape": [[], [1], [5], [1, 10, 10, 3], [3, 3, 3, 3]],
"alpha": [0.1, 1.0, 2.0, -0.1, -1.0, -2.0],
},
]
def build_graph(parameters):
"""Build the graph for the test case."""
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
out = tf.nn.leaky_relu(input_tensor, alpha=parameters["alpha"])
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
"""Build the inputs for the test case."""
input_values = create_tensor_data(
np.float32, parameters["input_shape"], min_value=-3, max_value=10)
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
# This function tests various TensorFlow functions that generate a Const op,
# including `tf.ones`, `tf.zeros` and random functions.
@register_make_test_function()
def make_constant_tests(options):
"""Make a set of tests to do constant ops."""
test_parameters = [{
"dtype": [tf.float32, tf.int32],
"input_shape": [[], [1], [2], [1, 1, 1, 1], [2, 2, 2, 2]],
"constant_is_also_output": [True, False],
# This is a regression test for a bug where Toco rejects models with
# unread inputs.
"has_unread_input": [True, False],
}]
def build_graph(parameters):
dummy_input = tf.placeholder(
dtype=parameters["dtype"],
name="input1",
shape=parameters["input_shape"])
constant = tf.constant(
create_tensor_data(parameters["dtype"], parameters["input_shape"]))
outputs = [tf.maximum(dummy_input, constant)]
if parameters["constant_is_also_output"]:
outputs.append(constant)
inputs = [dummy_input]
if parameters["has_unread_input"]:
unread_input = tf.placeholder(
dtype=parameters["dtype"],
name="unread_input",
shape=parameters["input_shape"])
inputs.append(unread_input)
return inputs, outputs
def build_inputs(parameters, sess, inputs, outputs):
dummy_input = np.zeros(
parameters["input_shape"], dtype=_TF_TYPE_INFO[parameters["dtype"]][0])
return [dummy_input], sess.run(outputs, feed_dict={inputs[0]: dummy_input})
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
def make_binary_op_tests(options, binary_operator, expected_tf_failures=0):
"""Make a set of tests to do binary ops with and without broadcast."""
test_parameters = [
# Avoid creating all combinations to keep the test size small.
{
"dtype": [tf.float32, tf.int32],
"input_shape_1": [[1, 3, 4, 3]],
"input_shape_2": [[1, 3, 4, 3]],
"activation": [True],
},
{
"dtype": [tf.float32],
"input_shape_1": [[5]],
"input_shape_2": [[5]],
"activation": [False, True],
},
{
"dtype": [tf.float32, tf.int32, tf.int64],
"input_shape_1": [[1, 3, 4, 3]],
"input_shape_2": [[3]],
"activation": [True, False],
},
{
"dtype": [tf.float32, tf.int32],
"input_shape_1": [[3]],
"input_shape_2": [[1, 3, 4, 3]],
"activation": [True, False],
},
{
"dtype": [tf.float32],
"input_shape_1": [[]],
"input_shape_2": [[]],
"activation": [False],
},
{
"dtype": [tf.float32],
"input_shape_1": [[0]],
"input_shape_2": [[1]],
"activation": [False],
}
]
def build_graph(parameters):
"""Builds the graph given the current parameters."""
input1 = tf.placeholder(
dtype=parameters["dtype"],
name="input1",
shape=parameters["input_shape_1"])
input2 = tf.placeholder(
dtype=parameters["dtype"],
name="input2",
shape=parameters["input_shape_2"])
out = binary_operator(input1, input2)
if parameters["activation"]:
out = tf.nn.relu(out)
return [input1, input2], [out]
def build_inputs(parameters, sess, inputs, outputs):
"""Builds operand inputs for op."""
input1 = create_tensor_data(parameters["dtype"],
parameters["input_shape_1"])
input2 = create_tensor_data(parameters["dtype"],
parameters["input_shape_2"])
return [input1, input2], sess.run(
outputs, feed_dict={
inputs[0]: input1,
inputs[1]: input2
})
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=expected_tf_failures)
def make_reduce_tests(reduce_op,
min_value=-10,
max_value=10,
boolean_tensor_only=False):
"""Make a set of tests to do reduce operation.
Args:
reduce_op: TensorFlow reduce operation to test, i.e. `tf.reduce_mean`.
min_value: min value for created tensor data.
max_value: max value for created tensor data.
boolean_tensor_only: If true, will only generate tensor with boolean value.
Returns:
a function representing the true generator with `reduce_op_in` curried.
"""
def f(options):
"""Actual function that generates examples."""
test_parameters = [
{
"input_dtype": [tf.float32, tf.int32, tf.int64],
"input_shape": [[3, 3, 2, 4]],
"axis": [
0, 1, 2, [0, 1], [0, 2], [1, 2], [0, 1, 2], [1, 0], [2, 0],
[2, 1], [2, 1, 0], [2, 0, 1], -1, -2, -3, [1, -1], [0, -1],
[-1, 0], [-1, -2, -3], [0, 0, 0], [2, 2, 0], [1, 0, -3, -3]
],
"const_axis": [True, False],
"keepdims": [True, False],
},
{
"input_dtype": [tf.float32],
"input_shape": [[1, 8, 8, 3]],
"axis": [
0, 1, 2, 3, [1, 2], [0, 3], [1, 2, 3], [0, 1, 2,
3], [3, 2, 1, 0],
[3, 1, 0, 2], [2, 0], [3, 0], [3, 1], [1, 0], -1, -2, -3, -4,
[0, -2], [2, 3, -1, 0], [3, 1, 2, -3], [3, -4], [2, 2, 2],
[2, 2, 3], [-3, -3, -4], [-3, 2, 1]
],
"const_axis": [True, False],
"keepdims": [True, False],
},
{
"input_dtype": [tf.float32],
"input_shape": [[], [1, 8, 8, 3], [3, 2, 4]],
"axis": [[]], # shape is: [0]
"const_axis": [False],
"keepdims": [True, False],
},
{
"input_dtype": [tf.float32],
"input_shape": [[], [1, 8, 8, 3], [3, 2, 4]],
"axis": [None], # shape is: []
"const_axis": [True],
"keepdims": [True, False],
}
]
def build_graph(parameters):
"""Build the mean op testing graph."""
dtype = parameters["input_dtype"]
if boolean_tensor_only:
dtype = tf.bool
input_tensor = tf.placeholder(
dtype=dtype, name="input", shape=parameters["input_shape"])
# Get axis as either a placeholder or constants.
if parameters["const_axis"]:
axis = parameters["axis"]
input_tensors = [input_tensor]
else:
if isinstance(parameters["axis"], list):
shape = [len(parameters["axis"])]
else:
shape = [] # shape for None or integers.
axis = tf.placeholder(dtype=tf.int32, name="axis", shape=shape)
input_tensors = [input_tensor, axis]
out = reduce_op(
input_tensor, axis=axis, keepdims=parameters["keepdims"])
return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
dtype = parameters["input_dtype"]
if boolean_tensor_only:
dtype = tf.bool
values = [
create_tensor_data(
dtype,
parameters["input_shape"],
min_value=min_value,
max_value=max_value)
]
if not parameters["const_axis"]:
values.append(np.array(parameters["axis"]))
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
return f
@register_make_test_function()
def make_mean_tests(options):
"""Make a set of tests to do mean."""
return make_reduce_tests(tf.reduce_mean)(options)
@register_make_test_function()
def make_sum_tests(options):
"""Make a set of tests to do sum."""
return make_reduce_tests(tf.reduce_sum)(options)
@register_make_test_function()
def make_reduce_prod_tests(options):
"""Make a set of tests to do prod."""
  # Set min/max values to -2 and 2 to avoid overflow.
return make_reduce_tests(tf.reduce_prod, -2, 2)(options)
@register_make_test_function()
def make_reduce_max_tests(options):
"""Make a set of tests to do max."""
return make_reduce_tests(tf.reduce_max)(options)
@register_make_test_function()
def make_reduce_min_tests(options):
"""Make a set of tests to do min."""
return make_reduce_tests(tf.reduce_min)(options)
@register_make_test_function()
def make_reduce_any_tests(options):
"""Make a set of tests to do any."""
return make_reduce_tests(tf.reduce_any, boolean_tensor_only=True)(options)
@register_make_test_function()
def make_exp_tests(options):
"""Make a set of tests to do exp."""
test_parameters = [{
"input_dtype": [tf.float32],
"input_shape": [[], [3], [1, 100], [4, 2, 3], [5, 224, 224, 3]],
}]
def build_graph(parameters):
"""Build the exp op testing graph."""
input_tensor = tf.placeholder(
dtype=parameters["input_dtype"],
name="input",
shape=parameters["input_shape"])
out = tf.exp(input_tensor)
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
values = [
create_tensor_data(parameters["input_dtype"], parameters["input_shape"],
min_value=-100, max_value=9)
]
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_cos_tests(options):
"""Make a set of tests to do cos."""
test_parameters = [{
"input_dtype": [tf.float32],
"input_shape": [[], [3], [1, 100], [4, 2, 3], [5, 224, 224, 3]],
}]
def build_graph(parameters):
"""Build the cos op testing graph."""
input_tensor = tf.placeholder(
dtype=parameters["input_dtype"],
name="input",
shape=parameters["input_shape"])
out = tf.cos(input_tensor)
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
values = [
create_tensor_data(parameters["input_dtype"], parameters["input_shape"],
min_value=-np.pi, max_value=np.pi)
]
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_log_softmax_tests(options):
"""Make a set of tests to do log_softmax."""
test_parameters = [{
"input_dtype": [tf.float32],
"input_shape": [[1, 100], [4, 2], [5, 224]],
}]
def build_graph(parameters):
"""Build the log_softmax op testing graph."""
input_tensor = tf.placeholder(
dtype=parameters["input_dtype"],
name="input",
shape=parameters["input_shape"])
out = tf.nn.log_softmax(input_tensor)
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
values = [
create_tensor_data(
parameters["input_dtype"],
parameters["input_shape"],
min_value=-100,
max_value=9)
]
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_maximum_tests(options):
"""Make a set of tests to do maximum."""
test_parameters = [{
"input_dtype": [tf.float32],
"input_shape_1": [[], [3], [1, 100], [4, 2, 3], [5, 224, 224, 3]],
"input_shape_2": [[], [3], [1, 100], [4, 2, 3], [5, 224, 224, 3]],
}]
def build_graph(parameters):
"""Build the maximum op testing graph."""
input_tensor_1 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input_1",
shape=parameters["input_shape_1"])
input_tensor_2 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input_2",
shape=parameters["input_shape_2"])
out = tf.maximum(input_tensor_1, input_tensor_2)
return [input_tensor_1, input_tensor_2], [out]
def build_inputs(parameters, sess, inputs, outputs):
values = [
create_tensor_data(parameters["input_dtype"],
parameters["input_shape_1"]),
create_tensor_data(parameters["input_dtype"],
parameters["input_shape_2"])
]
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=8)
@register_make_test_function()
def make_minimum_tests(options):
"""Make a set of tests to do minimum."""
test_parameters = [{
"input_dtype": [tf.float32],
"input_shape_1": [[], [3], [1, 100], [4, 2, 3], [5, 224, 224, 3]],
"input_shape_2": [[], [3], [1, 100], [4, 2, 3], [5, 224, 224, 3]],
}]
def build_graph(parameters):
"""Build the minimum op testing graph."""
input_tensor_1 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input_1",
shape=parameters["input_shape_1"])
input_tensor_2 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input_2",
shape=parameters["input_shape_2"])
out = tf.minimum(input_tensor_1, input_tensor_2)
return [input_tensor_1, input_tensor_2], [out]
def build_inputs(parameters, sess, inputs, outputs):
values = [
create_tensor_data(parameters["input_dtype"],
parameters["input_shape_1"]),
create_tensor_data(parameters["input_dtype"],
parameters["input_shape_2"])
]
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=8)
def make_binary_op_tests_func(binary_operator):
"""Return a function that does a test on a binary operator."""
return lambda options: make_binary_op_tests(options, binary_operator)
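# Example (illustrative): `make_binary_op_tests_func(tf.add)` returns a
# generator that takes only `options`, matching the registered make_*_tests
# functions below.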
@register_make_test_function()
def make_add_tests(options):
make_binary_op_tests(options, tf.add)
@register_make_test_function()
def make_add_n_tests(options):
"""Make a set of tests for AddN op."""
test_parameters = [
{
"dtype": [tf.float32, tf.int32],
"input_shape": [[2, 5, 3, 1]],
"num_inputs": [2, 3, 4, 5],
},
{
"dtype": [tf.float32, tf.int32],
"input_shape": [[5]],
"num_inputs": [2, 3, 4, 5],
},
{
"dtype": [tf.float32, tf.int32],
"input_shape": [[]],
"num_inputs": [2, 3, 4, 5],
},
]
def build_graph(parameters):
"""Builds the graph given the current parameters."""
input_tensors = []
for i in range(parameters["num_inputs"]):
input_tensors.append(
tf.placeholder(
dtype=parameters["dtype"],
name="input_{}".format(i),
shape=parameters["input_shape"]))
out = tf.add_n(input_tensors)
return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
"""Builds operand inputs for op."""
input_data = []
for i in range(parameters["num_inputs"]):
input_data.append(
create_tensor_data(parameters["dtype"], parameters["input_shape"]))
return input_data, sess.run(
outputs, feed_dict={i: d for i, d in zip(inputs, input_data)})
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_div_tests(options):
make_binary_op_tests(options, tf.div)
@register_make_test_function()
def make_sub_tests(options):
make_binary_op_tests(options, tf.subtract)
@register_make_test_function()
def make_mul_tests(options):
make_binary_op_tests(options, tf.multiply)
@register_make_test_function()
def make_pow_tests(options):
make_binary_op_tests(options, tf.pow, expected_tf_failures=7)
@register_make_test_function()
def make_floor_div_tests(options):
make_binary_op_tests(options, tf.floor_div)
@register_make_test_function()
def make_floor_mod_tests(options):
make_binary_op_tests(options, tf.floormod)
@register_make_test_function()
def make_squared_difference_tests(options):
make_binary_op_tests(options, tf.squared_difference)
@register_make_test_function()
def make_gather_tests(options):
"""Make a set of tests to do gather."""
test_parameters = [
{
"params_dtype": [tf.float32, tf.int32, tf.int64],
"params_shape": [[10], [1, 2, 20]],
"indices_dtype": [tf.int32, tf.int64],
"indices_shape": [[3], [5]],
"axis": [-1, 0, 1],
},
{
# TODO(b/123895910): add Nd support for strings.
"params_dtype": [tf.string],
"params_shape": [[8]],
"indices_dtype": [tf.int32],
"indices_shape": [[3]],
"axis": [0],
}
]
def build_graph(parameters):
"""Build the gather op testing graph."""
params = tf.placeholder(
dtype=parameters["params_dtype"],
name="params",
shape=parameters["params_shape"])
indices = tf.placeholder(
dtype=parameters["indices_dtype"],
name="indices",
shape=parameters["indices_shape"])
axis = min(len(parameters["params_shape"]), parameters["axis"])
out = tf.gather(params, indices, axis=axis)
return [params, indices], [out]
def build_inputs(parameters, sess, inputs, outputs):
params = create_tensor_data(parameters["params_dtype"],
parameters["params_shape"])
indices = create_tensor_data(parameters["indices_dtype"],
parameters["indices_shape"], 0,
parameters["params_shape"][0] - 1)
return [params, indices], sess.run(
outputs, feed_dict=dict(zip(inputs, [params, indices])))
  # Note that TF can't execute with axis=1 and params_shape=[10].
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=12)
@register_make_test_function()
def make_gather_nd_tests(options):
"""Make a set of tests to do gather_nd."""
test_parameters = [
{
"params_dtype": [tf.float32, tf.int32, tf.int64],
"params_shape": [[5, 1]],
"indices_dtype": [tf.int32, tf.int64],
"indices_shape": [[1, 1]],
},
{
"params_dtype": [tf.float32, tf.int32, tf.int64],
"params_shape": [[5, 5]],
"indices_dtype": [tf.int32, tf.int64],
"indices_shape": [[2, 1], [2, 2]],
},
{
"params_dtype": [tf.float32, tf.int32, tf.int64],
"params_shape": [[5, 5, 10]],
"indices_dtype": [tf.int32, tf.int64],
"indices_shape": [[3, 1], [2, 2], [2, 3], [2, 1, 3]],
},
]
def build_graph(parameters):
"""Build the gather_nd op testing graph."""
params = tf.placeholder(
dtype=parameters["params_dtype"],
name="params",
shape=parameters["params_shape"])
indices = tf.placeholder(
dtype=parameters["indices_dtype"],
name="indices",
shape=parameters["indices_shape"])
out = tf.gather_nd(params, indices)
return [params, indices], [out]
def build_inputs(parameters, sess, inputs, outputs):
params = create_tensor_data(parameters["params_dtype"],
parameters["params_shape"])
indices = create_tensor_data(parameters["indices_dtype"],
parameters["indices_shape"], 0,
parameters["params_shape"][0] - 1)
return [params, indices], sess.run(
outputs, feed_dict=dict(zip(inputs, [params, indices])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_gather_with_constant_tests(options):
"""Make a set of test which feed a constant to gather toco."""
test_parameters = [{
"input_shape": [[3]],
"reference_shape": [[2]],
}, {
"input_shape": [[2, 3]],
"reference_shape": [[2, 3]],
}]
def build_graph(parameters):
"""Build a graph where the inputs to Gather are constants."""
reference = tf.placeholder(
dtype=tf.int32, shape=parameters["reference_shape"])
gather_input = tf.constant(
create_tensor_data(tf.int32, parameters["input_shape"]))
gather_indices = tf.constant([0, 1], tf.int32)
out = tf.equal(reference, tf.gather(gather_input, gather_indices))
return [reference], [out]
def build_inputs(parameters, sess, inputs, outputs):
reference_values = np.zeros(parameters["reference_shape"], dtype=np.int32)
return [reference_values], sess.run(
outputs, feed_dict={inputs[0]: reference_values})
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_embedding_lookup_tests(options):
"""Make a set of tests to do gather."""
test_parameters = [
{
"params_dtype": [tf.float32],
"params_shape": [[10], [10, 10]],
"ids_dtype": [tf.int32],
"ids_shape": [[3], [5]],
},
]
def build_graph(parameters):
"""Build the gather op testing graph."""
params = tf.placeholder(
dtype=parameters["params_dtype"],
name="params",
shape=parameters["params_shape"])
ids = tf.placeholder(
dtype=parameters["ids_dtype"],
name="ids",
shape=parameters["ids_shape"])
out = tf.nn.embedding_lookup(params, ids)
return [params, ids], [out]
def build_inputs(parameters, sess, inputs, outputs):
params = create_tensor_data(parameters["params_dtype"],
parameters["params_shape"])
ids = create_tensor_data(parameters["ids_dtype"],
parameters["ids_shape"], 0,
parameters["params_shape"][0] - 1)
return [params, ids], sess.run(
outputs, feed_dict=dict(zip(inputs, [params, ids])))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs)
@register_make_test_function()
def make_global_batch_norm_tests(options):
"""Make a set of tests to do batch_norm_with_global_normalization."""
test_parameters = [{
"dtype": [tf.float32],
"input_shape": [[1, 1, 6, 2], [3, 4, 5, 4]],
"epsilon": [0.1, 0.0001],
"scale_after": [True, False],
}]
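  # Reference (roughly): the op computes (x - m) * rsqrt(v + epsilon) * gamma
  # + beta, with gamma only applied when scale_after is True.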
def build_graph(parameters):
"""Build the global batch norm testing graph."""
input_shape = parameters["input_shape"]
scale_shape = input_shape[3]
scale = create_tensor_data(parameters["dtype"], scale_shape)
offset = create_tensor_data(parameters["dtype"], scale_shape)
mean = create_tensor_data(parameters["dtype"], scale_shape)
variance = create_tensor_data(parameters["dtype"], scale_shape)
x = create_tensor_data(parameters["dtype"], parameters["input_shape"])
x_norm = tf.nn.batch_norm_with_global_normalization(
x, mean, variance, scale, offset,
parameters["epsilon"], parameters["scale_after"])
input_tensor = tf.placeholder(dtype=parameters["dtype"], name="input",
shape=parameters["input_shape"])
out = tf.add(input_tensor, x_norm)
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_tensor_data(parameters["dtype"],
parameters["input_shape"])
return [input_value], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_fused_batch_norm_tests(options):
"""Make a set of tests to do fused_batch_norm."""
test_parameters = [{
"dtype": [tf.float32],
"input_shape": [[1, 1, 6, 2]],
"epsilon": [0.001, 0.1],
}]
def build_graph(parameters):
"""Build the testing graph for fused batch normalization."""
input_shape = parameters["input_shape"]
scale_shape = input_shape[3]
scale = create_tensor_data(parameters["dtype"], scale_shape)
offset = create_tensor_data(parameters["dtype"], scale_shape)
mean = create_tensor_data(parameters["dtype"], scale_shape)
variance = create_tensor_data(parameters["dtype"], scale_shape)
x = create_tensor_data(parameters["dtype"], parameters["input_shape"])
[x_norm, _, _] = tf.nn.fused_batch_norm(
x, scale, offset, mean, variance,
parameters["epsilon"], data_format="NHWC", is_training=False)
input_tensor = tf.placeholder(dtype=parameters["dtype"], name="input",
shape=parameters["input_shape"])
out = tf.add(input_tensor, x_norm)
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_tensor_data(parameters["dtype"],
parameters["input_shape"])
return [input_value], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_conv_tests(options):
"""Make a set of tests to do convolution."""
test_parameters = [
{
"input_shape": [[1, 3, 4, 3], [4, 6, 6, 1]],
"filter_shape": [[1, 1], [2, 3], [3, 3]],
"strides": [[1, 1, 1, 1], [1, 2, 3, 1]],
"dilations": [[1, 1, 1, 1], [1, 3, 2, 1], [1, 2, 2, 1]],
"padding": ["SAME", "VALID"],
"data_format": ["NHWC"], # TODO(aselle): NCHW would be good
"constant_filter": [True, False],
"channel_multiplier": [1, 2],
"fully_quantize": [False],
},
# TODO(b/134702301): The fully_quantize param is just ignored by the MLIR
# testing path now, resulting in duplicate tests. Either ignore these
# tests or handle it properly in the mlir_convert() function.
{
"input_shape": [[1, 3, 4, 3], [4, 6, 6, 1]],
"filter_shape": [[1, 1], [2, 3], [3, 3]],
"strides": [[1, 1, 1, 1], [1, 2, 3, 1]],
"dilations": [[1, 1, 1, 1], [1, 3, 2, 1], [1, 2, 2, 1]],
"padding": ["SAME", "VALID"],
"data_format": ["NHWC"], # TODO(aselle): NCHW would be good
"constant_filter": [True],
"channel_multiplier": [1, 2],
"fully_quantize": [True],
}
]
def get_tensor_shapes(parameters):
input_shape = parameters["input_shape"]
filter_size = parameters["filter_shape"]
filter_shape = filter_size + [
input_shape[3], parameters["channel_multiplier"]
]
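    # For example (illustrative): filter_shape [3, 3] with 3 input channels and
    # channel_multiplier 2 gives a [3, 3, 3, 2] filter (HWIO layout).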
return [input_shape, filter_shape]
def build_graph(parameters):
"""Build a conv graph given `parameters`."""
input_shape, filter_shape = get_tensor_shapes(parameters)
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=input_shape)
# Get filter input either as a placeholder or constants. Also get a list of
# the input tensors that are represented as placeholders.
if parameters["constant_filter"]:
filter_input = create_tensor_data(
np.float32, filter_shape, min_value=-10, max_value=10)
input_tensors = [input_tensor]
else:
filter_input = tf.placeholder(
dtype=tf.float32, name="filter", shape=filter_shape)
input_tensors = [input_tensor, filter_input]
out = tf.nn.conv2d(
input_tensor,
filter_input,
strides=parameters["strides"],
dilations=parameters["dilations"],
padding=parameters["padding"],
data_format=parameters["data_format"])
return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
# Build list of input values either containing 1 tensor (input) or 2 tensors
# (input, filter) based on whether filter is constant or variable input.
input_shape, filter_shape = get_tensor_shapes(parameters)
values = [
create_tensor_data(np.float32, input_shape, min_value=-1, max_value=1)
]
if not parameters["constant_filter"]:
values.append(create_tensor_data(np.float32, filter_shape))
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=60)
# Note: This is a regression test for a bug (b/122651451) that Toco incorrectly
# erases the reduction indices array while it's shared with other ops.
@register_make_test_function()
def make_l2norm_shared_epsilon_tests(options):
"""Regression test for a bug (b/122651451)."""
  # Choose a set of parameters
test_parameters = [{
"input_shape": [[5, 7]],
"dim": [1],
"epsilon": [1e-8],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
epsilon = tf.constant(parameters["epsilon"])
out1 = tf.nn.l2_normalize(input_tensor, parameters["dim"], epsilon=epsilon)
out2 = tf.nn.l2_normalize(input_tensor, parameters["dim"], epsilon=epsilon)
out = out1 + out2
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(
np.float32, parameters["input_shape"], min_value=-4, max_value=10)
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
# Note: This is a regression test for a bug (b/112436267) that Toco incorrectly
# fuses weights when multiple Conv2D/FULLY_CONNECTED ops share the same constant
# weight tensor.
@register_make_test_function()
def make_conv_with_shared_weights_tests(options):
"""Make a test where 2 Conv ops shared the same constant weight tensor."""
test_parameters = [{
"input_shape": [[1, 10, 10, 3]],
"filter_shape": [[3, 3]],
"strides": [[1, 1, 1, 1]],
"dilations": [[1, 1, 1, 1]],
"padding": ["SAME"],
"data_format": ["NHWC"],
"channel_multiplier": [1],
}]
def get_tensor_shapes(parameters):
input_shape = parameters["input_shape"]
filter_size = parameters["filter_shape"]
filter_shape = filter_size + [
input_shape[3], parameters["channel_multiplier"]
]
return [input_shape, filter_shape]
def build_graph(parameters):
"""Build a conv graph given `parameters`."""
input_shape, filter_shape = get_tensor_shapes(parameters)
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=input_shape)
input_tensors = [input_tensor]
    # Construct a constant weights tensor shared by both Conv2D ops.
filter_tensor = tf.constant(
create_tensor_data(np.float32, filter_shape), dtype=tf.float32)
# Ensure that FuseBinaryIntoFollowingAffine works with an input which
# is shared by multiple affine ops.
conv_input = input_tensor + 0.1
# Construct 2 Conv2D operations which use exactly the same input and
# weights.
result1 = tf.nn.conv2d(
conv_input,
filter_tensor,
strides=parameters["strides"],
dilations=parameters["dilations"],
padding=parameters["padding"],
data_format=parameters["data_format"])
result2 = tf.nn.conv2d(
conv_input,
filter_tensor,
strides=parameters["strides"],
dilations=parameters["dilations"],
padding=parameters["padding"],
data_format=parameters["data_format"])
# Add MUL ops after Conv2D ops. These MUL ops should be fused into the
# weights of Conv2D.
result1 = result1 * 2
result2 = result2 * 3
# Add the 2 results up.
out = result1 + result2
return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
# Build list of input values either containing 1 tensor (input) or 2 tensors
# (input, filter) based on whether filter is constant or variable input.
input_shape, unused_filter_shape = get_tensor_shapes(parameters)
values = [create_tensor_data(np.float32, input_shape)]
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
# Note: This is a regression test for a bug (b/112303004) that Toco incorrectly
# transforms Conv into DepthwiseConv when two Conv ops share the same constant
# weight tensor.
@register_make_test_function()
def make_conv_to_depthwiseconv_with_shared_weights_tests(options):
"""Make a test where 2 Conv ops shared the same constant weight tensor."""
test_parameters = [{
"input_shape": [[1, 10, 10, 1]],
"filter_shape": [[3, 3]],
"strides": [[1, 1, 1, 1]],
"dilations": [[1, 1, 1, 1]],
"padding": ["SAME"],
"data_format": ["NHWC"],
"channel_multiplier": [3],
}]
def get_tensor_shapes(parameters):
input_shape = parameters["input_shape"]
filter_size = parameters["filter_shape"]
filter_shape = filter_size + [
input_shape[3], parameters["channel_multiplier"]
]
return [input_shape, filter_shape]
def build_graph(parameters):
"""Build a conv graph given `parameters`."""
input_shape, filter_shape = get_tensor_shapes(parameters)
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=input_shape)
    # Construct a constant weights tensor shared by both Conv2D ops.
filter_tensor = tf.constant(
create_tensor_data(np.float32, filter_shape), dtype=tf.float32)
input_tensors = [input_tensor]
# Construct 2 Conv2D operations which use exactly the same input and
# weights.
result1 = tf.nn.conv2d(
input_tensor,
filter_tensor,
strides=parameters["strides"],
dilations=parameters["dilations"],
padding=parameters["padding"],
data_format=parameters["data_format"])
result2 = tf.nn.conv2d(
input_tensor,
filter_tensor,
strides=parameters["strides"],
dilations=parameters["dilations"],
padding=parameters["padding"],
data_format=parameters["data_format"])
# Add the 2 results up.
out = result1 + result2
return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
# Build list of input values either containing 1 tensor (input) or 2 tensors
# (input, filter) based on whether filter is constant or variable input.
input_shape, unused_filter_shape = get_tensor_shapes(parameters)
values = [create_tensor_data(np.float32, input_shape)]
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_depthwiseconv_tests(options):
"""Make a set of tests to do convolution."""
  # TensorFlow only supports equal spatial strides, i.e. [1, x, x, 1].
test_parameters = [
{
"input_shape": [[1, 3, 4, 3], [1, 10, 10, 3]],
"filter_size": [[1, 1], [1, 2], [3, 3]],
"strides": [[1, 1, 1, 1], [1, 3, 3, 1]],
"dilations": [[1, 1, 1, 1], [1, 3, 2, 1], [1, 2, 2, 1]],
"channel_multiplier": [1, 2],
"rate": [[1, 1]],
"padding": ["SAME", "VALID"],
"data_format": ["NHWC"],
"constant_filter": [True, False],
},
{
"input_shape": [[1, 3, 4, 3]],
"filter_size": [[1, 1]],
"strides": [[1, 1, 2, 1]], # TF needs [1, x, x, 1]
"dilations": [[1, 1, 1, 1], [1, 2, 2, 1]],
"channel_multiplier": [2],
"rate": [[2, 2]], # Only [1, 1] is supported
"padding": ["SAME"],
"data_format": ["NHWC"],
"constant_filter": [True, False],
}
]
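  # Reference: tf.nn.depthwise_conv2d takes a [H, W, in_channels,
  # channel_multiplier] filter and yields in_channels * channel_multiplier
  # output channels.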
def get_tensor_shapes(parameters):
input_shape = parameters["input_shape"]
filter_size = parameters["filter_size"]
filter_shape = filter_size + [
input_shape[3], parameters["channel_multiplier"]
]
return [input_shape, filter_shape]
def build_graph(parameters):
"""Build a depthwise conv graph given `parameters`."""
input_shape, filter_shape = get_tensor_shapes(parameters)
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=input_shape)
# Get filter input either as a placeholder or constants. Also get a list of
# the input tensors that are represented as placeholders.
if parameters["constant_filter"]:
filter_input = create_tensor_data(np.float32, filter_shape)
input_tensors = [input_tensor]
else:
filter_input = tf.placeholder(
dtype=tf.float32, name="filter", shape=filter_shape)
input_tensors = [input_tensor, filter_input]
out = tf.nn.depthwise_conv2d(
input_tensor,
filter_input,
strides=parameters["strides"],
rate=parameters["rate"],
padding=parameters["padding"],
data_format=parameters["data_format"])
return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
# Build list of input values either containing 1 tensor (input) or 2 tensors
# (input, filter) based on whether filter is constant or variable input.
input_shape, filter_shape = get_tensor_shapes(parameters)
values = [create_tensor_data(np.float32, input_shape)]
if not parameters["constant_filter"]:
values.append(create_tensor_data(np.float32, filter_shape))
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=4)
@register_make_test_function()
def make_split_tests(options):
"""Make a set of tests to do tf.split."""
test_parameters = [{
"input_shape": [[1, 3, 4, 6], [2, 4, 1], [6, 4], [8]],
"num_or_size_splits": [1, 2, 3, 4, 5],
"axis": [0, 1, 2, 3, -4, -3, -2, -1],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
out = tf.split(
input_tensor, parameters["num_or_size_splits"], parameters["axis"])
return [input_tensor], [out[0]]
def build_inputs(parameters, sess, inputs, outputs):
values = [create_tensor_data(np.float32, parameters["input_shape"])]
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=112)
@register_make_test_function()
def make_splitv_tests(options):
"""Make a set of tests to do tf.split_v."""
test_parameters = [{
"input_shape": [[1, 3, 4, 6], [2, 4, 1], [6, 4], [8]],
"size_splits": [[2, 2], [1, 3], [4, 2], [5, 3],
[-1, 1], [-1, 2], [-1, 4]],
"axis": [0, 1, 2, 3, -4, -3, -2, -1],
}]
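  # A -1 entry in size_splits means 'use the remaining size'; e.g. splitting a
  # length-8 axis with size_splits [-1, 2] gives pieces of sizes 6 and 2.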
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
out = tf.split(input_tensor, parameters["size_splits"], parameters["axis"])
return [input_tensor], [out[0]]
def build_inputs(parameters, sess, inputs, outputs):
values = [create_tensor_data(np.float32, parameters["input_shape"])]
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=158)
@register_make_test_function()
def make_concat_tests(options):
"""Make a set of tests to do concatenation."""
test_parameters = [{
"base_shape": [[1, 3, 4, 3], [3, 4]],
"num_tensors": [1, 2, 3, 4, 5, 6],
"axis": [0, 1, 2, 3, -3, -2, -1],
"type": [tf.float32, tf.uint8, tf.int32, tf.int64],
}]
def get_shape(parameters, delta):
"""Return a tweaked version of 'base_shape'."""
axis = parameters["axis"]
shape = parameters["base_shape"][:]
if axis < 0:
axis += len(shape)
if axis < len(shape):
shape[axis] += delta
return shape
def build_graph(parameters):
all_tensors = []
for n in range(0, parameters["num_tensors"]):
input_tensor = tf.placeholder(dtype=parameters["type"],
name=("input%d" % n),
shape=get_shape(parameters, n))
all_tensors.append(input_tensor)
out = tf.concat(all_tensors, parameters["axis"])
return all_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
all_values = []
for n in range(0, parameters["num_tensors"]):
input_values = create_tensor_data(
parameters["type"], get_shape(parameters, n))
all_values.append(input_values)
return all_values, sess.run(
outputs, feed_dict=dict(zip(inputs, all_values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=60)
@register_make_test_function()
def make_fully_connected_tests(options):
"""Make a set of tests to do fully_connected."""
test_parameters = [{
"shape1": [[3, 3]],
"shape2": [[3, 3]],
"transpose_a": [True, False],
"transpose_b": [True, False],
"constant_filter": [True, False],
}, {
"shape1": [[4, 4], [1, 4], [4]],
"shape2": [[4, 4], [4, 1], [4]],
"transpose_a": [False],
"transpose_b": [False],
"constant_filter": [True, False],
}, {
"shape1": [[40, 37]],
"shape2": [[37, 40]],
"transpose_a": [False],
"transpose_b": [False],
"constant_filter": [True, False],
}, {
"shape1": [[40, 37]],
"shape2": [[40, 37]],
"transpose_a": [False],
"transpose_b": [True],
"constant_filter": [True, False],
}]
def build_graph(parameters):
"""Build a matmul graph given `parameters`."""
input_tensor1 = tf.placeholder(dtype=tf.float32, name="input1",
shape=parameters["shape1"])
# Get input_tensor2 either as a placeholder or constants. Also get a list of
# the input tensors that are represented as placeholders.
if parameters["constant_filter"]:
input_tensor2 = create_tensor_data(np.float32, parameters["shape2"])
input_tensors = [input_tensor1]
else:
input_tensor2 = tf.placeholder(
dtype=tf.float32, name="input2", shape=parameters["shape2"])
input_tensors = [input_tensor1, input_tensor2]
out = tf.matmul(input_tensor1, input_tensor2,
transpose_a=parameters["transpose_a"],
transpose_b=parameters["transpose_b"])
return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
# Build list of input values either containing 1 tensor (input_values1) or 2
# tensors (input_values1, input_values2) based on whether the second input
# is a constant or variable input.
values = [create_tensor_data(np.float32, shape=parameters["shape1"])]
if not parameters["constant_filter"]:
values.append(create_tensor_data(np.float32, parameters["shape2"]))
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=10)
@register_make_test_function()
def make_l2norm_tests(options):
"""Make a set of tests to do l2norm."""
  # Choose a set of parameters
test_parameters = [{
"input_shape": [[5, 7], [1, 1, 1, 1], [1, 3, 4, 3], [3, 15, 14, 3],
[3, 1, 2, 4, 6], [2, 2, 3, 4, 5, 6]],
"dim": [0, 1, 2, 3, [2, 3], -2],
"epsilon": [None, 1e-12, 1e-3],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
if parameters["epsilon"]:
out = tf.nn.l2_normalize(
input_tensor, parameters["dim"], epsilon=parameters["epsilon"])
else:
out = tf.nn.l2_normalize(input_tensor, parameters["dim"])
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(
np.float32, parameters["input_shape"], min_value=-4, max_value=10)
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=9)
@register_make_test_function()
def make_local_response_norm_tests(options):
"""Make a set of tests to do local_response_norm."""
  # Choose a set of parameters
test_parameters = [{
"input_shape": [[1, 1, 1, 1], [1, 3, 4, 3], [3, 15, 14, 3]],
"depth_radius": [None, 0, 1, 3, 5],
"bias": [None, 0.3, -0.1],
"alpha": [None, 2, -3],
"beta": [None, 0.25, 2],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
out = tf.nn.local_response_normalization(
input_tensor, depth_radius=parameters["depth_radius"],
bias=parameters["bias"], alpha=parameters["alpha"],
beta=parameters["beta"])
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(
np.float32, parameters["input_shape"], min_value=-4, max_value=10)
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_pad_tests(options):
"""Make a set of tests to do pad."""
# TODO(nupurgarg): Add test for tf.uint8.
test_parameters = [
# 4D:
{
"dtype": [tf.int32, tf.int64, tf.float32],
"input_shape": [[1, 1, 2, 1], [2, 1, 1, 1]],
"paddings": [[[0, 0], [0, 1], [2, 3], [0, 0]], [[0, 1], [0, 0],
[0, 0], [2, 3]]],
"constant_paddings": [True, False],
},
# 2D:
{
"dtype": [tf.int32, tf.int64, tf.float32],
"input_shape": [[1, 2]],
"paddings": [[[0, 1], [2, 3]]],
"constant_paddings": [True, False],
},
# 1D:
{
"dtype": [tf.int32],
"input_shape": [[1]],
"paddings": [[[1, 2]]],
"constant_paddings": [False],
},
]
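  # Each paddings entry is [pad_before, pad_after] for the corresponding
  # dimension; e.g. an input of shape [1, 2] with paddings [[0, 1], [2, 3]]
  # pads out to shape [2, 7].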
def build_graph(parameters):
"""Build a pad graph given `parameters`."""
input_tensor = tf.placeholder(
dtype=parameters["dtype"],
name="input",
shape=parameters["input_shape"])
# Get paddings as either a placeholder or constants.
if parameters["constant_paddings"]:
paddings = parameters["paddings"]
input_tensors = [input_tensor]
else:
shape = [len(parameters["paddings"]), 2]
paddings = tf.placeholder(dtype=tf.int32, name="padding", shape=shape)
input_tensors = [input_tensor, paddings]
out = tf.pad(input_tensor, paddings=paddings)
return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
values = [
create_tensor_data(parameters["dtype"], parameters["input_shape"])
]
if not parameters["constant_paddings"]:
values.append(np.array(parameters["paddings"]))
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_padv2_tests(options):
"""Make a set of tests to do padv2."""
# TODO(nupurgarg): Add test for tf.uint8.
test_parameters = [
# 4D:
{
"dtype": [tf.int32, tf.int64, tf.float32],
"input_shape": [[1, 1, 2, 1], [2, 1, 1, 1]],
"paddings": [[[0, 0], [0, 1], [2, 3], [0, 0]], [[0, 1], [0, 0],
[0, 0], [2, 3]]],
"constant_paddings": [True, False],
"constant_values": [0, 2],
},
# 2D:
{
"dtype": [tf.int32, tf.int64, tf.float32],
"input_shape": [[1, 2]],
"paddings": [[[0, 1], [2, 3]]],
"constant_paddings": [True, False],
"constant_values": [0, 2],
},
# 1D:
{
"dtype": [tf.int32],
"input_shape": [[1]],
"paddings": [[[0, 1]]],
"constant_paddings": [False],
"constant_values": [0, 2],
},
]
def build_graph(parameters):
"""Build a pad graph given `parameters`."""
input_tensor = tf.placeholder(
dtype=parameters["dtype"],
name="input",
shape=parameters["input_shape"])
# Get paddings as either a placeholder or constants.
if parameters["constant_paddings"]:
paddings = parameters["paddings"]
input_tensors = [input_tensor]
else:
shape = [len(parameters["paddings"]), 2]
paddings = tf.placeholder(dtype=tf.int32, name="padding", shape=shape)
input_tensors = [input_tensor, paddings]
out = tf.pad(input_tensor, paddings=paddings,
constant_values=parameters["constant_values"])
return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
values = [
create_tensor_data(parameters["dtype"], parameters["input_shape"])
]
if not parameters["constant_paddings"]:
values.append(np.array(parameters["paddings"]))
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_reshape_tests(options):
"""Make a set of tests to do reshape."""
  # All shapes in the first parameter set below are suitable for tensors with
  # 420 elements.
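  # A -1 entry in output_shape lets tf.reshape infer that dimension, e.g.
  # [1, -1, 5, 7] resolves to [1, 12, 5, 7] since 420 / (1 * 5 * 7) = 12.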
test_parameters = [{
"dtype": [tf.float32, tf.int32],
"input_shape": [[3, 4, 5, 7], [4, 105], [21, 5, 2, 2], [420]],
"output_shape": [[15, 28], [420], [1, -1, 5, 7], [-1]],
"constant_shape": [True, False],
}, {
"dtype": [tf.float32],
"input_shape": [[1]],
"output_shape": [[]],
"constant_shape": [True, False],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(dtype=parameters["dtype"], name="input",
shape=parameters["input_shape"])
# Get shape as either a placeholder or constants.
if parameters["constant_shape"]:
output_shape = parameters["output_shape"]
input_tensors = [input_tensor]
else:
# The shape of the shape tensor.
shape_tensor_shape = [len(parameters["output_shape"])]
output_shape = tf.placeholder(
dtype=tf.int32, name="output_shape", shape=shape_tensor_shape)
input_tensors = [input_tensor, output_shape]
out = tf.reshape(input_tensor, shape=output_shape)
return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
values = [
create_tensor_data(parameters["dtype"], parameters["input_shape"])
]
if not parameters["constant_shape"]:
values.append(np.array(parameters["output_shape"]))
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_shape_tests(options):
"""Make a set of tests to do shape."""
test_parameters = [{
"input_dtype": [tf.float32, tf.int32],
"input_shape": [[], [0], [1, 1, 1, 3], [2, 3, 4, 5], [5, 5], [10]],
"out_type": [tf.int32, tf.int64],
}]
def build_graph(parameters):
"""Build the shape op testing graph."""
# Note that we intentionally leave out the shape from the input placeholder
# to prevent the Shape operation from being optimized out during conversion.
input_value = tf.placeholder(dtype=parameters["input_dtype"], name="input")
out = tf.shape(input_value, out_type=parameters["out_type"])
return [input_value], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_tensor_data(parameters["input_dtype"],
parameters["input_shape"])
return [input_value], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_rank_tests(options):
"""Make a set of tests to do rank."""
test_parameters = [{
"input_dtype": [tf.float32, tf.int32],
"input_shape": [[], [0], [1, 1, 1, 3], [2, 3, 4, 5], [5, 5], [10]],
}]
def build_graph(parameters):
"""Build the rank op testing graph."""
input_value = tf.placeholder(dtype=parameters["input_dtype"], name="input")
out = tf.rank(input_value)
return [input_value], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_tensor_data(parameters["input_dtype"],
parameters["input_shape"])
return [input_value], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_one_hot_tests(options):
"""Make a set of tests to do one_hot."""
test_parameters = [{
"indices_type": [tf.int32, tf.int64],
"indices_shape": [[3], [4, 4], [1, 5], [5, 1]],
"axis": [0, 1],
"dtype": [tf.int32, tf.int64, tf.float32],
"provide_optional_inputs": [True, False],
}]
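  # Reference: tf.one_hot(indices=[0, 2], depth=3) with the default on/off
  # values yields [[1., 0., 0.], [0., 0., 1.]].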
def build_graph(parameters):
indices = tf.placeholder(
dtype=parameters["indices_type"],
name="indices",
shape=parameters["indices_shape"])
depth = tf.placeholder(dtype=tf.int32, name="depth", shape=())
if not parameters["provide_optional_inputs"]:
out = tf.one_hot(indices=indices, depth=depth)
return [indices, depth], [out]
on_value = tf.placeholder(
dtype=parameters["dtype"], name="on_value", shape=())
off_value = tf.placeholder(
dtype=parameters["dtype"], name="off_value", shape=())
out = tf.one_hot(
indices=indices,
depth=depth,
on_value=on_value,
off_value=off_value,
axis=parameters["axis"],
dtype=parameters["dtype"])
return [indices, depth, on_value, off_value], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = [
create_tensor_data(
parameters["indices_type"],
shape=parameters["indices_shape"],
min_value=-1,
max_value=10),
create_tensor_data(tf.int32, shape=None, min_value=1, max_value=10),
]
if parameters["provide_optional_inputs"]:
input_values.append(
create_tensor_data(
parameters["dtype"], shape=None, min_value=1, max_value=10))
input_values.append(
create_tensor_data(
parameters["dtype"], shape=None, min_value=-1, max_value=0))
return input_values, sess.run(
outputs, feed_dict=dict(zip(inputs, input_values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_resize_bilinear_tests(options):
"""Make a set of tests to do resize_bilinear."""
test_parameters = [{
"dtype": [tf.float32, tf.int32],
"input_shape": [[1, 3, 4, 3], [1, 10, 2, 1]],
"size": [[1, 1], [4, 3], [2, 2], [5, 6]],
"align_corners": [None, True, False],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(dtype=parameters["dtype"], name="input",
shape=parameters["input_shape"])
out = tf.image.resize_bilinear(input_tensor, size=parameters["size"],
align_corners=parameters["align_corners"])
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(parameters["dtype"],
parameters["input_shape"])
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_resize_nearest_neighbor_tests(options):
"""Make a set of tests to do resize_nearest_neighbor."""
test_parameters = [{
"dtype": [tf.float32, tf.int32],
"input_shape": [[1, 3, 4, 3], [1, 10, 2, 1]],
"size": [[1, 1], [4, 3], [2, 2], [5, 6]],
"align_corners": [False],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=parameters["dtype"],
name="input",
shape=parameters["input_shape"])
out = tf.image.resize_nearest_neighbor(
input_tensor,
size=parameters["size"],
align_corners=parameters["align_corners"])
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(parameters["dtype"],
parameters["input_shape"])
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_sigmoid_tests(options):
"""Make a set of tests to do sigmoid."""
test_parameters = [{
"dtype": [tf.float32],
"input_shape": [[1, 3, 4, 3], [4], [], [1, 2, 3, 4, 5, 6]],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(dtype=parameters["dtype"], name="input",
shape=parameters["input_shape"])
out = tf.sigmoid(input_tensor)
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(parameters["dtype"],
parameters["input_shape"])
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_softmax_tests(options):
"""Make a set of tests to do softmax."""
test_parameters = [{
"dtype": [tf.float32],
"input_shape": [[1, 3, 4, 3], [2, 3]],
"dim": [-1, 0],
}, {
"dtype": [tf.float32],
"input_shape": [[4, 7]],
"dim": [-1, 1],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(dtype=parameters["dtype"], name="input",
shape=parameters["input_shape"])
out = tf.nn.softmax(input_tensor, dim=parameters["dim"])
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(parameters["dtype"],
parameters["input_shape"])
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_space_to_depth_tests(options):
"""Make a set of tests to do space_to_depth."""
test_parameters = [{
"dtype": [tf.float32, tf.int32, tf.uint8, tf.int64],
"input_shape": [[2, 12, 24, 1]],
"block_size": [2, 3, 4],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(dtype=parameters["dtype"], name="input",
shape=parameters["input_shape"])
out = tf.space_to_depth(input_tensor, block_size=parameters["block_size"])
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(parameters["dtype"],
parameters["input_shape"])
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_space_to_batch_nd_tests(options):
"""Make a set of tests to do space_to_batch_nd."""
# TODO(nupurgarg): Add test for uint8.
test_parameters = [
{
"dtype": [tf.int32, tf.int64, tf.float32],
"input_shape": [[1, 2, 2, 3], [2, 2, 4, 1]],
"block_shape": [[1, 3], [2, 2]],
"paddings": [[[0, 0], [0, 0]], [[0, 0], [2, 0]], [[1, 1], [1, 1]]],
"constant_block_shape": [True, False],
"constant_paddings": [True, False],
},
{
"dtype": [tf.float32],
"input_shape": [[2, 3, 7, 3]],
"block_shape": [[1, 3], [2, 2]],
"paddings": [[[0, 0], [2, 0]], [[1, 0], [1, 0]]],
"constant_block_shape": [True, False],
"constant_paddings": [True, False],
},
      # Non-4D use case: 1 batch dimension, 3 spatial dimensions, 2 others.
{
"dtype": [tf.float32],
"input_shape": [[1, 4, 4, 4, 1, 1]],
"block_shape": [[2, 2, 2]],
"paddings": [[[0, 0], [0, 0], [0, 0]]],
"constant_block_shape": [True, False],
"constant_paddings": [True, False],
},
]
def build_graph(parameters):
"""Build a space_to_batch graph given `parameters`."""
input_tensor = tf.placeholder(
dtype=parameters["dtype"],
name="input",
shape=parameters["input_shape"])
input_tensors = [input_tensor]
# Get block_shape either as a const or as a placeholder (tensor).
if parameters["constant_block_shape"]:
block_shape = parameters["block_shape"]
else:
shape = [len(parameters["block_shape"])]
block_shape = tf.placeholder(dtype=tf.int32, name="shape", shape=shape)
input_tensors.append(block_shape)
# Get paddings either as a const or as a placeholder (tensor).
if parameters["constant_paddings"]:
paddings = parameters["paddings"]
else:
shape = [len(parameters["paddings"]), 2]
paddings = tf.placeholder(dtype=tf.int32, name="paddings", shape=shape)
input_tensors.append(paddings)
out = tf.space_to_batch_nd(input_tensor, block_shape, paddings)
return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
values = [
create_tensor_data(parameters["dtype"], parameters["input_shape"])
]
if not parameters["constant_block_shape"]:
values.append(np.array(parameters["block_shape"]))
if not parameters["constant_paddings"]:
values.append(np.array(parameters["paddings"]))
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
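  # Some block_shape/paddings combinations do not evenly divide the padded
  # spatial dimensions; those combinations fail inside TensorFlow itself and
  # are accounted for by expected_tf_failures below.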
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=56)
@register_make_test_function()
def make_batch_to_space_nd_tests(options):
"""Make a set of tests to do batch_to_space_nd."""
test_parameters = [
{
"dtype": [tf.float32, tf.int64, tf.int32],
"input_shape": [[12, 3, 3, 1]],
"block_shape": [[1, 4], [2, 2], [3, 4]],
"crops": [[[0, 0], [0, 0]], [[1, 1], [1, 1]]],
"constant_block_shape": [True, False],
"constant_crops": [True, False],
},
# Single batch (no-op)
{
"dtype": [tf.float32],
"input_shape": [[1, 3, 3, 1]],
"block_shape": [[1, 1]],
"crops": [[[0, 0], [0, 0]], [[1, 1], [1, 1]]],
"constant_block_shape": [True],
"constant_crops": [True],
},
# Non-4D use case: 1 batch dimension, 3 spatial dimensions, 2 others.
{
"dtype": [tf.float32],
"input_shape": [[8, 2, 2, 2, 1, 1]],
"block_shape": [[2, 2, 2]],
"crops": [[[0, 0], [0, 0], [0, 0]]],
"constant_block_shape": [True, False],
"constant_crops": [True, False],
},
]
def build_graph(parameters):
"""Build a batch_to_space graph given `parameters`."""
input_tensor = tf.placeholder(
dtype=parameters["dtype"],
name="input",
shape=parameters["input_shape"])
input_tensors = [input_tensor]
# Get block_shape either as a const or as a placeholder (tensor).
if parameters["constant_block_shape"]:
block_shape = parameters["block_shape"]
else:
shape = [len(parameters["block_shape"])]
block_shape = tf.placeholder(dtype=tf.int32, name="shape", shape=shape)
input_tensors.append(block_shape)
# Get crops either as a const or as a placeholder (tensor).
if parameters["constant_crops"]:
crops = parameters["crops"]
else:
shape = [len(parameters["crops"]), 2]
crops = tf.placeholder(dtype=tf.int32, name="crops", shape=shape)
input_tensors.append(crops)
out = tf.batch_to_space_nd(input_tensor, block_shape, crops)
return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
values = [
create_tensor_data(parameters["dtype"], parameters["input_shape"])
]
if not parameters["constant_block_shape"]:
values.append(np.array(parameters["block_shape"]))
if not parameters["constant_crops"]:
values.append(np.array(parameters["crops"]))
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_transpose_tests(options):
"""Make a set of tests to do transpose."""
# TODO(nupurgarg): Add test for uint8.
test_parameters = [{
"dtype": [tf.int32, tf.int64, tf.float32],
"input_shape": [[2, 2, 3]],
"perm": [[0, 1, 2], [0, 2, 1]],
"constant_perm": [True, False],
}, {
"dtype": [tf.float32],
"input_shape": [[1, 2, 3, 4]],
"perm": [[0, 1, 2, 3], [3, 0, 1, 2]],
"constant_perm": [True, False],
}, {
"dtype": [tf.float32],
"input_shape": [[1, 2, 3, 4, 5]],
"perm": [[4, 3, 2, 1, 0]],
"constant_perm": [True, False],
}]
def build_graph(parameters):
"""Build a transpose graph given `parameters`."""
input_tensor = tf.placeholder(
dtype=parameters["dtype"],
name="input",
shape=parameters["input_shape"])
if parameters["constant_perm"]:
perm = parameters["perm"]
input_tensors = [input_tensor]
else:
shape = [len(parameters["perm"]), 2]
perm = tf.placeholder(dtype=tf.int32, name="perm", shape=shape)
input_tensors = [input_tensor, perm]
out = tf.transpose(input_tensor, perm=perm)
return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
values = [
create_tensor_data(parameters["dtype"], parameters["input_shape"])
]
if not parameters["constant_perm"]:
values.append(np.array(parameters["perm"]))
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=9)
@register_make_test_function()
def make_squeeze_tests(options):
"""Make a set of tests to do squeeze."""
test_parameters = [{
"dtype": [tf.int32, tf.float32, tf.int64],
"input_shape": [[1, 2, 1, 3, 1, 4, 1, 1]],
"axis": [
None, [], [0, 2], [4, 7], [-1, 0, 2, 0, 7, -6], [1], [2, 3, 2],
[-1, -2, -4, -6, -8], [0, 2, 4, 6, 7], [7, 6, 4, 2, 0], [6, 6],
[0, 1, 2, 3, 4, 5, 6, 7], [-2, -3, 1, 0, 7, -5]
],
}, {
"dtype": [tf.int32, tf.float32, tf.int64],
"input_shape": [[1]],
"axis": [None, [], [0], [-1]],
}, {
"dtype": [tf.int32, tf.float32, tf.int64],
"input_shape": [[1, 1, 1, 1, 1]],
"axis": [None, [], [0], [3, 0], [-2, 0, 3, 2]],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=parameters["dtype"],
name="input",
shape=parameters["input_shape"])
out = tf.squeeze(input_tensor, axis=parameters["axis"])
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(parameters["dtype"],
parameters["input_shape"])
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=12)
@register_make_test_function()
def make_squeeze_transpose_tests(options):
"""Make a set of tests to do squeeze followed by transpose."""
test_parameters = [{
"dtype": [tf.int32, tf.float32, tf.int64],
"input_shape": [[1, 4, 10, 1]],
"axis": [[-1], [3]],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=parameters["dtype"],
name="input",
shape=parameters["input_shape"])
out = tf.squeeze(input_tensor, axis=parameters["axis"])
out = tf.transpose(out, perm=[1, 2])
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(parameters["dtype"],
parameters["input_shape"])
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=0)
def _make_strided_slice_tests(options, test_parameters,
expected_tf_failures=0):
"""Utility function to make strided_slice_tests based on parameters."""
def build_graph(parameters):
"""Build graph for stride_slice test."""
input_tensor = tf.placeholder(
dtype=parameters["dtype"],
name="input",
shape=parameters["input_shape"])
if parameters["constant_indices"]:
begin = parameters["begin"]
end = parameters["end"]
strides = parameters["strides"]
tensors = [input_tensor]
else:
begin = tf.placeholder(
dtype=parameters["index_type"],
name="begin",
shape=[len(parameters["input_shape"])])
end = tf.placeholder(
dtype=parameters["index_type"],
name="end",
shape=[len(parameters["input_shape"])])
strides = (
tf.placeholder(
dtype=parameters["index_type"],
name="strides",
shape=[len(parameters["input_shape"])])
if parameters["strides"] is not None else None)
tensors = [input_tensor, begin, end]
if strides is not None:
tensors.append(strides)
out = tf.strided_slice(
input_tensor,
begin,
end,
strides,
begin_mask=parameters["begin_mask"],
end_mask=parameters["end_mask"])
return tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
"""Build inputs for stride_slice test."""
input_values = create_tensor_data(parameters["dtype"],
parameters["input_shape"])
index_type = _TF_TYPE_INFO[parameters["index_type"]][0]
values = [input_values]
if not parameters["constant_indices"]:
begin_values = np.array(parameters["begin"]).astype(index_type)
end_values = np.array(parameters["end"]).astype(index_type)
stride_values = (
np.array(parameters["strides"]).astype(index_type)
if parameters["strides"] is not None else None)
values.append(begin_values)
values.append(end_values)
if stride_values is not None:
values.append(stride_values)
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=expected_tf_failures)
@register_make_test_function()
def make_strided_slice_tests(options):
"""Make a set of tests to do strided_slice."""
# TODO(soroosh): add test/support for uint8.
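  # begin_mask/end_mask are bit masks: if bit i is set, begin[i]/end[i] is
  # ignored and the fullest possible range is used for that dimension.
  # shrink_axis_mask drops dimension i, keeping only the value at begin[i].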
test_parameters = [
# 4-D (basic cases with const/non-const indices).
{
"dtype": [tf.float32, tf.int32, tf.int64],
"index_type": [tf.int32],
"input_shape": [[12, 2, 2, 5]],
"strides": [None, [2, 1, 3, 1]],
"begin": [[0, 0, 0, 0]],
"end": [[12, 2, 2, 5]],
"begin_mask": [None],
"end_mask": [None],
"shrink_axis_mask": [None],
"constant_indices": [False, True],
},
# 4-D with non-trivial begin & end.
{
"dtype": [tf.float32],
"index_type": [tf.int32],
"input_shape": [[12, 2, 2, 5]],
"begin": [[0, 0, 0, 0], [1, 0, 1, 0]],
"end": [[8, 2, 2, 3], [12, 2, 2, 5]],
"strides": [None, [2, 1, 3, 1]],
"begin_mask": [None, 8],
"end_mask": [None, 3],
"shrink_axis_mask": [None, 15, -1],
"constant_indices": [True],
},
# Begin, end, strides dim are different from input shape
{
"dtype": [tf.float32],
"index_type": [tf.int32],
"input_shape": [[12, 2, 2, 5]],
"begin": [[0]],
"end": [[1]],
"strides": [None, [1]],
"begin_mask": [0],
"end_mask": [0],
"shrink_axis_mask": [1],
"constant_indices": [True],
},
# 2-D
{
"dtype": [tf.float32],
"index_type": [tf.int32],
"input_shape": [[2, 3]],
"begin": [[0, 0]],
"end": [[2, 2]],
"strides": [None, [2, 2]],
"begin_mask": [None, 1, 2],
"end_mask": [None, 1, 2],
"shrink_axis_mask": [None, 1, 2, 3, -1],
"constant_indices": [False, True],
},
# Negative strides
{
"dtype": [tf.float32],
"index_type": [tf.int32],
"input_shape": [[2, 3]],
"begin": [[0, -1]],
"end": [[2, -3]],
"strides": [[1, -1]],
"begin_mask": [None, 1, 2],
"end_mask": [None, 1, 2],
"shrink_axis_mask": [None, 1, 2, 3, -1],
"constant_indices": [False],
},
]
_make_strided_slice_tests(options, test_parameters, expected_tf_failures=2)
@register_make_test_function()
def make_strided_slice_1d_exhaustive_tests(options):
"""Make a set of exhaustive tests for 1D strided_slice."""
test_parameters = [
# 1-D Exhaustive
{
"dtype": [tf.float32],
"index_type": [tf.int32],
"input_shape": [[3]],
"begin": [[-2], [-1], [0], [1], [2]],
"end": [[-2], [-1], [0], [1], [2]],
"strides": [[-2], [-1], [1], [2]],
"begin_mask": [0, 1],
"end_mask": [0, 1],
"shrink_axis_mask": [0],
"constant_indices": [False],
},
]
_make_strided_slice_tests(options, test_parameters)
# For verifying https://github.com/tensorflow/tensorflow/issues/23599
# TODO(chaomei): refactor the test to cover more cases, like negative stride,
# negative array index etc.
@register_make_test_function()
def make_resolve_constant_strided_slice_tests(options):
"""Make a set of tests to show strided_slice yields incorrect results."""
test_parameters = [{
"unused_iteration_counter": [1],
}]
def build_graph(parameters):
"""Build the strided_slice op testing graph."""
del parameters
input_values = tf.placeholder(dtype=tf.float32, shape=[4, 2])
data = tf.constant([[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11],
[12, 13, 14, 15]], tf.float32)
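    # data[:, :2] lowers to a strided_slice on a constant input; the converter
    # is expected to constant-fold it (see the issue linked above).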
return [input_values], [input_values + data[:, :2]]
def build_inputs(parameters, sess, inputs, outputs):
del parameters
input_values = np.zeros([4, 2], dtype=np.float32)
return [input_values], sess.run(
outputs, feed_dict={inputs[0]: input_values})
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_lstm_tests(options):
"""Make a set of tests to do basic Lstm cell."""
test_parameters = [
{
"dtype": [tf.float32],
"num_batchs": [1],
"time_step_size": [1],
"input_vec_size": [3],
"num_cells": [4],
"split_tflite_lstm_inputs": [False],
},
]
def build_graph(parameters):
"""Build a simple graph with BasicLSTMCell."""
num_batchs = parameters["num_batchs"]
time_step_size = parameters["time_step_size"]
input_vec_size = parameters["input_vec_size"]
num_cells = parameters["num_cells"]
inputs_after_split = []
for i in xrange(time_step_size):
one_timestamp_input = tf.placeholder(
dtype=parameters["dtype"],
name="split_{}".format(i),
shape=[num_batchs, input_vec_size])
inputs_after_split.append(one_timestamp_input)
    # Currently the LSTM identifier has a few limitations: it only supports
    # forget_bias == 0 and tanh as the inner state activation.
# TODO(zhixianyan): Add another test with forget_bias == 1.
# TODO(zhixianyan): Add another test with relu as activation.
lstm_cell = tf.contrib.rnn.BasicLSTMCell(
num_cells, forget_bias=0.0, state_is_tuple=True)
cell_outputs, _ = rnn.static_rnn(
lstm_cell, inputs_after_split, dtype=tf.float32)
out = cell_outputs[-1]
return inputs_after_split, [out]
def build_inputs(parameters, sess, inputs, outputs):
"""Feed inputs, assign variables, and freeze graph."""
with tf.variable_scope("", reuse=True):
kernel = tf.get_variable("rnn/basic_lstm_cell/kernel")
bias = tf.get_variable("rnn/basic_lstm_cell/bias")
kernel_values = create_tensor_data(
parameters["dtype"], [kernel.shape[0], kernel.shape[1]], -1, 1)
bias_values = create_tensor_data(parameters["dtype"], [bias.shape[0]], 0,
1)
sess.run(tf.group(kernel.assign(kernel_values), bias.assign(bias_values)))
num_batchs = parameters["num_batchs"]
time_step_size = parameters["time_step_size"]
input_vec_size = parameters["input_vec_size"]
input_values = []
for _ in xrange(time_step_size):
tensor_data = create_tensor_data(parameters["dtype"],
[num_batchs, input_vec_size], 0, 1)
input_values.append(tensor_data)
out = sess.run(outputs, feed_dict=dict(zip(inputs, input_values)))
return input_values, out
# TODO(zhixianyan): Automatically generate rnn_states for lstm cell.
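  # rnn_states tells the converter which arrays hold the LSTM's recurrent
  # state and which ops feed them back (the back edges), since these edges
  # cannot be recovered from the frozen graph alone.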
extra_toco_options = ExtraTocoOptions()
extra_toco_options.rnn_states = (
"{state_array:rnn/BasicLSTMCellZeroState/zeros,"
"back_edge_source_array:rnn/basic_lstm_cell/Add_1,size:4},"
"{state_array:rnn/BasicLSTMCellZeroState/zeros_1,"
"back_edge_source_array:rnn/basic_lstm_cell/Mul_2,size:4}")
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
extra_toco_options,
use_frozen_graph=True)
def make_l2_pool(input_tensor, ksize, strides, padding, data_format):
"""Given an input perform a sequence of TensorFlow ops to produce l2pool."""
return tf.sqrt(tf.nn.avg_pool(
tf.square(input_tensor), ksize=ksize, strides=strides,
padding=padding, data_format=data_format))
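# A rough usage sketch (hypothetical arguments):
#   make_l2_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
#                padding="VALID", data_format="NHWC")
# computes sqrt(avg_pool(square(x))), i.e. the root-mean-square over each
# pooling window.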
@register_make_test_function()
def make_topk_tests(options):
"""Make a set of tests to do topk."""
test_parameters = [{
"input_dtype": [tf.float32, tf.int32],
"input_shape": [[10], [5, 20]],
"input_k": [None, 1, 3],
}]
def build_graph(parameters):
"""Build the topk op testing graph."""
input_value = tf.placeholder(
dtype=parameters["input_dtype"],
name="input",
shape=parameters["input_shape"])
if parameters["input_k"] is not None:
k = tf.placeholder(dtype=tf.int32, name="input_k", shape=[])
inputs = [input_value, k]
else:
k = tf.constant(3, name="k")
inputs = [input_value]
out = tf.nn.top_k(input_value, k)
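    # tf.nn.top_k returns (values, indices); only the indices are used as the
    # graph output here.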
return inputs, [out[1]]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_tensor_data(parameters["input_dtype"],
parameters["input_shape"])
if parameters["input_k"] is not None:
k = np.array(parameters["input_k"], dtype=np.int32)
return [input_value, k], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value, k])))
else:
return [input_value], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_arg_min_max_tests(options):
"""Make a set of tests to do arg_max."""
test_parameters = [{
"input_dtype": [tf.float32, tf.int32],
"input_shape": [[], [1, 1, 1, 3], [2, 3, 4, 5], [2, 3, 3], [5, 5], [10]],
"output_type": [tf.int32, tf.int64],
"is_arg_max": [True],
}]
def build_graph(parameters):
"""Build the topk op testing graph."""
input_value = tf.placeholder(
dtype=parameters["input_dtype"],
name="input",
shape=parameters["input_shape"])
axis = random.randint(0, max(len(parameters["input_shape"]) - 1, 0))
if parameters["is_arg_max"]:
out = tf.arg_max(input_value, axis, output_type=parameters["output_type"])
else:
out = tf.arg_min(input_value, axis, output_type=parameters["output_type"])
return [input_value], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_tensor_data(parameters["input_dtype"],
parameters["input_shape"])
return [input_value], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value])))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=4)
@register_make_test_function()
def make_equal_tests(options):
"""Make a set of tests to do equal."""
test_parameters = [{
"input_dtype": [tf.float32, tf.int32, tf.int64],
"input_shape_pair": [([], []),
([1, 1, 1, 3], [1, 1, 1, 3]),
([2, 3, 4, 5], [2, 3, 4, 5]), ([2, 3, 3], [2, 3]),
([5, 5], [1]), ([10], [2, 4, 10])],
}]
def build_graph(parameters):
"""Build the equal op testing graph."""
input_value1 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input1",
shape=parameters["input_shape_pair"][0])
input_value2 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input2",
shape=parameters["input_shape_pair"][1])
out = tf.equal(input_value1, input_value2)
return [input_value1, input_value2], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value1 = create_tensor_data(parameters["input_dtype"],
parameters["input_shape_pair"][0])
input_value2 = create_tensor_data(parameters["input_dtype"],
parameters["input_shape_pair"][1])
return [input_value1, input_value2], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value1, input_value2])))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=3)
@register_make_test_function()
def make_not_equal_tests(options):
"""Make a set of tests to do not equal."""
test_parameters = [{
"input_dtype": [tf.float32, tf.int32, tf.int64],
"input_shape_pair": [([1, 1, 1, 3], [1, 1, 1, 3]),
([2, 3, 4, 5], [2, 3, 4, 5]), ([2, 3, 3], [2, 3]),
([5, 5], [1]), ([10], [2, 4, 10])],
}]
def build_graph(parameters):
"""Build the not euqal op testing graph."""
input_value1 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input1",
shape=parameters["input_shape_pair"][0])
input_value2 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input2",
shape=parameters["input_shape_pair"][1])
out = tf.not_equal(input_value1, input_value2)
return [input_value1, input_value2], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value1 = create_tensor_data(parameters["input_dtype"],
parameters["input_shape_pair"][0])
input_value2 = create_tensor_data(parameters["input_dtype"],
parameters["input_shape_pair"][1])
return [input_value1, input_value2], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value1, input_value2])))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=3)
@register_make_test_function()
def make_greater_tests(options):
"""Make a set of tests to do greater."""
test_parameters = [{
"input_dtype": [tf.float32, tf.int32, tf.int64],
"input_shape_pair": [([1, 1, 1, 3], [1, 1, 1, 3]),
([2, 3, 4, 5], [2, 3, 4, 5]), ([2, 3, 3], [2, 3]),
([5, 5], [1]), ([10], [2, 4, 10])],
}]
def build_graph(parameters):
"""Build the greater op testing graph."""
input_value1 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input1",
shape=parameters["input_shape_pair"][0])
input_value2 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input2",
shape=parameters["input_shape_pair"][1])
out = tf.greater(input_value1, input_value2)
return [input_value1, input_value2], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value1 = create_tensor_data(parameters["input_dtype"],
parameters["input_shape_pair"][0])
input_value2 = create_tensor_data(parameters["input_dtype"],
parameters["input_shape_pair"][1])
return [input_value1, input_value2], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value1, input_value2])))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=3)
@register_make_test_function()
def make_greater_equal_tests(options):
"""Make a set of tests to do greater_equal."""
test_parameters = [{
"input_dtype": [tf.float32, tf.int32, tf.int64],
"input_shape_pair": [([1, 1, 1, 3], [1, 1, 1, 3]),
([2, 3, 4, 5], [2, 3, 4, 5]), ([2, 3, 3], [2, 3]),
([5, 5], [1]), ([10], [2, 4, 10])],
}]
def build_graph(parameters):
"""Build the greater_equal op testing graph."""
input_value1 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input1",
shape=parameters["input_shape_pair"][0])
input_value2 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input2",
shape=parameters["input_shape_pair"][1])
out = tf.greater_equal(input_value1, input_value2)
return [input_value1, input_value2], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value1 = create_tensor_data(parameters["input_dtype"],
parameters["input_shape_pair"][0])
input_value2 = create_tensor_data(parameters["input_dtype"],
parameters["input_shape_pair"][1])
return [input_value1, input_value2], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value1, input_value2])))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=3)
@register_make_test_function()
def make_less_tests(options):
"""Make a set of tests to do less."""
test_parameters = [{
"input_dtype": [tf.float32, tf.int32, tf.int64],
"input_shape_pair": [([1, 1, 1, 3], [1, 1, 1, 3]),
([2, 3, 4, 5], [2, 3, 4, 5]), ([2, 3, 3], [2, 3]),
([5, 5], [1]), ([10], [2, 4, 10])],
}]
def build_graph(parameters):
"""Build the less op testing graph."""
input_value1 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input1",
shape=parameters["input_shape_pair"][0])
input_value2 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input2",
shape=parameters["input_shape_pair"][1])
out = tf.less(input_value1, input_value2)
return [input_value1, input_value2], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value1 = create_tensor_data(parameters["input_dtype"],
parameters["input_shape_pair"][0])
input_value2 = create_tensor_data(parameters["input_dtype"],
parameters["input_shape_pair"][1])
return [input_value1, input_value2], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value1, input_value2])))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=3)
@register_make_test_function()
def make_less_equal_tests(options):
"""Make a set of tests to do less_equal."""
test_parameters = [{
"input_dtype": [tf.float32, tf.int32, tf.int64],
"input_shape_pair": [([1, 1, 1, 3], [1, 1, 1, 3]),
([2, 3, 4, 5], [2, 3, 4, 5]), ([2, 3, 3], [2, 3]),
([5, 5], [1]), ([10], [2, 4, 10])],
}]
def build_graph(parameters):
"""Build the less_equal op testing graph."""
input_value1 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input1",
shape=parameters["input_shape_pair"][0])
input_value2 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input2",
shape=parameters["input_shape_pair"][1])
out = tf.less_equal(input_value1, input_value2)
return [input_value1, input_value2], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value1 = create_tensor_data(parameters["input_dtype"],
parameters["input_shape_pair"][0])
input_value2 = create_tensor_data(parameters["input_dtype"],
parameters["input_shape_pair"][1])
return [input_value1, input_value2], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value1, input_value2])))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=3)
@register_make_test_function()
def make_floor_tests(options):
"""Make a set of tests to do floor."""
test_parameters = [{
"input_dtype": [tf.float32],
"input_shape": [[], [1], [1, 2], [5, 6, 7, 8], [3, 4, 5, 6]],
}]
def build_graph(parameters):
"""Build the floor op testing graph."""
input_value = tf.placeholder(
dtype=parameters["input_dtype"],
name="input1",
shape=parameters["input_shape"])
out = tf.floor(input_value)
return [input_value], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_tensor_data(parameters["input_dtype"],
parameters["input_shape"])
return [input_value], sess.run(outputs, feed_dict={inputs[0]: input_value})
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_ceil_tests(options):
"""Make a set of tests to do ceil."""
test_parameters = [{
"input_dtype": [tf.float32],
"input_shape": [[], [1], [1, 2], [5, 6, 7, 8], [3, 4, 5, 6]],
}]
def build_graph(parameters):
"""Build the ceil op testing graph."""
input_value = tf.placeholder(
dtype=parameters["input_dtype"],
name="input1",
shape=parameters["input_shape"])
out = tf.ceil(input_value)
return [input_value], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_tensor_data(parameters["input_dtype"],
parameters["input_shape"])
return [input_value], sess.run(
outputs, feed_dict={inputs[0]: input_value})
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_round_tests(options):
"""Build the round op testing graph."""
test_parameters = [{
"input_dtype": [tf.float32],
"input_shape": [[], [1], [1, 2], [5, 6, 7, 8], [3, 4, 5, 6]],
}]
def build_graph(parameters):
"""Build the round op testing graph."""
input_value = tf.placeholder(
dtype=parameters["input_dtype"],
name="input1",
shape=parameters["input_shape"])
out = tf.round(input_value)
return [input_value], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_tensor_data(parameters["input_dtype"],
parameters["input_shape"])
return [input_value], sess.run(outputs, feed_dict={inputs[0]: input_value})
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_neg_tests(options):
"""Make a set of tests to do neg."""
test_parameters = [{
"input_dtype": [tf.float32, tf.int32],
"input_shape": [[1, 3, 4, 3], [5], []],
}]
def build_graph(parameters):
"""Build the neg op testing graph."""
input_tensor = tf.placeholder(
dtype=parameters["input_dtype"],
name="input",
shape=parameters["input_shape"])
out = tf.negative(input_tensor)
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
values = create_tensor_data(parameters["input_dtype"],
parameters["input_shape"])
return [values], sess.run(outputs, feed_dict=dict(zip(inputs, [values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_zeros_like_tests(options):
"""Make a set of tests to do zeros_like."""
test_parameters = [{
"input_dtype": [tf.float32, tf.int32, tf.int64],
"input_shape": [[], [1], [1, 2], [5, 6, 7, 8], [3, 4, 5, 6]],
}]
def build_graph(parameters):
"""Build the zeros_like op testing graph."""
input_tensor = tf.placeholder(
dtype=parameters["input_dtype"],
name="input",
shape=parameters["input_shape"])
zeros = tf.zeros_like(input_tensor)
    # This maximum node is here so that toco can perform constants-propagation
    # through the above zeros_like, which it can't do if the output of the
    # zeros_like is an output of the whole graph (graph outputs can't be
    # constants). If toco does not perform such constants-propagation, the
    # resulting tflite graph retains the zeros_like as a Fill op, which is
    # unsupported by TFLite, even as a custom op.
out = tf.maximum(zeros, input_tensor)
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
values = create_tensor_data(parameters["input_dtype"],
parameters["input_shape"])
return [values], sess.run(outputs, feed_dict=dict(zip(inputs, [values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
def _make_elementwise_tests(op):
"""Make a set of tests to do element-wise operations."""
def f(options):
"""Actual function that generates examples."""
test_parameters = [{
"input_dtype": [tf.float32],
"input_shape": [[], [1], [1, 2], [5, 6, 7, 8], [3, 4, 5, 6]],
}]
def build_graph(parameters):
"""Build the unary op testing graph."""
input_value = tf.placeholder(
dtype=parameters["input_dtype"],
name="input1",
shape=parameters["input_shape"])
out = op(input_value)
return [input_value], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_tensor_data(parameters["input_dtype"],
parameters["input_shape"])
return [input_value], sess.run(
outputs, feed_dict={inputs[0]: input_value})
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
return f
@register_make_test_function()
def make_sin_tests(options):
"""Make a set of tests to do sin."""
return _make_elementwise_tests(tf.sin)(options)
@register_make_test_function()
def make_log_tests(options):
"""Make a set of tests to do log."""
return _make_elementwise_tests(tf.log)(options)
@register_make_test_function()
def make_sqrt_tests(options):
"""Make a set of tests to do sqrt."""
return _make_elementwise_tests(tf.sqrt)(options)
@register_make_test_function()
def make_rsqrt_tests(options):
"""Make a set of tests to do 1/sqrt."""
return _make_elementwise_tests(tf.rsqrt)(options)
@register_make_test_function()
def make_square_tests(options):
"""Make a set of tests to do square."""
return _make_elementwise_tests(tf.square)(options)
@register_make_test_function()
def make_where_tests(options):
"""Make a set of tests to do where."""
test_parameters = [
{
"input_dtype": [tf.float32, tf.int32],
"input_shape_set": [([1, 2, 3, 4], [1, 2, 3, 4]),],
"use_where_v2": [False, True],
},
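      # Broadcasting between the condition and the value tensors is only
      # supported by tf.where_v2, so the mismatched-shape case below is
      # restricted to use_where_v2=True.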
{
"input_dtype": [tf.float32, tf.int32],
"input_shape_set": [([1, 2, 3, 4], [1, 2, 3, 1]),],
"use_where_v2": [True],
},
]
def build_graph(parameters):
"""Build the where op testing graph."""
input_value1 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input2",
shape=parameters["input_shape_set"][0])
input_value2 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input3",
shape=parameters["input_shape_set"][1])
less = tf.less(input_value1, input_value2)
where = tf.where_v2 if parameters["use_where_v2"] else tf.where
out = where(less, input_value1, input_value2)
return [input_value1, input_value2], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value1 = create_tensor_data(parameters["input_dtype"],
parameters["input_shape_set"][0])
input_value2 = create_tensor_data(parameters["input_dtype"],
parameters["input_shape_set"][1])
return [input_value1, input_value2], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value1, input_value2])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_slice_tests(options):
"""Make a set of tests to do slice."""
# TODO(renjieliu): add test/support for uint8.
test_parameters = [
# 4-D
{
"dtype": [tf.float32, tf.int32, tf.int64, tf.string],
"index_type": [tf.int32, tf.int64],
"input_shape": [[12, 2, 2, 5]],
"begin": [[0, 0, 0, 0], [1, 0, 1, 0]],
"size": [[8, 2, 2, 3], [11, 2, 1, 5]],
},
# 2-D
{
"dtype": [tf.float32, tf.int32, tf.int64, tf.string],
"index_type": [tf.int32, tf.int64],
"input_shape": [[2, 3]],
"begin": [[0, 0], [1, 0]],
"size": [[2, 3], [2, 2]],
},
# 4-D with size -1
{
"dtype": [tf.float32],
"index_type": [tf.int32],
"input_shape": [[4, 4, 4, 4]],
"begin": [[0, 0, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0],
[0, 0, 0, 1]],
"size": [[-1, 1, 1, 1], [1, -1, 1, 1], [1, 1, -1, 1], [1, 1, 1, -1]],
},
]
def build_graph(parameters):
"""Build graph for slice test."""
input_tensor = tf.placeholder(
dtype=parameters["dtype"],
name="input",
shape=parameters["input_shape"])
begin = tf.placeholder(
dtype=parameters["index_type"],
name="begin",
shape=[len(parameters["input_shape"])])
size = tf.placeholder(
dtype=parameters["index_type"],
name="size",
shape=[len(parameters["input_shape"])])
tensors = [input_tensor, begin, size]
out = tf.slice(input_tensor, begin, size)
return tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
"""Build inputs for slice test."""
input_values = create_tensor_data(parameters["dtype"],
parameters["input_shape"])
index_type = _TF_TYPE_INFO[parameters["index_type"]][0]
begin_values = np.array(parameters["begin"]).astype(index_type)
size_values = np.array(parameters["size"]).astype(index_type)
values = [input_values, begin_values, size_values]
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=24)
@register_make_test_function()
def make_conv2d_transpose_tests(options):
"""Make a set of tests to do transpose_conv."""
test_parameters = [{
"input_shape": [[1, 50, 54, 3]],
"filter_shape": [[1, 1, 8, 3], [1, 2, 8, 3], [1, 3, 8, 3], [1, 4, 8, 3]],
"output_shape": [[1, 100, 108, 8]],
"dynamic_output_shape": [True, False],
}, {
"input_shape": [[1, 16, 1, 512]],
"filter_shape": [[4, 1, 512, 512]],
"output_shape": [[1, 32, 1, 512]],
"dynamic_output_shape": [True, False],
}, {
"input_shape": [[1, 128, 128, 1]],
"filter_shape": [[4, 4, 1, 1]],
"output_shape": [[1, 256, 256, 1]],
"dynamic_output_shape": [True, False],
}]
def build_graph(parameters):
"""Build a transpose_conv graph given `parameters`."""
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
filter_tensor = tf.placeholder(
dtype=tf.float32, name="filter", shape=parameters["filter_shape"])
input_tensors = [input_tensor, filter_tensor]
if parameters["dynamic_output_shape"]:
output_shape = tf.placeholder(dtype=tf.int32, shape=[4])
input_tensors.append(output_shape)
else:
output_shape = parameters["output_shape"]
out = tf.nn.conv2d_transpose(
input_tensor,
filter_tensor,
output_shape=output_shape,
padding="SAME",
strides=(1, 2, 2, 1))
return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
values = [
create_tensor_data(np.float32, parameters["input_shape"]),
create_tensor_data(np.float32, parameters["filter_shape"])
]
if parameters["dynamic_output_shape"]:
values.append(np.array(parameters["output_shape"]))
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
# Since computing the output_shape for tf.nn.conv2d_transpose is fairly
# complicated, we first perform a "conv2d" operation to get an output of the
# right shape, then feed that output into tf.nn.conv2d_backprop_input.
# This test therefore depends on the correctness of the "conv2d" operation.
@register_make_test_function()
def make_transpose_conv_tests(options):
"""Make a set of tests to do transpose_conv."""
# Tensorflow only supports equal strides
test_parameters = [{
"input_shape": [[1, 3, 4, 1], [1, 10, 10, 3], [3, 20, 20, 1]],
"filter_size": [[1, 1], [1, 2], [3, 3]],
"strides": [[1, 1, 1, 1], [1, 3, 3, 1]],
"padding": ["SAME", "VALID"],
"data_format": ["NHWC"],
"channel_multiplier": [1, 2],
}]
def get_tensor_shapes(parameters):
input_shape = parameters["input_shape"]
filter_size = parameters["filter_size"]
filter_shape = filter_size + [
input_shape[3], parameters["channel_multiplier"]
]
return [input_shape, filter_shape]
def build_graph(parameters):
"""Build a transpose_conv graph given `parameters`."""
input_shape, filter_shape = get_tensor_shapes(parameters)
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=input_shape)
filter_input = tf.placeholder(
dtype=tf.float32, name="filter", shape=filter_shape)
conv_outputs = tf.nn.conv2d(
input_tensor,
filter_input,
strides=parameters["strides"],
padding=parameters["padding"],
data_format=parameters["data_format"])
out = tf.nn.conv2d_backprop_input(
input_shape,
filter_input,
conv_outputs,
strides=parameters["strides"],
padding=parameters["padding"],
data_format=parameters["data_format"])
input_tensors = [input_tensor, filter_input]
return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
input_shape, filter_shape = get_tensor_shapes(parameters)
values = [
create_tensor_data(np.float32, input_shape),
create_tensor_data(np.float32, filter_shape)
]
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_tile_tests(options):
"""Make a set of tests to do tile."""
test_parameters = [{
"input_dtype": [tf.float32, tf.int32, tf.bool],
"input_shape": [[3, 2, 1], [2, 2, 2]],
"multiplier_dtype": [tf.int32, tf.int64],
"multiplier_shape": [[3]]
}]
def build_graph(parameters):
"""Build the tile op testing graph."""
input_value = tf.placeholder(
dtype=parameters["input_dtype"],
shape=parameters["input_shape"],
name="input")
multiplier_value = tf.placeholder(
dtype=parameters["multiplier_dtype"],
shape=parameters["multiplier_shape"],
name="multiplier")
out = tf.tile(input_value, multiplier_value)
return [input_value, multiplier_value], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_tensor_data(parameters["input_dtype"],
parameters["input_shape"])
multipliers_value = create_tensor_data(
parameters["multiplier_dtype"],
parameters["multiplier_shape"],
min_value=0)
return [input_value, multipliers_value], sess.run(
outputs,
feed_dict={
inputs[0]: input_value,
inputs[1]: multipliers_value
})
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_expand_dims_tests(options):
"""Make a set of tests to do expand_dims."""
test_parameters = [{
"input_type": [tf.float32, tf.int32],
"input_shape": [[5, 4]],
"axis_value": [0, 1, 2, -1, -2, -3],
"constant_axis": [True, False],
}]
def build_graph(parameters):
"""Build the where op testing graph."""
inputs = []
input_value = tf.placeholder(
dtype=parameters["input_type"],
name="input",
shape=parameters["input_shape"])
inputs.append(input_value)
if parameters["constant_axis"]:
axis_value = tf.constant(
parameters["axis_value"], dtype=tf.int32, shape=[1])
else:
axis_value = tf.placeholder(dtype=tf.int32, name="axis", shape=[1])
inputs.append(axis_value)
out = tf.expand_dims(input_value, axis=axis_value)
return inputs, [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = []
input_values.append(
create_tensor_data(parameters["input_type"], parameters["input_shape"]))
if not parameters["constant_axis"]:
input_values.append(np.array([parameters["axis_value"]], dtype=np.int32))
return input_values, sess.run(
outputs, feed_dict=dict(zip(inputs, input_values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_sparse_to_dense_tests(options):
"""Make a set of tests to do sparse to dense."""
test_parameters = [{
"value_dtype": [tf.float32, tf.int32, tf.int64],
"index_dtype": [tf.int32, tf.int64],
"value_count": [1, 3, 6, 8],
"dense_shape": [[15], [3, 10], [4, 4, 4, 4], [7, 10, 9]],
"default_value": [0, -1],
"value_is_scalar": [True, False],
}]
# Return a single value for 1-D dense shape, but a tuple for other shapes.
def generate_index(dense_shape):
if len(dense_shape) == 1:
return np.random.randint(dense_shape[0])
else:
index = []
for shape in dense_shape:
index.append(np.random.randint(shape))
return tuple(index)
def build_graph(parameters):
"""Build the sparse_to_dense op testing graph."""
dense_shape = parameters["dense_shape"]
    # Special handling for the value_is_scalar case, where value_count must
    # be 1.
if parameters["value_is_scalar"] and parameters["value_count"] == 1:
value = tf.placeholder(
name="value", dtype=parameters["value_dtype"], shape=())
else:
value = tf.placeholder(
name="value",
dtype=parameters["value_dtype"],
shape=[parameters["value_count"]])
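    # Draw random positions until value_count distinct indices into the dense
    # output have been collected.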
indices = set()
while len(indices) < parameters["value_count"]:
indices.add(generate_index(dense_shape))
indices = tf.constant(tuple(indices), dtype=parameters["index_dtype"])
# TODO(renjieliu): Add test for validate_indices case.
out = tf.sparse_to_dense(
indices,
dense_shape,
value,
parameters["default_value"],
validate_indices=False)
return [value], [out]
def build_inputs(parameters, sess, inputs, outputs):
if parameters["value_is_scalar"] and parameters["value_count"] == 1:
input_value = create_scalar_data(parameters["value_dtype"])
else:
input_value = create_tensor_data(parameters["value_dtype"],
[parameters["value_count"]])
return [input_value], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_pack_tests(options):
"""Make a set of tests to do stack."""
test_parameters = [
# Avoid creating all combinations to keep the test size small.
{
"dtype": [tf.float32],
"base_shape": [[3, 4, 3], [3, 4], [5]],
"num_tensors": [1, 2, 3, 4, 5, 6],
"axis": [0, 1, 2, 3],
"additional_shape": [1, 2, 3],
},
{
"dtype": [tf.int32],
"base_shape": [[3, 4, 3], [3, 4], [5]],
"num_tensors": [6],
"axis": [0, 1, 2, 3],
"additional_shape": [1, 2, 3],
},
{
"dtype": [tf.int64],
"base_shape": [[3, 4, 3], [3, 4], [5]],
"num_tensors": [5],
"axis": [0, 1, 2, 3],
"additional_shape": [1, 2, 3],
}
]
def get_shape(parameters):
"""Return a tweaked version of 'base_shape'."""
axis = parameters["axis"]
shape = parameters["base_shape"][:]
if axis < len(shape):
shape[axis] += parameters["additional_shape"]
return shape
def build_graph(parameters):
all_tensors = []
for n in range(0, parameters["num_tensors"]):
input_tensor = tf.placeholder(
dtype=parameters["dtype"],
name=("input%d" % n),
shape=get_shape(parameters))
all_tensors.append(input_tensor)
out = tf.stack(all_tensors, parameters["axis"])
return all_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
all_values = []
for _ in range(0, parameters["num_tensors"]):
input_values = create_tensor_data(np.float32, get_shape(parameters))
all_values.append(input_values)
return all_values, sess.run(
outputs, feed_dict=dict(zip(inputs, all_values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=72)
@register_make_test_function()
def make_unpack_tests(options):
"""Make a set of tests to do unstack."""
test_parameters = [{
"base_shape": [[3, 4, 3], [3, 4], [5, 6, 7, 8]],
"axis": [0, 1, 2, 3],
}]
def get_valid_axis(parameters):
"""Return a tweaked version of 'axis'."""
axis = parameters["axis"]
shape = parameters["base_shape"][:]
while axis > len(shape) - 1:
axis -= 1
return axis
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=tf.float32, name=("input"), shape=parameters["base_shape"])
outs = tf.unstack(input_tensor, axis=get_valid_axis(parameters))
return [input_tensor], [outs[0]]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_tensor_data(np.float32, shape=parameters["base_shape"])
return [input_value], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_range_tests(options):
"""Make a set of tests to do range."""
test_parameters = [{
"dtype": [tf.int32, tf.float32],
"offset": [10, 100, 1000],
"delta": [1, 2, 3, 4, -1, -2, -3, -4],
}]
def build_graph(parameters):
"""Build the range op testing graph."""
input_tensor = tf.placeholder(
dtype=parameters["dtype"], name=("start"), shape=[])
if parameters["delta"] < 0:
offset = parameters["offset"] * -1
else:
offset = parameters["offset"]
delta = parameters["delta"]
limit_tensor = input_tensor + offset
delta_tensor = tf.constant(delta, dtype=parameters["dtype"])
out = tf.range(input_tensor, limit_tensor, delta_tensor)
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_scalar_data(parameters["dtype"])
return [input_value], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_fill_tests(options):
"""Make a set of tests to do fill."""
test_parameters = [{
"dims_dtype": [tf.int32, tf.int64],
"dims_shape": [[], [1], [3], [3, 3]],
"value_dtype": [tf.int32, tf.int64, tf.float32],
}]
def build_graph(parameters):
"""Build the fill op testing graph."""
input1 = tf.placeholder(
dtype=parameters["dims_dtype"],
name="dims",
shape=parameters["dims_shape"])
input2 = tf.placeholder(
dtype=parameters["value_dtype"], name="value", shape=[])
out = tf.fill(input1, input2)
return [input1, input2], [out]
def build_inputs(parameters, sess, inputs, outputs):
input1 = create_tensor_data(parameters["dims_dtype"],
parameters["dims_shape"], 1)
input2 = create_scalar_data(parameters["value_dtype"])
return [input1, input2], sess.run(
outputs, feed_dict=dict(zip(inputs, [input1, input2])))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=12)
def _make_logical_tests(op):
"""Make a set of tests to do logical operations."""
def logical(options, expected_tf_failures=0):
"""Generate examples."""
test_parameters = [{
"input_shape_pair": [([], []), ([1, 1, 1, 3], [1, 1, 1, 3]),
([2, 3, 4, 5], [2, 3, 4, 5]), ([2, 3, 3], [2, 3]),
([5, 5], [1]), ([10], [2, 4, 10])],
}]
def build_graph(parameters):
"""Build the logical testing graph."""
input_value1 = tf.placeholder(
dtype=tf.bool, name="input1", shape=parameters["input_shape_pair"][0])
input_value2 = tf.placeholder(
dtype=tf.bool, name="input2", shape=parameters["input_shape_pair"][1])
out = op(input_value1, input_value2)
return [input_value1, input_value2], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value1 = create_tensor_data(tf.bool,
parameters["input_shape_pair"][0])
input_value2 = create_tensor_data(tf.bool,
parameters["input_shape_pair"][1])
return [input_value1, input_value2], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value1, input_value2])))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=expected_tf_failures)
return logical
@register_make_test_function()
def make_logical_or_tests(options):
"""Make a set of tests to do logical_or."""
return _make_logical_tests(tf.logical_or)(options, expected_tf_failures=1)
@register_make_test_function()
def make_logical_and_tests(options):
"""Make a set of tests to do logical_and."""
return _make_logical_tests(tf.logical_and)(options, expected_tf_failures=1)
@register_make_test_function()
def make_logical_xor_tests(options):
"""Make a set of tests to do logical_xor.
Test logical_not as well.
"""
return _make_logical_tests(tf.logical_xor)(options, expected_tf_failures=1)
@register_make_test_function()
def make_mirror_pad_tests(options):
"""Make a set of tests to do mirror_pad."""
test_parameters = [
{
"input_shape": [[2, 3]],
"padding_matrix": [[[1, 1], [2, 1]]],
"mode": ["REFLECT"],
"type": ["const"]
},
{
"input_shape": [[2, 3]],
"padding_matrix": [[[1, 1], [1, 1]]],
"mode": ["REFLECT"],
"type": ["const"]
},
{
"input_shape": [[2, 3]],
"padding_matrix": [[[1, 1], [2, 1]]],
"mode": ["SYMMETRIC"],
"type": ["placeholder"]
},
{
"input_shape": [[2, 3]],
"padding_matrix": [[[1, 1], [2, 1]]],
"mode": ["REFLECT"],
"type": ["placeholder"]
},
{
"input_shape": [[3]],
"padding_matrix": [[[0, 2]]],
"mode": ["SYMMETRIC"],
"type": ["placeholder"]
},
{
"input_shape": [[3]],
"padding_matrix": [[[0, 2]]],
"mode": ["SYMMETRIC"],
"type": ["const"]
},
{
"input_shape": [[3]],
"padding_matrix": [[[0, 2]]],
"mode": ["REFLECT"],
"type": ["const"]
},
{
"input_shape": [[3, 2, 4, 5]],
"padding_matrix": [[[1, 1], [2, 2], [1, 1], [1, 1]]],
"mode": ["SYMMETRIC"],
"type": ["placeholder"]
},
]
def build_graph(parameters):
"""Build the graph for the test case."""
input_tensor = tf.placeholder(
dtype=tf.int32, name="input", shape=parameters["input_shape"])
if parameters["type"] != "const":
padding_matrix = tf.placeholder(
dtype=tf.int32,
name="padding",
shape=[len(parameters["input_shape"]), 2])
input_tensors = [input_tensor, padding_matrix]
else:
padding_matrix = tf.constant(np.array(parameters["padding_matrix"]))
input_tensors = [input_tensor]
output = tf.pad(
input_tensor, paddings=padding_matrix, mode=parameters["mode"])
return input_tensors, [output]
def build_inputs(parameters, sess, inputs, outputs):
input_values = [create_tensor_data(tf.int32, parameters["input_shape"])]
if parameters["type"] != "const":
input_values.append(np.array(parameters["padding_matrix"]))
return input_values, sess.run(
outputs, feed_dict=dict(zip(inputs, input_values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_unroll_batch_matmul_tests(options):
"""Make a set of tests to test unroll_batch_matmul."""
  # The test cases below require broadcasting support (BatchMatMulV2
  # semantics), which isn't supported as of this change.
broadcast_shape_params = [
# Simple broadcast.
[(1, 2, 3), (3, 5), False, False],
# Empty batch broadcast.
[(2, 5, 3), (3, 7), False, False],
# Single batch with non-empty batch broadcast.
[(1, 5, 3), (4, 3, 7), False, False],
# Broadcast both operands
[(3, 1, 5, 3), (1, 4, 3, 7), False, False],
]
test_parameters = [{
"dtype": [tf.float32],
"shape": [
[(2, 2, 3), (2, 3, 2), False, False],
[(2, 2, 3), (2, 3, 2), True, True],
[(2, 2, 3), (2, 2, 3), False, True],
[(2, 2, 3), (2, 2, 3), True, False],
[(4, 2, 2, 3), (4, 2, 3, 2), False, False],
[(4, 2, 2, 3), (4, 2, 3, 2), True, True],
[(4, 2, 2, 3), (4, 2, 2, 3), False, True],
[(4, 2, 2, 3), (4, 2, 2, 3), True, False]
] + broadcast_shape_params,
# TODO(b/130887442): Improve the forward compatibility tests for every
# ops.
"forward_compatibility_test": [False, True],
}]
def build_graph(parameters):
"""Build the batch_matmul op testing graph."""
def _build_graph():
input_tensor1 = tf.placeholder(
dtype=parameters["dtype"], shape=parameters["shape"][0])
input_tensor2 = tf.placeholder(
dtype=parameters["dtype"], shape=parameters["shape"][1])
# Should be unrolled and replaced with fully_connected ops in the end.
out = tf.matmul(
input_tensor1,
input_tensor2,
transpose_a=parameters["shape"][2],
transpose_b=parameters["shape"][3])
return [input_tensor1, input_tensor2], [out]
if parameters["forward_compatibility_test"]:
# This is hardcoded to the date after MatMulV2 is activated.
# TODO(b/130887442): Improve the forward compatibility tests for every
# ops, and remove the hardcoded date.
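      # forward_compatibility_horizon() makes graph building behave as if the
      # given date has already passed, so ops gated on forward compatibility
      # (here BatchMatMulV2) are emitted.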
with tf.compat.forward_compatibility_horizon(2019, 4, 26):
return _build_graph()
else:
return _build_graph()
def build_inputs(parameters, sess, inputs, outputs):
input_value1 = create_tensor_data(
parameters["dtype"], shape=parameters["shape"][0])
input_value2 = create_tensor_data(
parameters["dtype"], shape=parameters["shape"][1])
return [input_value1, input_value2], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value1, input_value2])))
make_zip_of_tests(
options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_placeholder_with_default_tests(options):
"""Make a set of tests to test placeholder_with_default."""
test_parameters = [{
"dtype": [tf.float32, tf.int32, tf.int64],
}]
def build_graph(parameters):
"""Build the placeholder_with_default testing graph."""
const_node = tf.constant(
[1, 2, 2, 0], shape=[2, 2], dtype=parameters["dtype"])
input_tensor = tf.placeholder_with_default(
const_node, shape=[2, 2], name="input")
out = tf.equal(input_tensor, const_node, name="output")
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
numpy_type = _TF_TYPE_INFO[parameters["dtype"]][0]
input_value = np.array([[1, 0], [2, 1]], numpy_type)
return [input_value], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_unique_tests(options):
"""Make a set of tests for Unique op."""
test_parameters = [
{
"input_shape": [[1]],
"index_type": [tf.int32, tf.int64, None],
"input_values": [3]
},
{
"input_shape": [[5]],
"index_type": [tf.int32, tf.int64],
"input_values": [[3, 2, 1, 2, 3]]
},
{
"input_shape": [[7]],
"index_type": [tf.int32, tf.int64],
"input_values": [[1, 1, 1, 1, 1, 1, 1]]
},
{
"input_shape": [[5]],
"index_type": [tf.int32, tf.int64],
"input_values": [[3, 2, 1, 0, -1]]
}]
def build_graph(parameters):
"""Build the graph for the test case."""
input_tensor = tf.placeholder(
dtype=tf.int32, name="input", shape=parameters["input_shape"])
if parameters["index_type"] is None:
output = tf.unique(input_tensor)
else:
output = tf.unique(input_tensor, parameters["index_type"])
return [input_tensor], output
def build_inputs(parameters, sess, inputs, outputs):
input_values = [create_tensor_data(tf.int32, parameters["input_shape"])]
return input_values, sess.run(
outputs, feed_dict=dict(zip(inputs, input_values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_reverse_v2_tests(options):
"""Make a set of tests to do reverse_v2."""
test_parameters = [{
"base_shape": [[3, 4, 3], [3, 4], [5, 6, 7, 8]],
"axis": [0, 1, 2, 3],
}]
def get_valid_axis(parameters):
"""Return a tweaked version of 'axis'."""
axis = parameters["axis"]
shape = parameters["base_shape"][:]
while axis > len(shape) - 1:
axis -= 1
return axis
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=tf.float32, name=("input"), shape=parameters["base_shape"])
outs = tf.reverse(input_tensor, axis=[get_valid_axis(parameters)])
return [input_tensor], [outs]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_tensor_data(np.float32, shape=parameters["base_shape"])
return [input_value], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_reverse_sequence_tests(options):
"""Make a set of tests to do reverse_sequence."""
test_parameters = [
{
"input_dtype": [tf.float32, tf.int32, tf.int64],
"input_shape": [[8, 4, 5, 5, 6], [4, 4, 3, 5]],
"seq_lengths": [[2, 2, 2, 2], [2, 1, 1, 0]],
"seq_axis": [0, 3],
"batch_axis": [1]
},
{
"input_dtype": [tf.float32],
"input_shape": [[2, 4, 5, 5, 6]],
"seq_lengths": [[2, 1]],
"seq_axis": [2],
"batch_axis": [0]
},
{
"input_dtype": [tf.float32],
"input_shape": [[4, 2]],
"seq_lengths": [[3, 1]],
"seq_axis": [0],
"batch_axis": [1]
}]
def build_graph(parameters):
input_value = tf.placeholder(
dtype=parameters["input_dtype"],
name="input",
shape=parameters["input_shape"])
outs = tf.reverse_sequence(
input_value,
seq_lengths=parameters["seq_lengths"],
batch_axis=parameters["batch_axis"],
seq_axis=parameters["seq_axis"])
return [input_value], [outs]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_tensor_data(parameters["input_dtype"],
parameters["input_shape"])
return [input_value], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_matrix_diag_tests(options):
"""Make a set of tests for tf.linalg.diag op."""
test_parameters = [
{
"input_shape": [[3], [2, 3], [3, 4, 5], [2, 4, 6, 8]],
"input_dtype": [tf.int32, tf.float32],
},
]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=parameters["input_dtype"],
name="input",
shape=parameters["input_shape"])
outs = tf.matrix_diag(input_tensor)
return [input_tensor], [outs]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(parameters["input_dtype"],
parameters["input_shape"])
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_matrix_set_diag_tests(options):
"""Make a set of tests for tf.linalg.set_diag op."""
test_parameters = [
{
"input_diag_shapes": [([3, 3], [3]), ([2, 3], [2]), ([2, 4, 4],
[2, 4]),
([3, 4, 5, 6], [3, 4, 5])],
"input_dtype": [tf.int32, tf.float32, tf.uint8],
},
]
def build_graph(parameters):
input_shape = parameters["input_diag_shapes"][0]
diag_shape = parameters["input_diag_shapes"][1]
input_tensor = tf.placeholder(
dtype=parameters["input_dtype"], name="input", shape=input_shape)
diag_tensor = tf.placeholder(
dtype=parameters["input_dtype"], name="diagonal", shape=diag_shape)
outs = tf.matrix_set_diag(input_tensor, diag_tensor)
return [input_tensor, diag_tensor], [outs]
def build_inputs(parameters, sess, inputs, outputs):
input_shape = parameters["input_diag_shapes"][0]
diag_shape = parameters["input_diag_shapes"][1]
input_values = create_tensor_data(parameters["input_dtype"], input_shape)
diag_values = create_tensor_data(parameters["input_dtype"], diag_shape)
return [input_values, diag_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values, diag_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_eye_tests(options):
"""Make a set of tests for tf.eye op."""
test_parameters = [{
"num_rows_shape": [[]],
"num_cols_shape": [[]],
"batch_shape": [[3], [2, 4], [4, 5, 6], None],
"use_num_cols": [True, False],
"dtype": [tf.float32, tf.int32],
}]
def build_graph(parameters):
input_tensor0 = tf.placeholder(
dtype=tf.int32, name="num_rows", shape=parameters["num_rows_shape"])
input_tensor1 = tf.placeholder(
dtype=tf.int32, name="num_columns", shape=parameters["num_cols_shape"])
if parameters["use_num_cols"]:
outs = tf.eye(
num_rows=input_tensor0,
num_columns=input_tensor1,
batch_shape=parameters["batch_shape"],
dtype=parameters["dtype"])
return [input_tensor0, input_tensor1], [outs]
else:
outs = tf.eye(num_rows=input_tensor0, dtype=parameters["dtype"])
return [input_tensor0], [outs]
def build_inputs(parameters, sess, inputs, outputs):
input_value0 = create_scalar_data(dtype=np.int32, min_value=1)
input_value1 = create_scalar_data(dtype=np.int32, min_value=1)
if parameters["use_num_cols"]:
return [input_value0, input_value1], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value0, input_value1])))
else:
return [input_value0], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value0])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function(name="make_unidirectional_sequence_lstm_tests")
@test_util.enable_control_flow_v2
def make_unidirectional_sequence_lstm_tests(options):
"""Make a set of tests to do unidirectional_sequence_lstm."""
test_parameters = [{
"batch_size": [2, 4, 6],
"seq_length": [1, 3],
"units": [4, 5],
"use_peepholes": [False, True],
"is_dynamic_rnn": [False, True]
}]
def build_graph(parameters):
input_values = []
if parameters["is_dynamic_rnn"]:
shape = [
parameters["seq_length"], parameters["batch_size"],
parameters["units"]
]
input_value = tf.placeholder(dtype=tf.float32, name="input", shape=shape)
input_values.append(input_value)
lstm_cell = tf.lite.experimental.nn.TFLiteLSTMCell(
parameters["units"],
use_peepholes=parameters["use_peepholes"])
outs, _ = tf.lite.experimental.nn.dynamic_rnn(
lstm_cell, input_value, dtype=tf.float32, time_major=True)
outs = tf.unstack(outs, axis=1)
else:
shape = [parameters["batch_size"], parameters["units"]]
for i in range(parameters["seq_length"]):
input_value = tf.placeholder(
dtype=tf.float32, name=("input_%d" % i), shape=shape)
input_values.append(input_value)
lstm_cell = tf.lite.experimental.nn.TFLiteLSTMCell(
parameters["units"], use_peepholes=parameters["use_peepholes"])
outs, _ = tf.nn.static_rnn(lstm_cell, input_values, dtype=tf.float32)
real_output = tf.zeros([1], dtype=tf.float32) + outs[-1]
return input_values, [real_output]
def build_inputs(parameters, sess, inputs, outputs):
input_values = []
if parameters["is_dynamic_rnn"]:
shape = [
parameters["seq_length"], parameters["batch_size"],
parameters["units"]
]
input_value = create_tensor_data(tf.float32, shape)
input_values.append(input_value)
else:
shape = [parameters["batch_size"], parameters["units"]]
for i in range(parameters["seq_length"]):
input_value = create_tensor_data(tf.float32, shape)
input_values.append(input_value)
init = tf.global_variables_initializer()
sess.run(init)
    # The TFLite fused kernel takes its input as [time, batch, input].
    # For the static unidirectional sequence LSTM the input is a list of
    # length `time` that gets packed together; for time = 1 the input is
    # left unpacked, so reshape it to add the time dimension for TFLite.
tflite_input_values = input_values
if not parameters["is_dynamic_rnn"] and parameters["seq_length"] == 1:
tflite_input_values = [
input_values[0].reshape((1, parameters["batch_size"],
parameters["units"]))
]
return tflite_input_values, sess.run(
outputs, feed_dict=dict(zip(inputs, input_values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
use_frozen_graph=True)
@register_make_test_function(name="make_unidirectional_sequence_rnn_tests")
@test_util.enable_control_flow_v2
def make_unidirectional_sequence_rnn_tests(options):
"""Make a set of tests to do unidirectional_sequence_rnn."""
test_parameters = [{
"batch_size": [2, 4, 6],
"seq_length": [1, 3],
"units": [4, 5],
"is_dynamic_rnn": [False, True]
}]
def build_graph(parameters):
input_values = []
if parameters["is_dynamic_rnn"]:
shape = [
parameters["seq_length"], parameters["batch_size"],
parameters["units"]
]
input_value = tf.placeholder(dtype=tf.float32, name="input", shape=shape)
input_values.append(input_value)
rnn_cell = tf.lite.experimental.nn.TfLiteRNNCell(parameters["units"])
outs, _ = tf.lite.experimental.nn.dynamic_rnn(
rnn_cell, input_value, dtype=tf.float32, time_major=True)
outs = tf.unstack(outs, axis=1)
else:
shape = [parameters["batch_size"], parameters["units"]]
for i in range(parameters["seq_length"]):
input_value = tf.placeholder(
dtype=tf.float32, name=("input_%d" % i), shape=shape)
input_values.append(input_value)
rnn_cell = tf.lite.experimental.nn.TfLiteRNNCell(parameters["units"])
outs, _ = tf.nn.static_rnn(rnn_cell, input_values, dtype=tf.float32)
real_output = tf.zeros([1], dtype=tf.float32) + outs[-1]
return input_values, [real_output]
def build_inputs(parameters, sess, inputs, outputs):
input_values = []
if parameters["is_dynamic_rnn"]:
shape = [
parameters["seq_length"], parameters["batch_size"],
parameters["units"]
]
input_value = create_tensor_data(tf.float32, shape)
input_values.append(input_value)
else:
shape = [parameters["batch_size"], parameters["units"]]
for i in range(parameters["seq_length"]):
input_value = create_tensor_data(tf.float32, shape)
input_values.append(input_value)
init = tf.global_variables_initializer()
sess.run(init)
    # The TFLite fused kernel takes its input as [time, batch, input].
    # For the static unidirectional sequence RNN the input is a list of
    # length `time` that gets packed together; for time = 1 the input is
    # left unpacked, so reshape it to add the time dimension for TFLite.
tflite_input_values = input_values
if not parameters["is_dynamic_rnn"] and parameters["seq_length"] == 1:
tflite_input_values = [
input_values[0].reshape((1, parameters["batch_size"],
parameters["units"]))
]
return tflite_input_values, sess.run(
outputs, feed_dict=dict(zip(inputs, input_values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
use_frozen_graph=True)
@register_make_test_function()
def make_unfused_gru_tests(options):
"""Make a set of tests for unfused gru op."""
test_parameters = [{
"units": [2, 5],
"batch_size": [1, 2],
"time": [3],
}]
def build_graph(parameters):
inputs = [
tf.placeholder(tf.float32,
[parameters["batch_size"], parameters["units"]])
for _ in range(parameters["time"])
]
cell_fw = tf.nn.rnn_cell.GRUCell(parameters["units"])
cell_bw = tf.nn.rnn_cell.GRUCell(parameters["units"])
outputs, _, _ = tf.nn.static_bidirectional_rnn(
cell_fw, cell_bw, inputs, dtype=tf.float32)
return inputs, outputs
def build_inputs(parameters, sess, inputs, outputs):
input_values = [
create_tensor_data(tf.float32,
[parameters["batch_size"], parameters["units"]])
for _ in range(parameters["time"])
]
init = tf.global_variables_initializer()
sess.run(init)
return input_values, sess.run(
outputs, feed_dict=dict(zip(inputs, input_values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
use_frozen_graph=True)
@register_make_test_function()
def make_rfft2d_tests(options):
"""Make a set of tests to do rfft2d."""
test_parameters = [{
"input_dtype": [tf.float32],
"input_shape": [[8, 8], [3, 8, 8]],
"fft_length": [
None, [4, 4], [4, 8], [8, 4], [8, 8], [8, 16], [16, 8], [16, 16]
]
}]
def build_graph(parameters):
input_value = tf.placeholder(
dtype=parameters["input_dtype"],
name="input",
shape=parameters["input_shape"])
with spectral_ops_test_util.fft_kernel_label_map():
outs = tf.signal.rfft2d(input_value, fft_length=parameters["fft_length"])
return [input_value], [outs]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_tensor_data(parameters["input_dtype"],
parameters["input_shape"])
return [input_value], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value])))
extra_toco_options = ExtraTocoOptions()
extra_toco_options.allow_custom_ops = True
make_zip_of_tests(options, test_parameters, build_graph, build_inputs,
extra_toco_options)
# Toco binary path provided by the generate rule.
bin_path = None
def generate_examples(options):
global bin_path
def mkdir_if_not_exist(x):
if not os.path.isdir(x):
os.mkdir(x)
if not os.path.isdir(x):
raise RuntimeError("Failed to create dir %r" % x)
opstest_path = os.path.join(options.output_path)
mkdir_if_not_exist(opstest_path)
out = options.zip_to_output
bin_path = options.toco
# Some zip filenames contain a postfix identifying the conversion mode. The
# list of valid conversion modes is defined in
# generated_test_conversion_modes() in build_def.bzl.
test_function = ("make_%s_tests" % (out.replace(".zip", "").replace(
"pb2lite", "").replace("toco-flex", "").rstrip("_")))
if test_function not in _MAKE_TEST_FUNCTIONS_MAP:
raise RuntimeError("Can't find a test function to create %r. Tried %r" %
(out, test_function))
_MAKE_TEST_FUNCTIONS_MAP[test_function](options)
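# A small illustration (hypothetical zip names, not taken from the original
# source) of the name mangling performed above: the ".zip" extension and any
# conversion-mode postfix are stripped before looking up the test function.
def _example_zip_to_test_function(out):
  """Illustrative-only copy of the mapping used in generate_examples()."""
  return "make_%s_tests" % (out.replace(".zip", "").replace(
      "pb2lite", "").replace("toco-flex", "").rstrip("_"))
# e.g. _example_zip_to_test_function("add.zip") == "make_add_tests"
#      _example_zip_to_test_function("add_toco-flex.zip") == "make_add_tests"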
|
tensorflow-master
|
tensorflow/lite/testing/generate_examples_lib.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generate a series of TensorFlow graphs that become tflite test cases.
Usage:
generate_examples <output directory>
bazel run //tensorflow/lite/testing:generate_examples
To debug failures more easily, pass the --save_graphdefs flag to place text
proto graphdefs into the generated zip files.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import argparse
import os
import sys
from tensorflow.lite.testing import generate_examples_lib
# TODO(aselle): Disable GPU for now
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
parser = argparse.ArgumentParser(description="Script to generate TFLite tests.")
parser.add_argument("output_path",
help="Directory where the outputs will be go.")
parser.add_argument(
"--zip_to_output",
type=str,
help="Particular zip to output.",
required=True)
parser.add_argument("--toco",
type=str,
help="Path to toco tool.",
required=True)
parser.add_argument(
"--known_bugs_are_errors",
action="store_true",
help=("If a particular model is affected by a known bug,"
" count it as a converter error."))
parser.add_argument(
"--ignore_converter_errors",
action="store_true",
help="Raise an exception if any converter error is encountered.")
parser.add_argument(
"--save_graphdefs",
action="store_true",
help="Include intermediate graphdefs in the output zip files.")
parser.add_argument(
"--run_with_flex",
action="store_true",
help="Whether the TFLite Flex converter is being used.")
# Toco binary path provided by the generate rule.
bin_path = None
def main(unused_args):
options = generate_examples_lib.Options()
options.output_path = FLAGS.output_path
options.zip_to_output = FLAGS.zip_to_output
options.toco = FLAGS.toco
options.known_bugs_are_errors = FLAGS.known_bugs_are_errors
options.ignore_converter_errors = FLAGS.ignore_converter_errors
options.save_graphdefs = FLAGS.save_graphdefs
options.run_with_flex = FLAGS.run_with_flex
generate_examples_lib.generate_examples(options)
if __name__ == "__main__":
FLAGS, unparsed = parser.parse_known_args()
if unparsed:
print("Usage: %s <path out> <zip file to generate>")
else:
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
tensorflow-master
|
tensorflow/lite/testing/generate_examples.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to test TFLite models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from six import PY3
from google.protobuf import text_format as _text_format
from google.protobuf.message import DecodeError
from tensorflow.core.framework import graph_pb2 as _graph_pb2
from tensorflow.lite.python import convert_saved_model as _convert_saved_model
from tensorflow.lite.python import lite as _lite
from tensorflow.lite.python import util as _util
from tensorflow.python import keras as _keras
from tensorflow.python.client import session as _session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework.importer import import_graph_def as _import_graph_def
from tensorflow.python.keras.preprocessing import image
from tensorflow.python.lib.io import file_io as _file_io
from tensorflow.python.platform import resource_loader as _resource_loader
from tensorflow.python.saved_model import load as _load
from tensorflow.python.saved_model import loader as _loader
from tensorflow.python.saved_model import signature_constants as _signature_constants
from tensorflow.python.saved_model import tag_constants as _tag_constants
def get_filepath(filename, base_dir=None):
"""Returns the full path of the filename.
Args:
filename: Subdirectory and name of the model file.
base_dir: Base directory containing model file.
Returns:
str.
"""
if base_dir is None:
base_dir = "learning/brain/mobile/tflite_compat_models"
return os.path.join(_resource_loader.get_root_dir_with_all_resources(),
base_dir, filename)
def get_image(size):
"""Returns an image loaded into an np.ndarray with dims [1, size, size, 3].
Args:
size: Size of image.
Returns:
np.ndarray.
"""
img_filename = _resource_loader.get_path_to_datafile(
"testdata/grace_hopper.jpg")
img = image.load_img(img_filename, target_size=(size, size))
img_array = image.img_to_array(img)
img_array = np.expand_dims(img_array, axis=0)
return img_array
def _convert(converter, **kwargs):
"""Converts the model.
Args:
converter: TFLiteConverter object.
**kwargs: Additional arguments to be passed into the converter. Supported
flags are {"target_ops", "post_training_quantize"}.
Returns:
The converted TFLite model in serialized format.
Raises:
ValueError: Invalid version number.
"""
if "target_ops" in kwargs:
converter.target_spec.supported_ops = kwargs["target_ops"]
if "post_training_quantize" in kwargs:
converter.post_training_quantize = kwargs["post_training_quantize"]
return converter.convert()
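# A minimal sketch (the frozen-graph path and tensor names are assumptions,
# not taken from this module) of how _convert is typically driven: build a
# converter, then forward the supported keyword flags. Only "target_ops" and
# "post_training_quantize" are recognized; other kwargs are ignored.
def _example_convert_with_flags(frozen_graph_path):
  """Illustrative-only helper; not part of the original module."""
  converter = _lite.TFLiteConverter.from_frozen_graph(
      frozen_graph_path, input_arrays=["input"], output_arrays=["output"])
  return _convert(
      converter,
      target_ops=set([_lite.OpsSet.TFLITE_BUILTINS]),
      post_training_quantize=True)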
def _get_input_data_map(tflite_model, input_data):
"""Generates a map of input data based on the TFLite model.
Args:
tflite_model: Serialized TensorFlow Lite model.
input_data: List of np.ndarray.
Returns:
{str: [np.ndarray]}.
"""
interpreter = _lite.Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
return {
input_tensor["name"]: data
for input_tensor, data in zip(input_details, input_data)
}
def _generate_random_input_data(tflite_model, seed=None):
"""Generates input data based on the input tensors in the TFLite model.
Args:
tflite_model: Serialized TensorFlow Lite model.
seed: Integer seed for the random generator. (default None)
Returns:
([np.ndarray], {str : [np.ndarray]}).
"""
interpreter = _lite.Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
if seed:
np.random.seed(seed=seed)
input_data = [
np.array(
np.random.random_sample(input_tensor["shape"]),
dtype=input_tensor["dtype"]) for input_tensor in input_details
]
input_data_map = _get_input_data_map(tflite_model, input_data)
return input_data, input_data_map
def _evaluate_tflite_model(tflite_model, input_data):
"""Returns evaluation of input data on TFLite model.
Args:
tflite_model: Serialized TensorFlow Lite model.
input_data: List of np.ndarray.
Returns:
List of np.ndarray.
"""
interpreter = _lite.Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
for input_tensor, tensor_data in zip(input_details, input_data):
interpreter.set_tensor(input_tensor["index"], tensor_data)
interpreter.invoke()
output_data = [
interpreter.get_tensor(output_tensor["index"])
for output_tensor in output_details
]
output_labels = [output_tensor["name"] for output_tensor in output_details]
return output_data, output_labels
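# A minimal sketch (illustrative only) tying the helpers above together:
# generate random inputs sized from the model's input tensors, then run them
# through the TFLite interpreter and label the outputs by tensor name.
def _example_tflite_roundtrip(tflite_model):
  """Illustrative-only helper; not part of the original module."""
  input_data, _ = _generate_random_input_data(tflite_model, seed=1)
  output_data, output_labels = _evaluate_tflite_model(tflite_model, input_data)
  return dict(zip(output_labels, output_data))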
def evaluate_frozen_graph(filename, input_arrays, output_arrays):
"""Returns a function that evaluates the frozen graph on input data.
Args:
filename: Full filepath of file containing frozen GraphDef.
input_arrays: List of input tensors to freeze graph with.
output_arrays: List of output tensors to freeze graph with.
Returns:
Lambda function ([np.ndarray data] : [np.ndarray result]).
"""
with _file_io.FileIO(filename, "rb") as f:
file_content = f.read()
graph_def = _graph_pb2.GraphDef()
try:
graph_def.ParseFromString(file_content)
except (_text_format.ParseError, DecodeError):
if not isinstance(file_content, str):
if PY3:
file_content = file_content.decode("utf-8")
else:
file_content = file_content.encode("utf-8")
_text_format.Merge(file_content, graph_def)
graph = ops.Graph()
with graph.as_default():
_import_graph_def(graph_def, name="")
inputs = _util.get_tensors_from_tensor_names(graph, input_arrays)
outputs = _util.get_tensors_from_tensor_names(graph, output_arrays)
def run_session(input_data):
with _session.Session(graph=graph) as sess:
return sess.run(outputs, dict(zip(inputs, input_data)))
return run_session
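# A short usage sketch (the frozen-graph path and tensor names below are
# assumptions, not taken from this module): the closure returned by
# evaluate_frozen_graph opens a fresh Session on each call, so it can be
# reused for multiple input batches.
def _example_eval_frozen_graph(frozen_graph_path):
  """Illustrative-only helper; not part of the original module."""
  eval_func = evaluate_frozen_graph(frozen_graph_path, ["input"], ["output"])
  batch = [np.ones((1, 16, 16, 3), dtype=np.float32)]
  return eval_func(batch)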
def evaluate_saved_model(directory, tag_set, signature_key):
"""Returns a function that evaluates the SavedModel on input data.
Args:
directory: SavedModel directory to convert.
tag_set: Set of tags identifying the MetaGraphDef within the SavedModel to
analyze. All tags in the tag set must be present.
signature_key: Key identifying SignatureDef containing inputs and outputs.
Returns:
Lambda function ([np.ndarray data] : [np.ndarray result]).
"""
with _session.Session().as_default() as sess:
if tag_set is None:
tag_set = set([_tag_constants.SERVING])
if signature_key is None:
signature_key = _signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
meta_graph = _loader.load(sess, tag_set, directory)
signature_def = _convert_saved_model.get_signature_def(
meta_graph, signature_key)
inputs, outputs = _convert_saved_model.get_inputs_outputs(signature_def)
return lambda input_data: sess.run(outputs, dict(zip(inputs, input_data)))
def evaluate_keras_model(filename):
"""Returns a function that evaluates the tf.keras model on input data.
Args:
filename: Full filepath of HDF5 file containing the tf.keras model.
Returns:
Lambda function ([np.ndarray data] : [np.ndarray result]).
"""
keras_model = _keras.models.load_model(filename)
return lambda input_data: [keras_model.predict(input_data)]
def compare_models(tflite_model, tf_eval_func, input_data=None, tolerance=5):
"""Compares TensorFlow and TFLite models.
Unless the input data is provided, the models are compared with random data.
Args:
tflite_model: Serialized TensorFlow Lite model.
tf_eval_func: Lambda function that takes in input data and outputs the
results of the TensorFlow model ([np.ndarray data] : [np.ndarray result]).
input_data: np.ndarray to pass into models during inference. (default None)
tolerance: Decimal place to check accuracy to. (default 5)
"""
if input_data is None:
input_data, _ = _generate_random_input_data(tflite_model)
tf_results = tf_eval_func(input_data)
tflite_results, _ = _evaluate_tflite_model(tflite_model, input_data)
for tf_result, tflite_result in zip(tf_results, tflite_results):
np.testing.assert_almost_equal(tf_result, tflite_result, tolerance)
def compare_models_v2(tflite_model, tf_eval_func, input_data=None, tolerance=5):
"""Compares TensorFlow and TFLite models for TensorFlow 2.0.
Unless the input data is provided, the models are compared with random data.
Currently only 1 input and 1 output are supported by this function.
Args:
tflite_model: Serialized TensorFlow Lite model.
tf_eval_func: Function to evaluate TensorFlow model. Either a lambda
function that takes in input data and outputs the results or a TensorFlow
ConcreteFunction.
input_data: np.ndarray to pass into models during inference. (default None)
tolerance: Decimal place to check accuracy to. (default 5)
"""
# Convert the input data into a map.
if input_data is None:
input_data, input_data_map = _generate_random_input_data(tflite_model)
else:
input_data_map = _get_input_data_map(tflite_model, input_data)
input_data_func_map = {
input_name: constant_op.constant(input_data)
for input_name, input_data in input_data_map.items()
}
if len(input_data) > 1:
tf_results = tf_eval_func(**input_data_func_map)
else:
tf_results = tf_eval_func(constant_op.constant(input_data[0]))
tflite_results, tflite_labels = _evaluate_tflite_model(
tflite_model, input_data)
# Convert the output TensorFlow results into an ordered list.
if isinstance(tf_results, dict):
if len(tf_results) == 1:
      tf_results = [tf_results[list(tf_results.keys())[0]]]
else:
tf_results = [tf_results[tflite_label] for tflite_label in tflite_labels]
for tf_result, tflite_result in zip(tf_results, tflite_results):
np.testing.assert_almost_equal(tf_result, tflite_result, tolerance)
def test_frozen_graph_quant(filename,
input_arrays,
output_arrays,
input_shapes=None,
**kwargs):
"""Sanity check to validate post quantize flag alters the graph.
This test does not check correctness of the converted model. It converts the
TensorFlow frozen graph to TFLite with and without the post_training_quantized
flag. It ensures some tensors have different types between the float and
quantized models in the case of an all TFLite model or mix-and-match model.
It ensures tensor types do not change in the case of an all Flex model.
Args:
filename: Full filepath of file containing frozen GraphDef.
input_arrays: List of input tensors to freeze graph with.
output_arrays: List of output tensors to freeze graph with.
input_shapes: Dict of strings representing input tensor names to list of
integers representing input shapes (e.g., {"foo" : [1, 16, 16, 3]}).
Automatically determined when input shapes is None (e.g., {"foo" : None}).
(default None)
**kwargs: Additional arguments to be passed into the converter.
Raises:
ValueError: post_training_quantize flag doesn't act as intended.
"""
# Convert and load the float model.
converter = _lite.TFLiteConverter.from_frozen_graph(
filename, input_arrays, output_arrays, input_shapes)
tflite_model_float = _convert(converter, **kwargs)
interpreter_float = _lite.Interpreter(model_content=tflite_model_float)
interpreter_float.allocate_tensors()
float_tensors = interpreter_float.get_tensor_details()
# Convert and load the quantized model.
converter = _lite.TFLiteConverter.from_frozen_graph(filename, input_arrays,
output_arrays)
tflite_model_quant = _convert(
converter, post_training_quantize=True, **kwargs)
interpreter_quant = _lite.Interpreter(model_content=tflite_model_quant)
interpreter_quant.allocate_tensors()
quant_tensors = interpreter_quant.get_tensor_details()
quant_tensors_map = {
tensor_detail["name"]: tensor_detail for tensor_detail in quant_tensors
}
# Check if weights are of different types in the float and quantized models.
num_tensors_float = len(float_tensors)
num_tensors_same_dtypes = sum(
float_tensor["dtype"] == quant_tensors_map[float_tensor["name"]]["dtype"]
for float_tensor in float_tensors)
has_quant_tensor = num_tensors_float != num_tensors_same_dtypes
if ("target_ops" in kwargs and
set(kwargs["target_ops"]) == set([_lite.OpsSet.SELECT_TF_OPS])):
if has_quant_tensor:
raise ValueError("--post_training_quantize flag unexpectedly altered the "
"full Flex mode graph.")
elif not has_quant_tensor:
raise ValueError("--post_training_quantize flag was unable to quantize the "
"graph as expected in TFLite and mix-and-match mode.")
def test_frozen_graph(filename,
input_arrays,
output_arrays,
input_shapes=None,
input_data=None,
**kwargs):
"""Validates the TensorFlow frozen graph converts to a TFLite model.
Converts the TensorFlow frozen graph to TFLite and checks the accuracy of the
model on random data.
Args:
filename: Full filepath of file containing frozen GraphDef.
input_arrays: List of input tensors to freeze graph with.
output_arrays: List of output tensors to freeze graph with.
input_shapes: Dict of strings representing input tensor names to list of
integers representing input shapes (e.g., {"foo" : [1, 16, 16, 3]}).
Automatically determined when input shapes is None (e.g., {"foo" : None}).
(default None)
input_data: np.ndarray to pass into models during inference. (default None)
**kwargs: Additional arguments to be passed into the converter.
"""
converter = _lite.TFLiteConverter.from_frozen_graph(
filename, input_arrays, output_arrays, input_shapes)
tflite_model = _convert(converter, **kwargs)
tf_eval_func = evaluate_frozen_graph(filename, input_arrays, output_arrays)
compare_models(tflite_model, tf_eval_func, input_data=input_data)
def test_saved_model(directory,
input_shapes=None,
tag_set=None,
signature_key=None,
input_data=None,
**kwargs):
"""Validates the TensorFlow SavedModel converts to a TFLite model.
Converts the TensorFlow SavedModel to TFLite and checks the accuracy of the
model on random data.
Args:
directory: SavedModel directory to convert.
input_shapes: Dict of strings representing input tensor names to list of
integers representing input shapes (e.g., {"foo" : [1, 16, 16, 3]}).
Automatically determined when input shapes is None (e.g., {"foo" : None}).
(default None)
tag_set: Set of tags identifying the MetaGraphDef within the SavedModel to
analyze. All tags in the tag set must be present.
signature_key: Key identifying SignatureDef containing inputs and outputs.
input_data: np.ndarray to pass into models during inference. (default None)
**kwargs: Additional arguments to be passed into the converter.
"""
converter = _lite.TFLiteConverter.from_saved_model(
directory,
input_shapes=input_shapes,
tag_set=tag_set,
signature_key=signature_key)
tflite_model = _convert(converter, **kwargs)
tf_eval_func = evaluate_saved_model(directory, tag_set, signature_key)
compare_models(tflite_model, tf_eval_func, input_data=input_data)
def test_saved_model_v2(directory,
tag_set=None,
signature_key=None,
input_data=None,
**kwargs):
"""Validates the TensorFlow SavedModel converts to a TFLite model.
Converts the TensorFlow SavedModel to TFLite and checks the accuracy of the
model on random data.
Args:
directory: SavedModel directory to convert.
tag_set: Set of tags identifying the MetaGraphDef within the SavedModel to
analyze. All tags in the tag set must be present.
signature_key: Key identifying SignatureDef containing inputs and outputs.
input_data: np.ndarray to pass into models during inference. (default None)
**kwargs: Additional arguments to be passed into the converter.
"""
model = _load.load(directory, tags=tag_set)
if not signature_key:
signature_key = _signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
concrete_func = model.signatures[signature_key]
converter = _lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
tflite_model = _convert(converter, **kwargs)
compare_models_v2(tflite_model, concrete_func, input_data=input_data)
def test_keras_model(filename,
input_arrays=None,
input_shapes=None,
input_data=None,
**kwargs):
"""Validates the tf.keras model converts to a TFLite model.
Converts the tf.keras model to TFLite and checks the accuracy of the model on
random data.
Args:
filename: Full filepath of HDF5 file containing the tf.keras model.
input_arrays: List of input tensors to freeze graph with.
input_shapes: Dict of strings representing input tensor names to list of
integers representing input shapes (e.g., {"foo" : [1, 16, 16, 3]}).
Automatically determined when input shapes is None (e.g., {"foo" : None}).
(default None)
input_data: np.ndarray to pass into models during inference. (default None)
**kwargs: Additional arguments to be passed into the converter.
"""
converter = _lite.TFLiteConverter.from_keras_model_file(
filename, input_arrays=input_arrays, input_shapes=input_shapes)
tflite_model = _convert(converter, **kwargs)
tf_eval_func = evaluate_keras_model(filename)
compare_models(tflite_model, tf_eval_func, input_data=input_data)
def test_keras_model_v2(filename, input_shapes=None, input_data=None, **kwargs):
"""Validates the tf.keras model converts to a TFLite model.
Converts the tf.keras model to TFLite and checks the accuracy of the model on
random data.
Args:
filename: Full filepath of HDF5 file containing the tf.keras model.
input_shapes: List of list of integers representing input shapes in the
order of the tf.keras model's .input attribute (e.g., [[1, 16, 16, 3]]).
(default None)
input_data: np.ndarray to pass into models during inference. (default None)
**kwargs: Additional arguments to be passed into the converter.
"""
keras_model = _keras.models.load_model(filename)
if input_shapes:
for tensor, shape in zip(keras_model.inputs, input_shapes):
tensor.set_shape(shape)
converter = _lite.TFLiteConverterV2.from_keras_model(keras_model)
tflite_model = _convert(converter, **kwargs)
tf_eval_func = evaluate_keras_model(filename)
compare_models_v2(tflite_model, tf_eval_func, input_data=input_data)
|
tensorflow-master
|
tensorflow/lite/testing/model_coverage/model_coverage_lib.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for model_coverage_lib.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import numpy as np
from tensorflow.lite.python import lite
from tensorflow.lite.testing.model_coverage import model_coverage_lib as model_coverage
from tensorflow.python import keras
from tensorflow.python.client import session
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.saved_model import saved_model
from tensorflow.python.training.training_util import write_graph
@test_util.run_v1_only('Incompatible with 2.0.')
class EvaluateFrozenGraph(test.TestCase):
def _saveFrozenGraph(self, sess):
graph_def_file = os.path.join(self.get_temp_dir(), 'model.pb')
write_graph(sess.graph_def, '', graph_def_file, False)
return graph_def_file
def testFloat(self):
with session.Session().as_default() as sess:
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
_ = in_tensor + in_tensor
filename = self._saveFrozenGraph(sess)
model_coverage.test_frozen_graph(filename, ['Placeholder'], ['add'])
def testMultipleOutputs(self):
with session.Session().as_default() as sess:
in_tensor_1 = array_ops.placeholder(
shape=[1, 16], dtype=dtypes.float32, name='inputA')
in_tensor_2 = array_ops.placeholder(
shape=[1, 16], dtype=dtypes.float32, name='inputB')
weight = constant_op.constant(-1.0, shape=[16, 16])
bias = constant_op.constant(-1.0, shape=[16])
layer = math_ops.matmul(in_tensor_1, weight) + bias
_ = math_ops.reduce_mean(math_ops.square(layer - in_tensor_2))
filename = self._saveFrozenGraph(sess)
model_coverage.test_frozen_graph(filename, ['inputA', 'inputB'],
['add', 'Mean'])
@test_util.run_in_graph_and_eager_modes
def testFunctions(self):
"""Tests functions."""
@def_function.function
def plus_placeholder(x, placeholder):
return x + placeholder
with ops.Graph().as_default():
placeholder = array_ops.placeholder(
dtype=dtypes.float32, shape=[1], name='input')
variable_node = constant_op.constant(1.0, name='variable_node')
defun_node = plus_placeholder(variable_node, placeholder)
_ = math_ops.multiply(defun_node, 2.0, name='output_node')
# Initialize variables in the model.
sess = session.Session()
filename = self._saveFrozenGraph(sess)
model_coverage.test_frozen_graph(filename, ['input'], ['output_node'])
def _getQuantizedModel(self):
np.random.seed(0)
with session.Session().as_default() as sess:
# The tensor needs to have more than 1024 elements for quantize_weights to
# kick in. Thus, the [33, 33] shape.
in_tensor_1 = array_ops.placeholder(
shape=[33, 33], dtype=dtypes.float32, name='inputA')
in_tensor_2 = constant_op.constant(
np.random.uniform(low=-10., high=10., size=(33, 33)),
shape=[33, 33],
dtype=dtypes.float32,
name='inputB')
_ = math_ops.matmul(in_tensor_1, in_tensor_2, name='output')
filename = self._saveFrozenGraph(sess)
return filename
def testQuantized(self):
filename = self._getQuantizedModel()
model_coverage.test_frozen_graph_quant(filename, ['inputA'], ['output'])
def testQuantizedInputShapes(self):
filename = self._getQuantizedModel()
model_coverage.test_frozen_graph_quant(
filename, ['inputA'], ['output'], input_shapes={'inputA': [33, 33]})
def testQuantizedFlexAll(self):
filename = self._getQuantizedModel()
model_coverage.test_frozen_graph_quant(
filename, ['inputA'], ['output'],
target_ops=set([lite.OpsSet.SELECT_TF_OPS]))
@test_util.run_v1_only('Incompatible with 2.0.')
class EvaluateSavedModel(test.TestCase):
def testFloat(self):
saved_model_dir = os.path.join(self.get_temp_dir(), 'simple_savedmodel')
with session.Session().as_default() as sess:
in_tensor_1 = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputB')
in_tensor_2 = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputA')
out_tensor = in_tensor_1 + in_tensor_2
inputs = {'x': in_tensor_1, 'y': in_tensor_2}
outputs = {'z': out_tensor}
saved_model.simple_save(sess, saved_model_dir, inputs, outputs)
model_coverage.test_saved_model(saved_model_dir)
@test_util.run_v1_only('Incompatible with 2.0.')
class EvaluateKerasModel(test.TestCase):
def _getSingleInputKerasModel(self):
"""Returns single input Sequential tf.keras model."""
keras.backend.clear_session()
xs = [-1, 0, 1, 2, 3, 4]
ys = [-3, -1, 1, 3, 5, 7]
model = keras.Sequential([keras.layers.Dense(units=1, input_shape=[1])])
model.compile(optimizer='sgd', loss='mean_squared_error')
model.train_on_batch(xs, ys)
return model
def _saveKerasModel(self, model):
try:
fd, keras_file = tempfile.mkstemp('.h5')
keras.models.save_model(model, keras_file)
finally:
os.close(fd)
return keras_file
def testFloat(self):
model = self._getSingleInputKerasModel()
keras_file = self._saveKerasModel(model)
model_coverage.test_keras_model(keras_file)
def testPostTrainingQuantize(self):
model = self._getSingleInputKerasModel()
keras_file = self._saveKerasModel(model)
model_coverage.test_keras_model(keras_file, post_training_quantize=True)
def testTargetOps(self):
model = self._getSingleInputKerasModel()
keras_file = self._saveKerasModel(model)
model_coverage.test_keras_model(
keras_file,
target_ops=set([lite.OpsSet.TFLITE_BUILTINS,
lite.OpsSet.SELECT_TF_OPS]))
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/lite/testing/model_coverage/model_coverage_lib_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Testing for updating TensorFlow lite schema."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import tempfile
from tensorflow.lite.schema import upgrade_schema as upgrade_schema_lib
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test as test_lib
EMPTY_TEST_SCHEMA_V1 = {
"version": 1,
"operator_codes": [],
"subgraphs": [],
}
EMPTY_TEST_SCHEMA_V3 = {
"version": 3,
"operator_codes": [],
"subgraphs": [],
"buffers": [{
"data": []
}]
}
TEST_SCHEMA_V0 = {
"operator_codes": [],
"tensors": [],
"inputs": [],
"outputs": [],
"operators": [],
"version": 0
}
TEST_SCHEMA_V3 = {
"operator_codes": [],
"buffers": [{
"data": []
}],
"subgraphs": [{
"tensors": [],
"inputs": [],
"outputs": [],
"operators": [],
}],
"version":
3
}
FULL_TEST_SCHEMA_V1 = {
"version":
1,
"operator_codes": [
{
"builtin_code": "CONVOLUTION"
},
{
"builtin_code": "DEPTHWISE_CONVOLUTION"
},
{
"builtin_code": "AVERAGE_POOL"
},
{
"builtin_code": "MAX_POOL"
},
{
"builtin_code": "L2_POOL"
},
{
"builtin_code": "SIGMOID"
},
{
"builtin_code": "L2NORM"
},
{
"builtin_code": "LOCAL_RESPONSE_NORM"
},
{
"builtin_code": "ADD"
},
{
"builtin_code": "Basic_RNN"
},
],
"subgraphs": [{
"operators": [
{
"builtin_options_type": "PoolOptions"
},
{
"builtin_options_type": "DepthwiseConvolutionOptions"
},
{
"builtin_options_type": "ConvolutionOptions"
},
{
"builtin_options_type": "LocalResponseNormOptions"
},
{
"builtin_options_type": "BasicRNNOptions"
},
],
}],
"description":
"",
}
FULL_TEST_SCHEMA_V3 = {
"version":
3,
"operator_codes": [
{
"builtin_code": "CONV_2D"
},
{
"builtin_code": "DEPTHWISE_CONV_2D"
},
{
"builtin_code": "AVERAGE_POOL_2D"
},
{
"builtin_code": "MAX_POOL_2D"
},
{
"builtin_code": "L2_POOL_2D"
},
{
"builtin_code": "LOGISTIC"
},
{
"builtin_code": "L2_NORMALIZATION"
},
{
"builtin_code": "LOCAL_RESPONSE_NORMALIZATION"
},
{
"builtin_code": "ADD"
},
{
"builtin_code": "RNN"
},
],
"subgraphs": [{
"operators": [
{
"builtin_options_type": "Pool2DOptions"
},
{
"builtin_options_type": "DepthwiseConv2DOptions"
},
{
"builtin_options_type": "Conv2DOptions"
},
{
"builtin_options_type": "LocalResponseNormalizationOptions"
},
{
"builtin_options_type": "RNNOptions"
},
],
}],
"description":
"",
"buffers": [{
"data": []
}]
}
BUFFER_TEST_V2 = {
"operator_codes": [],
"buffers": [],
"subgraphs": [{
"tensors": [
{
"data_buffer": [1, 2, 3, 4]
},
{
"data_buffer": [1, 2, 3, 4, 5, 6, 7, 8]
},
{
"data_buffer": []
},
],
"inputs": [],
"outputs": [],
"operators": [],
}],
"version":
2
}
BUFFER_TEST_V3 = {
"operator_codes": [],
"subgraphs": [{
"tensors": [
{
"buffer": 1
},
{
"buffer": 2
},
{
"buffer": 0
},
],
"inputs": [],
"outputs": [],
"operators": [],
}],
"buffers": [
{
"data": []
},
{
"data": [1, 2, 3, 4]
},
{
"data": [1, 2, 3, 4, 5, 6, 7, 8]
},
],
"version":
3
}
def JsonDumpAndFlush(data, fp):
"""Write the dictionary `data` to a JSON file `fp` (and flush).
Args:
    data: A dictionary that is JSON serializable.
    fp: File-like object to write to.
"""
json.dump(data, fp)
fp.flush()
class TestSchemaUpgrade(test_util.TensorFlowTestCase):
def testNonExistentFile(self):
converter = upgrade_schema_lib.Converter()
non_existent = tempfile.mktemp(suffix=".json")
with self.assertRaisesRegexp(IOError, "No such file or directory"):
converter.Convert(non_existent, non_existent)
def testInvalidExtension(self):
converter = upgrade_schema_lib.Converter()
invalid_extension = tempfile.mktemp(suffix=".foo")
with self.assertRaisesRegexp(ValueError, "Invalid extension on input"):
converter.Convert(invalid_extension, invalid_extension)
with tempfile.NamedTemporaryFile(suffix=".json", mode="w+") as in_json:
JsonDumpAndFlush(EMPTY_TEST_SCHEMA_V1, in_json)
with self.assertRaisesRegexp(ValueError, "Invalid extension on output"):
converter.Convert(in_json.name, invalid_extension)
def CheckConversion(self, data_old, data_expected):
"""Given a data dictionary, test upgrading to current version.
Args:
data_old: TFLite model as a dictionary (arbitrary version).
data_expected: TFLite model as a dictionary (upgraded).
"""
converter = upgrade_schema_lib.Converter()
with tempfile.NamedTemporaryFile(suffix=".json", mode="w+") as in_json, \
tempfile.NamedTemporaryFile(
suffix=".json", mode="w+") as out_json, \
tempfile.NamedTemporaryFile(
suffix=".bin", mode="w+b") as out_bin, \
tempfile.NamedTemporaryFile(
suffix=".tflite", mode="w+b") as out_tflite:
JsonDumpAndFlush(data_old, in_json)
# Test JSON output
converter.Convert(in_json.name, out_json.name)
# Test binary output
# Convert to .tflite and then to .bin and check if binary is equal
converter.Convert(in_json.name, out_tflite.name)
converter.Convert(out_tflite.name, out_bin.name)
self.assertEqual(
open(out_bin.name, "rb").read(),
open(out_tflite.name, "rb").read())
# Test that conversion actually produced successful new json.
converted_schema = json.load(out_json)
self.assertEqual(converted_schema, data_expected)
def testAlreadyUpgraded(self):
"""A file already at version 3 should stay at version 3."""
self.CheckConversion(EMPTY_TEST_SCHEMA_V3, EMPTY_TEST_SCHEMA_V3)
self.CheckConversion(TEST_SCHEMA_V3, TEST_SCHEMA_V3)
self.CheckConversion(BUFFER_TEST_V3, BUFFER_TEST_V3)
# Disable this while we have incorrectly versioned structures around.
# def testV0Upgrade_IntroducesSubgraphs(self):
# """V0 did not have subgraphs; check to make sure they get introduced."""
# self.CheckConversion(TEST_SCHEMA_V0, TEST_SCHEMA_V3)
def testV1Upgrade_RenameOps(self):
"""V1 had many different names for ops; check to make sure they rename."""
self.CheckConversion(EMPTY_TEST_SCHEMA_V1, EMPTY_TEST_SCHEMA_V3)
self.CheckConversion(FULL_TEST_SCHEMA_V1, FULL_TEST_SCHEMA_V3)
def testV2Upgrade_CreateBuffers(self):
"""V2 did not have buffers; check to make sure they are created."""
self.CheckConversion(BUFFER_TEST_V2, BUFFER_TEST_V3)
if __name__ == "__main__":
test_lib.main()
|
tensorflow-master
|
tensorflow/lite/schema/upgrade_schema_test.py
|
# ==============================================================================
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Upgrade script to move from pre-release schema to new schema.
Usage examples:
bazel run tensorflow/lite/schema/upgrade_schema -- in.json out.json
bazel run tensorflow/lite/schema/upgrade_schema -- in.bin out.bin
bazel run tensorflow/lite/schema/upgrade_schema -- in.bin out.json
bazel run tensorflow/lite/schema/upgrade_schema -- in.json out.bin
bazel run tensorflow/lite/schema/upgrade_schema -- in.tflite out.tflite
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import contextlib
import json
import os
import shutil
import subprocess
import sys
import tempfile
import tensorflow as tf
from tensorflow.python.platform import resource_loader
parser = argparse.ArgumentParser(
description="Script to move TFLite models from pre-release schema to "
"new schema.")
parser.add_argument(
"input",
type=str,
help="Input TensorFlow lite file in `.json`, `.bin` or `.tflite` format.")
parser.add_argument(
"output",
type=str,
help="Output json or bin TensorFlow lite model compliant with "
"the new schema. Extension must be `.json`, `.bin` or `.tflite`.")
# RAII Temporary Directory, because flatc doesn't allow direct use of tempfiles.
@contextlib.contextmanager
def TemporaryDirectoryResource():
temporary = tempfile.mkdtemp()
try:
yield temporary
finally:
shutil.rmtree(temporary)
class Converter(object):
"""Converts TensorFlow flatbuffer models from old to new version of schema.
  This can convert from any earlier schema version to the latest version. It
  uses an incremental upgrade strategy to go from version to version.
Usage:
converter = Converter()
converter.Convert("a.tflite", "a.json")
converter.Convert("b.json", "b.tflite")
"""
def __init__(self):
# TODO(aselle): make this work in the open source version with better
# path.
paths_to_try = [
"../../../../flatbuffers/flatc", # not bazel
"../../../../external/flatbuffers/flatc" # bazel
]
for p in paths_to_try:
self._flatc_path = resource_loader.get_path_to_datafile(p)
if os.path.exists(self._flatc_path): break
def FindSchema(base_name):
return resource_loader.get_path_to_datafile("%s" % base_name)
# Supported schemas for upgrade.
self._schemas = [
(0, FindSchema("schema_v0.fbs"), True, self._Upgrade0To1),
(1, FindSchema("schema_v1.fbs"), True, self._Upgrade1To2),
(2, FindSchema("schema_v2.fbs"), True, self._Upgrade2To3),
(3, FindSchema("schema_v3.fbs"), False, None) # Non-callable by design.
]
# Ensure schemas are sorted, and extract latest version and upgrade
# dispatch function table.
self._schemas.sort()
self._new_version, self._new_schema = self._schemas[-1][:2]
self._upgrade_dispatch = {
version: dispatch
for version, unused1, unused2, dispatch in self._schemas}
def _Read(self, input_file, schema, raw_binary=False):
"""Read a tflite model assuming the given flatbuffer schema.
If `input_file` is in bin, then we must use flatc to convert the schema
from binary to json.
Args:
input_file: a binary (flatbuffer) or json file to read from. Extension
must be `.tflite`, `.bin`, or `.json` for FlatBuffer Binary or
FlatBuffer JSON.
schema: which schema to use for reading
      raw_binary: whether to assume raw binary; versions previous to v3 lack a
        file_identifier and therefore require this flag.
Raises:
RuntimeError: 1. When flatc cannot be invoked.
                    2. When the JSON file does not exist.
ValueError: When the extension is not json or bin.
Returns:
A dictionary representing the read tflite model.
"""
raw_binary = ["--raw-binary"] if raw_binary else []
with TemporaryDirectoryResource() as tempdir:
basename = os.path.basename(input_file)
basename_no_extension, extension = os.path.splitext(basename)
if extension in [".bin", ".tflite"]:
# Convert to json using flatc
returncode = subprocess.call([
self._flatc_path,
"-t",
"--strict-json",
"--defaults-json",
] + raw_binary + ["-o", tempdir, schema, "--", input_file])
if returncode != 0:
raise RuntimeError("flatc failed to convert from binary to json.")
json_file = os.path.join(tempdir, basename_no_extension + ".json")
if not os.path.exists(json_file):
raise RuntimeError("Could not find %r" % json_file)
elif extension == ".json":
json_file = input_file
else:
raise ValueError("Invalid extension on input file %r" % input_file)
return json.load(open(json_file))
def _Write(self, data, output_file):
"""Output a json or bin version of the flatbuffer model.
Args:
data: Dict representing the TensorFlow Lite model to write.
output_file: filename to write the converted flatbuffer to. (json,
tflite, or bin extension is required).
Raises:
ValueError: When the extension is not json or bin
RuntimeError: When flatc fails to convert json data to binary.
"""
_, extension = os.path.splitext(output_file)
with TemporaryDirectoryResource() as tempdir:
if extension == ".json":
json.dump(data, open(output_file, "w"), sort_keys=True, indent=2)
elif extension in [".tflite", ".bin"]:
input_json = os.path.join(tempdir, "temp.json")
with open(input_json, "w") as fp:
json.dump(data, fp, sort_keys=True, indent=2)
returncode = subprocess.call([
self._flatc_path, "-b", "--defaults-json", "--strict-json", "-o",
tempdir, self._new_schema, input_json
])
if returncode != 0:
raise RuntimeError("flatc failed to convert upgraded json to binary.")
shutil.copy(os.path.join(tempdir, "temp.tflite"), output_file)
else:
raise ValueError("Invalid extension on output file %r" % output_file)
def _Upgrade0To1(self, data):
"""Upgrade data from Version 0 to Version 1.
    Changes: Added subgraphs (which contain a subset of formerly global
    entries).
Args:
data: Dictionary representing the TensorFlow lite data to be upgraded.
This will be modified in-place to be an upgraded version.
"""
subgraph = {}
for key_to_promote in ["tensors", "operators", "inputs", "outputs"]:
subgraph[key_to_promote] = data[key_to_promote]
del data[key_to_promote]
data["subgraphs"] = [subgraph]
def _Upgrade1To2(self, data):
"""Upgrade data from Version 1 to Version 2.
    Changes: Renamed operators to conform to the NN API.
Args:
data: Dictionary representing the TensorFlow lite data to be upgraded.
This will be modified in-place to be an upgraded version.
Raises:
ValueError: Throws when model builtins are numeric rather than symbols.
"""
def RemapOperator(opcode_name):
"""Go from old schema op name to new schema op name.
Args:
opcode_name: String representing the ops (see :schema.fbs).
Returns:
Converted opcode_name from V1 to V2.
"""
old_name_to_new_name = {
"CONVOLUTION": "CONV_2D",
"DEPTHWISE_CONVOLUTION": "DEPTHWISE_CONV_2D",
"AVERAGE_POOL": "AVERAGE_POOL_2D",
"MAX_POOL": "MAX_POOL_2D",
"L2_POOL": "L2_POOL_2D",
"SIGMOID": "LOGISTIC",
"L2NORM": "L2_NORMALIZATION",
"LOCAL_RESPONSE_NORM": "LOCAL_RESPONSE_NORMALIZATION",
"Basic_RNN": "RNN",
}
return (old_name_to_new_name[opcode_name]
if opcode_name in old_name_to_new_name else opcode_name)
def RemapOperatorType(operator_type):
"""Remap operator structs from old names to new names.
Args:
operator_type: String representing the builtin operator data type
string.
(see :schema.fbs).
Raises:
ValueError: When the model has consistency problems.
Returns:
Upgraded builtin operator data type as a string.
"""
old_to_new = {
"PoolOptions": "Pool2DOptions",
"DepthwiseConvolutionOptions": "DepthwiseConv2DOptions",
"ConvolutionOptions": "Conv2DOptions",
"LocalResponseNormOptions": "LocalResponseNormalizationOptions",
"BasicRNNOptions": "RNNOptions",
}
return (old_to_new[operator_type]
if operator_type in old_to_new else operator_type)
for subgraph in data["subgraphs"]:
for ops in subgraph["operators"]:
ops["builtin_options_type"] = RemapOperatorType(
ops["builtin_options_type"])
# Upgrade the operator codes
for operator_code in data["operator_codes"]:
# Check if builtin_code is the appropriate string type
      # Use type(u"") rather than str or unicode so it works on Python 2 and 3.
if not isinstance(operator_code["builtin_code"], type(u"")):
raise ValueError("builtin_code %r is non-string. this usually means "
"your model has consistency problems." %
(operator_code["builtin_code"]))
operator_code["builtin_code"] = (RemapOperator(
operator_code["builtin_code"]))
def _Upgrade2To3(self, data):
"""Upgrade data from Version 2 to Version 3.
Changed actual read-only tensor data to be in a buffers table instead
of inline with the tensor.
Args:
data: Dictionary representing the TensorFlow lite data to be upgraded.
This will be modified in-place to be an upgraded version.
"""
buffers = [{"data": []}] # Start with 1 empty buffer
for subgraph in data["subgraphs"]:
if "tensors" not in subgraph:
continue
for tensor in subgraph["tensors"]:
if "data_buffer" not in tensor:
tensor["buffer"] = 0
else:
if tensor["data_buffer"]:
tensor[u"buffer"] = len(buffers)
buffers.append({"data": tensor["data_buffer"]})
else:
tensor["buffer"] = 0
del tensor["data_buffer"]
data["buffers"] = buffers
def _PerformUpgrade(self, data):
"""Manipulate the `data` (parsed JSON) based on changes in format.
This incrementally will upgrade from version to version within data.
Args:
data: Dictionary representing the TensorFlow data. This will be upgraded
in place.
"""
while data["version"] < self._new_version:
self._upgrade_dispatch[data["version"]](data)
data["version"] += 1
def Convert(self, input_file, output_file):
"""Perform schema conversion from input_file to output_file.
Args:
      input_file: Filename of TensorFlow Lite data to convert from. Must
        be a `.json`, `.bin`, or `.tflite` file containing the JSON or
        binary form of the TensorFlow Lite FlatBuffer schema.
      output_file: Filename to write to. Extension also must be `.json`,
        `.bin`, or `.tflite`.
Raises:
      RuntimeError: Generated when none of the schemas supported by the
        upgrader matches the `input_file` data.
"""
# Read data in each schema (since they are incompatible). Version is
# always present. Use the read data that matches the version of the
# schema.
for version, schema, raw_binary, _ in self._schemas:
try:
data_candidate = self._Read(input_file, schema, raw_binary)
except RuntimeError:
continue # Skip and hope another schema works
if "version" not in data_candidate: # Assume version 1 if not present.
data_candidate["version"] = 1
      elif data_candidate["version"] == 0:  # Version 0 doesn't exist in the wild.
data_candidate["version"] = 1
if data_candidate["version"] == version:
self._PerformUpgrade(data_candidate)
self._Write(data_candidate, output_file)
return
raise RuntimeError("No schema that the converter understands worked with "
"the data file you provided.")
def main(argv):
del argv
Converter().Convert(FLAGS.input, FLAGS.output)
if __name__ == "__main__":
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
tensorflow-master
|
tensorflow/lite/schema/upgrade_schema.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""label_image for tflite"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import numpy as np
from PIL import Image
from tensorflow.lite.python import interpreter as interpreter_wrapper
def load_labels(filename):
"""Read labels, one per line, from the given file."""
my_labels = []
with open(filename, 'r') as input_file:
for line in input_file:
my_labels.append(line.strip())
return my_labels
if __name__ == "__main__":
floating_model = False
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--image", default="/tmp/grace_hopper.bmp", \
help="image to be classified")
parser.add_argument("-m", "--model_file", \
default="/tmp/mobilenet_v1_1.0_224_quant.tflite", \
help=".tflite model to be executed")
parser.add_argument("-l", "--label_file", default="/tmp/labels.txt", \
help="name of file containing labels")
parser.add_argument("--input_mean", default=127.5, help="input_mean")
parser.add_argument("--input_std", default=127.5, \
help="input standard deviation")
args = parser.parse_args()
interpreter = interpreter_wrapper.Interpreter(model_path=args.model_file)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
# check the type of the input tensor
if input_details[0]['dtype'] == np.float32:
floating_model = True
# NxHxWxC, H:1, W:2
height = input_details[0]['shape'][1]
width = input_details[0]['shape'][2]
img = Image.open(args.image)
img = img.resize((width, height))
# add N dim
input_data = np.expand_dims(img, axis=0)
if floating_model:
input_data = (np.float32(input_data) - args.input_mean) / args.input_std
interpreter.set_tensor(input_details[0]['index'], input_data)
interpreter.invoke()
output_data = interpreter.get_tensor(output_details[0]['index'])
results = np.squeeze(output_data)
top_k = results.argsort()[-5:][::-1]
labels = load_labels(args.label_file)
for i in top_k:
if floating_model:
print('{0:08.6f}'.format(float(results[i]))+":", labels[i])
else:
print('{0:08.6f}'.format(float(results[i]/255.0))+":", labels[i])
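# Example invocation (hypothetical paths, matching the defaults above):
#   python label_image.py \
#     --image /tmp/grace_hopper.bmp \
#     --model_file /tmp/mobilenet_v1_1.0_224_quant.tflite \
#     --label_file /tmp/labels.txt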
|
tensorflow-master
|
tensorflow/lite/examples/python/label_image.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tf.data.Dataset interface to the MNIST dataset.
This is cloned from
https://github.com/tensorflow/models/blob/master/official/mnist/dataset.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import shutil
import tempfile
import numpy as np
from six.moves import urllib
import tensorflow as tf
def read32(bytestream):
"""Read 4 bytes from bytestream as an unsigned 32-bit integer."""
dt = np.dtype(np.uint32).newbyteorder('>')
return np.frombuffer(bytestream.read(4), dtype=dt)[0]
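# Example: the first four bytes of an MNIST image file are 00 00 08 03, which
# read32 decodes (big-endian) as 2051 -- the magic number checked below.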
def check_image_file_header(filename):
"""Validate that filename corresponds to images for the MNIST dataset."""
with tf.gfile.Open(filename, 'rb') as f:
magic = read32(f)
read32(f) # num_images, unused
rows = read32(f)
cols = read32(f)
if magic != 2051:
raise ValueError('Invalid magic number %d in MNIST file %s' % (magic,
f.name))
if rows != 28 or cols != 28:
raise ValueError(
'Invalid MNIST file %s: Expected 28x28 images, found %dx%d' %
(f.name, rows, cols))
def check_labels_file_header(filename):
"""Validate that filename corresponds to labels for the MNIST dataset."""
with tf.gfile.Open(filename, 'rb') as f:
magic = read32(f)
read32(f) # num_items, unused
if magic != 2049:
raise ValueError('Invalid magic number %d in MNIST file %s' % (magic,
f.name))
def download(directory, filename):
"""Download (and unzip) a file from the MNIST dataset if not already done."""
filepath = os.path.join(directory, filename)
if tf.gfile.Exists(filepath):
return filepath
if not tf.gfile.Exists(directory):
tf.gfile.MakeDirs(directory)
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
url = 'https://storage.googleapis.com/cvdf-datasets/mnist/' + filename + '.gz'
_, zipped_filepath = tempfile.mkstemp(suffix='.gz')
print('Downloading %s to %s' % (url, zipped_filepath))
urllib.request.urlretrieve(url, zipped_filepath)
with gzip.open(zipped_filepath, 'rb') as f_in, \
tf.gfile.Open(filepath, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
os.remove(zipped_filepath)
return filepath
def dataset(directory, images_file, labels_file):
"""Download and parse MNIST dataset."""
images_file = download(directory, images_file)
labels_file = download(directory, labels_file)
check_image_file_header(images_file)
check_labels_file_header(labels_file)
def decode_image(image):
# Normalize from [0, 255] to [0.0, 1.0]
image = tf.decode_raw(image, tf.uint8)
image = tf.cast(image, tf.float32)
image = tf.reshape(image, [784])
return image / 255.0
def decode_label(label):
label = tf.decode_raw(label, tf.uint8) # tf.string -> [tf.uint8]
label = tf.reshape(label, []) # label is a scalar
return tf.to_int32(label)
images = tf.data.FixedLengthRecordDataset(
images_file, 28 * 28, header_bytes=16).map(decode_image)
labels = tf.data.FixedLengthRecordDataset(
labels_file, 1, header_bytes=8).map(decode_label)
return tf.data.Dataset.zip((images, labels))
def train(directory):
"""tf.data.Dataset object for MNIST training data."""
return dataset(directory, 'train-images-idx3-ubyte',
'train-labels-idx1-ubyte')
def test(directory):
"""tf.data.Dataset object for MNIST test data."""
return dataset(directory, 't10k-images-idx3-ubyte', 't10k-labels-idx1-ubyte')
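# Minimal usage sketch (TF 1.x graph mode, with a hypothetical data directory):
#   ds = train('/tmp/mnist_data')
#   image, label = tf.compat.v1.data.make_one_shot_iterator(ds).get_next()
#   with tf.Session() as sess:
#     print(sess.run([image, label]))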
|
tensorflow-master
|
tensorflow/lite/tutorials/dataset.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Script to evaluate accuracy of TFLite flatbuffer model on mnist dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf # pylint: disable=g-bad-import-order
from tensorflow.lite.tutorials import dataset
flags = tf.app.flags
flags.DEFINE_string('data_dir', '/tmp/data_dir',
'Directory where data is stored.')
flags.DEFINE_string('model_file', '',
'The path to the TFLite flatbuffer model file.')
flags = flags.FLAGS
def test_image_generator():
# Generates an iterator over images
with tf.Session() as sess:
input_data = tf.compat.v1.data.make_one_shot_iterator(dataset.test(
flags.data_dir)).get_next()
try:
while True:
yield sess.run(input_data)
except tf.errors.OutOfRangeError:
pass
def run_eval(interpreter, input_image):
"""Performs evaluation for input image over specified model.
Args:
interpreter: TFLite interpreter initialized with model to execute.
input_image: Image input to the model.
Returns:
output: output tensor of model being executed.
"""
# Get input and output tensors.
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
# Test model on the input images.
input_image = np.reshape(input_image, input_details[0]['shape'])
interpreter.set_tensor(input_details[0]['index'], input_image)
interpreter.invoke()
output_data = interpreter.get_tensor(output_details[0]['index'])
output = np.squeeze(output_data)
return output
def main(_):
interpreter = tf.lite.Interpreter(model_path=flags.model_file)
interpreter.allocate_tensors()
num_correct, total = 0, 0
for input_data in test_image_generator():
output = run_eval(interpreter, input_data[0])
total += 1
if output == input_data[1]:
num_correct += 1
if total % 500 == 0:
print('Accuracy after %i images: %f' %
(total, float(num_correct) / float(total)))
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
tf.app.run(main)
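# Example invocation (hypothetical model path; --data_dir defaults to /tmp/data_dir):
#   python mnist_tflite.py --data_dir=/tmp/data_dir --model_file=/tmp/mnist.tflite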
|
tensorflow-master
|
tensorflow/lite/tutorials/mnist_tflite.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import tensorflow as tf
from tensorflow.lite.toco import model_flags_pb2
from tensorflow.lite.toco import toco_flags_pb2
from tensorflow.lite.toco import types_pb2
from tensorflow.python.platform import googletest
from tensorflow.python.platform import resource_loader
def TensorName(x):
"""Get the canonical (non foo:0 name)."""
return x.name.split(":")[0]
class TocoFromProtosTest(googletest.TestCase):
def _run(self, sess, in_tensor, out_tensor, should_succeed):
"""Use toco binary to check conversion from graphdef to tflite.
Args:
sess: Active TensorFlow session containing graph.
in_tensor: TensorFlow tensor to use as input.
out_tensor: TensorFlow tensor to use as output.
should_succeed: Whether this is a valid conversion.
"""
# Build all protos and extract graphdef
graph_def = sess.graph_def
toco_flags = toco_flags_pb2.TocoFlags()
toco_flags.input_format = toco_flags_pb2.TENSORFLOW_GRAPHDEF
toco_flags.output_format = toco_flags_pb2.TFLITE
toco_flags.inference_input_type = types_pb2.FLOAT
toco_flags.inference_type = types_pb2.FLOAT
toco_flags.allow_custom_ops = True
model_flags = model_flags_pb2.ModelFlags()
input_array = model_flags.input_arrays.add()
input_array.name = TensorName(in_tensor)
input_array.shape.dims.extend(map(int, in_tensor.shape))
model_flags.output_arrays.append(TensorName(out_tensor))
# Shell out to run toco (in case it crashes)
with tempfile.NamedTemporaryFile() as fp_toco, \
tempfile.NamedTemporaryFile() as fp_model, \
tempfile.NamedTemporaryFile() as fp_input, \
tempfile.NamedTemporaryFile() as fp_output:
fp_model.write(model_flags.SerializeToString())
fp_toco.write(toco_flags.SerializeToString())
fp_input.write(graph_def.SerializeToString())
fp_model.flush()
fp_toco.flush()
fp_input.flush()
tflite_bin = resource_loader.get_path_to_datafile("toco_from_protos.par")
cmdline = " ".join([
tflite_bin, fp_model.name, fp_toco.name, fp_input.name, fp_output.name
])
exitcode = os.system(cmdline)
if exitcode == 0:
stuff = fp_output.read()
self.assertEqual(stuff is not None, should_succeed)
else:
self.assertFalse(should_succeed)
def test_toco(self):
"""Run a couple of TensorFlow graphs against TOCO through the python bin."""
with tf.Session() as sess:
img = tf.placeholder(name="img", dtype=tf.float32, shape=(1, 64, 64, 3))
val = img + tf.constant([1., 2., 3.]) + tf.constant([1., 4., 4.])
out = tf.identity(val, name="out")
out2 = tf.sin(val, name="out2")
# This is a valid model.
self._run(sess, img, out, True)
# This uses an invalid function.
# TODO(aselle): Check to make sure a warning is included.
self._run(sess, img, out2, True)
# This is an identity graph, which doesn't work
self._run(sess, img, img, False)
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/lite/toco/python/toco_from_protos_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python console command to invoke TOCO from serialized protos."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
from tensorflow.lite.toco.python import tensorflow_wrap_toco
from tensorflow.python.platform import app
FLAGS = None
def execute(unused_args):
model_str = open(FLAGS.model_proto_file, "rb").read()
toco_str = open(FLAGS.toco_proto_file, "rb").read()
input_str = open(FLAGS.model_input_file, "rb").read()
output_str = tensorflow_wrap_toco.TocoConvert(model_str, toco_str, input_str)
open(FLAGS.model_output_file, "wb").write(output_str)
sys.exit(0)
def main():
global FLAGS
parser = argparse.ArgumentParser(
description="Invoke toco using protos as input.")
parser.add_argument(
"model_proto_file",
type=str,
help="File containing serialized proto that describes the model.")
parser.add_argument(
"toco_proto_file",
type=str,
help="File containing serialized proto describing how TOCO should run.")
parser.add_argument(
"model_input_file", type=str, help="Input model is read from this file.")
parser.add_argument(
"model_output_file",
type=str,
help="Result of applying TOCO conversion is written here.")
FLAGS, unparsed = parser.parse_known_args()
app.run(main=execute, argv=[sys.argv[0]] + unparsed)
if __name__ == "__main__":
main()
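# Example invocation (hypothetical file names, in the positional order defined above):
#   toco_from_protos model_flags.pb toco_flags.pb input_graph.pb output_model.tflite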
|
tensorflow-master
|
tensorflow/lite/toco/python/toco_from_protos.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fetch android artifacts and update pom properties."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
import string
import sys
import urllib2
def get_args():
"""Parse command line args."""
parser = argparse.ArgumentParser()
parser.add_argument(
'--version', required=True, help='Version for the artifact.')
parser.add_argument(
'--dir',
required=True,
help='Directory where the pom and aar artifact will be written.')
parser.add_argument(
'--template', required=True, help='Path to pom template file.')
return parser.parse_args()
def get_json(url):
"""Load the contents of the URL as a json object."""
return json.load(urllib2.urlopen(url))
def get_commit_id(build_info):
"""Fetch the git commit id from the build info json object."""
release_commit_id = build_info.get('build_commit_id')
if release_commit_id:
return release_commit_id
actions = build_info.get('actions')
build_data = next(
a for a in actions
if a.get('_class') == 'hudson.plugins.git.util.BuildData')
if not build_data:
raise ValueError('Missing BuildData: %s' % build_info)
revision_info = build_data.get('lastBuiltRevision')
if not revision_info:
raise ValueError('Missing lastBuiltRevision: %s' % build_info)
return revision_info.get('SHA1')
def get_aar_url(build_info):
"""Given the json build info, find the URL to the tensorflow.aar artifact."""
base_url = build_info.get('url')
if not base_url:
raise ValueError('Missing url: %s' % build_info)
build_class = build_info.get('_class')
if (build_class == 'hudson.model.FreeStyleBuild' or
build_class == 'hudson.matrix.MatrixRun'):
aar_info = next(
a for a in build_info.get('artifacts')
if a.get('fileName') == 'tensorflow.aar')
if not aar_info:
raise ValueError('Missing aar artifact: %s' % build_info)
return '%s/artifact/%s' % (base_url, aar_info.get('relativePath'))
raise ValueError('Unknown build_type %s' % build_info)
def read_template(path):
with open(path) as f:
return string.Template(f.read())
def main():
args = get_args()
release_prefix = 'https://storage.googleapis.com/tensorflow/libtensorflow'
info_url = '%s/android_buildinfo-%s.json' % (release_prefix, args.version)
aar_url = '%s/tensorflow-%s.aar' % (release_prefix, args.version)
build_type = 'release-android'
# Retrieve build information
build_info = get_json(info_url)
# Check all required build info is present
build_commit_id = get_commit_id(build_info)
if not build_commit_id:
raise ValueError('Missing commit id: %s' % build_info)
# Write the pom file updated with build attributes.
template = read_template(args.template)
with open('%s/pom-android.xml' % args.dir, 'w') as f:
f.write(
template.substitute({
'build_commit_id': build_commit_id,
'build_type': build_type,
'version': args.version
}))
# Retrieve the aar location if needed.
if not aar_url:
aar_url = get_aar_url(build_info)
# And download the aar to the desired location.
with open('%s/tensorflow.aar' % args.dir, 'wb') as f:
aar = urllib2.urlopen(aar_url)
f.write(aar.read())
if __name__ == '__main__':
sys.exit(main())
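# Example invocation (hypothetical version and paths):
#   python update.py --version 1.13.0 --dir /tmp/android --template pom-android.xml.template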
|
tensorflow-master
|
tensorflow/java/maven/tensorflow-android/update.py
|
# pylint: disable=g-import-not-at-top
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""contrib module containing volatile or experimental code."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import platform
# Add projects here, they will show up under tf.contrib.
from tensorflow.contrib import autograph
from tensorflow.contrib import batching
from tensorflow.contrib import bayesflow
from tensorflow.contrib import checkpoint
if os.name != "nt" and platform.machine() != "s390x":
from tensorflow.contrib import cloud
from tensorflow.contrib import cluster_resolver
from tensorflow.contrib import compiler
from tensorflow.contrib import constrained_optimization
from tensorflow.contrib import copy_graph
from tensorflow.contrib import crf
from tensorflow.contrib import cudnn_rnn
from tensorflow.contrib import data
from tensorflow.contrib import deprecated
from tensorflow.contrib import distribute
from tensorflow.contrib import distributions
from tensorflow.contrib import estimator
from tensorflow.contrib import factorization
from tensorflow.contrib import feature_column
from tensorflow.contrib import framework
from tensorflow.contrib import gan
from tensorflow.contrib import graph_editor
from tensorflow.contrib import grid_rnn
from tensorflow.contrib import image
from tensorflow.contrib import input_pipeline
from tensorflow.contrib import integrate
from tensorflow.contrib import keras
from tensorflow.contrib import kernel_methods
from tensorflow.contrib import labeled_tensor
from tensorflow.contrib import layers
from tensorflow.contrib import learn
from tensorflow.contrib import legacy_seq2seq
from tensorflow.contrib import linear_optimizer
from tensorflow.contrib import lookup
from tensorflow.contrib import losses
from tensorflow.contrib import memory_stats
from tensorflow.contrib import metrics
from tensorflow.contrib import mixed_precision
from tensorflow.contrib import model_pruning
from tensorflow.contrib import nn
from tensorflow.contrib import opt
from tensorflow.contrib import periodic_resample
from tensorflow.contrib import predictor
from tensorflow.contrib import proto
from tensorflow.contrib import quantization
from tensorflow.contrib import quantize
from tensorflow.contrib import reduce_slice_ops
from tensorflow.contrib import resampler
from tensorflow.contrib import rnn
from tensorflow.contrib import rpc
from tensorflow.contrib import saved_model
from tensorflow.contrib import seq2seq
from tensorflow.contrib import signal
from tensorflow.contrib import slim
from tensorflow.contrib import solvers
from tensorflow.contrib import sparsemax
from tensorflow.contrib import staging
from tensorflow.contrib import stat_summarizer
from tensorflow.contrib import stateless
from tensorflow.contrib import tensor_forest
from tensorflow.contrib import tensorboard
from tensorflow.contrib import testing
from tensorflow.contrib import tfprof
from tensorflow.contrib import timeseries
from tensorflow.contrib import tpu
from tensorflow.contrib import training
from tensorflow.contrib import util
from tensorflow.contrib.eager.python import tfe as eager
from tensorflow.contrib.optimizer_v2 import optimizer_v2_symbols as optimizer_v2
from tensorflow.contrib.receptive_field import receptive_field_api as receptive_field
from tensorflow.contrib.recurrent.python import recurrent_api as recurrent
from tensorflow.contrib.remote_fused_graph import pylib as remote_fused_graph
from tensorflow.contrib.specs import python as specs
from tensorflow.contrib.summary import summary
from tensorflow.python.util.lazy_loader import LazyLoader
ffmpeg = LazyLoader("ffmpeg", globals(),
"tensorflow.contrib.ffmpeg")
del os
del platform
del LazyLoader
del absolute_import
del division
del print_function
|
tensorflow-master
|
tensorflow/contrib/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This is the legacy module for AutoGraph, kept for backward compatibility.
New users should instead use `tensorflow.python.autograph`.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph import * # pylint:disable=wildcard-import
|
tensorflow-master
|
tensorflow/contrib/autograph/__init__.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common benchmarking code.
See https://www.tensorflow.org/community/benchmarks for usage.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
import tensorflow as tf
class ReportingBenchmark(tf.test.Benchmark):
"""Base class for a benchmark that reports general performance metrics.
Subclasses only need to call `time_execution`, which reports results via
`report_benchmark`.
"""
def time_execution(self, name, target, iters, warm_up_iters=5):
for _ in range(warm_up_iters):
target()
all_times = []
for _ in range(iters):
iter_time = time.time()
target()
all_times.append(time.time() - iter_time)
avg_time = np.average(all_times)
extras = {}
extras['all_times'] = all_times
if isinstance(name, tuple):
extras['name'] = name
name = '_'.join(str(piece) for piece in name)
self.report_benchmark(
iters=iters, wall_time=avg_time, name=name, extras=extras)
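# A minimal subclass sketch (hypothetical benchmark; names are illustrative only):
#   class MatmulBenchmark(ReportingBenchmark):
#     def benchmark_matmul(self):
#       with tf.Graph().as_default(), tf.Session() as sess:
#         x = tf.random_normal((256, 256))
#         product = tf.matmul(x, x)
#         self.time_execution('matmul_256', lambda: sess.run(product), iters=50)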
if __name__ == '__main__':
tf.test.main()
|
tensorflow-master
|
tensorflow/contrib/autograph/examples/benchmarks/benchmark_base.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A basic RL cartpole benchmark.
The RL model uses the OpenAI Gym environment to train a simple network using
the policy gradients method. The training scales the gradients for each step
by the episode's cumulative discounted reward and averages these gradients over
a fixed number of games before applying the optimization step.
For benchmarking purposes, we replace the OpenAI Gym environment with a fake
one that returns random observations and rewards and never ends the episode.
This way the benchmarks compare the same amount of computation at each step.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gym
import numpy as np
import tensorflow as tf
from tensorflow.contrib import eager
from tensorflow.contrib.autograph.examples.benchmarks import benchmark_base
from tensorflow.python import autograph as ag
from tensorflow.python.eager import context
#
# AutoGraph implementation
#
@ag.convert()
def graph_append_discounted_rewards(destination, rewards, discount_rate):
"""Discounts episode rewards and appends them to destination."""
ag.set_element_type(rewards, tf.float32)
cdr = 0.0
reverse_discounted = []
ag.set_element_type(reverse_discounted, tf.float32)
for i in range(len(rewards) - 1, -1, -1):
cdr = cdr * discount_rate + rewards[i]
cdr.set_shape(())
reverse_discounted.append(cdr)
retval = destination
# Note: AutoGraph doesn't yet support reversed() so we use a loop instead.
for i in range(len(reverse_discounted) - 1, -1, -1):
retval.append(reverse_discounted[i])
return retval
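# Worked example of the discounting above (plain Python numbers): with
# discount_rate=0.9 and rewards [1.0, 1.0], the reversed pass computes
# [1.0, 1.9], which is then appended in forward order as [1.9, 1.0].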
class GraphPolicyNetwork(tf.keras.Model):
"""Policy network for the cart-pole reinforcement learning problem.
The forward path of the network takes an observation from the cart-pole
environment (length-4 vector) and outputs an action.
"""
def __init__(self, hidden_size):
super(GraphPolicyNetwork, self).__init__()
self._hidden_layer = tf.keras.layers.Dense(
hidden_size, activation=tf.nn.elu)
self._output_layer = tf.keras.layers.Dense(1)
def call(self, inputs):
"""Calculates logits and action.
Args:
inputs: Observations from a step in the cart-pole environment, of shape
`(batch_size, input_size)`
Returns:
logits: the logits output by the output layer. This can be viewed as the
likelihood values of choosing the left (0) action. Shape:
`(batch_size, 1)`.
actions: randomly selected actions ({0, 1}) based on the logits. Shape:
`(batch_size, 1)`.
"""
hidden = self._hidden_layer(inputs)
logits = self._output_layer(hidden)
left_prob = tf.nn.sigmoid(logits)
action_probs = tf.concat([left_prob, 1.0 - left_prob], 1)
actions = tf.multinomial(tf.log(action_probs), 1)
return logits, actions
# TODO(mdan): Move this method out of the class.
@ag.convert()
def train(self, cart_pole_env, optimizer, discount_rate, num_games,
max_steps_per_game):
var_list = tf.trainable_variables()
grad_list = [
tf.TensorArray(tf.float32, 0, dynamic_size=True) for _ in var_list
]
step_counts = []
discounted_rewards = []
ag.set_element_type(discounted_rewards, tf.float32)
ag.set_element_type(step_counts, tf.int32)
# Note: we use a shared object, cart_pole_env here. Because calls to the
# object's method are made through py_func, TensorFlow cannot detect its
# data dependencies. Hence we must manually synchronize access to it
# and ensure the control dependencies are set in such a way that
# calls to reset(), take_one_step, etc. are made in the correct order.
sync_counter = tf.constant(0)
for _ in tf.range(num_games):
with tf.control_dependencies([sync_counter]):
obs = cart_pole_env.reset()
with tf.control_dependencies([obs]):
sync_counter += 1
game_rewards = []
ag.set_element_type(game_rewards, tf.float32)
for step in tf.range(max_steps_per_game):
logits, actions = self(obs) # pylint:disable=not-callable
logits = tf.reshape(logits, ())
actions = tf.reshape(actions, ())
labels = 1.0 - tf.cast(actions, tf.float32)
loss = tf.nn.sigmoid_cross_entropy_with_logits(
labels=labels, logits=logits)
grads = tf.gradients(loss, var_list)
for i in range(len(grads)):
grad_list[i].append(grads[i])
with tf.control_dependencies([sync_counter]):
obs, reward, done = cart_pole_env.step(actions)
with tf.control_dependencies([obs]):
sync_counter += 1
obs = tf.reshape(obs, (1, 4))
game_rewards.append(reward)
if reward < 0.1 or done:
step_counts.append(step + 1)
break
discounted_rewards = graph_append_discounted_rewards(
discounted_rewards, game_rewards, discount_rate)
discounted_rewards = ag.stack(discounted_rewards)
discounted_rewards.set_shape((None,))
mean, variance = tf.nn.moments(discounted_rewards, [0])
normalized_rewards = (discounted_rewards - mean) / tf.sqrt(variance)
for i in range(len(grad_list)):
g = ag.stack(grad_list[i])
# This block just adjusts the shapes to match for multiplication.
r = normalized_rewards
if r.shape.ndims < g.shape.ndims:
r = tf.expand_dims(r, -1)
if r.shape.ndims < g.shape.ndims:
r = tf.expand_dims(r, -1)
grad_list[i] = tf.reduce_mean(g * r, axis=0)
optimizer.apply_gradients(
zip(grad_list, var_list), global_step=tf.train.get_global_step())
return ag.stack(step_counts)
@ag.convert()
def graph_train_model(policy_network, cart_pole_env, optimizer, iterations):
"""Trains the policy network for a given number of iterations."""
i = tf.constant(0)
mean_steps_per_iteration = []
ag.set_element_type(mean_steps_per_iteration, tf.int32)
while i < iterations:
steps_per_game = policy_network.train(
cart_pole_env,
optimizer,
discount_rate=0.95,
num_games=20,
max_steps_per_game=200)
mean_steps_per_iteration.append(tf.reduce_mean(steps_per_game))
i += 1
return ag.stack(mean_steps_per_iteration)
class GraphGymCartpoleEnv(object):
"""An env backed by OpenAI Gym's CartPole environment.
Used to confirm a functional model only.
"""
def __init__(self):
cart_pole_env = gym.make('CartPole-v1')
cart_pole_env.seed(0)
cart_pole_env.reset()
self.env = cart_pole_env
def reset(self):
obs = ag.utils.wrap_py_func(self.env.reset, tf.float64, ())
obs = tf.reshape(obs, (1, 4))
obs = tf.cast(obs, tf.float32)
return obs
def step(self, actions):
def take_one_step(actions):
obs, reward, done, _ = self.env.step(actions)
obs = obs.astype(np.float32)
reward = np.float32(reward)
return obs, reward, done
return ag.utils.wrap_py_func(take_one_step,
(tf.float32, tf.float32, tf.bool), (actions,))
class GraphRandomCartpoleEnv(object):
"""An environment that returns random actions and never finishes.
Used during benchmarking, it will cause training to run a constant number of
steps.
"""
def reset(self):
return tf.random.normal((1, 4))
def step(self, actions):
with tf.control_dependencies([actions]):
random_obs = tf.random.normal((1, 4))
fixed_reward = tf.constant(0.001)
done = tf.constant(False)
return random_obs, fixed_reward, done
#
# Eager implementation
#
def eager_append_discounted_rewards(discounted_rewards, rewards, discount_rate):
cdr = 0.0
reverse_discounted = []
for i in range(len(rewards) - 1, -1, -1):
cdr = cdr * discount_rate + rewards[i]
reverse_discounted.append(cdr)
discounted_rewards.extend(reversed(reverse_discounted))
return discounted_rewards
class EagerPolicyNetwork(tf.keras.Model):
"""Policy network for the cart-pole reinforcement learning problem.
The forward path of the network takes an observation from the cart-pole
environment (length-4 vector) and outputs an action.
"""
def __init__(self, hidden_size):
super(EagerPolicyNetwork, self).__init__()
self._hidden_layer = tf.keras.layers.Dense(
hidden_size, activation=tf.nn.elu)
self._output_layer = tf.keras.layers.Dense(1)
def call(self, inputs):
"""Calculates logits and action.
Args:
inputs: Observations from a step in the cart-pole environment, of shape
`(batch_size, input_size)`
Returns:
logits: the logits output by the output layer. This can be viewed as the
likelihood values of choosing the left (0) action. Shape:
`(batch_size, 1)`.
actions: randomly selected actions ({0, 1}) based on the logits. Shape:
`(batch_size, 1)`.
"""
hidden = self._hidden_layer(inputs)
logits = self._output_layer(hidden)
left_prob = tf.nn.sigmoid(logits)
action_probs = tf.concat([left_prob, 1.0 - left_prob], 1)
self._grad_fn = eager.implicit_gradients(
self._get_cross_entropy_and_save_actions)
actions = tf.multinomial(tf.log(action_probs), 1)
return logits, actions
def _get_cross_entropy_and_save_actions(self, inputs):
logits, actions = self(inputs) # pylint:disable=not-callable
self._current_actions = actions
labels = 1.0 - tf.cast(actions, tf.float32)
return tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits)
def train(self, cart_pole_env, optimizer, discount_rate, num_games,
max_steps_per_game):
grad_list = None
step_counts = []
discounted_rewards = []
for _ in range(num_games):
obs = cart_pole_env.reset()
game_rewards = []
for step in range(max_steps_per_game):
grads_and_vars = self._grad_fn(tf.constant([obs], dtype=tf.float32))
grads, var_list = zip(*grads_and_vars)
actions = self._current_actions.numpy()[0][0]
if grad_list is None:
grad_list = [[g] for g in grads]
else:
for i in range(len(grads)):
grad_list[i].append(grads[i])
obs, reward, done = cart_pole_env.step(actions)
game_rewards.append(reward)
if reward < 0.1 or done:
step_counts.append(step + 1)
break
discounted_rewards = eager_append_discounted_rewards(
discounted_rewards, game_rewards, discount_rate)
discounted_rewards = tf.stack(discounted_rewards)
mean, variance = tf.nn.moments(discounted_rewards, [0])
normalized_rewards = (discounted_rewards - mean) / tf.sqrt(variance)
for i in range(len(grad_list)):
g = tf.stack(grad_list[i])
r = normalized_rewards
while r.shape.ndims < g.shape.ndims:
r = tf.expand_dims(r, -1)
grad_list[i] = tf.reduce_mean(g * r, axis=0)
optimizer.apply_gradients(
zip(grad_list, var_list), global_step=tf.train.get_global_step())
return tf.stack(step_counts)
def eager_train_model(policy_network, cart_pole_env, optimizer, iterations):
"""Trains the policy network for a given number of iterations."""
mean_steps_per_iteration = []
for _ in range(iterations):
steps_per_game = policy_network.train(
cart_pole_env,
optimizer,
discount_rate=0.95,
num_games=20,
max_steps_per_game=200)
mean_steps_per_iteration.append(tf.reduce_mean(steps_per_game))
return mean_steps_per_iteration
class EagerGymCartpoleEnv(object):
"""An env backed by OpenAI Gym's CartPole environment.
Used to confirm a functional model only.
"""
def __init__(self):
cart_pole_env = gym.make('CartPole-v1')
cart_pole_env.seed(0)
cart_pole_env.reset()
self.env = cart_pole_env
def reset(self):
return self.env.reset()
def step(self, actions):
obs, reward, done, _ = self.env.step(actions)
return obs, reward, done
class EagerRandomCartpoleEnv(object):
"""An environment that returns random actions and never finishes.
Used during benchmarking, it will cause training to run a constant number of
steps.
"""
def reset(self):
return np.random.normal(size=(4,))
def step(self, actions):
with tf.control_dependencies([actions]):
random_obs = np.random.normal(size=(4,))
fixed_reward = 0.001
done = False
return random_obs, fixed_reward, done
def graph_demo_training():
"""Not used in the benchmark. Used to confirm a functional model."""
with tf.Graph().as_default():
tf.set_random_seed(0)
network = GraphPolicyNetwork(hidden_size=5)
network.build((1, 4))
env = GraphGymCartpoleEnv()
opt = tf.train.AdamOptimizer(0.05)
train_ops = graph_train_model(network, env, opt, iterations=5)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
steps_per_iteration = sess.run(train_ops)
for i, steps in enumerate(steps_per_iteration):
print('Step {} iterations: {}'.format(i, steps))
def eager_demo_training():
with context.eager_mode():
network = EagerPolicyNetwork(hidden_size=5)
network.build((1, 4))
env = EagerGymCartpoleEnv()
opt = tf.train.AdamOptimizer(0.05)
steps_per_iteration = eager_train_model(network, env, opt, iterations=5)
for i, steps in enumerate(steps_per_iteration):
print('Step {} iterations: {}'.format(i, steps))
class RLCartPoleBenchmark(benchmark_base.ReportingBenchmark):
"""Actual benchmark.
Trains the RL agent a fixed number of times, on random environments that
result in a constant number of steps.
"""
def benchmark_cartpole(self):
def train_session(sess, ops):
return lambda: sess.run(ops)
def train_eager(network, env, opt):
return lambda: eager_train_model(network, env, opt, iterations=10)
for model_size in (10, 100, 1000):
with tf.Graph().as_default():
network = GraphPolicyNetwork(hidden_size=model_size)
network.build((1, 4))
env = GraphRandomCartpoleEnv()
opt = tf.train.AdamOptimizer(0.05)
train_ops = graph_train_model(network, env, opt, iterations=10)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
self.time_execution(('cartpole', 'autograph', model_size),
train_session(sess, train_ops), 20)
with context.eager_mode():
network = EagerPolicyNetwork(hidden_size=model_size)
network.build((1, 4))
env = EagerRandomCartpoleEnv()
opt = tf.train.AdamOptimizer(0.05)
self.time_execution(('cartpole', 'eager', model_size),
train_eager(network, env, opt), 20)
if __name__ == '__main__':
tf.test.main()
|
tensorflow-master
|
tensorflow/contrib/autograph/examples/benchmarks/cartpole_benchmark.py
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TF-GAN is a lightweight library for training and evaluating GANs.
In addition to providing the infrastructure for easily training and evaluating
GANS, this library contains modules for a TFGAN-backed Estimator,
evaluation metrics, features (such as virtual batch normalization), and losses.
Please see README.md for details and usage.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Collapse TF-GAN into a tiered namespace.
from tensorflow.contrib.gan.python import estimator
from tensorflow.contrib.gan.python import eval # pylint:disable=redefined-builtin
from tensorflow.contrib.gan.python import features
from tensorflow.contrib.gan.python import losses
from tensorflow.contrib.gan.python import namedtuples
from tensorflow.contrib.gan.python import train
# pylint: disable=unused-import,wildcard-import
from tensorflow.contrib.gan.python.namedtuples import *
from tensorflow.contrib.gan.python.train import *
# pylint: enable=unused-import,wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = [
'estimator',
'eval',
'features',
'losses',
]
_allowed_symbols += train.__all__
_allowed_symbols += namedtuples.__all__
remove_undocumented(__name__, _allowed_symbols)
|
tensorflow-master
|
tensorflow/contrib/gan/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for gan.python.train."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.contrib import layers
from tensorflow.contrib.framework.python.ops import variables as variables_lib
from tensorflow.contrib.gan.python import namedtuples
from tensorflow.contrib.gan.python import train
from tensorflow.contrib.gan.python.features.python import random_tensor_pool
from tensorflow.contrib.slim.python.slim import learning as slim_learning
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.ops.distributions import categorical
from tensorflow.python.platform import test
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import coordinator
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import sync_replicas_optimizer
from tensorflow.python.training import training_util
def generator_model(inputs):
return variable_scope.get_variable('dummy_g', initializer=2.0) * inputs
class Generator(object):
def __call__(self, inputs):
return generator_model(inputs)
def infogan_generator_model(inputs):
return variable_scope.get_variable('dummy_g', initializer=2.0) * inputs[0]
class InfoGANGenerator(object):
def __call__(self, inputs):
return infogan_generator_model(inputs)
def discriminator_model(inputs, _):
return variable_scope.get_variable('dummy_d', initializer=2.0) * inputs
class Discriminator(object):
def __call__(self, inputs, _):
return discriminator_model(inputs, _)
def infogan_discriminator_model(inputs, _):
return (variable_scope.get_variable('dummy_d', initializer=2.0) * inputs,
[categorical.Categorical([1.0])])
class InfoGANDiscriminator(object):
def __call__(self, inputs, _):
return infogan_discriminator_model(inputs, _)
def acgan_discriminator_model(inputs, _, num_classes=10):
return (
discriminator_model(inputs, _),
array_ops.one_hot(
# TODO(haeusser): infer batch size from input
random_ops.random_uniform(
[3], maxval=num_classes, dtype=dtypes.int32),
num_classes))
class ACGANDiscriminator(object):
def __call__(self, inputs, _, num_classes=10):
return (
discriminator_model(inputs, _),
array_ops.one_hot(
# TODO(haeusser): infer batch size from input
random_ops.random_uniform(
[3], maxval=num_classes, dtype=dtypes.int32),
num_classes))
def stargan_generator_model(inputs, _):
"""Dummy generator for StarGAN."""
return variable_scope.get_variable('dummy_g', initializer=0.5) * inputs
class StarGANGenerator(object):
def __call__(self, inputs, _):
return stargan_generator_model(inputs, _)
def stargan_discriminator_model(inputs, num_domains):
"""Differentiable dummy discriminator for StarGAN."""
hidden = layers.flatten(inputs)
output_src = math_ops.reduce_mean(hidden, axis=1)
output_cls = layers.fully_connected(
inputs=hidden,
num_outputs=num_domains,
activation_fn=None,
normalizer_fn=None,
biases_initializer=None)
return output_src, output_cls
class StarGANDiscriminator(object):
def __call__(self, inputs, num_domains):
return stargan_discriminator_model(inputs, num_domains)
def get_gan_model():
# TODO(joelshor): Find a better way of creating a variable scope.
with variable_scope.variable_scope('generator') as gen_scope:
pass
with variable_scope.variable_scope('discriminator') as dis_scope:
pass
return namedtuples.GANModel(
generator_inputs=None,
generated_data=None,
generator_variables=None,
generator_scope=gen_scope,
generator_fn=generator_model,
real_data=array_ops.ones([1, 2, 3]),
discriminator_real_outputs=array_ops.ones([1, 2, 3]),
discriminator_gen_outputs=array_ops.ones([1, 2, 3]),
discriminator_variables=None,
discriminator_scope=dis_scope,
discriminator_fn=discriminator_model)
def get_callable_gan_model():
ganmodel = get_gan_model()
return ganmodel._replace(
generator_fn=Generator(), discriminator_fn=Discriminator())
def create_gan_model():
return train.gan_model(
generator_model,
discriminator_model,
real_data=array_ops.zeros([1, 2]),
generator_inputs=random_ops.random_normal([1, 2]))
def create_callable_gan_model():
return train.gan_model(
Generator(),
Discriminator(),
real_data=array_ops.zeros([1, 2]),
generator_inputs=random_ops.random_normal([1, 2]))
def get_infogan_model():
return namedtuples.InfoGANModel(
*get_gan_model(),
structured_generator_inputs=[constant_op.constant(0)],
predicted_distributions=[categorical.Categorical([1.0])],
discriminator_and_aux_fn=infogan_discriminator_model)
def get_callable_infogan_model():
return namedtuples.InfoGANModel(
*get_callable_gan_model(),
structured_generator_inputs=[constant_op.constant(0)],
predicted_distributions=[categorical.Categorical([1.0])],
discriminator_and_aux_fn=infogan_discriminator_model)
def create_infogan_model():
return train.infogan_model(
infogan_generator_model,
infogan_discriminator_model,
real_data=array_ops.zeros([1, 2]),
unstructured_generator_inputs=[],
structured_generator_inputs=[random_ops.random_normal([1, 2])])
def create_callable_infogan_model():
return train.infogan_model(
InfoGANGenerator(),
InfoGANDiscriminator(),
real_data=array_ops.zeros([1, 2]),
unstructured_generator_inputs=[],
structured_generator_inputs=[random_ops.random_normal([1, 2])])
def get_acgan_model():
return namedtuples.ACGANModel(
*get_gan_model(),
one_hot_labels=array_ops.one_hot([0, 1, 2], 10),
discriminator_real_classification_logits=array_ops.one_hot([0, 1, 3], 10),
discriminator_gen_classification_logits=array_ops.one_hot([0, 1, 4], 10))
def get_callable_acgan_model():
return namedtuples.ACGANModel(
*get_callable_gan_model(),
one_hot_labels=array_ops.one_hot([0, 1, 2], 10),
discriminator_real_classification_logits=array_ops.one_hot([0, 1, 3], 10),
discriminator_gen_classification_logits=array_ops.one_hot([0, 1, 4], 10))
def create_acgan_model():
return train.acgan_model(
generator_model,
acgan_discriminator_model,
real_data=array_ops.zeros([1, 2]),
generator_inputs=random_ops.random_normal([1, 2]),
one_hot_labels=array_ops.one_hot([0, 1, 2], 10))
def create_callable_acgan_model():
return train.acgan_model(
Generator(),
ACGANDiscriminator(),
real_data=array_ops.zeros([1, 2]),
generator_inputs=random_ops.random_normal([1, 2]),
one_hot_labels=array_ops.one_hot([0, 1, 2], 10))
def get_cyclegan_model():
return namedtuples.CycleGANModel(
model_x2y=get_gan_model(),
model_y2x=get_gan_model(),
reconstructed_x=array_ops.ones([1, 2, 3]),
reconstructed_y=array_ops.zeros([1, 2, 3]))
def get_callable_cyclegan_model():
return namedtuples.CycleGANModel(
model_x2y=get_callable_gan_model(),
model_y2x=get_callable_gan_model(),
reconstructed_x=array_ops.ones([1, 2, 3]),
reconstructed_y=array_ops.zeros([1, 2, 3]))
def create_cyclegan_model():
return train.cyclegan_model(
generator_model,
discriminator_model,
data_x=array_ops.zeros([1, 2]),
data_y=array_ops.ones([1, 2]))
def create_callable_cyclegan_model():
return train.cyclegan_model(
Generator(),
Discriminator(),
data_x=array_ops.zeros([1, 2]),
data_y=array_ops.ones([1, 2]))
def get_stargan_model():
"""Similar to get_gan_model()."""
# TODO(joelshor): Find a better way of creating a variable scope.
with variable_scope.variable_scope('generator') as gen_scope:
pass
with variable_scope.variable_scope('discriminator') as dis_scope:
pass
return namedtuples.StarGANModel(
input_data=array_ops.ones([1, 2, 2, 3]),
input_data_domain_label=array_ops.ones([1, 2]),
generated_data=array_ops.ones([1, 2, 2, 3]),
generated_data_domain_target=array_ops.ones([1, 2]),
reconstructed_data=array_ops.ones([1, 2, 2, 3]),
discriminator_input_data_source_predication=array_ops.ones([1]),
discriminator_generated_data_source_predication=array_ops.ones([1]),
discriminator_input_data_domain_predication=array_ops.ones([1, 2]),
discriminator_generated_data_domain_predication=array_ops.ones([1, 2]),
generator_variables=None,
generator_scope=gen_scope,
generator_fn=stargan_generator_model,
discriminator_variables=None,
discriminator_scope=dis_scope,
discriminator_fn=stargan_discriminator_model)
def get_callable_stargan_model():
model = get_stargan_model()
return model._replace(
generator_fn=StarGANGenerator(), discriminator_fn=StarGANDiscriminator())
def create_stargan_model():
return train.stargan_model(
stargan_generator_model, stargan_discriminator_model,
array_ops.ones([1, 2, 2, 3]), array_ops.ones([1, 2]))
def create_callable_stargan_model():
return train.stargan_model(StarGANGenerator(), StarGANDiscriminator(),
array_ops.ones([1, 2, 2, 3]),
array_ops.ones([1, 2]))
def get_sync_optimizer():
return sync_replicas_optimizer.SyncReplicasOptimizer(
gradient_descent.GradientDescentOptimizer(learning_rate=1.0),
replicas_to_aggregate=1)
class GANModelTest(test.TestCase, parameterized.TestCase):
"""Tests for `gan_model`."""
@parameterized.named_parameters(
('gan', get_gan_model, namedtuples.GANModel),
('callable_gan', get_callable_gan_model, namedtuples.GANModel),
('infogan', get_infogan_model, namedtuples.InfoGANModel),
('callable_infogan', get_callable_infogan_model,
namedtuples.InfoGANModel),
('acgan', get_acgan_model, namedtuples.ACGANModel),
('callable_acgan', get_callable_acgan_model, namedtuples.ACGANModel),
('cyclegan', get_cyclegan_model, namedtuples.CycleGANModel),
('callable_cyclegan', get_callable_cyclegan_model,
namedtuples.CycleGANModel),
('stargan', get_stargan_model, namedtuples.StarGANModel),
('callable_stargan', get_callable_stargan_model, namedtuples.StarGANModel)
)
def test_output_type(self, create_fn, expected_tuple_type):
"""Test that output type is as expected."""
self.assertIsInstance(create_fn(), expected_tuple_type)
def test_no_shape_check(self):
def dummy_generator_model(_):
return (None, None)
def dummy_discriminator_model(data, conditioning): # pylint: disable=unused-argument
return 1
with self.assertRaisesRegexp(AttributeError, 'object has no attribute'):
train.gan_model(
dummy_generator_model,
dummy_discriminator_model,
real_data=array_ops.zeros([1, 2]),
generator_inputs=array_ops.zeros([1]),
check_shapes=True)
train.gan_model(
dummy_generator_model,
dummy_discriminator_model,
real_data=array_ops.zeros([1, 2]),
generator_inputs=array_ops.zeros([1]),
check_shapes=False)
class StarGANModelTest(test.TestCase):
"""Tests for `stargan_model`."""
@staticmethod
def create_input_and_label_tensor(batch_size, img_size, c_size, num_domains):
input_tensor_list = []
label_tensor_list = []
for _ in range(num_domains):
input_tensor_list.append(
random_ops.random_uniform((batch_size, img_size, img_size, c_size)))
domain_idx = random_ops.random_uniform(
[batch_size], minval=0, maxval=num_domains, dtype=dtypes.int32)
label_tensor_list.append(array_ops.one_hot(domain_idx, num_domains))
return input_tensor_list, label_tensor_list
def test_generate_stargan_random_domain_target(self):
batch_size = 8
domain_numbers = 3
target_tensor = train._generate_stargan_random_domain_target(
batch_size, domain_numbers)
with self.cached_session() as sess:
targets = sess.run(target_tensor)
self.assertTupleEqual((batch_size, domain_numbers), targets.shape)
for target in targets:
self.assertEqual(1, np.sum(target))
self.assertEqual(1, np.max(target))
def test_stargan_model_output_type(self):
batch_size = 2
img_size = 16
c_size = 3
num_domains = 5
input_tensor, label_tensor = StarGANModelTest.create_input_and_label_tensor(
batch_size, img_size, c_size, num_domains)
model = train.stargan_model(
generator_fn=stargan_generator_model,
discriminator_fn=stargan_discriminator_model,
input_data=input_tensor,
input_data_domain_label=label_tensor)
self.assertIsInstance(model, namedtuples.StarGANModel)
self.assertTrue(isinstance(model.discriminator_variables, list))
self.assertTrue(isinstance(model.generator_variables, list))
self.assertIsInstance(model.discriminator_scope,
variable_scope.VariableScope)
self.assertIsInstance(model.generator_scope, variable_scope.VariableScope)
self.assertTrue(callable(model.discriminator_fn))
self.assertTrue(callable(model.generator_fn))
def test_stargan_model_generator_output(self):
batch_size = 2
img_size = 16
c_size = 3
num_domains = 5
input_tensor, label_tensor = StarGANModelTest.create_input_and_label_tensor(
batch_size, img_size, c_size, num_domains)
model = train.stargan_model(
generator_fn=stargan_generator_model,
discriminator_fn=stargan_discriminator_model,
input_data=input_tensor,
input_data_domain_label=label_tensor)
with self.test_session(use_gpu=True) as sess:
sess.run(variables.global_variables_initializer())
input_data, generated_data, reconstructed_data = sess.run(
[model.input_data, model.generated_data, model.reconstructed_data])
self.assertTupleEqual(
(batch_size * num_domains, img_size, img_size, c_size),
input_data.shape)
self.assertTupleEqual(
(batch_size * num_domains, img_size, img_size, c_size),
generated_data.shape)
self.assertTupleEqual(
(batch_size * num_domains, img_size, img_size, c_size),
reconstructed_data.shape)
def test_stargan_model_discriminator_output(self):
batch_size = 2
img_size = 16
c_size = 3
num_domains = 5
input_tensor, label_tensor = StarGANModelTest.create_input_and_label_tensor(
batch_size, img_size, c_size, num_domains)
model = train.stargan_model(
generator_fn=stargan_generator_model,
discriminator_fn=stargan_discriminator_model,
input_data=input_tensor,
input_data_domain_label=label_tensor)
with self.test_session(use_gpu=True) as sess:
sess.run(variables.global_variables_initializer())
disc_input_data_source_pred, disc_gen_data_source_pred = sess.run([
model.discriminator_input_data_source_predication,
model.discriminator_generated_data_source_predication
])
self.assertEqual(1, len(disc_input_data_source_pred.shape))
self.assertEqual(batch_size * num_domains,
disc_input_data_source_pred.shape[0])
self.assertEqual(1, len(disc_gen_data_source_pred.shape))
self.assertEqual(batch_size * num_domains,
disc_gen_data_source_pred.shape[0])
input_label, disc_input_label, gen_label, disc_gen_label = sess.run([
model.input_data_domain_label,
model.discriminator_input_data_domain_predication,
model.generated_data_domain_target,
model.discriminator_generated_data_domain_predication
])
self.assertTupleEqual((batch_size * num_domains, num_domains),
input_label.shape)
self.assertTupleEqual((batch_size * num_domains, num_domains),
disc_input_label.shape)
self.assertTupleEqual((batch_size * num_domains, num_domains),
gen_label.shape)
self.assertTupleEqual((batch_size * num_domains, num_domains),
disc_gen_label.shape)
class GANLossTest(test.TestCase, parameterized.TestCase):
"""Tests for `gan_loss`."""
@parameterized.named_parameters(
('gan', get_gan_model),
('callable_gan', get_callable_gan_model),
('infogan', get_infogan_model),
('callable_infogan', get_callable_infogan_model),
('acgan', get_acgan_model),
('callable_acgan', get_callable_acgan_model),
)
def test_output_type(self, get_gan_model_fn):
"""Test output type."""
loss = train.gan_loss(get_gan_model_fn(), add_summaries=True)
self.assertIsInstance(loss, namedtuples.GANLoss)
self.assertNotEmpty(ops.get_collection(ops.GraphKeys.SUMMARIES))
@parameterized.named_parameters(
('cyclegan', create_cyclegan_model),
('callable_cyclegan', create_callable_cyclegan_model),
)
def test_cyclegan_output_type(self, get_gan_model_fn):
loss = train.cyclegan_loss(get_gan_model_fn(), add_summaries=True)
self.assertIsInstance(loss, namedtuples.CycleGANLoss)
self.assertNotEmpty(ops.get_collection(ops.GraphKeys.SUMMARIES))
@parameterized.named_parameters(
('gan', create_gan_model, False),
('gan_one_sided', create_gan_model, True),
('callable_gan', create_callable_gan_model, False),
('callable_gan_one_sided', create_callable_gan_model, True),
('infogan', create_infogan_model, False),
('infogan_one_sided', create_infogan_model, True),
('callable_infogan', create_callable_infogan_model, False),
('callable_infogan_one_sided', create_callable_infogan_model, True),
('acgan', create_acgan_model, False),
('acgan_one_sided', create_acgan_model, True),
('callable_acgan', create_callable_acgan_model, False),
('callable_acgan_one_sided', create_callable_acgan_model, True),
)
def test_grad_penalty(self, create_gan_model_fn, one_sided):
"""Test gradient penalty option."""
model = create_gan_model_fn()
loss = train.gan_loss(model)
loss_gp = train.gan_loss(
model,
gradient_penalty_weight=1.0,
gradient_penalty_one_sided=one_sided)
self.assertIsInstance(loss_gp, namedtuples.GANLoss)
# Check values.
with self.test_session(use_gpu=True) as sess:
variables.global_variables_initializer().run()
loss_gen_np, loss_gen_gp_np = sess.run(
[loss.generator_loss, loss_gp.generator_loss])
loss_dis_np, loss_dis_gp_np = sess.run(
[loss.discriminator_loss, loss_gp.discriminator_loss])
self.assertEqual(loss_gen_np, loss_gen_gp_np)
self.assertLess(loss_dis_np, loss_dis_gp_np)
@parameterized.named_parameters(
('infogan', get_infogan_model),
('callable_infogan', get_callable_infogan_model),
)
def test_mutual_info_penalty(self, create_gan_model_fn):
"""Test mutual information penalty option."""
train.gan_loss(
create_gan_model_fn(),
mutual_information_penalty_weight=constant_op.constant(1.0))
@parameterized.named_parameters(
('gan', get_gan_model),
('callable_gan', get_callable_gan_model),
('infogan', get_infogan_model),
('callable_infogan', get_callable_infogan_model),
('acgan', get_acgan_model),
('callable_acgan', get_callable_acgan_model),
)
def test_regularization_helper(self, get_gan_model_fn):
"""Test regularization loss."""
# Evaluate losses without regularization.
no_reg_loss = train.gan_loss(get_gan_model_fn())
with self.test_session(use_gpu=True):
no_reg_loss_gen_np = no_reg_loss.generator_loss.eval()
no_reg_loss_dis_np = no_reg_loss.discriminator_loss.eval()
with ops.name_scope(get_gan_model_fn().generator_scope.name):
ops.add_to_collection(ops.GraphKeys.REGULARIZATION_LOSSES,
constant_op.constant(3.0))
with ops.name_scope(get_gan_model_fn().discriminator_scope.name):
ops.add_to_collection(ops.GraphKeys.REGULARIZATION_LOSSES,
constant_op.constant(2.0))
# Check that losses now include the correct regularization values.
reg_loss = train.gan_loss(get_gan_model_fn())
with self.test_session(use_gpu=True):
reg_loss_gen_np = reg_loss.generator_loss.eval()
reg_loss_dis_np = reg_loss.discriminator_loss.eval()
self.assertEqual(3.0, reg_loss_gen_np - no_reg_loss_gen_np)
self.assertEqual(2.0, reg_loss_dis_np - no_reg_loss_dis_np)
@parameterized.named_parameters(
('notcallable', create_acgan_model),
('callable', create_callable_acgan_model),
)
def test_acgan(self, create_gan_model_fn):
"""Test that ACGAN models work."""
model = create_gan_model_fn()
loss = train.gan_loss(model)
loss_ac_gen = train.gan_loss(model, aux_cond_generator_weight=1.0)
loss_ac_dis = train.gan_loss(model, aux_cond_discriminator_weight=1.0)
self.assertIsInstance(loss, namedtuples.GANLoss)
self.assertIsInstance(loss_ac_gen, namedtuples.GANLoss)
self.assertIsInstance(loss_ac_dis, namedtuples.GANLoss)
# Check values.
with self.test_session(use_gpu=True) as sess:
variables.global_variables_initializer().run()
loss_gen_np, loss_ac_gen_gen_np, loss_ac_dis_gen_np = sess.run([
loss.generator_loss, loss_ac_gen.generator_loss,
loss_ac_dis.generator_loss
])
loss_dis_np, loss_ac_gen_dis_np, loss_ac_dis_dis_np = sess.run([
loss.discriminator_loss, loss_ac_gen.discriminator_loss,
loss_ac_dis.discriminator_loss
])
self.assertLess(loss_gen_np, loss_dis_np)
self.assertTrue(np.isscalar(loss_ac_gen_gen_np))
self.assertTrue(np.isscalar(loss_ac_dis_gen_np))
self.assertTrue(np.isscalar(loss_ac_gen_dis_np))
self.assertTrue(np.isscalar(loss_ac_dis_dis_np))
@parameterized.named_parameters(
('notcallable', create_cyclegan_model),
('callable', create_callable_cyclegan_model),
)
def test_cyclegan(self, create_gan_model_fn):
"""Test that CycleGan models work."""
model = create_gan_model_fn()
loss = train.cyclegan_loss(model)
self.assertIsInstance(loss, namedtuples.CycleGANLoss)
# Check values.
with self.test_session(use_gpu=True) as sess:
variables.global_variables_initializer().run()
(loss_x2y_gen_np, loss_x2y_dis_np, loss_y2x_gen_np,
loss_y2x_dis_np) = sess.run([
loss.loss_x2y.generator_loss, loss.loss_x2y.discriminator_loss,
loss.loss_y2x.generator_loss, loss.loss_y2x.discriminator_loss
])
self.assertGreater(loss_x2y_gen_np, loss_x2y_dis_np)
self.assertGreater(loss_y2x_gen_np, loss_y2x_dis_np)
self.assertTrue(np.isscalar(loss_x2y_gen_np))
self.assertTrue(np.isscalar(loss_x2y_dis_np))
self.assertTrue(np.isscalar(loss_y2x_gen_np))
self.assertTrue(np.isscalar(loss_y2x_dis_np))
@parameterized.named_parameters(
('notcallable', create_stargan_model),
('callable', create_callable_stargan_model),
)
def test_stargan(self, create_gan_model_fn):
model = create_gan_model_fn()
model_loss = train.stargan_loss(model)
self.assertIsInstance(model_loss, namedtuples.GANLoss)
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
gen_loss, disc_loss = sess.run(
[model_loss.generator_loss, model_loss.discriminator_loss])
self.assertTrue(np.isscalar(gen_loss))
self.assertTrue(np.isscalar(disc_loss))
@parameterized.named_parameters(
('gan', create_gan_model),
('callable_gan', create_callable_gan_model),
('infogan', create_infogan_model),
('callable_infogan', create_callable_infogan_model),
('acgan', create_acgan_model),
('callable_acgan', create_callable_acgan_model),
)
def test_tensor_pool(self, create_gan_model_fn):
"""Test tensor pool option."""
model = create_gan_model_fn()
tensor_pool_fn = lambda x: random_tensor_pool.tensor_pool(x, pool_size=5)
loss = train.gan_loss(model, tensor_pool_fn=tensor_pool_fn)
self.assertIsInstance(loss, namedtuples.GANLoss)
# Check values.
with self.test_session(use_gpu=True) as sess:
variables.global_variables_initializer().run()
for _ in range(10):
sess.run([loss.generator_loss, loss.discriminator_loss])
def test_discriminator_only_sees_pool(self):
"""Checks that discriminator only sees pooled values."""
def checker_gen_fn(_):
return constant_op.constant(0.0)
model = train.gan_model(
checker_gen_fn,
discriminator_model,
real_data=array_ops.zeros([]),
generator_inputs=random_ops.random_normal([]))
def tensor_pool_fn(_):
return (random_ops.random_uniform([]), random_ops.random_uniform([]))
def checker_dis_fn(inputs, _):
"""Discriminator that checks that it only sees pooled Tensors."""
self.assertFalse(constant_op.is_constant(inputs))
return inputs
model = model._replace(
discriminator_fn=checker_dis_fn)
train.gan_loss(model, tensor_pool_fn=tensor_pool_fn)
def test_doesnt_crash_when_in_nested_scope(self):
with variable_scope.variable_scope('outer_scope'):
gan_model = train.gan_model(
generator_model,
discriminator_model,
real_data=array_ops.zeros([1, 2]),
generator_inputs=random_ops.random_normal([1, 2]))
# This should work inside a scope.
train.gan_loss(gan_model, gradient_penalty_weight=1.0)
# This should also work outside a scope.
train.gan_loss(gan_model, gradient_penalty_weight=1.0)
class TensorPoolAdjustedModelTest(test.TestCase):
  """Tests for `_tensor_pool_adjusted_model`."""
def _check_tensor_pool_adjusted_model_outputs(
self, tensor1, tensor2, pool_size):
history_values = []
with self.test_session(use_gpu=True) as sess:
variables.global_variables_initializer().run()
for i in range(2 * pool_size):
t1, t2 = sess.run([tensor1, tensor2])
history_values.append(t1)
        if i < pool_size:
          # For i in [0, pool_size), the pool is not yet full, so tensor2 is
          # simply passed through and must equal tensor1.
          self.assertAllEqual(t1, t2)
        else:
          # For i in [pool_size, 2 * pool_size), the pool is full, so tensor2
          # must equal one of the values of tensor1 previously stored in the
          # pool.
          self.assertTrue(any((v == t2).all() for v in history_values))
def _make_new_model_and_check(self, model, pool_size):
pool_fn = lambda x: random_tensor_pool.tensor_pool(x, pool_size=pool_size)
new_model = train._tensor_pool_adjusted_model(model, pool_fn)
# 'Generator/dummy_g:0' and 'Discriminator/dummy_d:0'
self.assertEqual(2, len(ops.get_collection(ops.GraphKeys.VARIABLES)))
self.assertIsNot(new_model.discriminator_gen_outputs,
model.discriminator_gen_outputs)
return new_model
def test_tensor_pool_adjusted_model_gan(self):
"""Test `_tensor_pool_adjusted_model` for gan model."""
pool_size = 5
model = create_gan_model()
new_model = self._make_new_model_and_check(model, pool_size)
# Check values.
self._check_tensor_pool_adjusted_model_outputs(
model.discriminator_gen_outputs, new_model.discriminator_gen_outputs,
pool_size)
def test_tensor_pool_adjusted_model_infogan(self):
"""Test _tensor_pool_adjusted_model for infogan model."""
pool_size = 5
model = create_infogan_model()
new_model = self._make_new_model_and_check(model, pool_size)
# Check values.
self.assertIsNot(new_model.predicted_distributions,
model.predicted_distributions)
self._check_tensor_pool_adjusted_model_outputs(
model.discriminator_gen_outputs, new_model.discriminator_gen_outputs,
pool_size)
def test_tensor_pool_adjusted_model_acgan(self):
"""Test _tensor_pool_adjusted_model for acgan model."""
pool_size = 5
model = create_acgan_model()
new_model = self._make_new_model_and_check(model, pool_size)
# Check values.
self.assertIsNot(new_model.discriminator_gen_classification_logits,
model.discriminator_gen_classification_logits)
self._check_tensor_pool_adjusted_model_outputs(
model.discriminator_gen_outputs, new_model.discriminator_gen_outputs,
pool_size)
class GANTrainOpsTest(test.TestCase, parameterized.TestCase):
"""Tests for `gan_train_ops`."""
@parameterized.named_parameters(
('gan', create_gan_model),
('callable_gan', create_callable_gan_model),
('infogan', create_infogan_model),
('callable_infogan', create_callable_infogan_model),
('acgan', create_acgan_model),
('callable_acgan', create_callable_acgan_model),
)
def test_output_type(self, create_gan_model_fn):
model = create_gan_model_fn()
loss = train.gan_loss(model)
g_opt = gradient_descent.GradientDescentOptimizer(1.0)
d_opt = gradient_descent.GradientDescentOptimizer(1.0)
train_ops = train.gan_train_ops(
model,
loss,
g_opt,
d_opt,
summarize_gradients=True,
colocate_gradients_with_ops=True)
self.assertIsInstance(train_ops, namedtuples.GANTrainOps)
# Make sure there are no training hooks populated accidentally.
self.assertEmpty(train_ops.train_hooks)
# TODO(joelshor): Add a test to check that custom update op is run.
@parameterized.named_parameters(
('gan', create_gan_model, False),
('gan_provideupdates', create_gan_model, True),
('callable_gan', create_callable_gan_model, False),
('callable_gan_provideupdates', create_callable_gan_model, True),
('infogan', create_infogan_model, False),
('infogan_provideupdates', create_infogan_model, True),
('callable_infogan', create_callable_infogan_model, False),
('callable_infogan_provideupdates', create_callable_infogan_model, True),
('acgan', create_acgan_model, False),
('acgan_provideupdates', create_acgan_model, True),
('callable_acgan', create_callable_acgan_model, False),
('callable_acgan_provideupdates', create_callable_acgan_model, True),
)
def test_unused_update_ops(self, create_gan_model_fn, provide_update_ops):
model = create_gan_model_fn()
loss = train.gan_loss(model)
# Add generator and discriminator update ops.
with variable_scope.variable_scope(model.generator_scope):
gen_update_count = variable_scope.get_variable('gen_count', initializer=0)
gen_update_op = gen_update_count.assign_add(1)
ops.add_to_collection(ops.GraphKeys.UPDATE_OPS, gen_update_op)
with variable_scope.variable_scope(model.discriminator_scope):
dis_update_count = variable_scope.get_variable('dis_count', initializer=0)
dis_update_op = dis_update_count.assign_add(1)
ops.add_to_collection(ops.GraphKeys.UPDATE_OPS, dis_update_op)
# Add an update op outside the generator and discriminator scopes.
if provide_update_ops:
kwargs = {
'update_ops': [
constant_op.constant(1.0), gen_update_op, dis_update_op
]
}
else:
ops.add_to_collection(ops.GraphKeys.UPDATE_OPS, constant_op.constant(1.0))
kwargs = {}
g_opt = gradient_descent.GradientDescentOptimizer(1.0)
d_opt = gradient_descent.GradientDescentOptimizer(1.0)
with self.assertRaisesRegexp(ValueError, 'There are unused update ops:'):
train.gan_train_ops(
model, loss, g_opt, d_opt, check_for_unused_update_ops=True, **kwargs)
train_ops = train.gan_train_ops(
model, loss, g_opt, d_opt, check_for_unused_update_ops=False, **kwargs)
with self.test_session(use_gpu=True) as sess:
sess.run(variables.global_variables_initializer())
self.assertEqual(0, gen_update_count.eval())
self.assertEqual(0, dis_update_count.eval())
train_ops.generator_train_op.eval()
self.assertEqual(1, gen_update_count.eval())
self.assertEqual(0, dis_update_count.eval())
train_ops.discriminator_train_op.eval()
self.assertEqual(1, gen_update_count.eval())
self.assertEqual(1, dis_update_count.eval())
@parameterized.named_parameters(
('gan', create_gan_model, False),
('callable_gan', create_callable_gan_model, False),
('infogan', create_infogan_model, False),
('callable_infogan', create_callable_infogan_model, False),
('acgan', create_acgan_model, False),
('callable_acgan', create_callable_acgan_model, False),
('gan_canbeint32', create_gan_model, True),
)
def test_sync_replicas(self, create_gan_model_fn, create_global_step):
model = create_gan_model_fn()
loss = train.gan_loss(model)
num_trainable_vars = len(variables_lib.get_trainable_variables())
if create_global_step:
gstep = variable_scope.get_variable(
'custom_gstep', dtype=dtypes.int32, initializer=0, trainable=False)
ops.add_to_collection(ops.GraphKeys.GLOBAL_STEP, gstep)
g_opt = get_sync_optimizer()
d_opt = get_sync_optimizer()
train_ops = train.gan_train_ops(
model, loss, generator_optimizer=g_opt, discriminator_optimizer=d_opt)
self.assertIsInstance(train_ops, namedtuples.GANTrainOps)
# No new trainable variables should have been added.
self.assertLen(variables_lib.get_trainable_variables(), num_trainable_vars)
# Sync hooks should be populated in the GANTrainOps.
self.assertLen(train_ops.train_hooks, 2)
for hook in train_ops.train_hooks:
self.assertIsInstance(
hook, sync_replicas_optimizer._SyncReplicasOptimizerHook)
sync_opts = [hook._sync_optimizer for hook in train_ops.train_hooks]
self.assertSetEqual(frozenset(sync_opts), frozenset((g_opt, d_opt)))
g_sync_init_op = g_opt.get_init_tokens_op(num_tokens=1)
d_sync_init_op = d_opt.get_init_tokens_op(num_tokens=1)
# Check that update op is run properly.
global_step = training_util.get_or_create_global_step()
with self.test_session(use_gpu=True) as sess:
variables.global_variables_initializer().run()
variables.local_variables_initializer().run()
g_opt.chief_init_op.run()
d_opt.chief_init_op.run()
gstep_before = global_step.eval()
# Start required queue runner for SyncReplicasOptimizer.
coord = coordinator.Coordinator()
g_threads = g_opt.get_chief_queue_runner().create_threads(sess, coord)
d_threads = d_opt.get_chief_queue_runner().create_threads(sess, coord)
g_sync_init_op.run()
d_sync_init_op.run()
train_ops.generator_train_op.eval()
# Check that global step wasn't incremented.
self.assertEqual(gstep_before, global_step.eval())
train_ops.discriminator_train_op.eval()
# Check that global step wasn't incremented.
self.assertEqual(gstep_before, global_step.eval())
coord.request_stop()
coord.join(g_threads + d_threads)
@parameterized.named_parameters(
('is_chief', True),
('is_not_chief', False),
)
def test_is_chief_in_train_hooks(self, is_chief):
"""Make sure is_chief is propagated correctly to sync hooks."""
model = create_gan_model()
loss = train.gan_loss(model)
g_opt = get_sync_optimizer()
d_opt = get_sync_optimizer()
train_ops = train.gan_train_ops(
model,
loss,
g_opt,
d_opt,
is_chief=is_chief,
summarize_gradients=True,
colocate_gradients_with_ops=True)
self.assertLen(train_ops.train_hooks, 2)
for hook in train_ops.train_hooks:
self.assertIsInstance(
hook, sync_replicas_optimizer._SyncReplicasOptimizerHook)
is_chief_list = [hook._is_chief for hook in train_ops.train_hooks]
self.assertListEqual(is_chief_list, [is_chief, is_chief])
class GANTrainTest(test.TestCase, parameterized.TestCase):
"""Tests for `gan_train`."""
def _gan_train_ops(self, generator_add, discriminator_add):
step = training_util.create_global_step()
    # Increment the global step every time a train op is run so we can count
    # the number of times they're run.
# NOTE: `use_locking=True` is required to avoid race conditions with
# joint training.
train_ops = namedtuples.GANTrainOps(
generator_train_op=step.assign_add(generator_add, use_locking=True),
discriminator_train_op=step.assign_add(
discriminator_add, use_locking=True),
global_step_inc_op=step.assign_add(1))
return train_ops
@parameterized.named_parameters(
('gan', create_gan_model),
('callable_gan', create_callable_gan_model),
('infogan', create_infogan_model),
('callable_infogan', create_callable_infogan_model),
('acgan', create_acgan_model),
('callable_acgan', create_callable_acgan_model),
)
def test_run_helper(self, create_gan_model_fn):
random_seed.set_random_seed(1234)
model = create_gan_model_fn()
loss = train.gan_loss(model)
g_opt = gradient_descent.GradientDescentOptimizer(1.0)
d_opt = gradient_descent.GradientDescentOptimizer(1.0)
train_ops = train.gan_train_ops(model, loss, g_opt, d_opt)
final_step = train.gan_train(
train_ops,
logdir='',
hooks=[basic_session_run_hooks.StopAtStepHook(num_steps=2)])
self.assertTrue(np.isscalar(final_step))
self.assertEqual(2, final_step)
@parameterized.named_parameters(
('seq_train_steps', train.get_sequential_train_hooks),
('efficient_seq_train_steps', train.get_joint_train_hooks),
)
def test_multiple_steps(self, get_hooks_fn_fn):
"""Test multiple train steps."""
train_ops = self._gan_train_ops(generator_add=10, discriminator_add=100)
train_steps = namedtuples.GANTrainSteps(
generator_train_steps=3, discriminator_train_steps=4)
final_step = train.gan_train(
train_ops,
get_hooks_fn=get_hooks_fn_fn(train_steps),
logdir='',
hooks=[basic_session_run_hooks.StopAtStepHook(num_steps=1)])
self.assertTrue(np.isscalar(final_step))
self.assertEqual(1 + 3 * 10 + 4 * 100, final_step)
def test_supervisor_run_gan_model_train_ops_multiple_steps(self):
step = training_util.create_global_step()
train_ops = namedtuples.GANTrainOps(
generator_train_op=constant_op.constant(3.0),
discriminator_train_op=constant_op.constant(2.0),
global_step_inc_op=step.assign_add(1))
train_steps = namedtuples.GANTrainSteps(
generator_train_steps=3, discriminator_train_steps=4)
final_loss = slim_learning.train(
train_op=train_ops,
logdir='',
global_step=step,
number_of_steps=1,
train_step_fn=train.get_sequential_train_steps(train_steps))
self.assertTrue(np.isscalar(final_loss))
self.assertEqual(17.0, final_loss)
@parameterized.named_parameters(
('gan', create_gan_model),
('callable_gan', create_callable_gan_model),
('infogan', create_infogan_model),
('callable_infogan', create_callable_infogan_model),
('acgan', create_acgan_model),
('callable_acgan', create_callable_acgan_model),
)
def test_train_hooks_exist_in_get_hooks_fn(self, create_gan_model_fn):
model = create_gan_model_fn()
loss = train.gan_loss(model)
g_opt = get_sync_optimizer()
d_opt = get_sync_optimizer()
train_ops = train.gan_train_ops(
model,
loss,
g_opt,
d_opt,
summarize_gradients=True,
colocate_gradients_with_ops=True)
sequential_train_hooks = train.get_sequential_train_hooks()(train_ops)
self.assertLen(sequential_train_hooks, 4)
sync_opts = [
hook._sync_optimizer for hook in sequential_train_hooks if
isinstance(hook, sync_replicas_optimizer._SyncReplicasOptimizerHook)]
self.assertLen(sync_opts, 2)
self.assertSetEqual(frozenset(sync_opts), frozenset((g_opt, d_opt)))
joint_train_hooks = train.get_joint_train_hooks()(train_ops)
self.assertLen(joint_train_hooks, 5)
sync_opts = [
hook._sync_optimizer for hook in joint_train_hooks if
isinstance(hook, sync_replicas_optimizer._SyncReplicasOptimizerHook)]
self.assertLen(sync_opts, 2)
self.assertSetEqual(frozenset(sync_opts), frozenset((g_opt, d_opt)))
class PatchGANTest(test.TestCase, parameterized.TestCase):
"""Tests that functions work on PatchGAN style output."""
@parameterized.named_parameters(
('gan', create_gan_model),
('callable_gan', create_callable_gan_model),
('infogan', create_infogan_model),
('callable_infogan', create_callable_infogan_model),
('acgan', create_acgan_model),
('callable_acgan', create_callable_acgan_model),
)
def test_patchgan(self, create_gan_model_fn):
"""Ensure that patch-based discriminators work end-to-end."""
random_seed.set_random_seed(1234)
model = create_gan_model_fn()
loss = train.gan_loss(model)
g_opt = gradient_descent.GradientDescentOptimizer(1.0)
d_opt = gradient_descent.GradientDescentOptimizer(1.0)
train_ops = train.gan_train_ops(model, loss, g_opt, d_opt)
final_step = train.gan_train(
train_ops,
logdir='',
hooks=[basic_session_run_hooks.StopAtStepHook(num_steps=2)])
self.assertTrue(np.isscalar(final_step))
self.assertEqual(2, final_step)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/gan/python/train_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The TF-GAN project provides a lightweight GAN training/testing framework.
This file contains the core helper functions to create and train a GAN model.
See the README or examples in `tensorflow_models` for details on how to use it.
TF-GAN training occurs in four steps:
1) Create a model
2) Add a loss
3) Create train ops
4) Run the train ops
The functions in this file are organized around these four steps. Each function
corresponds to one of the steps.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework.python.ops import variables as variables_lib
from tensorflow.contrib.gan.python import losses as tfgan_losses
from tensorflow.contrib.gan.python import namedtuples
from tensorflow.contrib.gan.python.losses.python import losses_impl as tfgan_losses_impl
from tensorflow.contrib.slim.python.slim import learning as slim_learning
from tensorflow.contrib.training.python.training import training
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops.losses import losses
from tensorflow.python.summary import summary
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import sync_replicas_optimizer
from tensorflow.python.training import training_util
__all__ = [
'gan_model',
'infogan_model',
'acgan_model',
'cyclegan_model',
'stargan_model',
'gan_loss',
'cyclegan_loss',
'stargan_loss',
'gan_train_ops',
'gan_train',
'get_sequential_train_hooks',
'get_joint_train_hooks',
'get_sequential_train_steps',
'RunTrainOpsHook',
]
def gan_model(
# Lambdas defining models.
generator_fn,
discriminator_fn,
# Real data and conditioning.
real_data,
generator_inputs,
# Optional scopes.
generator_scope='Generator',
discriminator_scope='Discriminator',
# Options.
check_shapes=True):
"""Returns GAN model outputs and variables.
Args:
generator_fn: A python lambda that takes `generator_inputs` as inputs and
returns the outputs of the GAN generator.
discriminator_fn: A python lambda that takes `real_data`/`generated data`
and `generator_inputs`. Outputs a Tensor in the range [-inf, inf].
real_data: A Tensor representing the real data.
generator_inputs: A Tensor or list of Tensors to the generator. In the
vanilla GAN case, this might be a single noise Tensor. In the conditional
GAN case, this might be the generator's conditioning.
generator_scope: Optional generator variable scope. Useful if you want to
reuse a subgraph that has already been created.
discriminator_scope: Optional discriminator variable scope. Useful if you
want to reuse a subgraph that has already been created.
check_shapes: If `True`, check that generator produces Tensors that are the
same shape as real data. Otherwise, skip this check.
Returns:
A GANModel namedtuple.
Raises:
ValueError: If the generator outputs a Tensor that isn't the same shape as
`real_data`.
"""
# Create models
with variable_scope.variable_scope(generator_scope) as gen_scope:
generator_inputs = _convert_tensor_or_l_or_d(generator_inputs)
generated_data = generator_fn(generator_inputs)
with variable_scope.variable_scope(discriminator_scope) as dis_scope:
discriminator_gen_outputs = discriminator_fn(generated_data,
generator_inputs)
with variable_scope.variable_scope(dis_scope, reuse=True):
real_data = _convert_tensor_or_l_or_d(real_data)
discriminator_real_outputs = discriminator_fn(real_data, generator_inputs)
if check_shapes:
if not generated_data.shape.is_compatible_with(real_data.shape):
raise ValueError(
'Generator output shape (%s) must be the same shape as real data '
'(%s).' % (generated_data.shape, real_data.shape))
# Get model-specific variables.
generator_variables = variables_lib.get_trainable_variables(gen_scope)
discriminator_variables = variables_lib.get_trainable_variables(dis_scope)
return namedtuples.GANModel(generator_inputs, generated_data,
generator_variables, gen_scope, generator_fn,
real_data, discriminator_real_outputs,
discriminator_gen_outputs,
discriminator_variables, dis_scope,
discriminator_fn)
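# Illustrative sketch only (not part of the original file): assuming
# `my_generator_fn` and `my_discriminator_fn` satisfy the contract described
# above, `gan_model` is typically invoked as
#
#   model = gan_model(my_generator_fn, my_discriminator_fn,
#                     real_data=array_ops.zeros([16, 2]),
#                     generator_inputs=random_ops.random_normal([16, 4]))
#
# and the resulting GANModel feeds directly into `gan_loss` below.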
def infogan_model(
# Lambdas defining models.
generator_fn,
discriminator_fn,
# Real data and conditioning.
real_data,
unstructured_generator_inputs,
structured_generator_inputs,
# Optional scopes.
generator_scope='Generator',
discriminator_scope='Discriminator'):
"""Returns an InfoGAN model outputs and variables.
See https://arxiv.org/abs/1606.03657 for more details.
Args:
generator_fn: A python lambda that takes a list of Tensors as inputs and
returns the outputs of the GAN generator.
discriminator_fn: A python lambda that takes `real_data`/`generated data`
and `generator_inputs`. Outputs a 2-tuple of (logits, distribution_list).
`logits` are in the range [-inf, inf], and `distribution_list` is a list
      of TensorFlow distributions, where the i-th distribution represents the
      predicted distribution of the i-th structured noise input.
real_data: A Tensor representing the real data.
unstructured_generator_inputs: A list of Tensors to the generator. These
tensors represent the unstructured noise or conditioning.
structured_generator_inputs: A list of Tensors to the generator. These
tensors must have high mutual information with the recognizer.
generator_scope: Optional generator variable scope. Useful if you want to
reuse a subgraph that has already been created.
discriminator_scope: Optional discriminator variable scope. Useful if you
want to reuse a subgraph that has already been created.
Returns:
An InfoGANModel namedtuple.
Raises:
ValueError: If the generator outputs a Tensor that isn't the same shape as
`real_data`.
ValueError: If the discriminator output is malformed.
"""
# Create models
with variable_scope.variable_scope(generator_scope) as gen_scope:
unstructured_generator_inputs = _convert_tensor_or_l_or_d(
unstructured_generator_inputs)
structured_generator_inputs = _convert_tensor_or_l_or_d(
structured_generator_inputs)
generator_inputs = (
unstructured_generator_inputs + structured_generator_inputs)
generated_data = generator_fn(generator_inputs)
with variable_scope.variable_scope(discriminator_scope) as disc_scope:
dis_gen_outputs, predicted_distributions = discriminator_fn(
generated_data, generator_inputs)
_validate_distributions(predicted_distributions, structured_generator_inputs)
with variable_scope.variable_scope(disc_scope, reuse=True):
real_data = ops.convert_to_tensor(real_data)
dis_real_outputs, _ = discriminator_fn(real_data, generator_inputs)
if not generated_data.get_shape().is_compatible_with(real_data.get_shape()):
raise ValueError(
'Generator output shape (%s) must be the same shape as real data '
'(%s).' % (generated_data.get_shape(), real_data.get_shape()))
# Get model-specific variables.
generator_variables = variables_lib.get_trainable_variables(gen_scope)
discriminator_variables = variables_lib.get_trainable_variables(disc_scope)
return namedtuples.InfoGANModel(
generator_inputs,
generated_data,
generator_variables,
gen_scope,
generator_fn,
real_data,
dis_real_outputs,
dis_gen_outputs,
discriminator_variables,
disc_scope,
lambda x, y: discriminator_fn(x, y)[0], # conform to non-InfoGAN API
structured_generator_inputs,
predicted_distributions,
discriminator_fn)
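# Hypothetical sketch (all names are placeholders; `layers` and `ds` stand for
# assumed imports of `tf.contrib.layers` and `tf.distributions`): an InfoGAN
# `discriminator_fn` must return a (logits, distribution_list) 2-tuple, e.g.
#
#   def my_infogan_discriminator_fn(data, unused_conditioning):
#     features = layers.flatten(data)
#     logits = layers.fully_connected(features, 1)
#     cat_logits = layers.fully_connected(features, 10)
#     return logits, [ds.Categorical(logits=cat_logits)]
#
#   model = infogan_model(my_generator_fn, my_infogan_discriminator_fn,
#                         real_data=images,
#                         unstructured_generator_inputs=[noise],
#                         structured_generator_inputs=[cat_code])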
def acgan_model(
# Lambdas defining models.
generator_fn,
discriminator_fn,
# Real data and conditioning.
real_data,
generator_inputs,
one_hot_labels,
# Optional scopes.
generator_scope='Generator',
discriminator_scope='Discriminator',
# Options.
check_shapes=True):
"""Returns an ACGANModel contains all the pieces needed for ACGAN training.
The `acgan_model` is the same as the `gan_model` with the only difference
being that the discriminator additionally outputs logits to classify the input
(real or generated).
Therefore, an explicit field holding one_hot_labels is necessary, as well as a
discriminator_fn that outputs a 2-tuple holding the logits for real/fake and
classification.
See https://arxiv.org/abs/1610.09585 for more details.
Args:
generator_fn: A python lambda that takes `generator_inputs` as inputs and
returns the outputs of the GAN generator.
discriminator_fn: A python lambda that takes `real_data`/`generated data`
and `generator_inputs`. Outputs a tuple consisting of two Tensors: (1)
real/fake logits in the range [-inf, inf] (2) classification logits in
the range [-inf, inf]
real_data: A Tensor representing the real data.
generator_inputs: A Tensor or list of Tensors to the generator. In the
vanilla GAN case, this might be a single noise Tensor. In the conditional
GAN case, this might be the generator's conditioning.
one_hot_labels: A Tensor holding one-hot-labels for the batch. Needed by
acgan_loss.
generator_scope: Optional generator variable scope. Useful if you want to
reuse a subgraph that has already been created.
discriminator_scope: Optional discriminator variable scope. Useful if you
want to reuse a subgraph that has already been created.
check_shapes: If `True`, check that generator produces Tensors that are the
same shape as real data. Otherwise, skip this check.
Returns:
A ACGANModel namedtuple.
Raises:
ValueError: If the generator outputs a Tensor that isn't the same shape as
`real_data`.
TypeError: If the discriminator does not output a tuple consisting of
(discrimination logits, classification logits).
"""
# Create models
with variable_scope.variable_scope(generator_scope) as gen_scope:
generator_inputs = _convert_tensor_or_l_or_d(generator_inputs)
generated_data = generator_fn(generator_inputs)
with variable_scope.variable_scope(discriminator_scope) as dis_scope:
with ops.name_scope(dis_scope.name + '/generated/'):
(discriminator_gen_outputs, discriminator_gen_classification_logits
) = _validate_acgan_discriminator_outputs(
discriminator_fn(generated_data, generator_inputs))
with variable_scope.variable_scope(dis_scope, reuse=True):
with ops.name_scope(dis_scope.name + '/real/'):
real_data = ops.convert_to_tensor(real_data)
(discriminator_real_outputs, discriminator_real_classification_logits
) = _validate_acgan_discriminator_outputs(
discriminator_fn(real_data, generator_inputs))
if check_shapes:
if not generated_data.shape.is_compatible_with(real_data.shape):
raise ValueError(
'Generator output shape (%s) must be the same shape as real data '
'(%s).' % (generated_data.shape, real_data.shape))
# Get model-specific variables.
generator_variables = variables_lib.get_trainable_variables(gen_scope)
discriminator_variables = variables_lib.get_trainable_variables(dis_scope)
return namedtuples.ACGANModel(generator_inputs, generated_data,
generator_variables, gen_scope, generator_fn,
real_data, discriminator_real_outputs,
discriminator_gen_outputs,
discriminator_variables, dis_scope,
discriminator_fn, one_hot_labels,
discriminator_real_classification_logits,
discriminator_gen_classification_logits)
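# Hypothetical usage sketch: the only structural difference from `gan_model` is
# that `discriminator_fn` returns a 2-tuple and `one_hot_labels` is required.
# The names `layers`, `my_generator_fn`, `images`, `noise`, `labels`, and
# `num_classes` below are illustrative placeholders:
#
#   def my_acgan_discriminator_fn(data, unused_conditioning):
#     features = layers.flatten(data)
#     return (layers.fully_connected(features, 1),            # real/fake logits
#             layers.fully_connected(features, num_classes))  # class logits
#
#   model = acgan_model(my_generator_fn, my_acgan_discriminator_fn,
#                       real_data=images, generator_inputs=noise,
#                       one_hot_labels=array_ops.one_hot(labels, num_classes))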
def cyclegan_model(
# Lambdas defining models.
generator_fn,
discriminator_fn,
# data X and Y.
data_x,
data_y,
# Optional scopes.
generator_scope='Generator',
discriminator_scope='Discriminator',
model_x2y_scope='ModelX2Y',
model_y2x_scope='ModelY2X',
# Options.
check_shapes=True):
"""Returns a CycleGAN model outputs and variables.
See https://arxiv.org/abs/1703.10593 for more details.
Args:
generator_fn: A python lambda that takes `data_x` or `data_y` as inputs and
returns the outputs of the GAN generator.
discriminator_fn: A python lambda that takes `real_data`/`generated data`
and `generator_inputs`. Outputs a Tensor in the range [-inf, inf].
data_x: A `Tensor` of dataset X. Must be the same shape as `data_y`.
data_y: A `Tensor` of dataset Y. Must be the same shape as `data_x`.
generator_scope: Optional generator variable scope. Useful if you want to
reuse a subgraph that has already been created. Defaults to 'Generator'.
discriminator_scope: Optional discriminator variable scope. Useful if you
want to reuse a subgraph that has already been created. Defaults to
'Discriminator'.
model_x2y_scope: Optional variable scope for model x2y variables. Defaults
to 'ModelX2Y'.
model_y2x_scope: Optional variable scope for model y2x variables. Defaults
to 'ModelY2X'.
check_shapes: If `True`, check that generator produces Tensors that are the
same shape as `data_x` (`data_y`). Otherwise, skip this check.
Returns:
A `CycleGANModel` namedtuple.
Raises:
ValueError: If `check_shapes` is True and `data_x` or the generator output
does not have the same shape as `data_y`.
"""
# Create models.
def _define_partial_model(input_data, output_data):
return gan_model(
generator_fn=generator_fn,
discriminator_fn=discriminator_fn,
real_data=output_data,
generator_inputs=input_data,
generator_scope=generator_scope,
discriminator_scope=discriminator_scope,
check_shapes=check_shapes)
with variable_scope.variable_scope(model_x2y_scope):
model_x2y = _define_partial_model(data_x, data_y)
with variable_scope.variable_scope(model_y2x_scope):
model_y2x = _define_partial_model(data_y, data_x)
with variable_scope.variable_scope(model_y2x.generator_scope, reuse=True):
reconstructed_x = model_y2x.generator_fn(model_x2y.generated_data)
with variable_scope.variable_scope(model_x2y.generator_scope, reuse=True):
reconstructed_y = model_x2y.generator_fn(model_y2x.generated_data)
return namedtuples.CycleGANModel(model_x2y, model_y2x, reconstructed_x,
reconstructed_y)
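# Hypothetical sketch: a single `generator_fn` / `discriminator_fn` pair is
# reused for both the X->Y and Y->X directions. `my_generator_fn`,
# `my_discriminator_fn`, `images_x`, and `images_y` are placeholders:
#
#   model = cyclegan_model(my_generator_fn, my_discriminator_fn,
#                          data_x=images_x, data_y=images_y)
#
# `model.model_x2y` and `model.model_y2x` are ordinary GANModel namedtuples,
# while `model.reconstructed_x` / `model.reconstructed_y` close the cycle.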
def stargan_model(generator_fn,
discriminator_fn,
input_data,
input_data_domain_label,
generator_scope='Generator',
discriminator_scope='Discriminator'):
"""Returns a StarGAN model outputs and variables.
See https://arxiv.org/abs/1711.09020 for more details.
Args:
    generator_fn: A python lambda that takes `inputs` and `targets` as inputs
      and returns `generated_data` as the transformed version of `inputs`
      conditioned on `targets`. `inputs` has shape (n, h, w, c), `targets` has
      shape (n, num_domains), and `generated_data` has the same shape as
      `inputs`.
discriminator_fn: A python lambda that takes `inputs` and `num_domains` as
inputs and returns a tuple (`source_prediction`, `domain_prediction`).
`source_prediction` represents the source(real/generated) prediction by
the discriminator, and `domain_prediction` represents the domain
prediction/classification by the discriminator. `source_prediction` has
shape (n) and `domain_prediction` has shape (n, num_domains).
    input_data: Tensor or a list of Tensors of shape (n, h, w, c) representing
      the real input images.
    input_data_domain_label: Tensor or a list of Tensors of shape (batch_size,
      num_domains) representing the domain labels associated with the real
      images.
generator_scope: Optional generator variable scope. Useful if you want to
reuse a subgraph that has already been created.
discriminator_scope: Optional discriminator variable scope. Useful if you
want to reuse a subgraph that has already been created.
Returns:
    A StarGANModel namedtuple containing the tensors that are needed to
    compute the loss.
  Raises:
    ValueError: If the shape of `input_data_domain_label` is not rank 2 or is
      not fully defined in every dimension.
"""
# Convert to tensor.
input_data = _convert_tensor_or_l_or_d(input_data)
input_data_domain_label = _convert_tensor_or_l_or_d(input_data_domain_label)
# Convert list of tensor to a single tensor if applicable.
if isinstance(input_data, (list, tuple)):
input_data = array_ops.concat(
[ops.convert_to_tensor(x) for x in input_data], 0)
if isinstance(input_data_domain_label, (list, tuple)):
input_data_domain_label = array_ops.concat(
[ops.convert_to_tensor(x) for x in input_data_domain_label], 0)
# Get batch_size, num_domains from the labels.
input_data_domain_label.shape.assert_has_rank(2)
input_data_domain_label.shape.assert_is_fully_defined()
batch_size, num_domains = input_data_domain_label.shape.as_list()
# Transform input_data to random target domains.
with variable_scope.variable_scope(generator_scope) as generator_scope:
generated_data_domain_target = _generate_stargan_random_domain_target(
batch_size, num_domains)
generated_data = generator_fn(input_data, generated_data_domain_target)
# Transform generated_data back to the original input_data domain.
with variable_scope.variable_scope(generator_scope, reuse=True):
reconstructed_data = generator_fn(generated_data, input_data_domain_label)
# Predict source and domain for the generated_data using the discriminator.
with variable_scope.variable_scope(
discriminator_scope) as discriminator_scope:
disc_gen_data_source_pred, disc_gen_data_domain_pred = discriminator_fn(
generated_data, num_domains)
# Predict source and domain for the input_data using the discriminator.
with variable_scope.variable_scope(discriminator_scope, reuse=True):
disc_input_data_source_pred, disc_input_data_domain_pred = discriminator_fn(
input_data, num_domains)
# Collect trainable variables from the neural networks.
generator_variables = variables_lib.get_trainable_variables(generator_scope)
discriminator_variables = variables_lib.get_trainable_variables(
discriminator_scope)
# Create the StarGANModel namedtuple.
return namedtuples.StarGANModel(
input_data=input_data,
input_data_domain_label=input_data_domain_label,
generated_data=generated_data,
generated_data_domain_target=generated_data_domain_target,
reconstructed_data=reconstructed_data,
discriminator_input_data_source_predication=disc_input_data_source_pred,
discriminator_generated_data_source_predication=disc_gen_data_source_pred,
discriminator_input_data_domain_predication=disc_input_data_domain_pred,
discriminator_generated_data_domain_predication=disc_gen_data_domain_pred,
generator_variables=generator_variables,
generator_scope=generator_scope,
generator_fn=generator_fn,
discriminator_variables=discriminator_variables,
discriminator_scope=discriminator_scope,
discriminator_fn=discriminator_fn)
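# Hypothetical sketch (placeholder names): `input_data` and
# `input_data_domain_label` may each be a single Tensor or a list of
# per-domain Tensors, which are concatenated along the batch dimension above.
#
#   model = stargan_model(my_stargan_generator_fn,
#                         my_stargan_discriminator_fn,
#                         input_data=images,                # (n, h, w, c)
#                         input_data_domain_label=labels)   # (n, num_domains)
#   loss = stargan_loss(model)  # see `stargan_loss` below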
def _validate_aux_loss_weight(aux_loss_weight, name='aux_loss_weight'):
if isinstance(aux_loss_weight, ops.Tensor):
aux_loss_weight.shape.assert_is_compatible_with([])
with ops.control_dependencies(
[check_ops.assert_greater_equal(aux_loss_weight, 0.0)]):
aux_loss_weight = array_ops.identity(aux_loss_weight)
elif aux_loss_weight is not None and aux_loss_weight < 0:
    raise ValueError('`%s` must be non-negative. Instead, was %s' %
                     (name, aux_loss_weight))
return aux_loss_weight
def _use_aux_loss(aux_loss_weight):
if aux_loss_weight is not None:
if not isinstance(aux_loss_weight, ops.Tensor):
return aux_loss_weight > 0
else:
return True
else:
return False
def _tensor_pool_adjusted_model(model, tensor_pool_fn):
"""Adjusts model using `tensor_pool_fn`.
Args:
model: A GANModel tuple.
tensor_pool_fn: A function that takes (generated_data, generator_inputs),
stores them in an internal pool and returns a previously stored
(generated_data, generator_inputs) with some probability. For example
tfgan.features.tensor_pool.
  Returns:
    A new GANModel tuple where the discriminator outputs are computed from
    pooled generator outputs instead of the current ones.
Raises:
ValueError: If tensor pool does not support the `model`.
"""
if isinstance(model, namedtuples.GANModel):
pooled_generator_inputs, pooled_generated_data = tensor_pool_fn(
(model.generator_inputs, model.generated_data))
with variable_scope.variable_scope(model.discriminator_scope, reuse=True):
dis_gen_outputs = model.discriminator_fn(pooled_generated_data,
pooled_generator_inputs)
return model._replace(
generator_inputs=pooled_generator_inputs,
generated_data=pooled_generated_data,
discriminator_gen_outputs=dis_gen_outputs)
elif isinstance(model, namedtuples.ACGANModel):
pooled_generator_inputs, pooled_generated_data = tensor_pool_fn(
(model.generator_inputs, model.generated_data))
with variable_scope.variable_scope(model.discriminator_scope, reuse=True):
(pooled_discriminator_gen_outputs,
pooled_discriminator_gen_classification_logits) = model.discriminator_fn(
pooled_generated_data, pooled_generator_inputs)
return model._replace(
generator_inputs=pooled_generator_inputs,
generated_data=pooled_generated_data,
discriminator_gen_outputs=pooled_discriminator_gen_outputs,
discriminator_gen_classification_logits=pooled_discriminator_gen_classification_logits # pylint: disable=line-too-long
)
elif isinstance(model, namedtuples.InfoGANModel):
pooled_generator_inputs, pooled_generated_data, pooled_structured_input = (
tensor_pool_fn((model.generator_inputs, model.generated_data,
model.structured_generator_inputs)))
with variable_scope.variable_scope(model.discriminator_scope, reuse=True):
(pooled_discriminator_gen_outputs,
pooled_predicted_distributions) = model.discriminator_and_aux_fn(
pooled_generated_data, pooled_generator_inputs)
return model._replace(
generator_inputs=pooled_generator_inputs,
generated_data=pooled_generated_data,
structured_generator_inputs=pooled_structured_input,
discriminator_gen_outputs=pooled_discriminator_gen_outputs,
predicted_distributions=pooled_predicted_distributions)
else:
raise ValueError('Tensor pool does not support `model`: %s.' % type(model))
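# For illustration only: a typical `tensor_pool_fn` can be built from the
# public tensor-pool feature (assuming the alias `tfgan = tf.contrib.gan`):
#
#   pool_fn = lambda values: tfgan.features.tensor_pool(values, pool_size=50)
#   pooled_model = _tensor_pool_adjusted_model(model, pool_fn)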
def gan_loss(
# GANModel.
model,
# Loss functions.
generator_loss_fn=tfgan_losses.wasserstein_generator_loss,
discriminator_loss_fn=tfgan_losses.wasserstein_discriminator_loss,
# Auxiliary losses.
gradient_penalty_weight=None,
gradient_penalty_epsilon=1e-10,
gradient_penalty_target=1.0,
gradient_penalty_one_sided=False,
mutual_information_penalty_weight=None,
aux_cond_generator_weight=None,
aux_cond_discriminator_weight=None,
tensor_pool_fn=None,
# Options.
add_summaries=True):
"""Returns losses necessary to train generator and discriminator.
Args:
model: A GANModel tuple.
generator_loss_fn: The loss function on the generator. Takes a GANModel
tuple.
discriminator_loss_fn: The loss function on the discriminator. Takes a
GANModel tuple.
gradient_penalty_weight: If not `None`, must be a non-negative Python number
or Tensor indicating how much to weight the gradient penalty. See
https://arxiv.org/pdf/1704.00028.pdf for more details.
gradient_penalty_epsilon: If `gradient_penalty_weight` is not None, the
small positive value used by the gradient penalty function for numerical
stability. Note some applications will need to increase this value to
avoid NaNs.
gradient_penalty_target: If `gradient_penalty_weight` is not None, a Python
number or `Tensor` indicating the target value of gradient norm. See the
CIFAR10 section of https://arxiv.org/abs/1710.10196. Defaults to 1.0.
gradient_penalty_one_sided: If `True`, penalty proposed in
https://arxiv.org/abs/1709.08894 is used. Defaults to `False`.
mutual_information_penalty_weight: If not `None`, must be a non-negative
Python number or Tensor indicating how much to weight the mutual
information penalty. See https://arxiv.org/abs/1606.03657 for more
details.
    aux_cond_generator_weight: If not `None`, adds an auxiliary classification
      loss for the generator as in https://arxiv.org/abs/1610.09585.
    aux_cond_discriminator_weight: If not `None`, adds an auxiliary
      classification loss for the discriminator as in
      https://arxiv.org/abs/1610.09585.
    tensor_pool_fn: A function that takes (generated_data, generator_inputs),
      stores them in an internal pool and returns previously stored
      (generated_data, generator_inputs). For example
      `tfgan.features.tensor_pool`. Defaults to None (not using a tensor pool).
add_summaries: Whether or not to add summaries for the losses.
Returns:
A GANLoss 2-tuple of (generator_loss, discriminator_loss). Includes
regularization losses.
Raises:
ValueError: If any of the auxiliary loss weights is provided and negative.
ValueError: If `mutual_information_penalty_weight` is provided, but the
`model` isn't an `InfoGANModel`.
"""
# Validate arguments.
gradient_penalty_weight = _validate_aux_loss_weight(
gradient_penalty_weight, 'gradient_penalty_weight')
mutual_information_penalty_weight = _validate_aux_loss_weight(
mutual_information_penalty_weight, 'infogan_weight')
aux_cond_generator_weight = _validate_aux_loss_weight(
aux_cond_generator_weight, 'aux_cond_generator_weight')
aux_cond_discriminator_weight = _validate_aux_loss_weight(
aux_cond_discriminator_weight, 'aux_cond_discriminator_weight')
# Verify configuration for mutual information penalty
if (_use_aux_loss(mutual_information_penalty_weight) and
not isinstance(model, namedtuples.InfoGANModel)):
raise ValueError(
'When `mutual_information_penalty_weight` is provided, `model` must be '
'an `InfoGANModel`. Instead, was %s.' % type(model))
  # Verify configuration for the auxiliary condition loss (ACGAN).
if ((_use_aux_loss(aux_cond_generator_weight) or
_use_aux_loss(aux_cond_discriminator_weight)) and
not isinstance(model, namedtuples.ACGANModel)):
raise ValueError(
'When `aux_cond_generator_weight` or `aux_cond_discriminator_weight` '
'is provided, `model` must be an `ACGANModel`. Instead, was %s.' %
type(model))
# Optionally create pooled model.
if tensor_pool_fn:
pooled_model = _tensor_pool_adjusted_model(model, tensor_pool_fn)
else:
pooled_model = model
# Create standard losses.
gen_loss = generator_loss_fn(model, add_summaries=add_summaries)
dis_loss = discriminator_loss_fn(pooled_model, add_summaries=add_summaries)
# Add optional extra losses.
if _use_aux_loss(gradient_penalty_weight):
gp_loss = tfgan_losses.wasserstein_gradient_penalty(
pooled_model,
epsilon=gradient_penalty_epsilon,
target=gradient_penalty_target,
one_sided=gradient_penalty_one_sided,
add_summaries=add_summaries)
dis_loss += gradient_penalty_weight * gp_loss
if _use_aux_loss(mutual_information_penalty_weight):
gen_info_loss = tfgan_losses.mutual_information_penalty(
model, add_summaries=add_summaries)
if tensor_pool_fn is None:
dis_info_loss = gen_info_loss
else:
dis_info_loss = tfgan_losses.mutual_information_penalty(
pooled_model, add_summaries=add_summaries)
gen_loss += mutual_information_penalty_weight * gen_info_loss
dis_loss += mutual_information_penalty_weight * dis_info_loss
if _use_aux_loss(aux_cond_generator_weight):
ac_gen_loss = tfgan_losses.acgan_generator_loss(
model, add_summaries=add_summaries)
gen_loss += aux_cond_generator_weight * ac_gen_loss
if _use_aux_loss(aux_cond_discriminator_weight):
ac_disc_loss = tfgan_losses.acgan_discriminator_loss(
pooled_model, add_summaries=add_summaries)
dis_loss += aux_cond_discriminator_weight * ac_disc_loss
# Gathers auxiliary losses.
if model.generator_scope:
gen_reg_loss = losses.get_regularization_loss(model.generator_scope.name)
else:
gen_reg_loss = 0
if model.discriminator_scope:
dis_reg_loss = losses.get_regularization_loss(
model.discriminator_scope.name)
else:
dis_reg_loss = 0
return namedtuples.GANLoss(gen_loss + gen_reg_loss, dis_loss + dis_reg_loss)
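# Hypothetical sketch combining the options above. `model` is any GANModel
# built earlier and `pool_fn` is an optional tensor-pool function such as the
# one sketched after `_tensor_pool_adjusted_model`:
#
#   loss = gan_loss(model,
#                   gradient_penalty_weight=1.0,
#                   tensor_pool_fn=pool_fn,
#                   add_summaries=True)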
def cyclegan_loss(
model,
# Loss functions.
generator_loss_fn=tfgan_losses.least_squares_generator_loss,
discriminator_loss_fn=tfgan_losses.least_squares_discriminator_loss,
# Auxiliary losses.
cycle_consistency_loss_fn=tfgan_losses.cycle_consistency_loss,
cycle_consistency_loss_weight=10.0,
# Options
**kwargs):
"""Returns the losses for a `CycleGANModel`.
See https://arxiv.org/abs/1703.10593 for more details.
Args:
model: A `CycleGANModel` namedtuple.
generator_loss_fn: The loss function on the generator. Takes a `GANModel`
named tuple.
discriminator_loss_fn: The loss function on the discriminator. Takes a
`GANModel` namedtuple.
cycle_consistency_loss_fn: The cycle consistency loss function. Takes a
`CycleGANModel` namedtuple.
cycle_consistency_loss_weight: A non-negative Python number or a scalar
`Tensor` indicating how much to weigh the cycle consistency loss.
**kwargs: Keyword args to pass directly to `gan_loss` to construct the loss
for each partial model of `model`.
Returns:
A `CycleGANLoss` namedtuple.
Raises:
ValueError: If `model` is not a `CycleGANModel` namedtuple.
"""
# Sanity checks.
if not isinstance(model, namedtuples.CycleGANModel):
raise ValueError('`model` must be a `CycleGANModel`. Instead, was %s.' %
type(model))
# Defines cycle consistency loss.
cycle_consistency_loss = cycle_consistency_loss_fn(
model, add_summaries=kwargs.get('add_summaries', True))
cycle_consistency_loss_weight = _validate_aux_loss_weight(
cycle_consistency_loss_weight, 'cycle_consistency_loss_weight')
aux_loss = cycle_consistency_loss_weight * cycle_consistency_loss
# Defines losses for each partial model.
def _partial_loss(partial_model):
partial_loss = gan_loss(
partial_model,
generator_loss_fn=generator_loss_fn,
discriminator_loss_fn=discriminator_loss_fn,
**kwargs)
return partial_loss._replace(generator_loss=partial_loss.generator_loss +
aux_loss)
with ops.name_scope('cyclegan_loss_x2y'):
loss_x2y = _partial_loss(model.model_x2y)
with ops.name_scope('cyclegan_loss_y2x'):
loss_y2x = _partial_loss(model.model_y2x)
return namedtuples.CycleGANLoss(loss_x2y, loss_y2x)
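# Hypothetical sketch: `cyclegan_loss` consumes the CycleGANModel built by
# `cyclegan_model` and forwards extra keyword args to `gan_loss` for each
# partial model:
#
#   cyclegan_loss_tuple = cyclegan_loss(model,
#                                       cycle_consistency_loss_weight=10.0,
#                                       gradient_penalty_weight=1.0)
#
# `cyclegan_loss_tuple.loss_x2y` and `.loss_y2x` are ordinary GANLoss tuples.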
def stargan_loss(
model,
generator_loss_fn=tfgan_losses.stargan_generator_loss_wrapper(
tfgan_losses_impl.wasserstein_generator_loss),
discriminator_loss_fn=tfgan_losses.stargan_discriminator_loss_wrapper(
tfgan_losses_impl.wasserstein_discriminator_loss),
gradient_penalty_weight=10.0,
gradient_penalty_epsilon=1e-10,
gradient_penalty_target=1.0,
gradient_penalty_one_sided=False,
reconstruction_loss_fn=losses.absolute_difference,
reconstruction_loss_weight=10.0,
classification_loss_fn=losses.softmax_cross_entropy,
classification_loss_weight=1.0,
classification_one_hot=True,
add_summaries=True):
"""StarGAN Loss.
Args:
model: (StarGAN) Model output of the stargan_model() function call.
generator_loss_fn: The loss function on the generator. Takes a
`StarGANModel` named tuple.
discriminator_loss_fn: The loss function on the discriminator. Takes a
`StarGANModel` namedtuple.
    gradient_penalty_weight: (float) Gradient penalty weight. Defaults to 10
      per the original paper, https://arxiv.org/abs/1711.09020. Set to 0 or
      `None` to turn off the gradient penalty.
gradient_penalty_epsilon: (float) A small positive number added for
numerical stability when computing the gradient norm.
gradient_penalty_target: (float, or tf.float `Tensor`) The target value of
gradient norm. Defaults to 1.0.
gradient_penalty_one_sided: (bool) If `True`, penalty proposed in
https://arxiv.org/abs/1709.08894 is used. Defaults to `False`.
    reconstruction_loss_fn: The reconstruction loss function. Defaults to an
      L1-norm, and the function must conform to the `tf.losses` API.
    reconstruction_loss_weight: Reconstruction loss weight. Defaults to 10.0.
    classification_loss_fn: The loss function on the discriminator's ability to
      classify the domain of the input. Defaults to one-hot softmax cross
      entropy loss, and the function must conform to the `tf.losses` API.
    classification_loss_weight: (float) Classification loss weight. Defaults to
      1.0.
    classification_one_hot: (bool) Whether the labels are in one-hot
      representation. Defaults to `True`. If `False`, `classification_loss_fn`
      needs to be a sigmoid cross entropy loss instead.
    add_summaries: (bool) Whether to add the losses to the summaries.
  Returns:
    A GANLoss namedtuple containing the generator loss and the discriminator
    loss.
  Raises:
    ValueError: If the input StarGANModel.input_data_domain_label does not have
      rank 2, or its second dimension is not defined.
"""
def _classification_loss_helper(true_labels, predict_logits, scope_name):
"""Classification Loss Function Helper.
Args:
      true_labels: Tensor of shape [batch_size, num_domains] representing the
        labels, where each row is a one-hot vector.
      predict_logits: Tensor of shape [batch_size, num_domains] representing
        the predicted label logits, i.e. the unscaled output of the network.
scope_name: (string) Name scope of the loss component.
Returns:
Single scalar tensor representing the classification loss.
"""
with ops.name_scope(scope_name, values=(true_labels, predict_logits)):
loss = classification_loss_fn(
onehot_labels=true_labels, logits=predict_logits)
if not classification_one_hot:
loss = math_ops.reduce_sum(loss, axis=1)
loss = math_ops.reduce_mean(loss)
if add_summaries:
summary.scalar(scope_name, loss)
return loss
# Check input shape.
model.input_data_domain_label.shape.assert_has_rank(2)
model.input_data_domain_label.shape[1:].assert_is_fully_defined()
# Adversarial Loss.
generator_loss = generator_loss_fn(model, add_summaries=add_summaries)
discriminator_loss = discriminator_loss_fn(model, add_summaries=add_summaries)
# Gradient Penalty.
if _use_aux_loss(gradient_penalty_weight):
gradient_penalty_fn = tfgan_losses.stargan_gradient_penalty_wrapper(
tfgan_losses_impl.wasserstein_gradient_penalty)
discriminator_loss += gradient_penalty_fn(
model,
epsilon=gradient_penalty_epsilon,
target=gradient_penalty_target,
one_sided=gradient_penalty_one_sided,
add_summaries=add_summaries) * gradient_penalty_weight
# Reconstruction Loss.
reconstruction_loss = reconstruction_loss_fn(model.input_data,
model.reconstructed_data)
generator_loss += reconstruction_loss * reconstruction_loss_weight
if add_summaries:
summary.scalar('reconstruction_loss', reconstruction_loss)
# Classification Loss.
generator_loss += _classification_loss_helper(
true_labels=model.generated_data_domain_target,
predict_logits=model.discriminator_generated_data_domain_predication,
scope_name='generator_classification_loss') * classification_loss_weight
discriminator_loss += _classification_loss_helper(
true_labels=model.input_data_domain_label,
predict_logits=model.discriminator_input_data_domain_predication,
scope_name='discriminator_classification_loss'
) * classification_loss_weight
return namedtuples.GANLoss(generator_loss, discriminator_loss)
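# Illustrative usage sketch (not part of the original file). `stargan_model` is
# the model-building helper referenced in the docstring above; the generator
# and discriminator functions and the input tensors below are hypothetical:
#
#   model = stargan_model(
#       generator_fn=my_generator_fn,            # hypothetical
#       discriminator_fn=my_discriminator_fn,    # hypothetical
#       input_data=images,
#       input_data_domain_label=domain_labels)
#   loss = stargan_loss(model)
#   # `loss.generator_loss` and `loss.discriminator_loss` are scalar Tensors
#   # that can be fed to `gan_train_ops` below.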
def _get_update_ops(kwargs, gen_scope, dis_scope, check_for_unused_ops=True):
"""Gets generator and discriminator update ops.
Args:
kwargs: A dictionary of kwargs to be passed to `create_train_op`.
`update_ops` is removed, if present.
gen_scope: A scope for the generator.
dis_scope: A scope for the discriminator.
    check_for_unused_ops: A Python bool. If `True`, throws an exception if
      there are unused update ops.
  Returns:
    A 2-tuple of (generator update ops, discriminator update ops).
Raises:
ValueError: If there are update ops outside of the generator or
discriminator scopes.
"""
if 'update_ops' in kwargs:
update_ops = set(kwargs['update_ops'])
del kwargs['update_ops']
else:
update_ops = set(ops.get_collection(ops.GraphKeys.UPDATE_OPS))
all_gen_ops = set(ops.get_collection(ops.GraphKeys.UPDATE_OPS, gen_scope))
all_dis_ops = set(ops.get_collection(ops.GraphKeys.UPDATE_OPS, dis_scope))
if check_for_unused_ops:
unused_ops = update_ops - all_gen_ops - all_dis_ops
if unused_ops:
raise ValueError('There are unused update ops: %s' % unused_ops)
gen_update_ops = list(all_gen_ops & update_ops)
dis_update_ops = list(all_dis_ops & update_ops)
return gen_update_ops, dis_update_ops
def gan_train_ops(
model,
loss,
generator_optimizer,
discriminator_optimizer,
check_for_unused_update_ops=True,
is_chief=True,
# Optional args to pass directly to the `create_train_op`.
**kwargs):
"""Returns GAN train ops.
The highest-level call in TF-GAN. It is composed of functions that can also
be called, should a user require more control over some part of the GAN
training process.
Args:
model: A GANModel.
loss: A GANLoss.
generator_optimizer: The optimizer for generator updates.
discriminator_optimizer: The optimizer for the discriminator updates.
check_for_unused_update_ops: If `True`, throws an exception if there are
update ops outside of the generator or discriminator scopes.
is_chief: Specifies whether or not the training is being run by the primary
replica during replica training.
**kwargs: Keyword args to pass directly to `training.create_train_op` for
both the generator and discriminator train op.
Returns:
A GANTrainOps tuple of (generator_train_op, discriminator_train_op) that can
be used to train a generator/discriminator pair.
"""
if isinstance(model, namedtuples.CycleGANModel):
    # Get and store all arguments other than model and loss from locals.
    # The contents of locals() should not be modified, and modifying them may
    # not affect the underlying values, so make a copy.
    # https://docs.python.org/2/library/functions.html#locals.
saved_params = dict(locals())
saved_params.pop('model', None)
saved_params.pop('loss', None)
kwargs = saved_params.pop('kwargs', {})
saved_params.update(kwargs)
with ops.name_scope('cyclegan_x2y_train'):
train_ops_x2y = gan_train_ops(model.model_x2y, loss.loss_x2y,
**saved_params)
with ops.name_scope('cyclegan_y2x_train'):
train_ops_y2x = gan_train_ops(model.model_y2x, loss.loss_y2x,
**saved_params)
return namedtuples.GANTrainOps(
(train_ops_x2y.generator_train_op, train_ops_y2x.generator_train_op),
(train_ops_x2y.discriminator_train_op,
train_ops_y2x.discriminator_train_op),
training_util.get_or_create_global_step().assign_add(1))
# Create global step increment op.
global_step = training_util.get_or_create_global_step()
global_step_inc = global_step.assign_add(1)
# Get generator and discriminator update ops. We split them so that update
# ops aren't accidentally run multiple times. For now, throw an error if
# there are update ops that aren't associated with either the generator or
# the discriminator. Might modify the `kwargs` dictionary.
gen_update_ops, dis_update_ops = _get_update_ops(
kwargs, model.generator_scope.name, model.discriminator_scope.name,
check_for_unused_update_ops)
# Get the sync hooks if these are needed.
sync_hooks = []
generator_global_step = None
if isinstance(generator_optimizer,
sync_replicas_optimizer.SyncReplicasOptimizer):
# TODO(joelshor): Figure out a way to get this work without including the
# dummy global step in the checkpoint.
# WARNING: Making this variable a local variable causes sync replicas to
# hang forever.
generator_global_step = variable_scope.get_variable(
'dummy_global_step_generator',
shape=[],
dtype=global_step.dtype.base_dtype,
initializer=init_ops.zeros_initializer(),
trainable=False,
collections=[ops.GraphKeys.GLOBAL_VARIABLES])
gen_update_ops += [generator_global_step.assign(global_step)]
sync_hooks.append(generator_optimizer.make_session_run_hook(is_chief))
with ops.name_scope('generator_train'):
gen_train_op = training.create_train_op(
total_loss=loss.generator_loss,
optimizer=generator_optimizer,
variables_to_train=model.generator_variables,
global_step=generator_global_step,
update_ops=gen_update_ops,
**kwargs)
discriminator_global_step = None
if isinstance(discriminator_optimizer,
sync_replicas_optimizer.SyncReplicasOptimizer):
# See comment above `generator_global_step`.
discriminator_global_step = variable_scope.get_variable(
'dummy_global_step_discriminator',
shape=[],
dtype=global_step.dtype.base_dtype,
initializer=init_ops.zeros_initializer(),
trainable=False,
collections=[ops.GraphKeys.GLOBAL_VARIABLES])
dis_update_ops += [discriminator_global_step.assign(global_step)]
sync_hooks.append(discriminator_optimizer.make_session_run_hook(is_chief))
with ops.name_scope('discriminator_train'):
disc_train_op = training.create_train_op(
total_loss=loss.discriminator_loss,
optimizer=discriminator_optimizer,
variables_to_train=model.discriminator_variables,
global_step=discriminator_global_step,
update_ops=dis_update_ops,
**kwargs)
return namedtuples.GANTrainOps(gen_train_op, disc_train_op, global_step_inc,
sync_hooks)
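# Illustrative sketch (not from the original source): `model` and `loss` are
# assumed to be the outputs of the GANModel/GANLoss helpers in this module, and
# the optimizers are ordinary tf.compat.v1 training optimizers.
#
#   train_ops = gan_train_ops(
#       model,
#       loss,
#       generator_optimizer=tf.compat.v1.train.AdamOptimizer(1e-4, beta1=0.5),
#       discriminator_optimizer=tf.compat.v1.train.AdamOptimizer(1e-4,
#                                                                 beta1=0.5))
#   # train_ops.generator_train_op / train_ops.discriminator_train_op are the
#   # per-network update ops; train_ops.global_step_inc_op bumps the step.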
# TODO(joelshor): Implement a dynamic GAN train loop, as in `Real-Time Adaptive
# Image Compression` (https://arxiv.org/abs/1705.05823)
class RunTrainOpsHook(session_run_hook.SessionRunHook):
"""A hook to run train ops a fixed number of times."""
def __init__(self, train_ops, train_steps):
"""Run train ops a certain number of times.
Args:
train_ops: A train op or iterable of train ops to run.
train_steps: The number of times to run the op(s).
"""
if not isinstance(train_ops, (list, tuple)):
train_ops = [train_ops]
self._train_ops = train_ops
self._train_steps = train_steps
def before_run(self, run_context):
for _ in range(self._train_steps):
run_context.session.run(self._train_ops)
def get_sequential_train_hooks(train_steps=namedtuples.GANTrainSteps(1, 1)):
"""Returns a hooks function for sequential GAN training.
Args:
train_steps: A `GANTrainSteps` tuple that determines how many generator and
discriminator training steps to take.
Returns:
A function that takes a GANTrainOps tuple and returns a list of hooks.
"""
def get_hooks(train_ops):
generator_hook = RunTrainOpsHook(train_ops.generator_train_op,
train_steps.generator_train_steps)
discriminator_hook = RunTrainOpsHook(train_ops.discriminator_train_op,
train_steps.discriminator_train_steps)
return [generator_hook, discriminator_hook] + list(train_ops.train_hooks)
return get_hooks
def _num_joint_steps(train_steps):
g_steps = train_steps.generator_train_steps
d_steps = train_steps.discriminator_train_steps
# Get the number of each type of step that should be run.
num_d_and_g_steps = min(g_steps, d_steps)
num_g_steps = g_steps - num_d_and_g_steps
num_d_steps = d_steps - num_d_and_g_steps
return num_d_and_g_steps, num_g_steps, num_d_steps
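# Worked example (sketch): for train_steps = GANTrainSteps(3, 5),
# _num_joint_steps returns (3, 0, 2): three joint generator+discriminator
# steps, zero generator-only steps, and two discriminator-only steps.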
def get_joint_train_hooks(train_steps=namedtuples.GANTrainSteps(1, 1)):
"""Returns a hooks function for joint GAN training.
When using these train hooks, IT IS RECOMMENDED TO USE `use_locking=True` ON
ALL OPTIMIZERS TO AVOID RACE CONDITIONS.
The order of steps taken is:
1) Combined generator and discriminator steps
2) Generator only steps, if any remain
3) Discriminator only steps, if any remain
  **NOTE**: Unlike `get_sequential_train_hooks`, this method performs updates
  for the generator and discriminator simultaneously whenever possible. This
  reduces the number of `tf.compat.v1.Session` calls, and can also change the
  training semantics.
To illustrate the difference look at the following example:
`train_steps=namedtuples.GANTrainSteps(3, 5)` will cause
`get_sequential_train_hooks` to make 8 session calls:
1) 3 generator steps
2) 5 discriminator steps
  In contrast, `get_joint_train_hooks` will make 5 session calls:
1) 3 generator + discriminator steps
2) 2 discriminator steps
Args:
train_steps: A `GANTrainSteps` tuple that determines how many generator and
discriminator training steps to take.
Returns:
A function that takes a GANTrainOps tuple and returns a list of hooks.
"""
num_d_and_g_steps, num_g_steps, num_d_steps = _num_joint_steps(train_steps)
def get_hooks(train_ops):
g_op = train_ops.generator_train_op
d_op = train_ops.discriminator_train_op
joint_hook = RunTrainOpsHook([g_op, d_op], num_d_and_g_steps)
g_hook = RunTrainOpsHook(g_op, num_g_steps)
d_hook = RunTrainOpsHook(d_op, num_d_steps)
return [joint_hook, g_hook, d_hook] + list(train_ops.train_hooks)
return get_hooks
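# Usage sketch (illustrative; `train_ops` is assumed to come from
# `gan_train_ops`):
#
#   hooks_fn = get_joint_train_hooks(namedtuples.GANTrainSteps(3, 5))
#   hooks = hooks_fn(train_ops)
#   # Per training step this runs 3 joint [g_op, d_op] calls plus 2 d_op-only
#   # calls (5 session calls), versus 8 with `get_sequential_train_hooks`.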
# TODO(joelshor): This function currently returns the global step. Find a
# good way for it to return the generator, discriminator, and final losses.
def gan_train(train_ops,
logdir,
get_hooks_fn=get_sequential_train_hooks(),
master='',
is_chief=True,
scaffold=None,
hooks=None,
chief_only_hooks=None,
save_checkpoint_secs=600,
save_summaries_steps=100,
config=None):
"""A wrapper around `contrib.training.train` that uses GAN hooks.
Args:
train_ops: A GANTrainOps named tuple.
logdir: The directory where the graph and checkpoints are saved.
get_hooks_fn: A function that takes a GANTrainOps tuple and returns a list
of hooks.
master: The URL of the master.
is_chief: Specifies whether or not the training is being run by the primary
replica during replica training.
    scaffold: A `tf.compat.v1.train.Scaffold` instance.
hooks: List of `tf.estimator.SessionRunHook` callbacks which are run inside
the training loop.
chief_only_hooks: List of `tf.estimator.SessionRunHook` instances which are
run inside the training loop for the chief trainer only.
save_checkpoint_secs: The frequency, in seconds, that a checkpoint is saved
using a default checkpoint saver. If `save_checkpoint_secs` is set to
`None`, then the default checkpoint saver isn't used.
save_summaries_steps: The frequency, in number of global steps, that the
summaries are written to disk using a default summary saver. If
`save_summaries_steps` is set to `None`, then the default summary saver
isn't used.
config: An instance of `tf.compat.v1.ConfigProto`.
Returns:
Output of the call to `training.train`.
"""
new_hooks = get_hooks_fn(train_ops)
if hooks is not None:
hooks = list(hooks) + list(new_hooks)
else:
hooks = new_hooks
return training.train(
train_ops.global_step_inc_op,
logdir,
master=master,
is_chief=is_chief,
scaffold=scaffold,
hooks=hooks,
chief_only_hooks=chief_only_hooks,
save_checkpoint_secs=save_checkpoint_secs,
save_summaries_steps=save_summaries_steps,
config=config)
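# End-to-end sketch (illustrative, not from the original source; the logdir is
# a hypothetical path and `train_ops` is assumed to come from `gan_train_ops`):
#
#   gan_train(
#       train_ops,
#       logdir='/tmp/gan_logdir',
#       get_hooks_fn=get_joint_train_hooks(),
#       save_checkpoint_secs=300)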
def get_sequential_train_steps(train_steps=namedtuples.GANTrainSteps(1, 1)):
"""Returns a thin wrapper around slim.learning.train_step, for GANs.
This function is to provide support for the Supervisor. For new code, please
use `MonitoredSession` and `get_sequential_train_hooks`.
Args:
train_steps: A `GANTrainSteps` tuple that determines how many generator and
discriminator training steps to take.
Returns:
A function that can be used for `train_step_fn` for GANs.
"""
def sequential_train_steps(sess, train_ops, global_step, train_step_kwargs):
"""A thin wrapper around slim.learning.train_step, for GANs.
Args:
      sess: A TensorFlow session.
train_ops: A GANTrainOps tuple of train ops to run.
global_step: The global step.
train_step_kwargs: Dictionary controlling `train_step` behavior.
Returns:
A scalar final loss and a bool whether or not the train loop should stop.
"""
# Only run `should_stop` at the end, if required. Make a local copy of
# `train_step_kwargs`, if necessary, so as not to modify the caller's
# dictionary.
should_stop_op, train_kwargs = None, train_step_kwargs
if 'should_stop' in train_step_kwargs:
should_stop_op = train_step_kwargs['should_stop']
train_kwargs = train_step_kwargs.copy()
del train_kwargs['should_stop']
# Run generator training steps.
gen_loss = 0
for _ in range(train_steps.generator_train_steps):
cur_gen_loss, _ = slim_learning.train_step(sess,
train_ops.generator_train_op,
global_step, train_kwargs)
gen_loss += cur_gen_loss
# Run discriminator training steps.
dis_loss = 0
for _ in range(train_steps.discriminator_train_steps):
cur_dis_loss, _ = slim_learning.train_step(
sess, train_ops.discriminator_train_op, global_step, train_kwargs)
dis_loss += cur_dis_loss
sess.run(train_ops.global_step_inc_op)
# Run the `should_stop` op after the global step has been incremented, so
# that the `should_stop` aligns with the proper `global_step` count.
if should_stop_op is not None:
should_stop = sess.run(should_stop_op)
else:
should_stop = False
return gen_loss + dis_loss, should_stop
return sequential_train_steps
# Helpers
def _convert_tensor_or_l_or_d(tensor_or_l_or_d):
"""Convert input, list of inputs, or dictionary of inputs to Tensors."""
if isinstance(tensor_or_l_or_d, (list, tuple)):
return [ops.convert_to_tensor(x) for x in tensor_or_l_or_d]
elif isinstance(tensor_or_l_or_d, dict):
return {k: ops.convert_to_tensor(v) for k, v in tensor_or_l_or_d.items()}
else:
return ops.convert_to_tensor(tensor_or_l_or_d)
def _validate_distributions(distributions_l, noise_l):
if not isinstance(distributions_l, (tuple, list)):
raise ValueError('`predicted_distributions` must be a list. Instead, found '
'%s.' % type(distributions_l))
if len(distributions_l) != len(noise_l):
raise ValueError('Length of `predicted_distributions` %i must be the same '
'as the length of structured noise %i.' %
(len(distributions_l), len(noise_l)))
def _validate_acgan_discriminator_outputs(discriminator_output):
try:
a, b = discriminator_output
except (TypeError, ValueError):
raise TypeError(
'A discriminator function for ACGAN must output a tuple '
'consisting of (discrimination logits, classification logits).')
return a, b
def _generate_stargan_random_domain_target(batch_size, num_domains):
"""Generate random domain label.
Args:
    batch_size: (int) Number of random domain labels to generate.
    num_domains: (int) Number of domains represented by the labels.
  Returns:
    Tensor of shape (batch_size, num_domains) representing random one-hot
    domain labels.
"""
domain_idx = random_ops.random_uniform([batch_size],
minval=0,
maxval=num_domains,
dtype=dtypes.int32)
return array_ops.one_hot(domain_idx, num_domains)
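# Example (sketch): _generate_stargan_random_domain_target(4, 3) returns a
# (4, 3) float Tensor of one-hot rows such as
# [[0, 1, 0], [1, 0, 0], [0, 0, 1], [0, 1, 0]], with the hot index drawn
# uniformly at random per row.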
| tensorflow-master | tensorflow/contrib/gan/python/train.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Named tuples for TF-GAN.
TF-GAN training occurs in four steps, and each step communicates with the next
step via one of these named tuples. At each step, you can either use a TF-GAN
helper function in `train.py`, or you can manually construct a tuple.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
__all__ = [
'GANModel',
'InfoGANModel',
'ACGANModel',
'CycleGANModel',
'StarGANModel',
'GANLoss',
'CycleGANLoss',
'GANTrainOps',
'GANTrainSteps',
]
class GANModel(
collections.namedtuple('GANModel', (
'generator_inputs',
'generated_data',
'generator_variables',
'generator_scope',
'generator_fn',
'real_data',
'discriminator_real_outputs',
'discriminator_gen_outputs',
'discriminator_variables',
'discriminator_scope',
'discriminator_fn',
))):
"""A GANModel contains all the pieces needed for GAN training.
Generative Adversarial Networks (https://arxiv.org/abs/1406.2661) attempt
to create an implicit generative model of data by solving a two agent game.
The generator generates candidate examples that are supposed to match the
data distribution, and the discriminator aims to tell the real examples
apart from the generated samples.
Args:
generator_inputs: The random noise source that acts as input to the
generator.
generated_data: The generated output data of the GAN.
generator_variables: A list of all generator variables.
generator_scope: Variable scope all generator variables live in.
generator_fn: The generator function.
    real_data: A tensor of real data.
discriminator_real_outputs: The discriminator's output on real data.
discriminator_gen_outputs: The discriminator's output on generated data.
discriminator_variables: A list of all discriminator variables.
discriminator_scope: Variable scope all discriminator variables live in.
discriminator_fn: The discriminator function.
"""
# TODO(joelshor): Have this class inherit from `GANModel`.
class InfoGANModel(
collections.namedtuple('InfoGANModel', GANModel._fields + (
'structured_generator_inputs',
'predicted_distributions',
'discriminator_and_aux_fn',
))):
"""An InfoGANModel contains all the pieces needed for InfoGAN training.
See https://arxiv.org/abs/1606.03657 for more details.
Args:
structured_generator_inputs: A list of Tensors representing the random noise
that must have high mutual information with the generator output. List
length should match `predicted_distributions`.
predicted_distributions: A list of `tfp.distributions.Distribution`s.
Predicted by the recognizer, and used to evaluate the likelihood of the
structured noise. List length should match `structured_generator_inputs`.
discriminator_and_aux_fn: The original discriminator function that returns
a tuple of (logits, `predicted_distributions`).
"""
class ACGANModel(
collections.namedtuple('ACGANModel', GANModel._fields +
('one_hot_labels',
'discriminator_real_classification_logits',
'discriminator_gen_classification_logits',))):
"""An ACGANModel contains all the pieces needed for ACGAN training.
See https://arxiv.org/abs/1610.09585 for more details.
Args:
one_hot_labels: A Tensor holding one-hot-labels for the batch.
discriminator_real_classification_logits: Classification logits for real
data.
discriminator_gen_classification_logits: Classification logits for generated
data.
"""
class CycleGANModel(
collections.namedtuple(
'CycleGANModel',
('model_x2y', 'model_y2x', 'reconstructed_x', 'reconstructed_y'))):
"""An CycleGANModel contains all the pieces needed for CycleGAN training.
The model `model_x2y` generator F maps data set X to Y, while the model
`model_y2x` generator G maps data set Y to X.
See https://arxiv.org/abs/1703.10593 for more details.
Args:
model_x2y: A `GANModel` namedtuple whose generator maps data set X to Y.
model_y2x: A `GANModel` namedtuple whose generator maps data set Y to X.
reconstructed_x: A `Tensor` of reconstructed data X which is G(F(X)).
reconstructed_y: A `Tensor` of reconstructed data Y which is F(G(Y)).
"""
class StarGANModel(
collections.namedtuple('StarGANModel', (
'input_data',
'input_data_domain_label',
'generated_data',
'generated_data_domain_target',
'reconstructed_data',
'discriminator_input_data_source_predication',
'discriminator_generated_data_source_predication',
'discriminator_input_data_domain_predication',
'discriminator_generated_data_domain_predication',
'generator_variables',
'generator_scope',
'generator_fn',
'discriminator_variables',
'discriminator_scope',
'discriminator_fn',
))):
"""A StarGANModel contains all the pieces needed for StarGAN training.
Args:
input_data: The real images that need to be transferred by the generator.
input_data_domain_label: The real domain labels associated with the real
images.
generated_data: The generated images produced by the generator. It has the
same shape as the input_data.
generated_data_domain_target: The target domain that the generated images
belong to. It has the same shape as the input_data_domain_label.
    reconstructed_data: The reconstructed images produced by the generator,
      i.e. reconstructed_data = G(G(input_data, generated_data_domain_target),
      input_data_domain_label).
    discriminator_input_data_source_predication: The discriminator's output for
      predicting the source (real/generated) of input_data.
    discriminator_generated_data_source_predication: The discriminator's output
      for predicting the source (real/generated) of generated_data.
    discriminator_input_data_domain_predication: The discriminator's output for
      predicting the domain_label of the input_data.
    discriminator_generated_data_domain_predication: The discriminator's output
      for predicting the domain_target of the generated_data.
generator_variables: A list of all generator variables.
generator_scope: Variable scope all generator variables live in.
generator_fn: The generator function.
discriminator_variables: A list of all discriminator variables.
discriminator_scope: Variable scope all discriminator variables live in.
discriminator_fn: The discriminator function.
"""
class GANLoss(
collections.namedtuple('GANLoss', (
'generator_loss',
'discriminator_loss'
))):
"""GANLoss contains the generator and discriminator losses.
Args:
generator_loss: A tensor for the generator loss.
discriminator_loss: A tensor for the discriminator loss.
"""
class CycleGANLoss(
collections.namedtuple('CycleGANLoss', ('loss_x2y', 'loss_y2x'))):
"""CycleGANLoss contains the losses for `CycleGANModel`.
See https://arxiv.org/abs/1703.10593 for more details.
Args:
loss_x2y: A `GANLoss` namedtuple representing the loss of `model_x2y`.
loss_y2x: A `GANLoss` namedtuple representing the loss of `model_y2x`.
"""
class GANTrainOps(
collections.namedtuple('GANTrainOps', (
'generator_train_op',
'discriminator_train_op',
'global_step_inc_op',
'train_hooks'
))):
"""GANTrainOps contains the training ops.
Args:
generator_train_op: Op that performs a generator update step.
discriminator_train_op: Op that performs a discriminator update step.
global_step_inc_op: Op that increments the shared global step.
train_hooks: a list or tuple containing hooks related to training that need
to be populated when training ops are instantiated. Used primarily for
sync hooks.
"""
def __new__(cls, generator_train_op, discriminator_train_op,
global_step_inc_op, train_hooks=()):
return super(GANTrainOps, cls).__new__(cls, generator_train_op,
discriminator_train_op,
global_step_inc_op, train_hooks)
class GANTrainSteps(
collections.namedtuple('GANTrainSteps', (
'generator_train_steps',
'discriminator_train_steps'
))):
"""Contains configuration for the GAN Training.
Args:
generator_train_steps: Number of generator steps to take in each GAN step.
discriminator_train_steps: Number of discriminator steps to take in each GAN
step.
"""
| tensorflow-master | tensorflow/contrib/gan/python/namedtuples.py |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TFGAN losses and penalties.
Losses can be used with individual arguments or with GANModel tuples.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Collapse losses into a single namespace.
from tensorflow.contrib.gan.python.losses.python import losses_wargs as wargs
from tensorflow.contrib.gan.python.losses.python import tuple_losses
# pylint: disable=wildcard-import
from tensorflow.contrib.gan.python.losses.python.tuple_losses import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = ['wargs'] + tuple_losses.__all__
remove_undocumented(__name__, _allowed_symbols)
| tensorflow-master | tensorflow/contrib/gan/python/losses/__init__.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for contrib.gan.python.losses."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
from tensorflow.contrib import layers
from tensorflow.contrib.gan.python import namedtuples
from tensorflow.contrib.gan.python.losses.python import losses_impl as tfgan_losses_impl
from tensorflow.contrib.gan.python.losses.python import tuple_losses_impl as tfgan_losses
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class ArgsToGanModelTest(test.TestCase):
def test_args_to_gan_model(self):
"""Test `_args_to_gan_model`."""
tuple_type = collections.namedtuple('fake_type', ['arg1', 'arg3'])
def args_loss(arg1, arg2, arg3=3, arg4=4):
return arg1 + arg2 + arg3 + arg4
gan_model_loss = tfgan_losses._args_to_gan_model(args_loss)
# Value is correct.
self.assertEqual(1 + 2 + 5 + 6,
gan_model_loss(tuple_type(1, 2), arg2=5, arg4=6))
# Uses tuple argument with defaults.
self.assertEqual(1 + 5 + 3 + 7,
gan_model_loss(tuple_type(1, None), arg2=5, arg4=7))
# Uses non-tuple argument with defaults.
self.assertEqual(1 + 5 + 2 + 4,
gan_model_loss(tuple_type(1, 2), arg2=5))
# Requires non-tuple, non-default arguments.
with self.assertRaisesRegexp(ValueError, '`arg2` must be supplied'):
gan_model_loss(tuple_type(1, 2))
# Can't pass tuple argument outside tuple.
with self.assertRaisesRegexp(
ValueError, 'present in both the tuple and keyword args'):
gan_model_loss(tuple_type(1, 2), arg2=1, arg3=5)
def test_args_to_gan_model_name(self):
"""Test that `_args_to_gan_model` produces correctly named functions."""
def loss_fn(x):
return x
new_loss_fn = tfgan_losses._args_to_gan_model(loss_fn)
self.assertEqual('loss_fn', new_loss_fn.__name__)
self.assertTrue('The gan_model version of' in new_loss_fn.__docstring__)
def test_tuple_respects_optional_args(self):
"""Test that optional args can be changed with tuple losses."""
tuple_type = collections.namedtuple('fake_type', ['arg1', 'arg2'])
def args_loss(arg1, arg2, arg3=3):
return arg1 + 2 * arg2 + 3 * arg3
loss_fn = tfgan_losses._args_to_gan_model(args_loss)
loss = loss_fn(tuple_type(arg1=-1, arg2=2), arg3=4)
# If `arg3` were not set properly, this value would be different.
self.assertEqual(-1 + 2 * 2 + 3 * 4, loss)
def test_works_with_child_classes(self):
"""`args_to_gan_model` should work with classes derived from namedtuple."""
tuple_type = collections.namedtuple('fake_type', ['arg1', 'arg2'])
class InheritedType(tuple_type):
pass
def args_loss(arg1, arg2, arg3=3):
return arg1 + 2 * arg2 + 3 * arg3
loss_fn = tfgan_losses._args_to_gan_model(args_loss)
loss = loss_fn(InheritedType(arg1=-1, arg2=2), arg3=4)
# If `arg3` were not set properly, this value would be different.
self.assertEqual(-1 + 2 * 2 + 3 * 4, loss)
class ConsistentLossesTest(test.TestCase):
pass
def _tuple_from_dict(args_dict):
return collections.namedtuple('Tuple', args_dict.keys())(**args_dict)
def add_loss_consistency_test(test_class, loss_name_str, loss_args):
tuple_loss = getattr(tfgan_losses, loss_name_str)
arg_loss = getattr(tfgan_losses.losses_impl, loss_name_str)
def consistency_test(self):
self.assertEqual(arg_loss.__name__, tuple_loss.__name__)
with self.cached_session():
self.assertEqual(arg_loss(**loss_args).eval(),
tuple_loss(_tuple_from_dict(loss_args)).eval())
test_name = 'test_loss_consistency_%s' % loss_name_str
setattr(test_class, test_name, consistency_test)
# A list of consistency tests which need to be manually written.
manual_tests = [
'acgan_discriminator_loss',
'acgan_generator_loss',
'combine_adversarial_loss',
'mutual_information_penalty',
'wasserstein_gradient_penalty',
'cycle_consistency_loss',
'stargan_generator_loss_wrapper',
'stargan_discriminator_loss_wrapper',
'stargan_gradient_penalty_wrapper'
]
discriminator_keyword_args = {
'discriminator_real_outputs': np.array([[3.4, 2.3, -2.3],
[6.3, -2.1, 0.2]]),
'discriminator_gen_outputs': np.array([[6.2, -1.5, 2.3],
[-2.9, -5.1, 0.1]]),
}
generator_keyword_args = {
'discriminator_gen_outputs': np.array([[6.2, -1.5, 2.3],
[-2.9, -5.1, 0.1]]),
}
class CycleConsistencyLossTest(test.TestCase):
def setUp(self):
super(CycleConsistencyLossTest, self).setUp()
def _partial_model(generator_inputs_np):
model = namedtuples.GANModel(*[None] * 11)
return model._replace(
generator_inputs=constant_op.constant(
generator_inputs_np, dtype=dtypes.float32))
self._model_x2y = _partial_model([1, 2])
self._model_y2x = _partial_model([5, 6])
def test_model_type(self):
"""Test the input model type for `cycle_consistency_loss`."""
with self.assertRaises(ValueError):
tfgan_losses.cycle_consistency_loss(self._model_x2y)
def test_correct_loss(self):
"""Test the output of `cycle_consistency_loss`."""
loss = tfgan_losses.cycle_consistency_loss(
namedtuples.CycleGANModel(
model_x2y=self._model_x2y,
model_y2x=self._model_y2x,
reconstructed_x=constant_op.constant([9, 8], dtype=dtypes.float32),
reconstructed_y=constant_op.constant([7, 2], dtype=dtypes.float32)))
with self.test_session(use_gpu=True):
variables.global_variables_initializer().run()
self.assertNear(5.0, loss.eval(), 1e-5)
class StarGANLossWrapperTest(test.TestCase):
def setUp(self):
super(StarGANLossWrapperTest, self).setUp()
self.input_data = array_ops.ones([1, 2, 2, 3])
self.input_data_domain_label = constant_op.constant([[0, 1]])
self.generated_data = array_ops.ones([1, 2, 2, 3])
self.discriminator_input_data_source_predication = array_ops.ones([1])
self.discriminator_generated_data_source_predication = array_ops.ones([1])
def _discriminator_fn(inputs, num_domains):
"""Differentiable dummy discriminator for StarGAN."""
hidden = layers.flatten(inputs)
output_src = math_ops.reduce_mean(hidden, axis=1)
output_cls = layers.fully_connected(
inputs=hidden,
num_outputs=num_domains,
activation_fn=None,
normalizer_fn=None,
biases_initializer=None)
return output_src, output_cls
with variable_scope.variable_scope('discriminator') as dis_scope:
pass
self.model = namedtuples.StarGANModel(
input_data=self.input_data,
input_data_domain_label=self.input_data_domain_label,
generated_data=self.generated_data,
generated_data_domain_target=None,
reconstructed_data=None,
discriminator_input_data_source_predication=self.
discriminator_input_data_source_predication,
discriminator_generated_data_source_predication=self.
discriminator_generated_data_source_predication,
discriminator_input_data_domain_predication=None,
discriminator_generated_data_domain_predication=None,
generator_variables=None,
generator_scope=None,
generator_fn=None,
discriminator_variables=None,
discriminator_scope=dis_scope,
discriminator_fn=_discriminator_fn)
self.discriminator_fn = _discriminator_fn
self.discriminator_scope = dis_scope
def test_stargan_generator_loss_wrapper(self):
"""Test StarGAN generator loss wrapper."""
loss_fn = tfgan_losses_impl.wasserstein_generator_loss
wrapped_loss_fn = tfgan_losses.stargan_generator_loss_wrapper(loss_fn)
loss_result_tensor = loss_fn(
self.discriminator_generated_data_source_predication)
wrapped_loss_result_tensor = wrapped_loss_fn(self.model)
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
loss_result, wrapped_loss_result = sess.run(
[loss_result_tensor, wrapped_loss_result_tensor])
self.assertAlmostEqual(loss_result, wrapped_loss_result)
def test_stargan_discriminator_loss_wrapper(self):
"""Test StarGAN discriminator loss wrapper."""
loss_fn = tfgan_losses_impl.wasserstein_discriminator_loss
wrapped_loss_fn = tfgan_losses.stargan_discriminator_loss_wrapper(loss_fn)
loss_result_tensor = loss_fn(
self.discriminator_generated_data_source_predication,
self.discriminator_generated_data_source_predication)
wrapped_loss_result_tensor = wrapped_loss_fn(self.model)
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
loss_result, wrapped_loss_result = sess.run(
[loss_result_tensor, wrapped_loss_result_tensor])
self.assertAlmostEqual(loss_result, wrapped_loss_result)
def test_stargan_gradient_penalty_wrapper(self):
"""Test StaGAN gradient penalty wrapper.
Notes:
The random interpolates are handled by given setting the reconstruction to
be the same as the input.
"""
loss_fn = tfgan_losses_impl.wasserstein_gradient_penalty
wrapped_loss_fn = tfgan_losses.stargan_gradient_penalty_wrapper(loss_fn)
loss_result_tensor = loss_fn(
real_data=self.input_data,
generated_data=self.generated_data,
generator_inputs=self.input_data_domain_label.shape.as_list()[-1],
discriminator_fn=self.discriminator_fn,
discriminator_scope=self.discriminator_scope)
wrapped_loss_result_tensor = wrapped_loss_fn(self.model)
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
loss_result, wrapped_loss_result = sess.run(
[loss_result_tensor, wrapped_loss_result_tensor])
self.assertAlmostEqual(loss_result, wrapped_loss_result)
if __name__ == '__main__':
for loss_name in tfgan_losses.__all__:
if loss_name in manual_tests: continue
keyword_args = (generator_keyword_args if 'generator' in loss_name else
discriminator_keyword_args)
add_loss_consistency_test(ConsistentLossesTest, loss_name, keyword_args)
test.main()
| tensorflow-master | tensorflow/contrib/gan/python/losses/python/tuple_losses_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Losses that are useful for training GANs.
Most of the losses belong to one of two main groups, but there are others that
do not:
1) xxxxx_generator_loss
2) xxxxx_discriminator_loss
Example:
1) wasserstein_generator_loss
2) wasserstein_discriminator_loss
Other example:
wasserstein_gradient_penalty
All losses must be able to accept 1D or 2D Tensors, so as to be compatible with
patchGAN style losses (https://arxiv.org/abs/1611.07004).
To make these losses usable in the TF-GAN framework, please create a tuple
version of the losses with `losses_utils.py`.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework.python.ops import variables as contrib_variables_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops.losses import losses
from tensorflow.python.ops.losses import util
from tensorflow.python.summary import summary
__all__ = [
'acgan_discriminator_loss',
'acgan_generator_loss',
'least_squares_discriminator_loss',
'least_squares_generator_loss',
'modified_discriminator_loss',
'modified_generator_loss',
'minimax_discriminator_loss',
'minimax_generator_loss',
'wasserstein_discriminator_loss',
'wasserstein_generator_loss',
'wasserstein_gradient_penalty',
'mutual_information_penalty',
'combine_adversarial_loss',
'cycle_consistency_loss',
]
def _to_float(tensor):
return math_ops.cast(tensor, dtypes.float32)
# Wasserstein losses from `Wasserstein GAN` (https://arxiv.org/abs/1701.07875).
def wasserstein_generator_loss(
discriminator_gen_outputs,
weights=1.0,
scope=None,
loss_collection=ops.GraphKeys.LOSSES,
reduction=losses.Reduction.SUM_BY_NONZERO_WEIGHTS,
add_summaries=False):
"""Wasserstein generator loss for GANs.
See `Wasserstein GAN` (https://arxiv.org/abs/1701.07875) for more details.
Args:
discriminator_gen_outputs: Discriminator output on generated data. Expected
to be in the range of (-inf, inf).
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`discriminator_gen_outputs`, and must be broadcastable to
`discriminator_gen_outputs` (i.e., all dimensions must be either `1`, or
the same as the corresponding dimension).
scope: The scope for the operations performed in computing the loss.
loss_collection: collection to which this loss will be added.
reduction: A `tf.compat.v1.losses.Reduction` to apply to loss.
add_summaries: Whether or not to add detailed summaries for the loss.
Returns:
A loss Tensor. The shape depends on `reduction`.
"""
with ops.name_scope(scope, 'generator_wasserstein_loss',
(discriminator_gen_outputs, weights)) as scope:
discriminator_gen_outputs = _to_float(discriminator_gen_outputs)
loss = -discriminator_gen_outputs
loss = losses.compute_weighted_loss(loss, weights, scope, loss_collection,
reduction)
if add_summaries:
summary.scalar('generator_wass_loss', loss)
return loss
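# Worked example (sketch): with discriminator_gen_outputs = [2.0, -1.0], unit
# weights, and the default reduction, the loss is mean(-[2.0, -1.0]) = -0.5;
# the generator is rewarded for pushing D(G(z)) upward.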
def wasserstein_discriminator_loss(
discriminator_real_outputs,
discriminator_gen_outputs,
real_weights=1.0,
generated_weights=1.0,
scope=None,
loss_collection=ops.GraphKeys.LOSSES,
reduction=losses.Reduction.SUM_BY_NONZERO_WEIGHTS,
add_summaries=False):
"""Wasserstein discriminator loss for GANs.
See `Wasserstein GAN` (https://arxiv.org/abs/1701.07875) for more details.
Args:
discriminator_real_outputs: Discriminator output on real data.
discriminator_gen_outputs: Discriminator output on generated data. Expected
to be in the range of (-inf, inf).
real_weights: Optional `Tensor` whose rank is either 0, or the same rank as
`discriminator_real_outputs`, and must be broadcastable to
`discriminator_real_outputs` (i.e., all dimensions must be either `1`, or
the same as the corresponding dimension).
generated_weights: Same as `real_weights`, but for
`discriminator_gen_outputs`.
scope: The scope for the operations performed in computing the loss.
loss_collection: collection to which this loss will be added.
reduction: A `tf.compat.v1.losses.Reduction` to apply to loss.
add_summaries: Whether or not to add summaries for the loss.
Returns:
A loss Tensor. The shape depends on `reduction`.
"""
with ops.name_scope(scope, 'discriminator_wasserstein_loss',
(discriminator_real_outputs, discriminator_gen_outputs,
real_weights, generated_weights)) as scope:
discriminator_real_outputs = _to_float(discriminator_real_outputs)
discriminator_gen_outputs = _to_float(discriminator_gen_outputs)
discriminator_real_outputs.shape.assert_is_compatible_with(
discriminator_gen_outputs.shape)
loss_on_generated = losses.compute_weighted_loss(
discriminator_gen_outputs,
generated_weights,
scope,
loss_collection=None,
reduction=reduction)
loss_on_real = losses.compute_weighted_loss(
discriminator_real_outputs,
real_weights,
scope,
loss_collection=None,
reduction=reduction)
loss = loss_on_generated - loss_on_real
util.add_loss(loss, loss_collection)
if add_summaries:
summary.scalar('discriminator_gen_wass_loss', loss_on_generated)
summary.scalar('discriminator_real_wass_loss', loss_on_real)
summary.scalar('discriminator_wass_loss', loss)
return loss
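# Worked example (sketch): with discriminator_real_outputs = [3.0, 1.0],
# discriminator_gen_outputs = [2.0, -1.0], unit weights, and the default
# reduction, the loss is mean([2.0, -1.0]) - mean([3.0, 1.0]) = 0.5 - 2.0
# = -1.5; the discriminator is rewarded for separating the two means.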
# ACGAN losses from `Conditional Image Synthesis With Auxiliary Classifier GANs`
# (https://arxiv.org/abs/1610.09585).
def acgan_discriminator_loss(discriminator_real_classification_logits,
discriminator_gen_classification_logits,
one_hot_labels,
label_smoothing=0.0,
real_weights=1.0,
generated_weights=1.0,
scope=None,
loss_collection=ops.GraphKeys.LOSSES,
reduction=losses.Reduction.SUM_BY_NONZERO_WEIGHTS,
add_summaries=False):
"""ACGAN loss for the discriminator.
The ACGAN loss adds a classification loss to the conditional discriminator.
Therefore, the discriminator must output a tuple consisting of
(1) the real/fake prediction and
(2) the logits for the classification (usually the last conv layer,
flattened).
For more details:
ACGAN: https://arxiv.org/abs/1610.09585
Args:
discriminator_real_classification_logits: Classification logits for real
data.
discriminator_gen_classification_logits: Classification logits for generated
data.
one_hot_labels: A Tensor holding one-hot labels for the batch.
label_smoothing: A float in [0, 1]. If greater than 0, smooth the labels for
"discriminator on real data" as suggested in
https://arxiv.org/pdf/1701.00160
real_weights: Optional `Tensor` whose rank is either 0, or the same rank as
`discriminator_real_outputs`, and must be broadcastable to
`discriminator_real_outputs` (i.e., all dimensions must be either `1`, or
the same as the corresponding dimension).
generated_weights: Same as `real_weights`, but for
`discriminator_gen_classification_logits`.
scope: The scope for the operations performed in computing the loss.
loss_collection: collection to which this loss will be added.
reduction: A `tf.compat.v1.losses.Reduction` to apply to loss.
add_summaries: Whether or not to add summaries for the loss.
Returns:
A loss Tensor. Shape depends on `reduction`.
Raises:
TypeError: If the discriminator does not output a tuple.
"""
with ops.name_scope(
scope, 'acgan_discriminator_loss',
(discriminator_real_classification_logits,
discriminator_gen_classification_logits, one_hot_labels)) as scope:
loss_on_generated = losses.softmax_cross_entropy(
one_hot_labels,
discriminator_gen_classification_logits,
weights=generated_weights,
scope=scope,
loss_collection=None,
reduction=reduction)
loss_on_real = losses.softmax_cross_entropy(
one_hot_labels,
discriminator_real_classification_logits,
weights=real_weights,
label_smoothing=label_smoothing,
scope=scope,
loss_collection=None,
reduction=reduction)
loss = loss_on_generated + loss_on_real
util.add_loss(loss, loss_collection)
if add_summaries:
summary.scalar('discriminator_gen_ac_loss', loss_on_generated)
summary.scalar('discriminator_real_ac_loss', loss_on_real)
summary.scalar('discriminator_ac_loss', loss)
return loss
def acgan_generator_loss(discriminator_gen_classification_logits,
one_hot_labels,
weights=1.0,
scope=None,
loss_collection=ops.GraphKeys.LOSSES,
reduction=losses.Reduction.SUM_BY_NONZERO_WEIGHTS,
add_summaries=False):
"""ACGAN loss for the generator.
The ACGAN loss adds a classification loss to the conditional discriminator.
Therefore, the discriminator must output a tuple consisting of
(1) the real/fake prediction and
(2) the logits for the classification (usually the last conv layer,
flattened).
For more details:
ACGAN: https://arxiv.org/abs/1610.09585
Args:
discriminator_gen_classification_logits: Classification logits for generated
data.
one_hot_labels: A Tensor holding one-hot labels for the batch.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`discriminator_gen_classification_logits`, and must be broadcastable to
`discriminator_gen_classification_logits` (i.e., all dimensions must be
either `1`, or the same as the corresponding dimension).
scope: The scope for the operations performed in computing the loss.
loss_collection: collection to which this loss will be added.
reduction: A `tf.compat.v1.losses.Reduction` to apply to loss.
add_summaries: Whether or not to add summaries for the loss.
Returns:
A loss Tensor. Shape depends on `reduction`.
Raises:
    ValueError: If the `module` argument is neither `generator` nor
      `discriminator`.
TypeError: if the discriminator does not output a tuple.
"""
with ops.name_scope(
scope, 'acgan_generator_loss',
(discriminator_gen_classification_logits, one_hot_labels)) as scope:
loss = losses.softmax_cross_entropy(
one_hot_labels,
discriminator_gen_classification_logits,
weights=weights,
scope=scope,
loss_collection=loss_collection,
reduction=reduction)
if add_summaries:
summary.scalar('generator_ac_loss', loss)
return loss
# Wasserstein Gradient Penalty losses from `Improved Training of Wasserstein
# GANs` (https://arxiv.org/abs/1704.00028).
def wasserstein_gradient_penalty(
real_data,
generated_data,
generator_inputs,
discriminator_fn,
discriminator_scope,
epsilon=1e-10,
target=1.0,
one_sided=False,
weights=1.0,
scope=None,
loss_collection=ops.GraphKeys.LOSSES,
reduction=losses.Reduction.SUM_BY_NONZERO_WEIGHTS,
add_summaries=False):
"""The gradient penalty for the Wasserstein discriminator loss.
See `Improved Training of Wasserstein GANs`
(https://arxiv.org/abs/1704.00028) for more details.
Args:
real_data: Real data.
generated_data: Output of the generator.
generator_inputs: Exact argument to pass to the generator, which is used as
optional conditioning to the discriminator.
discriminator_fn: A discriminator function that conforms to TF-GAN API.
discriminator_scope: If not `None`, reuse discriminators from this scope.
epsilon: A small positive number added for numerical stability when
computing the gradient norm.
target: Optional Python number or `Tensor` indicating the target value of
gradient norm. Defaults to 1.0.
one_sided: If `True`, penalty proposed in https://arxiv.org/abs/1709.08894
is used. Defaults to `False`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`real_data` and `generated_data`, and must be broadcastable to them (i.e.,
all dimensions must be either `1`, or the same as the corresponding
dimension).
scope: The scope for the operations performed in computing the loss.
loss_collection: collection to which this loss will be added.
reduction: A `tf.compat.v1.losses.Reduction` to apply to loss.
add_summaries: Whether or not to add summaries for the loss.
Returns:
A loss Tensor. The shape depends on `reduction`.
Raises:
ValueError: If the rank of data Tensors is unknown.
"""
with ops.name_scope(scope, 'wasserstein_gradient_penalty',
(real_data, generated_data)) as scope:
real_data = ops.convert_to_tensor(real_data)
generated_data = ops.convert_to_tensor(generated_data)
if real_data.shape.ndims is None:
raise ValueError('`real_data` can\'t have unknown rank.')
if generated_data.shape.ndims is None:
raise ValueError('`generated_data` can\'t have unknown rank.')
differences = generated_data - real_data
batch_size = differences.shape.dims[0].value or array_ops.shape(
differences)[0]
alpha_shape = [batch_size] + [1] * (differences.shape.ndims - 1)
alpha = random_ops.random_uniform(shape=alpha_shape)
interpolates = real_data + (alpha * differences)
with ops.name_scope(None): # Clear scope so update ops are added properly.
# Reuse variables if variables already exists.
with variable_scope.variable_scope(
discriminator_scope,
'gpenalty_dscope',
reuse=variable_scope.AUTO_REUSE):
disc_interpolates = discriminator_fn(interpolates, generator_inputs)
if isinstance(disc_interpolates, tuple):
# ACGAN case: disc outputs more than one tensor
disc_interpolates = disc_interpolates[0]
gradients = gradients_impl.gradients(disc_interpolates, interpolates)[0]
gradient_squares = math_ops.reduce_sum(
math_ops.square(gradients), axis=list(range(1, gradients.shape.ndims)))
# Propagate shape information, if possible.
if isinstance(batch_size, int):
gradient_squares.set_shape([batch_size] +
gradient_squares.shape.as_list()[1:])
# For numerical stability, add epsilon to the sum before taking the square
# root. Note tf.norm does not add epsilon.
slopes = math_ops.sqrt(gradient_squares + epsilon)
penalties = slopes / target - 1.0
if one_sided:
penalties = math_ops.maximum(0., penalties)
penalties_squared = math_ops.square(penalties)
penalty = losses.compute_weighted_loss(
penalties_squared,
weights,
scope=scope,
loss_collection=loss_collection,
reduction=reduction)
if add_summaries:
summary.scalar('gradient_penalty_loss', penalty)
return penalty
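# Summary of the computation above (sketch): for alpha ~ U[0, 1],
#   x_hat = real_data + alpha * (generated_data - real_data)
#   slopes = sqrt(sum(grad_{x_hat} D(x_hat) ** 2) + epsilon)
#   penalty = weighted_mean(((slopes / target) - 1) ** 2)
# With one_sided=True, only slopes exceeding `target` are penalized, per
# https://arxiv.org/abs/1709.08894.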
# Original losses from `Generative Adversarial Nets`
# (https://arxiv.org/abs/1406.2661).
def minimax_discriminator_loss(
discriminator_real_outputs,
discriminator_gen_outputs,
label_smoothing=0.25,
real_weights=1.0,
generated_weights=1.0,
scope=None,
loss_collection=ops.GraphKeys.LOSSES,
reduction=losses.Reduction.SUM_BY_NONZERO_WEIGHTS,
add_summaries=False):
"""Original minimax discriminator loss for GANs, with label smoothing.
Note that the authors don't recommend using this loss. A more practically
useful loss is `modified_discriminator_loss`.
L = - real_weights * log(sigmoid(D(x)))
- generated_weights * log(1 - sigmoid(D(G(z))))
See `Generative Adversarial Nets` (https://arxiv.org/abs/1406.2661) for more
details.
Args:
discriminator_real_outputs: Discriminator output on real data.
discriminator_gen_outputs: Discriminator output on generated data. Expected
to be in the range of (-inf, inf).
label_smoothing: The amount of smoothing for positive labels. This technique
is taken from `Improved Techniques for Training GANs`
(https://arxiv.org/abs/1606.03498). `0.0` means no smoothing.
real_weights: Optional `Tensor` whose rank is either 0, or the same rank as
`real_data`, and must be broadcastable to `real_data` (i.e., all
dimensions must be either `1`, or the same as the corresponding
dimension).
generated_weights: Same as `real_weights`, but for `generated_data`.
scope: The scope for the operations performed in computing the loss.
loss_collection: collection to which this loss will be added.
reduction: A `tf.compat.v1.losses.Reduction` to apply to loss.
add_summaries: Whether or not to add summaries for the loss.
Returns:
A loss Tensor. The shape depends on `reduction`.
"""
with ops.name_scope(
scope, 'discriminator_minimax_loss',
(discriminator_real_outputs, discriminator_gen_outputs, real_weights,
generated_weights, label_smoothing)) as scope:
# -log((1 - label_smoothing) - sigmoid(D(x)))
loss_on_real = losses.sigmoid_cross_entropy(
array_ops.ones_like(discriminator_real_outputs),
discriminator_real_outputs,
real_weights,
label_smoothing,
scope,
loss_collection=None,
reduction=reduction)
# -log(- sigmoid(D(G(x))))
loss_on_generated = losses.sigmoid_cross_entropy(
array_ops.zeros_like(discriminator_gen_outputs),
discriminator_gen_outputs,
generated_weights,
scope=scope,
loss_collection=None,
reduction=reduction)
loss = loss_on_real + loss_on_generated
util.add_loss(loss, loss_collection)
if add_summaries:
summary.scalar('discriminator_gen_minimax_loss', loss_on_generated)
summary.scalar('discriminator_real_minimax_loss', loss_on_real)
summary.scalar('discriminator_minimax_loss', loss)
return loss
def minimax_generator_loss(discriminator_gen_outputs,
label_smoothing=0.0,
weights=1.0,
scope=None,
loss_collection=ops.GraphKeys.LOSSES,
reduction=losses.Reduction.SUM_BY_NONZERO_WEIGHTS,
add_summaries=False):
"""Original minimax generator loss for GANs.
Note that the authors don't recommend using this loss. A more practically
useful loss is `modified_generator_loss`.
L = log(sigmoid(D(x))) + log(1 - sigmoid(D(G(z))))
See `Generative Adversarial Nets` (https://arxiv.org/abs/1406.2661) for more
details.
Args:
discriminator_gen_outputs: Discriminator output on generated data. Expected
to be in the range of (-inf, inf).
label_smoothing: The amount of smoothing for positive labels. This technique
is taken from `Improved Techniques for Training GANs`
(https://arxiv.org/abs/1606.03498). `0.0` means no smoothing.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`discriminator_gen_outputs`, and must be broadcastable to
`discriminator_gen_outputs` (i.e., all dimensions must be either `1`, or
the same as the corresponding dimension).
scope: The scope for the operations performed in computing the loss.
loss_collection: collection to which this loss will be added.
reduction: A `tf.compat.v1.losses.Reduction` to apply to loss.
add_summaries: Whether or not to add summaries for the loss.
Returns:
A loss Tensor. The shape depends on `reduction`.
"""
with ops.name_scope(scope, 'generator_minimax_loss') as scope:
loss = -minimax_discriminator_loss(
array_ops.ones_like(discriminator_gen_outputs),
discriminator_gen_outputs,
label_smoothing,
weights,
weights,
scope,
loss_collection,
reduction,
add_summaries=False)
if add_summaries:
summary.scalar('generator_minimax_loss', loss)
return loss
def modified_discriminator_loss(
discriminator_real_outputs,
discriminator_gen_outputs,
label_smoothing=0.25,
real_weights=1.0,
generated_weights=1.0,
scope=None,
loss_collection=ops.GraphKeys.LOSSES,
reduction=losses.Reduction.SUM_BY_NONZERO_WEIGHTS,
add_summaries=False):
"""Same as minimax discriminator loss.
See `Generative Adversarial Nets` (https://arxiv.org/abs/1406.2661) for more
details.
Args:
discriminator_real_outputs: Discriminator output on real data.
discriminator_gen_outputs: Discriminator output on generated data. Expected
to be in the range of (-inf, inf).
label_smoothing: The amount of smoothing for positive labels. This technique
is taken from `Improved Techniques for Training GANs`
(https://arxiv.org/abs/1606.03498). `0.0` means no smoothing.
real_weights: Optional `Tensor` whose rank is either 0, or the same rank as
`discriminator_gen_outputs`, and must be broadcastable to
`discriminator_gen_outputs` (i.e., all dimensions must be either `1`, or
the same as the corresponding dimension).
generated_weights: Same as `real_weights`, but for
`discriminator_gen_outputs`.
scope: The scope for the operations performed in computing the loss.
loss_collection: collection to which this loss will be added.
reduction: A `tf.compat.v1.losses.Reduction` to apply to loss.
add_summaries: Whether or not to add summaries for the loss.
Returns:
A loss Tensor. The shape depends on `reduction`.
"""
return minimax_discriminator_loss(discriminator_real_outputs,
discriminator_gen_outputs, label_smoothing,
real_weights, generated_weights, scope or
'discriminator_modified_loss',
loss_collection, reduction, add_summaries)
def modified_generator_loss(discriminator_gen_outputs,
label_smoothing=0.0,
weights=1.0,
scope=None,
loss_collection=ops.GraphKeys.LOSSES,
reduction=losses.Reduction.SUM_BY_NONZERO_WEIGHTS,
add_summaries=False):
"""Modified generator loss for GANs.
L = -log(sigmoid(D(G(z))))
This is the trick used in the original paper to avoid vanishing gradients
early in training. See `Generative Adversarial Nets`
(https://arxiv.org/abs/1406.2661) for more details.
Args:
discriminator_gen_outputs: Discriminator output on generated data. Expected
to be in the range of (-inf, inf).
label_smoothing: The amount of smoothing for positive labels. This technique
is taken from `Improved Techniques for Training GANs`
(https://arxiv.org/abs/1606.03498). `0.0` means no smoothing.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`discriminator_gen_outputs`, and must be broadcastable to `labels` (i.e.,
all dimensions must be either `1`, or the same as the corresponding
dimension).
scope: The scope for the operations performed in computing the loss.
loss_collection: collection to which this loss will be added.
reduction: A `tf.compat.v1.losses.Reduction` to apply to loss.
add_summaries: Whether or not to add summaries for the loss.
Returns:
A loss Tensor. The shape depends on `reduction`.
"""
with ops.name_scope(scope, 'generator_modified_loss',
[discriminator_gen_outputs]) as scope:
loss = losses.sigmoid_cross_entropy(
array_ops.ones_like(discriminator_gen_outputs),
discriminator_gen_outputs, weights, label_smoothing, scope,
loss_collection, reduction)
if add_summaries:
summary.scalar('generator_modified_loss', loss)
return loss
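# Illustrative sketch (not part of the original module): the modified generator
# loss above is just the sigmoid cross-entropy of D(G(z)) against all-ones
# labels. A plain-NumPy check of that identity, assuming no label smoothing and
# a mean reduction; the logits below are made-up example values.
def _example_modified_generator_loss_numpy():
  """Recomputes -log(sigmoid(x)) as softplus(-x) and averages it."""
  import numpy as np
  discriminator_gen_outputs = np.array([10.0, 4.4, -5.5, 3.6])
  # -log(sigmoid(x)) == log(1 + exp(-x)), i.e. softplus(-x).
  per_example = np.log1p(np.exp(-discriminator_gen_outputs))
  return per_example.mean()  # ~1.3858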
# Least Squares loss from `Least Squares Generative Adversarial Networks`
# (https://arxiv.org/abs/1611.04076).
def least_squares_generator_loss(
discriminator_gen_outputs,
real_label=1,
weights=1.0,
scope=None,
loss_collection=ops.GraphKeys.LOSSES,
reduction=losses.Reduction.SUM_BY_NONZERO_WEIGHTS,
add_summaries=False):
"""Least squares generator loss.
This loss comes from `Least Squares Generative Adversarial Networks`
(https://arxiv.org/abs/1611.04076).
L = 1/2 * (D(G(z)) - `real_label`) ** 2
where D(y) are discriminator logits.
Args:
discriminator_gen_outputs: Discriminator output on generated data. Expected
to be in the range of (-inf, inf).
real_label: The value that the generator is trying to get the discriminator
to output on generated data.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`discriminator_gen_outputs`, and must be broadcastable to
`discriminator_gen_outputs` (i.e., all dimensions must be either `1`, or
the same as the corresponding dimension).
scope: The scope for the operations performed in computing the loss.
loss_collection: collection to which this loss will be added.
reduction: A `tf.compat.v1.losses.Reduction` to apply to loss.
add_summaries: Whether or not to add summaries for the loss.
Returns:
A loss Tensor. The shape depends on `reduction`.
"""
with ops.name_scope(scope, 'lsq_generator_loss',
(discriminator_gen_outputs, real_label)) as scope:
discriminator_gen_outputs = _to_float(discriminator_gen_outputs)
loss = math_ops.squared_difference(discriminator_gen_outputs,
real_label) / 2.0
loss = losses.compute_weighted_loss(loss, weights, scope, loss_collection,
reduction)
if add_summaries:
summary.scalar('generator_lsq_loss', loss)
return loss
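# Illustrative sketch (not part of the original module): a plain-NumPy check of
# the least-squares generator formula above, assuming real_label=1 and a mean
# reduction over made-up example logits.
def _example_least_squares_generator_loss_numpy():
  import numpy as np
  discriminator_gen_outputs = np.array([10.0, 4.4, -5.5, 3.6])
  per_example = 0.5 * (discriminator_gen_outputs - 1.0) ** 2
  return per_example.mean()  # ~17.69625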
def least_squares_discriminator_loss(
discriminator_real_outputs,
discriminator_gen_outputs,
real_label=1,
fake_label=0,
real_weights=1.0,
generated_weights=1.0,
scope=None,
loss_collection=ops.GraphKeys.LOSSES,
reduction=losses.Reduction.SUM_BY_NONZERO_WEIGHTS,
add_summaries=False):
"""Least squares discriminator loss.
This loss comes from `Least Squares Generative Adversarial Networks`
(https://arxiv.org/abs/1611.04076).
  L = 1/2 * (D(x) - `real_label`) ** 2 +
1/2 * (D(G(z)) - `fake_label`) ** 2
where D(y) are discriminator logits.
Args:
discriminator_real_outputs: Discriminator output on real data.
discriminator_gen_outputs: Discriminator output on generated data. Expected
to be in the range of (-inf, inf).
real_label: The value that the discriminator tries to output for real data.
fake_label: The value that the discriminator tries to output for fake data.
real_weights: Optional `Tensor` whose rank is either 0, or the same rank as
`discriminator_real_outputs`, and must be broadcastable to
`discriminator_real_outputs` (i.e., all dimensions must be either `1`, or
the same as the corresponding dimension).
generated_weights: Same as `real_weights`, but for
`discriminator_gen_outputs`.
scope: The scope for the operations performed in computing the loss.
loss_collection: collection to which this loss will be added.
reduction: A `tf.compat.v1.losses.Reduction` to apply to loss.
add_summaries: Whether or not to add summaries for the loss.
Returns:
A loss Tensor. The shape depends on `reduction`.
"""
with ops.name_scope(scope, 'lsq_discriminator_loss',
(discriminator_gen_outputs, real_label)) as scope:
discriminator_real_outputs = _to_float(discriminator_real_outputs)
discriminator_gen_outputs = _to_float(discriminator_gen_outputs)
discriminator_real_outputs.shape.assert_is_compatible_with(
discriminator_gen_outputs.shape)
real_losses = math_ops.squared_difference(discriminator_real_outputs,
real_label) / 2.0
fake_losses = math_ops.squared_difference(discriminator_gen_outputs,
fake_label) / 2.0
loss_on_real = losses.compute_weighted_loss(
real_losses,
real_weights,
scope,
loss_collection=None,
reduction=reduction)
loss_on_generated = losses.compute_weighted_loss(
fake_losses,
generated_weights,
scope,
loss_collection=None,
reduction=reduction)
loss = loss_on_real + loss_on_generated
util.add_loss(loss, loss_collection)
if add_summaries:
summary.scalar('discriminator_gen_lsq_loss', loss_on_generated)
summary.scalar('discriminator_real_lsq_loss', loss_on_real)
summary.scalar('discriminator_lsq_loss', loss)
return loss
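# Illustrative sketch (not part of the original module): the same arithmetic as
# the least-squares discriminator formula above in plain NumPy, assuming
# real_label=1, fake_label=0, and a mean reduction on each term. The logits are
# made-up example values.
def _example_least_squares_discriminator_loss_numpy():
  import numpy as np
  real_outputs = np.array([-5.0, 1.4, 12.5, 2.7])
  gen_outputs = np.array([10.0, 4.4, -5.5, 3.6])
  loss_on_real = np.mean(0.5 * (real_outputs - 1.0) ** 2)      # ~21.41250
  loss_on_generated = np.mean(0.5 * (gen_outputs - 0.0) ** 2)  # ~20.32125
  return loss_on_real + loss_on_generated                      # ~41.73375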
# InfoGAN loss from `InfoGAN: Interpretable Representation Learning by
# Information Maximizing Generative Adversarial Nets`
# https://arxiv.org/abs/1606.03657
def _validate_distributions(distributions):
if not isinstance(distributions, (list, tuple)):
raise ValueError('`distributions` must be a list or tuple. Instead, '
'found %s.' % type(distributions))
for x in distributions:
# We used to check with `isinstance(x, tf.compat.v1.distributions.Distribution)`.
# However, distributions have migrated to `tfp.distributions.Distribution`,
# which is a new code repo, so we can't check this way anymore until
# TF-GAN is migrated to a new repo as well.
# This new check is not sufficient, but is a useful heuristic for now.
if not callable(getattr(x, 'log_prob', None)):
raise ValueError('`distributions` must be a list of `Distributions`. '
'Instead, found %s.' % type(x))
def _validate_information_penalty_inputs(structured_generator_inputs,
predicted_distributions):
"""Validate input to `mutual_information_penalty`."""
_validate_distributions(predicted_distributions)
if len(structured_generator_inputs) != len(predicted_distributions):
raise ValueError(
'`structured_generator_inputs` length %i must be the same '
'as `predicted_distributions` length %i.' %
(len(structured_generator_inputs), len(predicted_distributions)))
def mutual_information_penalty(
structured_generator_inputs,
predicted_distributions,
weights=1.0,
scope=None,
loss_collection=ops.GraphKeys.LOSSES,
reduction=losses.Reduction.SUM_BY_NONZERO_WEIGHTS,
add_summaries=False):
"""Returns a penalty on the mutual information in an InfoGAN model.
This loss comes from an InfoGAN paper https://arxiv.org/abs/1606.03657.
Args:
structured_generator_inputs: A list of Tensors representing the random noise
that must have high mutual information with the generator output. List
length should match `predicted_distributions`.
predicted_distributions: A list of `tfp.distributions.Distribution`s.
Predicted by the recognizer, and used to evaluate the likelihood of the
structured noise. List length should match `structured_generator_inputs`.
weights: Optional `Tensor` whose rank is either 0, or the same dimensions as
`structured_generator_inputs`.
scope: The scope for the operations performed in computing the loss.
loss_collection: collection to which this loss will be added.
reduction: A `tf.compat.v1.losses.Reduction` to apply to loss.
add_summaries: Whether or not to add summaries for the loss.
Returns:
A scalar Tensor representing the mutual information loss.
"""
_validate_information_penalty_inputs(structured_generator_inputs,
predicted_distributions)
with ops.name_scope(scope, 'mutual_information_loss') as scope:
# Calculate the negative log-likelihood of the reconstructed noise.
log_probs = [
math_ops.reduce_mean(dist.log_prob(noise)) for dist, noise in zip(
predicted_distributions, structured_generator_inputs)
]
loss = -1 * losses.compute_weighted_loss(
log_probs,
weights,
scope,
loss_collection=loss_collection,
reduction=reduction)
if add_summaries:
summary.scalar('mutual_information_penalty', loss)
return loss
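# Illustrative sketch (not part of the original module): a plain-NumPy
# recomputation of the penalty for two made-up structured codes, one observed
# as class 1 under Categorical(logits=[1., 2.]) and one observed at 2.0 under a
# unit Normal, averaged with equal weights.
def _example_mutual_information_penalty_numpy():
  import numpy as np
  logits = np.array([1.0, 2.0])
  # Categorical log-likelihood of class 1: logit minus log-sum-exp.
  cat_log_prob = logits[1] - np.log(np.sum(np.exp(logits)))    # ~-0.3133
  # Normal(0, 1) log-likelihood of 2.0.
  normal_log_prob = -0.5 * np.log(2 * np.pi) - 0.5 * 2.0 ** 2  # ~-2.9189
  return -np.mean([cat_log_prob, normal_log_prob])             # ~1.6161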
def _numerically_stable_global_norm(tensor_list):
"""Compute the global norm of a list of Tensors, with improved stability.
The global norm computation sometimes overflows due to the intermediate L2
step. To avoid this, we divide by a cheap-to-compute max over the
matrix elements.
Args:
tensor_list: A list of tensors, or `None`.
Returns:
A scalar tensor with the global norm.
"""
if all(x is None for x in tensor_list):
return 0.0
list_max = math_ops.reduce_max([
math_ops.reduce_max(math_ops.abs(x)) for x in tensor_list if x is not None
])
return list_max * clip_ops.global_norm(
[x / list_max for x in tensor_list if x is not None])
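# Illustrative sketch (not part of the original module): why dividing by the
# max helps. Squaring 1e200 overflows float64 and makes the naive global norm
# infinite, while max-normalizing first keeps it finite.
def _example_stable_global_norm_numpy():
  import numpy as np
  x = np.array([1e200, 1e200])
  naive = np.sqrt(np.sum(x ** 2))             # inf: the squares overflow.
  m = np.max(np.abs(x))
  stable = m * np.sqrt(np.sum((x / m) ** 2))  # ~1.414e200
  return naive, stable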
def _used_weight(weights_list):
for weight in weights_list:
if weight is not None:
return tensor_util.constant_value(ops.convert_to_tensor(weight))
def _validate_args(losses_list, weight_factor, gradient_ratio):
for loss in losses_list:
loss.shape.assert_is_compatible_with([])
if weight_factor is None and gradient_ratio is None:
raise ValueError(
'`weight_factor` and `gradient_ratio` cannot both be `None.`')
if weight_factor is not None and gradient_ratio is not None:
raise ValueError(
'`weight_factor` and `gradient_ratio` cannot both be specified.')
# TODO(joelshor): Add ability to pass in gradients, to avoid recomputing.
def combine_adversarial_loss(main_loss,
adversarial_loss,
weight_factor=None,
gradient_ratio=None,
gradient_ratio_epsilon=1e-6,
variables=None,
scalar_summaries=True,
gradient_summaries=True,
scope=None):
"""Utility to combine main and adversarial losses.
This utility combines the main and adversarial losses in one of two ways.
1) Fixed coefficient on adversarial loss. Use `weight_factor` in this case.
2) Fixed ratio of gradients. Use `gradient_ratio` in this case. This is often
used to make sure both losses affect weights roughly equally, as in
https://arxiv.org/pdf/1705.05823.
One can optionally also visualize the scalar and gradient behavior of the
losses.
Args:
main_loss: A floating scalar Tensor indicating the main loss.
    adversarial_loss: A floating scalar Tensor indicating the adversarial loss.
weight_factor: If not `None`, the coefficient by which to multiply the
adversarial loss. Exactly one of this and `gradient_ratio` must be
non-None.
gradient_ratio: If not `None`, the ratio of the magnitude of the gradients.
      Specifically, gradient_ratio = grad_mag(main_loss) /
      grad_mag(adversarial_loss). Exactly one of this and `weight_factor` must
      be non-None.
gradient_ratio_epsilon: An epsilon to add to the adversarial loss
coefficient denominator, to avoid division-by-zero.
variables: List of variables to calculate gradients with respect to. If not
present, defaults to all trainable variables.
scalar_summaries: Create scalar summaries of losses.
gradient_summaries: Create gradient summaries of losses.
scope: Optional name scope.
Returns:
A floating scalar Tensor indicating the desired combined loss.
Raises:
ValueError: Malformed input.
"""
_validate_args([main_loss, adversarial_loss], weight_factor, gradient_ratio)
if variables is None:
variables = contrib_variables_lib.get_trainable_variables()
with ops.name_scope(
scope, 'adversarial_loss', values=[main_loss, adversarial_loss]):
# Compute gradients if we will need them.
if gradient_summaries or gradient_ratio is not None:
main_loss_grad_mag = _numerically_stable_global_norm(
gradients_impl.gradients(main_loss, variables))
adv_loss_grad_mag = _numerically_stable_global_norm(
gradients_impl.gradients(adversarial_loss, variables))
# Add summaries, if applicable.
if scalar_summaries:
summary.scalar('main_loss', main_loss)
summary.scalar('adversarial_loss', adversarial_loss)
if gradient_summaries:
summary.scalar('main_loss_gradients', main_loss_grad_mag)
summary.scalar('adversarial_loss_gradients', adv_loss_grad_mag)
# Combine losses in the appropriate way.
# If `weight_factor` is always `0`, avoid computing the adversarial loss
# tensor entirely.
if _used_weight((weight_factor, gradient_ratio)) == 0:
final_loss = main_loss
elif weight_factor is not None:
final_loss = (
main_loss + array_ops.stop_gradient(weight_factor) * adversarial_loss)
elif gradient_ratio is not None:
grad_mag_ratio = main_loss_grad_mag / (
adv_loss_grad_mag + gradient_ratio_epsilon)
adv_coeff = grad_mag_ratio / gradient_ratio
summary.scalar('adversarial_coefficient', adv_coeff)
final_loss = (
main_loss + array_ops.stop_gradient(adv_coeff) * adversarial_loss)
return final_loss
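# Illustrative sketch (not part of the original module): plain-Python
# arithmetic for the two combination modes described above, assuming scalar
# losses whose gradient magnitudes are the made-up values below.
def _example_combine_adversarial_loss_arithmetic():
  main_loss, adversarial_loss = 2.0, 3.0
  main_grad_mag, adv_grad_mag = 2.0, 3.0
  # 1) Fixed coefficient on the adversarial loss.
  weight_factor = 2.0
  fixed = main_loss + weight_factor * adversarial_loss                # 8.0
  # 2) Fixed ratio of gradient magnitudes.
  gradient_ratio, epsilon = 0.5, 1e-6
  adv_coeff = (main_grad_mag / (adv_grad_mag + epsilon)) / gradient_ratio
  ratio_based = main_loss + adv_coeff * adversarial_loss              # ~6.0
  return fixed, ratio_based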
def cycle_consistency_loss(data_x,
reconstructed_data_x,
data_y,
reconstructed_data_y,
scope=None,
add_summaries=False):
"""Defines the cycle consistency loss.
The cyclegan model has two partial models where `model_x2y` generator F maps
data set X to Y, `model_y2x` generator G maps data set Y to X. For a `data_x`
in data set X, we could reconstruct it by
* reconstructed_data_x = G(F(data_x))
Similarly
* reconstructed_data_y = F(G(data_y))
The cycle consistency loss is about the difference between data and
reconstructed data, namely
* loss_x2x = |data_x - G(F(data_x))| (L1-norm)
* loss_y2y = |data_y - F(G(data_y))| (L1-norm)
* loss = (loss_x2x + loss_y2y) / 2
where `loss` is the final result.
  For the L1-norm, we follow the original implementation
  (https://github.com/junyanz/CycleGAN/blob/master/models/cycle_gan_model.lua)
  and use the L1-norm of the pixel-wise error normalized by data size, so that
  `cycle_loss_weight` can be specified independently of image size.
See https://arxiv.org/abs/1703.10593 for more details.
Args:
data_x: A `Tensor` of data X.
reconstructed_data_x: A `Tensor` of reconstructed data X.
data_y: A `Tensor` of data Y.
reconstructed_data_y: A `Tensor` of reconstructed data Y.
scope: The scope for the operations performed in computing the loss.
Defaults to None.
add_summaries: Whether or not to add detailed summaries for the loss.
Defaults to False.
Returns:
A scalar `Tensor` of cycle consistency loss.
"""
with ops.name_scope(
scope,
'cycle_consistency_loss',
values=[data_x, reconstructed_data_x, data_y, reconstructed_data_y]):
loss_x2x = losses.absolute_difference(data_x, reconstructed_data_x)
loss_y2y = losses.absolute_difference(data_y, reconstructed_data_y)
loss = (loss_x2x + loss_y2y) / 2.0
if add_summaries:
summary.scalar('cycle_consistency_loss_x2x', loss_x2x)
summary.scalar('cycle_consistency_loss_y2y', loss_y2y)
summary.scalar('cycle_consistency_loss', loss)
return loss
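# Illustrative sketch (not part of the original module): the cycle-consistency
# arithmetic above in plain NumPy, using small made-up arrays.
def _example_cycle_consistency_loss_numpy():
  import numpy as np
  data_x = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
  reconstructed_x = np.array([[7.0, 8.0, 9.0], [10.0, 11.0, 12.0]])
  data_y = np.array([1.0, 9.0])
  reconstructed_y = np.array([-2.0, 3.0])
  loss_x2x = np.mean(np.abs(data_x - reconstructed_x))  # 6.0
  loss_y2y = np.mean(np.abs(data_y - reconstructed_y))  # 4.5
  return (loss_x2x + loss_y2y) / 2.0                    # 5.25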
|
tensorflow-master
|
tensorflow/contrib/gan/python/losses/python/losses_impl.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TFGAN utilities for loss functions that accept GANModel namedtuples."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.gan.python.losses.python import tuple_losses_impl
from tensorflow.contrib.gan.python.losses.python.tuple_losses_impl import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
__all__ = tuple_losses_impl.__all__
remove_undocumented(__name__, __all__)
|
tensorflow-master
|
tensorflow/contrib/gan/python/losses/python/tuple_losses.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TF-GAN utilities for loss functions that accept GANModel namedtuples.
The losses and penalties in this file all correspond to losses in
`losses_impl.py`. Losses in that file take individual arguments, whereas in this
file they take a `GANModel` tuple. For example:
losses_impl.py:
```python
def wasserstein_discriminator_loss(
discriminator_real_outputs,
discriminator_gen_outputs,
real_weights=1.0,
generated_weights=1.0,
scope=None,
loss_collection=ops.GraphKeys.LOSSES,
reduction=losses.Reduction.SUM_BY_NONZERO_WEIGHTS,
add_summaries=False)
```
tuple_losses_impl.py:
```python
def wasserstein_discriminator_loss(
gan_model,
real_weights=1.0,
generated_weights=1.0,
scope=None,
loss_collection=ops.GraphKeys.LOSSES,
reduction=losses.Reduction.SUM_BY_NONZERO_WEIGHTS,
add_summaries=False)
```
Example usage:
```python
# `tfgan.losses.wargs` losses take individual arguments.
w_loss = tfgan.losses.wargs.wasserstein_discriminator_loss(
discriminator_real_outputs,
discriminator_gen_outputs)
# `tfgan.losses` losses take GANModel namedtuples.
w_loss2 = tfgan.losses.wasserstein_discriminator_loss(gan_model)
```
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.gan.python import namedtuples
from tensorflow.contrib.gan.python.losses.python import losses_impl
from tensorflow.python.util import tf_inspect
__all__ = [
'acgan_discriminator_loss',
'acgan_generator_loss',
'least_squares_discriminator_loss',
'least_squares_generator_loss',
'modified_discriminator_loss',
'modified_generator_loss',
'minimax_discriminator_loss',
'minimax_generator_loss',
'wasserstein_discriminator_loss',
'wasserstein_generator_loss',
'wasserstein_gradient_penalty',
'mutual_information_penalty',
'combine_adversarial_loss',
'cycle_consistency_loss',
'stargan_generator_loss_wrapper',
'stargan_discriminator_loss_wrapper',
'stargan_gradient_penalty_wrapper'
]
def _args_to_gan_model(loss_fn):
"""Converts a loss taking individual args to one taking a GANModel namedtuple.
The new function has the same name as the original one.
Args:
loss_fn: A python function taking a `GANModel` object and returning a loss
Tensor calculated from that object. The shape of the loss depends on
`reduction`.
Returns:
A new function that takes a GANModel namedtuples and returns the same loss.
"""
# Match arguments in `loss_fn` to elements of `namedtuple`.
# TODO(joelshor): Properly handle `varargs` and `keywords`.
argspec = tf_inspect.getargspec(loss_fn)
defaults = argspec.defaults or []
required_args = set(argspec.args[:-len(defaults)])
args_with_defaults = argspec.args[-len(defaults):]
default_args_dict = dict(zip(args_with_defaults, defaults))
def new_loss_fn(gan_model, **kwargs): # pylint:disable=missing-docstring
def _asdict(namedtuple):
"""Returns a namedtuple as a dictionary.
This is required because `_asdict()` in Python 3.x.x is broken in classes
that inherit from `collections.namedtuple`. See
https://bugs.python.org/issue24931 for more details.
Args:
namedtuple: An object that inherits from `collections.namedtuple`.
Returns:
A dictionary version of the tuple.
"""
return {k: getattr(namedtuple, k) for k in namedtuple._fields}
gan_model_dict = _asdict(gan_model)
# Make sure non-tuple required args are supplied.
args_from_tuple = set(argspec.args).intersection(set(gan_model._fields))
required_args_not_from_tuple = required_args - args_from_tuple
for arg in required_args_not_from_tuple:
if arg not in kwargs:
raise ValueError('`%s` must be supplied to %s loss function.' % (
arg, loss_fn.__name__))
# Make sure tuple args aren't also supplied as keyword args.
ambiguous_args = set(gan_model._fields).intersection(set(kwargs.keys()))
if ambiguous_args:
raise ValueError(
'The following args are present in both the tuple and keyword args '
'for %s: %s' % (loss_fn.__name__, ambiguous_args))
# Add required args to arg dictionary.
required_args_from_tuple = required_args.intersection(args_from_tuple)
for arg in required_args_from_tuple:
assert arg not in kwargs
kwargs[arg] = gan_model_dict[arg]
# Add arguments that have defaults.
for arg in default_args_dict:
val_from_tuple = gan_model_dict[arg] if arg in gan_model_dict else None
val_from_kwargs = kwargs[arg] if arg in kwargs else None
assert not (val_from_tuple is not None and val_from_kwargs is not None)
kwargs[arg] = (val_from_tuple if val_from_tuple is not None else
val_from_kwargs if val_from_kwargs is not None else
default_args_dict[arg])
return loss_fn(**kwargs)
new_docstring = """The gan_model version of %s.""" % loss_fn.__name__
  new_loss_fn.__doc__ = new_docstring
new_loss_fn.__name__ = loss_fn.__name__
new_loss_fn.__module__ = loss_fn.__module__
return new_loss_fn
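# Illustrative sketch (not part of the original module): how a decorator like
# `_args_to_gan_model` lets an argument-style loss read its inputs from a
# namedtuple whose field names match the argument names. `ToyModel` and
# `toy_loss` are made up for illustration.
def _example_args_to_gan_model_sketch():
  import collections
  ToyModel = collections.namedtuple(
      'ToyModel', ['discriminator_real_outputs', 'discriminator_gen_outputs'])

  def toy_loss(discriminator_real_outputs, discriminator_gen_outputs,
               weights=1.0):
    return weights * (discriminator_real_outputs - discriminator_gen_outputs)

  tuple_version = _args_to_gan_model(toy_loss)
  model = ToyModel(discriminator_real_outputs=3.0,
                   discriminator_gen_outputs=1.0)
  return tuple_version(model, weights=2.0)  # 2.0 * (3.0 - 1.0) == 4.0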
# Wasserstein losses from `Wasserstein GAN` (https://arxiv.org/abs/1701.07875).
wasserstein_generator_loss = _args_to_gan_model(
losses_impl.wasserstein_generator_loss)
wasserstein_discriminator_loss = _args_to_gan_model(
losses_impl.wasserstein_discriminator_loss)
wasserstein_gradient_penalty = _args_to_gan_model(
losses_impl.wasserstein_gradient_penalty)
# ACGAN losses from `Conditional Image Synthesis With Auxiliary Classifier GANs`
# (https://arxiv.org/abs/1610.09585).
acgan_discriminator_loss = _args_to_gan_model(
losses_impl.acgan_discriminator_loss)
acgan_generator_loss = _args_to_gan_model(
losses_impl.acgan_generator_loss)
# Original losses from `Generative Adversarial Nets`
# (https://arxiv.org/abs/1406.2661).
minimax_discriminator_loss = _args_to_gan_model(
losses_impl.minimax_discriminator_loss)
minimax_generator_loss = _args_to_gan_model(
losses_impl.minimax_generator_loss)
modified_discriminator_loss = _args_to_gan_model(
losses_impl.modified_discriminator_loss)
modified_generator_loss = _args_to_gan_model(
losses_impl.modified_generator_loss)
# Least Squares loss from `Least Squares Generative Adversarial Networks`
# (https://arxiv.org/abs/1611.04076).
least_squares_generator_loss = _args_to_gan_model(
losses_impl.least_squares_generator_loss)
least_squares_discriminator_loss = _args_to_gan_model(
losses_impl.least_squares_discriminator_loss)
# InfoGAN loss from `InfoGAN: Interpretable Representation Learning by
# Information Maximizing Generative Adversarial Nets`
# https://arxiv.org/abs/1606.03657
mutual_information_penalty = _args_to_gan_model(
losses_impl.mutual_information_penalty)
def combine_adversarial_loss(gan_loss,
gan_model,
non_adversarial_loss,
weight_factor=None,
gradient_ratio=None,
gradient_ratio_epsilon=1e-6,
scalar_summaries=True,
gradient_summaries=True):
"""Combine adversarial loss and main loss.
Uses `combine_adversarial_loss` to combine the losses, and returns
a modified GANLoss namedtuple.
Args:
gan_loss: A GANLoss namedtuple. Assume the GANLoss.generator_loss is the
adversarial loss.
gan_model: A GANModel namedtuple. Used to access the generator's variables.
non_adversarial_loss: Same as `main_loss` from
`combine_adversarial_loss`.
weight_factor: Same as `weight_factor` from
`combine_adversarial_loss`.
gradient_ratio: Same as `gradient_ratio` from
`combine_adversarial_loss`.
gradient_ratio_epsilon: Same as `gradient_ratio_epsilon` from
`combine_adversarial_loss`.
scalar_summaries: Same as `scalar_summaries` from
`combine_adversarial_loss`.
gradient_summaries: Same as `gradient_summaries` from
`combine_adversarial_loss`.
Returns:
A modified GANLoss namedtuple, with `non_adversarial_loss` included
appropriately.
"""
combined_loss = losses_impl.combine_adversarial_loss(
non_adversarial_loss,
gan_loss.generator_loss,
weight_factor,
gradient_ratio,
gradient_ratio_epsilon,
gan_model.generator_variables,
scalar_summaries,
gradient_summaries)
return gan_loss._replace(generator_loss=combined_loss)
def cycle_consistency_loss(cyclegan_model, scope=None, add_summaries=False):
"""Defines the cycle consistency loss.
Uses `cycle_consistency_loss` to compute the cycle consistency loss for a
`cyclegan_model`.
Args:
cyclegan_model: A `CycleGANModel` namedtuple.
scope: The scope for the operations performed in computing the loss.
Defaults to None.
add_summaries: Whether or not to add detailed summaries for the loss.
Defaults to False.
Returns:
A scalar `Tensor` of cycle consistency loss.
Raises:
ValueError: If `cyclegan_model` is not a `CycleGANModel` namedtuple.
"""
if not isinstance(cyclegan_model, namedtuples.CycleGANModel):
raise ValueError(
'`cyclegan_model` must be a `CycleGANModel`. Instead, was %s.' %
type(cyclegan_model))
return losses_impl.cycle_consistency_loss(
cyclegan_model.model_x2y.generator_inputs, cyclegan_model.reconstructed_x,
cyclegan_model.model_y2x.generator_inputs, cyclegan_model.reconstructed_y,
scope, add_summaries)
def stargan_generator_loss_wrapper(loss_fn):
"""Convert a generator loss function to take a StarGANModel.
The new function has the same name as the original one.
Args:
loss_fn: A python function taking Discriminator's real/fake prediction for
generated data.
Returns:
A new function that takes a StarGANModel namedtuple and returns the same
loss.
"""
def new_loss_fn(stargan_model, **kwargs):
return loss_fn(
stargan_model.discriminator_generated_data_source_predication, **kwargs)
new_docstring = """The stargan_model version of %s.""" % loss_fn.__name__
  new_loss_fn.__doc__ = new_docstring
new_loss_fn.__name__ = loss_fn.__name__
new_loss_fn.__module__ = loss_fn.__module__
return new_loss_fn
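# Illustrative sketch (not part of the original module): using the wrapper
# above with a toy loss and a made-up namedtuple that only carries the
# StarGANModel field the wrapper reads.
def _example_stargan_generator_wrapper_sketch():
  import collections
  FakeStarGANModel = collections.namedtuple(
      'FakeStarGANModel', ['discriminator_generated_data_source_predication'])

  def toy_generator_loss(discriminator_gen_outputs, weights=1.0):
    return weights * discriminator_gen_outputs

  wrapped = stargan_generator_loss_wrapper(toy_generator_loss)
  model = FakeStarGANModel(
      discriminator_generated_data_source_predication=2.0)
  return wrapped(model, weights=3.0)  # 3.0 * 2.0 == 6.0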
def stargan_discriminator_loss_wrapper(loss_fn):
"""Convert a discriminator loss function to take a StarGANModel.
The new function has the same name as the original one.
Args:
loss_fn: A python function taking Discriminator's real/fake prediction for
real data and generated data.
Returns:
A new function that takes a StarGANModel namedtuple and returns the same
loss.
"""
def new_loss_fn(stargan_model, **kwargs):
return loss_fn(
stargan_model.discriminator_input_data_source_predication,
stargan_model.discriminator_generated_data_source_predication, **kwargs)
new_docstring = """The stargan_model version of %s.""" % loss_fn.__name__
  new_loss_fn.__doc__ = new_docstring
new_loss_fn.__name__ = loss_fn.__name__
new_loss_fn.__module__ = loss_fn.__module__
return new_loss_fn
def stargan_gradient_penalty_wrapper(loss_fn):
"""Convert a gradient penalty function to take a StarGANModel.
The new function has the same name as the original one.
Args:
loss_fn: A python function taking real_data, generated_data,
generator_inputs for Discriminator's condition (i.e. number of domains),
discriminator_fn, and discriminator_scope.
Returns:
A new function that takes a StarGANModel namedtuple and returns the same
loss.
"""
def new_loss_fn(stargan_model, **kwargs):
num_domains = stargan_model.input_data_domain_label.shape.as_list()[-1]
return loss_fn(
real_data=stargan_model.input_data,
generated_data=stargan_model.generated_data,
generator_inputs=num_domains,
discriminator_fn=stargan_model.discriminator_fn,
discriminator_scope=stargan_model.discriminator_scope,
**kwargs)
new_docstring = """The stargan_model version of %s.""" % loss_fn.__name__
  new_loss_fn.__doc__ = new_docstring
new_loss_fn.__name__ = loss_fn.__name__
new_loss_fn.__module__ = loss_fn.__module__
return new_loss_fn
|
tensorflow-master
|
tensorflow/contrib/gan/python/losses/python/tuple_losses_impl.py
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TFGAN grouped API. Please see README.md for details and usage."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.gan.python.losses.python import losses_impl
from tensorflow.contrib.gan.python.losses.python.losses_impl import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__, losses_impl.__all__)
|
tensorflow-master
|
tensorflow/contrib/gan/python/losses/python/losses_wargs.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TFGAN losses."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.gan.python.losses.python import losses_impl as tfgan_losses
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.ops.distributions import categorical
from tensorflow.python.ops.distributions import normal
from tensorflow.python.ops.losses import losses as tf_losses
from tensorflow.python.platform import test
# TODO(joelshor): Use `parameterized` tests when opensourced.
class _LossesTest(object):
def init_constants(self):
self._discriminator_real_outputs_np = [-5.0, 1.4, 12.5, 2.7]
self._discriminator_gen_outputs_np = [10.0, 4.4, -5.5, 3.6]
self._weights = 2.3
self._discriminator_real_outputs = constant_op.constant(
self._discriminator_real_outputs_np, dtype=dtypes.float32)
self._discriminator_gen_outputs = constant_op.constant(
self._discriminator_gen_outputs_np, dtype=dtypes.float32)
def test_generator_all_correct(self):
loss = self._g_loss_fn(self._discriminator_gen_outputs)
self.assertEqual(self._discriminator_gen_outputs.dtype, loss.dtype)
self.assertEqual(self._generator_loss_name, loss.op.name)
with self.cached_session():
self.assertAlmostEqual(self._expected_g_loss, loss.eval(), 5)
def test_discriminator_all_correct(self):
loss = self._d_loss_fn(
self._discriminator_real_outputs, self._discriminator_gen_outputs)
self.assertEqual(self._discriminator_gen_outputs.dtype, loss.dtype)
self.assertEqual(self._discriminator_loss_name, loss.op.name)
with self.cached_session():
self.assertAlmostEqual(self._expected_d_loss, loss.eval(), 5)
def test_generator_loss_collection(self):
self.assertEqual(0, len(ops.get_collection('collection')))
self._g_loss_fn(
self._discriminator_gen_outputs, loss_collection='collection')
self.assertEqual(1, len(ops.get_collection('collection')))
def test_discriminator_loss_collection(self):
self.assertEqual(0, len(ops.get_collection('collection')))
self._d_loss_fn(
self._discriminator_real_outputs, self._discriminator_gen_outputs,
loss_collection='collection')
self.assertEqual(1, len(ops.get_collection('collection')))
def test_generator_no_reduction(self):
loss = self._g_loss_fn(
self._discriminator_gen_outputs, reduction=tf_losses.Reduction.NONE)
self.assertAllEqual([4], loss.shape)
def test_discriminator_no_reduction(self):
loss = self._d_loss_fn(
self._discriminator_real_outputs, self._discriminator_gen_outputs,
reduction=tf_losses.Reduction.NONE)
self.assertAllEqual([4], loss.shape)
def test_generator_patch(self):
loss = self._g_loss_fn(
array_ops.reshape(self._discriminator_gen_outputs, [2, 2]))
self.assertEqual(self._discriminator_gen_outputs.dtype, loss.dtype)
with self.cached_session():
self.assertAlmostEqual(self._expected_g_loss, loss.eval(), 5)
def test_discriminator_patch(self):
loss = self._d_loss_fn(
array_ops.reshape(self._discriminator_real_outputs, [2, 2]),
array_ops.reshape(self._discriminator_gen_outputs, [2, 2]))
self.assertEqual(self._discriminator_gen_outputs.dtype, loss.dtype)
with self.cached_session():
self.assertAlmostEqual(self._expected_d_loss, loss.eval(), 5)
def test_generator_loss_with_placeholder_for_logits(self):
logits = array_ops.placeholder(dtypes.float32, shape=(None, 4))
weights = array_ops.ones_like(logits, dtype=dtypes.float32)
loss = self._g_loss_fn(logits, weights=weights)
self.assertEqual(logits.dtype, loss.dtype)
with self.cached_session() as sess:
loss = sess.run(loss,
feed_dict={
logits: [[10.0, 4.4, -5.5, 3.6]],
})
self.assertAlmostEqual(self._expected_g_loss, loss, 5)
def test_discriminator_loss_with_placeholder_for_logits(self):
logits = array_ops.placeholder(dtypes.float32, shape=(None, 4))
logits2 = array_ops.placeholder(dtypes.float32, shape=(None, 4))
real_weights = array_ops.ones_like(logits, dtype=dtypes.float32)
generated_weights = array_ops.ones_like(logits, dtype=dtypes.float32)
loss = self._d_loss_fn(
logits, logits2, real_weights=real_weights,
generated_weights=generated_weights)
with self.cached_session() as sess:
loss = sess.run(loss,
feed_dict={
logits: [self._discriminator_real_outputs_np],
logits2: [self._discriminator_gen_outputs_np],
})
self.assertAlmostEqual(self._expected_d_loss, loss, 5)
def test_generator_with_python_scalar_weight(self):
loss = self._g_loss_fn(
self._discriminator_gen_outputs, weights=self._weights)
with self.cached_session():
self.assertAlmostEqual(self._expected_g_loss * self._weights,
loss.eval(), 4)
def test_discriminator_with_python_scalar_weight(self):
loss = self._d_loss_fn(
self._discriminator_real_outputs, self._discriminator_gen_outputs,
real_weights=self._weights, generated_weights=self._weights)
with self.cached_session():
self.assertAlmostEqual(self._expected_d_loss * self._weights,
loss.eval(), 4)
def test_generator_with_scalar_tensor_weight(self):
loss = self._g_loss_fn(self._discriminator_gen_outputs,
weights=constant_op.constant(self._weights))
with self.cached_session():
self.assertAlmostEqual(self._expected_g_loss * self._weights,
loss.eval(), 4)
def test_discriminator_with_scalar_tensor_weight(self):
weights = constant_op.constant(self._weights)
loss = self._d_loss_fn(
self._discriminator_real_outputs, self._discriminator_gen_outputs,
real_weights=weights, generated_weights=weights)
with self.cached_session():
self.assertAlmostEqual(self._expected_d_loss * self._weights,
loss.eval(), 4)
def test_generator_add_summaries(self):
self.assertEqual(0, len(ops.get_collection(ops.GraphKeys.SUMMARIES)))
self._g_loss_fn(self._discriminator_gen_outputs, add_summaries=True)
self.assertLess(0, len(ops.get_collection(ops.GraphKeys.SUMMARIES)))
def test_discriminator_add_summaries(self):
self.assertEqual(0, len(ops.get_collection(ops.GraphKeys.SUMMARIES)))
self._d_loss_fn(
self._discriminator_real_outputs, self._discriminator_gen_outputs,
add_summaries=True)
self.assertLess(0, len(ops.get_collection(ops.GraphKeys.SUMMARIES)))
class LeastSquaresLossTest(test.TestCase, _LossesTest):
"""Tests for least_squares_xxx_loss."""
def setUp(self):
super(LeastSquaresLossTest, self).setUp()
self.init_constants()
self._expected_g_loss = 17.69625
self._expected_d_loss = 41.73375
self._generator_loss_name = 'lsq_generator_loss/value'
self._discriminator_loss_name = 'lsq_discriminator_loss/add'
self._g_loss_fn = tfgan_losses.least_squares_generator_loss
self._d_loss_fn = tfgan_losses.least_squares_discriminator_loss
class ModifiedLossTest(test.TestCase, _LossesTest):
"""Tests for modified_xxx_loss."""
def setUp(self):
super(ModifiedLossTest, self).setUp()
self.init_constants()
self._expected_g_loss = 1.38582
self._expected_d_loss = 6.19637
self._generator_loss_name = 'generator_modified_loss/value'
self._discriminator_loss_name = 'discriminator_modified_loss/add_1'
self._g_loss_fn = tfgan_losses.modified_generator_loss
self._d_loss_fn = tfgan_losses.modified_discriminator_loss
class MinimaxLossTest(test.TestCase, _LossesTest):
"""Tests for minimax_xxx_loss."""
def setUp(self):
super(MinimaxLossTest, self).setUp()
self.init_constants()
self._expected_g_loss = -4.82408
self._expected_d_loss = 6.19637
self._generator_loss_name = 'generator_minimax_loss/Neg'
self._discriminator_loss_name = 'discriminator_minimax_loss/add_1'
self._g_loss_fn = tfgan_losses.minimax_generator_loss
self._d_loss_fn = tfgan_losses.minimax_discriminator_loss
class WassersteinLossTest(test.TestCase, _LossesTest):
"""Tests for wasserstein_xxx_loss."""
def setUp(self):
super(WassersteinLossTest, self).setUp()
self.init_constants()
self._expected_g_loss = -3.12500
self._expected_d_loss = 0.22500
self._generator_loss_name = 'generator_wasserstein_loss/value'
self._discriminator_loss_name = 'discriminator_wasserstein_loss/sub'
self._g_loss_fn = tfgan_losses.wasserstein_generator_loss
self._d_loss_fn = tfgan_losses.wasserstein_discriminator_loss
# TODO(joelshor): Use `parameterized` tests when opensourced.
# TODO(joelshor): Refactor this test to use the same code as the other losses.
class ACGANLossTest(test.TestCase):
"""Tests for wasserstein_xxx_loss."""
def setUp(self):
super(ACGANLossTest, self).setUp()
self._g_loss_fn = tfgan_losses.acgan_generator_loss
self._d_loss_fn = tfgan_losses.acgan_discriminator_loss
self._discriminator_gen_classification_logits_np = [[10.0, 4.4, -5.5, 3.6],
[-4.0, 4.4, 5.2, 4.6],
[1.1, 2.4, -3.5, 5.6],
[1.1, 2.4, -3.5, 5.6]]
self._discriminator_real_classification_logits_np = [[-2.0, 0.4, 12.5, 2.7],
[-1.2, 1.9, 12.3, 2.6],
[-2.4, -1.7, 2.5, 2.7],
[1.1, 2.4, -3.5, 5.6]]
self._one_hot_labels_np = [[0, 1, 0, 0],
[0, 0, 1, 0],
[1, 0, 0, 0],
[1, 0, 0, 0]]
self._weights = 2.3
self._discriminator_gen_classification_logits = constant_op.constant(
self._discriminator_gen_classification_logits_np, dtype=dtypes.float32)
self._discriminator_real_classification_logits = constant_op.constant(
self._discriminator_real_classification_logits_np, dtype=dtypes.float32)
self._one_hot_labels = constant_op.constant(
self._one_hot_labels_np, dtype=dtypes.float32)
self._generator_kwargs = {
'discriminator_gen_classification_logits':
self._discriminator_gen_classification_logits,
'one_hot_labels': self._one_hot_labels,
}
self._discriminator_kwargs = {
'discriminator_gen_classification_logits':
self._discriminator_gen_classification_logits,
'discriminator_real_classification_logits':
self._discriminator_real_classification_logits,
'one_hot_labels': self._one_hot_labels,
}
self._generator_loss_name = 'acgan_generator_loss/value'
self._discriminator_loss_name = 'acgan_discriminator_loss/add'
self._expected_g_loss = 3.84974
self._expected_d_loss = 9.43950
def test_generator_all_correct(self):
loss = self._g_loss_fn(**self._generator_kwargs)
self.assertEqual(
self._discriminator_gen_classification_logits.dtype, loss.dtype)
self.assertEqual(self._generator_loss_name, loss.op.name)
with self.cached_session():
self.assertAlmostEqual(self._expected_g_loss, loss.eval(), 5)
def test_discriminator_all_correct(self):
loss = self._d_loss_fn(**self._discriminator_kwargs)
self.assertEqual(
self._discriminator_gen_classification_logits.dtype, loss.dtype)
self.assertEqual(self._discriminator_loss_name, loss.op.name)
with self.cached_session():
self.assertAlmostEqual(self._expected_d_loss, loss.eval(), 5)
def test_generator_loss_collection(self):
self.assertEqual(0, len(ops.get_collection('collection')))
self._g_loss_fn(loss_collection='collection', **self._generator_kwargs)
self.assertEqual(1, len(ops.get_collection('collection')))
def test_discriminator_loss_collection(self):
self.assertEqual(0, len(ops.get_collection('collection')))
self._d_loss_fn(loss_collection='collection', **self._discriminator_kwargs)
self.assertEqual(1, len(ops.get_collection('collection')))
def test_generator_no_reduction(self):
loss = self._g_loss_fn(
reduction=tf_losses.Reduction.NONE, **self._generator_kwargs)
self.assertAllEqual([4], loss.shape)
def test_discriminator_no_reduction(self):
loss = self._d_loss_fn(
reduction=tf_losses.Reduction.NONE, **self._discriminator_kwargs)
self.assertAllEqual([4], loss.shape)
def test_generator_patch(self):
patch_args = {x: array_ops.reshape(y, [2, 2, 4]) for x, y in
self._generator_kwargs.items()}
loss = self._g_loss_fn(**patch_args)
with self.cached_session():
self.assertAlmostEqual(self._expected_g_loss, loss.eval(), 5)
def test_discriminator_patch(self):
patch_args = {x: array_ops.reshape(y, [2, 2, 4]) for x, y in
self._discriminator_kwargs.items()}
loss = self._d_loss_fn(**patch_args)
with self.cached_session():
self.assertAlmostEqual(self._expected_d_loss, loss.eval(), 5)
def test_generator_loss_with_placeholder_for_logits(self):
gen_logits = array_ops.placeholder(dtypes.float32, shape=(None, 4))
one_hot_labels = array_ops.placeholder(dtypes.int32, shape=(None, 4))
loss = self._g_loss_fn(gen_logits, one_hot_labels)
with self.cached_session() as sess:
loss = sess.run(
loss, feed_dict={
gen_logits: self._discriminator_gen_classification_logits_np,
one_hot_labels: self._one_hot_labels_np,
})
self.assertAlmostEqual(self._expected_g_loss, loss, 5)
def test_discriminator_loss_with_placeholder_for_logits_and_weights(self):
gen_logits = array_ops.placeholder(dtypes.float32, shape=(None, 4))
real_logits = array_ops.placeholder(dtypes.float32, shape=(None, 4))
one_hot_labels = array_ops.placeholder(dtypes.int32, shape=(None, 4))
loss = self._d_loss_fn(gen_logits, real_logits, one_hot_labels)
with self.cached_session() as sess:
loss = sess.run(
loss, feed_dict={
gen_logits: self._discriminator_gen_classification_logits_np,
real_logits: self._discriminator_real_classification_logits_np,
one_hot_labels: self._one_hot_labels_np,
})
self.assertAlmostEqual(self._expected_d_loss, loss, 5)
def test_generator_with_python_scalar_weight(self):
loss = self._g_loss_fn(weights=self._weights, **self._generator_kwargs)
with self.cached_session():
self.assertAlmostEqual(self._expected_g_loss * self._weights,
loss.eval(), 4)
def test_discriminator_with_python_scalar_weight(self):
loss = self._d_loss_fn(
real_weights=self._weights, generated_weights=self._weights,
**self._discriminator_kwargs)
with self.cached_session():
self.assertAlmostEqual(self._expected_d_loss * self._weights,
loss.eval(), 4)
def test_generator_with_scalar_tensor_weight(self):
loss = self._g_loss_fn(
weights=constant_op.constant(self._weights), **self._generator_kwargs)
with self.cached_session():
self.assertAlmostEqual(self._expected_g_loss * self._weights,
loss.eval(), 4)
def test_discriminator_with_scalar_tensor_weight(self):
weights = constant_op.constant(self._weights)
loss = self._d_loss_fn(real_weights=weights, generated_weights=weights,
**self._discriminator_kwargs)
with self.cached_session():
self.assertAlmostEqual(self._expected_d_loss * self._weights,
loss.eval(), 4)
def test_generator_add_summaries(self):
self.assertEqual(0, len(ops.get_collection(ops.GraphKeys.SUMMARIES)))
self._g_loss_fn(add_summaries=True, **self._generator_kwargs)
self.assertLess(0, len(ops.get_collection(ops.GraphKeys.SUMMARIES)))
def test_discriminator_add_summaries(self):
self.assertEqual(0, len(ops.get_collection(ops.GraphKeys.SUMMARIES)))
self._d_loss_fn(add_summaries=True, **self._discriminator_kwargs)
self.assertLess(0, len(ops.get_collection(ops.GraphKeys.SUMMARIES)))
class _PenaltyTest(object):
def test_all_correct(self):
loss = self._penalty_fn(**self._kwargs)
self.assertEqual(self._expected_dtype, loss.dtype)
    # NOTE: Op names will change; it is inappropriate to include them in tests.
# See go/tf-breaking-change.
# self.assertEqual(self._expected_op_name, loss.op.name)
with self.cached_session():
variables.global_variables_initializer().run()
self.assertAlmostEqual(self._expected_loss, loss.eval(), 6)
def test_loss_collection(self):
self.assertEqual(0, len(ops.get_collection('collection')))
self._penalty_fn(loss_collection='collection', **self._kwargs)
self.assertEqual(1, len(ops.get_collection('collection')))
def test_no_reduction(self):
loss = self._penalty_fn(reduction=tf_losses.Reduction.NONE, **self._kwargs)
self.assertAllEqual([self._batch_size], loss.shape)
def test_python_scalar_weight(self):
loss = self._penalty_fn(weights=2.3, **self._kwargs)
with self.cached_session():
variables.global_variables_initializer().run()
self.assertAlmostEqual(self._expected_loss * 2.3, loss.eval(), 3)
def test_scalar_tensor_weight(self):
loss = self._penalty_fn(weights=constant_op.constant(2.3), **self._kwargs)
with self.cached_session():
variables.global_variables_initializer().run()
self.assertAlmostEqual(self._expected_loss * 2.3, loss.eval(), 3)
class GradientPenaltyTest(test.TestCase, _PenaltyTest):
"""Tests for wasserstein_gradient_penalty."""
def setUp(self):
super(GradientPenaltyTest, self).setUp()
self._penalty_fn = tfgan_losses.wasserstein_gradient_penalty
self._generated_data_np = [[3.1, 2.3, -12.3, 32.1]]
self._real_data_np = [[-12.3, 23.2, 16.3, -43.2]]
self._expected_dtype = dtypes.float32
with variable_scope.variable_scope('fake_scope') as self._scope:
self._discriminator_fn(0.0, 0.0)
self._kwargs = {
'generated_data': constant_op.constant(
self._generated_data_np, dtype=self._expected_dtype),
'real_data': constant_op.constant(
self._real_data_np, dtype=self._expected_dtype),
'generator_inputs': None,
'discriminator_fn': self._discriminator_fn,
'discriminator_scope': self._scope,
}
self._expected_loss = 9.00000
self._expected_op_name = 'wasserstein_gradient_penalty/value'
self._batch_size = 1
def _discriminator_fn(self, inputs, _):
ops.add_to_collection('fake_update_ops', constant_op.constant(1.0))
return variable_scope.get_variable('dummy_d', initializer=2.0) * inputs
def test_loss_with_placeholder(self):
generated_data = array_ops.placeholder(dtypes.float32, shape=(None, None))
real_data = array_ops.placeholder(dtypes.float32, shape=(None, None))
loss = tfgan_losses.wasserstein_gradient_penalty(
generated_data,
real_data,
self._kwargs['generator_inputs'],
self._kwargs['discriminator_fn'],
self._kwargs['discriminator_scope'])
self.assertEqual(generated_data.dtype, loss.dtype)
with self.cached_session() as sess:
variables.global_variables_initializer().run()
loss = sess.run(loss,
feed_dict={
generated_data: self._generated_data_np,
real_data: self._real_data_np,
})
self.assertAlmostEqual(self._expected_loss, loss, 5)
def test_loss_using_one_sided_mode(self):
generated_data = array_ops.placeholder(dtypes.float32, shape=(None, None))
real_data = array_ops.placeholder(dtypes.float32, shape=(None, None))
loss = tfgan_losses.wasserstein_gradient_penalty(
generated_data,
real_data,
self._kwargs['generator_inputs'],
self._kwargs['discriminator_fn'],
self._kwargs['discriminator_scope'],
one_sided=True)
self.assertEqual(generated_data.dtype, loss.dtype)
with self.cached_session() as sess:
variables.global_variables_initializer().run()
loss = sess.run(loss,
feed_dict={
generated_data: self._generated_data_np,
real_data: self._real_data_np,
})
self.assertAlmostEqual(self._expected_loss, loss, 5)
def test_loss_with_gradient_norm_target(self):
"""Test loss value with non default gradient norm target."""
generated_data = array_ops.placeholder(dtypes.float32, shape=(None, None))
real_data = array_ops.placeholder(dtypes.float32, shape=(None, None))
loss = tfgan_losses.wasserstein_gradient_penalty(
generated_data,
real_data,
self._kwargs['generator_inputs'],
self._kwargs['discriminator_fn'],
self._kwargs['discriminator_scope'],
target=2.0)
with self.cached_session() as sess:
variables.global_variables_initializer().run()
loss = sess.run(
loss,
feed_dict={
generated_data: self._generated_data_np,
real_data: self._real_data_np,
})
self.assertAlmostEqual(1.0, loss, 5)
def test_reuses_scope(self):
"""Test that gradient penalty reuses discriminator scope."""
num_vars = len(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES))
tfgan_losses.wasserstein_gradient_penalty(**self._kwargs)
self.assertEqual(
num_vars, len(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)))
def test_works_with_get_collection(self):
"""Tests that gradient penalty works inside other scopes."""
# We ran the discriminator once in the setup, so there should be an op
# already in the collection.
self.assertEqual(1, len(ops.get_collection(
'fake_update_ops', self._kwargs['discriminator_scope'].name)))
# Make sure the op is added to the collection even if it's in a name scope.
with ops.name_scope('loss'):
tfgan_losses.wasserstein_gradient_penalty(**self._kwargs)
self.assertEqual(2, len(ops.get_collection(
'fake_update_ops', self._kwargs['discriminator_scope'].name)))
# Make sure the op is added to the collection even if it's in a variable
# scope.
with variable_scope.variable_scope('loss_vscope'):
tfgan_losses.wasserstein_gradient_penalty(**self._kwargs)
self.assertEqual(3, len(ops.get_collection(
'fake_update_ops', self._kwargs['discriminator_scope'].name)))
class MutualInformationPenaltyTest(test.TestCase, _PenaltyTest):
"""Tests for mutual_information_penalty."""
def setUp(self):
super(MutualInformationPenaltyTest, self).setUp()
self._penalty_fn = tfgan_losses.mutual_information_penalty
self._structured_generator_inputs = [1.0, 2.0]
self._predicted_distributions = [categorical.Categorical(logits=[1.0, 2.0]),
normal.Normal([0.0], [1.0])]
self._expected_dtype = dtypes.float32
self._kwargs = {
'structured_generator_inputs': self._structured_generator_inputs,
'predicted_distributions': self._predicted_distributions,
}
self._expected_loss = 1.61610
self._expected_op_name = 'mutual_information_loss/mul_1'
self._batch_size = 2
class CombineAdversarialLossTest(test.TestCase):
"""Tests for combine_adversarial_loss."""
def setUp(self):
super(CombineAdversarialLossTest, self).setUp()
self._generated_data_np = [[3.1, 2.3, -12.3, 32.1]]
self._real_data_np = [[-12.3, 23.2, 16.3, -43.2]]
self._generated_data = constant_op.constant(
self._generated_data_np, dtype=dtypes.float32)
self._real_data = constant_op.constant(
self._real_data_np, dtype=dtypes.float32)
self._generated_inputs = None
self._expected_loss = 9.00000
def _test_correct_helper(self, use_weight_factor):
variable_list = [variables.Variable(1.0)]
main_loss = variable_list[0] * 2
adversarial_loss = variable_list[0] * 3
gradient_ratio_epsilon = 1e-6
if use_weight_factor:
weight_factor = constant_op.constant(2.0)
gradient_ratio = None
adv_coeff = 2.0
expected_loss = 1.0 * 2 + adv_coeff * 1.0 * 3
else:
weight_factor = None
gradient_ratio = constant_op.constant(0.5)
adv_coeff = 2.0 / (3 * 0.5 + gradient_ratio_epsilon)
expected_loss = 1.0 * 2 + adv_coeff * 1.0 * 3
combined_loss = tfgan_losses.combine_adversarial_loss(
main_loss,
adversarial_loss,
weight_factor=weight_factor,
gradient_ratio=gradient_ratio,
gradient_ratio_epsilon=gradient_ratio_epsilon,
variables=variable_list)
with self.test_session(use_gpu=True):
variables.global_variables_initializer().run()
self.assertNear(expected_loss, combined_loss.eval(), 1e-5)
def test_correct_useweightfactor(self):
self._test_correct_helper(True)
def test_correct_nouseweightfactor(self):
self._test_correct_helper(False)
def _test_no_weight_skips_adversarial_loss_helper(self, use_weight_factor):
"""Test the 0 adversarial weight or grad ratio skips adversarial loss."""
main_loss = constant_op.constant(1.0)
adversarial_loss = constant_op.constant(1.0)
weight_factor = 0.0 if use_weight_factor else None
gradient_ratio = None if use_weight_factor else 0.0
combined_loss = tfgan_losses.combine_adversarial_loss(
main_loss,
adversarial_loss,
weight_factor=weight_factor,
gradient_ratio=gradient_ratio,
gradient_summaries=False)
with self.test_session(use_gpu=True):
self.assertEqual(1.0, combined_loss.eval())
def test_no_weight_skips_adversarial_loss_useweightfactor(self):
self._test_no_weight_skips_adversarial_loss_helper(True)
def test_no_weight_skips_adversarial_loss_nouseweightfactor(self):
self._test_no_weight_skips_adversarial_loss_helper(False)
def test_stable_global_norm_avoids_overflow(self):
tensors = [array_ops.ones([4]), array_ops.ones([4, 4]) * 1e19, None]
gnorm_is_inf = math_ops.is_inf(clip_ops.global_norm(tensors))
stable_gnorm_is_inf = math_ops.is_inf(
tfgan_losses._numerically_stable_global_norm(tensors))
with self.test_session(use_gpu=True):
self.assertTrue(gnorm_is_inf.eval())
self.assertFalse(stable_gnorm_is_inf.eval())
def test_stable_global_norm_unchanged(self):
"""Test that preconditioning doesn't change global norm value."""
random_seed.set_random_seed(1234)
tensors = [random_ops.random_uniform([3]*i, -10.0, 10.0) for i in range(6)]
gnorm = clip_ops.global_norm(tensors)
precond_gnorm = tfgan_losses._numerically_stable_global_norm(tensors)
with self.test_session(use_gpu=True) as sess:
for _ in range(10): # spot check closeness on more than one sample.
gnorm_np, precond_gnorm_np = sess.run([gnorm, precond_gnorm])
self.assertNear(gnorm_np, precond_gnorm_np, 1e-4)
class CycleConsistencyLossTest(test.TestCase):
"""Tests for cycle_consistency_loss."""
def setUp(self):
super(CycleConsistencyLossTest, self).setUp()
self._data_x_np = [[1.0, 2, 3], [4, 5, 6]]
self._reconstructed_data_x_np = [[7.0, 8, 9], [10, 11, 12]]
self._data_y_np = [1.0, 9]
self._reconstructed_data_y_np = [-2.0, 3]
self._data_x = constant_op.constant(self._data_x_np, dtype=dtypes.float32)
self._reconstructed_data_x = constant_op.constant(
self._reconstructed_data_x_np, dtype=dtypes.float32)
self._data_y = constant_op.constant(self._data_y_np, dtype=dtypes.float32)
self._reconstructed_data_y = constant_op.constant(
self._reconstructed_data_y_np, dtype=dtypes.float32)
def test_correct_loss(self):
loss = tfgan_losses.cycle_consistency_loss(
self._data_x, self._reconstructed_data_x, self._data_y,
self._reconstructed_data_y)
with self.test_session(use_gpu=True):
variables.global_variables_initializer().run()
self.assertNear(5.25, loss.eval(), 1e-5)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/gan/python/losses/python/losses_impl_test.py
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TF-GAN estimator module.
GANEstimator provides all the infrastructure support of a TensorFlow Estimator
with the feature support of TF-GAN.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Collapse `estimator` into a single namespace.
# pylint: disable=unused-import,wildcard-import
from tensorflow.contrib.gan.python.estimator.python import gan_estimator
from tensorflow.contrib.gan.python.estimator.python import head
from tensorflow.contrib.gan.python.estimator.python import latent_gan_estimator
from tensorflow.contrib.gan.python.estimator.python import stargan_estimator
from tensorflow.contrib.gan.python.estimator.python import tpu_gan_estimator
from tensorflow.contrib.gan.python.estimator.python.gan_estimator import *
from tensorflow.contrib.gan.python.estimator.python.head import *
from tensorflow.contrib.gan.python.estimator.python.latent_gan_estimator import *
from tensorflow.contrib.gan.python.estimator.python.stargan_estimator import *
from tensorflow.contrib.gan.python.estimator.python.tpu_gan_estimator import *
# pylint: enable=unused-import,wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = ([
'gan_estimator',
'stargan_estimator',
'tpu_gan_estimator',
'latent_gan_estimator',
'head',
] + gan_estimator.__all__ + stargan_estimator.__all__ + head.__all__ +
tpu_gan_estimator.__all__ + latent_gan_estimator.__all__)
remove_undocumented(__name__, _allowed_symbols)
|
tensorflow-master
|
tensorflow/contrib/gan/python/estimator/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A TF-GAN-backed GAN Estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from tensorflow.contrib.gan.python import namedtuples as tfgan_tuples
from tensorflow.contrib.gan.python import train as tfgan_train
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.estimator.canned import head
from tensorflow.python.estimator.export import export_output
from tensorflow.python.framework import ops
from tensorflow.python.ops import metrics as metrics_lib
from tensorflow.python.util import deprecation
__all__ = [
'GANHead',
'gan_head',
]
def _summary_key(head_name, val):
return '%s/%s' % (val, head_name) if head_name else val
@deprecation.deprecated(
None, 'Please use tf.contrib.gan.GANEstimator without explicitly making a '
'GANHead.')
def gan_head(generator_loss_fn, discriminator_loss_fn, generator_optimizer,
discriminator_optimizer, use_loss_summaries=True,
get_hooks_fn=tfgan_train.get_sequential_train_hooks(),
get_eval_metric_ops_fn=None, name=None):
"""Creates a `GANHead`.
Args:
generator_loss_fn: A TFGAN loss function for the generator. Takes a
`GANModel` and returns a scalar.
discriminator_loss_fn: Same as `generator_loss_fn`, but for the
discriminator.
generator_optimizer: The optimizer for generator updates.
discriminator_optimizer: Same as `generator_optimizer`, but for the
discriminator updates.
use_loss_summaries: If `True`, add loss summaries. If `False`, does not.
If `None`, uses defaults.
get_hooks_fn: A function that takes a `GANTrainOps` tuple and returns a
list of hooks.
get_eval_metric_ops_fn: A function that takes a `GANModel`, and returns a
dict of metric results keyed by name. The output of this function is
passed into `tf.estimator.EstimatorSpec` during evaluation.
name: name of the head. If provided, summary and metrics keys will be
suffixed by `"/" + name`.
Returns:
An instance of `GANHead`.
"""
return GANHead(generator_loss_fn=generator_loss_fn,
discriminator_loss_fn=discriminator_loss_fn,
generator_optimizer=generator_optimizer,
discriminator_optimizer=discriminator_optimizer,
use_loss_summaries=use_loss_summaries,
get_hooks_fn=get_hooks_fn,
get_eval_metric_ops_fn=get_eval_metric_ops_fn,
name=name)
class GANHead(head._Head): # pylint: disable=protected-access
"""`Head` for a GAN."""
@deprecation.deprecated(
None, 'Please use tf.contrib.gan.GANEstimator without explicitly making '
'a GANHead.')
def __init__(self, generator_loss_fn, discriminator_loss_fn,
generator_optimizer, discriminator_optimizer,
use_loss_summaries=True,
get_hooks_fn=None,
get_eval_metric_ops_fn=None,
name=None):
"""`Head` for GAN training.
Args:
generator_loss_fn: A TFGAN loss function for the generator. Takes a
`GANModel` and returns a scalar.
discriminator_loss_fn: Same as `generator_loss_fn`, but for the
discriminator.
generator_optimizer: The optimizer for generator updates.
discriminator_optimizer: Same as `generator_optimizer`, but for the
discriminator updates.
use_loss_summaries: If `True`, add loss summaries. If `False`, does not.
If `None`, uses defaults.
get_hooks_fn: A function that takes a `GANTrainOps` tuple and returns a
list of hooks. Defaults to `train.get_sequential_train_hooks()`
get_eval_metric_ops_fn: A function that takes a `GANModel`, and returns a
dict of metric results keyed by name. The output of this function is
passed into `tf.estimator.EstimatorSpec` during evaluation.
name: name of the head. If provided, summary and metrics keys will be
suffixed by `"/" + name`.
"""
if not callable(generator_loss_fn):
raise TypeError('generator_loss_fn must be callable.')
if not callable(discriminator_loss_fn):
raise TypeError('discriminator_loss_fn must be callable.')
if use_loss_summaries not in [True, False, None]:
raise ValueError('use_loss_summaries must be True, False or None.')
if get_hooks_fn is not None and not callable(get_hooks_fn):
raise TypeError('get_hooks_fn must be callable.')
if name is not None and not isinstance(name, str):
raise TypeError('name must be string.')
if get_hooks_fn is None:
get_hooks_fn = tfgan_train.get_sequential_train_hooks()
if use_loss_summaries in [True, False]:
generator_loss_fn = functools.partial(
generator_loss_fn, add_summaries=use_loss_summaries)
discriminator_loss_fn = functools.partial(
discriminator_loss_fn, add_summaries=use_loss_summaries)
self._generator_loss_fn = generator_loss_fn
self._discriminator_loss_fn = discriminator_loss_fn
self._generator_optimizer = generator_optimizer
self._discriminator_optimizer = discriminator_optimizer
self._get_hooks_fn = get_hooks_fn
self._get_eval_metric_ops_fn = get_eval_metric_ops_fn
self._name = name
@property
def name(self):
return self._name
@property
def logits_dimension(self):
return None
def create_loss(self, features, mode, logits, labels):
"""Returns a GANLoss tuple from the provided GANModel.
See `Head` for more details.
Args:
features: Input `dict` of `Tensor` objects. Unused.
mode: Estimator's `ModeKeys`.
logits: A GANModel tuple.
labels: Must be `None`.
Returns:
A GANLoss tuple.
"""
_validate_logits_and_labels(logits, labels)
del mode, labels, features # unused for this head.
gan_model = logits # rename variable for clarity
return tfgan_tuples.GANLoss(
generator_loss=self._generator_loss_fn(gan_model),
discriminator_loss=self._discriminator_loss_fn(gan_model))
def create_estimator_spec(
self, features, mode, logits, labels=None,
train_op_fn=tfgan_train.gan_train_ops):
"""Returns `EstimatorSpec` that a model_fn can return.
See `Head` for more details.
Args:
features: Must be `None`.
mode: Estimator's `ModeKeys`.
logits: A GANModel tuple.
labels: Must be `None`.
train_op_fn: Function that takes a GANModel, GANLoss, generator optimizer,
and discriminator optimizer, and returns a `GANTrainOps` tuple. For
example, this function can come from TFGAN's `train.py` library, or can
be custom.
Returns:
`EstimatorSpec`.
Raises:
ValueError: If `features` isn't `None`.
ValueError: If `train_op_fn` isn't provided in train mode.
"""
_validate_logits_and_labels(logits, labels)
if features is not None:
raise ValueError('`features` should be `None`. Instead, found: %s' %
features)
gan_model = logits # rename variable for clarity
with ops.name_scope('GANHead'):
if mode == model_fn_lib.ModeKeys.PREDICT:
return model_fn_lib.EstimatorSpec(
mode=model_fn_lib.ModeKeys.PREDICT,
predictions=gan_model.generated_data,
export_outputs={
'predict': export_output.PredictOutput(gan_model.generated_data)
})
elif mode == model_fn_lib.ModeKeys.EVAL:
gan_loss = self.create_loss(
features=None, mode=mode, logits=gan_model, labels=None)
scalar_loss = gan_loss.generator_loss + gan_loss.discriminator_loss
with ops.name_scope(None, 'metrics',
[gan_loss.generator_loss,
gan_loss.discriminator_loss]):
eval_metric_ops = {
_summary_key(self._name, 'generator_loss'):
metrics_lib.mean(gan_loss.generator_loss),
_summary_key(self._name, 'discriminator_loss'):
metrics_lib.mean(gan_loss.discriminator_loss)
}
if self._get_eval_metric_ops_fn is not None:
custom_eval_metric_ops = self._get_eval_metric_ops_fn(gan_model)
if not isinstance(custom_eval_metric_ops, dict):
raise TypeError('get_eval_metric_ops_fn must return a dict, '
'received: {}'.format(custom_eval_metric_ops))
eval_metric_ops.update(custom_eval_metric_ops)
return model_fn_lib.EstimatorSpec(
mode=model_fn_lib.ModeKeys.EVAL,
predictions=gan_model.generated_data,
loss=scalar_loss,
eval_metric_ops=eval_metric_ops)
elif mode == model_fn_lib.ModeKeys.TRAIN:
if train_op_fn is None:
raise ValueError('train_op_fn can not be None.')
gan_loss = self.create_loss(None, mode, gan_model, None)
scalar_loss = gan_loss.generator_loss + gan_loss.discriminator_loss
train_ops = train_op_fn(gan_model, gan_loss, self._generator_optimizer,
self._discriminator_optimizer)
training_hooks = self._get_hooks_fn(train_ops)
return model_fn_lib.EstimatorSpec(
loss=scalar_loss,
mode=model_fn_lib.ModeKeys.TRAIN,
train_op=train_ops.global_step_inc_op,
training_hooks=training_hooks)
else:
raise ValueError('Mode not recognized: %s' % mode)
def _validate_logits_and_labels(logits, labels):
if labels is not None:
raise ValueError('`GANHead`\'s `create_estimator_spec` input `labels` must '
'be `None`. Instead, found: %s' % labels)
if not isinstance(logits, tfgan_tuples.GANModel):
raise ValueError('`GANHead`\'s `create_estimator_spec` input `logits` must '
                     'be an instance of a `GANModel`. Instead, found: %s' %
logits)
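# --- Usage sketch (illustrative only; not part of the original module) ---
# A minimal sketch of wiring `GANHead` into a custom `model_fn`. The helper
# name `_example_gan_head_model_fn` and its arguments are hypothetical; it
# assumes the caller supplies TF-GAN-style network functions, loss functions,
# and optimizers.
def _example_gan_head_model_fn(features, labels, mode, generator_fn,
                               discriminator_fn, generator_loss_fn,
                               discriminator_loss_fn, generator_optimizer,
                               discriminator_optimizer):
  """Builds a GANModel and lets a GANHead produce the EstimatorSpec."""
  # `features` are the generator inputs and `labels` the real data, mirroring
  # the convention used by GANEstimator.
  gan_model = tfgan_train.gan_model(
      generator_fn, discriminator_fn, real_data=labels,
      generator_inputs=features)
  head_ = gan_head(
      generator_loss_fn=generator_loss_fn,
      discriminator_loss_fn=discriminator_loss_fn,
      generator_optimizer=generator_optimizer,
      discriminator_optimizer=discriminator_optimizer)
  # `logits` carries the GANModel tuple; `features` and `labels` must be None.
  return head_.create_estimator_spec(
      features=None, mode=mode, logits=gan_model, labels=None)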
|
tensorflow-master
|
tensorflow/contrib/gan/python/estimator/python/head_impl.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A TF-GAN-backed GAN Estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import enum
from tensorflow.contrib.framework.python.ops import variables as variable_lib
from tensorflow.contrib.gan.python import namedtuples as tfgan_tuples
from tensorflow.contrib.gan.python import train as tfgan_train
from tensorflow.contrib.gan.python.eval.python import summaries as tfgan_summaries
from tensorflow.python.estimator import estimator
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.framework import ops
from tensorflow.python.ops import metrics as metrics_lib
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import tf_inspect as inspect
__all__ = [
'GANEstimator',
'SummaryType'
]
class SummaryType(enum.IntEnum):
NONE = 0
VARIABLES = 1
IMAGES = 2
IMAGE_COMPARISON = 3
_summary_type_map = {
SummaryType.VARIABLES: tfgan_summaries.add_gan_model_summaries,
SummaryType.IMAGES: tfgan_summaries.add_gan_model_image_summaries,
SummaryType.IMAGE_COMPARISON: tfgan_summaries.add_image_comparison_summaries, # pylint:disable=line-too-long
}
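# Illustrative note (not part of the original source): the `add_summaries`
# constructor argument below may be a single `SummaryType` or a list, e.g.
# `add_summaries=[SummaryType.IMAGES, SummaryType.VARIABLES]`; each entry is
# dispatched through `_summary_type_map` when the GANModel is built.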
class GANEstimator(estimator.Estimator):
"""An estimator for Generative Adversarial Networks (GANs).
  This Estimator is backed by TF-GAN. The network functions follow the TF-GAN
  API, with one exception: if either `generator_fn` or `discriminator_fn` has
  an argument called `mode`, then the tf.Estimator mode is passed in for that
  argument. This helps with operations like batch normalization, which have
  different train and evaluation behavior.
Example:
```python
import tensorflow as tf
tfgan = tf.contrib.gan
# See TF-GAN's `train.py` for a description of the generator and
# discriminator API.
def generator_fn(generator_inputs):
...
return generated_data
def discriminator_fn(data, conditioning):
...
return logits
# Create GAN estimator.
gan_estimator = tfgan.estimator.GANEstimator(
model_dir,
generator_fn=generator_fn,
discriminator_fn=discriminator_fn,
generator_loss_fn=tfgan.losses.wasserstein_generator_loss,
discriminator_loss_fn=tfgan.losses.wasserstein_discriminator_loss,
generator_optimizer=tf.compat.v1.train.AdamOptimizer(0.1, 0.5),
discriminator_optimizer=tf.compat.v1.train.AdamOptimizer(0.1, 0.5))
# Train estimator.
gan_estimator.train(train_input_fn, steps)
# Evaluate resulting estimator.
gan_estimator.evaluate(eval_input_fn)
# Generate samples from generator.
predictions = np.array([
x for x in gan_estimator.predict(predict_input_fn)])
```
"""
def __init__(self,
model_dir=None,
generator_fn=None,
discriminator_fn=None,
generator_loss_fn=None,
discriminator_loss_fn=None,
generator_optimizer=None,
discriminator_optimizer=None,
get_hooks_fn=None,
get_eval_metric_ops_fn=None,
add_summaries=None,
use_loss_summaries=True,
config=None,
warm_start_from=None,
is_chief=True):
"""Initializes a GANEstimator instance.
Args:
      model_dir: Directory to save model parameters, graph, etc. This can
        also be used to load checkpoints from the directory into an estimator
        to continue training a previously saved model.
generator_fn: A python function that takes a Tensor, Tensor list, or
Tensor dictionary as inputs and returns the outputs of the GAN
generator. See `TF-GAN` for more details and examples. Additionally, if
it has an argument called `mode`, the Estimator's `mode` will be passed
        in (e.g. TRAIN, EVAL, PREDICT). This is useful for things like batch
normalization.
discriminator_fn: A python function that takes the output of
`generator_fn` or real data in the GAN setup, and `generator_inputs`.
Outputs a Tensor in the range [-inf, inf]. See `TF-GAN` for more details
and examples.
generator_loss_fn: The loss function on the generator. Takes a `GANModel`
tuple.
discriminator_loss_fn: The loss function on the discriminator. Takes a
`GANModel` tuple.
generator_optimizer: The optimizer for generator updates, or a function
that takes no arguments and returns an optimizer. This function will
be called when the default graph is the `GANEstimator`'s graph, so
utilities like `tf.contrib.framework.get_or_create_global_step` will
work.
discriminator_optimizer: Same as `generator_optimizer`, but for the
discriminator updates.
get_hooks_fn: A function that takes a `GANTrainOps` tuple and returns a
list of hooks. These hooks are run on the generator and discriminator
train ops, and can be used to implement the GAN training scheme.
Defaults to `train.get_sequential_train_hooks()`.
get_eval_metric_ops_fn: A function that takes a `GANModel`, and returns a
dict of metric results keyed by name. The output of this function is
passed into `tf.estimator.EstimatorSpec` during evaluation.
add_summaries: `None`, a single `SummaryType`, or a list of `SummaryType`.
use_loss_summaries: If `True`, add loss summaries. If `False`, does not.
If `None`, uses defaults.
config: `RunConfig` object to configure the runtime settings.
warm_start_from: A filepath to a checkpoint or saved model, or a
WarmStartSettings object to configure initialization.
is_chief: Whether or not this Estimator is running on a chief or worker.
Needs to be set appropriately if using SyncReplicasOptimizers.
Raises:
ValueError: If loss functions aren't callable.
ValueError: If `use_loss_summaries` isn't boolean or `None`.
ValueError: If `get_hooks_fn` isn't callable or `None`.
"""
if not callable(generator_loss_fn):
raise ValueError('generator_loss_fn must be callable.')
if not callable(discriminator_loss_fn):
raise ValueError('discriminator_loss_fn must be callable.')
if use_loss_summaries not in [True, False, None]:
raise ValueError('use_loss_summaries must be True, False or None.')
if get_hooks_fn is not None and not callable(get_hooks_fn):
raise TypeError('get_hooks_fn must be callable.')
def _model_fn(features, labels, mode):
"""GANEstimator model function."""
if mode not in [model_fn_lib.ModeKeys.TRAIN, model_fn_lib.ModeKeys.EVAL,
model_fn_lib.ModeKeys.PREDICT]:
raise ValueError('Mode not recognized: %s' % mode)
real_data = labels # rename inputs for clarity
generator_inputs = features # rename inputs for clarity
# Make GANModel, which encapsulates the GAN model architectures.
gan_model = _get_gan_model(
mode, generator_fn, discriminator_fn, real_data, generator_inputs,
add_summaries)
# Make the EstimatorSpec, which incorporates the GANModel, losses, eval
# metrics, and optimizers (if required).
return _get_estimator_spec(
mode, gan_model, generator_loss_fn, discriminator_loss_fn,
get_eval_metric_ops_fn, generator_optimizer, discriminator_optimizer,
get_hooks_fn, use_loss_summaries, is_chief)
super(GANEstimator, self).__init__(
model_fn=_model_fn, model_dir=model_dir, config=config,
warm_start_from=warm_start_from)
def _get_gan_model(
mode, generator_fn, discriminator_fn, real_data, generator_inputs,
add_summaries, generator_scope='Generator'):
"""Makes the GANModel tuple, which encapsulates the GAN model architecture."""
if mode == model_fn_lib.ModeKeys.PREDICT:
if real_data is not None:
raise ValueError('`labels` must be `None` when mode is `predict`. '
'Instead, found %s' % real_data)
gan_model = _make_prediction_gan_model(
generator_inputs, generator_fn, generator_scope)
else: # model_fn_lib.ModeKeys.TRAIN or model_fn_lib.ModeKeys.EVAL
gan_model = _make_gan_model(
generator_fn, discriminator_fn, real_data, generator_inputs,
generator_scope, add_summaries, mode)
return gan_model
def _get_estimator_spec(
mode, gan_model, generator_loss_fn, discriminator_loss_fn,
get_eval_metric_ops_fn, generator_optimizer, discriminator_optimizer,
get_hooks_fn=None, use_loss_summaries=True, is_chief=True):
"""Get the EstimatorSpec for the current mode."""
if mode == model_fn_lib.ModeKeys.PREDICT:
estimator_spec = model_fn_lib.EstimatorSpec(
mode=mode, predictions=gan_model.generated_data)
else:
gan_loss = tfgan_tuples.GANLoss(
generator_loss=generator_loss_fn(
gan_model, add_summaries=use_loss_summaries),
discriminator_loss=discriminator_loss_fn(
gan_model, add_summaries=use_loss_summaries))
if mode == model_fn_lib.ModeKeys.EVAL:
estimator_spec = _get_eval_estimator_spec(
gan_model, gan_loss, get_eval_metric_ops_fn)
else: # model_fn_lib.ModeKeys.TRAIN:
if callable(generator_optimizer):
generator_optimizer = generator_optimizer()
if callable(discriminator_optimizer):
discriminator_optimizer = discriminator_optimizer()
get_hooks_fn = get_hooks_fn or tfgan_train.get_sequential_train_hooks()
estimator_spec = _get_train_estimator_spec(
gan_model, gan_loss, generator_optimizer, discriminator_optimizer,
get_hooks_fn, is_chief=is_chief)
return estimator_spec
def _make_gan_model(generator_fn, discriminator_fn, real_data,
generator_inputs, generator_scope, add_summaries, mode):
"""Construct a `GANModel`, and optionally pass in `mode`."""
# If network functions have an argument `mode`, pass mode to it.
if 'mode' in inspect.getargspec(generator_fn).args:
generator_fn = functools.partial(generator_fn, mode=mode)
if 'mode' in inspect.getargspec(discriminator_fn).args:
discriminator_fn = functools.partial(discriminator_fn, mode=mode)
gan_model = tfgan_train.gan_model(
generator_fn,
discriminator_fn,
real_data,
generator_inputs,
generator_scope=generator_scope,
check_shapes=False)
if add_summaries:
if not isinstance(add_summaries, (tuple, list)):
add_summaries = [add_summaries]
with ops.name_scope(None):
for summary_type in add_summaries:
_summary_type_map[summary_type](gan_model)
return gan_model
def _make_prediction_gan_model(generator_inputs, generator_fn, generator_scope):
"""Make a `GANModel` from just the generator."""
# If `generator_fn` has an argument `mode`, pass mode to it.
if 'mode' in inspect.getargspec(generator_fn).args:
generator_fn = functools.partial(generator_fn,
mode=model_fn_lib.ModeKeys.PREDICT)
with variable_scope.variable_scope(generator_scope) as gen_scope:
generator_inputs = tfgan_train._convert_tensor_or_l_or_d(generator_inputs) # pylint:disable=protected-access
generated_data = generator_fn(generator_inputs)
generator_variables = variable_lib.get_trainable_variables(gen_scope)
return tfgan_tuples.GANModel(
generator_inputs,
generated_data,
generator_variables,
gen_scope,
generator_fn,
real_data=None,
discriminator_real_outputs=None,
discriminator_gen_outputs=None,
discriminator_variables=None,
discriminator_scope=None,
discriminator_fn=None)
def _get_eval_estimator_spec(gan_model, gan_loss, get_eval_metric_ops_fn=None,
name=None):
"""Return an EstimatorSpec for the eval case."""
scalar_loss = gan_loss.generator_loss + gan_loss.discriminator_loss
with ops.name_scope(None, 'metrics',
[gan_loss.generator_loss,
gan_loss.discriminator_loss]):
def _summary_key(head_name, val):
return '%s/%s' % (val, head_name) if head_name else val
eval_metric_ops = {
_summary_key(name, 'generator_loss'):
metrics_lib.mean(gan_loss.generator_loss),
_summary_key(name, 'discriminator_loss'):
metrics_lib.mean(gan_loss.discriminator_loss)
}
if get_eval_metric_ops_fn is not None:
custom_eval_metric_ops = get_eval_metric_ops_fn(gan_model)
if not isinstance(custom_eval_metric_ops, dict):
raise TypeError('get_eval_metric_ops_fn must return a dict, '
'received: {}'.format(custom_eval_metric_ops))
eval_metric_ops.update(custom_eval_metric_ops)
return model_fn_lib.EstimatorSpec(
mode=model_fn_lib.ModeKeys.EVAL,
predictions=gan_model.generated_data,
loss=scalar_loss,
eval_metric_ops=eval_metric_ops)
def _get_train_estimator_spec(
gan_model, gan_loss, generator_optimizer, discriminator_optimizer,
get_hooks_fn, train_op_fn=tfgan_train.gan_train_ops, is_chief=True):
"""Return an EstimatorSpec for the train case."""
scalar_loss = gan_loss.generator_loss + gan_loss.discriminator_loss
train_ops = train_op_fn(gan_model, gan_loss, generator_optimizer,
discriminator_optimizer, is_chief=is_chief)
training_hooks = get_hooks_fn(train_ops)
return model_fn_lib.EstimatorSpec(
loss=scalar_loss,
mode=model_fn_lib.ModeKeys.TRAIN,
train_op=train_ops.global_step_inc_op,
training_hooks=training_hooks)
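# --- Usage sketch (illustrative only; not part of the original module) ---
# A minimal example of a `get_eval_metric_ops_fn` that can be passed to the
# `GANEstimator` constructor above. It receives the `GANModel` tuple at eval
# time and must return a dict of metric ops keyed by name; the metric shown
# (MSE between real and generated data) is only an illustration.
def _example_get_eval_metric_ops_fn(gan_model):
  return {
      'mse_real_vs_generated':
          metrics_lib.mean_squared_error(gan_model.real_data,
                                         gan_model.generated_data),
  }
# e.g. GANEstimator(..., get_eval_metric_ops_fn=_example_get_eval_metric_ops_fn)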
|
tensorflow-master
|
tensorflow/contrib/gan/python/estimator/python/gan_estimator_impl.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TF-GAN's TPU Estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import tempfile
from absl.testing import parameterized
import numpy as np
from tensorflow.contrib import layers
from tensorflow.contrib.gan.python import namedtuples as tfgan_tuples
from tensorflow.contrib.gan.python.estimator.python import tpu_gan_estimator_impl as estimator
from tensorflow.contrib.gan.python.losses.python import tuple_losses as losses
from tensorflow.contrib.tpu.python.tpu import tpu_config
from tensorflow.contrib.tpu.python.tpu import tpu_estimator
from tensorflow.contrib.tpu.python.tpu import tpu_optimizer
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.estimator.estimator import WarmStartSettings
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework.errors_impl import NotFoundError
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import metrics as metrics_lib
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import flags
from tensorflow.python.platform import test
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import learning_rate_decay
from tensorflow.python.training import training
from tensorflow.python.training import training_util
FLAGS = flags.FLAGS
flags.DEFINE_bool('use_tpu', False, 'Whether to run test on TPU or not.')
def generator_fn(noise, mode):
del mode
return layers.fully_connected(noise, tensor_shape.dimension_value(
noise.shape[1]))
def discriminator_fn(data, unused_conditioning, mode):
del unused_conditioning, mode
return layers.fully_connected(data, 1)
def get_dummy_gan_model():
# TODO(joelshor): Find a better way of creating a variable scope.
with variable_scope.variable_scope('generator') as gen_scope:
gen_var = variable_scope.get_variable('dummy_var', initializer=0.0)
with variable_scope.variable_scope('discriminator') as dis_scope:
dis_var = variable_scope.get_variable('dummy_var', initializer=0.0)
return tfgan_tuples.GANModel(
generator_inputs=None,
generated_data=array_ops.ones([3, 4]),
generator_variables=[gen_var],
generator_scope=gen_scope,
generator_fn=None,
real_data=array_ops.zeros([3, 4]),
discriminator_real_outputs=array_ops.ones([1, 2, 3]) * dis_var,
discriminator_gen_outputs=array_ops.ones([1, 2, 3]) * gen_var * dis_var,
discriminator_variables=[dis_var],
discriminator_scope=dis_scope,
discriminator_fn=None)
def get_metrics(generator_inputs, generated_data, real_data,
discriminator_real_outputs, discriminator_gen_outputs):
del generator_inputs, discriminator_real_outputs, discriminator_gen_outputs
return {
'mse_custom_metric': metrics_lib.mean_squared_error(
real_data, generated_data)
}
class GetTPUEstimatorSpecTest(test.TestCase, parameterized.TestCase):
"""Tests that the EstimatorSpec is constructed appropriately."""
@classmethod
def setUpClass(cls):
super(GetTPUEstimatorSpecTest, cls).setUpClass()
cls._generator_optimizer = tpu_optimizer.CrossShardOptimizer(
training.GradientDescentOptimizer(1.0))
cls._discriminator_optimizer = tpu_optimizer.CrossShardOptimizer(
training.GradientDescentOptimizer(1.0))
@parameterized.named_parameters(
('joint_train', model_fn_lib.ModeKeys.TRAIN, True),
('train_sequential', model_fn_lib.ModeKeys.TRAIN, False),
('eval', model_fn_lib.ModeKeys.EVAL, None),
('predict', model_fn_lib.ModeKeys.PREDICT, None))
def test_get_estimator_spec(self, mode, joint_train):
with ops.Graph().as_default():
self._gan_model = get_dummy_gan_model()
spec = estimator._get_estimator_spec(
mode,
self._gan_model,
generator_loss_fn=losses.wasserstein_generator_loss,
discriminator_loss_fn=losses.wasserstein_discriminator_loss,
get_eval_metric_ops_fn=get_metrics,
generator_optimizer=self._generator_optimizer,
discriminator_optimizer=self._discriminator_optimizer,
joint_train=joint_train,
is_on_tpu=FLAGS.use_tpu,
gan_train_steps=tfgan_tuples.GANTrainSteps(1, 1))
self.assertIsInstance(spec, tpu_estimator.TPUEstimatorSpec)
self.assertEqual(mode, spec.mode)
if mode == model_fn_lib.ModeKeys.PREDICT:
self.assertEqual({'generated_data': self._gan_model.generated_data},
spec.predictions)
elif mode == model_fn_lib.ModeKeys.TRAIN:
self.assertShapeEqual(np.array(0), spec.loss) # must be a scalar
self.assertIsNotNone(spec.train_op)
self.assertIsNotNone(spec.training_hooks)
elif mode == model_fn_lib.ModeKeys.EVAL:
self.assertEqual(self._gan_model.generated_data, spec.predictions)
self.assertShapeEqual(np.array(0), spec.loss) # must be a scalar
self.assertIsNotNone(spec.eval_metrics)
class TPUGANEstimatorIntegrationTest(test.TestCase, parameterized.TestCase):
def setUp(self):
super(TPUGANEstimatorIntegrationTest, self).setUp()
self._model_dir = tempfile.mkdtemp()
self._config = tpu_config.RunConfig(model_dir=self._model_dir)
def tearDown(self):
super(TPUGANEstimatorIntegrationTest, self).tearDown()
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _test_complete_flow(
self, train_input_fn, eval_input_fn, predict_input_fn, prediction_size,
lr_decay=False, joint_train=True):
def make_opt():
gstep = training_util.get_or_create_global_step()
lr = learning_rate_decay.exponential_decay(1.0, gstep, 10, 0.9)
return training.GradientDescentOptimizer(lr)
gopt = make_opt if lr_decay else training.GradientDescentOptimizer(1.0)
dopt = make_opt if lr_decay else training.GradientDescentOptimizer(1.0)
est = estimator.TPUGANEstimator(
generator_fn=generator_fn,
discriminator_fn=discriminator_fn,
generator_loss_fn=losses.wasserstein_generator_loss,
discriminator_loss_fn=losses.wasserstein_discriminator_loss,
generator_optimizer=gopt,
discriminator_optimizer=dopt,
joint_train=joint_train,
get_eval_metric_ops_fn=get_metrics,
train_batch_size=4,
eval_batch_size=10,
predict_batch_size=8,
use_tpu=FLAGS.use_tpu,
config=self._config)
# Train.
num_steps_train = 10
est.train(train_input_fn, steps=num_steps_train)
# Evaluate.
num_steps_eval = 2
scores = est.evaluate(eval_input_fn, steps=num_steps_eval)
self.assertIn(ops.GraphKeys.GLOBAL_STEP, scores)
self.assertIn('loss', scores)
self.assertEqual(scores['discriminator_loss'] + scores['generator_loss'],
scores['loss'])
self.assertIn('mse_custom_metric', scores)
# Predict.
predictions = np.array([x['generated_data'] for x in
est.predict(predict_input_fn)])
self.assertAllEqual(prediction_size, predictions.shape)
@parameterized.named_parameters(
('joint_train', True, False, False),
('train_sequential', False, False, False),
('lr_decay', False, True, False),
('train_sequential_ds', False, False, True))
def test_numpy_input_fn(self, joint_train, lr_decay, return_ds):
"""Tests complete flow with numpy_input_fn."""
input_dim = 4
def train_input_fn(params):
data = np.zeros([input_dim], dtype=np.float32)
ds = (dataset_ops.Dataset
.from_tensors((data, data))
.repeat()
.batch(params['batch_size'], drop_remainder=True))
if return_ds:
return ds
else:
x, y = ds.make_one_shot_iterator().get_next()
return x, y
def eval_input_fn(params):
data = np.zeros([input_dim], dtype=np.float32)
ds = (dataset_ops.Dataset
.from_tensors((data, data))
.repeat()
.batch(params['batch_size'], drop_remainder=True))
if return_ds:
return ds
else:
x, y = ds.make_one_shot_iterator().get_next()
return x, y
predict_size = 10
def predict_input_fn(params):
del params # unused
data = np.zeros([input_dim], dtype=np.float32)
ds = (dataset_ops.Dataset
.from_tensors(data)
.repeat(predict_size)
.batch(1, drop_remainder=True))
return ds
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
prediction_size=[predict_size, input_dim],
lr_decay=lr_decay,
joint_train=joint_train)
class TPUGANEstimatorWarmStartTest(test.TestCase):
def setUp(self):
self._model_dir = self.get_temp_dir()
self._config = tpu_config.RunConfig(model_dir=self._model_dir)
self.new_variable_name = 'new_var'
self.new_variable_value = [1.0, 2.0, 3.0]
def tearDown(self):
writer_cache.FileWriterCache.clear()
def _test_warm_start(self, warm_start_from=None):
"""Tests whether WarmStartSettings work as intended."""
def generator_with_new_variable(noise_dict, mode):
variable_scope.get_variable(name=self.new_variable_name,
initializer=self.new_variable_value,
trainable=True)
return generator_fn(noise_dict, mode)
est = estimator.TPUGANEstimator(
generator_fn=generator_fn,
discriminator_fn=discriminator_fn,
generator_loss_fn=losses.wasserstein_generator_loss,
discriminator_loss_fn=losses.wasserstein_discriminator_loss,
generator_optimizer=training.GradientDescentOptimizer(1.0),
discriminator_optimizer=training.GradientDescentOptimizer(1.0),
train_batch_size=4,
use_tpu=FLAGS.use_tpu,
config=self._config)
def train_input_fn(params):
data = np.zeros([params['batch_size'], 4], dtype=np.float32)
return data, data
est.train(train_input_fn, steps=1)
est_warm = estimator.TPUGANEstimator(
generator_fn=generator_with_new_variable,
discriminator_fn=discriminator_fn,
generator_loss_fn=losses.wasserstein_generator_loss,
discriminator_loss_fn=losses.wasserstein_discriminator_loss,
generator_optimizer=training.GradientDescentOptimizer(1.0),
discriminator_optimizer=training.GradientDescentOptimizer(1.0),
config=tpu_config.RunConfig(
model_dir=None if warm_start_from else self._model_dir),
train_batch_size=4,
use_tpu=FLAGS.use_tpu,
warm_start_from=warm_start_from)
est_warm.train(train_input_fn, steps=1)
return est_warm
def test_warm_start_error(self):
"""Test if exception when reloading different estimators."""
with self.assertRaises(NotFoundError):
self._test_warm_start()
def test_warm_start_success(self):
"""Test if GANEstimator allows explicit warm start variable assignment."""
# Regex matches all variable names in ckpt except for new_var.
var_regex = '^(?!.*%s.*)' % self.new_variable_name
warmstart = WarmStartSettings(ckpt_to_initialize_from=self._model_dir,
vars_to_warm_start=var_regex)
est_warm = self._test_warm_start(warm_start_from=warmstart)
full_variable_name = 'Generator/%s' % self.new_variable_name
self.assertIn(full_variable_name, est_warm.get_variable_names())
equal_vals = np.array_equal(est_warm.get_variable_value(full_variable_name),
self.new_variable_value)
self.assertTrue(equal_vals)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/gan/python/estimator/python/tpu_gan_estimator_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for latent_gan_estimator.
See g3.tp.tensorflow.contrib.gan.python.estimator.python.latent_gan_estimator.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
import numpy as np
from tensorflow.contrib.gan.python.estimator.python import latent_gan_estimator
from tensorflow.python.estimator import run_config as run_config
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import test
from tensorflow.python.training import training
class TrainInputEstimatorTest(test.TestCase):
def test_get_input_training_estimator(self):
"""Integration test to make sure the input_training_estimator works."""
# Create dummy test input tensors.
true_features = np.reshape(np.random.uniform(size=100), (10, 10))
true_labels = np.reshape(np.random.uniform(size=100), (5, 20))
expected_z_output = [[1, -1], [-1, 1]]
    # Fill out the required parameters, including optimizer kwargs.
params = {
'batch_size': 2,
'z_shape': [2],
'learning_rate': 1.0,
'input_clip': 1.0,
'add_summaries': False,
'opt_kwargs': {
'beta1': 0.1
}
}
input_z_shape = [params['batch_size']] + params['z_shape']
# Create dummy model functions that represent an underlying GANEstimator and
# the input training wrapper. Make sure that everything is wired up
# correctly in the internals of each dummy function.
def _generator(net, mode):
"""The generator function will get the newly created z variable."""
del mode
self.assertSequenceEqual(net.shape, input_z_shape)
gen_dummy_var = variable_scope.get_variable(
name='generator_dummy_variable',
initializer=array_ops.ones(input_z_shape))
return net * gen_dummy_var
def _discriminator(net, condition, mode):
"""The discriminator function will get either the z variable or labels."""
del condition, mode
try:
self.assertSequenceEqual(net.shape, true_labels.shape)
except AssertionError:
self.assertSequenceEqual(net.shape, input_z_shape)
return net
def _loss(gan_model, features, labels, _):
"""Make sure that features and labels are passed in from input."""
self.assertTrue(np.array_equal(features, true_features))
self.assertTrue(np.array_equal(labels, true_labels))
return losses.absolute_difference(expected_z_output,
gan_model.generated_data)
optimizer = training.AdamOptimizer
    # We are not loading checkpoints, so set the corresponding directory to a
    # dummy directory.
tmp_dir = tempfile.mkdtemp()
config = run_config.RunConfig(model_dir=tmp_dir,
save_summary_steps=None,
save_checkpoints_steps=1,
save_checkpoints_secs=None)
# Get the estimator. Disable warm start so that there is no attempted
# checkpoint reloading.
estimator = latent_gan_estimator.get_latent_gan_estimator(
_generator, _discriminator, _loss, optimizer, params, config, tmp_dir,
warmstart_options=None)
# Train for a few steps.
def dummy_input():
return true_features, true_labels
estimator.train(input_fn=dummy_input, steps=10)
# Make sure the generator variables did not change, but the z variables did
# change.
self.assertTrue(np.array_equal(
estimator.get_variable_value('Generator/generator_dummy_variable'),
np.ones(input_z_shape)))
self.assertTrue(np.array_equal(
estimator.get_variable_value('new_var_z_input'),
expected_z_output))
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/gan/python/estimator/python/latent_gan_estimator_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A TF-GAN-backed StarGAN Estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import enum
from tensorflow.contrib.framework.python.ops import variables as variable_lib
from tensorflow.contrib.gan.python import namedtuples as tfgan_tuples
from tensorflow.contrib.gan.python import train as tfgan_train
from tensorflow.contrib.gan.python.eval.python import summaries as tfgan_summaries
from tensorflow.python.estimator import estimator
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.framework import ops
from tensorflow.python.ops import metrics as metrics_lib
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import tf_inspect as inspect
__all__ = ['StarGANEstimator', 'SummaryType']
class SummaryType(enum.IntEnum):
NONE = 0
VARIABLES = 1
IMAGES = 2
IMAGE_COMPARISON = 3
_summary_type_map = {
SummaryType.VARIABLES: tfgan_summaries.add_gan_model_summaries,
SummaryType.IMAGES: tfgan_summaries.add_stargan_image_summaries,
}
class StarGANEstimator(estimator.Estimator):
"""An estimator for Generative Adversarial Networks (GANs).
  This Estimator is backed by TFGAN. The network functions follow the TFGAN
  API, with one exception: if either `generator_fn` or `discriminator_fn` has
  an argument called `mode`, then the tf.Estimator mode is passed in for that
  argument. This helps with operations like batch normalization, which have
  different train and evaluation behavior.
Example:
```python
import tensorflow as tf
tfgan = tf.contrib.gan
# See TFGAN's `train.py` for a description of the generator and
# discriminator API.
def generator_fn(generator_inputs):
...
return generated_data
def discriminator_fn(data, conditioning):
...
return logits
# Create GAN estimator.
stargan_estimator = tfgan.estimator.StarGANEstimator(
model_dir,
generator_fn=generator_fn,
discriminator_fn=discriminator_fn,
loss_fn=loss_fn,
generator_optimizer=tf.compat.v1.train.AdamOptimizer(0.1, 0.5),
discriminator_optimizer=tf.compat.v1.train.AdamOptimizer(0.1, 0.5))
# Train estimator.
stargan_estimator.train(train_input_fn, steps)
# Evaluate resulting estimator.
stargan_estimator.evaluate(eval_input_fn)
# Generate samples from generator.
  predictions = np.array([
x for x in stargan_estimator.predict(predict_input_fn)])
```
"""
def __init__(self,
model_dir=None,
generator_fn=None,
discriminator_fn=None,
loss_fn=None,
generator_optimizer=None,
discriminator_optimizer=None,
get_hooks_fn=None,
get_eval_metric_ops_fn=None,
add_summaries=None,
use_loss_summaries=True,
config=None):
"""Initializes a StarGANEstimator instance.
Args:
      model_dir: Directory to save model parameters, graph, etc. This can
        also be used to load checkpoints from the directory into an estimator
        to continue training a previously saved model.
generator_fn: A python function that takes a Tensor, Tensor list, or
Tensor dictionary as inputs and returns the outputs of the GAN
generator. See `TFGAN` for more details and examples. Additionally, if
it has an argument called `mode`, the Estimator's `mode` will be passed
        in (e.g. TRAIN, EVAL, PREDICT). This is useful for things like batch
normalization.
discriminator_fn: A python function that takes the output of
`generator_fn` or real data in the GAN setup, and `input_data`. Outputs
a Tensor in the range [-inf, inf]. See `TFGAN` for more details and
examples.
      loss_fn: The loss function for the model. Takes a `StarGANModel`
        namedtuple and returns a `GANLoss` namedtuple.
generator_optimizer: The optimizer for generator updates, or a function
that takes no arguments and returns an optimizer. This function will be
called when the default graph is the `StarGANEstimator`'s graph, so
utilities like `tf.contrib.framework.get_or_create_global_step` will
work.
discriminator_optimizer: Same as `generator_optimizer`, but for the
discriminator updates.
get_hooks_fn: A function that takes a `GANTrainOps` tuple and returns a
list of hooks. These hooks are run on the generator and discriminator
train ops, and can be used to implement the GAN training scheme.
Defaults to `train.get_sequential_train_hooks()`.
get_eval_metric_ops_fn: A function that takes a `GANModel`, and returns a
dict of metric results keyed by name. The output of this function is
passed into `tf.estimator.EstimatorSpec` during evaluation.
add_summaries: `None`, a single `SummaryType`, or a list of `SummaryType`.
use_loss_summaries: If `True`, add loss summaries. If `False`, does not.
If `None`, uses defaults.
config: `RunConfig` object to configure the runtime settings.
Raises:
ValueError: If loss functions aren't callable.
ValueError: If `use_loss_summaries` isn't boolean or `None`.
ValueError: If `get_hooks_fn` isn't callable or `None`.
"""
if not callable(loss_fn):
raise ValueError('loss_fn must be callable.')
if use_loss_summaries not in [True, False, None]:
raise ValueError('use_loss_summaries must be True, False or None.')
if get_hooks_fn is not None and not callable(get_hooks_fn):
raise TypeError('get_hooks_fn must be callable.')
def _model_fn(features, labels, mode):
"""StarGANEstimator model function."""
if mode not in [
model_fn_lib.ModeKeys.TRAIN, model_fn_lib.ModeKeys.EVAL,
model_fn_lib.ModeKeys.PREDICT
]:
raise ValueError('Mode not recognized: %s' % mode)
if mode == model_fn_lib.ModeKeys.PREDICT:
input_data = features[0]
input_data_domain_label = features[1]
else:
input_data = features # rename inputs for clarity
input_data_domain_label = labels # rename inputs for clarity
# Make StarGANModel, which encapsulates the GAN model architectures.
gan_model = _get_gan_model(mode, generator_fn, discriminator_fn,
input_data, input_data_domain_label,
add_summaries)
# Make the EstimatorSpec, which incorporates the StarGANModel, losses,
# eval, metrics, and optimizers (if required).
return _get_estimator_spec(mode, gan_model, loss_fn,
get_eval_metric_ops_fn, generator_optimizer,
discriminator_optimizer, get_hooks_fn)
super(StarGANEstimator, self).__init__(
model_fn=_model_fn, model_dir=model_dir, config=config)
def _get_gan_model(mode,
generator_fn,
discriminator_fn,
input_data,
input_data_domain_label,
add_summaries,
generator_scope='Generator'):
"""Makes the StarGANModel tuple."""
if mode == model_fn_lib.ModeKeys.PREDICT:
gan_model = _make_prediction_gan_model(input_data, input_data_domain_label,
generator_fn, generator_scope)
else: # model_fn_lib.ModeKeys.TRAIN or model_fn_lib.ModeKeys.EVAL
gan_model = _make_gan_model(generator_fn, discriminator_fn, input_data,
input_data_domain_label, generator_scope,
add_summaries, mode)
return gan_model
def _get_estimator_spec(mode,
gan_model,
loss_fn,
get_eval_metric_ops_fn,
generator_optimizer,
discriminator_optimizer,
get_hooks_fn=None):
"""Get the EstimatorSpec for the current mode."""
if mode == model_fn_lib.ModeKeys.PREDICT:
estimator_spec = model_fn_lib.EstimatorSpec(
mode=mode, predictions=gan_model.generated_data)
else:
gan_loss = loss_fn(gan_model)
if mode == model_fn_lib.ModeKeys.EVAL:
estimator_spec = _get_eval_estimator_spec(gan_model, gan_loss,
get_eval_metric_ops_fn)
else: # model_fn_lib.ModeKeys.TRAIN:
gopt = (
generator_optimizer()
if callable(generator_optimizer) else generator_optimizer)
dopt = (
discriminator_optimizer()
if callable(discriminator_optimizer) else discriminator_optimizer)
get_hooks_fn = get_hooks_fn or tfgan_train.get_sequential_train_hooks()
estimator_spec = _get_train_estimator_spec(gan_model, gan_loss, gopt,
dopt, get_hooks_fn)
return estimator_spec
def _make_gan_model(generator_fn, discriminator_fn, input_data,
input_data_domain_label, generator_scope, add_summaries,
mode):
"""Construct a `StarGANModel`, and optionally pass in `mode`."""
# If network functions have an argument `mode`, pass mode to it.
if 'mode' in inspect.getargspec(generator_fn).args:
generator_fn = functools.partial(generator_fn, mode=mode)
if 'mode' in inspect.getargspec(discriminator_fn).args:
discriminator_fn = functools.partial(discriminator_fn, mode=mode)
gan_model = tfgan_train.stargan_model(
generator_fn,
discriminator_fn,
input_data,
input_data_domain_label,
generator_scope=generator_scope)
if add_summaries:
if not isinstance(add_summaries, (tuple, list)):
add_summaries = [add_summaries]
with ops.name_scope(None):
for summary_type in add_summaries:
_summary_type_map[summary_type](gan_model)
return gan_model
def _make_prediction_gan_model(input_data, input_data_domain_label,
generator_fn, generator_scope):
"""Make a `StarGANModel` from just the generator."""
# If `generator_fn` has an argument `mode`, pass mode to it.
if 'mode' in inspect.getargspec(generator_fn).args:
generator_fn = functools.partial(
generator_fn, mode=model_fn_lib.ModeKeys.PREDICT)
with variable_scope.variable_scope(generator_scope) as gen_scope:
# pylint:disable=protected-access
input_data = tfgan_train._convert_tensor_or_l_or_d(input_data)
input_data_domain_label = tfgan_train._convert_tensor_or_l_or_d(
input_data_domain_label)
# pylint:enable=protected-access
generated_data = generator_fn(input_data, input_data_domain_label)
generator_variables = variable_lib.get_trainable_variables(gen_scope)
return tfgan_tuples.StarGANModel(
input_data=input_data,
input_data_domain_label=None,
generated_data=generated_data,
generated_data_domain_target=input_data_domain_label,
reconstructed_data=None,
discriminator_input_data_source_predication=None,
discriminator_generated_data_source_predication=None,
discriminator_input_data_domain_predication=None,
discriminator_generated_data_domain_predication=None,
generator_variables=generator_variables,
generator_scope=generator_scope,
generator_fn=generator_fn,
discriminator_variables=None,
discriminator_scope=None,
discriminator_fn=None)
def _get_eval_estimator_spec(gan_model,
gan_loss,
get_eval_metric_ops_fn=None,
name=None):
"""Return an EstimatorSpec for the eval case."""
scalar_loss = gan_loss.generator_loss + gan_loss.discriminator_loss
with ops.name_scope(None, 'metrics',
[gan_loss.generator_loss, gan_loss.discriminator_loss]):
def _summary_key(head_name, val):
return '%s/%s' % (val, head_name) if head_name else val
eval_metric_ops = {
_summary_key(name, 'generator_loss'):
metrics_lib.mean(gan_loss.generator_loss),
_summary_key(name, 'discriminator_loss'):
metrics_lib.mean(gan_loss.discriminator_loss)
}
if get_eval_metric_ops_fn is not None:
custom_eval_metric_ops = get_eval_metric_ops_fn(gan_model)
if not isinstance(custom_eval_metric_ops, dict):
raise TypeError('get_eval_metric_ops_fn must return a dict, '
'received: {}'.format(custom_eval_metric_ops))
eval_metric_ops.update(custom_eval_metric_ops)
return model_fn_lib.EstimatorSpec(
mode=model_fn_lib.ModeKeys.EVAL,
predictions=gan_model.generated_data,
loss=scalar_loss,
eval_metric_ops=eval_metric_ops)
def _get_train_estimator_spec(gan_model,
gan_loss,
generator_optimizer,
discriminator_optimizer,
get_hooks_fn,
train_op_fn=tfgan_train.gan_train_ops):
"""Return an EstimatorSpec for the train case."""
scalar_loss = gan_loss.generator_loss + gan_loss.discriminator_loss
train_ops = train_op_fn(gan_model, gan_loss, generator_optimizer,
discriminator_optimizer)
training_hooks = get_hooks_fn(train_ops)
return model_fn_lib.EstimatorSpec(
loss=scalar_loss,
mode=model_fn_lib.ModeKeys.TRAIN,
train_op=train_ops.global_step_inc_op,
training_hooks=training_hooks)
def stargan_prediction_input_fn_wrapper(fn):
"""StarGAN Estimator prediction input_fn wrapper.
  Since the estimator will disregard the "label" variable passed to the model,
  we use a wrapper to pack the (feature, label) tuple as the feature passed to
  the model.
Args:
fn: input_fn for the prediction.
Returns:
    A new input_fn whose output is a tuple ((feature, label), None), where the
    second element is a dummy label to be disregarded and the first element is
    the true input to the estimator.
"""
def new_fn():
return fn(), None
return new_fn
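# --- Usage sketch (illustrative only; not part of the original module) ---
# At prediction time `StarGANEstimator._model_fn` unpacks `features[0]` as the
# input data and `features[1]` as the target domain label, so a prediction
# input_fn that returns `(input_data, input_data_domain_label)` should be
# wrapped before calling `predict`. A minimal sketch, assuming
# `stargan_estimator` is a trained `StarGANEstimator` and `predict_input_fn`
# returns that tuple:
#
#   wrapped_fn = stargan_prediction_input_fn_wrapper(predict_input_fn)
#   predictions = [x for x in stargan_estimator.predict(wrapped_fn)]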
|
tensorflow-master
|
tensorflow/contrib/gan/python/estimator/python/stargan_estimator_impl.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""`tf.Learn` components for `GANEstimator`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.gan.python.estimator.python import stargan_estimator_impl
# pylint: disable=wildcard-import
from tensorflow.contrib.gan.python.estimator.python.stargan_estimator_impl import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
__all__ = stargan_estimator_impl.__all__
remove_undocumented(__name__, __all__)
|
tensorflow-master
|
tensorflow/contrib/gan/python/estimator/python/stargan_estimator.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""`tf.Learn` components for `TPUGANEstimator`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.gan.python.estimator.python import tpu_gan_estimator_impl
# pylint: disable=wildcard-import
from tensorflow.contrib.gan.python.estimator.python.tpu_gan_estimator_impl import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
__all__ = tpu_gan_estimator_impl.__all__
remove_undocumented(__name__, __all__)
|
tensorflow-master
|
tensorflow/contrib/gan/python/estimator/python/tpu_gan_estimator.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implements an estimator wrapper that allows training the input latent space.
This file implements a latent gan estimator that wraps around a previously
trained GAN. The latent gan estimator trains a single variable z, representing
the hidden latent distribution that is the 'noise' input to the GAN. By training
z, the inpainting estimator can move through the latent z space to minimize a
specific loss function.
The latent gan estimator has a few key differences from a normal estimator.
First: the variables in the estimator should not be saved, as we are not
updating the original GAN and are only adding a new z variable that is meant
to be different for each run. In order to do distributed training using
train_and_evaluate, the TensorFlow RunConfig is expected to save checkpoints
by having either save_checkpoints_steps or save_checkpoints_secs set.
To avoid this conflict, we purposely set the save_checkpoints_steps value in
the RunConfig to be one step more than the total number of steps that the
inpainter estimator will run.
Second: we need to specify warm start settings, as we are reloading the
GAN model into a different graph (specifically, one with a new z variable).
The warm start settings defined below reload all GAN variables and ignore the
new z variable (and the optimizer).
Usage:
def _generator(net, mode):
...
def _discriminator(net, condition, mode):
...
def _loss(gan_model, features, labels, add_summaries):
...
def optimizer():
...
params = {<required params>}
config = tf.estimator.RunConfig()
tmp_dir = path/to/output/storage
estimator = latent_gan_estimator.get_latent_gan_estimator(
_generator, _discriminator, _loss, optimizer, params, config, tmp_dir)
def input_fn():
...
estimator.train(input_fn=input_fn)
See latent_gan_estimator_test.py or tensorflow_models/gan/face_inpainting for
further examples.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from tensorflow.contrib.gan.python import train as tfgan_train
from tensorflow.python.estimator import estimator
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.summary import summary
from tensorflow.python.training import training_util
INPUT_NAME = 'new_var_z_input' # The name for the new z space input variable.
OPTIMIZER_NAME = 'latent_gan_optimizer' # The name for the new optimizer vars.
__all__ = [
'get_latent_gan_estimator',
]
def _get_latent_gan_model_fn(generator_fn, discriminator_fn, loss_fn,
optimizer):
"""Sets up a model function that wraps around a given GAN."""
def model_fn(features, labels, mode, params):
"""Model function defining an inpainting estimator."""
batch_size = params['batch_size']
z_shape = [batch_size] + params['z_shape']
add_summaries = params['add_summaries']
input_clip = params['input_clip']
z = variable_scope.get_variable(
name=INPUT_NAME, initializer=random_ops.truncated_normal(z_shape),
constraint=lambda x: clip_ops.clip_by_value(x, -input_clip, input_clip))
generator = functools.partial(generator_fn, mode=mode)
discriminator = functools.partial(discriminator_fn, mode=mode)
gan_model = tfgan_train.gan_model(generator_fn=generator,
discriminator_fn=discriminator,
real_data=labels,
generator_inputs=z,
check_shapes=False)
loss = loss_fn(gan_model, features, labels, add_summaries)
# Use a variable scope to make sure that estimator variables don't cause
# save/load problems when restoring from ckpts.
with variable_scope.variable_scope(OPTIMIZER_NAME):
opt = optimizer(learning_rate=params['learning_rate'],
**params['opt_kwargs'])
train_op = opt.minimize(
loss=loss, global_step=training_util.get_or_create_global_step(),
var_list=[z])
if add_summaries:
z_grads = gradients_impl.gradients(loss, z)
summary.scalar('z_loss/z_grads', clip_ops.global_norm(z_grads))
summary.scalar('z_loss/loss', loss)
return model_fn_lib.EstimatorSpec(mode=mode,
predictions=gan_model.generated_data,
loss=loss,
train_op=train_op)
return model_fn
def get_latent_gan_estimator(generator_fn, discriminator_fn, loss_fn,
optimizer, params, config, ckpt_dir,
warmstart_options=True):
"""Gets an estimator that passes gradients to the input.
This function takes in a generator and adds a trainable z variable that is
used as input to the generator_fn. The generator itself is treated as a black
box through which gradients can pass without updating any weights. The
result is a trainable way to traverse the GAN latent space. The loss_fn is
used to actually train the z variable. The generator_fn and discriminator_fn
should come from a GAN previously trained with the tfgan library (on reload,
the variables are expected to follow the tfgan format). It may be possible to
use the latent gan estimator with entirely custom GANs that do not use the
tfgan library, as long as the appropriate variables are wired properly.
Args:
generator_fn: a function defining a Tensorflow graph for a GAN generator.
The weights defined in this graph should already be defined in the given
checkpoint location. Should have 'mode' as an argument.
discriminator_fn: a function defining a Tensorflow graph for a GAN
discriminator. Should have 'mode' as an argument.
loss_fn: a function defining a Tensorflow graph for a GAN loss. Takes in a
GANModel tuple, features, labels, and add_summaries as inputs.
optimizer: a tf.Optimizer or a function that returns a tf.Optimizer with no
inputs.
params: A dict containing the following parameters:
- batch_size: an int indicating the size of the training batch.
- z_shape: the desired shape of the input z values (not counting batch).
- learning_rate: a scalar or function defining a learning rate applied to
the optimizer.
- input_clip: the amount to clip the z training variable by (z is kept in
[-input_clip, input_clip]).
- add_summaries: whether or not to add summaries.
- opt_kwargs: optimizer kwargs.
config: tf.RunConfig. Should point model to output dir and should indicate
whether to save checkpoints (to avoid saving checkpoints, set
save_checkpoints_steps to a number larger than the number of train steps).
The model_dir field in the RunConfig should point to a directory WITHOUT
any saved checkpoints.
ckpt_dir: the directory where the model checkpoints live. The checkpoint is
used to warm start the underlying GAN. This should NOT be the same as
config.model_dir.
warmstart_options: boolean, None, or a WarmStartSettings object. If set to
True, uses a default WarmStartSettings object. If set to False or None,
does not use warm start. If using a custom WarmStartSettings object, make
sure that new variables are properly accounted for when reloading the
underlying GAN. Defaults to True.
Returns:
A tf.estimator.Estimator that trains the latent z input to the given GAN.
"""
model_fn = _get_latent_gan_model_fn(generator_fn, discriminator_fn,
loss_fn, optimizer)
if isinstance(warmstart_options, estimator.WarmStartSettings):
ws = warmstart_options
elif warmstart_options:
# Default WarmStart loads all variable names except INPUT_NAME and
# OPTIMIZER_NAME.
var_regex = '^(?!.*(%s|%s).*)' % (INPUT_NAME, OPTIMIZER_NAME)
ws = estimator.WarmStartSettings(ckpt_to_initialize_from=ckpt_dir,
vars_to_warm_start=var_regex)
else:
ws = None
if 'opt_kwargs' not in params:
params['opt_kwargs'] = {}
return estimator.Estimator(model_fn=model_fn, config=config, params=params,
warm_start_from=ws)
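# The following is a minimal, illustrative sketch (not part of the original
# module) of how a caller might assemble the required `params` and a RunConfig
# that avoids writing checkpoints, as described in the module docstring above.
# The helper name, literal values, and directory arguments are hypothetical.
def _example_latent_gan_setup(generator_fn, discriminator_fn, loss_fn,
                              input_fn, ckpt_dir, output_dir,
                              train_steps=1000):
  """Trains the latent z of a pre-trained GAN; for illustration only."""
  import tensorflow as tf  # Assumes a TF 1.x environment is available.
  params = {
      'batch_size': 16,
      'z_shape': [100],      # Shape of z, excluding the batch dimension.
      'learning_rate': 0.01,
      'input_clip': 1.0,     # z is kept inside [-1.0, 1.0].
      'add_summaries': True,
  }
  # Schedule the first checkpoint one step past the end of training so that
  # train_and_evaluate never checkpoints the wrapped GAN.
  config = tf.estimator.RunConfig(model_dir=output_dir,
                                  save_checkpoints_steps=train_steps + 1)
  est = get_latent_gan_estimator(generator_fn, discriminator_fn, loss_fn,
                                 tf.train.AdamOptimizer, params, config,
                                 ckpt_dir)
  est.train(input_fn=input_fn, steps=train_steps)
  return est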
|
tensorflow-master
|
tensorflow/contrib/gan/python/estimator/python/latent_gan_estimator_impl.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TF-GAN's estimator.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import tempfile
from absl.testing import parameterized
import numpy as np
from tensorflow.contrib import layers
from tensorflow.contrib.gan.python import namedtuples as tfgan_tuples
from tensorflow.contrib.gan.python.estimator.python import gan_estimator_impl as estimator
from tensorflow.contrib.gan.python.losses.python import tuple_losses as losses
from tensorflow.contrib.learn.python.learn.learn_io import graph_io
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.estimator.estimator import WarmStartSettings
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework.errors_impl import NotFoundError
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics as metrics_lib
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import learning_rate_decay
from tensorflow.python.training import sync_replicas_optimizer
from tensorflow.python.training import training
from tensorflow.python.training import training_util
def generator_fn(noise_dict, mode):
del mode
noise = noise_dict['x']
return layers.fully_connected(noise, tensor_shape.dimension_value(
noise.shape[1]))
def discriminator_fn(data, unused_conditioning, mode):
del unused_conditioning, mode
return layers.fully_connected(data, 1)
class GetGANModelTest(test.TestCase, parameterized.TestCase):
"""Tests that `GetGANModel` produces the correct model."""
@parameterized.named_parameters(
('train', model_fn_lib.ModeKeys.TRAIN),
('eval', model_fn_lib.ModeKeys.EVAL),
('predict', model_fn_lib.ModeKeys.PREDICT))
def test_get_gan_model(self, mode):
with ops.Graph().as_default():
generator_inputs = {'x': array_ops.ones([3, 4])}
is_predict = mode == model_fn_lib.ModeKeys.PREDICT
real_data = array_ops.zeros([3, 4]) if not is_predict else None
gan_model = estimator._get_gan_model(
mode, generator_fn, discriminator_fn, real_data, generator_inputs,
add_summaries=False)
self.assertEqual(generator_inputs, gan_model.generator_inputs)
self.assertIsNotNone(gan_model.generated_data)
self.assertLen(gan_model.generator_variables, 2) # 1 FC layer
self.assertIsNotNone(gan_model.generator_fn)
if mode == model_fn_lib.ModeKeys.PREDICT:
self.assertIsNone(gan_model.real_data)
self.assertIsNone(gan_model.discriminator_real_outputs)
self.assertIsNone(gan_model.discriminator_gen_outputs)
self.assertIsNone(gan_model.discriminator_variables)
self.assertIsNone(gan_model.discriminator_scope)
self.assertIsNone(gan_model.discriminator_fn)
else:
self.assertIsNotNone(gan_model.real_data)
self.assertIsNotNone(gan_model.discriminator_real_outputs)
self.assertIsNotNone(gan_model.discriminator_gen_outputs)
self.assertLen(gan_model.discriminator_variables, 2) # 1 FC layer
self.assertIsNotNone(gan_model.discriminator_scope)
self.assertIsNotNone(gan_model.discriminator_fn)
def get_dummy_gan_model():
# TODO(joelshor): Find a better way of creating a variable scope.
with variable_scope.variable_scope('generator') as gen_scope:
gen_var = variable_scope.get_variable('dummy_var', initializer=0.0)
with variable_scope.variable_scope('discriminator') as dis_scope:
dis_var = variable_scope.get_variable('dummy_var', initializer=0.0)
return tfgan_tuples.GANModel(
generator_inputs=None,
generated_data=array_ops.ones([3, 4]),
generator_variables=[gen_var],
generator_scope=gen_scope,
generator_fn=None,
real_data=array_ops.zeros([3, 4]),
discriminator_real_outputs=array_ops.ones([1, 2, 3]) * dis_var,
discriminator_gen_outputs=array_ops.ones([1, 2, 3]) * gen_var * dis_var,
discriminator_variables=[dis_var],
discriminator_scope=dis_scope,
discriminator_fn=None)
def dummy_loss_fn(gan_model, add_summaries=True):
del add_summaries
return math_ops.reduce_sum(gan_model.discriminator_real_outputs -
gan_model.discriminator_gen_outputs)
def get_metrics(gan_model):
return {
'mse_custom_metric': metrics_lib.mean_squared_error(
gan_model.real_data, gan_model.generated_data)
}
class GetEstimatorSpecTest(test.TestCase, parameterized.TestCase):
"""Tests that the EstimatorSpec is constructed appropriately."""
@classmethod
def setUpClass(cls):
super(GetEstimatorSpecTest, cls).setUpClass()
cls._generator_optimizer = training.GradientDescentOptimizer(1.0)
cls._discriminator_optimizer = training.GradientDescentOptimizer(1.0)
@parameterized.named_parameters(
('train', model_fn_lib.ModeKeys.TRAIN),
('eval', model_fn_lib.ModeKeys.EVAL),
('predict', model_fn_lib.ModeKeys.PREDICT))
def test_get_estimator_spec(self, mode):
with ops.Graph().as_default():
self._gan_model = get_dummy_gan_model()
spec = estimator._get_estimator_spec(
mode,
self._gan_model,
generator_loss_fn=dummy_loss_fn,
discriminator_loss_fn=dummy_loss_fn,
get_eval_metric_ops_fn=get_metrics,
generator_optimizer=self._generator_optimizer,
discriminator_optimizer=self._discriminator_optimizer)
self.assertEqual(mode, spec.mode)
if mode == model_fn_lib.ModeKeys.PREDICT:
self.assertEqual(self._gan_model.generated_data, spec.predictions)
elif mode == model_fn_lib.ModeKeys.TRAIN:
self.assertShapeEqual(np.array(0), spec.loss) # must be a scalar
self.assertIsNotNone(spec.train_op)
self.assertIsNotNone(spec.training_hooks)
elif mode == model_fn_lib.ModeKeys.EVAL:
self.assertEqual(self._gan_model.generated_data, spec.predictions)
self.assertShapeEqual(np.array(0), spec.loss) # must be a scalar
self.assertIsNotNone(spec.eval_metric_ops)
def test_get_sync_estimator_spec(self):
"""Make sure spec is loaded with sync hooks for sync opts."""
def get_sync_optimizer():
return sync_replicas_optimizer.SyncReplicasOptimizer(
training.GradientDescentOptimizer(learning_rate=1.0),
replicas_to_aggregate=1)
with ops.Graph().as_default():
self._gan_model = get_dummy_gan_model()
g_opt = get_sync_optimizer()
d_opt = get_sync_optimizer()
spec = estimator._get_estimator_spec(
model_fn_lib.ModeKeys.TRAIN,
self._gan_model,
generator_loss_fn=dummy_loss_fn,
discriminator_loss_fn=dummy_loss_fn,
get_eval_metric_ops_fn=get_metrics,
generator_optimizer=g_opt,
discriminator_optimizer=d_opt)
self.assertLen(spec.training_hooks, 4)
sync_opts = [
hook._sync_optimizer for hook in spec.training_hooks if
isinstance(hook, sync_replicas_optimizer._SyncReplicasOptimizerHook)]
self.assertLen(sync_opts, 2)
self.assertSetEqual(frozenset(sync_opts), frozenset((g_opt, d_opt)))
class GANEstimatorIntegrationTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _test_complete_flow(
self, train_input_fn, eval_input_fn, predict_input_fn, prediction_size,
lr_decay=False):
def make_opt():
gstep = training_util.get_or_create_global_step()
lr = learning_rate_decay.exponential_decay(1.0, gstep, 10, 0.9)
return training.GradientDescentOptimizer(lr)
gopt = make_opt if lr_decay else training.GradientDescentOptimizer(1.0)
dopt = make_opt if lr_decay else training.GradientDescentOptimizer(1.0)
est = estimator.GANEstimator(
generator_fn=generator_fn,
discriminator_fn=discriminator_fn,
generator_loss_fn=losses.wasserstein_generator_loss,
discriminator_loss_fn=losses.wasserstein_discriminator_loss,
generator_optimizer=gopt,
discriminator_optimizer=dopt,
get_eval_metric_ops_fn=get_metrics,
model_dir=self._model_dir)
# Train.
num_steps = 10
est.train(train_input_fn, steps=num_steps)
# Evaluate.
scores = est.evaluate(eval_input_fn)
self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])
self.assertIn('loss', scores)
self.assertEqual(scores['discriminator_loss'] + scores['generator_loss'],
scores['loss'])
self.assertIn('mse_custom_metric', scores)
# Predict.
predictions = np.array([x for x in est.predict(predict_input_fn)])
self.assertAllEqual(prediction_size, predictions.shape)
def test_numpy_input_fn(self):
"""Tests complete flow with numpy_input_fn."""
input_dim = 4
batch_size = 5
data = np.zeros([batch_size, input_dim])
train_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
prediction_size=[batch_size, input_dim])
def test_numpy_input_fn_lrdecay(self):
"""Tests complete flow with numpy_input_fn."""
input_dim = 4
batch_size = 5
data = np.zeros([batch_size, input_dim])
train_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
prediction_size=[batch_size, input_dim],
lr_decay=True)
def test_input_fn_from_parse_example(self):
"""Tests complete flow with input_fn constructed from parse_example."""
input_dim = 4
batch_size = 6
data = np.zeros([batch_size, input_dim])
serialized_examples = []
for datum in data:
example = example_pb2.Example(features=feature_pb2.Features(
feature={
'x': feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=datum)),
'y': feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=datum)),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': parsing_ops.FixedLenFeature([input_dim], dtypes.float32),
'y': parsing_ops.FixedLenFeature([input_dim], dtypes.float32),
}
def _train_input_fn():
feature_map = parsing_ops.parse_example(
serialized_examples, feature_spec)
_, features = graph_io.queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
_, features = graph_io.queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
_, features = graph_io.queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
prediction_size=[batch_size, input_dim])
class GANEstimatorWarmStartTest(test.TestCase):
def setUp(self):
self._model_dir = self.get_temp_dir()
self.new_variable_name = 'new_var'
self.new_variable_value = [1, 2, 3]
def tearDown(self):
writer_cache.FileWriterCache.clear()
def _test_warm_start(self, warm_start_from=None):
"""Tests whether WarmStartSettings work as intended."""
def generator_with_new_variable(noise_dict, mode):
variable_scope.get_variable(name=self.new_variable_name,
initializer=self.new_variable_value,
trainable=True)
return generator_fn(noise_dict, mode)
def train_input_fn():
data = np.zeros([3, 4])
return {'x': data}, data
est = estimator.GANEstimator(
generator_fn=generator_fn,
discriminator_fn=discriminator_fn,
generator_loss_fn=losses.wasserstein_generator_loss,
discriminator_loss_fn=losses.wasserstein_discriminator_loss,
generator_optimizer=training.GradientDescentOptimizer(1.0),
discriminator_optimizer=training.GradientDescentOptimizer(1.0),
model_dir=self._model_dir)
est.train(train_input_fn, steps=1)
est_warm = estimator.GANEstimator(
generator_fn=generator_with_new_variable,
discriminator_fn=discriminator_fn,
generator_loss_fn=losses.wasserstein_generator_loss,
discriminator_loss_fn=losses.wasserstein_discriminator_loss,
generator_optimizer=training.GradientDescentOptimizer(1.0),
discriminator_optimizer=training.GradientDescentOptimizer(1.0),
model_dir=None if warm_start_from else self._model_dir,
warm_start_from=warm_start_from)
est_warm.train(train_input_fn, steps=1)
return est_warm
def test_warm_start_error(self):
"""Test if exception when reloading different estimators."""
with self.assertRaises(NotFoundError):
self._test_warm_start()
def test_warm_start_success(self):
"""Test if GANEstimator allows explicit warm start variable assignment."""
# Regex matches all variable names in ckpt except for new_var.
var_regex = '^(?!.*%s.*)' % self.new_variable_name
warmstart = WarmStartSettings(ckpt_to_initialize_from=self._model_dir,
vars_to_warm_start=var_regex)
est_warm = self._test_warm_start(warm_start_from=warmstart)
full_variable_name = 'Generator/%s' % self.new_variable_name
self.assertIn(full_variable_name, est_warm.get_variable_names())
equal_vals = np.array_equal(est_warm.get_variable_value(full_variable_name),
self.new_variable_value)
self.assertTrue(equal_vals)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/gan/python/estimator/python/gan_estimator_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A TF-GAN-backed GAN Estimator that works on TPU."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.gan.python import namedtuples as tfgan_tuples
from tensorflow.contrib.gan.python import train as tfgan_train
from tensorflow.contrib.gan.python.estimator.python import gan_estimator_impl as gan_estimator_lib
from tensorflow.contrib.tpu.python.tpu import tpu_estimator
from tensorflow.contrib.tpu.python.tpu import tpu_optimizer
from tensorflow.contrib.training.python.training import training
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import metrics as metrics_lib
from tensorflow.python.ops.losses import losses
__all__ = [
'TPUGANEstimator',
]
class TPUGANEstimator(tpu_estimator.TPUEstimator):
"""An estimator for Generative Adversarial Networks (GANs) on TPU.
This Estimator is backed by TFGAN. It is similar to `tfgan.GANEstimator`,
but works on TPU.
Example:
```python
import tensorflow as tf
tfgan = tf.contrib.gan
# See TFGAN's `train.py` for a description of the generator and
# discriminator API.
def generator_fn(generator_inputs):
...
return generated_data
def discriminator_fn(data, conditioning):
...
return logits
# Create GAN estimator.
config = tpu_config.RunConfig(model_dir='/my/dir')
gan_estimator = tfgan.estimator.TPUGANEstimator(
generator_fn=generator_fn,
discriminator_fn=discriminator_fn,
generator_loss_fn=tfgan.losses.wasserstein_generator_loss,
discriminator_loss_fn=tfgan.losses.wasserstein_discriminator_loss,
generator_optimizer=tf.compat.v1.train.AdamOptimizer(0.1, 0.5),
discriminator_optimizer=tf.compat.v1.train.AdamOptimizer(0.1, 0.5),
train_batch_size=4,
config=config)
# Train estimator.
gan_estimator.train(train_input_fn, train_steps)
# Evaluate resulting estimator.
gan_estimator.evaluate(eval_input_fn, eval_steps)
# Generate samples from generator.
predictions = np.array([
x['generated_data'] for x in gan_estimator.predict(predict_input_fn)])
```
"""
def __init__(self,
# Arguments to construct the `model_fn`.
generator_fn=None,
discriminator_fn=None,
generator_loss_fn=None,
discriminator_loss_fn=None,
generator_optimizer=None,
discriminator_optimizer=None,
get_eval_metric_ops_fn=None,
add_summaries=None,
joint_train=False,
gan_train_steps=tfgan_tuples.GANTrainSteps(1, 1),
# TPUEstimator options.
model_dir=None,
config=None,
params=None,
use_tpu=True,
train_batch_size=None,
eval_batch_size=None,
predict_batch_size=None,
batch_axis=None,
eval_on_tpu=True,
export_to_tpu=True,
warm_start_from=None):
"""Initializes a TPUGANEstimator instance.
Args:
generator_fn: A python function that takes a Tensor, Tensor list, or
Tensor dictionary as inputs and returns the outputs of the GAN
generator. See `TFGAN` for more details and examples. Additionally, if
it has an argument called `mode`, the Estimator's `mode` will be passed
in (e.g. TRAIN, EVAL, PREDICT). This is useful for things like batch
normalization.
discriminator_fn: A python function that takes the output of
`generator_fn` or real data in the GAN setup, and `generator_inputs`.
Outputs a Tensor in the range [-inf, inf]. See `TFGAN` for more details
and examples.
generator_loss_fn: The loss function on the generator. Takes a `GANModel`
tuple.
discriminator_loss_fn: The loss function on the discriminator. Takes a
`GANModel` tuple.
generator_optimizer: The optimizer for generator updates, or a function
that takes no arguments and returns an optimizer. This function will
be called when the default graph is the `GANEstimator`'s graph, so
utilities like `tf.contrib.framework.get_or_create_global_step` will
work.
discriminator_optimizer: Same as `generator_optimizer`, but for the
discriminator updates.
get_eval_metric_ops_fn: A function that takes a list of arguments and
returns a dict of metric results keyed by name. The output of this
function is passed into `tf.estimator.EstimatorSpec` during evaluation.
The arguments must be:
* generator_inputs
* generated_data
* real_data
* discriminator_real_outputs
* discriminator_gen_outputs
add_summaries: `None`, a single `SummaryType`, or a list of `SummaryType`.
This is ignored for jobs that run on TPU, such as the train job if
`use_tpu` is `True` or the eval job if `eval_on_tpu` is `True`.
joint_train: A Python boolean. If `True`, jointly train the generator and
the discriminator. If `False`, sequentially train them. See `train.py`
in TFGAN for more details on the differences between the two GAN
training methods.
gan_train_steps: A `tfgan.GANTrainSteps` named tuple describing the ratio
of generator to discriminator steps. For now, only supports 1:1
training.
model_dir: Same as `TPUEstimator`: Directory to save model parameters,
graph, etc. This can also be used to load checkpoints from the
directory into an estimator to continue training a previously saved
model. If `None`, the model_dir in `config` will be used if set. If both
are set, they must be same. If both are `None`, a temporary directory
will be used.
config: Same as `TPUEstimator`: A `tpu_config.RunConfig` configuration
object. Cannot be `None`.
params: Same as `TPUEstimator`: An optional `dict` of hyper parameters
that will be passed into `input_fn` and `model_fn`. Keys are names of
parameters, values are basic python types. There are reserved keys for
`TPUEstimator`, including 'batch_size'.
use_tpu: Same as `TPUEstimator`: A bool indicating whether TPU support is
enabled. Currently, TPU training and evaluation respect this bit, but
eval_on_tpu can override execution of eval. See below. Predict still
happens on CPU.
train_batch_size: Same as `TPUEstimator`: An int representing the global
training batch size. TPUEstimator transforms this global batch size to a
per-shard batch size, as params['batch_size'], when calling `input_fn`
and `model_fn`. Cannot be `None` if `use_tpu` is `True`. Must be
divisible by total number of replicas.
eval_batch_size: Same as `TPUEstimator`: An int representing evaluation
batch size. Must be divisible by total number of replicas.
predict_batch_size: Same as `TPUEstimator`: An int representing the
prediction batch size. Must be divisible by total number of replicas.
batch_axis: Same as `TPUEstimator`: A python tuple of int values
describing how each tensor produced by the Estimator `input_fn` should
be split across the TPU compute shards. For example, if your input_fn
produced (images, labels) where the images tensor is in `HWCN` format,
your shard dimensions would be [3, 0], where 3 corresponds to the `N`
dimension of your images Tensor, and 0 corresponds to the dimension
along which to split the labels to match up with the corresponding
images. If None is supplied, and per_host_input_for_training is True,
batches will be sharded based on the major dimension. If
tpu_config.per_host_input_for_training is False or `PER_HOST_V2`,
batch_axis is ignored.
eval_on_tpu: Same as `TPUEstimator`: If False, evaluation runs on CPU or
GPU. In this case, the model_fn must return `EstimatorSpec` when called
with `mode` as `EVAL`.
export_to_tpu: Same as `TPUEstimator`: If True, `export_savedmodel()`
exports a metagraph for serving on TPU besides the one on CPU.
warm_start_from: Same as `TPUEstimator`: Optional string filepath to a
checkpoint or SavedModel to warm-start from, or a
`tf.estimator.WarmStartSettings` object to fully configure
warm-starting. If the string filepath is provided instead of a
`WarmStartSettings`, then all variables are warm-started, and it is
assumed that vocabularies and Tensor names are unchanged.
Raises:
ValueError: If loss functions aren't callable.
ValueError: If `gan_train_steps` isn't a `tfgan_tuples.GANTrainSteps`
tuple.
ValueError: If `gan_train_steps` isn't 1:1 training.
"""
if not callable(generator_loss_fn):
raise ValueError('generator_loss_fn must be callable.')
if not callable(discriminator_loss_fn):
raise ValueError('discriminator_loss_fn must be callable.')
if not isinstance(gan_train_steps, tfgan_tuples.GANTrainSteps):
raise ValueError(
'`gan_train_steps` must be `tfgan_tuples.GANTrainSteps`. Instead, '
'was type: %s' % type(gan_train_steps))
if (gan_train_steps.generator_train_steps != 1 or
gan_train_steps.discriminator_train_steps != 1):
raise ValueError('Estimator currently only supports 1:1 training.')
if use_tpu:
generator_optimizer = _maybe_make_cross_shard_optimizer(
generator_optimizer)
discriminator_optimizer = _maybe_make_cross_shard_optimizer(
discriminator_optimizer)
def _model_fn(features, labels, mode, params):
"""GANEstimator model function."""
del params # unused
if mode not in [model_fn_lib.ModeKeys.TRAIN, model_fn_lib.ModeKeys.EVAL,
model_fn_lib.ModeKeys.PREDICT]:
raise ValueError('Mode not recognized: %s' % mode)
real_data = labels # rename inputs for clarity
generator_inputs = features # rename inputs for clarity
# Make GANModel, which encapsulates the GAN model architectures.
# TODO(joelshor): Switch TF-GAN over to TPU-compatible summaries, then
# remove `add_summaries` logic below.
is_on_tpu = _is_on_tpu(mode, use_tpu, eval_on_tpu)
gan_model = gan_estimator_lib._get_gan_model( # pylint:disable=protected-access
mode, generator_fn, discriminator_fn, real_data, generator_inputs,
add_summaries=None if is_on_tpu else add_summaries)
# Make the TPUEstimatorSpec, which incorporates the GANModel, losses, eval
# metrics, and optimizers (if required).
estimator_spec = _get_estimator_spec(
mode, gan_model, generator_loss_fn, discriminator_loss_fn,
get_eval_metric_ops_fn, generator_optimizer, discriminator_optimizer,
joint_train, is_on_tpu, gan_train_steps)
assert isinstance(estimator_spec, tpu_estimator.TPUEstimatorSpec)
return estimator_spec
super(TPUGANEstimator, self).__init__(
model_fn=_model_fn,
model_dir=model_dir,
config=config,
params=params,
use_tpu=use_tpu,
train_batch_size=train_batch_size,
eval_batch_size=eval_batch_size,
predict_batch_size=predict_batch_size,
batch_axis=batch_axis,
eval_on_tpu=eval_on_tpu,
export_to_tpu=export_to_tpu,
warm_start_from=warm_start_from)
def _is_on_tpu(mode, use_tpu, eval_on_tpu):
if mode == model_fn_lib.ModeKeys.TRAIN:
return use_tpu
elif mode == model_fn_lib.ModeKeys.EVAL:
return eval_on_tpu
else:
return False
def _get_estimator_spec(
mode, gan_model, generator_loss_fn, discriminator_loss_fn,
get_eval_metric_ops_fn, generator_optimizer, discriminator_optimizer,
joint_train, is_on_tpu, gan_train_steps):
"""Get the TPUEstimatorSpec for the current mode."""
if mode == model_fn_lib.ModeKeys.PREDICT:
estimator_spec = tpu_estimator.TPUEstimatorSpec(
mode=mode, predictions={'generated_data': gan_model.generated_data})
elif mode == model_fn_lib.ModeKeys.EVAL:
gan_loss = tfgan_tuples.GANLoss(
generator_loss=generator_loss_fn(
gan_model, add_summaries=not is_on_tpu),
discriminator_loss=discriminator_loss_fn(
gan_model, add_summaries=not is_on_tpu))
# Eval losses for metrics must preserve batch dimension.
gan_loss_no_reduction = tfgan_tuples.GANLoss(
generator_loss=generator_loss_fn(
gan_model, add_summaries=False, reduction=losses.Reduction.NONE),
discriminator_loss=discriminator_loss_fn(
gan_model, add_summaries=False, reduction=losses.Reduction.NONE))
estimator_spec = _get_eval_estimator_spec(
gan_model, gan_loss, gan_loss_no_reduction, get_eval_metric_ops_fn)
else: # model_fn_lib.ModeKeys.TRAIN:
gan_loss = tfgan_tuples.GANLoss(
generator_loss=generator_loss_fn(
gan_model, add_summaries=not is_on_tpu),
discriminator_loss=discriminator_loss_fn(
gan_model, add_summaries=not is_on_tpu))
# Construct optimizers if arguments were callable. For TPUs, they must be
# `CrossShardOptimizer`.
g_callable = callable(generator_optimizer)
gopt = generator_optimizer() if g_callable else generator_optimizer
d_callable = callable(discriminator_optimizer)
dopt = discriminator_optimizer() if d_callable else discriminator_optimizer
estimator_spec = _get_train_estimator_spec(
gan_model, gan_loss, gopt, dopt, joint_train, gan_train_steps)
return estimator_spec
def _get_eval_estimator_spec(gan_model, gan_loss, gan_loss_no_reduction,
get_eval_metric_ops_fn):
"""Return an TPUEstimatorSpec for the eval case."""
# Make the metric function and tensor names.
if get_eval_metric_ops_fn is not None:
def metric_fn(
generator_inputs, generated_data, real_data, discriminator_real_outputs,
discriminator_gen_outputs, generator_loss, discriminator_loss):
"""`metric_fn` used in TPUEstimator to calculate metrics."""
eval_metric_ops = {
'generator_loss': metrics_lib.mean(generator_loss),
'discriminator_loss': metrics_lib.mean(discriminator_loss),
}
custom_eval_metric_ops = get_eval_metric_ops_fn(
generator_inputs, generated_data, real_data,
discriminator_real_outputs, discriminator_gen_outputs)
if not isinstance(custom_eval_metric_ops, dict):
raise TypeError('`get_eval_metric_ops_fn` must return a dict, '
'received: {}'.format(custom_eval_metric_ops))
eval_metric_ops.update(custom_eval_metric_ops)
return eval_metric_ops
tensors = {
'generator_loss': gan_loss_no_reduction.generator_loss,
'discriminator_loss': gan_loss_no_reduction.discriminator_loss,
'generator_inputs': gan_model.generator_inputs,
'generated_data': gan_model.generated_data,
'real_data': gan_model.real_data,
'discriminator_real_outputs': gan_model.discriminator_real_outputs,
'discriminator_gen_outputs': gan_model.discriminator_gen_outputs,
}
else:
def metric_fn(generator_loss, discriminator_loss):
return {
'generator_loss': metrics_lib.mean(generator_loss),
'discriminator_loss': metrics_lib.mean(discriminator_loss),
}
tensors = {
'generator_loss': gan_loss_no_reduction.generator_loss,
'discriminator_loss': gan_loss_no_reduction.discriminator_loss,
}
scalar_loss = gan_loss.generator_loss + gan_loss.discriminator_loss
return tpu_estimator.TPUEstimatorSpec(
mode=model_fn_lib.ModeKeys.EVAL,
predictions=gan_model.generated_data,
loss=scalar_loss,
eval_metrics=(metric_fn, tensors))
def _get_train_estimator_spec(
gan_model, gan_loss, generator_optimizer, discriminator_optimizer,
joint_train, gan_train_steps):
"""Return a TPUEstimatorSpec for the train case."""
scalar_loss = gan_loss.generator_loss + gan_loss.discriminator_loss
# Get generator and discriminator update ops. We split them so that update
# ops aren't accidentally run multiple times. For now, throw an error if
# there are update ops that aren't associated with either the generator or
# the discriminator.
gen_update_ops, dis_update_ops = tfgan_train._get_update_ops( # pylint:disable=protected-access
{}, gan_model.generator_scope.name, gan_model.discriminator_scope.name)
def gen_train_op():
with ops.name_scope('generator_train'):
return training.create_train_op(
total_loss=gan_loss.generator_loss,
optimizer=generator_optimizer,
variables_to_train=gan_model.generator_variables,
update_ops=gen_update_ops)
def dis_train_op():
with ops.name_scope('discriminator_train'):
return training.create_train_op(
total_loss=gan_loss.discriminator_loss,
optimizer=discriminator_optimizer,
variables_to_train=gan_model.discriminator_variables,
update_ops=dis_update_ops)
# Either optimize the generator and discriminator sequentially or jointly.
tpu_train_op = _combine_train_ops(gen_train_op, dis_train_op, joint_train,
gan_train_steps)
return tpu_estimator.TPUEstimatorSpec(
loss=scalar_loss,
mode=model_fn_lib.ModeKeys.TRAIN,
train_op=tpu_train_op)
# TODO(joelshor): Add support for multiple D / G steps.
def _combine_train_ops(gen_train_op, dis_train_op, joint_train,
gan_train_steps):
"""Combine generator and discriminator train ops into a single op."""
del gan_train_steps
if joint_train:
tpu_train_op = control_flow_ops.group(gen_train_op(), dis_train_op(),
name='joint_train')
else:
with ops.control_dependencies([dis_train_op()]):
tpu_train_op = gen_train_op()
return tpu_train_op
def _maybe_make_cross_shard_optimizer(opt):
if callable(opt):
if not isinstance(opt(), tpu_optimizer.CrossShardOptimizer):
return lambda: tpu_optimizer.CrossShardOptimizer(opt())
elif not isinstance(opt, tpu_optimizer.CrossShardOptimizer):
return tpu_optimizer.CrossShardOptimizer(opt)
return opt
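# The following is a minimal, illustrative sketch (not part of the original
# module) of a `get_eval_metric_ops_fn` with the argument list documented in
# `TPUGANEstimator.__init__`. The function and metric names are hypothetical.
def _example_get_eval_metric_ops_fn(generator_inputs, generated_data,
                                    real_data, discriminator_real_outputs,
                                    discriminator_gen_outputs):
  """Adds a mean squared error between real and generated data."""
  del generator_inputs, discriminator_real_outputs, discriminator_gen_outputs
  return {
      'example_mse': metrics_lib.mean_squared_error(real_data, generated_data),
  }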
|
tensorflow-master
|
tensorflow/contrib/gan/python/estimator/python/tpu_gan_estimator_impl.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""`tf.Learn` components for `GANEstimator`'s loss."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.gan.python.estimator.python import head_impl
# pylint: disable=wildcard-import
from tensorflow.contrib.gan.python.estimator.python.head_impl import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
__all__ = head_impl.__all__
remove_undocumented(__name__, __all__)
|
tensorflow-master
|
tensorflow/contrib/gan/python/estimator/python/head.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TF-GAN's head.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.gan.python import namedtuples as tfgan_tuples
from tensorflow.contrib.gan.python.estimator.python import head
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.training import training
_DEFAULT_SERVING_KEY = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
def dummy_loss(gan_model, add_summaries=True): # pylint:disable=unused-argument
return math_ops.reduce_sum(gan_model.discriminator_real_outputs -
gan_model.discriminator_gen_outputs)
def get_gan_model():
# TODO(joelshor): Find a better way of creating a variable scope.
with variable_scope.variable_scope('generator') as gen_scope:
gen_var = variable_scope.get_variable('dummy_var', initializer=0.0)
with variable_scope.variable_scope('discriminator') as dis_scope:
dis_var = variable_scope.get_variable('dummy_var', initializer=0.0)
return tfgan_tuples.GANModel(
generator_inputs=None,
generated_data=array_ops.ones([3, 4]),
generator_variables=[gen_var],
generator_scope=gen_scope,
generator_fn=None,
real_data=None,
discriminator_real_outputs=array_ops.ones([1, 2, 3]) * dis_var,
discriminator_gen_outputs=array_ops.ones([1, 2, 3]) * gen_var * dis_var,
discriminator_variables=[dis_var],
discriminator_scope=dis_scope,
discriminator_fn=None)
class GANHeadTest(test.TestCase):
def setUp(self):
super(GANHeadTest, self).setUp()
self.gan_head = head.gan_head(
generator_loss_fn=dummy_loss,
discriminator_loss_fn=dummy_loss,
generator_optimizer=training.GradientDescentOptimizer(1.0),
discriminator_optimizer=training.GradientDescentOptimizer(1.0),
get_eval_metric_ops_fn=self.get_metrics)
self.assertIsInstance(self.gan_head, head.GANHead)
def get_metrics(self, gan_model):
self.assertTrue(isinstance(gan_model, tfgan_tuples.GANModel))
return {}
def _test_modes_helper(self, mode):
return self.gan_head.create_estimator_spec(
features=None,
mode=mode,
logits=get_gan_model())
def test_modes_predict(self):
spec = self._test_modes_helper(model_fn_lib.ModeKeys.PREDICT)
self.assertItemsEqual((_DEFAULT_SERVING_KEY, 'predict'),
spec.export_outputs.keys())
def test_modes_eval(self):
self._test_modes_helper(model_fn_lib.ModeKeys.EVAL)
def test_modes_train(self):
self._test_modes_helper(model_fn_lib.ModeKeys.TRAIN)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/gan/python/estimator/python/head_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""`tf.Learn` components for `Train Input Estimator`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.gan.python.estimator.python import latent_gan_estimator_impl
# pylint: disable=wildcard-import
from tensorflow.contrib.gan.python.estimator.python.latent_gan_estimator_impl import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
__all__ = latent_gan_estimator_impl.__all__
remove_undocumented(__name__, __all__)
|
tensorflow-master
|
tensorflow/contrib/gan/python/estimator/python/latent_gan_estimator.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""`tf.Learn` components for `GANEstimator`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.gan.python.estimator.python import gan_estimator_impl
# pylint: disable=wildcard-import
from tensorflow.contrib.gan.python.estimator.python.gan_estimator_impl import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
__all__ = gan_estimator_impl.__all__
remove_undocumented(__name__, __all__)
|
tensorflow-master
|
tensorflow/contrib/gan/python/estimator/python/gan_estimator.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TF-GAN's stargan_estimator.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import tempfile
from absl.testing import parameterized
import numpy as np
from tensorflow.contrib import layers
from tensorflow.contrib.gan.python import namedtuples as tfgan_tuples
from tensorflow.contrib.gan.python.estimator.python import stargan_estimator_impl as estimator
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics as metrics_lib
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import learning_rate_decay
from tensorflow.python.training import training
from tensorflow.python.training import training_util
def dummy_generator_fn(input_data, input_data_domain_label, mode):
del input_data_domain_label, mode
return variable_scope.get_variable('dummy_g', initializer=0.5) * input_data
def dummy_discriminator_fn(input_data, num_domains, mode):
del mode
hidden = layers.flatten(input_data)
output_src = math_ops.reduce_mean(hidden, axis=1)
output_cls = layers.fully_connected(
inputs=hidden, num_outputs=num_domains, scope='debug')
return output_src, output_cls
class StarGetGANModelTest(test.TestCase, parameterized.TestCase):
"""Tests that `StarGetGANModel` produces the correct model."""
@parameterized.named_parameters(('train', model_fn_lib.ModeKeys.TRAIN),
('eval', model_fn_lib.ModeKeys.EVAL),
('predict', model_fn_lib.ModeKeys.PREDICT))
def test_get_gan_model(self, mode):
with ops.Graph().as_default():
input_data = array_ops.ones([6, 4, 4, 3])
input_data_domain_label = array_ops.one_hot([0] * 6, 5)
gan_model = estimator._get_gan_model(
mode,
dummy_generator_fn,
dummy_discriminator_fn,
input_data,
input_data_domain_label,
add_summaries=False)
self.assertEqual(input_data, gan_model.input_data)
self.assertIsNotNone(gan_model.generated_data)
self.assertIsNotNone(gan_model.generated_data_domain_target)
self.assertLen(gan_model.generator_variables, 1)
self.assertIsNotNone(gan_model.generator_scope)
self.assertIsNotNone(gan_model.generator_fn)
if mode == model_fn_lib.ModeKeys.PREDICT:
self.assertIsNone(gan_model.input_data_domain_label)
self.assertEqual(input_data_domain_label,
gan_model.generated_data_domain_target)
self.assertIsNone(gan_model.reconstructed_data)
self.assertIsNone(gan_model.discriminator_input_data_source_predication)
self.assertIsNone(
gan_model.discriminator_generated_data_source_predication)
self.assertIsNone(gan_model.discriminator_input_data_domain_predication)
self.assertIsNone(
gan_model.discriminator_generated_data_domain_predication)
self.assertIsNone(gan_model.discriminator_variables)
self.assertIsNone(gan_model.discriminator_scope)
self.assertIsNone(gan_model.discriminator_fn)
else:
self.assertEqual(input_data_domain_label,
gan_model.input_data_domain_label)
self.assertIsNotNone(gan_model.reconstructed_data.shape)
self.assertIsNotNone(
gan_model.discriminator_input_data_source_predication)
self.assertIsNotNone(
gan_model.discriminator_generated_data_source_predication)
self.assertIsNotNone(
gan_model.discriminator_input_data_domain_predication)
self.assertIsNotNone(
gan_model.discriminator_generated_data_domain_predication)
self.assertLen(gan_model.discriminator_variables, 2) # 1 FC layer
self.assertIsNotNone(gan_model.discriminator_scope)
self.assertIsNotNone(gan_model.discriminator_fn)
def get_dummy_gan_model():
"""Similar to get_gan_model()."""
# TODO(joelshor): Find a better way of creating a variable scope.
with variable_scope.variable_scope('generator') as gen_scope:
gen_var = variable_scope.get_variable('dummy_var', initializer=0.0)
with variable_scope.variable_scope('discriminator') as dis_scope:
dis_var = variable_scope.get_variable('dummy_var', initializer=0.0)
return tfgan_tuples.StarGANModel(
input_data=array_ops.ones([1, 2, 2, 3]),
input_data_domain_label=array_ops.ones([1, 2]),
generated_data=array_ops.ones([1, 2, 2, 3]),
generated_data_domain_target=array_ops.ones([1, 2]),
reconstructed_data=array_ops.ones([1, 2, 2, 3]),
discriminator_input_data_source_predication=array_ops.ones([1]) * dis_var,
discriminator_generated_data_source_predication=array_ops.ones(
[1]) * gen_var * dis_var,
discriminator_input_data_domain_predication=array_ops.ones([1, 2
]) * dis_var,
discriminator_generated_data_domain_predication=array_ops.ones([1, 2]) *
gen_var * dis_var,
generator_variables=[gen_var],
generator_scope=gen_scope,
generator_fn=None,
discriminator_variables=[dis_var],
discriminator_scope=dis_scope,
discriminator_fn=None)
def dummy_loss_fn(gan_model):
loss = math_ops.reduce_sum(
gan_model.discriminator_input_data_domain_predication -
gan_model.discriminator_generated_data_domain_predication)
loss += math_ops.reduce_sum(gan_model.input_data - gan_model.generated_data)
return tfgan_tuples.GANLoss(loss, loss)
def get_metrics(gan_model):
return {
'mse_custom_metric':
metrics_lib.mean_squared_error(gan_model.input_data,
gan_model.generated_data)
}
class GetEstimatorSpecTest(test.TestCase, parameterized.TestCase):
"""Tests that the EstimatorSpec is constructed appropriately."""
@classmethod
def setUpClass(cls):
super(GetEstimatorSpecTest, cls).setUpClass()
cls._generator_optimizer = training.GradientDescentOptimizer(1.0)
cls._discriminator_optimizer = training.GradientDescentOptimizer(1.0)
@parameterized.named_parameters(('train', model_fn_lib.ModeKeys.TRAIN),
('eval', model_fn_lib.ModeKeys.EVAL),
('predict', model_fn_lib.ModeKeys.PREDICT))
def test_get_estimator_spec(self, mode):
with ops.Graph().as_default():
self._gan_model = get_dummy_gan_model()
spec = estimator._get_estimator_spec(
mode,
self._gan_model,
loss_fn=dummy_loss_fn,
get_eval_metric_ops_fn=get_metrics,
generator_optimizer=self._generator_optimizer,
discriminator_optimizer=self._discriminator_optimizer)
self.assertEqual(mode, spec.mode)
if mode == model_fn_lib.ModeKeys.PREDICT:
self.assertEqual(self._gan_model.generated_data, spec.predictions)
elif mode == model_fn_lib.ModeKeys.TRAIN:
self.assertShapeEqual(np.array(0), spec.loss) # must be a scalar
self.assertIsNotNone(spec.train_op)
self.assertIsNotNone(spec.training_hooks)
elif mode == model_fn_lib.ModeKeys.EVAL:
self.assertEqual(self._gan_model.generated_data, spec.predictions)
self.assertShapeEqual(np.array(0), spec.loss) # must be a scalar
self.assertIsNotNone(spec.eval_metric_ops)
# TODO(joelshor): Add pandas test.
class StarGANEstimatorIntegrationTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _test_complete_flow(self,
train_input_fn,
eval_input_fn,
predict_input_fn,
prediction_size,
lr_decay=False):
def make_opt():
gstep = training_util.get_or_create_global_step()
lr = learning_rate_decay.exponential_decay(1.0, gstep, 10, 0.9)
return training.GradientDescentOptimizer(lr)
gopt = make_opt if lr_decay else training.GradientDescentOptimizer(1.0)
dopt = make_opt if lr_decay else training.GradientDescentOptimizer(1.0)
est = estimator.StarGANEstimator(
generator_fn=dummy_generator_fn,
discriminator_fn=dummy_discriminator_fn,
loss_fn=dummy_loss_fn,
generator_optimizer=gopt,
discriminator_optimizer=dopt,
get_eval_metric_ops_fn=get_metrics,
model_dir=self._model_dir)
# TRAIN
num_steps = 10
est.train(train_input_fn, steps=num_steps)
# EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])
self.assertIn('loss', scores)
self.assertEqual(scores['discriminator_loss'] + scores['generator_loss'],
scores['loss'])
self.assertIn('mse_custom_metric', scores)
# PREDICT
predictions = np.array([x for x in est.predict(predict_input_fn)])
self.assertAllEqual(prediction_size, predictions.shape)
@staticmethod
def _numpy_input_fn_wrapper(numpy_input_fn, batch_size, label_size):
"""Wrapper to remove the dictionary in numpy_input_fn.
NOTE:
We create the domain_label here because the model expects a fully defined
batch_size from the input.
Args:
numpy_input_fn: input_fn created from numpy_io
batch_size: (int) number of items for each batch
label_size: (int) number of domains
Returns:
a new input_fn
"""
def new_input_fn():
features = numpy_input_fn()
return features['x'], array_ops.one_hot([0] * batch_size, label_size)
return new_input_fn
def test_numpy_input_fn(self):
"""Tests complete flow with numpy_input_fn."""
batch_size = 5
img_size = 8
channel_size = 3
label_size = 3
image_data = np.zeros(
[batch_size, img_size, img_size, channel_size], dtype=np.float32)
train_input_fn = numpy_io.numpy_input_fn(
x={'x': image_data},
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': image_data}, batch_size=batch_size, shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': image_data}, shuffle=False)
train_input_fn = self._numpy_input_fn_wrapper(train_input_fn, batch_size,
label_size)
eval_input_fn = self._numpy_input_fn_wrapper(eval_input_fn, batch_size,
label_size)
predict_input_fn = self._numpy_input_fn_wrapper(predict_input_fn,
batch_size, label_size)
predict_input_fn = estimator.stargan_prediction_input_fn_wrapper(
predict_input_fn)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
prediction_size=[batch_size, img_size, img_size, channel_size])
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/gan/python/estimator/python/stargan_estimator_test.py
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TFGAN features module.
This module includes support for virtual batch normalization, buffer replay,
conditioning, etc.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Collapse features into a single namespace.
# pylint: disable=unused-import,wildcard-import
from tensorflow.contrib.gan.python.features.python import clip_weights
from tensorflow.contrib.gan.python.features.python import conditioning_utils
from tensorflow.contrib.gan.python.features.python import random_tensor_pool
from tensorflow.contrib.gan.python.features.python import spectral_normalization
from tensorflow.contrib.gan.python.features.python import virtual_batchnorm
from tensorflow.contrib.gan.python.features.python.clip_weights import *
from tensorflow.contrib.gan.python.features.python.conditioning_utils import *
from tensorflow.contrib.gan.python.features.python.random_tensor_pool import *
from tensorflow.contrib.gan.python.features.python.spectral_normalization import *
from tensorflow.contrib.gan.python.features.python.virtual_batchnorm import *
# pylint: enable=unused-import,wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = clip_weights.__all__
_allowed_symbols += conditioning_utils.__all__
_allowed_symbols += random_tensor_pool.__all__
_allowed_symbols += spectral_normalization.__all__
_allowed_symbols += virtual_batchnorm.__all__
remove_undocumented(__name__, _allowed_symbols)
|
tensorflow-master
|
tensorflow/contrib/gan/python/features/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.contrib.gan.python.features.random_tensor_pool."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.gan.python.features.python.random_tensor_pool_impl import tensor_pool
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class TensorPoolTest(test.TestCase):
def test_pool_unknown_input_shape(self):
"""Checks that `input_value` can have unknown shape."""
input_value = array_ops.placeholder(
dtype=dtypes.int32, shape=[None, None, 3])
output_value = tensor_pool(input_value, pool_size=10)
self.assertEqual(output_value.shape.as_list(), [None, None, 3])
with self.session(use_gpu=True) as session:
for i in range(10):
session.run(output_value, {input_value: [[[i] * 3]]})
session.run(output_value, {input_value: [[[i] * 3] * 2]})
session.run(output_value, {input_value: [[[i] * 3] * 5] * 2})
def test_pool_sequence(self):
"""Checks that values are pooled and returned maximally twice."""
input_value = array_ops.placeholder(dtype=dtypes.int32, shape=[])
output_value = tensor_pool(input_value, pool_size=10)
self.assertEqual(output_value.shape.as_list(), [])
with self.session(use_gpu=True) as session:
outs = []
for i in range(50):
out = session.run(output_value, {input_value: i})
outs.append(out)
self.assertLessEqual(out, i)
_, counts = np.unique(outs, return_counts=True)
      # Check that each value is returned at most twice.
self.assertTrue((counts <= 2).all())
def test_never_pool(self):
"""Checks that setting `pooling_probability` to zero works."""
input_value = array_ops.placeholder(dtype=dtypes.int32, shape=[])
output_value = tensor_pool(
input_value, pool_size=10, pooling_probability=0.0)
self.assertEqual(output_value.shape.as_list(), [])
with self.session(use_gpu=True) as session:
for i in range(50):
out = session.run(output_value, {input_value: i})
self.assertEqual(out, i)
def test_pooling_probability(self):
"""Checks that `pooling_probability` works."""
input_value = array_ops.placeholder(dtype=dtypes.int32, shape=[])
pool_size = 10
pooling_probability = 0.2
output_value = tensor_pool(
input_value,
pool_size=pool_size,
pooling_probability=pooling_probability)
self.assertEqual(output_value.shape.as_list(), [])
with self.session(use_gpu=True) as session:
not_pooled = 0
total = 1000
for i in range(total):
out = session.run(output_value, {input_value: i})
if out == i:
not_pooled += 1
self.assertAllClose(
(not_pooled - pool_size) / (total - pool_size),
1 - pooling_probability,
atol=0.03)
def test_input_values_tuple(self):
"""Checks that `input_values` can be a tuple."""
input_values = (array_ops.placeholder(dtype=dtypes.int32, shape=[]),
array_ops.placeholder(dtype=dtypes.int32, shape=[]))
output_values = tensor_pool(input_values, pool_size=3)
self.assertEqual(len(output_values), len(input_values))
for output_value in output_values:
self.assertEqual(output_value.shape.as_list(), [])
with self.session(use_gpu=True) as session:
for i in range(10):
outs = session.run(output_values, {
input_values[0]: i,
input_values[1]: i + 1
})
self.assertEqual(len(outs), len(input_values))
self.assertEqual(outs[1] - outs[0], 1)
def test_pool_preserves_shape(self):
t = constant_op.constant(1)
input_values = [[t, t, t], (t, t), t]
output_values = tensor_pool(input_values, pool_size=5)
print('stuff: ', output_values)
# Overall shape.
self.assertIsInstance(output_values, list)
self.assertEqual(3, len(output_values))
# Shape of first element.
self.assertIsInstance(output_values[0], list)
self.assertEqual(3, len(output_values[0]))
# Shape of second element.
self.assertIsInstance(output_values[1], tuple)
self.assertEqual(2, len(output_values[1]))
# Shape of third element.
self.assertIsInstance(output_values[2], ops.Tensor)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/gan/python/features/python/random_tensor_pool_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Virtual batch normalization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.gan.python.features.python import virtual_batchnorm_impl
# pylint: disable=wildcard-import
from tensorflow.contrib.gan.python.features.python.virtual_batchnorm_impl import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
__all__ = virtual_batchnorm_impl.__all__
remove_undocumented(__name__, __all__)
|
tensorflow-master
|
tensorflow/contrib/gan/python/features/python/virtual_batchnorm.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for features.spectral_normalization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import slim
from tensorflow.contrib.gan.python.features.python import spectral_normalization_impl as spectral_normalization
from tensorflow.contrib.layers.python.layers import layers as contrib_layers
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.keras.layers import convolutional as keras_convolutional
from tensorflow.python.keras.layers import core as keras_core
from tensorflow.python.layers import convolutional as layers_convolutional
from tensorflow.python.layers import core as layers_core
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class SpectralNormalizationTest(test.TestCase):
def testComputeSpectralNorm(self):
weights = variable_scope.get_variable(
'w', dtype=dtypes.float32, shape=[2, 3, 50, 100])
weights = math_ops.multiply(weights, 10.0)
s = linalg_ops.svd(
array_ops.reshape(weights, [-1, weights.shape[-1]]), compute_uv=False)
true_sn = s[..., 0]
estimated_sn = spectral_normalization.compute_spectral_norm(weights)
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
np_true_sn = sess.run(true_sn)
for i in range(50):
est = sess.run(estimated_sn)
if i < 1:
np_est_1 = est
        if i < 5:
          np_est_5 = est
        if i < 10:
          np_est_10 = est
np_est_50 = est
# Check that the estimate improves with more iterations.
self.assertAlmostEqual(np_true_sn, np_est_50, 0)
self.assertGreater(
abs(np_true_sn - np_est_10), abs(np_true_sn - np_est_50))
self.assertGreater(
abs(np_true_sn - np_est_5), abs(np_true_sn - np_est_10))
self.assertGreater(abs(np_true_sn - np_est_1), abs(np_true_sn - np_est_5))
def testSpectralNormalize(self):
weights = variable_scope.get_variable(
'w', dtype=dtypes.float32, shape=[2, 3, 50, 100])
weights = math_ops.multiply(weights, 10.0)
normalized_weights = spectral_normalization.spectral_normalize(
weights, power_iteration_rounds=1)
unnormalized_sigma = linalg_ops.svd(
array_ops.reshape(weights, [-1, weights.shape[-1]]),
compute_uv=False)[..., 0]
normalized_sigma = linalg_ops.svd(
array_ops.reshape(normalized_weights, [-1, weights.shape[-1]]),
compute_uv=False)[..., 0]
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
s0 = sess.run(unnormalized_sigma)
for i in range(50):
sigma = sess.run(normalized_sigma)
if i < 1:
s1 = sigma
if i < 5:
s5 = sigma
if i < 10:
s10 = sigma
s50 = sigma
self.assertAlmostEqual(1., s50, 0)
self.assertGreater(abs(s10 - 1.), abs(s50 - 1.))
self.assertGreater(abs(s5 - 1.), abs(s10 - 1.))
self.assertGreater(abs(s1 - 1.), abs(s5 - 1.))
self.assertGreater(abs(s0 - 1.), abs(s1 - 1.))
def _testLayerHelper(self, build_layer_fn, w_shape, b_shape, is_keras=False):
x = array_ops.placeholder(dtypes.float32, shape=[2, 10, 10, 3])
w_initial = np.random.randn(*w_shape) * 10
w_initializer = init_ops.constant_initializer(w_initial)
b_initial = np.random.randn(*b_shape)
b_initializer = init_ops.constant_initializer(b_initial)
if is_keras:
context_manager = spectral_normalization.keras_spectral_normalization()
else:
getter = spectral_normalization.spectral_normalization_custom_getter()
context_manager = variable_scope.variable_scope('', custom_getter=getter)
with context_manager:
(net,
expected_normalized_vars, expected_not_normalized_vars) = build_layer_fn(
x, w_initializer, b_initializer)
x_data = np.random.rand(*x.shape)
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
      # Before running a forward pass we still expect the variable values to
      # differ from their initial values because of the normalizer.
w_befores = []
for name, var in expected_normalized_vars.items():
w_before = sess.run(var)
w_befores.append(w_before)
self.assertFalse(
np.allclose(w_initial, w_before),
msg=('%s appears not to be normalized. Before: %s After: %s' %
(name, w_initial, w_before)))
# Not true for the unnormalized variables.
for name, var in expected_not_normalized_vars.items():
b_before = sess.run(var)
self.assertTrue(
np.allclose(b_initial, b_before),
msg=('%s appears to be unexpectedly normalized. '
'Before: %s After: %s' % (name, b_initial, b_before)))
# Run a bunch of forward passes.
for _ in range(1000):
_ = sess.run(net, feed_dict={x: x_data})
# We expect this to have improved the estimate of the spectral norm,
# which should have changed the variable values and brought them close
# to the true Spectral Normalized values.
_, s, _ = np.linalg.svd(w_initial.reshape([-1, 3]))
exactly_normalized = w_initial / s[0]
for w_before, (name, var) in zip(w_befores,
expected_normalized_vars.items()):
w_after = sess.run(var)
self.assertFalse(
np.allclose(w_before, w_after, rtol=1e-8, atol=1e-8),
msg=('%s did not improve over many iterations. '
'Before: %s After: %s' % (name, w_before, w_after)))
self.assertAllClose(
exactly_normalized,
w_after,
rtol=1e-4,
atol=1e-4,
            msg=('Estimate of spectral norm for %s was inaccurate. '
                 'Normalized matrices do not match. '
'Estimate: %s Actual: %s' % (name, w_after,
exactly_normalized)))
def testConv2D_Layers(self):
def build_layer_fn(x, w_initializer, b_initializer):
layer = layers_convolutional.Conv2D(
filters=3,
kernel_size=3,
padding='same',
kernel_initializer=w_initializer,
bias_initializer=b_initializer)
net = layer.apply(x)
expected_normalized_vars = {'tf.layers.Conv2d.kernel': layer.kernel}
expected_not_normalized_vars = {'tf.layers.Conv2d.bias': layer.bias}
return net, expected_normalized_vars, expected_not_normalized_vars
self._testLayerHelper(build_layer_fn, (3, 3, 3, 3), (3,))
def testConv2D_ContribLayers(self):
def build_layer_fn(x, w_initializer, b_initializer):
var_collection = {
'weights': ['CONTRIB_LAYERS_CONV2D_WEIGHTS'],
'biases': ['CONTRIB_LAYERS_CONV2D_BIASES']
}
net = contrib_layers.conv2d(
x,
3,
3,
weights_initializer=w_initializer,
biases_initializer=b_initializer,
variables_collections=var_collection)
      weight_vars = ops.get_collection('CONTRIB_LAYERS_CONV2D_WEIGHTS')
      self.assertEqual(1, len(weight_vars))
      bias_vars = ops.get_collection('CONTRIB_LAYERS_CONV2D_BIASES')
      self.assertEqual(1, len(bias_vars))
expected_normalized_vars = {
'contrib.layers.conv2d.weights': weight_vars[0]
}
expected_not_normalized_vars = {
'contrib.layers.conv2d.bias': bias_vars[0]
}
return net, expected_normalized_vars, expected_not_normalized_vars
self._testLayerHelper(build_layer_fn, (3, 3, 3, 3), (3,))
def testConv2D_Slim(self):
def build_layer_fn(x, w_initializer, b_initializer):
var_collection = {
'weights': ['SLIM_CONV2D_WEIGHTS'],
'biases': ['SLIM_CONV2D_BIASES']
}
net = slim.conv2d(
x,
3,
3,
weights_initializer=w_initializer,
biases_initializer=b_initializer,
variables_collections=var_collection)
      weight_vars = ops.get_collection('SLIM_CONV2D_WEIGHTS')
      self.assertEqual(1, len(weight_vars))
      bias_vars = ops.get_collection('SLIM_CONV2D_BIASES')
      self.assertEqual(1, len(bias_vars))
expected_normalized_vars = {'slim.conv2d.weights': weight_vars[0]}
expected_not_normalized_vars = {'slim.conv2d.bias': bias_vars[0]}
return net, expected_normalized_vars, expected_not_normalized_vars
self._testLayerHelper(build_layer_fn, (3, 3, 3, 3), (3,))
def testConv2D_Keras(self):
def build_layer_fn(x, w_initializer, b_initializer):
layer = keras_convolutional.Conv2D(
filters=3,
kernel_size=3,
padding='same',
kernel_initializer=w_initializer,
bias_initializer=b_initializer)
net = layer.apply(x)
expected_normalized_vars = {'keras.layers.Conv2d.kernel': layer.kernel}
expected_not_normalized_vars = {'keras.layers.Conv2d.bias': layer.bias}
return net, expected_normalized_vars, expected_not_normalized_vars
self._testLayerHelper(build_layer_fn, (3, 3, 3, 3), (3,), is_keras=True)
def testFC_Layers(self):
def build_layer_fn(x, w_initializer, b_initializer):
x = layers_core.Flatten()(x)
layer = layers_core.Dense(
units=3,
kernel_initializer=w_initializer,
bias_initializer=b_initializer)
net = layer.apply(x)
expected_normalized_vars = {'tf.layers.Dense.kernel': layer.kernel}
expected_not_normalized_vars = {'tf.layers.Dense.bias': layer.bias}
return net, expected_normalized_vars, expected_not_normalized_vars
self._testLayerHelper(build_layer_fn, (300, 3), (3,))
def testFC_ContribLayers(self):
def build_layer_fn(x, w_initializer, b_initializer):
var_collection = {
'weights': ['CONTRIB_LAYERS_FC_WEIGHTS'],
'biases': ['CONTRIB_LAYERS_FC_BIASES']
}
x = contrib_layers.flatten(x)
net = contrib_layers.fully_connected(
x,
3,
weights_initializer=w_initializer,
biases_initializer=b_initializer,
variables_collections=var_collection)
      weight_vars = ops.get_collection('CONTRIB_LAYERS_FC_WEIGHTS')
      self.assertEqual(1, len(weight_vars))
      bias_vars = ops.get_collection('CONTRIB_LAYERS_FC_BIASES')
      self.assertEqual(1, len(bias_vars))
expected_normalized_vars = {
'contrib.layers.fully_connected.weights': weight_vars[0]
}
expected_not_normalized_vars = {
'contrib.layers.fully_connected.bias': bias_vars[0]
}
return net, expected_normalized_vars, expected_not_normalized_vars
self._testLayerHelper(build_layer_fn, (300, 3), (3,))
def testFC_Slim(self):
def build_layer_fn(x, w_initializer, b_initializer):
var_collection = {
'weights': ['SLIM_FC_WEIGHTS'],
'biases': ['SLIM_FC_BIASES']
}
x = slim.flatten(x)
net = slim.fully_connected(
x,
3,
weights_initializer=w_initializer,
biases_initializer=b_initializer,
variables_collections=var_collection)
      weight_vars = ops.get_collection('SLIM_FC_WEIGHTS')
      self.assertEqual(1, len(weight_vars))
      bias_vars = ops.get_collection('SLIM_FC_BIASES')
      self.assertEqual(1, len(bias_vars))
expected_normalized_vars = {
'slim.fully_connected.weights': weight_vars[0]
}
expected_not_normalized_vars = {'slim.fully_connected.bias': bias_vars[0]}
return net, expected_normalized_vars, expected_not_normalized_vars
self._testLayerHelper(build_layer_fn, (300, 3), (3,))
def testFC_Keras(self):
def build_layer_fn(x, w_initializer, b_initializer):
x = keras_core.Flatten()(x)
layer = keras_core.Dense(
units=3,
kernel_initializer=w_initializer,
bias_initializer=b_initializer)
net = layer.apply(x)
expected_normalized_vars = {'keras.layers.Dense.kernel': layer.kernel}
expected_not_normalized_vars = {'keras.layers.Dense.bias': layer.bias}
return net, expected_normalized_vars, expected_not_normalized_vars
self._testLayerHelper(build_layer_fn, (300, 3), (3,), is_keras=True)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/gan/python/features/python/spectral_normalization_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Miscellaneous utilities for TFGAN code and examples."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.gan.python.features.python import conditioning_utils_impl
# pylint: disable=wildcard-import
from tensorflow.contrib.gan.python.features.python.conditioning_utils_impl import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
__all__ = conditioning_utils_impl.__all__
remove_undocumented(__name__, __all__)
|
tensorflow-master
|
tensorflow/contrib/gan/python/features/python/conditioning_utils.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities to clip weights."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.gan.python.features.python import clip_weights_impl
# pylint: disable=wildcard-import
from tensorflow.contrib.gan.python.features.python.clip_weights_impl import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
__all__ = clip_weights_impl.__all__
remove_undocumented(__name__, __all__)
|
tensorflow-master
|
tensorflow/contrib/gan/python/features/python/clip_weights.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tfgan.python.features.conditioning_utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.gan.python.features.python import conditioning_utils_impl as conditioning_utils
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class ConditioningUtilsTest(test.TestCase):
def test_condition_tensor_multiple_shapes(self):
for tensor_shape in [(4, 1), (4, 2), (4, 2, 6), (None, 5, 3)]:
for conditioning_shape in [(4, 1), (4, 8), (4, 5, 3)]:
conditioning_utils.condition_tensor(
array_ops.placeholder(dtypes.float32, tensor_shape),
array_ops.placeholder(dtypes.float32, conditioning_shape))
def test_condition_tensor_asserts(self):
with self.assertRaisesRegexp(ValueError, 'Cannot reshape'):
conditioning_utils.condition_tensor(
array_ops.placeholder(dtypes.float32, (4, 1)),
array_ops.placeholder(dtypes.float32, (5, 1)))
with self.assertRaisesRegexp(ValueError, 'Shape .* is not fully defined'):
conditioning_utils.condition_tensor(
array_ops.placeholder(dtypes.float32, (5, None)),
array_ops.placeholder(dtypes.float32, (5, 1)))
with self.assertRaisesRegexp(ValueError, 'at least 2D'):
conditioning_utils.condition_tensor(
array_ops.placeholder(dtypes.float32, (5, 2)),
array_ops.placeholder(dtypes.float32, (5)))
def test_condition_tensor_from_onehot(self):
conditioning_utils.condition_tensor_from_onehot(
array_ops.placeholder(dtypes.float32, (5, 4, 1)),
array_ops.placeholder(dtypes.float32, (5, 10)))
def test_condition_tensor_from_onehot_asserts(self):
with self.assertRaisesRegexp(ValueError, 'Shape .* must have rank 2'):
conditioning_utils.condition_tensor_from_onehot(
array_ops.placeholder(dtypes.float32, (5, 1)),
array_ops.placeholder(dtypes.float32, (5)))
with self.assertRaisesRegexp(ValueError, 'Shape .* is not fully defined'):
conditioning_utils.condition_tensor_from_onehot(
array_ops.placeholder(dtypes.float32, (5, 1)),
array_ops.placeholder(dtypes.float32, (5, None)))
with self.assertRaisesRegexp(ValueError, 'Cannot reshape a tensor'):
conditioning_utils.condition_tensor_from_onehot(
array_ops.placeholder(dtypes.float32, (5, 1)),
array_ops.placeholder(dtypes.float32, (4, 6)))
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/gan/python/features/python/conditioning_utils_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Virtual batch normalization.
This technique was first introduced in `Improved Techniques for Training GANs`
(Salimans et al, https://arxiv.org/abs/1606.03498). Instead of using batch
normalization on a minibatch, it fixes a reference subset of the data to use for
calculating normalization statistics.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import variable_scope
__all__ = [
'VBN',
]
def _static_or_dynamic_batch_size(tensor, batch_axis):
"""Returns the static or dynamic batch size."""
batch_size = array_ops.shape(tensor)[batch_axis]
static_batch_size = tensor_util.constant_value(batch_size)
return static_batch_size or batch_size
def _statistics(x, axes):
"""Calculate the mean and mean square of `x`.
Modified from the implementation of `tf.nn.moments`.
Args:
x: A `Tensor`.
axes: Array of ints. Axes along which to compute mean and variance.
Returns:
Two `Tensor` objects: `mean` and `square mean`.
"""
# The dynamic range of fp16 is too limited to support the collection of
# sufficient statistics. As a workaround we simply perform the operations
# on 32-bit floats before converting the mean and variance back to fp16
y = math_ops.cast(x, dtypes.float32) if x.dtype == dtypes.float16 else x
# Compute true mean while keeping the dims for proper broadcasting.
shift = array_ops.stop_gradient(math_ops.reduce_mean(y, axes, keepdims=True))
shifted_mean = math_ops.reduce_mean(y - shift, axes, keepdims=True)
mean = shifted_mean + shift
mean_squared = math_ops.reduce_mean(math_ops.square(y), axes, keepdims=True)
mean = array_ops.squeeze(mean, axes)
mean_squared = array_ops.squeeze(mean_squared, axes)
if x.dtype == dtypes.float16:
return (math_ops.cast(mean, dtypes.float16),
math_ops.cast(mean_squared, dtypes.float16))
else:
return (mean, mean_squared)
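# A small numpy-only sketch (illustrative, not part of the library) of why the
# code above tracks the mean square rather than the variance: per-example mean
# squares combine linearly with the reference-batch statistics, and the
# variance can always be recovered afterwards as E[x^2] - E[x]^2.
def _mean_square_linearity_sketch():
  import numpy as np  # Local import; numpy is only needed for this sketch.
  reference = np.random.randn(8, 3)
  example = np.random.randn(1, 3)
  full = np.concatenate([reference, example], axis=0)
  example_weight = 1. / (reference.shape[0] + 1.)
  ref_weight = 1. - example_weight
  # Weighted averages of means and mean squares match the full-batch values.
  vb_mean = example_weight * example.mean(0) + ref_weight * reference.mean(0)
  vb_mean_sq = (example_weight * (example ** 2).mean(0) +
                ref_weight * (reference ** 2).mean(0))
  np.testing.assert_allclose(vb_mean, full.mean(0))
  np.testing.assert_allclose(vb_mean_sq, (full ** 2).mean(0))
  np.testing.assert_allclose(vb_mean_sq - vb_mean ** 2, full.var(0))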
def _validate_init_input_and_get_axis(reference_batch, axis):
"""Validate input and return the used axis value."""
if reference_batch.shape.ndims is None:
raise ValueError('`reference_batch` has unknown dimensions.')
ndims = reference_batch.shape.ndims
if axis < 0:
used_axis = ndims + axis
else:
used_axis = axis
if used_axis < 0 or used_axis >= ndims:
raise ValueError('Value of `axis` argument ' + str(used_axis) +
' is out of range for input with rank ' + str(ndims))
return used_axis
def _validate_call_input(tensor_list, batch_dim):
"""Verifies that tensor shapes are compatible, except for `batch_dim`."""
def _get_shape(tensor):
shape = tensor.shape.as_list()
del shape[batch_dim]
return shape
base_shape = tensor_shape.TensorShape(_get_shape(tensor_list[0]))
for tensor in tensor_list:
base_shape.assert_is_compatible_with(_get_shape(tensor))
class VBN(object):
"""A class to perform virtual batch normalization.
This technique was first introduced in `Improved Techniques for Training GANs`
(Salimans et al, https://arxiv.org/abs/1606.03498). Instead of using batch
normalization on a minibatch, it fixes a reference subset of the data to use
for calculating normalization statistics.
To do this, we calculate the reference batch mean and mean square, and modify
those statistics for each example. We use mean square instead of variance,
since it is linear.
Note that if `center` or `scale` variables are created, they are shared
between all calls to this object.
The `__init__` API is intended to mimic
`tf.compat.v1.layers.batch_normalization` as
closely as possible.
"""
def __init__(self,
reference_batch,
axis=-1,
epsilon=1e-3,
center=True,
scale=True,
beta_initializer=init_ops.zeros_initializer(),
gamma_initializer=init_ops.ones_initializer(),
beta_regularizer=None,
gamma_regularizer=None,
trainable=True,
name=None,
batch_axis=0):
"""Initialize virtual batch normalization object.
We precompute the 'mean' and 'mean squared' of the reference batch, so that
`__call__` is efficient. This means that the axis must be supplied when the
object is created, not when it is called.
We precompute 'square mean' instead of 'variance', because the square mean
can be easily adjusted on a per-example basis.
Args:
      reference_batch: A minibatch tensor. This will form the reference data
from which the normalization statistics are calculated. See
https://arxiv.org/abs/1606.03498 for more details.
axis: Integer, the axis that should be normalized (typically the features
axis). For instance, after a `Convolution2D` layer with
`data_format="channels_first"`, set `axis=1` in `BatchNormalization`.
epsilon: Small float added to variance to avoid dividing by zero.
center: If True, add offset of `beta` to normalized tensor. If False,
`beta` is ignored.
scale: If True, multiply by `gamma`. If False, `gamma` is not used. When
the next layer is linear (also e.g. `nn.relu`), this can be disabled
since the scaling can be done by the next layer.
beta_initializer: Initializer for the beta weight.
gamma_initializer: Initializer for the gamma weight.
beta_regularizer: Optional regularizer for the beta weight.
gamma_regularizer: Optional regularizer for the gamma weight.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
name: String, the name of the ops.
batch_axis: The axis of the batch dimension. This dimension is treated
differently in `virtual batch normalization` vs `batch normalization`.
Raises:
ValueError: If `reference_batch` has unknown dimensions at graph
construction.
ValueError: If `batch_axis` is the same as `axis`.
"""
axis = _validate_init_input_and_get_axis(reference_batch, axis)
self._epsilon = epsilon
self._beta = 0
self._gamma = 1
self._batch_axis = _validate_init_input_and_get_axis(
reference_batch, batch_axis)
if axis == self._batch_axis:
raise ValueError('`axis` and `batch_axis` cannot be the same.')
with variable_scope.variable_scope(
name, 'VBN', values=[reference_batch]) as self._vs:
self._reference_batch = reference_batch
# Calculate important shapes:
# 1) Reduction axes for the reference batch
# 2) Broadcast shape, if necessary
# 3) Reduction axes for the virtual batchnormed batch
# 4) Shape for optional parameters
input_shape = self._reference_batch.shape
ndims = input_shape.ndims
reduction_axes = list(range(ndims))
del reduction_axes[axis]
self._broadcast_shape = [1] * len(input_shape)
self._broadcast_shape[axis] = input_shape.dims[axis]
self._example_reduction_axes = list(range(ndims))
del self._example_reduction_axes[max(axis, self._batch_axis)]
del self._example_reduction_axes[min(axis, self._batch_axis)]
params_shape = self._reference_batch.shape[axis]
# Determines whether broadcasting is needed. This is slightly different
# than in the `nn.batch_normalization` case, due to `batch_dim`.
self._needs_broadcasting = (
sorted(self._example_reduction_axes) != list(range(ndims))[:-2])
# Calculate the sufficient statistics for the reference batch in a way
# that can be easily modified by additional examples.
self._ref_mean, self._ref_mean_squares = _statistics(
self._reference_batch, reduction_axes)
self._ref_variance = (
self._ref_mean_squares - math_ops.square(self._ref_mean))
# Virtual batch normalization uses a weighted average between example
# statistics and the reference batch statistics.
ref_batch_size = _static_or_dynamic_batch_size(self._reference_batch,
self._batch_axis)
self._example_weight = 1. / (
math_ops.cast(ref_batch_size, dtypes.float32) + 1.)
self._ref_weight = 1. - self._example_weight
# Make the variables, if necessary.
if center:
self._beta = variable_scope.get_variable(
name='beta',
shape=(params_shape,),
initializer=beta_initializer,
regularizer=beta_regularizer,
trainable=trainable)
if scale:
self._gamma = variable_scope.get_variable(
name='gamma',
shape=(params_shape,),
initializer=gamma_initializer,
regularizer=gamma_regularizer,
trainable=trainable)
def _virtual_statistics(self, inputs, reduction_axes):
"""Compute the statistics needed for virtual batch normalization."""
cur_mean, cur_mean_sq = _statistics(inputs, reduction_axes)
vb_mean = (
self._example_weight * cur_mean + self._ref_weight * self._ref_mean)
vb_mean_sq = (
self._example_weight * cur_mean_sq +
self._ref_weight * self._ref_mean_squares)
return (vb_mean, vb_mean_sq)
def _broadcast(self, v, broadcast_shape=None):
# The exact broadcast shape depends on the current batch, not the reference
# batch, unless we're calculating the batch normalization of the reference
# batch.
b_shape = broadcast_shape or self._broadcast_shape
if self._needs_broadcasting and v is not None:
return array_ops.reshape(v, b_shape)
return v
def reference_batch_normalization(self):
"""Return the reference batch, but batch normalized."""
with ops.name_scope(self._vs.name):
return nn.batch_normalization(self._reference_batch,
self._broadcast(self._ref_mean),
self._broadcast(self._ref_variance),
self._broadcast(self._beta),
self._broadcast(self._gamma), self._epsilon)
def __call__(self, inputs):
"""Run virtual batch normalization on inputs.
Args:
inputs: Tensor input.
Returns:
A virtual batch normalized version of `inputs`.
Raises:
ValueError: If `inputs` shape isn't compatible with the reference batch.
"""
_validate_call_input([inputs, self._reference_batch], self._batch_axis)
with ops.name_scope(self._vs.name, values=[inputs, self._reference_batch]):
# Calculate the statistics on the current input on a per-example basis.
vb_mean, vb_mean_sq = self._virtual_statistics(
inputs, self._example_reduction_axes)
vb_variance = vb_mean_sq - math_ops.square(vb_mean)
# The exact broadcast shape of the input statistic Tensors depends on the
# current batch, not the reference batch. The parameter broadcast shape
# is independent of the shape of the input statistic Tensor dimensions.
b_shape = self._broadcast_shape[:] # deep copy
b_shape[self._batch_axis] = _static_or_dynamic_batch_size(
inputs, self._batch_axis)
return nn.batch_normalization(
inputs, self._broadcast(vb_mean, b_shape),
self._broadcast(vb_variance, b_shape),
self._broadcast(self._beta, self._broadcast_shape),
self._broadcast(self._gamma, self._broadcast_shape), self._epsilon)
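# A minimal usage sketch (shapes and names below are illustrative, assuming a
# TF1-style graph): the reference batch is fixed at construction time, and the
# resulting object then normalizes new minibatches against its statistics.
def _vbn_usage_sketch():
  reference_batch = array_ops.zeros([16, 32, 32, 3])
  minibatch = array_ops.ones([4, 32, 32, 3])
  vbn = VBN(reference_batch, name='generator_vbn')
  # Normalize the reference batch itself with its own statistics.
  _ = vbn.reference_batch_normalization()
  # Normalize a new minibatch using the precomputed reference statistics.
  return vbn(minibatch)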
|
tensorflow-master
|
tensorflow/contrib/gan/python/features/python/virtual_batchnorm_impl.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities to clip weights.
This is useful in the original formulation of the Wasserstein loss, which
requires that the discriminator be K-Lipschitz. See
https://arxiv.org/pdf/1701.07875 for more details.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.opt.python.training import variable_clipping_optimizer
__all__ = [
'clip_variables',
'clip_discriminator_weights',
]
def clip_discriminator_weights(optimizer, model, weight_clip):
"""Modifies an optimizer so it clips weights to a certain value.
Args:
optimizer: An optimizer to perform variable weight clipping.
model: A GANModel namedtuple.
weight_clip: Positive python float to clip discriminator weights. Used to
enforce a K-lipschitz condition, which is useful for some GAN training
      schemes (e.g. WGAN: https://arxiv.org/pdf/1701.07875).
Returns:
An optimizer to perform weight clipping after updates.
Raises:
ValueError: If `weight_clip` is less than 0.
"""
return clip_variables(optimizer, model.discriminator_variables, weight_clip)
def clip_variables(optimizer, variables, weight_clip):
"""Modifies an optimizer so it clips weights to a certain value.
Args:
optimizer: An optimizer to perform variable weight clipping.
variables: A list of TensorFlow variables.
weight_clip: Positive python float to clip discriminator weights. Used to
enforce a K-lipschitz condition, which is useful for some GAN training
      schemes (e.g. WGAN: https://arxiv.org/pdf/1701.07875).
Returns:
An optimizer to perform weight clipping after updates.
Raises:
ValueError: If `weight_clip` is less than 0.
"""
  if weight_clip < 0:
    raise ValueError(
        '`weight_clip` must be positive. Instead, was %s' % weight_clip)
return variable_clipping_optimizer.VariableClippingOptimizer(
opt=optimizer,
# Do no reduction, so clipping happens per-value.
vars_to_clip_dims={var: [] for var in variables},
max_norm=weight_clip,
use_locking=True,
colocate_clip_ops_with_vars=True)
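# A minimal usage sketch (the optimizer, variable and clip value below are
# illustrative, assuming a TF1-style training setup): wrapping the optimizer
# makes every update step followed by an in-place clip of each listed variable.
def _clip_variables_sketch():
  from tensorflow.python.ops import variable_scope
  from tensorflow.python.training import gradient_descent
  disc_var = variable_scope.get_variable('disc_w', shape=[3, 3])
  base_optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=0.1)
  clipping_optimizer = clip_variables(
      base_optimizer, variables=[disc_var], weight_clip=0.01)
  # `clipping_optimizer.minimize(loss, var_list=[disc_var])` would then apply
  # the usual update and clip each element of `disc_w` to [-0.01, 0.01].
  return clipping_optimizer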
|
tensorflow-master
|
tensorflow/contrib/gan/python/features/python/clip_weights_impl.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tfgan.python.features.virtual_batchnorm."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.framework.python.ops import variables as contrib_variables_lib
from tensorflow.contrib.gan.python.features.python import virtual_batchnorm_impl as virtual_batchnorm
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed
from tensorflow.python.layers import normalization
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
class VirtualBatchnormTest(test.TestCase):
def test_syntax(self):
reference_batch = array_ops.zeros([5, 3, 16, 9, 15])
vbn = virtual_batchnorm.VBN(reference_batch, batch_axis=1)
vbn(array_ops.ones([5, 7, 16, 9, 15]))
def test_no_broadcast_needed(self):
"""When `axis` and `batch_axis` are at the end, no broadcast is needed."""
reference_batch = array_ops.zeros([5, 3, 16, 9, 15])
minibatch = array_ops.zeros([5, 3, 16, 3, 15])
vbn = virtual_batchnorm.VBN(reference_batch, axis=-1, batch_axis=-2)
vbn(minibatch)
def test_statistics(self):
"""Check that `_statistics` gives the same result as `nn.moments`."""
random_seed.set_random_seed(1234)
tensors = random_ops.random_normal([4, 5, 7, 3])
for axes in [(3), (0, 2), (1, 2, 3)]:
vb_mean, mean_sq = virtual_batchnorm._statistics(tensors, axes)
mom_mean, mom_var = nn.moments(tensors, axes)
vb_var = mean_sq - math_ops.square(vb_mean)
with self.cached_session(use_gpu=True) as sess:
vb_mean_np, vb_var_np, mom_mean_np, mom_var_np = sess.run([
vb_mean, vb_var, mom_mean, mom_var])
self.assertAllClose(mom_mean_np, vb_mean_np)
self.assertAllClose(mom_var_np, vb_var_np)
def test_virtual_statistics(self):
"""Check that `_virtual_statistics` gives same result as `nn.moments`."""
random_seed.set_random_seed(1234)
batch_axis = 0
partial_batch = random_ops.random_normal([4, 5, 7, 3])
single_example = random_ops.random_normal([1, 5, 7, 3])
full_batch = array_ops.concat([partial_batch, single_example], axis=0)
for reduction_axis in range(1, 4):
# Get `nn.moments` on the full batch.
reduction_axes = list(range(4))
del reduction_axes[reduction_axis]
mom_mean, mom_variance = nn.moments(full_batch, reduction_axes)
# Get virtual batch statistics.
vb_reduction_axes = list(range(4))
del vb_reduction_axes[reduction_axis]
del vb_reduction_axes[batch_axis]
vbn = virtual_batchnorm.VBN(partial_batch, reduction_axis)
vb_mean, mean_sq = vbn._virtual_statistics(
single_example, vb_reduction_axes)
vb_variance = mean_sq - math_ops.square(vb_mean)
# Remove singleton batch dim for easy comparisons.
vb_mean = array_ops.squeeze(vb_mean, batch_axis)
vb_variance = array_ops.squeeze(vb_variance, batch_axis)
with self.cached_session(use_gpu=True) as sess:
vb_mean_np, vb_var_np, mom_mean_np, mom_var_np = sess.run([
vb_mean, vb_variance, mom_mean, mom_variance])
self.assertAllClose(mom_mean_np, vb_mean_np)
self.assertAllClose(mom_var_np, vb_var_np)
def test_reference_batch_normalization(self):
"""Check that batch norm from VBN agrees with opensource implementation."""
random_seed.set_random_seed(1234)
batch = random_ops.random_normal([6, 5, 7, 3, 3])
for axis in range(5):
# Get `layers` batchnorm result.
bn_normalized = normalization.batch_normalization(
batch, axis, training=True)
# Get VBN's batch normalization on reference batch.
      batch_axis = 0 if axis != 0 else 1  # axis and batch_axis can't be the same
vbn = virtual_batchnorm.VBN(batch, axis, batch_axis=batch_axis)
vbn_normalized = vbn.reference_batch_normalization()
with self.cached_session(use_gpu=True) as sess:
variables_lib.global_variables_initializer().run()
bn_normalized_np, vbn_normalized_np = sess.run(
[bn_normalized, vbn_normalized])
self.assertAllClose(bn_normalized_np, vbn_normalized_np)
def test_same_as_batchnorm(self):
"""Check that batch norm on set X is the same as ref of X / y on `y`."""
random_seed.set_random_seed(1234)
num_examples = 4
examples = [random_ops.random_normal([5, 7, 3]) for _ in
range(num_examples)]
# Get the result of the opensource batch normalization.
batch_normalized = normalization.batch_normalization(
array_ops.stack(examples), training=True)
for i in range(num_examples):
examples_except_i = array_ops.stack(examples[:i] + examples[i+1:])
# Get the result of VBN's batch normalization.
vbn = virtual_batchnorm.VBN(examples_except_i)
vb_normed = array_ops.squeeze(
vbn(array_ops.expand_dims(examples[i], [0])), [0])
with self.cached_session(use_gpu=True) as sess:
variables_lib.global_variables_initializer().run()
bn_np, vb_np = sess.run([batch_normalized, vb_normed])
self.assertAllClose(bn_np[i, ...], vb_np)
def test_minibatch_independent(self):
"""Test that virtual batch normalized examples are independent.
Unlike batch normalization, virtual batch normalization has the property
that the virtual batch normalized value of an example is independent of the
other examples in the minibatch. In this test, we verify this property.
"""
random_seed.set_random_seed(1234)
# These can be random, but must be the same for all session calls.
reference_batch = constant_op.constant(
np.random.normal(size=[4, 7, 3]), dtype=dtypes.float32)
fixed_example = constant_op.constant(np.random.normal(size=[7, 3]),
dtype=dtypes.float32)
# Get the VBN object and the virtual batch normalized value for
# `fixed_example`.
vbn = virtual_batchnorm.VBN(reference_batch)
vbn_fixed_example = array_ops.squeeze(
vbn(array_ops.expand_dims(fixed_example, 0)), 0)
with self.session(use_gpu=True):
variables_lib.global_variables_initializer().run()
vbn_fixed_example_np = vbn_fixed_example.eval()
# Check that the value is the same for different minibatches, and different
# sized minibatches.
for minibatch_size in range(1, 6):
examples = [random_ops.random_normal([7, 3]) for _ in
range(minibatch_size)]
minibatch = array_ops.stack([fixed_example] + examples)
vbn_minibatch = vbn(minibatch)
cur_vbn_fixed_example = vbn_minibatch[0, ...]
with self.cached_session(use_gpu=True):
variables_lib.global_variables_initializer().run()
cur_vbn_fixed_example_np = cur_vbn_fixed_example.eval()
self.assertAllClose(vbn_fixed_example_np, cur_vbn_fixed_example_np)
def test_variable_reuse(self):
"""Test that variable scopes work and inference on a real-ish case."""
tensor1_ref = array_ops.zeros([6, 5, 7, 3, 3])
tensor1_examples = array_ops.zeros([4, 5, 7, 3, 3])
tensor2_ref = array_ops.zeros([4, 2, 3])
tensor2_examples = array_ops.zeros([2, 2, 3])
with variable_scope.variable_scope('dummy_scope', reuse=True):
with self.assertRaisesRegexp(
ValueError, 'does not exist, or was not created with '
'tf.get_variable()'):
virtual_batchnorm.VBN(tensor1_ref)
vbn1 = virtual_batchnorm.VBN(tensor1_ref, name='vbn1')
vbn2 = virtual_batchnorm.VBN(tensor2_ref, name='vbn2')
# Fetch reference and examples after virtual batch normalization. Also
# fetch in variable reuse case.
to_fetch = []
to_fetch.append(vbn1.reference_batch_normalization())
to_fetch.append(vbn2.reference_batch_normalization())
to_fetch.append(vbn1(tensor1_examples))
to_fetch.append(vbn2(tensor2_examples))
variable_scope.get_variable_scope().reuse_variables()
to_fetch.append(vbn1.reference_batch_normalization())
to_fetch.append(vbn2.reference_batch_normalization())
to_fetch.append(vbn1(tensor1_examples))
to_fetch.append(vbn2(tensor2_examples))
self.assertEqual(4, len(contrib_variables_lib.get_variables()))
with self.session(use_gpu=True) as sess:
variables_lib.global_variables_initializer().run()
sess.run(to_fetch)
def test_invalid_input(self):
# Reference batch has unknown dimensions.
with self.assertRaisesRegexp(
ValueError, '`reference_batch` has unknown dimensions.'):
virtual_batchnorm.VBN(array_ops.placeholder(dtypes.float32), name='vbn1')
# Axis too negative.
with self.assertRaisesRegexp(
ValueError, 'Value of `axis` argument .* is out of range'):
virtual_batchnorm.VBN(array_ops.zeros([1, 2]), axis=-3, name='vbn2')
# Axis too large.
with self.assertRaisesRegexp(
ValueError, 'Value of `axis` argument .* is out of range'):
virtual_batchnorm.VBN(array_ops.zeros([1, 2]), axis=2, name='vbn3')
# Batch axis too negative.
with self.assertRaisesRegexp(
ValueError, 'Value of `axis` argument .* is out of range'):
virtual_batchnorm.VBN(array_ops.zeros([1, 2]), name='vbn4', batch_axis=-3)
# Batch axis too large.
with self.assertRaisesRegexp(
ValueError, 'Value of `axis` argument .* is out of range'):
virtual_batchnorm.VBN(array_ops.zeros([1, 2]), name='vbn5', batch_axis=2)
# Axis and batch axis are the same.
with self.assertRaisesRegexp(
ValueError, '`axis` and `batch_axis` cannot be the same.'):
virtual_batchnorm.VBN(array_ops.zeros(
[1, 2]), axis=1, name='vbn6', batch_axis=1)
# Reference Tensor and example Tensor have incompatible shapes.
tensor_ref = array_ops.zeros([5, 2, 3])
tensor_examples = array_ops.zeros([3, 2, 3])
vbn = virtual_batchnorm.VBN(tensor_ref, name='vbn7', batch_axis=1)
with self.assertRaisesRegexp(ValueError, 'Shapes .* are incompatible'):
vbn(tensor_examples)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/gan/python/features/python/virtual_batchnorm_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Miscellaneous utilities for TFGAN code and examples.
Includes:
1) Conditioning the value of a Tensor, based on techniques from
https://arxiv.org/abs/1609.03499.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
__all__ = [
'condition_tensor',
'condition_tensor_from_onehot',
]
def _get_shape(tensor):
tensor_shape = array_ops.shape(tensor)
static_tensor_shape = tensor_util.constant_value(tensor_shape)
return (static_tensor_shape if static_tensor_shape is not None else
tensor_shape)
def condition_tensor(tensor, conditioning):
"""Condition the value of a tensor.
Conditioning scheme based on https://arxiv.org/abs/1609.03499.
Args:
tensor: A minibatch tensor to be conditioned.
    conditioning: A minibatch Tensor to condition on. Must be 2D, with first
dimension the same as `tensor`.
Returns:
`tensor` conditioned on `conditioning`.
Raises:
ValueError: If the non-batch dimensions of `tensor` aren't fully defined.
ValueError: If `conditioning` isn't at least 2D.
ValueError: If the batch dimension for the input Tensors don't match.
"""
tensor.shape[1:].assert_is_fully_defined()
num_features = tensor.shape[1:].num_elements()
if conditioning.shape.ndims < 2:
raise ValueError('conditioning must be at least 2D, but saw shape: %s'
% conditioning.shape)
mapped_conditioning = layers.linear(
layers.flatten(conditioning), num_features)
if not mapped_conditioning.shape.is_compatible_with(tensor.shape):
mapped_conditioning = array_ops.reshape(
mapped_conditioning, _get_shape(tensor))
return tensor + mapped_conditioning
def _one_hot_to_embedding(one_hot, embedding_size):
"""Get a dense embedding vector from a one-hot encoding."""
num_tokens = one_hot.shape[1]
label_id = math_ops.argmax(one_hot, axis=1)
embedding = variable_scope.get_variable(
'embedding', [num_tokens, embedding_size])
return embedding_ops.embedding_lookup(
embedding, label_id, name='token_to_embedding')
def _validate_onehot(one_hot_labels):
one_hot_labels.shape.assert_has_rank(2)
one_hot_labels.shape[1:].assert_is_fully_defined()
def condition_tensor_from_onehot(tensor, one_hot_labels, embedding_size=256):
"""Condition a tensor based on a one-hot tensor.
Conditioning scheme based on https://arxiv.org/abs/1609.03499.
Args:
tensor: Tensor to be conditioned.
one_hot_labels: A Tensor of one-hot labels. Shape is
[batch_size, num_classes].
embedding_size: The size of the class embedding.
Returns:
`tensor` conditioned on `one_hot_labels`.
Raises:
ValueError: `one_hot_labels` isn't 2D, if non-batch dimensions aren't
fully defined, or if batch sizes don't match.
"""
_validate_onehot(one_hot_labels)
conditioning = _one_hot_to_embedding(one_hot_labels, embedding_size)
return condition_tensor(tensor, conditioning)
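# A minimal usage sketch (shapes and names below are illustrative, assuming a
# TF1-style graph): a generator's intermediate activations are conditioned on
# a one-hot class label by adding a learned, linearly projected class embedding.
def _conditioning_usage_sketch():
  from tensorflow.python.framework import dtypes
  net = array_ops.placeholder(dtypes.float32, [16, 8, 8, 64])
  one_hot_labels = array_ops.placeholder(dtypes.float32, [16, 10])
  with variable_scope.variable_scope('conditioning'):
    conditioned_net = condition_tensor_from_onehot(
        net, one_hot_labels, embedding_size=32)
  return conditioned_net  # Same shape as `net`: [16, 8, 8, 64].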
|
tensorflow-master
|
tensorflow/contrib/gan/python/features/python/conditioning_utils_impl.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras-like layers and utilities that implement Spectral Normalization.
Based on "Spectral Normalization for Generative Adversarial Networks" by Miyato,
et al in ICLR 2018. https://openreview.net/pdf?id=B1QRgziT-
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import numbers
import re
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.keras.engine import base_layer_utils as keras_base_layer_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
__all__ = [
'compute_spectral_norm', 'spectral_normalize', 'spectral_norm_regularizer',
'spectral_normalization_custom_getter', 'keras_spectral_normalization'
]
# tf.bfloat16 should work, but tf.matmul converts those to tf.float32 which then
# can't directly be assigned back to the tf.bfloat16 variable.
_OK_DTYPES_FOR_SPECTRAL_NORM = (dtypes.float16, dtypes.float32, dtypes.float64)
_PERSISTED_U_VARIABLE_SUFFIX = 'spectral_norm_u'
def compute_spectral_norm(w_tensor, power_iteration_rounds=1, name=None):
"""Estimates the largest singular value in the weight tensor.
Args:
w_tensor: The weight matrix whose spectral norm should be computed.
power_iteration_rounds: The number of iterations of the power method to
perform. A higher number yields a better approximation.
name: An optional scope name.
Returns:
The largest singular value (the spectral norm) of w.
"""
with variable_scope.variable_scope(name, 'spectral_norm'):
# The paper says to flatten convnet kernel weights from
# (C_out, C_in, KH, KW) to (C_out, C_in * KH * KW). But TensorFlow's Conv2D
# kernel weight shape is (KH, KW, C_in, C_out), so it should be reshaped to
# (KH * KW * C_in, C_out), and similarly for other layers that put output
# channels as last dimension.
# n.b. this means that w here is equivalent to w.T in the paper.
w = array_ops.reshape(w_tensor, (-1, w_tensor.get_shape()[-1]))
# Persisted approximation of first left singular vector of matrix `w`.
u_var = variable_scope.get_variable(
_PERSISTED_U_VARIABLE_SUFFIX,
shape=(w.shape[0], 1),
dtype=w.dtype,
initializer=init_ops.random_normal_initializer(),
trainable=False)
u = u_var
# Use power iteration method to approximate spectral norm.
for _ in range(power_iteration_rounds):
# `v` approximates the first right singular vector of matrix `w`.
v = nn.l2_normalize(math_ops.matmul(array_ops.transpose(w), u))
u = nn.l2_normalize(math_ops.matmul(w, v))
# Update persisted approximation.
with ops.control_dependencies([u_var.assign(u, name='update_u')]):
u = array_ops.identity(u)
u = array_ops.stop_gradient(u)
v = array_ops.stop_gradient(v)
# Largest singular value of `w`.
spectral_norm = math_ops.matmul(
math_ops.matmul(array_ops.transpose(u), w), v)
spectral_norm.shape.assert_is_fully_defined()
spectral_norm.shape.assert_is_compatible_with([1, 1])
return spectral_norm[0][0]
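# A small numpy-only sketch of the power iteration above (illustrative, not
# part of the library): alternately applying w^T and w to a random vector
# converges to the leading singular vectors, so u^T w v approaches the largest
# singular value reported by a full SVD.
def _power_iteration_sketch():
  import numpy as np  # Local import; numpy is only needed for this sketch.
  w = np.random.randn(48, 64)
  u = np.random.randn(48, 1)
  for _ in range(200):
    v = w.T.dot(u)
    v /= np.linalg.norm(v)
    u = w.dot(v)
    u /= np.linalg.norm(u)
  estimate = float(u.T.dot(w).dot(v))
  true_sn = np.linalg.svd(w, compute_uv=False)[0]
  np.testing.assert_allclose(estimate, true_sn, rtol=1e-2)
  return estimate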
def spectral_normalize(w, power_iteration_rounds=1, name=None):
"""Normalizes a weight matrix by its spectral norm.
Args:
w: The weight matrix to be normalized.
power_iteration_rounds: The number of iterations of the power method to
perform. A higher number yields a better approximation.
name: An optional scope name.
Returns:
A normalized weight matrix tensor.
"""
with variable_scope.variable_scope(name, 'spectral_normalize'):
w_normalized = w / compute_spectral_norm(
w, power_iteration_rounds=power_iteration_rounds)
return array_ops.reshape(w_normalized, w.get_shape())
def spectral_norm_regularizer(scale, power_iteration_rounds=1, scope=None):
"""Returns a functions that can be used to apply spectral norm regularization.
Small spectral norms enforce a small Lipschitz constant, which is necessary
for Wasserstein GANs.
Args:
scale: A scalar multiplier. 0.0 disables the regularizer.
power_iteration_rounds: The number of iterations of the power method to
perform. A higher number yields a better approximation.
scope: An optional scope name.
Returns:
A function with the signature `sn(weights)` that applies spectral norm
regularization.
Raises:
ValueError: If scale is negative or if scale is not a float.
"""
if isinstance(scale, numbers.Integral):
raise ValueError('scale cannot be an integer: %s' % scale)
if isinstance(scale, numbers.Real):
if scale < 0.0:
raise ValueError(
'Setting a scale less than 0 on a regularizer: %g' % scale)
if scale == 0.0:
logging.info('Scale of 0 disables regularizer.')
return lambda _: None
def sn(weights, name=None):
"""Applies spectral norm regularization to weights."""
with ops.name_scope(scope, 'SpectralNormRegularizer', [weights]) as name:
scale_t = ops.convert_to_tensor(
scale, dtype=weights.dtype.base_dtype, name='scale')
return math_ops.multiply(
scale_t,
compute_spectral_norm(
weights, power_iteration_rounds=power_iteration_rounds),
name=name)
return sn
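# A minimal usage sketch (the layer, scale and shapes below are illustrative,
# assuming a TF1-style graph): the returned `sn` function plugs in wherever a
# weight regularizer is accepted and adds `scale * spectral_norm(weights)` to
# the regularization losses collection.
def _spectral_norm_regularizer_sketch():
  from tensorflow.python.layers import core as layers_core
  regularizer = spectral_norm_regularizer(scale=0.1)
  dense = layers_core.Dense(units=16, kernel_regularizer=regularizer)
  outputs = dense.apply(array_ops.zeros([8, 32]))
  # The penalty can then be read back via
  # ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES).
  return outputs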
def _default_name_filter(name):
"""A filter function to identify common names of weight variables.
Args:
name: The variable name.
Returns:
    Whether `name` is a standard name for weight/kernel variables used in the
Keras, tf.layers, tf.contrib.layers or tf.contrib.slim libraries.
"""
match = re.match(r'(.*\/)?(depthwise_|pointwise_)?(weights|kernel)$', name)
return match is not None
def spectral_normalization_custom_getter(name_filter=_default_name_filter,
power_iteration_rounds=1):
"""Custom getter that performs Spectral Normalization on a weight tensor.
Specifically it divides the weight tensor by its largest singular value. This
is intended to stabilize GAN training, by making the discriminator satisfy a
local 1-Lipschitz constraint.
Based on [Spectral Normalization for Generative Adversarial Networks][sn-gan].
[sn-gan]: https://openreview.net/forum?id=B1QRgziT-
To reproduce an SN-GAN, apply this custom_getter to every weight tensor of
your discriminator. The last dimension of the weight tensor must be the number
of output channels.
Apply this to layers by supplying this as the `custom_getter` of a
`tf.compat.v1.variable_scope`. For example:
with tf.compat.v1.variable_scope('discriminator',
                                     custom_getter=spectral_normalization_custom_getter()):
net = discriminator_fn(net)
IMPORTANT: Keras does not respect the custom_getter supplied by the
VariableScope, so Keras users should use `keras_spectral_normalization`
instead of (or in addition to) this approach.
It is important to carefully select which weights you want to apply Spectral
Normalization to. In general you want to normalize the kernels of convolution
and dense layers, but you do not want to normalize biases. You also want to
avoid normalizing batch normalization (and similar) variables; such layers
play poorly with Spectral Normalization in general, since their gamma can
cancel out the normalization applied in other layers. By default we supply a
filter that matches the kernel variable names of the dense and convolution
layers of the tf.layers, tf.contrib.layers, tf.keras and tf.contrib.slim
libraries. If you are using anything else you'll need a custom `name_filter`.
This custom getter internally creates a variable used to compute the spectral
norm by power iteration. It will update every time the variable is accessed,
which means the normalized discriminator weights may change slightly whilst
training the generator. Whilst unusual, this matches how the paper's authors
implement it, and in general additional rounds of power iteration can't hurt.
Args:
name_filter: Optionally, a method that takes a Variable name as input and
returns whether this Variable should be normalized.
power_iteration_rounds: The number of iterations of the power method to
perform per step. A higher number yields a better approximation of the
true spectral norm.
Returns:
A custom getter function that applies Spectral Normalization to all
Variables whose names match `name_filter`.
Raises:
ValueError: If name_filter is not callable.
"""
if not callable(name_filter):
raise ValueError('name_filter must be callable')
def _internal_getter(getter, name, *args, **kwargs):
"""A custom getter function that applies Spectral Normalization.
Args:
getter: The true getter to call.
name: Name of new/existing variable, in the same format as
tf.get_variable.
*args: Other positional arguments, in the same format as tf.get_variable.
**kwargs: Keyword arguments, in the same format as tf.get_variable.
Returns:
The return value of `getter(name, *args, **kwargs)`, spectrally
normalized.
Raises:
ValueError: If used incorrectly, or if `dtype` is not supported.
"""
if not name_filter(name):
return getter(name, *args, **kwargs)
if name.endswith(_PERSISTED_U_VARIABLE_SUFFIX):
raise ValueError(
'Cannot apply Spectral Normalization to internal variables created '
'for Spectral Normalization. Tried to normalize variable [%s]' %
name)
if kwargs['dtype'] not in _OK_DTYPES_FOR_SPECTRAL_NORM:
raise ValueError('Disallowed data type {}'.format(kwargs['dtype']))
# This layer's weight Variable/PartitionedVariable.
w_tensor = getter(name, *args, **kwargs)
if len(w_tensor.get_shape()) < 2:
raise ValueError(
'Spectral norm can only be applied to multi-dimensional tensors')
return spectral_normalize(
w_tensor,
power_iteration_rounds=power_iteration_rounds,
name=(name + '/spectral_normalize'))
return _internal_getter
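# Illustrative usage sketch (not part of the original module): wires the
# custom getter into a variable scope so that variables matching the default
# name filter (e.g. 'kernel' or 'weights') are spectrally normalized. The
# scope name and shape are hypothetical.
def _example_spectral_normalization_custom_getter_usage():
  """Hedged example: create a spectrally normalized kernel variable."""
  getter = spectral_normalization_custom_getter(power_iteration_rounds=1)
  with variable_scope.variable_scope('discriminator', custom_getter=getter):
    # The returned tensor is the underlying variable divided by its largest
    # singular value; the variable itself is left untouched.
    kernel = variable_scope.get_variable('kernel', shape=[3, 3, 16, 32])
  return kernel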
@contextlib.contextmanager
def keras_spectral_normalization(name_filter=_default_name_filter,
power_iteration_rounds=1):
"""A context manager that enables Spectral Normalization for Keras.
Keras doesn't respect the `custom_getter` in the VariableScope, so this is a
bit of a hack to make things work.
Usage:
with keras_spectral_normalization():
net = discriminator_fn(net)
Args:
name_filter: Optionally, a method that takes a Variable name as input and
returns whether this Variable should be normalized.
power_iteration_rounds: The number of iterations of the power method to
perform per step. A higher number yields a better approximation of the
true spectral norm.
Yields:
A context manager that wraps the standard Keras variable creation method
with the `spectral_normalization_custom_getter`.
"""
original_make_variable = keras_base_layer_utils.make_variable
sn_getter = spectral_normalization_custom_getter(
name_filter=name_filter, power_iteration_rounds=power_iteration_rounds)
def make_variable_wrapper(name, *args, **kwargs):
return sn_getter(original_make_variable, name, *args, **kwargs)
keras_base_layer_utils.make_variable = make_variable_wrapper
try:
  yield
finally:
  keras_base_layer_utils.make_variable = original_make_variable
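# Illustrative usage sketch (not part of the original module): wraps a
# user-supplied, Keras-based discriminator so that the weights it creates are
# spectrally normalized. `discriminator_fn` and `inputs` are hypothetical
# stand-ins for the caller's model function and input tensor.
def _example_keras_spectral_normalization_usage(discriminator_fn, inputs):
  """Hedged example: run a Keras discriminator under spectral normalization."""
  with keras_spectral_normalization(power_iteration_rounds=1):
    return discriminator_fn(inputs)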
|
tensorflow-master
|
tensorflow/contrib/gan/python/features/python/spectral_normalization_impl.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A tensor pool stores values from an input tensor and returns a stored one.
See the following papers for more details.
1) `Learning from simulated and unsupervised images through adversarial
training` (https://arxiv.org/abs/1612.07828).
2) `Unpaired Image-to-Image Translation using Cycle-Consistent Adversarial
Networks` (https://arxiv.org/abs/1703.10593).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.gan.python.features.python import random_tensor_pool_impl
# pylint: disable=wildcard-import
from tensorflow.contrib.gan.python.features.python.random_tensor_pool_impl import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
__all__ = random_tensor_pool_impl.__all__
remove_undocumented(__name__, __all__)
|
tensorflow-master
|
tensorflow/contrib/gan/python/features/python/random_tensor_pool.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A tensor pool stores values from an input tensor and returns a stored one.
We use this to keep a history of values created by a generator, such that
a discriminator can randomly be trained on some older samples, not just the
current one. This can help to keep the discriminator from getting too far
ahead of the generator, and to keep the system from oscillating if the
discriminator forgets too quickly what past samples from the generator
looked like.
See the following papers for more details.
1) `Learning from simulated and unsupervised images through adversarial
training` (https://arxiv.org/abs/1612.07828).
2) `Unpaired Image-to-Image Translation using Cycle-Consistent Adversarial
Networks` (https://arxiv.org/abs/1703.10593).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.util import nest
__all__ = [
'tensor_pool',
]
def _to_list(x):
return [x] if isinstance(x, ops.Tensor) else list(x)
def tensor_pool(input_values,
pool_size=50,
pooling_probability=0.5,
name='tensor_pool'):
"""Queue storing input values and returning random previously stored ones.
Every time the returned `output_value` is evaluated, `input_value` is
evaluated and its value either directly returned (with
`1-pooling_probability`) or stored in the pool and a random one of the samples
currently in the pool is popped and returned. As long as the pool is not fully
filled, the input_value is always directly returned, as well as stored in the
pool. Note that during inference/testing, it may be appropriate to set
`pool_size` = 0 or `pooling_probability` = 0.
Args:
input_values: An arbitrarily nested structure of `tf.Tensors`, from which to
read values to be pooled.
pool_size: An integer specifying the maximum size of the pool. Defaults to
50.
pooling_probability: A float `Tensor` specifying the probability of getting
a value from the pool, as opposed to just the current input.
name: A string prefix for the name scope for all tensorflow ops.
Returns:
A nested structure of `Tensor` objects with the same structure as
`input_values`. With the given probability, the Tensor values are either the
same as in `input_values` or a randomly chosen sample that was previously
inserted in the pool.
Raises:
ValueError: If `pool_size` is negative.
"""
pool_size = int(pool_size)
if pool_size < 0:
raise ValueError('`pool_size` is negative.')
elif pool_size == 0:
return input_values
original_input_values = input_values
input_values = nest.flatten(input_values)
with ops.name_scope('{}_pool_queue'.format(name),
values=input_values + [pooling_probability]):
pool_queue = data_flow_ops.RandomShuffleQueue(
capacity=pool_size,
min_after_dequeue=0,
dtypes=[v.dtype for v in input_values],
shapes=None)
# In pseudo code this code does the following:
# if not pool_full:
# enqueue(input_values)
# return input_values
# else
# dequeue_values = dequeue_random_sample()
# enqueue(input_values)
# if rand() < pooling_probability:
# return dequeue_values
# else
# return input_values
def _get_input_value_pooled():
enqueue_op = pool_queue.enqueue(input_values)
with ops.control_dependencies([enqueue_op]):
return [array_ops.identity(v) for v in input_values]
def _get_random_pool_value_and_enqueue_input():
dequeue_values = _to_list(pool_queue.dequeue())
with ops.control_dependencies(dequeue_values):
enqueue_op = pool_queue.enqueue(input_values)
with ops.control_dependencies([enqueue_op]):
prob = random_ops.random_uniform(
(), dtype=dtypes.float32) < pooling_probability
return control_flow_ops.cond(prob, lambda: dequeue_values,
lambda: input_values)
output_values = _to_list(control_flow_ops.cond(
pool_queue.size() < pool_size, _get_input_value_pooled,
_get_random_pool_value_and_enqueue_input))
# Make sure that the shape of `output_value` is set.
for input_value, output_value in zip(input_values, output_values):
output_value.set_shape(input_value.shape)
return nest.pack_sequence_as(original_input_values, output_values)
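# Illustrative usage sketch (not part of the original module): routes the
# generator output through the pool before the discriminator sees it, so with
# probability `pooling_probability` an older sample is returned instead of
# the current one. `generated_data` is a hypothetical stand-in for the
# generator's output tensor (or nested structure of tensors).
def _example_tensor_pool_usage(generated_data):
  """Hedged example: pool generator samples for discriminator training."""
  pooled_data = tensor_pool(
      generated_data, pool_size=50, pooling_probability=0.5)
  return pooled_data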
|
tensorflow-master
|
tensorflow/contrib/gan/python/features/python/random_tensor_pool_impl.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras-like layers and utilities that implement Spectral Normalization.
Based on "Spectral Normalization for Generative Adversarial Networks" by Miyato,
et al in ICLR 2018. https://openreview.net/pdf?id=B1QRgziT-
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.gan.python.features.python import spectral_normalization_impl
# pylint: disable=wildcard-import
from tensorflow.contrib.gan.python.features.python.spectral_normalization_impl import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
__all__ = spectral_normalization_impl.__all__
remove_undocumented(__name__, __all__)
|
tensorflow-master
|
tensorflow/contrib/gan/python/features/python/spectral_normalization.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for features.clip_weights."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.contrib.gan.python.features.python import clip_weights_impl as clip_weights
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import training
class ClipWeightsTest(test.TestCase):
"""Tests for `discriminator_weight_clip`."""
def setUp(self):
super(ClipWeightsTest, self).setUp()
self.variables = [variables.Variable(2.0)]
self.tuple = collections.namedtuple(
'VarTuple', ['discriminator_variables'])(self.variables)
def _test_weight_clipping_helper(self, use_tuple):
loss = self.variables[0]
opt = training.GradientDescentOptimizer(1.0)
if use_tuple:
opt_clip = clip_weights.clip_variables(opt, self.variables, 0.1)
else:
opt_clip = clip_weights.clip_discriminator_weights(opt, self.tuple, 0.1)
train_op1 = opt.minimize(loss, var_list=self.variables)
train_op2 = opt_clip.minimize(loss, var_list=self.variables)
with self.cached_session(use_gpu=True) as sess:
sess.run(variables.global_variables_initializer())
self.assertEqual(2.0, self.variables[0].eval())
sess.run(train_op1)
self.assertLess(0.1, self.variables[0].eval())
with self.cached_session(use_gpu=True) as sess:
sess.run(variables.global_variables_initializer())
self.assertEqual(2.0, self.variables[0].eval())
sess.run(train_op2)
self.assertNear(0.1, self.variables[0].eval(), 1e-7)
def test_weight_clipping_argsonly(self):
self._test_weight_clipping_helper(False)
def test_weight_clipping_ganmodel(self):
self._test_weight_clipping_helper(True)
def _test_incorrect_weight_clip_value_helper(self, use_tuple):
opt = training.GradientDescentOptimizer(1.0)
if use_tuple:
with self.assertRaisesRegexp(ValueError, 'must be positive'):
clip_weights.clip_discriminator_weights(opt, self.tuple, weight_clip=-1)
else:
with self.assertRaisesRegexp(ValueError, 'must be positive'):
clip_weights.clip_variables(opt, self.variables, weight_clip=-1)
def test_incorrect_weight_clip_value_argsonly(self):
self._test_incorrect_weight_clip_value_helper(False)
def test_incorrect_weight_clip_value_tuple(self):
self._test_incorrect_weight_clip_value_helper(True)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/gan/python/features/python/clip_weights_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TF-GAN evaluation module.
This module supports techniques such as Inception Score, Frechet Inception
distance, and Sliced Wasserstein distance.
"""
# pylint: disable=wildcard-import,unused-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Collapse eval into a single namespace.
from tensorflow.contrib.gan.python.eval.python import classifier_metrics
from tensorflow.contrib.gan.python.eval.python import eval_utils
from tensorflow.contrib.gan.python.eval.python import sliced_wasserstein
from tensorflow.contrib.gan.python.eval.python import summaries
from tensorflow.contrib.gan.python.eval.python.classifier_metrics import *
from tensorflow.contrib.gan.python.eval.python.eval_utils import *
from tensorflow.contrib.gan.python.eval.python.sliced_wasserstein import *
from tensorflow.contrib.gan.python.eval.python.summaries import *
# pylint: enable=wildcard-import,unused-import
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = [
'classifier_metrics',
'sliced_wasserstein_distance',
'summaries',
'eval_utils',
] + (
classifier_metrics.__all__ + sliced_wasserstein.__all__ +
summaries.__all__ + eval_utils.__all__)
remove_undocumented(__name__, _allowed_symbols)
|
tensorflow-master
|
tensorflow/contrib/gan/python/eval/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TF-GAN summaries."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.gan.python import namedtuples
from tensorflow.contrib.gan.python.eval.python import summaries_impl as summaries
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.summary import summary
def generator_model(inputs):
return variable_scope.get_variable('dummy_g', initializer=2.0) * inputs
def discriminator_model(inputs, _):
return variable_scope.get_variable('dummy_d', initializer=2.0) * inputs
def stargan_generator_model(inputs, _):
return generator_model(inputs)
def get_gan_model():
# TODO(joelshor): Find a better way of creating a variable scope.
with variable_scope.variable_scope('generator') as gen_scope:
pass
with variable_scope.variable_scope('discriminator') as dis_scope:
pass
return namedtuples.GANModel(
generator_inputs=array_ops.zeros([4, 32, 32, 3]),
generated_data=array_ops.zeros([4, 32, 32, 3]),
generator_variables=[variables.Variable(0), variables.Variable(1)],
generator_scope=gen_scope,
generator_fn=generator_model,
real_data=array_ops.ones([4, 32, 32, 3]),
discriminator_real_outputs=array_ops.ones([1, 2, 3]),
discriminator_gen_outputs=array_ops.ones([1, 2, 3]),
discriminator_variables=[variables.Variable(0)],
discriminator_scope=dis_scope,
discriminator_fn=discriminator_model)
def get_stargan_model():
"""Similar to get_gan_model()."""
# TODO(joelshor): Find a better way of creating a variable scope.
with variable_scope.variable_scope('discriminator') as dis_scope:
pass
with variable_scope.variable_scope('generator') as gen_scope:
return namedtuples.StarGANModel(
input_data=array_ops.ones([1, 2, 2, 3]),
input_data_domain_label=array_ops.ones([1, 2]),
generated_data=stargan_generator_model(
array_ops.ones([1, 2, 2, 3]), None),
generated_data_domain_target=array_ops.ones([1, 2]),
reconstructed_data=array_ops.ones([1, 2, 2, 3]),
discriminator_input_data_source_predication=array_ops.ones([1]),
discriminator_generated_data_source_predication=array_ops.ones([1]),
discriminator_input_data_domain_predication=array_ops.ones([1, 2]),
discriminator_generated_data_domain_predication=array_ops.ones([1, 2]),
generator_variables=None,
generator_scope=gen_scope,
generator_fn=stargan_generator_model,
discriminator_variables=None,
discriminator_scope=dis_scope,
discriminator_fn=discriminator_model)
def get_cyclegan_model():
with variable_scope.variable_scope('x2y'):
model_x2y = get_gan_model()
with variable_scope.variable_scope('y2x'):
model_y2x = get_gan_model()
return namedtuples.CycleGANModel(
model_x2y=model_x2y,
model_y2x=model_y2x,
reconstructed_x=array_ops.zeros([4, 32, 32, 3]),
reconstructed_y=array_ops.zeros([4, 32, 32, 3]))
class SummariesTest(test.TestCase):
def _test_add_gan_model_image_summaries_impl(
self, get_model_fn, expected_num_summary_ops, model_summaries):
summaries.add_gan_model_image_summaries(get_model_fn(), grid_size=2,
model_summaries=model_summaries)
self.assertEquals(expected_num_summary_ops,
len(ops.get_collection(ops.GraphKeys.SUMMARIES)))
with self.test_session(use_gpu=True):
variables.global_variables_initializer().run()
summary.merge_all().eval()
def test_add_gan_model_image_summaries(self):
self._test_add_gan_model_image_summaries_impl(get_gan_model, 5, True)
def test_add_gan_model_image_summaries_no_model(self):
self._test_add_gan_model_image_summaries_impl(get_gan_model, 2, False)
def test_cyclegan_image_summaries_dont_work(self):
with self.assertRaises(ValueError):
summaries.add_gan_model_image_summaries(get_cyclegan_model())
def _test_add_gan_model_summaries_impl(self, get_model_fn,
expected_num_summary_ops):
summaries.add_gan_model_summaries(get_model_fn())
self.assertEquals(expected_num_summary_ops,
len(ops.get_collection(ops.GraphKeys.SUMMARIES)))
with self.test_session(use_gpu=True):
variables.global_variables_initializer().run()
summary.merge_all().eval()
def test_add_gan_model_summaries(self):
self._test_add_gan_model_summaries_impl(get_gan_model, 3)
def test_add_gan_model_summaries_for_cyclegan(self):
self._test_add_gan_model_summaries_impl(get_cyclegan_model, 6)
def _test_add_regularization_loss_summaries_impl(self, get_model_fn,
expected_num_summary_ops):
summaries.add_regularization_loss_summaries(get_model_fn())
self.assertEquals(expected_num_summary_ops,
len(ops.get_collection(ops.GraphKeys.SUMMARIES)))
with self.test_session(use_gpu=True):
summary.merge_all().eval()
def test_add_regularization_loss_summaries(self):
self._test_add_regularization_loss_summaries_impl(get_gan_model, 2)
def test_add_regularization_loss_summaries_for_cyclegan(self):
self._test_add_regularization_loss_summaries_impl(get_cyclegan_model, 4)
# TODO(joelshor): Add correctness test.
def _test_add_image_comparison_summaries_impl(self, get_model_fn,
expected_num_summary_ops):
summaries.add_image_comparison_summaries(get_model_fn(), display_diffs=True)
self.assertEquals(expected_num_summary_ops,
len(ops.get_collection(ops.GraphKeys.SUMMARIES)))
with self.test_session(use_gpu=True):
summary.merge_all().eval()
def test_add_image_comparison_summaries(self):
self._test_add_image_comparison_summaries_impl(get_gan_model, 1)
def test_add_image_comparison_summaries_for_cyclegan(self):
summaries.add_cyclegan_image_summaries(get_cyclegan_model())
self.assertEquals(2, len(ops.get_collection(ops.GraphKeys.SUMMARIES)))
with self.test_session(use_gpu=True):
summary.merge_all().eval()
def test_add_image_comparison_summaries_for_stargan(self):
summaries.add_stargan_image_summaries(get_stargan_model())
self.assertEquals(1, len(ops.get_collection(ops.GraphKeys.SUMMARIES)))
with self.test_session(use_gpu=True) as sess:
sess.run(variables.global_variables_initializer())
summary.merge_all().eval()
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/gan/python/eval/python/summaries_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model evaluation tools for TF-GAN.
These methods come from https://arxiv.org/abs/1606.03498,
https://arxiv.org/abs/1706.08500, and https://arxiv.org/abs/1801.01401.
NOTE: This implementation uses the same weights as in
https://github.com/openai/improved-gan/blob/master/inception_score/model.py,
but is more numerically stable and is an unbiased estimator of the true
Inception score even when splitting the inputs into batches.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import sys
import tarfile
from six.moves import urllib
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.core.framework import graph_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import resource_loader
__all__ = [
'get_graph_def_from_disk',
'get_graph_def_from_resource',
'get_graph_def_from_url_tarball',
'preprocess_image',
'run_image_classifier',
'run_inception',
'inception_score',
'classifier_score',
'classifier_score_from_logits',
'frechet_inception_distance',
'frechet_classifier_distance',
'frechet_classifier_distance_from_activations',
'mean_only_frechet_classifier_distance_from_activations',
'diagonal_only_frechet_classifier_distance_from_activations',
'kernel_inception_distance',
'kernel_inception_distance_and_std',
'kernel_classifier_distance',
'kernel_classifier_distance_and_std',
'kernel_classifier_distance_from_activations',
'kernel_classifier_distance_and_std_from_activations',
'INCEPTION_DEFAULT_IMAGE_SIZE',
]
INCEPTION_URL = 'http://download.tensorflow.org/models/frozen_inception_v1_2015_12_05.tar.gz'
INCEPTION_FROZEN_GRAPH = 'inceptionv1_for_inception_score.pb'
INCEPTION_INPUT = 'Mul:0'
INCEPTION_OUTPUT = 'logits:0'
INCEPTION_FINAL_POOL = 'pool_3:0'
INCEPTION_DEFAULT_IMAGE_SIZE = 299
def _validate_images(images, image_size):
images = ops.convert_to_tensor(images)
images.shape.with_rank(4)
images.shape.assert_is_compatible_with([None, image_size, image_size, None])
return images
def _symmetric_matrix_square_root(mat, eps=1e-10):
"""Compute square root of a symmetric matrix.
Note that this is different from an elementwise square root. We want to
compute M' where M' = sqrt(mat) such that M' * M' = mat.
Also note that this method **only** works for symmetric matrices.
Args:
mat: Matrix to take the square root of.
eps: Small epsilon such that any element less than eps will not be square
rooted to guard against numerical instability.
Returns:
Matrix square root of mat.
"""
# Unlike numpy, tensorflow's return order is (s, u, v)
s, u, v = linalg_ops.svd(mat)
# sqrt is unstable around 0; keep singular values below eps as-is instead
# of square-rooting them.
si = array_ops.where(math_ops.less(s, eps), s, math_ops.sqrt(s))
# Note that the v returned by Tensorflow is v = V
# (when referencing the equation A = U S V^T)
# This is unlike Numpy which returns v = V^T
return math_ops.matmul(
math_ops.matmul(u, array_ops.diag(si)), v, transpose_b=True)
def preprocess_image(images,
height=INCEPTION_DEFAULT_IMAGE_SIZE,
width=INCEPTION_DEFAULT_IMAGE_SIZE,
scope=None):
"""Prepare a batch of images for evaluation.
This is the preprocessing portion of the graph from
http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz.
Note that it expects Tensors in [0, 255]. This function maps pixel values to
[-1, 1] and resizes to match the InceptionV1 network.
Args:
images: 3-D or 4-D Tensor of images. Values are in [0, 255].
height: Integer. Height of resized output image.
width: Integer. Width of resized output image.
scope: Optional scope for name_scope.
Returns:
3-D or 4-D float Tensor of prepared image(s). Values are in [-1, 1].
"""
is_single = images.shape.ndims == 3
with ops.name_scope(scope, 'preprocess', [images, height, width]):
if not images.dtype.is_floating:
images = math_ops.cast(images, dtypes.float32)
if is_single:
images = array_ops.expand_dims(images, axis=0)
resized = image_ops.resize_bilinear(images, [height, width])
resized = (resized - 128.0) / 128.0
if is_single:
resized = array_ops.squeeze(resized, axis=0)
return resized
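# Illustrative usage sketch (not part of the original module): maps a batch of
# uint8 images in [0, 255] to [-1, 1] at the Inception input size and runs
# them through the pretrained classifier. `uint8_images` is a hypothetical
# stand-in for a [batch, height, width, 3] tensor; the frozen graph is
# downloaded by `run_inception` via `_default_graph_def_fn`.
def _example_preprocess_and_run_inception(uint8_images):
  """Hedged example: preprocess images and compute Inception logits."""
  images = preprocess_image(uint8_images)
  logits = run_inception(images)
  return logits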
def _kl_divergence(p, p_logits, q):
"""Computes the Kullback-Liebler divergence between p and q.
This function uses p's logits in some places to improve numerical stability.
Specifically:
KL(p || q) = sum[ p * log(p / q) ]
= sum[ p * ( log(p) - log(q) ) ]
= sum[ p * ( log_softmax(p_logits) - log(q) ) ]
Args:
p: A 2-D floating-point Tensor p_ij, where `i` corresponds to the minibatch
example and `j` corresponds to the probability of being in class `j`.
p_logits: A 2-D floating-point Tensor corresponding to logits for `p`.
q: A 1-D floating-point Tensor, where q_j corresponds to the probability
of class `j`.
Returns:
KL divergence between two distributions. Output dimension is 1D, one entry
per distribution in `p`.
Raises:
ValueError: If any of the inputs aren't floating-point.
ValueError: If p or p_logits aren't 2D.
ValueError: If q isn't 1D.
"""
for tensor in [p, p_logits, q]:
if not tensor.dtype.is_floating:
raise ValueError('Input %s must be floating type.' % tensor.name)
p.shape.assert_has_rank(2)
p_logits.shape.assert_has_rank(2)
q.shape.assert_has_rank(1)
return math_ops.reduce_sum(
p * (nn_ops.log_softmax(p_logits) - math_ops.log(q)), axis=1)
def get_graph_def_from_disk(filename):
"""Get a GraphDef proto from a disk location."""
with gfile.GFile(filename, 'rb') as f:
return graph_pb2.GraphDef.FromString(f.read())
def get_graph_def_from_resource(filename):
"""Get a GraphDef proto from within a .par file."""
return graph_pb2.GraphDef.FromString(resource_loader.load_resource(filename))
def get_graph_def_from_url_tarball(url, filename, tar_filename=None):
"""Get a GraphDef proto from a tarball on the web.
Args:
url: Web address of tarball
filename: Filename of graph definition within tarball
tar_filename: Temporary download filename (None = always download)
Returns:
A GraphDef loaded from a file in the downloaded tarball.
"""
if not (tar_filename and os.path.exists(tar_filename)):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' %
(url,
float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
tar_filename, _ = urllib.request.urlretrieve(url, tar_filename, _progress)
with tarfile.open(tar_filename, 'r:gz') as tar:
proto_str = tar.extractfile(filename).read()
return graph_pb2.GraphDef.FromString(proto_str)
def _default_graph_def_fn():
return get_graph_def_from_url_tarball(INCEPTION_URL, INCEPTION_FROZEN_GRAPH,
os.path.basename(INCEPTION_URL))
def run_inception(images,
graph_def=None,
default_graph_def_fn=_default_graph_def_fn,
image_size=INCEPTION_DEFAULT_IMAGE_SIZE,
input_tensor=INCEPTION_INPUT,
output_tensor=INCEPTION_OUTPUT):
"""Run images through a pretrained Inception classifier.
Args:
images: Input tensors. Must be [batch, height, width, channels]. Input shape
and values must be in [-1, 1], which can be achieved using
`preprocess_image`.
graph_def: A GraphDef proto of a pretrained Inception graph. If `None`,
call `default_graph_def_fn` to get GraphDef.
default_graph_def_fn: A function that returns a GraphDef. Used if
`graph_def` is `None`. By default, returns a pretrained InceptionV1 graph.
image_size: Required image width and height. See unit tests for the default
values.
input_tensor: Name of input Tensor.
output_tensor: Name or list of output Tensors. This function will compute
activations at the specified layer. Examples include INCEPTION_OUTPUT
and INCEPTION_FINAL_POOL, which would result in this function computing
the final logits or the penultimate pooling layer.
Returns:
Tensor or Tensors corresponding to computed `output_tensor`.
Raises:
ValueError: If images are not the correct size.
ValueError: If neither `graph_def` nor `default_graph_def_fn` are provided.
"""
images = _validate_images(images, image_size)
if graph_def is None:
if default_graph_def_fn is None:
raise ValueError('If `graph_def` is `None`, must provide '
'`default_graph_def_fn`.')
graph_def = default_graph_def_fn()
activations = run_image_classifier(images, graph_def, input_tensor,
output_tensor)
if isinstance(activations, list):
for i, activation in enumerate(activations):
if activation.shape.ndims != 2:  # Static rank check.
activations[i] = layers.flatten(activation)
else:
if activations.shape.ndims != 2:  # Static rank check.
activations = layers.flatten(activations)
return activations
def run_image_classifier(tensor,
graph_def,
input_tensor,
output_tensor,
scope='RunClassifier'):
"""Runs a network from a frozen graph.
Args:
tensor: An Input tensor.
graph_def: A GraphDef proto.
input_tensor: Name of input tensor in graph def.
output_tensor: A tensor name or list of tensor names in graph def.
scope: Name scope for classifier.
Returns:
Classifier output if `output_tensor` is a string, or a list of outputs if
`output_tensor` is a list.
Raises:
ValueError: If `input_tensor` or `output_tensor` aren't in the graph_def.
"""
input_map = {input_tensor: tensor}
is_singleton = isinstance(output_tensor, str)
if is_singleton:
output_tensor = [output_tensor]
classifier_outputs = importer.import_graph_def(
graph_def, input_map, output_tensor, name=scope)
if is_singleton:
classifier_outputs = classifier_outputs[0]
return classifier_outputs
def classifier_score(images, classifier_fn, num_batches=1):
"""Classifier score for evaluating a conditional generative model.
This is based on the Inception Score, but for an arbitrary classifier.
This technique is described in detail in https://arxiv.org/abs/1606.03498. In
summary, this function calculates
exp( E[ KL(p(y|x) || p(y)) ] )
which captures how different the network's classification prediction is from
the prior distribution over classes.
NOTE: This function consumes images, computes their logits, and then
computes the classifier score. If you would like to precompute many logits for
large batches, use classifier_score_from_logits(), which this method also
uses.
Args:
images: Images to calculate the classifier score for.
classifier_fn: A function that takes images and produces logits based on a
classifier.
num_batches: Number of batches to split `generated_images` into in order to
efficiently run them through the classifier network.
Returns:
The classifier score. A floating-point scalar of the same type as the output
of `classifier_fn`.
"""
generated_images_list = array_ops.split(
images, num_or_size_splits=num_batches)
# Compute the classifier splits using the memory-efficient `map_fn`.
logits = map_fn.map_fn(
fn=classifier_fn,
elems=array_ops.stack(generated_images_list),
parallel_iterations=1,
back_prop=False,
swap_memory=True,
name='RunClassifier')
logits = array_ops.concat(array_ops.unstack(logits), 0)
return classifier_score_from_logits(logits)
def classifier_score_from_logits(logits):
"""Classifier score for evaluating a generative model from logits.
This method computes the classifier score for a set of logits. This can be
used independently of the classifier_score() method, especially in the case
of using large batches during evaluation where we would like precompute all
of the logits before computing the classifier score.
This technique is described in detail in https://arxiv.org/abs/1606.03498. In
summary, this function calculates:
exp( E[ KL(p(y|x) || p(y)) ] )
which captures how different the network's classification prediction is from
the prior distribution over classes.
Args:
logits: Precomputed 2D tensor of logits that will be used to
compute the classifier score.
Returns:
The classifier score. A floating-point scalar of the same type as the output
of `logits`.
"""
logits.shape.assert_has_rank(2)
# Use maximum precision for best results.
logits_dtype = logits.dtype
if logits_dtype != dtypes.float64:
logits = math_ops.cast(logits, dtypes.float64)
p = nn_ops.softmax(logits)
q = math_ops.reduce_mean(p, axis=0)
kl = _kl_divergence(p, logits, q)
kl.shape.assert_has_rank(1)
log_score = math_ops.reduce_mean(kl)
final_score = math_ops.exp(log_score)
if logits_dtype != dtypes.float64:
final_score = math_ops.cast(final_score, logits_dtype)
return final_score
inception_score = functools.partial(
classifier_score,
classifier_fn=functools.partial(
run_inception, output_tensor=INCEPTION_OUTPUT))
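# Illustrative usage sketch (not part of the original module): computes the
# Inception score exp(E[KL(p(y|x) || p(y))]) for generated images by
# preprocessing them and calling the `inception_score` partial defined above.
# `generated_images` is a hypothetical stand-in for a
# [batch, height, width, 3] tensor with values in [0, 255].
def _example_inception_score(generated_images):
  """Hedged example: end-to-end Inception score for a batch of images."""
  images = preprocess_image(generated_images)
  return inception_score(images, num_batches=1)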
def trace_sqrt_product(sigma, sigma_v):
"""Find the trace of the positive sqrt of product of covariance matrices.
'_symmetric_matrix_square_root' only works for symmetric matrices, so we
cannot just take _symmetric_matrix_square_root(sigma * sigma_v).
('sigma' and 'sigma_v' are symmetric, but their product is not necessarily
symmetric).
Let sigma = A A so A = sqrt(sigma), and sigma_v = B B.
We want to find trace(sqrt(sigma sigma_v)) = trace(sqrt(A A B B))
Note the following properties:
(i) forall M1, M2: eigenvalues(M1 M2) = eigenvalues(M2 M1)
=> eigenvalues(A A B B) = eigenvalues (A B B A)
(ii) if M1 = sqrt(M2), then eigenvalues(M1) = sqrt(eigenvalues(M2))
=> eigenvalues(sqrt(sigma sigma_v)) = sqrt(eigenvalues(A B B A))
(iii) forall M: trace(M) = sum(eigenvalues(M))
=> trace(sqrt(sigma sigma_v)) = sum(eigenvalues(sqrt(sigma sigma_v)))
= sum(sqrt(eigenvalues(A B B A)))
= sum(eigenvalues(sqrt(A B B A)))
= trace(sqrt(A B B A))
= trace(sqrt(A sigma_v A))
A = sqrt(sigma). Both sigma and A sigma_v A are symmetric, so we **can**
use the _symmetric_matrix_square_root function to find the roots of these
matrices.
Args:
sigma: a square, symmetric, real, positive semi-definite covariance matrix
sigma_v: same as sigma
Returns:
The trace of the positive square root of sigma*sigma_v
"""
# Note sqrt_sigma is called "A" in the proof above
sqrt_sigma = _symmetric_matrix_square_root(sigma)
# This is sqrt(A sigma_v A) above
sqrt_a_sigmav_a = math_ops.matmul(sqrt_sigma,
math_ops.matmul(sigma_v, sqrt_sigma))
return math_ops.trace(_symmetric_matrix_square_root(sqrt_a_sigmav_a))
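# Illustrative verification sketch (not part of the original module): builds
# small random symmetric positive-definite matrices and returns both the graph
# value of `trace_sqrt_product` and a numpy reference computed from the
# eigenvalues of sigma * sigma_v, so the two can be compared in a session.
# numpy is assumed to be available and is imported locally for this sketch.
def _example_trace_sqrt_product_reference():
  """Hedged example: compare trace_sqrt_product against an eigenvalue sum."""
  import numpy as np  # Assumed available; used only in this sketch.
  rng = np.random.RandomState(0)
  x, y = rng.randn(4, 4), rng.randn(4, 4)
  sigma_np, sigma_v_np = x.dot(x.T), y.dot(y.T)
  # trace(sqrt(M)) = sum(sqrt(eigenvalues(M))) for M = sigma sigma_v.
  reference = np.sum(
      np.sqrt(np.linalg.eigvals(sigma_np.dot(sigma_v_np)).real))
  graph_value = trace_sqrt_product(
      ops.convert_to_tensor(sigma_np), ops.convert_to_tensor(sigma_v_np))
  return graph_value, reference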
def frechet_classifier_distance(real_images,
generated_images,
classifier_fn,
num_batches=1):
"""Classifier distance for evaluating a generative model.
This is based on the Frechet Inception distance, but for an arbitrary
classifier.
This technique is described in detail in https://arxiv.org/abs/1706.08500.
Given two Gaussian distributions with means m and m_w and covariance matrices
C and C_w, this function calculates
|m - m_w|^2 + Tr(C + C_w - 2(C * C_w)^(1/2))
which captures how different the distributions of real images and generated
images (or more accurately, their visual features) are. Note that unlike the
Inception score, this is a true distance and utilizes information about real
world images.
Note that when computed using sample means and sample covariance matrices,
Frechet distance is biased. It is more biased for small sample sizes. (e.g.
even if the two distributions are the same, for a small sample size, the
expected Frechet distance is large). It is important to use the same
sample size to compute Frechet classifier distance when comparing two
generative models.
NOTE: This function consumes images, computes their activations, and then
computes the classifier score. If you would like to precompute many
activations for real and generated images for large batches, please use
frechet_classifier_distance_from_activations(), which this method also uses.
Args:
real_images: Real images to use to compute Frechet Inception distance.
generated_images: Generated images to use to compute Frechet Inception
distance.
classifier_fn: A function that takes images and produces activations
based on a classifier.
num_batches: Number of batches to split images into in order to
efficiently run them through the classifier network.
Returns:
The Frechet Inception distance. A floating-point scalar of the same type
as the output of `classifier_fn`.
"""
real_images_list = array_ops.split(
real_images, num_or_size_splits=num_batches)
generated_images_list = array_ops.split(
generated_images, num_or_size_splits=num_batches)
real_imgs = array_ops.stack(real_images_list)
generated_imgs = array_ops.stack(generated_images_list)
# Compute the activations using the memory-efficient `map_fn`.
def compute_activations(elems):
return map_fn.map_fn(fn=classifier_fn,
elems=elems,
parallel_iterations=1,
back_prop=False,
swap_memory=True,
name='RunClassifier')
real_a = compute_activations(real_imgs)
gen_a = compute_activations(generated_imgs)
# Ensure the activations have the right shapes.
real_a = array_ops.concat(array_ops.unstack(real_a), 0)
gen_a = array_ops.concat(array_ops.unstack(gen_a), 0)
return frechet_classifier_distance_from_activations(real_a, gen_a)
def mean_only_frechet_classifier_distance_from_activations(
real_activations, generated_activations):
"""Classifier distance for evaluating a generative model from activations.
Given two Gaussian distributions with means m and m_w and covariance matrices
C and C_w, this function calculates
|m - m_w|^2
which captures how different the distributions of real images and generated
images (or more accurately, their visual features) are. Note that unlike the
Inception score, this is a true distance and utilizes information about real
world images.
Note that when computed using sample means and sample covariance matrices,
Frechet distance is biased. It is more biased for small sample sizes. (e.g.
even if the two distributions are the same, for a small sample size, the
expected Frechet distance is large). It is important to use the same
sample size to compute frechet classifier distance when comparing two
generative models.
In this variant, we only compute the difference between the means of the
fitted Gaussians. The computation leads to O(n) vs. O(n^2) memory usage, yet
still retains much of the same information as FID.
Args:
real_activations: 2D array of activations of real images of size
[num_images, num_dims] to use to compute Frechet Inception distance.
generated_activations: 2D array of activations of generated images of size
[num_images, num_dims] to use to compute Frechet Inception distance.
Returns:
The mean-only Frechet Inception distance. A floating-point scalar of the
same type as the output of the activations.
"""
real_activations.shape.assert_has_rank(2)
generated_activations.shape.assert_has_rank(2)
activations_dtype = real_activations.dtype
if activations_dtype != dtypes.float64:
real_activations = math_ops.cast(real_activations, dtypes.float64)
generated_activations = math_ops.cast(generated_activations, dtypes.float64)
# Compute means of activations.
m = math_ops.reduce_mean(real_activations, 0)
m_w = math_ops.reduce_mean(generated_activations, 0)
# Next the distance between means.
mean = math_ops.reduce_sum(
math_ops.squared_difference(m, m_w)) # Equivalent to L2 but more stable.
mofid = mean
if activations_dtype != dtypes.float64:
mofid = math_ops.cast(mofid, activations_dtype)
return mofid
def diagonal_only_frechet_classifier_distance_from_activations(
real_activations, generated_activations):
"""Classifier distance for evaluating a generative model.
This is based on the Frechet Inception distance, but for an arbitrary
classifier.
This technique is described in detail in https://arxiv.org/abs/1706.08500.
Given two Gaussian distributions with means m and m_w and covariance matrices
C and C_w, this function calculates
|m - m_w|^2 + (sigma + sigma_w - 2(sigma x sigma_w)^(1/2))
which captures how different the distributions of real images and generated
images (or more accurately, their visual features) are. Note that unlike the
Inception score, this is a true distance and utilizes information about real
world images. In this variant, we compute diagonal-only covariance matrices.
As a result, instead of computing an expensive matrix square root, we can do
something much simpler, which has O(n) vs O(n^2) space complexity.
Note that when computed using sample means and sample covariance matrices,
Frechet distance is biased. It is more biased for small sample sizes. (e.g.
even if the two distributions are the same, for a small sample size, the
expected Frechet distance is large). It is important to use the same
sample size to compute frechet classifier distance when comparing two
generative models.
Args:
real_activations: 2D Tensor of activations of real images, of shape
[num_images, num_dims], used to compute the Frechet Inception distance.
generated_activations: 2D Tensor of activations of generated images, of the
same shape, used to compute the Frechet Inception distance.
Returns:
The diagonal-only Frechet Inception distance. A floating-point scalar of
the same type as the output of the activations.
Raises:
ValueError: If the shape of the variance and mean vectors are not equal.
"""
real_activations.shape.assert_has_rank(2)
generated_activations.shape.assert_has_rank(2)
activations_dtype = real_activations.dtype
if activations_dtype != dtypes.float64:
real_activations = math_ops.cast(real_activations, dtypes.float64)
generated_activations = math_ops.cast(generated_activations, dtypes.float64)
# Compute mean and covariance matrices of activations.
m, var = nn_impl.moments(real_activations, axes=[0])
m_w, var_w = nn_impl.moments(generated_activations, axes=[0])
actual_shape = var.get_shape()
expected_shape = m.get_shape()
if actual_shape != expected_shape:
raise ValueError('shape: {} must match expected shape: {}'.format(
actual_shape, expected_shape))
# Compute the two components of FID.
# First the covariance component.
# Here, note that trace(A + B) = trace(A) + trace(B)
trace = math_ops.reduce_sum(
(var + var_w) - 2.0 * math_ops.sqrt(math_ops.multiply(var, var_w)))
# Next the distance between means.
mean = math_ops.reduce_sum(
math_ops.squared_difference(m, m_w)) # Equivalent to L2 but more stable.
dofid = trace + mean
if activations_dtype != dtypes.float64:
dofid = math_ops.cast(dofid, activations_dtype)
return dofid
def frechet_classifier_distance_from_activations(real_activations,
generated_activations):
"""Classifier distance for evaluating a generative model.
This method computes the Frechet classifier distance from activations of
real images and generated images. This can be used independently of the
frechet_classifier_distance() method, especially in the case of using large
batches during evaluation where we would like to precompute all of the
activations before computing the classifier distance.
This technique is described in detail in https://arxiv.org/abs/1706.08500.
Given two Gaussian distributions with means m and m_w and covariance matrices
C and C_w, this function calculates
|m - m_w|^2 + Tr(C + C_w - 2(C * C_w)^(1/2))
which captures how different the distributions of real images and generated
images (or more accurately, their visual features) are. Note that unlike the
Inception score, this is a true distance and utilizes information about real
world images.
Note that when computed using sample means and sample covariance matrices,
Frechet distance is biased. It is more biased for small sample sizes. (e.g.
even if the two distributions are the same, for a small sample size, the
expected Frechet distance is large). It is important to use the same
sample size to compute frechet classifier distance when comparing two
generative models.
Args:
real_activations: 2D Tensor containing activations of real data. Shape is
[batch_size, activation_size].
generated_activations: 2D Tensor containing activations of generated data.
Shape is [batch_size, activation_size].
Returns:
The Frechet Inception distance. A floating-point scalar of the same type
as the output of the activations.
"""
real_activations.shape.assert_has_rank(2)
generated_activations.shape.assert_has_rank(2)
activations_dtype = real_activations.dtype
if activations_dtype != dtypes.float64:
real_activations = math_ops.cast(real_activations, dtypes.float64)
generated_activations = math_ops.cast(generated_activations, dtypes.float64)
# Compute mean and covariance matrices of activations.
m = math_ops.reduce_mean(real_activations, 0)
m_w = math_ops.reduce_mean(generated_activations, 0)
num_examples_real = math_ops.cast(
array_ops.shape(real_activations)[0], dtypes.float64)
num_examples_generated = math_ops.cast(
array_ops.shape(generated_activations)[0], dtypes.float64)
# sigma = (1 / (n - 1)) * (X - mu) (X - mu)^T
real_centered = real_activations - m
sigma = math_ops.matmul(
real_centered, real_centered, transpose_a=True) / (
num_examples_real - 1)
gen_centered = generated_activations - m_w
sigma_w = math_ops.matmul(
gen_centered, gen_centered, transpose_a=True) / (
num_examples_generated - 1)
# Find the Tr(sqrt(sigma sigma_w)) component of FID
sqrt_trace_component = trace_sqrt_product(sigma, sigma_w)
# Compute the two components of FID.
# First the covariance component.
# Here, note that trace(A + B) = trace(A) + trace(B)
trace = math_ops.trace(sigma + sigma_w) - 2.0 * sqrt_trace_component
# Next the distance between means.
mean = math_ops.reduce_sum(
math_ops.squared_difference(m, m_w)) # Equivalent to L2 but more stable.
fid = trace + mean
if activations_dtype != dtypes.float64:
fid = math_ops.cast(fid, activations_dtype)
return fid
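# Illustrative usage sketch (not part of the original module): the
# precompute-then-score pattern described above. Pooled Inception activations
# are computed for the real and generated batches first, then the Frechet
# distance is evaluated once from the two activation tensors. Both arguments
# are hypothetical stand-ins for preprocessed image tensors in [-1, 1].
def _example_fid_from_precomputed_activations(real_images, generated_images):
  """Hedged example: FID from precomputed pooled activations."""
  real_activations = run_inception(
      real_images, output_tensor=INCEPTION_FINAL_POOL)
  generated_activations = run_inception(
      generated_images, output_tensor=INCEPTION_FINAL_POOL)
  return frechet_classifier_distance_from_activations(
      real_activations, generated_activations)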
frechet_inception_distance = functools.partial(
frechet_classifier_distance,
classifier_fn=functools.partial(
run_inception, output_tensor=INCEPTION_FINAL_POOL))
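# Illustrative usage sketch (not part of the original module): computes FID
# end-to-end from raw image batches via the `frechet_inception_distance`
# partial defined above. Both arguments are hypothetical stand-ins for
# [batch, height, width, 3] tensors in [0, 255]; use the same batch size for
# both, since the estimator's bias depends on sample size.
def _example_frechet_inception_distance(real_images, generated_images):
  """Hedged example: end-to-end FID for real and generated image batches."""
  real = preprocess_image(real_images)
  generated = preprocess_image(generated_images)
  return frechet_inception_distance(real, generated, num_batches=1)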
def kernel_classifier_distance(real_images,
generated_images,
classifier_fn,
num_classifier_batches=1,
max_block_size=1024,
dtype=None):
"""Kernel "classifier" distance for evaluating a generative model.
This is based on the Kernel Inception distance, but for an arbitrary
embedding.
This technique is described in detail in https://arxiv.org/abs/1801.01401.
Given two distributions P and Q of activations, this function calculates
E_{X, X' ~ P}[k(X, X')] + E_{Y, Y' ~ Q}[k(Y, Y')]
- 2 E_{X ~ P, Y ~ Q}[k(X, Y)]
where k is the polynomial kernel
k(x, y) = ( x^T y / dimension + 1 )^3.
This captures how different the distributions of real and generated images'
visual features are. Like the Frechet distance (and unlike the Inception
score), this is a true distance and incorporates information about the
target images. Unlike the Frechet score, this function computes an
*unbiased* and asymptotically normal estimator, which makes comparing
estimates across models much more intuitive.
The estimator used takes time quadratic in max_block_size. Larger values of
max_block_size will decrease the variance of the estimator but increase the
computational cost. This differs slightly from the estimator used by the
original paper; it is the block estimator of https://arxiv.org/abs/1307.1954.
NOTE: the blocking code assumes that real_activations and
generated_activations are both in random order. If either is sorted in a
meaningful order, the estimator will behave poorly.
NOTE: This function consumes images, computes their activations, and then
computes the classifier score. If you would like to precompute many
activations for real and generated images for large batches, or to compute
multiple scores based on the same images, please use
kernel_classifier_distance_from_activations(), which this method also uses.
Args:
real_images: Real images to use to compute Kernel Inception distance.
generated_images: Generated images to use to compute Kernel Inception
distance.
classifier_fn: A function that takes images and produces activations based
on a classifier.
num_classifier_batches: Number of batches to split images into in order to
efficiently run them through the classifier network.
max_block_size: integer, default 1024. The distance estimator splits samples
into blocks for computational efficiency. Larger values are more
computationally expensive but decrease the variance of the distance
estimate.
dtype: if not None, coerce activations to this dtype before computations.
Returns:
The Kernel Inception Distance. A floating-point scalar of the same type
as the output of the activations.
"""
return kernel_classifier_distance_and_std(
real_images,
generated_images,
classifier_fn,
num_classifier_batches=num_classifier_batches,
max_block_size=max_block_size,
dtype=dtype)[0]
kernel_inception_distance = functools.partial(
kernel_classifier_distance,
classifier_fn=functools.partial(
run_inception, output_tensor=INCEPTION_FINAL_POOL))
def kernel_classifier_distance_and_std(real_images,
generated_images,
classifier_fn,
num_classifier_batches=1,
max_block_size=1024,
dtype=None):
"""Kernel "classifier" distance for evaluating a generative model.
This is based on the Kernel Inception distance, but for an arbitrary
embedding. Also returns an estimate of the standard error of the distance
estimator.
This technique is described in detail in https://arxiv.org/abs/1801.01401.
Given two distributions P and Q of activations, this function calculates
E_{X, X' ~ P}[k(X, X')] + E_{Y, Y' ~ Q}[k(Y, Y')]
- 2 E_{X ~ P, Y ~ Q}[k(X, Y)]
where k is the polynomial kernel
k(x, y) = ( x^T y / dimension + 1 )^3.
This captures how different the distributions of real and generated images'
visual features are. Like the Frechet distance (and unlike the Inception
score), this is a true distance and incorporates information about the
target images. Unlike the Frechet score, this function computes an
*unbiased* and asymptotically normal estimator, which makes comparing
estimates across models much more intuitive.
The estimator used takes time quadratic in max_block_size. Larger values of
max_block_size will decrease the variance of the estimator but increase the
computational cost. This differs slightly from the estimator used by the
original paper; it is the block estimator of https://arxiv.org/abs/1307.1954.
NOTE: the blocking code assumes that real_activations and
generated_activations are both in random order. If either is sorted in a
meaningful order, the estimator will behave poorly.
NOTE: This function consumes images, computes their activations, and then
computes the classifier score. If you would like to precompute many
activations for real and generated images for large batches, or to compute
multiple scores based on the same images, please use
kernel_classifier_distance_from_activations(), which this method also uses.
Args:
real_images: Real images to use to compute Kernel Inception distance.
generated_images: Generated images to use to compute Kernel Inception
distance.
classifier_fn: A function that takes images and produces activations based
on a classifier.
num_classifier_batches: Number of batches to split images into in order to
efficiently run them through the classifier network.
max_block_size: integer, default 1024. The distance estimator splits samples
into blocks for computational efficiency. Larger values are more
computationally expensive but decrease the variance of the distance
estimate. Having a smaller block size also gives a better estimate of the
standard error.
dtype: if not None, coerce activations to this dtype before computations.
Returns:
The Kernel Inception Distance. A floating-point scalar of the same type
as the output of the activations.
An estimate of the standard error of the distance estimator (a scalar of
the same type).
"""
real_images_list = array_ops.split(
real_images, num_or_size_splits=num_classifier_batches)
generated_images_list = array_ops.split(
generated_images, num_or_size_splits=num_classifier_batches)
real_imgs = array_ops.stack(real_images_list)
generated_imgs = array_ops.stack(generated_images_list)
# Compute the activations using the memory-efficient `map_fn`.
def compute_activations(elems):
return map_fn.map_fn(
fn=classifier_fn,
elems=elems,
parallel_iterations=1,
back_prop=False,
swap_memory=True,
name='RunClassifier')
real_a = compute_activations(real_imgs)
gen_a = compute_activations(generated_imgs)
# Ensure the activations have the right shapes.
real_a = array_ops.concat(array_ops.unstack(real_a), 0)
gen_a = array_ops.concat(array_ops.unstack(gen_a), 0)
return kernel_classifier_distance_and_std_from_activations(
real_a, gen_a, max_block_size, dtype)
kernel_inception_distance_and_std = functools.partial(
kernel_classifier_distance_and_std,
classifier_fn=functools.partial(
run_inception, output_tensor=INCEPTION_FINAL_POOL))
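# Hedged usage sketch (not part of the original module): a minimal example of
# calling kernel_classifier_distance_and_std with a custom `classifier_fn`.
# Here the "classifier" is the identity, so the inputs are treated directly as
# activations; the shapes and batch count below are illustrative assumptions.
def _example_kernel_classifier_distance_and_std():
  real = array_ops.zeros([8, 2048])
  generated = array_ops.zeros([8, 2048])
  # Returns a (distance, standard_error) pair of scalar tensors.
  return kernel_classifier_distance_and_std(
      real, generated, classifier_fn=lambda x: x, num_classifier_batches=2)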
def kernel_classifier_distance_from_activations(real_activations,
generated_activations,
max_block_size=1024,
dtype=None):
"""Kernel "classifier" distance for evaluating a generative model.
  This method computes the kernel classifier distance from activations of
real images and generated images. This can be used independently of the
kernel_classifier_distance() method, especially in the case of using large
batches during evaluation where we would like to precompute all of the
activations before computing the classifier distance, or if we want to
compute multiple metrics based on the same images.
This technique is described in detail in https://arxiv.org/abs/1801.01401.
Given two distributions P and Q of activations, this function calculates
E_{X, X' ~ P}[k(X, X')] + E_{Y, Y' ~ Q}[k(Y, Y')]
- 2 E_{X ~ P, Y ~ Q}[k(X, Y)]
where k is the polynomial kernel
k(x, y) = ( x^T y / dimension + 1 )^3.
This captures how different the distributions of real and generated images'
visual features are. Like the Frechet distance (and unlike the Inception
score), this is a true distance and incorporates information about the
target images. Unlike the Frechet score, this function computes an
*unbiased* and asymptotically normal estimator, which makes comparing
estimates across models much more intuitive.
The estimator used takes time quadratic in max_block_size. Larger values of
max_block_size will decrease the variance of the estimator but increase the
computational cost. This differs slightly from the estimator used by the
original paper; it is the block estimator of https://arxiv.org/abs/1307.1954.
NOTE: the blocking code assumes that real_activations and
generated_activations are both in random order. If either is sorted in a
meaningful order, the estimator will behave poorly.
Args:
real_activations: 2D Tensor containing activations of real data. Shape is
[batch_size, activation_size].
generated_activations: 2D Tensor containing activations of generated data.
Shape is [batch_size, activation_size].
max_block_size: integer, default 1024. The distance estimator splits samples
into blocks for computational efficiency. Larger values are more
computationally expensive but decrease the variance of the distance
estimate.
dtype: If not None, coerce activations to this dtype before computations.
Returns:
The Kernel Inception Distance. A floating-point scalar of the same type
as the output of the activations.
"""
return kernel_classifier_distance_and_std_from_activations(
real_activations, generated_activations, max_block_size, dtype)[0]
def kernel_classifier_distance_and_std_from_activations(real_activations,
generated_activations,
max_block_size=1024,
dtype=None):
"""Kernel "classifier" distance for evaluating a generative model.
  This method computes the kernel classifier distance from activations of
real images and generated images. This can be used independently of the
kernel_classifier_distance() method, especially in the case of using large
batches during evaluation where we would like to precompute all of the
activations before computing the classifier distance, or if we want to
compute multiple metrics based on the same images. It also returns a rough
estimate of the standard error of the estimator.
This technique is described in detail in https://arxiv.org/abs/1801.01401.
Given two distributions P and Q of activations, this function calculates
E_{X, X' ~ P}[k(X, X')] + E_{Y, Y' ~ Q}[k(Y, Y')]
- 2 E_{X ~ P, Y ~ Q}[k(X, Y)]
where k is the polynomial kernel
k(x, y) = ( x^T y / dimension + 1 )^3.
This captures how different the distributions of real and generated images'
visual features are. Like the Frechet distance (and unlike the Inception
score), this is a true distance and incorporates information about the
target images. Unlike the Frechet score, this function computes an
*unbiased* and asymptotically normal estimator, which makes comparing
estimates across models much more intuitive.
The estimator used takes time quadratic in max_block_size. Larger values of
max_block_size will decrease the variance of the estimator but increase the
computational cost. This differs slightly from the estimator used by the
original paper; it is the block estimator of https://arxiv.org/abs/1307.1954.
The estimate of the standard error will also be more reliable when there are
more blocks, i.e. when max_block_size is smaller.
NOTE: the blocking code assumes that real_activations and
generated_activations are both in random order. If either is sorted in a
meaningful order, the estimator will behave poorly.
Args:
real_activations: 2D Tensor containing activations of real data. Shape is
[batch_size, activation_size].
generated_activations: 2D Tensor containing activations of generated data.
Shape is [batch_size, activation_size].
max_block_size: integer, default 1024. The distance estimator splits samples
into blocks for computational efficiency. Larger values are more
computationally expensive but decrease the variance of the distance
estimate. Having a smaller block size also gives a better estimate of the
standard error.
dtype: If not None, coerce activations to this dtype before computations.
Returns:
The Kernel Inception Distance. A floating-point scalar of the same type
as the output of the activations.
An estimate of the standard error of the distance estimator (a scalar of
the same type).
"""
real_activations.shape.assert_has_rank(2)
generated_activations.shape.assert_has_rank(2)
real_activations.shape[1].assert_is_compatible_with(
generated_activations.shape[1])
if dtype is None:
dtype = real_activations.dtype
assert generated_activations.dtype == dtype
else:
real_activations = math_ops.cast(real_activations, dtype)
generated_activations = math_ops.cast(generated_activations, dtype)
# Figure out how to split the activations into blocks of approximately
# equal size, with none larger than max_block_size.
n_r = array_ops.shape(real_activations)[0]
n_g = array_ops.shape(generated_activations)[0]
n_bigger = math_ops.maximum(n_r, n_g)
n_blocks = math_ops.cast(math_ops.ceil(n_bigger / max_block_size),
dtypes.int32)
v_r = n_r // n_blocks
v_g = n_g // n_blocks
n_plusone_r = n_r - v_r * n_blocks
n_plusone_g = n_g - v_g * n_blocks
sizes_r = array_ops.concat([
array_ops.fill([n_blocks - n_plusone_r], v_r),
array_ops.fill([n_plusone_r], v_r + 1),
], 0)
sizes_g = array_ops.concat([
array_ops.fill([n_blocks - n_plusone_g], v_g),
array_ops.fill([n_plusone_g], v_g + 1),
], 0)
zero = array_ops.zeros([1], dtype=dtypes.int32)
inds_r = array_ops.concat([zero, math_ops.cumsum(sizes_r)], 0)
inds_g = array_ops.concat([zero, math_ops.cumsum(sizes_g)], 0)
dim = math_ops.cast(real_activations.shape[1], dtype)
def compute_kid_block(i):
"""Computes the ith block of the KID estimate."""
r_s = inds_r[i]
r_e = inds_r[i + 1]
r = real_activations[r_s:r_e]
m = math_ops.cast(r_e - r_s, dtype)
g_s = inds_g[i]
g_e = inds_g[i + 1]
g = generated_activations[g_s:g_e]
n = math_ops.cast(g_e - g_s, dtype)
k_rr = (math_ops.matmul(r, r, transpose_b=True) / dim + 1)**3
k_rg = (math_ops.matmul(r, g, transpose_b=True) / dim + 1)**3
k_gg = (math_ops.matmul(g, g, transpose_b=True) / dim + 1)**3
return (-2 * math_ops.reduce_mean(k_rg) +
(math_ops.reduce_sum(k_rr) - math_ops.trace(k_rr)) / (m * (m - 1)) +
(math_ops.reduce_sum(k_gg) - math_ops.trace(k_gg)) / (n * (n - 1)))
ests = map_fn.map_fn(
compute_kid_block, math_ops.range(n_blocks), dtype=dtype, back_prop=False)
mn = math_ops.reduce_mean(ests)
# nn_impl.moments doesn't use the Bessel correction, which we want here
n_blocks_ = math_ops.cast(n_blocks, dtype)
var = control_flow_ops.cond(
math_ops.less_equal(n_blocks, 1),
lambda: array_ops.constant(float('nan'), dtype=dtype),
lambda: math_ops.reduce_sum(math_ops.square(ests - mn)) / (n_blocks_ - 1))
return mn, math_ops.sqrt(var / n_blocks_)
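# Hedged usage sketch (not part of the original module): computing KID and its
# standard error from precomputed activations fed through placeholders. The
# placeholder shapes and the block size are illustrative assumptions.
def _example_kid_from_activations():
  real_acts = array_ops.placeholder(dtypes.float32, [None, 2048])
  gen_acts = array_ops.placeholder(dtypes.float32, [None, 2048])
  # Smaller blocks mean more blocks, which gives a better standard-error
  # estimate at the cost of a higher-variance distance estimate.
  kid, kid_std = kernel_classifier_distance_and_std_from_activations(
      real_acts, gen_acts, max_block_size=256)
  return kid, kid_std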
|
tensorflow-master
|
tensorflow/contrib/gan/python/eval/python/classifier_metrics_impl.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for eval_utils_test."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.gan.python.eval.python import eval_utils_impl as eval_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class UtilsTest(test.TestCase):
def test_image_grid(self):
eval_utils.image_grid(
input_tensor=array_ops.zeros([25, 32, 32, 3]),
grid_shape=(5, 5))
# TODO(joelshor): Add more `image_reshaper` tests.
def test_image_reshaper_image_list(self):
images = eval_utils.image_reshaper(
images=array_ops.unstack(array_ops.zeros([25, 32, 32, 3])),
num_cols=2)
images.shape.assert_is_compatible_with([1, 13 * 32, 2 * 32, 3])
def test_image_reshaper_image(self):
images = eval_utils.image_reshaper(
images=array_ops.zeros([25, 32, 32, 3]),
num_cols=2)
images.shape.assert_is_compatible_with([1, 13 * 32, 2 * 32, 3])
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/gan/python/eval/python/eval_utils_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Sliced Wasserstein Distance."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import ndimage
from tensorflow.contrib.gan.python.eval.python import sliced_wasserstein_impl as swd
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
class SlicedWassersteinTest(test.TestCase):
def test_laplacian_pyramid(self):
# The numpy/scipy code for reference estimation comes from:
# https://github.com/tkarras/progressive_growing_of_gans
gaussian_filter = np.float32([[1, 4, 6, 4, 1], [4, 16, 24, 16, 4], [
6, 24, 36, 24, 6
], [4, 16, 24, 16, 4], [1, 4, 6, 4, 1]]) / 256.0
def np_pyr_down(minibatch): # matches cv2.pyrDown()
assert minibatch.ndim == 4
return ndimage.convolve(
minibatch,
gaussian_filter[np.newaxis, np.newaxis, :, :],
mode='mirror')[:, :, ::2, ::2]
def np_pyr_up(minibatch): # matches cv2.pyrUp()
assert minibatch.ndim == 4
s = minibatch.shape
res = np.zeros((s[0], s[1], s[2] * 2, s[3] * 2), minibatch.dtype)
res[:, :, ::2, ::2] = minibatch
return ndimage.convolve(
res,
gaussian_filter[np.newaxis, np.newaxis, :, :] * 4.0,
mode='mirror')
def np_laplacian_pyramid(minibatch, num_levels):
      # Note: there's a bug in the original SWD implementation; it is fixed
      # here for repeatability.
pyramid = [minibatch.astype('f').copy()]
for _ in range(1, num_levels):
pyramid.append(np_pyr_down(pyramid[-1]))
pyramid[-2] -= np_pyr_up(pyramid[-1])
return pyramid
data = np.random.normal(size=[256, 3, 32, 32]).astype('f')
pyramid = np_laplacian_pyramid(data, 3)
data_tf = array_ops.placeholder(dtypes.float32, [256, 32, 32, 3])
pyramid_tf = swd._laplacian_pyramid(data_tf, 3)
with self.cached_session() as sess:
pyramid_tf = sess.run(
pyramid_tf, feed_dict={
data_tf: data.transpose(0, 2, 3, 1)
})
for x in range(3):
self.assertAllClose(
pyramid[x].transpose(0, 2, 3, 1), pyramid_tf[x], atol=1e-6)
def test_sliced_wasserstein_distance(self):
"""Test the distance."""
d1 = random_ops.random_uniform([256, 32, 32, 3])
d2 = random_ops.random_normal([256, 32, 32, 3])
wfunc = swd.sliced_wasserstein_distance(d1, d2)
with self.cached_session() as sess:
wscores = [sess.run(x) for x in wfunc]
self.assertAllClose(
np.array([0.014, 0.014], 'f'),
np.array([x[0] for x in wscores], 'f'),
rtol=0.15)
self.assertAllClose(
np.array([0.014, 0.020], 'f'),
np.array([x[1] for x in wscores], 'f'),
rtol=0.15)
def test_sliced_wasserstein_distance_svd(self):
"""Test the distance."""
d1 = random_ops.random_uniform([256, 32, 32, 3])
d2 = random_ops.random_normal([256, 32, 32, 3])
wfunc = swd.sliced_wasserstein_distance(d1, d2, use_svd=True)
with self.cached_session() as sess:
wscores = [sess.run(x) for x in wfunc]
self.assertAllClose(
np.array([0.013, 0.013], 'f'),
np.array([x[0] for x in wscores], 'f'),
rtol=0.15)
self.assertAllClose(
np.array([0.014, 0.019], 'f'),
np.array([x[1] for x in wscores], 'f'),
rtol=0.15)
def test_swd_mismatched(self):
"""Test the inputs mismatched shapes are detected."""
d1 = random_ops.random_uniform([256, 32, 32, 3])
d2 = random_ops.random_normal([256, 32, 31, 3])
d3 = random_ops.random_normal([256, 31, 32, 3])
d4 = random_ops.random_normal([255, 32, 32, 3])
with self.assertRaises(ValueError):
swd.sliced_wasserstein_distance(d1, d2)
with self.assertRaises(ValueError):
swd.sliced_wasserstein_distance(d1, d3)
with self.assertRaises(ValueError):
swd.sliced_wasserstein_distance(d1, d4)
def test_swd_not_rgb(self):
"""Test that only RGB is supported."""
d1 = random_ops.random_uniform([256, 32, 32, 1])
d2 = random_ops.random_normal([256, 32, 32, 1])
with self.assertRaises(ValueError):
swd.sliced_wasserstein_distance(d1, d2)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/gan/python/eval/python/sliced_wasserstein_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model evaluation tools for TF-GAN."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.gan.python.eval.python import sliced_wasserstein_impl
# pylint: disable=wildcard-import
from tensorflow.contrib.gan.python.eval.python.sliced_wasserstein_impl import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
__all__ = sliced_wasserstein_impl.__all__
remove_undocumented(__name__, __all__)
|
tensorflow-master
|
tensorflow/contrib/gan/python/eval/python/sliced_wasserstein.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model evaluation tools for TF-GAN."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.gan.python.eval.python import classifier_metrics_impl
# pylint: disable=wildcard-import
from tensorflow.contrib.gan.python.eval.python.classifier_metrics_impl import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
__all__ = classifier_metrics_impl.__all__
remove_undocumented(__name__, __all__)
|
tensorflow-master
|
tensorflow/contrib/gan/python/eval/python/classifier_metrics.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of Sliced Wasserstein Distance.
Proposed in https://arxiv.org/abs/1710.10196. The official Theano
implementation that we used as a reference can be found here:
https://github.com/tkarras/progressive_growing_of_gans
Note: this is not an exact distance but an approximation through random
projections.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import script_ops
__all__ = ['sliced_wasserstein_distance']
_GAUSSIAN_FILTER = np.float32([[1, 4, 6, 4, 1], [4, 16, 24, 16, 4], [
6, 24, 36, 24, 6
], [4, 16, 24, 16, 4], [1, 4, 6, 4, 1]]).reshape([5, 5, 1, 1]) / 256.0
def _laplacian_pyramid(batch, num_levels):
"""Compute a Laplacian pyramid.
Args:
batch: (tensor) The batch of images (batch, height, width, channels).
num_levels: (int) Desired number of hierarchical levels.
Returns:
List of tensors from the highest to lowest resolution.
"""
gaussian_filter = constant_op.constant(_GAUSSIAN_FILTER)
def spatial_conv(batch, gain):
s = array_ops.shape(batch)
padded = array_ops.pad(batch, [[0, 0], [2, 2], [2, 2], [0, 0]], 'REFLECT')
xt = array_ops.transpose(padded, [0, 3, 1, 2])
xt = array_ops.reshape(xt, [s[0] * s[3], s[1] + 4, s[2] + 4, 1])
conv_out = nn_ops.conv2d(xt, gaussian_filter * gain, [1] * 4, 'VALID')
conv_xt = array_ops.reshape(conv_out, [s[0], s[3], s[1], s[2]])
conv_xt = array_ops.transpose(conv_xt, [0, 2, 3, 1])
return conv_xt
def pyr_down(batch): # matches cv2.pyrDown()
return spatial_conv(batch, 1)[:, ::2, ::2]
def pyr_up(batch): # matches cv2.pyrUp()
s = array_ops.shape(batch)
zeros = array_ops.zeros([3 * s[0], s[1], s[2], s[3]])
res = array_ops.concat([batch, zeros], 0)
res = array_ops.batch_to_space(res, crops=[[0, 0], [0, 0]], block_size=2)
res = spatial_conv(res, 4)
return res
pyramid = [math_ops.cast(batch, dtypes.float32)]
for _ in range(1, num_levels):
pyramid.append(pyr_down(pyramid[-1]))
pyramid[-2] -= pyr_up(pyramid[-1])
return pyramid
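# Hedged usage sketch (not part of the original module): building a 3-level
# Laplacian pyramid for a small batch of 32x32 RGB images. The batch size and
# image size are illustrative assumptions.
def _example_laplacian_pyramid():
  batch = array_ops.zeros([4, 32, 32, 3])
  # Returns a list of tensors at resolutions 32x32, 16x16, and 8x8,
  # ordered from the highest to the lowest resolution.
  return _laplacian_pyramid(batch, num_levels=3)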
def _batch_to_patches(batch, patches_per_image, patch_size):
"""Extract patches from a batch.
Args:
batch: (tensor) The batch of images (batch, height, width, channels).
patches_per_image: (int) Number of patches to extract per image.
patch_size: (int) Size of the patches (size, size, channels) to extract.
Returns:
Tensor (batch*patches_per_image, patch_size, patch_size, channels) of
patches.
"""
def py_func_random_patches(batch):
"""Numpy wrapper."""
batch_size, height, width, channels = batch.shape
patch_count = patches_per_image * batch_size
hs = patch_size // 2
# Randomly pick patches.
patch_id, y, x, chan = np.ogrid[0:patch_count, -hs:hs + 1, -hs:hs + 1, 0:3]
img_id = patch_id // patches_per_image
# pylint: disable=g-no-augmented-assignment
# Need explicit addition for broadcast to work properly.
y = y + np.random.randint(hs, height - hs, size=(patch_count, 1, 1, 1))
x = x + np.random.randint(hs, width - hs, size=(patch_count, 1, 1, 1))
# pylint: enable=g-no-augmented-assignment
idx = ((img_id * height + y) * width + x) * channels + chan
patches = batch.flat[idx]
return patches
patches = script_ops.py_func(
py_func_random_patches, [batch], batch.dtype, stateful=False)
return patches
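# Hedged usage sketch (not part of the original module): extracting eight
# random 7x7 patches from each image in a batch of four 16x16 RGB images.
# All sizes here are illustrative assumptions.
def _example_batch_to_patches():
  batch = random_ops.random_uniform([4, 16, 16, 3])
  # Returns a (4 * 8, 7, 7, 3) tensor of randomly located patches.
  return _batch_to_patches(batch, patches_per_image=8, patch_size=7)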
def _normalize_patches(patches):
"""Normalize patches by their mean and standard deviation.
Args:
patches: (tensor) The batch of patches (batch, size, size, channels).
Returns:
Tensor (batch, size, size, channels) of the normalized patches.
"""
patches = array_ops.concat(patches, 0)
mean, variance = nn.moments(patches, [1, 2, 3], keep_dims=True)
patches = (patches - mean) / math_ops.sqrt(variance)
return array_ops.reshape(patches, [array_ops.shape(patches)[0], -1])
def _sort_rows(matrix, num_rows):
"""Sort matrix rows by the last column.
Args:
matrix: a matrix of values (row,col).
num_rows: (int) number of sorted rows to return from the matrix.
Returns:
Tensor (num_rows, col) of the sorted matrix top K rows.
"""
tmatrix = array_ops.transpose(matrix, [1, 0])
sorted_tmatrix = nn_ops.top_k(tmatrix, num_rows)[0]
return array_ops.transpose(sorted_tmatrix, [1, 0])
def _sliced_wasserstein(a, b, random_sampling_count, random_projection_dim):
"""Compute the approximate sliced Wasserstein distance.
Args:
a: (matrix) Distribution "a" of samples (row, col).
b: (matrix) Distribution "b" of samples (row, col).
random_sampling_count: (int) Number of random projections to average.
random_projection_dim: (int) Dimension of the random projection space.
Returns:
Float containing the approximate distance between "a" and "b".
"""
s = array_ops.shape(a)
means = []
for _ in range(random_sampling_count):
# Random projection matrix.
proj = random_ops.random_normal(
[array_ops.shape(a)[1], random_projection_dim])
proj *= math_ops.rsqrt(
math_ops.reduce_sum(math_ops.square(proj), 0, keepdims=True))
# Project both distributions and sort them.
proj_a = math_ops.matmul(a, proj)
proj_b = math_ops.matmul(b, proj)
proj_a = _sort_rows(proj_a, s[0])
proj_b = _sort_rows(proj_b, s[0])
# Pairwise Wasserstein distance.
wdist = math_ops.reduce_mean(math_ops.abs(proj_a - proj_b))
means.append(wdist)
return math_ops.reduce_mean(means)
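# Hedged reference sketch (not part of the original module): the same
# project-sort-compare step as above for a single 1-D random projection,
# written in numpy. The sample counts and feature dimension are illustrative
# assumptions.
def _example_sliced_wasserstein_1d():
  a_np = np.random.normal(size=(128, 49))
  b_np = np.random.normal(size=(128, 49))
  # Unit-norm random projection direction.
  proj = np.random.normal(size=(49, 1))
  proj /= np.sqrt(np.sum(np.square(proj)))
  # Project both sample sets onto the direction and sort; the mean absolute
  # difference of the sorted projections is the 1-D Wasserstein distance
  # between the projected empirical distributions.
  proj_a = np.sort(a_np.dot(proj), axis=0)
  proj_b = np.sort(b_np.dot(proj), axis=0)
  return np.mean(np.abs(proj_a - proj_b))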
def _sliced_wasserstein_svd(a, b):
"""Compute the approximate sliced Wasserstein distance using an SVD.
  This is not part of the paper; it is a variant that may give a more
  accurate measure.
Args:
a: (matrix) Distribution "a" of samples (row, col).
b: (matrix) Distribution "b" of samples (row, col).
Returns:
Float containing the approximate distance between "a" and "b".
"""
s = array_ops.shape(a)
# Random projection matrix.
sig, u = linalg_ops.svd(array_ops.concat([a, b], 0))[:2]
proj_a, proj_b = array_ops.split(u * sig, 2, axis=0)
proj_a = _sort_rows(proj_a[:, ::-1], s[0])
proj_b = _sort_rows(proj_b[:, ::-1], s[0])
# Pairwise Wasserstein distance.
wdist = math_ops.reduce_mean(math_ops.abs(proj_a - proj_b))
return wdist
def sliced_wasserstein_distance(real_images,
fake_images,
resolution_min=16,
patches_per_image=64,
patch_size=7,
random_sampling_count=1,
random_projection_dim=7 * 7 * 3,
use_svd=False):
"""Compute the Wasserstein distance between two distributions of images.
  Note that the measure varies with the number of images. Use 8192 images to get
numbers comparable to the ones in the original paper.
Args:
real_images: (tensor) Real images (batch, height, width, channels).
fake_images: (tensor) Fake images (batch, height, width, channels).
resolution_min: (int) Minimum resolution for the Laplacian pyramid.
patches_per_image: (int) Number of patches to extract per image per
Laplacian level.
patch_size: (int) Width of a square patch.
random_sampling_count: (int) Number of random projections to average.
random_projection_dim: (int) Dimension of the random projection space.
use_svd: experimental method to compute a more accurate distance.
Returns:
List of tuples (distance_real, distance_fake) for each level of the
Laplacian pyramid from the highest resolution to the lowest.
distance_real is the Wasserstein distance between real images
distance_fake is the Wasserstein distance between real and fake images.
Raises:
ValueError: If the inputs shapes are incorrect. Input tensor dimensions
(batch, height, width, channels) are expected to be known at graph
construction time. In addition height and width must be the same and the
number of colors should be exactly 3. Real and fake images must have the
same size.
"""
height = real_images.shape[1]
real_images.shape.assert_is_compatible_with([None, None, height, 3])
fake_images.shape.assert_is_compatible_with(real_images.shape)
# Select resolutions.
resolution_full = int(height)
resolution_min = min(resolution_min, resolution_full)
resolution_max = resolution_full
  # Pyramid levels: powers of two from resolution_max down to resolution_min.
resolutions = [
2**i
for i in range(
int(np.log2(resolution_max)),
int(np.log2(resolution_min)) - 1, -1)
]
# Gather patches for each level of the Laplacian pyramids.
patches_real, patches_fake, patches_test = (
[[] for _ in resolutions] for _ in range(3))
for lod, level in enumerate(
_laplacian_pyramid(real_images, len(resolutions))):
patches_real[lod].append(
_batch_to_patches(level, patches_per_image, patch_size))
patches_test[lod].append(
_batch_to_patches(level, patches_per_image, patch_size))
for lod, level in enumerate(
_laplacian_pyramid(fake_images, len(resolutions))):
patches_fake[lod].append(
_batch_to_patches(level, patches_per_image, patch_size))
for lod in range(len(resolutions)):
for patches in [patches_real, patches_test, patches_fake]:
patches[lod] = _normalize_patches(patches[lod])
# Evaluate scores.
scores = []
for lod in range(len(resolutions)):
if not use_svd:
scores.append(
(_sliced_wasserstein(patches_real[lod], patches_test[lod],
random_sampling_count, random_projection_dim),
_sliced_wasserstein(patches_real[lod], patches_fake[lod],
random_sampling_count, random_projection_dim)))
else:
scores.append(
(_sliced_wasserstein_svd(patches_real[lod], patches_test[lod]),
_sliced_wasserstein_svd(patches_real[lod], patches_fake[lod])))
return scores
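# Hedged usage sketch (not part of the original module): computing the sliced
# Wasserstein scores for two batches of 64x64 RGB images. The batch size is an
# illustrative assumption; the original paper uses 8192 images per set.
def _example_sliced_wasserstein_distance():
  real = random_ops.random_uniform([64, 64, 64, 3])
  fake = random_ops.random_normal([64, 64, 64, 3])
  # Returns one (distance_real, distance_fake) tuple per Laplacian pyramid
  # level, from the highest resolution (64) down to resolution_min (16).
  return sliced_wasserstein_distance(real, fake, resolution_min=16)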
|
tensorflow-master
|
tensorflow/contrib/gan/python/eval/python/sliced_wasserstein_impl.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility file for visualizing generated images."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.gan.python.eval.python import eval_utils_impl
# pylint: disable=wildcard-import
from tensorflow.contrib.gan.python.eval.python.eval_utils_impl import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
__all__ = eval_utils_impl.__all__
remove_undocumented(__name__, __all__)
|
tensorflow-master
|
tensorflow/contrib/gan/python/eval/python/eval_utils.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common TF-GAN summaries."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.gan.python.eval.python import summaries_impl
# pylint: disable=wildcard-import
from tensorflow.contrib.gan.python.eval.python.summaries_impl import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
__all__ = summaries_impl.__all__
remove_undocumented(__name__, __all__)
|
tensorflow-master
|
tensorflow/contrib/gan/python/eval/python/summaries.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility file for visualizing generated images."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
__all__ = [
"image_grid",
"image_reshaper",
]
# TODO(joelshor): Make this a special case of `image_reshaper`.
def image_grid(input_tensor, grid_shape, image_shape=(32, 32), num_channels=3):
"""Arrange a minibatch of images into a grid to form a single image.
Args:
input_tensor: Tensor. Minibatch of images to format, either 4D
([batch size, height, width, num_channels]) or flattened
([batch size, height * width * num_channels]).
grid_shape: Sequence of int. The shape of the image grid,
formatted as [grid_height, grid_width].
image_shape: Sequence of int. The shape of a single image,
formatted as [image_height, image_width].
num_channels: int. The number of channels in an image.
Returns:
Tensor representing a single image in which the input images have been
arranged into a grid.
Raises:
ValueError: The grid shape and minibatch size don't match, or the image
shape and number of channels are incompatible with the input tensor.
"""
if grid_shape[0] * grid_shape[1] != int(input_tensor.shape[0]):
raise ValueError("Grid shape %s incompatible with minibatch size %i." %
(grid_shape, int(input_tensor.shape[0])))
if len(input_tensor.shape) == 2:
num_features = image_shape[0] * image_shape[1] * num_channels
if int(input_tensor.shape[1]) != num_features:
raise ValueError("Image shape and number of channels incompatible with "
"input tensor.")
elif len(input_tensor.shape) == 4:
if (int(input_tensor.shape[1]) != image_shape[0] or
int(input_tensor.shape[2]) != image_shape[1] or
int(input_tensor.shape[3]) != num_channels):
raise ValueError("Image shape and number of channels incompatible with "
"input tensor.")
else:
raise ValueError("Unrecognized input tensor format.")
height, width = grid_shape[0] * image_shape[0], grid_shape[1] * image_shape[1]
input_tensor = array_ops.reshape(
input_tensor, tuple(grid_shape) + tuple(image_shape) + (num_channels,))
input_tensor = array_ops.transpose(input_tensor, [0, 1, 3, 2, 4])
input_tensor = array_ops.reshape(
input_tensor, [grid_shape[0], width, image_shape[0], num_channels])
input_tensor = array_ops.transpose(input_tensor, [0, 2, 1, 3])
input_tensor = array_ops.reshape(
input_tensor, [1, height, width, num_channels])
return input_tensor
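# Hedged usage sketch (not part of the original module): arranging a minibatch
# of 25 32x32 RGB images into a 5x5 grid. The batch shape is an illustrative
# assumption.
def _example_image_grid():
  images = array_ops.zeros([25, 32, 32, 3])
  # Returns a [1, 160, 160, 3] tensor containing the tiled images.
  return image_grid(images, grid_shape=(5, 5), image_shape=(32, 32))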
def _validate_images(images):
for img in images:
img.shape.assert_has_rank(3)
img.shape.assert_is_fully_defined()
if img.shape[-1] not in (1, 3):
raise ValueError("image_reshaper only supports 1 or 3 channel images.")
# TODO(joelshor): Move the dimension logic from Python to Tensorflow.
def image_reshaper(images, num_cols=None):
"""A reshaped summary image.
Returns an image that will contain all elements in the list and will be
laid out in a nearly-square tiling pattern (e.g. 11 images will lead to a
3x4 tiled image).
Args:
images: Image data to summarize. Can be an RGB or grayscale image, a list of
such images, or a set of RGB images concatenated along the depth
dimension. The shape of each image is assumed to be [batch_size,
height, width, depth].
num_cols: (Optional) If provided, this is the number of columns in the final
output image grid. Otherwise, the number of columns is determined by
the number of images.
Returns:
A summary image matching the input with automatic tiling if needed.
Output shape is [1, height, width, channels].
"""
if isinstance(images, ops.Tensor):
images = array_ops.unstack(images)
_validate_images(images)
num_images = len(images)
num_columns = (num_cols if num_cols else
int(math.ceil(math.sqrt(num_images))))
num_rows = int(math.ceil(float(num_images) / num_columns))
rows = [images[x:x+num_columns] for x in range(0, num_images, num_columns)]
# Add empty image tiles if the last row is incomplete.
num_short = num_rows * num_columns - num_images
assert num_short >= 0 and num_short < num_columns
if num_short > 0:
rows[-1].extend([array_ops.zeros_like(images[-1])] * num_short)
# Convert each row from a list of tensors to a single tensor.
rows = [array_ops.concat(row, 1) for row in rows]
# Stack rows vertically.
img = array_ops.concat(rows, 0)
return array_ops.expand_dims(img, 0)
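# Hedged usage sketch (not part of the original module): tiling 11 grayscale
# images into a nearly square grid. With the default column count this yields
# a 3x4 grid padded with one blank tile; the image shape is an illustrative
# assumption.
def _example_image_reshaper():
  images = [array_ops.zeros([32, 32, 1]) for _ in range(11)]
  # Returns a [1, 3 * 32, 4 * 32, 1] summary image.
  return image_reshaper(images)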
|
tensorflow-master
|
tensorflow/contrib/gan/python/eval/python/eval_utils_impl.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TF-GAN classifier_metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tarfile
import tempfile
from absl.testing import parameterized
import numpy as np
from scipy import linalg as scp_linalg
from google.protobuf import text_format
from tensorflow.contrib.gan.python.eval.python import classifier_metrics_impl as classifier_metrics
from tensorflow.core.framework import graph_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
mock = test.mock
def _numpy_softmax(x):
e_x = np.exp(x - np.max(x, axis=1)[:, None])
return e_x / np.sum(e_x, axis=1)[:, None]
def _expected_inception_score(logits):
p = _numpy_softmax(logits)
q = np.expand_dims(np.mean(p, 0), 0)
per_example_logincscore = np.sum(p * (np.log(p) - np.log(q)), 1)
return np.exp(np.mean(per_example_logincscore))
def _expected_mean_only_fid(real_imgs, gen_imgs):
m = np.mean(real_imgs, axis=0)
m_v = np.mean(gen_imgs, axis=0)
mean = np.square(m - m_v).sum()
mofid = mean
return mofid
def _expected_diagonal_only_fid(real_imgs, gen_imgs):
m = np.mean(real_imgs, axis=0)
m_v = np.mean(gen_imgs, axis=0)
var = np.var(real_imgs, axis=0)
var_v = np.var(gen_imgs, axis=0)
sqcc = np.sqrt(var * var_v)
mean = (np.square(m - m_v)).sum()
trace = (var + var_v - 2 * sqcc).sum()
dofid = mean + trace
return dofid
def _expected_fid(real_imgs, gen_imgs):
m = np.mean(real_imgs, axis=0)
m_v = np.mean(gen_imgs, axis=0)
sigma = np.cov(real_imgs, rowvar=False)
sigma_v = np.cov(gen_imgs, rowvar=False)
sqcc = scp_linalg.sqrtm(np.dot(sigma, sigma_v))
mean = np.square(m - m_v).sum()
trace = np.trace(sigma + sigma_v - 2 * sqcc)
fid = mean + trace
return fid
def _expected_trace_sqrt_product(sigma, sigma_v):
return np.trace(scp_linalg.sqrtm(np.dot(sigma, sigma_v)))
def _expected_kid_and_std(real_imgs, gen_imgs, max_block_size=1024):
n_r, dim = real_imgs.shape
n_g = gen_imgs.shape[0]
n_blocks = int(np.ceil(max(n_r, n_g) / max_block_size))
sizes_r = np.full(n_blocks, n_r // n_blocks)
to_patch = n_r - n_blocks * (n_r // n_blocks)
if to_patch > 0:
sizes_r[-to_patch:] += 1
inds_r = np.r_[0, np.cumsum(sizes_r)]
assert inds_r[-1] == n_r
sizes_g = np.full(n_blocks, n_g // n_blocks)
to_patch = n_g - n_blocks * (n_g // n_blocks)
if to_patch > 0:
sizes_g[-to_patch:] += 1
inds_g = np.r_[0, np.cumsum(sizes_g)]
assert inds_g[-1] == n_g
ests = []
for i in range(n_blocks):
r = real_imgs[inds_r[i]:inds_r[i + 1]]
g = gen_imgs[inds_g[i]:inds_g[i + 1]]
k_rr = (np.dot(r, r.T) / dim + 1)**3
k_rg = (np.dot(r, g.T) / dim + 1)**3
k_gg = (np.dot(g, g.T) / dim + 1)**3
ests.append(-2 * k_rg.mean() +
k_rr[np.triu_indices_from(k_rr, k=1)].mean() +
k_gg[np.triu_indices_from(k_gg, k=1)].mean())
var = np.var(ests, ddof=1) if len(ests) > 1 else np.nan
return np.mean(ests), np.sqrt(var / len(ests))
# A dummy GraphDef string with the minimum number of Ops.
graphdef_string = """
node {
name: "Mul"
op: "Placeholder"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "shape"
value {
shape {
dim {
size: -1
}
dim {
size: 299
}
dim {
size: 299
}
dim {
size: 3
}
}
}
}
}
node {
name: "logits"
op: "Placeholder"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "shape"
value {
shape {
dim {
size: -1
}
dim {
size: 1001
}
}
}
}
}
node {
name: "pool_3"
op: "Placeholder"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "shape"
value {
shape {
dim {
size: -1
}
dim {
size: 2048
}
}
}
}
}
versions {
producer: 24
}
"""
def _get_dummy_graphdef():
dummy_graphdef = graph_pb2.GraphDef()
text_format.Merge(graphdef_string, dummy_graphdef)
return dummy_graphdef
def _run_with_mock(function, *args, **kwargs):
with mock.patch.object(
classifier_metrics,
'get_graph_def_from_url_tarball') as mock_tarball_getter:
mock_tarball_getter.return_value = _get_dummy_graphdef()
return function(*args, **kwargs)
class ClassifierMetricsTest(test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
('GraphDef', False),
('DefaultGraphDefFn', True))
def test_run_inception_graph(self, use_default_graph_def):
"""Test `run_inception` graph construction."""
batch_size = 7
img = array_ops.ones([batch_size, 299, 299, 3])
if use_default_graph_def:
logits = _run_with_mock(classifier_metrics.run_inception, img)
else:
logits = classifier_metrics.run_inception(img, _get_dummy_graphdef())
self.assertIsInstance(logits, ops.Tensor)
logits.shape.assert_is_compatible_with([batch_size, 1001])
# Check that none of the model variables are trainable.
self.assertListEqual([], variables.trainable_variables())
@parameterized.named_parameters(
('GraphDef', False),
('DefaultGraphDefFn', True))
def test_run_inception_graph_pool_output(self, use_default_graph_def):
"""Test `run_inception` graph construction with pool output."""
batch_size = 3
img = array_ops.ones([batch_size, 299, 299, 3])
if use_default_graph_def:
pool = _run_with_mock(
classifier_metrics.run_inception,
img,
output_tensor=classifier_metrics.INCEPTION_FINAL_POOL)
else:
pool = classifier_metrics.run_inception(
img, _get_dummy_graphdef(),
output_tensor=classifier_metrics.INCEPTION_FINAL_POOL)
self.assertIsInstance(pool, ops.Tensor)
pool.shape.assert_is_compatible_with([batch_size, 2048])
# Check that none of the model variables are trainable.
self.assertListEqual([], variables.trainable_variables())
def test_run_inception_multiple_outputs(self):
"""Test `run_inception` graph construction with multiple outputs."""
batch_size = 3
img = array_ops.ones([batch_size, 299, 299, 3])
logits, pool = _run_with_mock(
classifier_metrics.run_inception,
img,
output_tensor=[
classifier_metrics.INCEPTION_OUTPUT,
classifier_metrics.INCEPTION_FINAL_POOL
])
self.assertIsInstance(logits, ops.Tensor)
self.assertIsInstance(pool, ops.Tensor)
logits.shape.assert_is_compatible_with([batch_size, 1001])
pool.shape.assert_is_compatible_with([batch_size, 2048])
# Check that none of the model variables are trainable.
self.assertListEqual([], variables.trainable_variables())
def test_inception_score_graph(self):
"""Test `inception_score` graph construction."""
score = _run_with_mock(
classifier_metrics.inception_score,
array_ops.zeros([6, 299, 299, 3]),
num_batches=3)
self.assertIsInstance(score, ops.Tensor)
score.shape.assert_has_rank(0)
# Check that none of the model variables are trainable.
self.assertListEqual([], variables.trainable_variables())
def test_frechet_inception_distance_graph(self):
"""Test `frechet_inception_distance` graph construction."""
img = array_ops.ones([7, 299, 299, 3])
distance = _run_with_mock(
classifier_metrics.frechet_inception_distance, img, img)
self.assertIsInstance(distance, ops.Tensor)
distance.shape.assert_has_rank(0)
# Check that none of the model variables are trainable.
self.assertListEqual([], variables.trainable_variables())
def test_kernel_inception_distance_graph(self):
"""Test `frechet_inception_distance` graph construction."""
img = array_ops.ones([7, 299, 299, 3])
distance = _run_with_mock(classifier_metrics.kernel_inception_distance, img,
img)
self.assertIsInstance(distance, ops.Tensor)
distance.shape.assert_has_rank(0)
# Check that none of the model variables are trainable.
self.assertListEqual([], variables.trainable_variables())
def test_run_inception_multicall(self):
"""Test that `run_inception` can be called multiple times."""
for batch_size in (7, 3, 2):
img = array_ops.ones([batch_size, 299, 299, 3])
_run_with_mock(classifier_metrics.run_inception, img)
def test_invalid_input(self):
"""Test that functions properly fail on invalid input."""
with self.assertRaisesRegexp(ValueError, 'Shapes .* are incompatible'):
classifier_metrics.run_inception(array_ops.ones([7, 50, 50, 3]))
p = array_ops.zeros([8, 10])
p_logits = array_ops.zeros([8, 10])
q = array_ops.zeros([10])
with self.assertRaisesRegexp(ValueError, 'must be floating type'):
classifier_metrics._kl_divergence(
array_ops.zeros([8, 10], dtype=dtypes.int32), p_logits, q)
with self.assertRaisesRegexp(ValueError, 'must be floating type'):
classifier_metrics._kl_divergence(p,
array_ops.zeros(
[8, 10], dtype=dtypes.int32), q)
with self.assertRaisesRegexp(ValueError, 'must be floating type'):
classifier_metrics._kl_divergence(p, p_logits,
array_ops.zeros(
[10], dtype=dtypes.int32))
with self.assertRaisesRegexp(ValueError, 'must have rank 2'):
classifier_metrics._kl_divergence(array_ops.zeros([8]), p_logits, q)
with self.assertRaisesRegexp(ValueError, 'must have rank 2'):
classifier_metrics._kl_divergence(p, array_ops.zeros([8]), q)
with self.assertRaisesRegexp(ValueError, 'must have rank 1'):
classifier_metrics._kl_divergence(p, p_logits, array_ops.zeros([10, 8]))
def test_inception_score_value(self):
"""Test that `inception_score` gives the correct value."""
logits = np.array(
[np.array([1, 2] * 500 + [4]),
np.array([4, 5] * 500 + [6])])
unused_image = array_ops.zeros([2, 299, 299, 3])
incscore = _run_with_mock(classifier_metrics.inception_score, unused_image)
with self.cached_session(use_gpu=True) as sess:
incscore_np = sess.run(incscore, {'concat:0': logits})
self.assertAllClose(_expected_inception_score(logits), incscore_np)
def test_mean_only_frechet_classifier_distance_value(self):
"""Test that `frechet_classifier_distance` gives the correct value."""
np.random.seed(0)
pool_real_a = np.float32(np.random.randn(256, 2048))
pool_gen_a = np.float32(np.random.randn(256, 2048))
tf_pool_real_a = array_ops.constant(pool_real_a)
tf_pool_gen_a = array_ops.constant(pool_gen_a)
mofid_op = classifier_metrics.mean_only_frechet_classifier_distance_from_activations( # pylint: disable=line-too-long
tf_pool_real_a, tf_pool_gen_a)
with self.cached_session() as sess:
actual_mofid = sess.run(mofid_op)
expected_mofid = _expected_mean_only_fid(pool_real_a, pool_gen_a)
self.assertAllClose(expected_mofid, actual_mofid, 0.0001)
def test_diagonal_only_frechet_classifier_distance_value(self):
"""Test that `frechet_classifier_distance` gives the correct value."""
np.random.seed(0)
pool_real_a = np.float32(np.random.randn(256, 2048))
pool_gen_a = np.float32(np.random.randn(256, 2048))
tf_pool_real_a = array_ops.constant(pool_real_a)
tf_pool_gen_a = array_ops.constant(pool_gen_a)
dofid_op = classifier_metrics.diagonal_only_frechet_classifier_distance_from_activations( # pylint: disable=line-too-long
tf_pool_real_a, tf_pool_gen_a)
with self.cached_session() as sess:
actual_dofid = sess.run(dofid_op)
expected_dofid = _expected_diagonal_only_fid(pool_real_a, pool_gen_a)
self.assertAllClose(expected_dofid, actual_dofid, 0.0001)
def test_frechet_classifier_distance_value(self):
"""Test that `frechet_classifier_distance` gives the correct value."""
np.random.seed(0)
# Make num_examples > num_features to ensure scipy's sqrtm function
# doesn't return a complex matrix.
test_pool_real_a = np.float32(np.random.randn(512, 256))
test_pool_gen_a = np.float32(np.random.randn(512, 256))
fid_op = _run_with_mock(
classifier_metrics.frechet_classifier_distance,
test_pool_real_a,
test_pool_gen_a,
classifier_fn=lambda x: x)
with self.cached_session() as sess:
actual_fid = sess.run(fid_op)
expected_fid = _expected_fid(test_pool_real_a, test_pool_gen_a)
self.assertAllClose(expected_fid, actual_fid, 0.0001)
def test_frechet_classifier_distance_covariance(self):
"""Test that `frechet_classifier_distance` takes covariance into account."""
np.random.seed(0)
# Make num_examples > num_features to ensure scipy's sqrtm function
# doesn't return a complex matrix.
test_pool_reals, test_pool_gens = [], []
for i in range(1, 11, 2):
test_pool_reals.append(np.float32(np.random.randn(2048, 256) * i))
test_pool_gens.append(np.float32(np.random.randn(2048, 256) * i))
fid_ops = []
for i in range(len(test_pool_reals)):
fid_ops.append(_run_with_mock(
classifier_metrics.frechet_classifier_distance,
test_pool_reals[i],
test_pool_gens[i],
classifier_fn=lambda x: x))
fids = []
with self.cached_session() as sess:
for fid_op in fid_ops:
fids.append(sess.run(fid_op))
# Check that the FIDs increase monotonically.
self.assertTrue(all(fid_a < fid_b for fid_a, fid_b in zip(fids, fids[1:])))
def test_kernel_classifier_distance_value(self):
"""Test that `kernel_classifier_distance` gives the correct value."""
np.random.seed(0)
test_pool_real_a = np.float32(np.random.randn(512, 256))
test_pool_gen_a = np.float32(np.random.randn(512, 256) * 1.1 + .05)
kid_op = _run_with_mock(
classifier_metrics.kernel_classifier_distance_and_std,
test_pool_real_a,
test_pool_gen_a,
classifier_fn=lambda x: x,
max_block_size=600)
with self.cached_session() as sess:
actual_kid, actual_std = sess.run(kid_op)
expected_kid, expected_std = _expected_kid_and_std(test_pool_real_a,
test_pool_gen_a)
self.assertAllClose(expected_kid, actual_kid, 0.001)
self.assertAllClose(expected_std, actual_std, 0.001)
def test_kernel_classifier_distance_block_sizes(self):
"""Test that `kernel_classifier_distance` works with unusual max_block_size
values..
"""
np.random.seed(0)
test_pool_real_a = np.float32(np.random.randn(512, 256))
test_pool_gen_a = np.float32(np.random.randn(768, 256) * 1.1 + .05)
max_block_size = array_ops.placeholder(dtypes.int32, shape=())
kid_op = _run_with_mock(
classifier_metrics.kernel_classifier_distance_and_std_from_activations,
array_ops.constant(test_pool_real_a),
array_ops.constant(test_pool_gen_a),
max_block_size=max_block_size)
for block_size in [50, 512, 1000]:
with self.cached_session() as sess:
actual_kid, actual_std = sess.run(kid_op, {max_block_size: block_size})
expected_kid, expected_std = _expected_kid_and_std(
test_pool_real_a, test_pool_gen_a, max_block_size=block_size)
self.assertAllClose(expected_kid, actual_kid, 0.001)
self.assertAllClose(expected_std, actual_std, 0.001)
def test_trace_sqrt_product_value(self):
"""Test that `trace_sqrt_product` gives the correct value."""
np.random.seed(0)
# Make num_examples > num_features to ensure scipy's sqrtm function
# doesn't return a complex matrix.
test_pool_real_a = np.float32(np.random.randn(512, 256))
test_pool_gen_a = np.float32(np.random.randn(512, 256))
cov_real = np.cov(test_pool_real_a, rowvar=False)
cov_gen = np.cov(test_pool_gen_a, rowvar=False)
trace_sqrt_prod_op = _run_with_mock(classifier_metrics.trace_sqrt_product,
cov_real, cov_gen)
with self.cached_session() as sess:
# trace_sqrt_product: tsp
actual_tsp = sess.run(trace_sqrt_prod_op)
expected_tsp = _expected_trace_sqrt_product(cov_real, cov_gen)
self.assertAllClose(actual_tsp, expected_tsp, 0.01)
def test_preprocess_image_graph(self):
"""Test `preprocess_image` graph construction."""
incorrectly_sized_image = array_ops.zeros([520, 240, 3])
correct_image = classifier_metrics.preprocess_image(
images=incorrectly_sized_image)
_run_with_mock(classifier_metrics.run_inception,
array_ops.expand_dims(correct_image, 0))
def test_get_graph_def_from_url_tarball(self):
"""Test `get_graph_def_from_url_tarball`."""
# Write dummy binary GraphDef to tempfile.
with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
tmp_file.write(_get_dummy_graphdef().SerializeToString())
relative_path = os.path.relpath(tmp_file.name)
# Create gzip tarball.
tar_dir = tempfile.mkdtemp()
tar_filename = os.path.join(tar_dir, 'tmp.tar.gz')
with tarfile.open(tar_filename, 'w:gz') as tar:
tar.add(relative_path)
with mock.patch.object(classifier_metrics, 'urllib') as mock_urllib:
mock_urllib.request.urlretrieve.return_value = tar_filename, None
graph_def = classifier_metrics.get_graph_def_from_url_tarball(
'unused_url', relative_path)
self.assertIsInstance(graph_def, graph_pb2.GraphDef)
self.assertEqual(_get_dummy_graphdef(), graph_def)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/gan/python/eval/python/classifier_metrics_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common TF-GAN summaries."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.gan.python import namedtuples
from tensorflow.contrib.gan.python.eval.python import eval_utils
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops.losses import util as loss_util
from tensorflow.python.summary import summary
__all__ = [
'add_gan_model_image_summaries',
'add_image_comparison_summaries',
'add_gan_model_summaries',
'add_regularization_loss_summaries',
'add_cyclegan_image_summaries',
'add_stargan_image_summaries'
]
def _assert_is_image(data):
data.shape.assert_has_rank(4)
data.shape[1:].assert_is_fully_defined()
def add_gan_model_image_summaries(gan_model, grid_size=4, model_summaries=True):
"""Adds image summaries for real and fake images.
Args:
gan_model: A GANModel tuple.
grid_size: The size of an image grid.
model_summaries: Also add summaries of the model.
Raises:
ValueError: If real and generated data aren't images.
"""
if isinstance(gan_model, namedtuples.CycleGANModel):
raise ValueError(
'`add_gan_model_image_summaries` does not take CycleGANModels. Please '
'use `add_cyclegan_image_summaries` instead.')
_assert_is_image(gan_model.real_data)
_assert_is_image(gan_model.generated_data)
num_images = grid_size ** 2
real_image_shape = gan_model.real_data.shape.as_list()[1:3]
generated_image_shape = gan_model.generated_data.shape.as_list()[1:3]
real_channels = gan_model.real_data.shape.as_list()[3]
generated_channels = gan_model.generated_data.shape.as_list()[3]
summary.image(
'real_data',
eval_utils.image_grid(
gan_model.real_data[:num_images],
grid_shape=(grid_size, grid_size),
image_shape=real_image_shape,
num_channels=real_channels),
max_outputs=1)
summary.image(
'generated_data',
eval_utils.image_grid(
gan_model.generated_data[:num_images],
grid_shape=(grid_size, grid_size),
image_shape=generated_image_shape,
num_channels=generated_channels),
max_outputs=1)
if model_summaries:
add_gan_model_summaries(gan_model)
def add_cyclegan_image_summaries(cyclegan_model):
"""Adds image summaries for CycleGAN.
There are two summaries, one for each generator. The first image is the
generator input, the second is the generator output, and the third is G(F(x)).
Args:
cyclegan_model: A CycleGANModel tuple.
Raises:
ValueError: If `cyclegan_model` isn't a CycleGANModel.
ValueError: If generated data, generator inputs, and reconstructions aren't
images.
ValueError: If the generator input, generated data, and reconstructions
aren't all the same size.
"""
if not isinstance(cyclegan_model, namedtuples.CycleGANModel):
raise ValueError('`cyclegan_model` was not a CycleGANModel. Instead, was '
'%s' % type(cyclegan_model))
_assert_is_image(cyclegan_model.model_x2y.generator_inputs)
_assert_is_image(cyclegan_model.model_x2y.generated_data)
_assert_is_image(cyclegan_model.reconstructed_x)
_assert_is_image(cyclegan_model.model_y2x.generator_inputs)
_assert_is_image(cyclegan_model.model_y2x.generated_data)
_assert_is_image(cyclegan_model.reconstructed_y)
def _add_comparison_summary(gan_model, reconstructions):
image_list = (array_ops.unstack(gan_model.generator_inputs[:1]) +
array_ops.unstack(gan_model.generated_data[:1]) +
array_ops.unstack(reconstructions[:1]))
summary.image(
'image_comparison', eval_utils.image_reshaper(
image_list, num_cols=len(image_list)), max_outputs=1)
with ops.name_scope('x2y_image_comparison_summaries'):
_add_comparison_summary(
cyclegan_model.model_x2y, cyclegan_model.reconstructed_x)
with ops.name_scope('y2x_image_comparison_summaries'):
_add_comparison_summary(
cyclegan_model.model_y2x, cyclegan_model.reconstructed_y)
def add_image_comparison_summaries(gan_model, num_comparisons=2,
display_diffs=False):
"""Adds image summaries to compare triplets of images.
The first image is the generator input, the second is the generator output,
and the third is the real data. This style of comparison is useful for
image translation problems, where the generator input is a corrupted image,
the generator output is the reconstruction, and the real data is the target.
Args:
gan_model: A GANModel tuple.
num_comparisons: The number of image triplets to display.
display_diffs: Also display the difference between generated and target.
Raises:
ValueError: If real data, generated data, and generator inputs aren't
images.
ValueError: If the generator input, real, and generated data aren't all the
same size.
"""
_assert_is_image(gan_model.generator_inputs)
_assert_is_image(gan_model.generated_data)
_assert_is_image(gan_model.real_data)
gan_model.generated_data.shape.assert_is_compatible_with(
gan_model.generator_inputs.shape)
gan_model.real_data.shape.assert_is_compatible_with(
gan_model.generated_data.shape)
image_list = []
image_list.extend(
array_ops.unstack(gan_model.generator_inputs[:num_comparisons]))
image_list.extend(
array_ops.unstack(gan_model.generated_data[:num_comparisons]))
image_list.extend(array_ops.unstack(gan_model.real_data[:num_comparisons]))
if display_diffs:
generated_list = array_ops.unstack(
gan_model.generated_data[:num_comparisons])
real_list = array_ops.unstack(gan_model.real_data[:num_comparisons])
diffs = [
math_ops.abs(math_ops.cast(generated, dtypes.float32) -
math_ops.cast(real, dtypes.float32))
for generated, real in zip(generated_list, real_list)
]
image_list.extend(diffs)
# Reshape image and display.
summary.image(
'image_comparison',
eval_utils.image_reshaper(image_list, num_cols=num_comparisons),
max_outputs=1)
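# Hedged illustrative sketch (not part of the original library): the diffs
# displayed by add_image_comparison_summaries are simply |generated - real|
# computed in float32. The helper below shows that step in isolation with
# hypothetical zero/one tensors, reusing the array_ops, math_ops and dtypes
# modules this file already imports.
def _example_image_diffs():
  """Returns example per-image absolute diffs, mirroring the step above."""
  generated = array_ops.zeros([2, 4, 4, 3], dtype=dtypes.float32)
  real = array_ops.ones([2, 4, 4, 3], dtype=dtypes.float32)
  return [
      math_ops.abs(math_ops.cast(g, dtypes.float32) -
                   math_ops.cast(r, dtypes.float32))
      for g, r in zip(array_ops.unstack(generated), array_ops.unstack(real))
  ]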
def add_stargan_image_summaries(stargan_model,
num_images=2,
display_diffs=False):
"""Adds image summaries to see StarGAN image results.
If display_diffs is True, each image result has `2` rows and `num_domains + 1`
columns.
The first row looks like:
[original_image, transformed_to_domain_0, transformed_to_domain_1, ...]
The second row looks like:
[no_modification_baseline, transformed_to_domain_0-original_image, ...]
If display_diffs is False, only the first row is shown.
  IMPORTANT:
    Since the model does not originally transform the image to every domain,
    we transform them on the fly within this function, in parallel.
Args:
stargan_model: A StarGANModel tuple.
num_images: The number of examples/images to be transformed and shown.
display_diffs: Also display the difference between generated and target.
Raises:
ValueError: If input_data is not images.
ValueError: If input_data_domain_label is not rank 2.
ValueError: If dimension 2 of input_data_domain_label is not fully defined.
"""
_assert_is_image(stargan_model.input_data)
stargan_model.input_data_domain_label.shape.assert_has_rank(2)
stargan_model.input_data_domain_label.shape[1:].assert_is_fully_defined()
num_domains = stargan_model.input_data_domain_label.get_shape().as_list()[-1]
def _build_image(image):
"""Helper function to create a result for each image on the fly."""
# Expand the first dimension as batch_size = 1.
images = array_ops.expand_dims(image, axis=0)
    # Tile the image num_domains times so all domain translations are
    # generated in one batch.
images = array_ops.tile(images, [num_domains, 1, 1, 1])
    # Create one-hot targets for domains 0, 1, 2, ..., num_domains-1.
targets = array_ops.one_hot(list(range(num_domains)), num_domains)
with variable_scope.variable_scope(
stargan_model.generator_scope, reuse=True):
# Add the original image.
output_images_list = [image]
# Generate the image and add to the list.
gen_images = stargan_model.generator_fn(images, targets)
gen_images_list = array_ops.split(gen_images, num_domains)
gen_images_list = [
array_ops.squeeze(img, axis=0) for img in gen_images_list
]
output_images_list.extend(gen_images_list)
# Display diffs.
if display_diffs:
diff_images = gen_images - images
diff_images_list = array_ops.split(diff_images, num_domains)
diff_images_list = [
array_ops.squeeze(img, axis=0) for img in diff_images_list
]
output_images_list.append(array_ops.zeros_like(image))
output_images_list.extend(diff_images_list)
# Create the final image.
final_image = eval_utils.image_reshaper(
output_images_list, num_cols=num_domains + 1)
    # Drop the leading batch dimension (size 1).
return array_ops.squeeze(final_image, axis=0)
summary.image(
'stargan_image_generation',
map_fn.map_fn(
_build_image,
stargan_model.input_data[:num_images],
parallel_iterations=num_images,
back_prop=False,
swap_memory=True),
max_outputs=num_images)
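# Hedged illustrative sketch (not part of the original library): _build_image
# above works by tiling one input image num_domains times and pairing the tile
# with one-hot domain targets 0..num_domains-1, so a single generator call
# produces every domain translation. Shapes are hypothetical and only the
# module's existing array_ops import is used.
def _example_tiled_inputs_and_targets(image, num_domains):
  """Returns (tiled_images, one_hot_targets) for a single input image."""
  tiled = array_ops.tile(
      array_ops.expand_dims(image, axis=0), [num_domains, 1, 1, 1])
  targets = array_ops.one_hot(list(range(num_domains)), num_domains)
  return tiled, targets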
def add_gan_model_summaries(gan_model):
"""Adds typical GANModel summaries.
Args:
gan_model: A GANModel tuple.
"""
if isinstance(gan_model, namedtuples.CycleGANModel):
with ops.name_scope('cyclegan_x2y_summaries'):
add_gan_model_summaries(gan_model.model_x2y)
with ops.name_scope('cyclegan_y2x_summaries'):
add_gan_model_summaries(gan_model.model_y2x)
return
with ops.name_scope('generator_variables'):
for var in gan_model.generator_variables:
summary.histogram(var.name, var)
with ops.name_scope('discriminator_variables'):
for var in gan_model.discriminator_variables:
summary.histogram(var.name, var)
def add_regularization_loss_summaries(gan_model):
"""Adds summaries for a regularization losses..
Args:
gan_model: A GANModel tuple.
"""
if isinstance(gan_model, namedtuples.CycleGANModel):
with ops.name_scope('cyclegan_x2y_regularization_loss_summaries'):
add_regularization_loss_summaries(gan_model.model_x2y)
with ops.name_scope('cyclegan_y2x_regularization_loss_summaries'):
add_regularization_loss_summaries(gan_model.model_y2x)
return
if gan_model.generator_scope:
summary.scalar(
'generator_regularization_loss',
loss_util.get_regularization_loss(gan_model.generator_scope.name))
if gan_model.discriminator_scope:
summary.scalar(
'discriminator_regularization_loss',
loss_util.get_regularization_loss(gan_model.discriminator_scope.name))
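# Hedged usage sketch (not part of the original library): a training script
# would typically call the helpers in this module once after its model is
# built. `gan_model` is assumed to be a tfgan GANModel constructed elsewhere;
# only functions defined above are used.
def _example_add_all_summaries(gan_model):
  """Adds image-comparison, variable-histogram and regularization summaries."""
  add_image_comparison_summaries(gan_model, num_comparisons=2,
                                 display_diffs=True)
  add_gan_model_summaries(gan_model)
  add_regularization_loss_summaries(gan_model)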
|
tensorflow-master
|
tensorflow/contrib/gan/python/eval/python/summaries_impl.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Kinesis Dataset.
@@KinesisDataset
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.kinesis.python.ops.kinesis_dataset_ops import KinesisDataset
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = [
"KinesisDataset",
]
remove_undocumented(__name__)
|
tensorflow-master
|
tensorflow/contrib/kinesis/__init__.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# ==============================================================================
"""Tests for KinesisDataset.
NOTE: boto3 is needed and the test has to be invoked manually:
```
$ bazel test -s --verbose_failures --config=opt \
--action_env=AWS_ACCESS_KEY_ID=XXXXXX \
--action_env=AWS_SECRET_ACCESS_KEY=XXXXXX \
//tensorflow/contrib/kinesis:kinesis_test
```
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import boto3
from tensorflow.contrib.kinesis.python.ops import kinesis_dataset_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class KinesisDatasetTest(test.TestCase):
def testKinesisDatasetOneShard(self):
client = boto3.client('kinesis', region_name='us-east-1')
    # Set up the Kinesis stream with 1 shard.
stream_name = "tf_kinesis_test_1"
client.create_stream(StreamName=stream_name, ShardCount=1)
# Wait until stream exists, default is 10 * 18 seconds.
client.get_waiter('stream_exists').wait(StreamName=stream_name)
for i in range(10):
data = "D" + str(i)
client.put_record(
StreamName=stream_name, Data=data, PartitionKey="TensorFlow" + str(i))
stream = array_ops.placeholder(dtypes.string, shape=[])
num_epochs = array_ops.placeholder(dtypes.int64, shape=[])
batch_size = array_ops.placeholder(dtypes.int64, shape=[])
repeat_dataset = kinesis_dataset_ops.KinesisDataset(
stream, read_indefinitely=False).repeat(num_epochs)
batch_dataset = repeat_dataset.batch(batch_size)
iterator = iterator_ops.Iterator.from_structure(
dataset_ops.get_legacy_output_types(batch_dataset))
init_op = iterator.make_initializer(repeat_dataset)
init_batch_op = iterator.make_initializer(batch_dataset)
get_next = iterator.get_next()
with self.cached_session() as sess:
# Basic test: read from shard 0 of stream 1.
sess.run(init_op, feed_dict={stream: stream_name, num_epochs: 1})
for i in range(10):
self.assertEqual("D" + str(i), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
client.delete_stream(StreamName=stream_name)
# Wait until stream deleted, default is 10 * 18 seconds.
client.get_waiter('stream_not_exists').wait(StreamName=stream_name)
def testKinesisDatasetTwoShards(self):
client = boto3.client('kinesis', region_name='us-east-1')
    # Set up the Kinesis stream with 2 shards.
stream_name = "tf_kinesis_test_2"
client.create_stream(StreamName=stream_name, ShardCount=2)
# Wait until stream exists, default is 10 * 18 seconds.
client.get_waiter('stream_exists').wait(StreamName=stream_name)
for i in range(10):
data = "D" + str(i)
client.put_record(
StreamName=stream_name, Data=data, PartitionKey="TensorFlow" + str(i))
response = client.describe_stream(StreamName=stream_name)
shard_id_0 = response["StreamDescription"]["Shards"][0]["ShardId"]
shard_id_1 = response["StreamDescription"]["Shards"][1]["ShardId"]
stream = array_ops.placeholder(dtypes.string, shape=[])
shard = array_ops.placeholder(dtypes.string, shape=[])
num_epochs = array_ops.placeholder(dtypes.int64, shape=[])
batch_size = array_ops.placeholder(dtypes.int64, shape=[])
repeat_dataset = kinesis_dataset_ops.KinesisDataset(
stream, shard, read_indefinitely=False).repeat(num_epochs)
batch_dataset = repeat_dataset.batch(batch_size)
iterator = iterator_ops.Iterator.from_structure(
dataset_ops.get_legacy_output_types(batch_dataset))
init_op = iterator.make_initializer(repeat_dataset)
init_batch_op = iterator.make_initializer(batch_dataset)
get_next = iterator.get_next()
data = []
with self.cached_session() as sess:
# Basic test: read from shard 0 of stream 2.
sess.run(
init_op, feed_dict={
stream: stream_name, shard: shard_id_0, num_epochs: 1})
with self.assertRaises(errors.OutOfRangeError):
# Use range(11) to guarantee the OutOfRangeError.
for i in range(11):
data.append(sess.run(get_next))
# Basic test: read from shard 1 of stream 2.
sess.run(
init_op, feed_dict={
stream: stream_name, shard: shard_id_1, num_epochs: 1})
with self.assertRaises(errors.OutOfRangeError):
# Use range(11) to guarantee the OutOfRangeError.
for i in range(11):
data.append(sess.run(get_next))
data.sort()
self.assertEqual(data, ["D" + str(i) for i in range(10)])
client.delete_stream(StreamName=stream_name)
# Wait until stream deleted, default is 10 * 18 seconds.
client.get_waiter('stream_not_exists').wait(StreamName=stream_name)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/kinesis/python/kernel_tests/kinesis_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Kinesis Dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.kinesis.python.ops import gen_dataset_ops
from tensorflow.contrib.kinesis.python.ops import kinesis_op_loader # pylint: disable=unused-import
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import structure
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.util import deprecation
class KinesisDataset(dataset_ops.DatasetSource):
"""A Kinesis Dataset that consumes the message.
Kinesis is a managed service provided by AWS for data streaming.
This dataset reads messages from Kinesis with each message presented
as a `tf.string`.
For example, we can construct and use the KinesisDataset as follows:
```python
tf.compat.v1.enable_eager_execution()
dataset = tf.contrib.kinesis.KinesisDataset(
"kinesis_stream_name", read_indefinitely=False)
for element in dataset:
print(element)
```
Since Kinesis is a data streaming service, data may not be available
at the time it is being read. The argument `read_indefinitely` is
used to control the behavior in this situation. If `read_indefinitely`
is `True`, then `KinesisDataset` will keep retrying to retrieve data
from the stream. If `read_indefinitely` is `False`, an `OutOfRangeError`
is returned immediately instead.
"""
@deprecation.deprecated(
None,
"tf.contrib.kinesis will be removed in 2.0, the support for Kinesis "
"will continue to be provided through the tensorflow/io GitHub project.")
def __init__(self,
stream,
shard="",
read_indefinitely=True,
interval=100000):
"""Create a KinesisDataset.
Args:
stream: A `tf.string` tensor containing the name of the stream.
shard: A `tf.string` tensor containing the id of the shard.
      read_indefinitely: If `True`, the Kinesis dataset will keep retrying
        on `EOF` after the `interval` period. If `False`, then the dataset
        will stop on `EOF`. The default value is `True`.
interval: The interval for the Kinesis Client to wait before
        it tries to get records again (in milliseconds).
"""
self._stream = ops.convert_to_tensor(
stream, dtype=dtypes.string, name="stream")
self._shard = ops.convert_to_tensor(
shard, dtype=dtypes.string, name="shard")
self._read_indefinitely = ops.convert_to_tensor(
read_indefinitely, dtype=dtypes.bool, name="read_indefinitely")
self._interval = ops.convert_to_tensor(
interval, dtype=dtypes.int64, name="interval")
super(KinesisDataset, self).__init__(self._as_variant_tensor())
def _as_variant_tensor(self):
return gen_dataset_ops.kinesis_dataset(
self._stream, self._shard, self._read_indefinitely, self._interval)
@property
def _element_structure(self):
return structure.TensorStructure(dtypes.string, [])
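# Hedged usage sketch (not part of the original library): graph-mode reading of
# a finite stream, complementing the eager example in the class docstring. The
# default stream name is a hypothetical placeholder, and
# dataset_ops.make_one_shot_iterator is assumed to be available in this
# TensorFlow version.
def _example_read_stream_once(stream_name="my_stream"):
  """Returns a get_next op yielding each record once before OutOfRangeError."""
  dataset = KinesisDataset(stream_name, read_indefinitely=False)
  iterator = dataset_ops.make_one_shot_iterator(dataset)
  return iterator.get_next()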
|
tensorflow-master
|
tensorflow/contrib/kinesis/python/ops/kinesis_dataset_ops.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python helper for loading kinesis ops and kernels."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.util import loader
from tensorflow.python.platform import resource_loader
_dataset_ops = loader.load_op_library(
resource_loader.get_path_to_datafile("../../_dataset_ops.so"))
|
tensorflow-master
|
tensorflow/contrib/kinesis/python/ops/kinesis_op_loader.py
|
# -*- coding: utf-8 -*-
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of the Keras API meant to be a high-level API for TensorFlow.
This module is an alias for `tf.keras`, for backwards compatibility.
Detailed documentation and user guides are also available at
[keras.io](https://keras.io).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.keras.api.keras import *
try:
from tensorflow.contrib.keras import python # pylint: disable=g-import-not-at-top
del python
except ImportError:
pass
del absolute_import
del division
del print_function
|
tensorflow-master
|
tensorflow/contrib/keras/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
|
tensorflow-master
|
tensorflow/contrib/keras/api/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of the Keras API meant to be a high-level API for TensorFlow.
Detailed documentation and user guides are available at
[keras.io](https://keras.io).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.keras.api.keras import activations
from tensorflow.contrib.keras.api.keras import applications
from tensorflow.contrib.keras.api.keras import backend
from tensorflow.contrib.keras.api.keras import callbacks
from tensorflow.contrib.keras.api.keras import constraints
from tensorflow.contrib.keras.api.keras import datasets
from tensorflow.contrib.keras.api.keras import initializers
from tensorflow.contrib.keras.api.keras import layers
from tensorflow.contrib.keras.api.keras import losses
from tensorflow.contrib.keras.api.keras import metrics
from tensorflow.contrib.keras.api.keras import models
from tensorflow.contrib.keras.api.keras import optimizers
from tensorflow.contrib.keras.api.keras import preprocessing
from tensorflow.contrib.keras.api.keras import regularizers
from tensorflow.contrib.keras.api.keras import utils
from tensorflow.contrib.keras.api.keras import wrappers
del absolute_import
del division
del print_function
|
tensorflow-master
|
tensorflow/contrib/keras/api/keras/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras built-in activation functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Activation functions.
from tensorflow.python.keras.activations import elu
from tensorflow.python.keras.activations import hard_sigmoid
from tensorflow.python.keras.activations import linear
from tensorflow.python.keras.activations import relu
from tensorflow.python.keras.activations import selu
from tensorflow.python.keras.activations import sigmoid
from tensorflow.python.keras.activations import softmax
from tensorflow.python.keras.activations import softplus
from tensorflow.python.keras.activations import softsign
from tensorflow.python.keras.activations import tanh
# Auxiliary utils.
# pylint: disable=g-bad-import-order
from tensorflow.python.keras.activations import deserialize
from tensorflow.python.keras.activations import serialize
from tensorflow.python.keras.activations import get
del absolute_import
del division
del print_function
|
tensorflow-master
|
tensorflow/contrib/keras/api/keras/activations/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras built-in metrics functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Metrics functions.
from tensorflow.python.keras.metrics import binary_accuracy
from tensorflow.python.keras.metrics import binary_crossentropy
from tensorflow.python.keras.metrics import categorical_accuracy
from tensorflow.python.keras.metrics import categorical_crossentropy
from tensorflow.python.keras.metrics import cosine_similarity
from tensorflow.python.keras.metrics import hinge
from tensorflow.python.keras.metrics import kullback_leibler_divergence
from tensorflow.python.keras.metrics import mean_absolute_error
from tensorflow.python.keras.metrics import mean_absolute_percentage_error
from tensorflow.python.keras.metrics import mean_squared_error
from tensorflow.python.keras.metrics import mean_squared_logarithmic_error
from tensorflow.python.keras.metrics import poisson
from tensorflow.python.keras.metrics import sparse_categorical_crossentropy
from tensorflow.python.keras.metrics import sparse_top_k_categorical_accuracy
from tensorflow.python.keras.metrics import squared_hinge
from tensorflow.python.keras.metrics import top_k_categorical_accuracy
# Auxiliary utils.
# pylint: disable=g-bad-import-order
from tensorflow.python.keras.metrics import deserialize
from tensorflow.python.keras.metrics import serialize
from tensorflow.python.keras.metrics import get
del absolute_import
del division
del print_function
|
tensorflow-master
|
tensorflow/contrib/keras/api/keras/metrics/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras built-in loss functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Loss functions.
from tensorflow.python.keras.losses import binary_crossentropy
from tensorflow.python.keras.losses import categorical_crossentropy
from tensorflow.python.keras.losses import categorical_hinge
from tensorflow.python.keras.losses import cosine_similarity
from tensorflow.python.keras.losses import hinge
from tensorflow.python.keras.losses import kullback_leibler_divergence
from tensorflow.python.keras.losses import logcosh
from tensorflow.python.keras.losses import mean_absolute_error
from tensorflow.python.keras.losses import mean_absolute_percentage_error
from tensorflow.python.keras.losses import mean_squared_error
from tensorflow.python.keras.losses import mean_squared_logarithmic_error
from tensorflow.python.keras.losses import poisson
from tensorflow.python.keras.losses import sparse_categorical_crossentropy
from tensorflow.python.keras.losses import squared_hinge
# Auxiliary utils.
# pylint: disable=g-bad-import-order
from tensorflow.python.keras.losses import deserialize
from tensorflow.python.keras.losses import serialize
from tensorflow.python.keras.losses import get
del absolute_import
del division
del print_function
|
tensorflow-master
|
tensorflow/contrib/keras/api/keras/losses/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrappers for Keras models, providing compatibility with other frameworks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.keras.api.keras.wrappers import scikit_learn
del absolute_import
del division
del print_function
|
tensorflow-master
|
tensorflow/contrib/keras/api/keras/wrappers/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras scikit-learn API wrapper."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras.wrappers.scikit_learn import KerasClassifier
from tensorflow.python.keras.wrappers.scikit_learn import KerasRegressor
del absolute_import
del division
del print_function
|
tensorflow-master
|
tensorflow/contrib/keras/api/keras/wrappers/scikit_learn/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras layers API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Generic layers.
# pylint: disable=g-bad-import-order
from tensorflow.python.keras.engine.input_spec import InputSpec
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.engine.input_layer import Input
from tensorflow.python.keras.engine.input_layer import InputLayer
# Advanced activations.
from tensorflow.python.keras.layers.advanced_activations import LeakyReLU
from tensorflow.python.keras.layers.advanced_activations import PReLU
from tensorflow.python.keras.layers.advanced_activations import ELU
from tensorflow.python.keras.layers.advanced_activations import ThresholdedReLU
# Convolution layers.
from tensorflow.python.keras.layers.convolutional import Conv1D
from tensorflow.python.keras.layers.convolutional import Conv2D
from tensorflow.python.keras.layers.convolutional import Conv3D
from tensorflow.python.keras.layers.convolutional import Conv2DTranspose
from tensorflow.python.keras.layers.convolutional import Conv3DTranspose
from tensorflow.python.keras.layers.convolutional import SeparableConv2D
# Convolution layer aliases.
from tensorflow.python.keras.layers.convolutional import Convolution1D
from tensorflow.python.keras.layers.convolutional import Convolution2D
from tensorflow.python.keras.layers.convolutional import Convolution3D
from tensorflow.python.keras.layers.convolutional import Convolution2DTranspose
from tensorflow.python.keras.layers.convolutional import Convolution3DTranspose
from tensorflow.python.keras.layers.convolutional import SeparableConvolution2D
# Image processing layers.
from tensorflow.python.keras.layers.convolutional import UpSampling1D
from tensorflow.python.keras.layers.convolutional import UpSampling2D
from tensorflow.python.keras.layers.convolutional import UpSampling3D
from tensorflow.python.keras.layers.convolutional import ZeroPadding1D
from tensorflow.python.keras.layers.convolutional import ZeroPadding2D
from tensorflow.python.keras.layers.convolutional import ZeroPadding3D
from tensorflow.python.keras.layers.convolutional import Cropping1D
from tensorflow.python.keras.layers.convolutional import Cropping2D
from tensorflow.python.keras.layers.convolutional import Cropping3D
# Convolutional-recurrent layers.
from tensorflow.python.keras.layers.convolutional_recurrent import ConvLSTM2D
# Core layers.
from tensorflow.python.keras.layers.core import Masking
from tensorflow.python.keras.layers.core import Dropout
from tensorflow.python.keras.layers.core import SpatialDropout1D
from tensorflow.python.keras.layers.core import SpatialDropout2D
from tensorflow.python.keras.layers.core import SpatialDropout3D
from tensorflow.python.keras.layers.core import Activation
from tensorflow.python.keras.layers.core import Reshape
from tensorflow.python.keras.layers.core import Permute
from tensorflow.python.keras.layers.core import Flatten
from tensorflow.python.keras.layers.core import RepeatVector
from tensorflow.python.keras.layers.core import Lambda
from tensorflow.python.keras.layers.core import Dense
from tensorflow.python.keras.layers.core import ActivityRegularization
# Embedding layers.
from tensorflow.python.keras.layers.embeddings import Embedding
# Locally-connected layers.
from tensorflow.python.keras.layers.local import LocallyConnected1D
from tensorflow.python.keras.layers.local import LocallyConnected2D
# Merge layers.
from tensorflow.python.keras.layers.merge import Add
from tensorflow.python.keras.layers.merge import Multiply
from tensorflow.python.keras.layers.merge import Average
from tensorflow.python.keras.layers.merge import Maximum
from tensorflow.python.keras.layers.merge import Concatenate
from tensorflow.python.keras.layers.merge import Dot
from tensorflow.python.keras.layers.merge import add
from tensorflow.python.keras.layers.merge import multiply
from tensorflow.python.keras.layers.merge import average
from tensorflow.python.keras.layers.merge import maximum
from tensorflow.python.keras.layers.merge import concatenate
from tensorflow.python.keras.layers.merge import dot
# Noise layers.
from tensorflow.python.keras.layers.noise import AlphaDropout
from tensorflow.python.keras.layers.noise import GaussianNoise
from tensorflow.python.keras.layers.noise import GaussianDropout
# Normalization layers.
from tensorflow.python.keras.layers.normalization import BatchNormalization
# Pooling layers.
from tensorflow.python.keras.layers.pooling import MaxPooling1D
from tensorflow.python.keras.layers.pooling import MaxPooling2D
from tensorflow.python.keras.layers.pooling import MaxPooling3D
from tensorflow.python.keras.layers.pooling import AveragePooling1D
from tensorflow.python.keras.layers.pooling import AveragePooling2D
from tensorflow.python.keras.layers.pooling import AveragePooling3D
from tensorflow.python.keras.layers.pooling import GlobalAveragePooling1D
from tensorflow.python.keras.layers.pooling import GlobalAveragePooling2D
from tensorflow.python.keras.layers.pooling import GlobalAveragePooling3D
from tensorflow.python.keras.layers.pooling import GlobalMaxPooling1D
from tensorflow.python.keras.layers.pooling import GlobalMaxPooling2D
from tensorflow.python.keras.layers.pooling import GlobalMaxPooling3D
# Pooling layer aliases.
from tensorflow.python.keras.layers.pooling import MaxPool1D
from tensorflow.python.keras.layers.pooling import MaxPool2D
from tensorflow.python.keras.layers.pooling import MaxPool3D
from tensorflow.python.keras.layers.pooling import AvgPool1D
from tensorflow.python.keras.layers.pooling import AvgPool2D
from tensorflow.python.keras.layers.pooling import AvgPool3D
from tensorflow.python.keras.layers.pooling import GlobalAvgPool1D
from tensorflow.python.keras.layers.pooling import GlobalAvgPool2D
from tensorflow.python.keras.layers.pooling import GlobalAvgPool3D
from tensorflow.python.keras.layers.pooling import GlobalMaxPool1D
from tensorflow.python.keras.layers.pooling import GlobalMaxPool2D
from tensorflow.python.keras.layers.pooling import GlobalMaxPool3D
# Recurrent layers.
from tensorflow.python.keras.layers.recurrent import SimpleRNN
from tensorflow.python.keras.layers.recurrent import GRU
from tensorflow.python.keras.layers.recurrent import LSTM
# Wrapper functions
from tensorflow.python.keras.layers.wrappers import Wrapper
from tensorflow.python.keras.layers.wrappers import Bidirectional
from tensorflow.python.keras.layers.wrappers import TimeDistributed
del absolute_import
del division
del print_function
|
tensorflow-master
|
tensorflow/contrib/keras/api/keras/layers/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras built-in constraints functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Constraints functions / callable classes.
from tensorflow.python.keras.constraints import Constraint
from tensorflow.python.keras.constraints import max_norm
from tensorflow.python.keras.constraints import MaxNorm
from tensorflow.python.keras.constraints import min_max_norm
from tensorflow.python.keras.constraints import MinMaxNorm
from tensorflow.python.keras.constraints import non_neg
from tensorflow.python.keras.constraints import NonNeg
from tensorflow.python.keras.constraints import unit_norm
from tensorflow.python.keras.constraints import UnitNorm
# Auxiliary utils.
# pylint: disable=g-bad-import-order
from tensorflow.python.keras.constraints import deserialize
from tensorflow.python.keras.constraints import serialize
from tensorflow.python.keras.constraints import get
del absolute_import
del division
del print_function
|
tensorflow-master
|
tensorflow/contrib/keras/api/keras/constraints/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras callback classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras.callbacks import BaseLogger
from tensorflow.python.keras.callbacks import Callback
from tensorflow.python.keras.callbacks import CSVLogger
from tensorflow.python.keras.callbacks import EarlyStopping
from tensorflow.python.keras.callbacks import History
from tensorflow.python.keras.callbacks import LambdaCallback
from tensorflow.python.keras.callbacks import LearningRateScheduler
from tensorflow.python.keras.callbacks import ModelCheckpoint
from tensorflow.python.keras.callbacks import ProgbarLogger
from tensorflow.python.keras.callbacks import ReduceLROnPlateau
from tensorflow.python.keras.callbacks import RemoteMonitor
from tensorflow.python.keras.callbacks import TensorBoard
from tensorflow.python.keras.callbacks import TerminateOnNaN
del absolute_import
del division
del print_function
|
tensorflow-master
|
tensorflow/contrib/keras/api/keras/callbacks/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras built-in datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.keras.api.keras.datasets import boston_housing
from tensorflow.contrib.keras.api.keras.datasets import cifar10
from tensorflow.contrib.keras.api.keras.datasets import cifar100
from tensorflow.contrib.keras.api.keras.datasets import imdb
from tensorflow.contrib.keras.api.keras.datasets import mnist
from tensorflow.contrib.keras.api.keras.datasets import reuters
del absolute_import
del division
del print_function
|
tensorflow-master
|
tensorflow/contrib/keras/api/keras/datasets/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""MNIST handwritten digits classification dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras.datasets.mnist import load_data
del absolute_import
del division
del print_function
|
tensorflow-master
|
tensorflow/contrib/keras/api/keras/datasets/mnist/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Boston housing price regression dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras.datasets.boston_housing import load_data
del absolute_import
del division
del print_function
|
tensorflow-master
|
tensorflow/contrib/keras/api/keras/datasets/boston_housing/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Reuters newswire topic classification dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras.datasets.reuters import get_word_index
from tensorflow.python.keras.datasets.reuters import load_data
del absolute_import
del division
del print_function
|
tensorflow-master
|
tensorflow/contrib/keras/api/keras/datasets/reuters/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""CIFAR100 small image classification dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras.datasets.cifar100 import load_data
del absolute_import
del division
del print_function
|
tensorflow-master
|
tensorflow/contrib/keras/api/keras/datasets/cifar100/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""IMDB movie review sentiment classification dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras.datasets.imdb import get_word_index
from tensorflow.python.keras.datasets.imdb import load_data
del absolute_import
del division
del print_function
|
tensorflow-master
|
tensorflow/contrib/keras/api/keras/datasets/imdb/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""CIFAR10 small image classification dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras.datasets.cifar10 import load_data
del absolute_import
del division
del print_function
|
tensorflow-master
|
tensorflow/contrib/keras/api/keras/datasets/cifar10/__init__.py
|