Columns: python_code (string, lengths 0 to 1.02M), repo_name (string, lengths 9 to 48), file_path (string, lengths 5 to 114)
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilties for V2 control flow.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.core.framework import attr_value_pb2 from tensorflow.python.eager import context from tensorflow.python.eager import function from tensorflow.python.framework import ops from tensorflow.python.framework.func_graph import FuncGraph from tensorflow.python.ops import control_flow_util from tensorflow.python.util import tf_contextlib class CondBranchFuncGraph(FuncGraph): """FuncGraph for branches of tf.cond(). This is used to distinguish cond branches from other functions. """ pass class WhileCondFuncGraph(FuncGraph): """FuncGraph for the condition of tf.while_loop(). This is used to distinguish while conditions from other functions. """ pass class WhileBodyFuncGraph(FuncGraph): """FuncGraph for the body of tf.while_loop(). This is used to distinguish while bodies from other functions. """ pass def in_defun(): """Returns if the current graph is, or is nested in, a defun.""" if context.executing_eagerly(): return False graph = ops.get_default_graph() while (isinstance(graph, CondBranchFuncGraph) or isinstance(graph, WhileBodyFuncGraph)): graph = graph.outer_graph return isinstance(graph, FuncGraph) def create_new_tf_function(func_graph): """Converts func_graph to a TF_Function and adds it to the current graph. Args: func_graph: FuncGraph Returns: The name of the new TF_Function. """ func = function._EagerDefinedFunction( # pylint: disable=protected-access func_graph.name, func_graph, func_graph.inputs, func_graph.outputs, {}) func.add_to_graph(func_graph.outer_graph) return func_graph.name def unique_fn_name(scope, name): """Returns a unique name to use for a control flow function. Args: scope: A name scope string. name: An identifier for this function (e.g. "true", "body"). Returns: A string, the name to use for the function. """ return ("%s%s_%s" % (scope, name, ops.uid())).replace("/", "_") def unique_grad_fn_name(forward_name): return "%s_grad_%s" % (forward_name, ops.uid()) def maybe_set_lowering_attr(op): """Sets the flag to enable lowering on `op` if necessary. Lowering allows cond_v2 and while_v2 to avoid some of the limitations of Functions, allowing users to specify devices & colocation inside of cond_v2 and while_v2 input functions, and enabling non-strict evaluation & partial pruning. This brings v2 control flow closer to feature parity with v1 control flow. However, we do not lower in the following cases: - When the `If` or `While` ops are in the XLA context. Because it is easier for XLA to apply its own optimizations when dealing with un-lowered control flow operators than with low-level control flow primitives. 
- When the eager execution context specifies the executor of functions to be the single threaded executor (see context.function_executor_type()). Because the single threaded executor does not support v1 control flow ops. Args: op: An `If` or `While` Operation. """ if (not control_flow_util.GraphOrParentsInXlaContext(op.graph) and context.context().function_call_options.executor_type != "SINGLE_THREADED_EXECUTOR"): # pylint: disable=protected-access op._set_attr("_lower_using_switch_merge", attr_value_pb2.AttrValue(b=True)) # pylint: enable=protected-access def maybe_propagate_compile_time_consts_in_xla(op): """Tells XLA whether to propagate compile-time consts in the loop body. This is needed to make compile time constants available to ops, for example `max_num_elements` in `EmptyTensorList`, inside the loop body. Ideally this would always be turned on, but that doesn't work with legacy functionalized while_loops. Args: op: A `While` Operation. """ if control_flow_util.GraphOrParentsInXlaContext(op.graph): # pylint: disable=protected-access op._set_attr("_xla_propagate_compile_time_consts", attr_value_pb2.AttrValue(b=True)) # pylint: enable=protected-access def resource_input_index(tensor_name, input_names, node_defs, functions): """Returns the index of the input corresponding to `tensor_name`. This method is used to find the corresponding index of an arbitrary resource tensor in a function (the function could be a loop body). We assume that resource handles are never created in functions, so that every resource tensor can be traced back to a function input. The awkward signature of this method is to make it work with both FuncGraphs and FunctionDefs. This is so we can recurse on function call ops without building the corresponding FuncGraph (note that even if a FuncGraph for a FunctionDef already exists, the input/output/node names may have been changed when the FuncGraph was serialized to the FunctionDef, which makes it unusable with this algorithm). Args: tensor_name: the name of the resource tensor to be resolved to an input. input_names: a list of the names of all inputs to the function. node_defs: a dict mapping op name -> NodeDef for every op in the function. functions: a dict mapping function name -> _EagerDefinedFunction. Returns: The index into input_names corresponding to `tensor_name`. """ while tensor_name not in input_names: # FunctionDefs and graphs use different tensor naming conventions. parts = tensor_name.split(":") if len(parts) == 3: op_name, _, output_idx = parts elif len(parts) == 2: op_name, output_idx = parts else: assert len(parts) == 1 op_name = parts[0] output_idx = 0 output_idx = int(output_idx) node_def = node_defs[op_name] if node_def.op == "While": # Captured resources occur at the same index in the lists of inputs and # outputs of a while op. So we lookup the input of `tensor.op` at the # same index as the index of `tensor` in the `tensor.op.outputs`. tensor_name = node_def.input[output_idx] elif node_def.op in ("PartitionedCall", "StatefulPartitionedCall"): # Functions output any captured resource tensors used by their # gradients. `tensor_name` is one of these outputs from a nested # function call, so recursively find the corresponding input in the # nested FunctionDef. 
func_name = node_def.attr["f"].func.name fdef = functions[func_name].definition output_arg_name = fdef.signature.output_arg[output_idx].name output_tensor_name = fdef.ret[output_arg_name] input_index = resource_input_index( output_tensor_name, [arg.name for arg in fdef.signature.input_arg], {ndef.name: ndef for ndef in fdef.node_def}, functions) tensor_name = node_def.input[input_index] else: # We assume there are no other ops types that will "forward" resource # handles like this, so all other handles must have been created by the # op. (Note that cond_v2 wraps resource handle outputs in optionals, # which we'll end up accumulating). raise ValueError("Taking gradient of a while loop which creates " "a resource in its body is not supported: %s" % op_name) return input_names.index(tensor_name) @tf_contextlib.contextmanager def clear_control_inputs(): """Clears the control inputs but preserves the ControlFlowContext. This is needed to preserve the XLAControlFlowControl when clearing control inputs for the gradient accumulators in while_v2. `ops.control_dependencies` does not allow that. Yields: A context manager in which the ops created will not have any control inputs by default but the control flow context is the same. """ # pylint: disable=protected-access control_flow_context = ops.get_default_graph()._get_control_flow_context() with ops.control_dependencies(None): ops.get_default_graph()._set_control_flow_context(control_flow_context) yield # pylint: enable=protected-access
tensorflow-master
tensorflow/python/ops/control_flow_util_v2.py
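A note on the row above: resource_input_index depends on the two tensor-naming conventions TensorFlow uses, "op:output_idx" in graphs and "op:arg_name:output_idx" in FunctionDefs. The standalone sketch below isolates just that parsing step; the helper name is illustrative and not part of the TensorFlow API.

def _parse_tensor_name(tensor_name):
    """Split a tensor name into (op_name, output_index).

    Graphs name tensors "op:output_idx", FunctionDefs use
    "op:arg_name:output_idx", and a bare "op" implies output 0,
    mirroring the branching at the top of resource_input_index.
    """
    parts = tensor_name.split(":")
    if len(parts) == 3:
        op_name, _, output_idx = parts      # FunctionDef convention
    elif len(parts) == 2:
        op_name, output_idx = parts         # Graph convention
    else:
        op_name, output_idx = parts[0], 0   # Bare op name, first output implied
    return op_name, int(output_idx)

assert _parse_tensor_name("while/Identity:0") == ("while/Identity", 0)
assert _parse_tensor_name("body/add:z:1") == ("body/add", 1)
assert _parse_tensor_name("x") == ("x", 0)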
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for compute_gradient. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.eager import backprop from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import custom_gradient from tensorflow.python.ops import \ gradient_checker_v2 as gradient_checker from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn_ops # needs this to register gradient for SoftmaxCrossEntropyWithLogits: import tensorflow.python.ops.nn_grad # pylint: disable=unused-import from tensorflow.python.platform import test from tensorflow.python.platform import tf_logging def _random_complex(shape, dtype): data = np.random.random_sample(shape).astype(dtype.as_numpy_dtype) if dtype.is_complex: data.imag = np.random.random_sample(shape) return data @test_util.run_all_in_graph_and_eager_modes class GradientCheckerTest(test.TestCase): def testAddSimple(self): size = (2, 3) x1 = constant_op.constant(2.0, shape=size, name="x1") x2 = constant_op.constant(3.0, shape=size, name="x2") error = gradient_checker.max_error(*gradient_checker.compute_gradient( lambda x1: math_ops.add(x1, x2), [x1])) tf_logging.info("x1 error = %f", error) assert error < 1e-4 def testAddCustomized(self): size = (2, 3) x1 = constant_op.constant( 2.0, shape=size, dtype=dtypes.float64, name="x1") x2 = np.asarray(np.arange(6, dtype=np.float64).reshape(2, 3)) # checkint gradients for x2 using a special delta error = gradient_checker.max_error(*gradient_checker.compute_gradient( lambda x2: math_ops.add(x1, x2), [x2], delta=1e-2)) tf_logging.info("x2 error = %f", error) assert error < 1e-10 def testGather(self): def f(params): index_values = [1, 3] indices = constant_op.constant(index_values, name="i") return array_ops.gather(params, indices, name="y") p_shape = (4, 2) p_size = 8 params = constant_op.constant( np.arange(p_size).astype(np.float), shape=p_shape, name="p") error = gradient_checker.max_error(*gradient_checker.compute_gradient( f, [params])) tf_logging.info("gather error = %f", error) assert error < 1e-4 def testNestedGather(self): def f(params): index_values = [1, 3, 5, 6] indices = constant_op.constant(index_values, name="i") y = array_ops.gather(params, indices, name="y") index_values2 = [0, 2] indices2 = constant_op.constant(index_values2, name="i2") return array_ops.gather(y, indices2, name="y2") p_shape = (8, 2) p_size = 16 params = constant_op.constant( np.arange(p_size).astype(np.float), shape=p_shape, name="p") error = gradient_checker.max_error(*gradient_checker.compute_gradient( f, [params])) tf_logging.info("nested gather error = %f", error) assert error < 1e-4 def 
testComplexMul(self): c = constant_op.constant(5 + 7j, dtype=dtypes.complex64) def f(x): return c * x x_shape = c.shape x_dtype = c.dtype x = constant_op.constant(_random_complex(x_shape, x_dtype)) analytical, numerical = gradient_checker.compute_gradient( f, [x]) correct = np.array([[5, 7], [-7, 5]]) self.assertAllEqual(correct, analytical[0]) self.assertAllClose(correct, numerical[0], rtol=1e-4) x = constant_op.constant(_random_complex(x_shape, x_dtype)) self.assertLess( gradient_checker.max_error(*gradient_checker.compute_gradient( f, [x])), 3e-4) def testComplexConj(self): def f(x): return math_ops.conj(x) x_shape = () x_dtype = dtypes.complex64 x = constant_op.constant(_random_complex(x_shape, x_dtype)) analytical, numerical = gradient_checker.compute_gradient( f, [x]) correct = np.array([[1, 0], [0, -1]]) self.assertAllEqual(correct, analytical[0]) self.assertAllClose(correct, numerical[0], rtol=2e-5) x = constant_op.constant(_random_complex(x_shape, x_dtype)) self.assertLess( gradient_checker.max_error(*gradient_checker.compute_gradient( f, [x])), 2e-5) def testEmptySucceeds(self): def f(x): return array_ops.identity(x) x = constant_op.constant(np.random.random_sample((0, 3)), dtype=dtypes.float32) for grad in gradient_checker.compute_gradient(f, [x]): self.assertEqual(grad[0].shape, (0, 0)) error = gradient_checker.max_error(*gradient_checker.compute_gradient( f, [x])) self.assertEqual(error, 0) def testEmptyFails(self): @custom_gradient.custom_gradient def id_bad_grad(x): y = array_ops.identity(x) def grad_fn(dy): # dx = constant_op.constant(np.zeros((1, 4)), dtype=dtypes.float32) dx = array_ops.transpose(dy) return dx return y, grad_fn def f(x): return id_bad_grad(x) x = constant_op.constant(np.random.random_sample((0, 3)), dtype=dtypes.float32) bad = r"Empty gradient has wrong shape: expected \(0, 3\), got \(3, 0\)" with self.assertRaisesRegexp(ValueError, bad): gradient_checker.compute_gradient(f, [x]) def testNaNGradFails(self): @custom_gradient.custom_gradient def id_nan_grad(x): y = array_ops.identity(x) def grad_fn(dy): dx = np.nan * dy # dx = dy return dx return y, grad_fn def f(x): return id_nan_grad(x) x = constant_op.constant(np.random.random_sample((1, 1)), dtype=dtypes.float32) error = gradient_checker.max_error(*gradient_checker.compute_gradient( f, [x])) # Typical test would assert error < max_err, so assert this test would # raise AssertionError, since NaN is not < 1.0. with self.assertRaisesRegexp(AssertionError, "False is not true"): self.assertTrue(error < 1.0) def testGradGrad(self): def f(x): with backprop.GradientTape() as tape: tape.watch(x) y = math_ops.square(x) z = math_ops.square(y) return tape.gradient(z, x) analytical, numerical = gradient_checker.compute_gradient(f, [2.0]) self.assertAllEqual([[[48.]]], analytical) self.assertAllClose([[[48.]]], numerical, rtol=1e-4) @test_util.run_all_in_graph_and_eager_modes class MiniMNISTTest(test.TestCase): # Gradient checker for MNIST. 
def _BuildAndTestMiniMNIST(self, param_index, tag): # Fix seed to avoid occasional flakiness np.random.seed(6) # Hyperparameters batch = 3 inputs = 16 features = 32 classes = 10 # Define the parameters inp_data = np.random.random_sample(inputs * batch) hidden_weight_data = np.random.randn(inputs * features) / np.sqrt(inputs) hidden_bias_data = np.random.random_sample(features) sm_weight_data = np.random.randn(features * classes) / np.sqrt(features) sm_bias_data = np.random.random_sample(classes) # special care for labels since they need to be normalized per batch label_data = np.random.random(batch * classes).reshape((batch, classes)) s = label_data.sum(axis=1) label_data /= s[:, None] # We treat the inputs as "parameters" here inp = constant_op.constant( inp_data.tolist(), shape=[batch, inputs], dtype=dtypes.float64, name="inp") hidden_weight = constant_op.constant( hidden_weight_data.tolist(), shape=[inputs, features], dtype=dtypes.float64, name="hidden_weight") hidden_bias = constant_op.constant( hidden_bias_data.tolist(), shape=[features], dtype=dtypes.float64, name="hidden_bias") softmax_weight = constant_op.constant( sm_weight_data.tolist(), shape=[features, classes], dtype=dtypes.float64, name="softmax_weight") softmax_bias = constant_op.constant( sm_bias_data.tolist(), shape=[classes], dtype=dtypes.float64, name="softmax_bias") # List all the parameter so that we can test them one at a time all_params = [ inp, hidden_weight, hidden_bias, softmax_weight, softmax_bias ] # Now, Building MNIST def f(inp, hidden_weight, hidden_bias, softmax_weight, softmax_bias): features = nn_ops.relu( nn_ops.xw_plus_b(inp, hidden_weight, hidden_bias), name="features") logits = nn_ops.xw_plus_b( features, softmax_weight, softmax_bias, name="logits") labels = constant_op.constant( label_data.tolist(), shape=[batch, classes], dtype=dtypes.float64, name="labels") cost = nn_ops.softmax_cross_entropy_with_logits( labels=labels, logits=logits, name="cost") return cost def f_restricted(x): xs = all_params i = param_index # use x for the i-th parameter xs = xs[0:i]+[x]+xs[i+1:] return f(*xs) # Test the gradients. err = gradient_checker.max_error(*gradient_checker.compute_gradient( f_restricted, [all_params[param_index]], delta=1e-5)) tf_logging.info("Mini MNIST: %s gradient error = %g", tag, err) return err def testInputGradient(self): self.assertLess(self._BuildAndTestMiniMNIST(0, "input"), 1e-8) def testHiddenWeightGradient(self): self.assertLess(self._BuildAndTestMiniMNIST(1, "hidden_weight"), 1e-8) def testHiddenBiasGradient(self): self.assertLess(self._BuildAndTestMiniMNIST(2, "hidden_bias"), 1e-8) def testSoftmaxWeightGradient(self): self.assertLess(self._BuildAndTestMiniMNIST(3, "softmax_weight"), 1e-8) def testSoftmaxBiasGradient(self): self.assertLess(self._BuildAndTestMiniMNIST(4, "softmax_bias"), 1e-8) if __name__ == "__main__": test.main()
tensorflow-master
tensorflow/python/ops/gradient_checker_v2_test.py
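Every test in the row above follows the same check: build analytical and numerical Jacobians with compute_gradient and assert that their largest difference is small. Below is a minimal sketch of that pattern against the public tf.test.compute_gradient wrapper, which fronts gradient_checker_v2; the function, input, and tolerance are illustrative choices, not values taken from the tests.

import numpy as np
import tensorflow as tf

def f(x):
    # Any differentiable function of the checked inputs works here.
    return tf.reduce_sum(tf.square(x))

x = tf.constant([[1.0, 2.0], [3.0, 4.0]], dtype=tf.float64)

# Returns (analytical_jacobians, numerical_jacobians), one entry per input.
analytical, numerical = tf.test.compute_gradient(f, [x])

# max_error in gradient_checker_v2 reduces this comparison to one scalar.
max_err = max(np.max(np.abs(a - n)) for a, n in zip(analytical, numerical))
assert max_err < 1e-4, max_err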
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= """cond_v2 and gradient. This is a version of cond that emits a single If op, as well as the gradient function for If ops produced by cond_v2. This will eventually replace the current tf.cond implementation once it reaches feature and performance parity. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections from tensorflow.python.framework import dtypes from tensorflow.python.framework import func_graph as func_graph_module from tensorflow.python.framework import function_def_to_graph from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_util from tensorflow.python.ops import control_flow_util_v2 as util from tensorflow.python.ops import custom_gradient from tensorflow.python.ops import default_gradient from tensorflow.python.ops import gen_dataset_ops from tensorflow.python.ops import gen_functional_ops from tensorflow.python.ops import gen_resource_variable_ops from tensorflow.python.ops import gradients_util from tensorflow.python.ops import math_ops from tensorflow.python.util import nest # NOTE(skyewm): TensorFlow uses protected class methods and fields to signify # that they aren't part of the official public API. These protected members # often need to be used by implementation code however. Rather than litter the # code with pylint comments, we ignore protected access violations for # readability. # pylint: disable=protected-access _COND = 1 _CASE = 2 def cond_v2(pred, true_fn, false_fn, name="cond"): """Like tf.cond, except emits a single If op.""" if isinstance(pred, bool): raise TypeError("pred must not be a Python bool", pred) if not name: name = "cond" with ops.name_scope(name) as scope: true_name = util.unique_fn_name(scope, "true") false_name = util.unique_fn_name(scope, "false") # Automatic control dependencies are added in defuns, but not in v1 # graphs. Propagate that behavior here. 
add_control_dependencies = ops.get_default_graph()._add_control_dependencies pred = ops.convert_to_tensor(pred) true_graph = func_graph_module.func_graph_from_py_func( true_name, true_fn, [], {}, func_graph=util.CondBranchFuncGraph( true_name, collections=ops.get_default_graph()._collections), # pylint: disable=protected-access add_control_dependencies=add_control_dependencies, op_return_value=pred) false_graph = func_graph_module.func_graph_from_py_func( false_name, false_fn, [], {}, func_graph=util.CondBranchFuncGraph( false_name, collections=ops.get_default_graph()._collections), # pylint: disable=protected-access add_control_dependencies=add_control_dependencies, op_return_value=pred) verify_captures(_COND, [true_graph, false_graph]) return _build_cond(pred, true_graph, false_graph, true_graph.external_captures, false_graph.external_captures, name=scope) @ops.RegisterGradient("If") def _IfGrad(op, *grads): # pylint: disable=invalid-name """The gradient of an If op produced by cond_v2.""" # Get the if operator (this logic handles the case where op is a MockOp) if_op = op.outputs[0].op true_graph, false_graph = _get_func_graphs(if_op) # Note: op.graph != ops.get_default_graph() when we are computing the gradient # of a nested cond. assert true_graph.outer_graph == if_op.graph assert false_graph.outer_graph == if_op.graph # Create grad functions that compute the gradient of the true/false forward # graphs. These functions will capture tensors from the forward pass # functions. true_grad_graph = _create_grad_func( true_graph, grads, util.unique_grad_fn_name(true_graph.name)) false_grad_graph = _create_grad_func( false_graph, grads, util.unique_grad_fn_name(false_graph.name)) if (true_grad_graph.op_needs_rewrite or false_grad_graph.op_needs_rewrite): # Modify 'op' to output the intermediates needed by the grad functions. Note # that all needed intermediates are wrapped in optionals. Each optional # intermediate output will have a value iff its corresponding branch is # taken. # NOTE(skyewm): if there are any active sessions, this modification to `op` # may make them unrunnable! if control_flow_util.GraphOrParentsInXlaContext(ops.get_default_graph()): # XLA does not yet support optionals, so output intermediates directly and # make them match via FakeParams, which can be converted to zeros in XLA. # TODO(skyewm,jpienaar): can XLA support optionals? true_intermediates = true_grad_graph.xla_intermediates false_intermediates = false_grad_graph.xla_intermediates extra_true_outputs, extra_false_outputs = _make_intermediates_match_xla( [true_graph, false_graph], [true_intermediates, false_intermediates]) else: true_intermediates = true_grad_graph.wrapped_intermediates false_intermediates = false_grad_graph.wrapped_intermediates # Make outputs match by adding none optionals. extra_true_outputs, extra_false_outputs = _make_intermediates_match( [true_graph, false_graph], [true_intermediates, false_intermediates]) true_graph.outputs.extend(extra_true_outputs) false_graph.outputs.extend(extra_false_outputs) # TODO(skyewm): indicate it's an internal bug if this fails. 
_check_same_outputs(_COND, [true_graph, false_graph]) true_graph.name += "_rewritten" false_graph.name += "_rewritten" if_op._set_func_attr("then_branch", util.create_new_tf_function(true_graph)) if_op._set_func_attr("else_branch", util.create_new_tf_function(false_graph)) if_op._set_type_list_attr("Tout", true_graph.output_types) if_op._set_shape_list_attr("output_shapes", true_graph.output_shapes) if_op._add_outputs( [t.dtype for t in extra_true_outputs], [t.shape for t in extra_true_outputs]) # Resolve references to forward graph tensors in grad graphs and ensure # they are in-scope, i.e., belong to one of outer graphs of the grad graph. true_grad_inputs = _resolve_grad_inputs(true_graph, true_grad_graph) false_grad_inputs = _resolve_grad_inputs(false_graph, false_grad_graph) # This modifies true_grad_graph and false_grad_graph. _make_output_composite_tensors_match(_COND, [true_grad_graph, false_grad_graph]) outputs = _build_cond(if_op.inputs[0], true_grad_graph, false_grad_graph, true_grad_inputs, false_grad_inputs) # The predicate has no gradient. return [None] + outputs def _build_cond(pred, true_graph, false_graph, true_inputs, false_inputs, name=None): """Creates an If op from the specified predicate, branch functions and inputs. Note that this modifies true_graph and false_graph to make the inputs match, and to output all intermediates values so they're available for the gradient computation. true_graph and false_graph need not have the same input types, but they must have the same outpute types. Args: pred: boolean Tensor true_graph: FuncGraph false_graph: FuncGraph true_inputs: a list of Tensors to be passed to true_graph as input. false_inputs: a list of Tensors to be passed to false_graph as input. name: the name for the If op. Returns: A list of Tensors which are the outputs of the If op. Does not include added intermediate outputs. """ _make_indexed_slices_indices_types_match(_COND, [true_graph, false_graph]) _check_same_outputs(_COND, [true_graph, false_graph]) # Add inputs to true_graph and false_graph to make them match. Note that # this modifies true_graph and false_graph. cond_inputs = _make_inputs_match([true_graph, false_graph], [true_inputs, false_inputs]) # Create the If op. with ops.control_dependencies( list(true_graph.control_captures) + list(false_graph.control_captures)): tensors = gen_functional_ops._if( # pylint: disable=protected-access pred, cond_inputs, [t.dtype for t in true_graph.outputs], util.create_new_tf_function(true_graph), util.create_new_tf_function(false_graph), output_shapes=_get_output_shapes(true_graph.outputs, false_graph.outputs), name=name) # TODO(b/110167197) this approach requires cond_v2 to have at least 1 output if_op = tensors[0].op util.maybe_set_lowering_attr(if_op) util.maybe_propagate_compile_time_consts_in_xla(if_op) # Return identities for each output of the If op, rather than the output of # the If op directly. This makes pruning work if the output of cond() is # fetched: the lowering pass converts the If outputs into IdentityN outputs, # which if fetched will cause all ops in the taken branch to be run (since # it takes all merge ops as input). After lowering, each output identity op # will end up with only the appropriate merge op as input. # TODO(b/79984175): this doesn't have to be a tuple once we covert to the # correct output structure tensors = [array_ops.identity(t) for t in tensors] # Prevent fetching since the variant outputs can't be fetched directly. 
if_op.graph.prevent_fetching(if_op) return func_graph_module.pack_sequence_as(true_graph.structured_outputs, tensors) def _get_func_graphs(op): """Returns `FuncGraph`s for the input op branches. Args: op: The If or Case Operation. Returns: A tuple of the `FuncGraph`s of the then_branch and else_branch (all branches for Case). """ def _get_func_graph_for_branch(name_attr_list): """Generates and returns a FuncGraph for the given branch.""" inputs = op.inputs[1:] # First input is pred. input_shapes = [t.shape for t in inputs] fdef = op.graph._get_function(name_attr_list.name).definition # `op.graph` may not be the same as `ops.get_default_graph()` e.g. # in the case of nested if ops or when the gradient is being computed # from inside a Defun. We build the `func_graph` with `op.graph` as its # `outer_graph`. This resembles how the `FuncGraph` was built in the # forward pass. We need this so that we can resolve references to tensors # in `func_graph` from its gradient graph in `_resolve_grad_inputs`. with op.graph.as_default(): func_graph = function_def_to_graph.function_def_to_graph( fdef, input_shapes) for external_t, internal_t in zip(inputs, func_graph.inputs): custom_gradient.copy_handle_data(external_t, internal_t) func_graph.captures = collections.OrderedDict(zip(inputs, func_graph.inputs)) # Link the op so that the gradient code can use it. func_graph._forward_cond = op return func_graph if op.type == "If": return (_get_func_graph_for_branch(op.get_attr("then_branch")), _get_func_graph_for_branch(op.get_attr("else_branch"))) elif op.type == "Case": return [_get_func_graph_for_branch(branch_fn) for branch_fn in op.get_attr("branches")] else: raise ValueError("Unsupported op type: {}".format(op.type)) def _grad_fn(func_graph, grads): """The gradient function for each conditional branch. This function builds the gradient graph of the corresponding forward-pass conditional branch in `func_graph`. This is done by differentiating func_graph's outputs w.r.t. its inputs. Args: func_graph: FuncGraph. The corresponding forward-pass function. grads: The list of input gradient Tensors. Returns: The output gradient Tensors. """ # Filter out untrainable function outputs. # NOTE(skyewm): If we don't do this, the untrainable tensors can sometimes # cause _GradientsHelper to raise an exception (e.g. the implementation # doesn't expect 'ys' to contain boolean tensors). assert len(func_graph.outputs) == len(grads) ys = [] grad_ys = [] for y, grad_y in zip(func_graph.outputs, grads): if not gradients_util.IsTrainable(y): continue ys.append(y) grad_ys.append(grad_y) # Build the gradient graph. Note that this builds the gradient computation of # func_graph in the current graph, which requires capturing tensors from # func_graph. The captured func_graph tensors are resolved to external tensors # in _resolve_grad_inputs. result = gradients_util._GradientsHelper( ys, func_graph.inputs, grad_ys=grad_ys, src_graph=func_graph) # Functions can't return None; replace Nones with zero tensors. # TODO(b/80444525): don't return anything here and make _IfGrad return None if # both branches have zero gradient. 
for i in range(len(result)): if result[i] is None: if func_graph.inputs[i].dtype == dtypes.resource: result[i] = array_ops.zeros( gen_resource_variable_ops.variable_shape(func_graph.inputs[i]), dtype=default_gradient.get_zeros_dtype(func_graph.inputs[i])) else: result[i] = array_ops.zeros_like(func_graph.inputs[i]) return result def _create_grad_func(func_graph, grads, name): """Returns the FuncGraph representation of _grad_fn.""" return func_graph_module.func_graph_from_py_func( name, lambda: _grad_fn(func_graph, grads), [], {}, func_graph=_CondGradFuncGraph(name, func_graph)) def _resolve_grad_inputs(cond_graph, grad_graph): """Returns the tensors to pass as inputs to `grad_graph`. The `grad_graph` may have external references to 1. Its outer graph containing the input gradients. These references are kept as is. 2. Tensors in the forward pass graph. These tensors may not be "live" when the gradient is being computed. We replace such references by their corresponding tensor in `cond_graph.outer_graph`. In the case of nested control flow or functions, the gradient logic handling `grad_graph.outer_graph` will make sure the tensor from `cond_graph.outer_graph` is also correctly captured. Args: cond_graph: FuncGraph. The forward-pass function. grad_graph: FuncGraph. The gradients function. Returns: A list of inputs tensors to be passed to grad_graph. """ new_inputs = [] for t in grad_graph.external_captures: # `t` must either be in `grad_graph.outer_graph` or in the forward # `cond_graph`. if t.graph != grad_graph.outer_graph: assert t.graph == cond_graph # `internal_captures` are not treated as intermediates and hence not added # to If op outputs. So we get the outer tensor corresponding to those # from the list of `external_captures`. try: t = t.graph._forward_cond.outputs[t.graph.outputs.index(t)] except ValueError: index = t.graph.internal_captures.index(t) t = t.graph.external_captures[index] # Note: We rely on the capturing logic of the gradient If op graph to # correctly capture the tensors in `cond_graph.outer_graph`. Both cond_v2 # and while_v2 handle this while building their gradient functions. assert t.graph == cond_graph.outer_graph new_inputs.append(t) return new_inputs def _get_intermediates(func_graph): """Returns all tensors in `func_graph` that aren't inputs or outputs.""" intermediates = [] for op in func_graph.get_operations(): for t in op.outputs: if t in func_graph.inputs: continue if t in func_graph.outputs: continue intermediates.append(t) return intermediates def _make_intermediates_match(branch_graphs, branch_optionals): """Returns new optionals lists that have matching signatures. This is done by mirroring each list in the other using none optionals. There is no merging of like optionals. Args: branch_graphs: `list` of `FuncGraph`. branch_optionals: `list` of `list`s of optional `Tensor`s from other branch_graphs Returns: A `list` of `list`s of `Tensor`s for each branch_graph. Each list has the same number of `Tensor`s, all of which will be optionals of the same shape/type. """ new_branch_optionals = [] # Since the intermediates are optionals with dtype variant, we only need # enough room for the longest list of intermediates. 
intermediates_size = max(len(o) for o in branch_optionals) for i, branch_graph in enumerate(branch_graphs): other_optionals = _create_none_optionals( branch_graph, intermediates_size - len(branch_optionals[i])) new_branch_optionals.append(branch_optionals[i] + other_optionals) return new_branch_optionals def _make_intermediates_match_xla(branch_graphs, branch_intermediates): """Like _make_intermediates_match but for the XLA case.""" new_branch_intermediates = [] for i, branch_graph in enumerate(branch_graphs): other_fakeparams = _create_fakeparams( branch_graph, sum((bi for bi in branch_intermediates if bi is not branch_intermediates[i]), [])) num_preceding = sum(len(bi) for bi in branch_intermediates[:i]) new_branch_intermediates.append(other_fakeparams[:num_preceding] + branch_intermediates[i] + other_fakeparams[num_preceding:]) return new_branch_intermediates def _make_inputs_match(branch_graphs, branch_inputs): """Modifies branch_graphs so they have the same input signature. This method reorders and/or adds parameters to each graph in branch_graphs so they have the same input signature, and updates the 'inputs' and 'captured' fields of each graph accordingly. It uses the input tensors from the outer graph to avoid duplicating shared arguments. Args: branch_graphs: a `list` of `FuncGraph` branch_inputs: a `list` of `list`s of `Tensor`s in the outer graph. The inputs for the corresponding graph in `branch_graphs`. Returns: A new list of Tensors from the outer graph that are the new inputs for each branch_graph. This is a deduped version of `sum(branch_inputs)`. """ assert len(branch_graphs) == len(branch_inputs) new_inputs = set() for branch_in in branch_inputs: new_inputs |= set(branch_in) new_inputs = list(new_inputs) for branch_graph, branch_in in zip(branch_graphs, branch_inputs): branch_input_to_param = dict(zip(branch_in, branch_graph.inputs)) input_list = [] for in_t in new_inputs: param = branch_input_to_param.get(in_t, None) if param is None: param = _create_dummy_input(branch_graph, in_t) input_list.append(param) branch_graph.inputs = input_list # Rewrite the FuncGraphs' state to reflect the new inputs. branch_graph.captures = collections.OrderedDict( zip(new_inputs, branch_graph.inputs)) return new_inputs def _make_output_composite_tensors_match(op_type, branch_graphs): """Modifies each branch_graph's outputs to have the same output signature. Currently the only transformation implemented is turning a Tensor into an equivalent IndexedSlices if the other branch returns an IndexedSlices. Updates branch_graph.{outputs,structured_outputs} for each branch_graph in branch_graphs. Args: op_type: _COND or _CASE branch_graphs: `list` of `FuncGraph` Raises: TypeError: if a set of outputs cannot be rewritten. """ # Note: since this is only used for gradient graphs, we do not expect the # outputs to be structured (e.g. nested lists), and thus do not need to use # nest.flatten, etc. 
assert branch_graphs branch_outputs = [g.structured_outputs for g in branch_graphs] outputs_per_branch = list(len(outs) for outs in branch_outputs) assert len(set(outputs_per_branch)) == 1, outputs_per_branch for output_idx, branch_outs in enumerate(zip(*branch_outputs)): if len(set(type(out) for out in branch_outs)) == 1: continue if not any(isinstance(out, ops.IndexedSlices) for out in branch_outs): continue for branch_idx, branch_out in enumerate(branch_outs): if isinstance(branch_out, ops.IndexedSlices): continue elif isinstance(branch_out, ops.Tensor): with branch_graphs[branch_idx].as_default(): branch_outputs[branch_idx][output_idx] = math_ops._as_indexed_slices( branch_out) else: raise TypeError( "Cannot reconcile {op_name} {output_idx}-th outputs:\n" " outputs from all branches: {outputs}".format( op_name="tf.cond" if op_type == _COND else "tf.switch_case", output_idx=output_idx, outputs=branch_outs)) for branch_graph, branch_outs in zip(branch_graphs, branch_outputs): branch_graph.structured_outputs = branch_outs branch_graph.outputs = func_graph_module.flatten(branch_outs) def _make_indexed_slices_indices_types_match(op_type, branch_graphs): """Match dtype of IndexedSlices.indices in outputs of branch_graphs.""" assert branch_graphs indexed_slice_indices = [] current_index = 0 branch_outputs_flat_with_composites = [ nest.flatten(branch_graph.structured_outputs, expand_composites=False) for branch_graph in branch_graphs ] outs_per_branch = [len(outs) for outs in branch_outputs_flat_with_composites] assert len(set(outs_per_branch)) == 1, outs_per_branch # Store indices of IndexedSlices.indices in `indexed_slice_indices`. for output_idx, branch_outs in enumerate( zip(*branch_outputs_flat_with_composites)): if len(set(isinstance(out, ops.IndexedSlices) for out in branch_outs)) != 1: raise TypeError("Cannot reconcile tf.{op_name} {output_idx}-th outputs:\n" " branches returned: {outputs}".format( op_name="cond" if op_type == _COND else "switch_case", output_idx=output_idx, outputs=branch_outs)) if isinstance(branch_outs[0], ops.IndexedSlices): # indices is the second component of the composite tensor. indexed_slice_indices.append(current_index + 1) if nest.is_sequence_or_composite(branch_outs[0]): current_index += len(nest.flatten(branch_outs[0], expand_composites=True)) else: current_index += 1 if not indexed_slice_indices: return if current_index != len(branch_graphs[0].outputs): raise ValueError("Insufficient elements in branch_graphs[0].outputs.\n" "Expected: %i\n" "Actual: %i" % (current_index, len(branch_graphs[0].outputs))) # Cast indices with mismatching types to int64. for index in indexed_slice_indices: if any(bg.outputs[index].dtype not in (dtypes.int32, dtypes.int64) for bg in branch_graphs): raise TypeError("Type of IndexedSlices.indices must be int32 or int64. 
" "Found: %s" % str([bg.outputs[index].dtype for bg in branch_graphs])) if len(set(bg.outputs[index].dtype for bg in branch_graphs)) != 1: for branch_graph in branch_graphs: if branch_graph.outputs[index].dtype == dtypes.int32: with branch_graph.as_default(): branch_graph.outputs[index] = math_ops.cast( branch_graph.outputs[index], dtypes.int64) for branch_graph in branch_graphs: branch_graph.structured_outputs = func_graph_module.pack_sequence_as( branch_graph.structured_outputs, branch_graph.outputs) def _wrap_intermediates(func_graph, intermediates): with func_graph.as_default(): return [gen_dataset_ops.optional_from_value([t]) for t in intermediates] def _create_dummy_input(func_graph, template_tensor): """Creates tensors in func_graph to represent template_tensors. Args: func_graph: FuncGraph. template_tensor: a tensor in the outer graph. Returns: A tensor in func_graph. """ with func_graph.as_default(): return array_ops.placeholder( template_tensor.dtype, shape=template_tensor.shape) def _create_none_optionals(func_graph, n): """Creates `n` `None` optionals in func_graph. Args: func_graph: FuncGraph. n: `int` the number of `None` optionals to make. Returns: A list of tensors in func_graph. """ with func_graph.as_default(): return [gen_dataset_ops.optional_none() for _ in range(n)] def _create_fakeparams(func_graph, template_tensors): """Create FakeParams for the XLA case.""" with func_graph.as_default(): return [gen_functional_ops.fake_param(dtype=t.dtype, shape=t.shape) for t in template_tensors] def _check_same_outputs(op_type, graphs): """Raises an error if `graphs` have different outputs.""" def error(branch_idx, error_detail): raise TypeError( "{b0_name} and {bn_name} arguments to {op_name} must have the same " "number, type, and overall structure of return values.\n" "\n" "{b0_name} output: {b0_out}\n" "{bn_name} output: {bn_out}\n" "\n" "Error details:\n" "{detail}".format( b0_name="true_fn" if op_type == _COND else "branches[0]", bn_name=("false_fn" if op_type == _COND else "branches[{}]".format(branch_idx)), op_name="tf.cond" if op_type == _COND else "tf.switch_case", b0_out=graphs[0].structured_outputs, bn_out=graphs[branch_idx].structured_outputs, detail=error_detail)) for b in range(1, len(graphs)): try: nest.assert_same_structure( graphs[0].structured_outputs, graphs[b].structured_outputs, expand_composites=True) except (ValueError, TypeError) as e: error(b, str(e)) op_type_str = "cond" if op_type == _COND else "case" if len(graphs[0].outputs) != len(graphs[b].outputs): raise ValueError("Lengths of branch outputs of {op_type} must match.\n" "len(graphs[0].outputs): {len_0}\n" "len(graphs[{b}].outputs): {len_b}\n".format( op_type=op_type_str, len_0=len(graphs[0].outputs), b=b, len_b=len(graphs[b].outputs))) for b0_out, bn_out in zip(graphs[0].outputs, graphs[b].outputs): if b0_out.dtype != bn_out.dtype: error(b, "%s and %s have different types" % (b0_out, bn_out)) def _get_output_shapes(*branch_graph_outputs): output_shapes = [] for out_by_branch in zip(*branch_graph_outputs): shape = out_by_branch[0].shape for other_out in out_by_branch[1:]: shape = shape.most_specific_compatible_shape(other_out.shape) output_shapes.append(shape) return output_shapes def verify_captures(op_type, branch_graphs): """Verify that a branch's tensor is not accessed in another branch fn.""" # Note: It is technically not possible for lower-branch_index branches to # capture tensors from higher-branch_index branches, because of the order of # branch graph construction, but we check all for 
completeness and to # guard against potential future changes. other_branch_graphs = {g: i for i, g in enumerate(branch_graphs)} for i, branch_graph in enumerate(branch_graphs): for t in branch_graph.external_captures: if not isinstance(t, ops.EagerTensor) and t.graph in other_branch_graphs: branch_names = ["true_fn", "false_fn"] if op_type == _COND else [ "branch {}".format(bi) for bi in range(len(branch_graphs))] raise ValueError( "Tensor {tname} in {b0name} is accessed from {b1name}.".format( tname=t.name, b0name=branch_names[other_branch_graphs[t.graph]], b1name=branch_names[i])) class _CondGradFuncGraph(util.CondBranchFuncGraph): """FuncGraph for the gradient function of the branch of an If op. Handles wrapping and unwrapping intermediate values that are captured by the gradient computation in optionals. Attributes: op_needs_rewrite: True if any intermediates were captured, meaning the forward If op needs to be written to output the wrapped intermediates. """ def __init__(self, name, forward_graph): super(_CondGradFuncGraph, self).__init__( name, collections=ops.get_default_graph()._collections) # pylint: disable=protected-access self.op_needs_rewrite = False self._forward_graph = forward_graph # Maps from forward intermediate tensor -> the unwrapped captured # intermediate. self._indirect_captures = {} # Maps unwrapped intermediate -> optional-wrapped intermediate in the # forward graph. self._wrapped_intermediates = collections.OrderedDict() # Raw intermediates captured from the forward graph. Populated iff we're in # an XLA context. self._xla_intermediates = [] @property def wrapped_intermediates(self): """The optional-wrapped intermediates captured from the forward graph.""" return list(self._wrapped_intermediates.values()) @property def xla_intermediates(self): """Raw intermediates captured from the forward graph if XLA is enabled.""" return self._xla_intermediates def _capture_helper(self, tensor, name): if (tensor.graph is not self._forward_graph or tensor in self._forward_graph.inputs or tensor in self._forward_graph.outputs): return super(_CondGradFuncGraph, self)._capture_helper(tensor, name) if control_flow_util.GraphOrParentsInXlaContext(ops.get_default_graph()): # XLA does not yet support optionals, so capture intermediates directly. # TODO(skyewm,jpienaar): can XLA support optionals? if tensor not in self.captures: self.xla_intermediates.append(tensor) self.op_needs_rewrite = True return super(_CondGradFuncGraph, self)._capture_helper(tensor, name) captured_tensor = self._indirect_captures.get(tensor) if captured_tensor is not None: return captured_tensor # 'tensor' is an uncaptured intermediate in the forward graph. # If it is not a resource, we wrap it in an optional in the forward graph # and capture the optional normally. We then unwrap the captured optional # value in the gradient graph to get the raw intermediate value. # If it is a resource, we trace the resource upto the input in the forward # graph and capture that. if tensor.dtype == dtypes.resource: # Index of the forward graph input corresponding to the resource tensor. index = util.resource_input_index( tensor.name, [t.name for t in self._forward_graph.inputs], {op.name: op.node_def for op in self._forward_graph.get_operations()}, self._forward_graph._functions) # This gets mapped to the corresponding If op input in # `_resolve_grad_inputs`. 
captured_tensor = super(_CondGradFuncGraph, self)._capture_helper( self._forward_graph.inputs[index], name) else: if tensor not in self._wrapped_intermediates: # If the gradient has already been computed for this If op, 'tensor' may # already be wrapped. for consumer in tensor.consumers(): if (consumer.type == "OptionalFromValue" and consumer.outputs[0] in self._forward_graph.outputs): optional = consumer.outputs[0] break else: # 'tensor' hasn't been wrapped, do it now. with self._forward_graph.as_default(): optional = gen_dataset_ops.optional_from_value([tensor]) self.op_needs_rewrite = True self._wrapped_intermediates[tensor] = optional optional = self._wrapped_intermediates[tensor] captured_optional = super(_CondGradFuncGraph, self)._capture_helper(optional, name) captured_tensor = gen_dataset_ops.optional_get_value( captured_optional, [tensor.dtype], [tensor.shape])[0] self._indirect_captures[tensor] = captured_tensor return captured_tensor def indexed_case(branch_index, branch_fns, name="indexed_case"): """Like conv_v2, except emits a Case op instead of an If.""" if isinstance(branch_index, int): raise TypeError("branch_index must not be a Python int", branch_index) with ops.name_scope(name) as scope: branch_names = [ util.unique_fn_name(scope, "branch{}".format(b)) for b in range(len(branch_fns)) ] # Automatic control dependencies are added in defuns, but not in v1 # graphs. Propagate that behavior here. add_control_dependencies = ops.get_default_graph()._add_control_dependencies branch_index = ops.convert_to_tensor(branch_index, name="branch_index") branch_graphs = [] for branch_name, branch_fn in zip(branch_names, branch_fns): branch_graphs.append( func_graph_module.func_graph_from_py_func( branch_name, branch_fn, [], {}, func_graph=util.CondBranchFuncGraph( branch_name, collections=ops.get_default_graph()._collections), # pylint: disable=protected-access add_control_dependencies=add_control_dependencies, op_return_value=branch_index)) verify_captures(_CASE, branch_graphs) return _build_case( branch_index, branch_graphs, [g.external_captures for g in branch_graphs], name=scope) @ops.RegisterGradient("Case") def _CaseGrad(op, *grads): # pylint: disable=invalid-name """The gradient of a Case op produced by tf.switch_case.""" # Get the Case operator (this logic handles the case where op is a MockOp) case_op = op.outputs[0].op branch_graphs = _get_func_graphs(case_op) assert branch_graphs # Note: op.graph != ops.get_default_graph() when we are computing the gradient # of a nested cond. for branch_graph in branch_graphs: assert branch_graph.outer_graph == case_op.graph # Create grad functions that compute the gradient of the branch forward # graphs. These functions will capture tensors from the forward pass # functions. branch_grad_graphs = [] for branch_graph in branch_graphs: branch_grad_graphs.append( _create_grad_func(branch_graph, grads, util.unique_grad_fn_name(branch_graph.name))) if any(g.op_needs_rewrite for g in branch_grad_graphs): # Modify 'op' to output the intermediates needed by the grad functions. Note # that all needed intermediates are wrapped in optionals. Each optional # intermediate output will have a value iff its corresponding branch is # taken. # NOTE(bjp): if there are any active sessions, this modification to `op` # may make them unrunnable! if control_flow_util.InXlaContext(ops.get_default_graph()): # XLA does not yet support optionals, so output intermediates directly and # make them match via FakeParams, which can be converted to zeros in XLA. 
# TODO(bjp,jpienaar): can XLA support optionals? branches_intermediates = [ branch_grad_graph.xla_intermediates for branch_grad_graph in branch_grad_graphs ] extra_branch_outputs = _make_intermediates_match_xla( branch_graphs, branches_intermediates) else: branch_intermediates = [ g.wrapped_intermediates for g in branch_grad_graphs ] # Make outputs match by adding none optionals. extra_branch_outputs = _make_intermediates_match(branch_graphs, branch_intermediates) for branch_graph, extra_outputs in zip(branch_graphs, extra_branch_outputs): branch_graph.outputs.extend(extra_outputs) # TODO(bjp): indicate it's an internal bug if this fails. _check_same_outputs(_CASE, branch_graphs) for branch_graph in branch_graphs: branch_graph.name += "_rewritten" case_op._set_func_list_attr("branches", [ util.create_new_tf_function(branch_graph) for branch_graph in branch_graphs ]) case_op._set_type_list_attr("Tout", branch_graphs[0].output_types) case_op._set_shape_list_attr("output_shapes", branch_graphs[0].output_shapes) case_op._add_outputs([t.dtype for t in extra_branch_outputs[0]], [t.shape for t in extra_branch_outputs[0]]) # Resolve references to forward graph tensors in grad graphs and ensure # they are in-scope, i.e., belong to one of outer graphs of the grad graph. branches_grad_inputs = [ _resolve_grad_inputs(branch_graph, branch_grad_graph) for branch_graph, branch_grad_graph in zip(branch_graphs, branch_grad_graphs) ] # This modifies the graphs in branch_grad_graphs. _make_output_composite_tensors_match(_CASE, branch_grad_graphs) outputs = _build_case(case_op.inputs[0], branch_grad_graphs, branches_grad_inputs, name="gradient") # The predicate has no gradient. return [None] + outputs def _build_case(branch_index, branch_graphs, branch_inputs, name=None): """Creates an `Case` op from `branch_index`, branch graphs and inputs. Note that this modifies `branch_graphs` to make the inputs match, and to output all intermediates values so they're available for the gradient computation. `branch_graphs` need not have the same input types, but they must have the same outpute types. Args: branch_index: integer Tensor branch_graphs: List of FuncGraph branch_inputs: List of lists of Tensors to be passed to corresponding branch_graph as input. name: the name for the Case op. Returns: A list of Tensors which are the outputs of the Case op. Does not include added intermediate outputs. """ _make_indexed_slices_indices_types_match(_CASE, branch_graphs) _check_same_outputs(_CASE, branch_graphs) # Add inputs to branch_graphs to make them match. Note that this modifies the # graphs in `branch_graphs`. case_inputs = _make_inputs_match(branch_graphs, branch_inputs) # Create the Case op. with ops.control_dependencies( sum((list(bg.control_captures) for bg in branch_graphs), [])): tensors = gen_functional_ops.case( branch_index, case_inputs, [t.dtype for t in branch_graphs[0].outputs], [util.create_new_tf_function(g) for g in branch_graphs], output_shapes=_get_output_shapes(*[g.outputs for g in branch_graphs]), name=name) # TODO(b/110167197): this requires Case to have at least 1 output case_op = tensors[0].op util.maybe_set_lowering_attr(case_op) util.maybe_propagate_compile_time_consts_in_xla(case_op) # Return identities for each output of the Case op, rather than the output of # the Case op directly. 
This makes pruning work if the output of switch_case() # is fetched: the lowering pass converts the Case outputs into IdentityN # outputs, which if fetched will cause all ops in the taken branch to be run # (since it takes all merge ops as input). After lowering, each output # identity op will end up with only the appropriate merge op as input. # TODO(b/79984175): this doesn't have to be a tuple once we covert to the # correct output structure tensors = [array_ops.identity(t) for t in tensors] # Prevent fetching since the variant outputs can't be fetched directly. case_op.graph.prevent_fetching(case_op) return func_graph_module.pack_sequence_as(branch_graphs[0].structured_outputs, tensors)
tensorflow-master
tensorflow/python/ops/cond_v2.py
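For orientation, this is the user-level behaviour the file above implements: inside a tf.function graph, tf.cond is backed by cond_v2 and emits a single If/StatelessIf op, and _IfGrad is what lets gradients flow through the taken branch. A small sketch under those assumptions; the function and constants are illustrative.

import tensorflow as tf

@tf.function
def branchy(x, pred):
    # In graph mode this builds one If/StatelessIf op via cond_v2;
    # both branches must return matching structures and dtypes.
    return tf.cond(pred,
                   lambda: tf.square(x),  # true_fn
                   lambda: 3.0 * x)       # false_fn

x = tf.constant(2.0)
with tf.GradientTape() as tape:
    tape.watch(x)
    y = branchy(x, tf.constant(True))

# The gradient comes from the taken branch only: d(x**2)/dx = 2x = 4.
print(tape.gradient(y, x).numpy())  # 4.0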
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Implementation of image ops.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.compat import compat from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import random_seed from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import tensor_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import gen_image_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn from tensorflow.python.ops import nn_ops from tensorflow.python.ops import random_ops from tensorflow.python.ops import string_ops from tensorflow.python.ops import variables from tensorflow.python.util import deprecation from tensorflow.python.util.tf_export import tf_export ops.NotDifferentiable('RandomCrop') # TODO(b/31222613): This op may be differentiable, and there may be # latent bugs here. ops.NotDifferentiable('RGBToHSV') # TODO(b/31222613): This op may be differentiable, and there may be # latent bugs here. ops.NotDifferentiable('HSVToRGB') ops.NotDifferentiable('DrawBoundingBoxes') ops.NotDifferentiable('SampleDistortedBoundingBox') ops.NotDifferentiable('SampleDistortedBoundingBoxV2') # TODO(bsteiner): Implement the gradient function for extract_glimpse # TODO(b/31222613): This op may be differentiable, and there may be # latent bugs here. ops.NotDifferentiable('ExtractGlimpse') ops.NotDifferentiable('NonMaxSuppression') ops.NotDifferentiable('NonMaxSuppressionV2') ops.NotDifferentiable('NonMaxSuppressionWithOverlaps') # pylint: disable=invalid-name def _assert(cond, ex_type, msg): """A polymorphic assert, works with tensors and boolean expressions. If `cond` is not a tensor, behave like an ordinary assert statement, except that a empty list is returned. If `cond` is a tensor, return a list containing a single TensorFlow assert op. Args: cond: Something evaluates to a boolean value. May be a tensor. ex_type: The exception class to use. msg: The error message. Returns: A list, containing at most one assert op. """ if _is_tensor(cond): return [control_flow_ops.Assert(cond, [msg])] else: if not cond: raise ex_type(msg) else: return [] def _is_tensor(x): """Returns `True` if `x` is a symbolic tensor-like object. Args: x: A python object to check. Returns: `True` if `x` is a `tf.Tensor` or `tf.Variable`, otherwise `False`. """ return isinstance(x, (ops.Tensor, variables.Variable)) def _ImageDimensions(image, rank): """Returns the dimensions of an image tensor. Args: image: A rank-D Tensor. For 3-D of shape: `[height, width, channels]`. 
rank: The expected rank of the image Returns: A list of corresponding to the dimensions of the input image. Dimensions that are statically known are python integers, otherwise they are integer scalar tensors. """ if image.get_shape().is_fully_defined(): return image.get_shape().as_list() else: static_shape = image.get_shape().with_rank(rank).as_list() dynamic_shape = array_ops.unstack(array_ops.shape(image), rank) return [ s if s is not None else d for s, d in zip(static_shape, dynamic_shape) ] def _Check3DImage(image, require_static=True): """Assert that we are working with properly shaped image. Args: image: 3-D Tensor of shape [height, width, channels] require_static: If `True`, requires that all dimensions of `image` are known and non-zero. Raises: ValueError: if `image.shape` is not a 3-vector. Returns: An empty list, if `image` has fully defined dimensions. Otherwise, a list containing an assert op is returned. """ try: image_shape = image.get_shape().with_rank(3) except ValueError: raise ValueError("'image' (shape %s) must be three-dimensional." % image.shape) if require_static and not image_shape.is_fully_defined(): raise ValueError("'image' (shape %s) must be fully defined." % image_shape) if any(x == 0 for x in image_shape): raise ValueError("all dims of 'image.shape' must be > 0: %s" % image_shape) if not image_shape.is_fully_defined(): return [ check_ops.assert_positive( array_ops.shape(image), ["all dims of 'image.shape' " 'must be > 0.']) ] else: return [] def _Assert3DImage(image): """Assert that we are working with a properly shaped image. Performs the check statically if possible (i.e. if the shape is statically known). Otherwise adds a control dependency to an assert op that checks the dynamic shape. Args: image: 3-D Tensor of shape [height, width, channels] Raises: ValueError: if `image.shape` is not a 3-vector. Returns: If the shape of `image` could be verified statically, `image` is returned unchanged, otherwise there will be a control dependency added that asserts the correct dynamic shape. """ return control_flow_ops.with_dependencies( _Check3DImage(image, require_static=False), image) def _AssertAtLeast3DImage(image): """Assert that we are working with a properly shaped image. Performs the check statically if possible (i.e. if the shape is statically known). Otherwise adds a control dependency to an assert op that checks the dynamic shape. Args: image: >= 3-D Tensor of size [*, height, width, depth] Raises: ValueError: if image.shape is not a [>= 3] vector. Returns: If the shape of `image` could be verified statically, `image` is returned unchanged, otherwise there will be a control dependency added that asserts the correct dynamic shape. """ return control_flow_ops.with_dependencies( _CheckAtLeast3DImage(image, require_static=False), image) def _CheckAtLeast3DImage(image, require_static=True): """Assert that we are working with properly shaped image. Args: image: >= 3-D Tensor of size [*, height, width, depth] require_static: If `True`, requires that all dimensions of `image` are known and non-zero. Raises: ValueError: if image.shape is not a [>= 3] vector. Returns: An empty list, if `image` has fully defined dimensions. Otherwise, a list containing an assert op is returned. 
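Typical internal usage, as a minimal sketch (this mirrors how the helper is consumed elsewhere in this module; `image` stands for any already-converted tensor):

```python
assert_ops = _CheckAtLeast3DImage(image, require_static=False)
image = control_flow_ops.with_dependencies(assert_ops, image)
```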
""" try: if image.get_shape().ndims is None: image_shape = image.get_shape().with_rank(3) else: image_shape = image.get_shape().with_rank_at_least(3) except ValueError: raise ValueError("'image' must be at least three-dimensional.") if require_static and not image_shape.is_fully_defined(): raise ValueError('\'image\' must be fully defined.') if any(x == 0 for x in image_shape): raise ValueError('all dims of \'image.shape\' must be > 0: %s' % image_shape) if not image_shape.is_fully_defined(): return [ check_ops.assert_positive( array_ops.shape(image), ["all dims of 'image.shape' " 'must be > 0.']), check_ops.assert_greater_equal( array_ops.rank(image), 3, message="'image' must be at least three-dimensional.") ] else: return [] def _AssertGrayscaleImage(image): """Assert that we are working with a properly shaped grayscale image. Performs the check statically if possible (i.e. if the shape is statically known). Otherwise adds a control dependency to an assert op that checks the dynamic shape. Args: image: >= 2-D Tensor of size [*, 1] Raises: ValueError: if image.shape is not a [>= 2] vector or if last dimension is not size 1. Returns: If the shape of `image` could be verified statically, `image` is returned unchanged, otherwise there will be a control dependency added that asserts the correct dynamic shape. """ return control_flow_ops.with_dependencies( _CheckGrayscaleImage(image, require_static=False), image) def _CheckGrayscaleImage(image, require_static=True): """Assert that we are working with properly shaped grayscale image. Args: image: >= 2-D Tensor of size [*, 1] require_static: Boolean, whether static shape is required. Raises: ValueError: if image.shape is not a [>= 2] vector or if last dimension is not size 1. Returns: An empty list, if `image` has fully defined dimensions. Otherwise, a list containing an assert op is returned. """ try: if image.get_shape().ndims is None: image_shape = image.get_shape().with_rank(2) else: image_shape = image.get_shape().with_rank_at_least(2) except ValueError: raise ValueError('A grayscale image must be at least two-dimensional.') if require_static and not image_shape.is_fully_defined(): raise ValueError('\'image\' must be fully defined.') if image_shape.is_fully_defined(): if image_shape[-1] != 1: raise ValueError('Last dimension of a grayscale image should be size 1.') if not image_shape.is_fully_defined(): return [ check_ops.assert_equal( array_ops.shape(image)[-1], 1, message='Last dimension of a grayscale image should be size 1.'), check_ops.assert_greater_equal( array_ops.rank(image), 3, message='A grayscale image must be at least two-dimensional.') ] else: return [] def fix_image_flip_shape(image, result): """Set the shape to 3 dimensional if we don't know anything else. Args: image: original image size result: flipped or transformed image Returns: An image whose shape is at least None,None,None. """ image_shape = image.get_shape() if image_shape == tensor_shape.unknown_shape(): result.set_shape([None, None, None]) else: result.set_shape(image_shape) return result @tf_export('image.random_flip_up_down') def random_flip_up_down(image, seed=None): """Randomly flips an image vertically (upside down). With a 1 in 2 chance, outputs the contents of `image` flipped along the first dimension, which is `height`. Otherwise output the image as-is. Args: image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. seed: A Python integer. Used to create a random seed. 
See `tf.compat.v1.set_random_seed` for behavior. Returns: A tensor of the same type and shape as `image`. Raises: ValueError: if the shape of `image` not supported. """ return _random_flip(image, 0, seed, 'random_flip_up_down') @tf_export('image.random_flip_left_right') def random_flip_left_right(image, seed=None): """Randomly flip an image horizontally (left to right). With a 1 in 2 chance, outputs the contents of `image` flipped along the second dimension, which is `width`. Otherwise output the image as-is. Args: image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. seed: A Python integer. Used to create a random seed. See `tf.compat.v1.set_random_seed` for behavior. Returns: A tensor of the same type and shape as `image`. Raises: ValueError: if the shape of `image` not supported. """ return _random_flip(image, 1, seed, 'random_flip_left_right') def _random_flip(image, flip_index, seed, scope_name): """Randomly (50% chance) flip an image along axis `flip_index`. Args: image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. flip_index: Dimension along which to flip image. Vertical: 0, Horizontal: 1 seed: A Python integer. Used to create a random seed. See `tf.compat.v1.set_random_seed` for behavior. scope_name: Name of the scope in which the ops are added. Returns: A tensor of the same type and shape as `image`. Raises: ValueError: if the shape of `image` not supported. """ with ops.name_scope(None, scope_name, [image]) as scope: image = ops.convert_to_tensor(image, name='image') image = _AssertAtLeast3DImage(image) shape = image.get_shape() if shape.ndims == 3 or shape.ndims is None: uniform_random = random_ops.random_uniform([], 0, 1.0, seed=seed) mirror_cond = math_ops.less(uniform_random, .5) result = control_flow_ops.cond( mirror_cond, lambda: array_ops.reverse(image, [flip_index]), lambda: image, name=scope) return fix_image_flip_shape(image, result) elif shape.ndims == 4: batch_size = array_ops.shape(image)[0] uniform_random = random_ops.random_uniform([batch_size], 0, 1.0, seed=seed) flips = math_ops.round( array_ops.reshape(uniform_random, [batch_size, 1, 1, 1])) flips = math_ops.cast(flips, image.dtype) flipped_input = array_ops.reverse(image, [flip_index + 1]) return flips * flipped_input + (1 - flips) * image else: raise ValueError('\'image\' must have either 3 or 4 dimensions.') @tf_export('image.flip_left_right') def flip_left_right(image): """Flip an image horizontally (left to right). Outputs the contents of `image` flipped along the width dimension. See also `reverse()`. Args: image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. Returns: A tensor of the same type and shape as `image`. Raises: ValueError: if the shape of `image` not supported. """ return _flip(image, 1, 'flip_left_right') @tf_export('image.flip_up_down') def flip_up_down(image): """Flip an image vertically (upside down). Outputs the contents of `image` flipped along the height dimension. See also `reverse()`. Args: image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. Returns: A `Tensor` of the same type and shape as `image`. Raises: ValueError: if the shape of `image` not supported. """ return _flip(image, 0, 'flip_up_down') def _flip(image, flip_index, scope_name): """Flip an image either horizontally or vertically. 
Outputs the contents of `image` flipped along the dimension `flip_index`. See also `reverse()`. Args: image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. flip_index: 0 For vertical, 1 for horizontal. scope_name: string, scope name. Returns: A `Tensor` of the same type and shape as `image`. Raises: ValueError: if the shape of `image` not supported. """ with ops.name_scope(None, scope_name, [image]): image = ops.convert_to_tensor(image, name='image') image = _AssertAtLeast3DImage(image) shape = image.get_shape() if shape.ndims == 3 or shape.ndims is None: return fix_image_flip_shape(image, array_ops.reverse(image, [flip_index])) elif shape.ndims == 4: return array_ops.reverse(image, [flip_index + 1]) else: raise ValueError('\'image\' must have either 3 or 4 dimensions.') @tf_export('image.rot90') def rot90(image, k=1, name=None): """Rotate image(s) counter-clockwise by 90 degrees. For example: ```python a=tf.constant([[[1],[2]],[[3],[4]]]) # rotating `a` counter clockwise by 90 degrees a_rot=tf.image.rot90(a,k=1) #rotated `a` print(a_rot) # [[[2],[4]],[[1],[3]]] ``` Args: image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. k: A scalar integer. The number of times the image is rotated by 90 degrees. name: A name for this operation (optional). Returns: A rotated tensor of the same type and shape as `image`. Raises: ValueError: if the shape of `image` not supported. """ with ops.name_scope(name, 'rot90', [image, k]) as scope: image = ops.convert_to_tensor(image, name='image') image = _AssertAtLeast3DImage(image) k = ops.convert_to_tensor(k, dtype=dtypes.int32, name='k') k.get_shape().assert_has_rank(0) k = math_ops.mod(k, 4) shape = image.get_shape() if shape.ndims == 3 or shape.ndims is None: return _rot90_3D(image, k, scope) elif shape.ndims == 4: return _rot90_4D(image, k, scope) else: raise ValueError('\'image\' must have either 3 or 4 dimensions.') def _rot90_3D(image, k, name_scope): """Rotate image counter-clockwise by 90 degrees `k` times. Args: image: 3-D Tensor of shape `[height, width, channels]`. k: A scalar integer. The number of times the image is rotated by 90 degrees. name_scope: A valid TensorFlow name scope. Returns: A 3-D tensor of the same type and shape as `image`. """ def _rot90(): return array_ops.transpose(array_ops.reverse_v2(image, [1]), [1, 0, 2]) def _rot180(): return array_ops.reverse_v2(image, [0, 1]) def _rot270(): return array_ops.reverse_v2(array_ops.transpose(image, [1, 0, 2]), [1]) cases = [(math_ops.equal(k, 1), _rot90), (math_ops.equal(k, 2), _rot180), (math_ops.equal(k, 3), _rot270)] result = control_flow_ops.case( cases, default=lambda: image, exclusive=True, name=name_scope) result.set_shape([None, None, image.get_shape()[2]]) return result def _rot90_4D(images, k, name_scope): """Rotate batch of images counter-clockwise by 90 degrees `k` times. Args: images: 4-D Tensor of shape `[height, width, channels]`. k: A scalar integer. The number of times the images are rotated by 90 degrees. name_scope: A valid TensorFlow name scope. Returns: A 4-D `Tensor` of the same type and shape as `images`. 
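This helper is reached through the public `tf.image.rot90` wrapper whenever a batch is passed. A minimal sketch (shapes are arbitrary):

```python
import tensorflow as tf

batch = tf.random.normal([8, 64, 48, 3])  # [batch, height, width, channels]
rotated = tf.image.rot90(batch, k=1)      # dispatches to the 4-D branch
print(rotated.shape)                      # (8, 48, 64, 3): height and width swap
```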
""" def _rot90(): return array_ops.transpose(array_ops.reverse_v2(images, [2]), [0, 2, 1, 3]) def _rot180(): return array_ops.reverse_v2(images, [1, 2]) def _rot270(): return array_ops.reverse_v2(array_ops.transpose(images, [0, 2, 1, 3]), [2]) cases = [(math_ops.equal(k, 1), _rot90), (math_ops.equal(k, 2), _rot180), (math_ops.equal(k, 3), _rot270)] result = control_flow_ops.case( cases, default=lambda: images, exclusive=True, name=name_scope) shape = result.get_shape() result.set_shape([shape[0], None, None, shape[3]]) return result @tf_export('image.transpose', v1=['image.transpose', 'image.transpose_image']) def transpose(image, name=None): """Transpose image(s) by swapping the height and width dimension. Args: image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. name: A name for this operation (optional). Returns: If `image` was 4-D, a 4-D float Tensor of shape `[batch, width, height, channels]` If `image` was 3-D, a 3-D float Tensor of shape `[width, height, channels]` Raises: ValueError: if the shape of `image` not supported. """ with ops.name_scope(name, 'transpose', [image]): image = ops.convert_to_tensor(image, name='image') image = _AssertAtLeast3DImage(image) shape = image.get_shape() if shape.ndims == 3 or shape.ndims is None: return array_ops.transpose(image, [1, 0, 2], name=name) elif shape.ndims == 4: return array_ops.transpose(image, [0, 2, 1, 3], name=name) else: raise ValueError('\'image\' must have either 3 or 4 dimensions.') @tf_export('image.central_crop') def central_crop(image, central_fraction): """Crop the central region of the image(s). Remove the outer parts of an image but retain the central region of the image along each dimension. If we specify central_fraction = 0.5, this function returns the region marked with "X" in the below diagram. -------- | | | XXXX | | XXXX | | | where "X" is the central 50% of the image. -------- This function works on either a single image (`image` is a 3-D Tensor), or a batch of images (`image` is a 4-D Tensor). Args: image: Either a 3-D float Tensor of shape [height, width, depth], or a 4-D Tensor of shape [batch_size, height, width, depth]. central_fraction: float (0, 1], fraction of size to crop Usage Example: ```python >> import tensorflow as tf >> x = tf.random.normal(shape=(256, 256, 3)) >> tf.image.central_crop(x, 0.5) ``` Raises: ValueError: if central_crop_fraction is not within (0, 1]. Returns: 3-D / 4-D float Tensor, as per the input. """ with ops.name_scope(None, 'central_crop', [image]): image = ops.convert_to_tensor(image, name='image') if central_fraction <= 0.0 or central_fraction > 1.0: raise ValueError('central_fraction must be within (0, 1]') if central_fraction == 1.0: return image _AssertAtLeast3DImage(image) rank = image.get_shape().ndims if rank != 3 and rank != 4: raise ValueError('`image` should either be a Tensor with rank = 3 or ' 'rank = 4. Had rank = {}.'.format(rank)) # Helper method to return the `idx`-th dimension of `tensor`, along with # a boolean signifying if the dimension is dynamic. def _get_dim(tensor, idx): static_shape = tensor.get_shape().dims[idx].value if static_shape is not None: return static_shape, False return array_ops.shape(tensor)[idx], True # Get the height, width, depth (and batch size, if the image is a 4-D # tensor). 
if rank == 3: img_h, dynamic_h = _get_dim(image, 0) img_w, dynamic_w = _get_dim(image, 1) img_d = image.get_shape()[2] else: img_bs = image.get_shape()[0] img_h, dynamic_h = _get_dim(image, 1) img_w, dynamic_w = _get_dim(image, 2) img_d = image.get_shape()[3] # Compute the bounding boxes for the crop. The type and value of the # bounding boxes depend on the `image` tensor's rank and whether / not the # dimensions are statically defined. if dynamic_h: img_hd = math_ops.cast(img_h, dtypes.float64) bbox_h_start = math_ops.cast((img_hd - img_hd * central_fraction) / 2, dtypes.int32) else: img_hd = float(img_h) bbox_h_start = int((img_hd - img_hd * central_fraction) / 2) if dynamic_w: img_wd = math_ops.cast(img_w, dtypes.float64) bbox_w_start = math_ops.cast((img_wd - img_wd * central_fraction) / 2, dtypes.int32) else: img_wd = float(img_w) bbox_w_start = int((img_wd - img_wd * central_fraction) / 2) bbox_h_size = img_h - bbox_h_start * 2 bbox_w_size = img_w - bbox_w_start * 2 if rank == 3: bbox_begin = array_ops.stack([bbox_h_start, bbox_w_start, 0]) bbox_size = array_ops.stack([bbox_h_size, bbox_w_size, -1]) else: bbox_begin = array_ops.stack([0, bbox_h_start, bbox_w_start, 0]) bbox_size = array_ops.stack([-1, bbox_h_size, bbox_w_size, -1]) image = array_ops.slice(image, bbox_begin, bbox_size) # Reshape the `image` tensor to the desired size. if rank == 3: image.set_shape([ None if dynamic_h else bbox_h_size, None if dynamic_w else bbox_w_size, img_d ]) else: image.set_shape([ img_bs, None if dynamic_h else bbox_h_size, None if dynamic_w else bbox_w_size, img_d ]) return image @tf_export('image.pad_to_bounding_box') def pad_to_bounding_box(image, offset_height, offset_width, target_height, target_width): """Pad `image` with zeros to the specified `height` and `width`. Adds `offset_height` rows of zeros on top, `offset_width` columns of zeros on the left, and then pads the image on the bottom and right with zeros until it has dimensions `target_height`, `target_width`. This op does nothing if `offset_*` is zero and the image already has size `target_height` by `target_width`. Args: image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. offset_height: Number of rows of zeros to add on top. offset_width: Number of columns of zeros to add on the left. target_height: Height of output image. target_width: Width of output image. Returns: If `image` was 4-D, a 4-D float Tensor of shape `[batch, target_height, target_width, channels]` If `image` was 3-D, a 3-D float Tensor of shape `[target_height, target_width, channels]` Raises: ValueError: If the shape of `image` is incompatible with the `offset_*` or `target_*` arguments, or either `offset_height` or `offset_width` is negative. 
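Usage Example (a minimal sketch; the offsets and target size are arbitrary):

```python
import tensorflow as tf

image = tf.random.uniform([200, 300, 3])
padded = tf.image.pad_to_bounding_box(image, offset_height=50, offset_width=50,
                                      target_height=300, target_width=400)
print(padded.shape)  # (300, 400, 3); the original content starts at row/col 50
```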
""" with ops.name_scope(None, 'pad_to_bounding_box', [image]): image = ops.convert_to_tensor(image, name='image') is_batch = True image_shape = image.get_shape() if image_shape.ndims == 3: is_batch = False image = array_ops.expand_dims(image, 0) elif image_shape.ndims is None: is_batch = False image = array_ops.expand_dims(image, 0) image.set_shape([None] * 4) elif image_shape.ndims != 4: raise ValueError('\'image\' must have either 3 or 4 dimensions.') assert_ops = _CheckAtLeast3DImage(image, require_static=False) batch, height, width, depth = _ImageDimensions(image, rank=4) after_padding_width = target_width - offset_width - width after_padding_height = target_height - offset_height - height assert_ops += _assert(offset_height >= 0, ValueError, 'offset_height must be >= 0') assert_ops += _assert(offset_width >= 0, ValueError, 'offset_width must be >= 0') assert_ops += _assert(after_padding_width >= 0, ValueError, 'width must be <= target - offset') assert_ops += _assert(after_padding_height >= 0, ValueError, 'height must be <= target - offset') image = control_flow_ops.with_dependencies(assert_ops, image) # Do not pad on the depth dimensions. paddings = array_ops.reshape( array_ops.stack([ 0, 0, offset_height, after_padding_height, offset_width, after_padding_width, 0, 0 ]), [4, 2]) padded = array_ops.pad(image, paddings) padded_shape = [ None if _is_tensor(i) else i for i in [batch, target_height, target_width, depth] ] padded.set_shape(padded_shape) if not is_batch: padded = array_ops.squeeze(padded, axis=[0]) return padded @tf_export('image.crop_to_bounding_box') def crop_to_bounding_box(image, offset_height, offset_width, target_height, target_width): """Crops an image to a specified bounding box. This op cuts a rectangular part out of `image`. The top-left corner of the returned image is at `offset_height, offset_width` in `image`, and its lower-right corner is at `offset_height + target_height, offset_width + target_width`. Args: image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. offset_height: Vertical coordinate of the top-left corner of the result in the input. offset_width: Horizontal coordinate of the top-left corner of the result in the input. target_height: Height of the result. target_width: Width of the result. Returns: If `image` was 4-D, a 4-D float Tensor of shape `[batch, target_height, target_width, channels]` If `image` was 3-D, a 3-D float Tensor of shape `[target_height, target_width, channels]` Raises: ValueError: If the shape of `image` is incompatible with the `offset_*` or `target_*` arguments, or either `offset_height` or `offset_width` is negative, or either `target_height` or `target_width` is not positive. 
""" with ops.name_scope(None, 'crop_to_bounding_box', [image]): image = ops.convert_to_tensor(image, name='image') is_batch = True image_shape = image.get_shape() if image_shape.ndims == 3: is_batch = False image = array_ops.expand_dims(image, 0) elif image_shape.ndims is None: is_batch = False image = array_ops.expand_dims(image, 0) image.set_shape([None] * 4) elif image_shape.ndims != 4: raise ValueError('\'image\' must have either 3 or 4 dimensions.') assert_ops = _CheckAtLeast3DImage(image, require_static=False) batch, height, width, depth = _ImageDimensions(image, rank=4) assert_ops += _assert(offset_width >= 0, ValueError, 'offset_width must be >= 0.') assert_ops += _assert(offset_height >= 0, ValueError, 'offset_height must be >= 0.') assert_ops += _assert(target_width > 0, ValueError, 'target_width must be > 0.') assert_ops += _assert(target_height > 0, ValueError, 'target_height must be > 0.') assert_ops += _assert(width >= (target_width + offset_width), ValueError, 'width must be >= target + offset.') assert_ops += _assert(height >= (target_height + offset_height), ValueError, 'height must be >= target + offset.') image = control_flow_ops.with_dependencies(assert_ops, image) cropped = array_ops.slice( image, array_ops.stack([0, offset_height, offset_width, 0]), array_ops.stack([-1, target_height, target_width, -1])) cropped_shape = [ None if _is_tensor(i) else i for i in [batch, target_height, target_width, depth] ] cropped.set_shape(cropped_shape) if not is_batch: cropped = array_ops.squeeze(cropped, axis=[0]) return cropped @tf_export('image.resize_with_crop_or_pad', v1=['image.resize_with_crop_or_pad', 'image.resize_image_with_crop_or_pad']) def resize_image_with_crop_or_pad(image, target_height, target_width): """Crops and/or pads an image to a target width and height. Resizes an image to a target width and height by either centrally cropping the image or padding it evenly with zeros. If `width` or `height` is greater than the specified `target_width` or `target_height` respectively, this op centrally crops along that dimension. If `width` or `height` is smaller than the specified `target_width` or `target_height` respectively, this op centrally pads with 0 along that dimension. Args: image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. target_height: Target height. target_width: Target width. Raises: ValueError: if `target_height` or `target_width` are zero or negative. Returns: Cropped and/or padded image. If `images` was 4-D, a 4-D float Tensor of shape `[batch, new_height, new_width, channels]`. If `images` was 3-D, a 3-D float Tensor of shape `[new_height, new_width, channels]`. """ with ops.name_scope(None, 'resize_image_with_crop_or_pad', [image]): image = ops.convert_to_tensor(image, name='image') image_shape = image.get_shape() is_batch = True if image_shape.ndims == 3: is_batch = False image = array_ops.expand_dims(image, 0) elif image_shape.ndims is None: is_batch = False image = array_ops.expand_dims(image, 0) image.set_shape([None] * 4) elif image_shape.ndims != 4: raise ValueError('\'image\' must have either 3 or 4 dimensions.') assert_ops = _CheckAtLeast3DImage(image, require_static=False) assert_ops += _assert(target_width > 0, ValueError, 'target_width must be > 0.') assert_ops += _assert(target_height > 0, ValueError, 'target_height must be > 0.') image = control_flow_ops.with_dependencies(assert_ops, image) # `crop_to_bounding_box` and `pad_to_bounding_box` have their own checks. 
# Make sure our checks come first, so that error messages are clearer. if _is_tensor(target_height): target_height = control_flow_ops.with_dependencies( assert_ops, target_height) if _is_tensor(target_width): target_width = control_flow_ops.with_dependencies(assert_ops, target_width) def max_(x, y): if _is_tensor(x) or _is_tensor(y): return math_ops.maximum(x, y) else: return max(x, y) def min_(x, y): if _is_tensor(x) or _is_tensor(y): return math_ops.minimum(x, y) else: return min(x, y) def equal_(x, y): if _is_tensor(x) or _is_tensor(y): return math_ops.equal(x, y) else: return x == y _, height, width, _ = _ImageDimensions(image, rank=4) width_diff = target_width - width offset_crop_width = max_(-width_diff // 2, 0) offset_pad_width = max_(width_diff // 2, 0) height_diff = target_height - height offset_crop_height = max_(-height_diff // 2, 0) offset_pad_height = max_(height_diff // 2, 0) # Maybe crop if needed. cropped = crop_to_bounding_box(image, offset_crop_height, offset_crop_width, min_(target_height, height), min_(target_width, width)) # Maybe pad if needed. resized = pad_to_bounding_box(cropped, offset_pad_height, offset_pad_width, target_height, target_width) # In theory all the checks below are redundant. if resized.get_shape().ndims is None: raise ValueError('resized contains no shape.') _, resized_height, resized_width, _ = _ImageDimensions(resized, rank=4) assert_ops = [] assert_ops += _assert( equal_(resized_height, target_height), ValueError, 'resized height is not correct.') assert_ops += _assert( equal_(resized_width, target_width), ValueError, 'resized width is not correct.') resized = control_flow_ops.with_dependencies(assert_ops, resized) if not is_batch: resized = array_ops.squeeze(resized, axis=[0]) return resized @tf_export(v1=['image.ResizeMethod']) class ResizeMethodV1(object): BILINEAR = 0 NEAREST_NEIGHBOR = 1 BICUBIC = 2 AREA = 3 @tf_export('image.ResizeMethod', v1=[]) class ResizeMethod(object): BILINEAR = 'bilinear' NEAREST_NEIGHBOR = 'nearest' BICUBIC = 'bicubic' AREA = 'area' LANCZOS3 = 'lanczos3' LANCZOS5 = 'lanczos5' GAUSSIAN = 'gaussian' MITCHELLCUBIC = 'mitchellcubic' def _resize_images_common(images, resizer_fn, size, preserve_aspect_ratio, name, skip_resize_if_same): """Core functionality for v1 and v2 resize functions.""" with ops.name_scope(name, 'resize', [images, size]): images = ops.convert_to_tensor(images, name='images') if images.get_shape().ndims is None: raise ValueError('\'images\' contains no shape.') # TODO(shlens): Migrate this functionality to the underlying Op's. is_batch = True if images.get_shape().ndims == 3: is_batch = False images = array_ops.expand_dims(images, 0) elif images.get_shape().ndims != 4: raise ValueError('\'images\' must have either 3 or 4 dimensions.') _, height, width, _ = images.get_shape().as_list() try: size = ops.convert_to_tensor(size, dtypes.int32, name='size') except (TypeError, ValueError): raise ValueError('\'size\' must be a 1-D int32 Tensor') if not size.get_shape().is_compatible_with([2]): raise ValueError('\'size\' must be a 1-D Tensor of 2 elements: ' 'new_height, new_width') size_const_as_shape = tensor_util.constant_value_as_shape(size) new_height_const = size_const_as_shape.dims[0].value new_width_const = size_const_as_shape.dims[1].value if preserve_aspect_ratio: # Get the current shapes of the image, even if dynamic. _, current_height, current_width, _ = _ImageDimensions(images, rank=4) # do the computation to find the right scale and height/width. 
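      # Worked example (illustrative): resizing a 400x200 image to fit in
      # size=[100, 100] with preserve_aspect_ratio=True gives scale factors
      # 100/400 = 0.25 (height) and 100/200 = 0.5 (width); the smaller factor
      # is used, so the effective size computed below becomes [100, 50].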
scale_factor_height = ( math_ops.cast(new_height_const, dtypes.float32) / math_ops.cast(current_height, dtypes.float32)) scale_factor_width = ( math_ops.cast(new_width_const, dtypes.float32) / math_ops.cast(current_width, dtypes.float32)) scale_factor = math_ops.minimum(scale_factor_height, scale_factor_width) scaled_height_const = math_ops.cast( math_ops.round(scale_factor * math_ops.cast(current_height, dtypes.float32)), dtypes.int32) scaled_width_const = math_ops.cast( math_ops.round(scale_factor * math_ops.cast(current_width, dtypes.float32)), dtypes.int32) # NOTE: Reset the size and other constants used later. size = ops.convert_to_tensor([scaled_height_const, scaled_width_const], dtypes.int32, name='size') size_const_as_shape = tensor_util.constant_value_as_shape(size) new_height_const = size_const_as_shape.dims[0].value new_width_const = size_const_as_shape.dims[1].value # If we can determine that the height and width will be unmodified by this # transformation, we avoid performing the resize. if skip_resize_if_same and all( x is not None for x in [new_width_const, width, new_height_const, height]) and ( width == new_width_const and height == new_height_const): if not is_batch: images = array_ops.squeeze(images, axis=[0]) return images images = resizer_fn(images, size) # NOTE(mrry): The shape functions for the resize ops cannot unpack # the packed values in `new_size`, so set the shape here. images.set_shape([None, new_height_const, new_width_const, None]) if not is_batch: images = array_ops.squeeze(images, axis=[0]) return images @tf_export(v1=['image.resize_images', 'image.resize']) def resize_images(images, size, method=ResizeMethodV1.BILINEAR, align_corners=False, preserve_aspect_ratio=False, name=None): """Resize `images` to `size` using the specified `method`. Resized images will be distorted if their original aspect ratio is not the same as `size`. To avoid distortions see `tf.compat.v1.image.resize_image_with_pad`. `method` can be one of: * <b>`ResizeMethod.BILINEAR`</b>: [Bilinear interpolation.]( https://en.wikipedia.org/wiki/Bilinear_interpolation) * <b>`ResizeMethod.NEAREST_NEIGHBOR`</b>: [Nearest neighbor interpolation.]( https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation) * <b>`ResizeMethod.BICUBIC`</b>: [Bicubic interpolation.]( https://en.wikipedia.org/wiki/Bicubic_interpolation) * <b>`ResizeMethod.AREA`</b>: Area interpolation. The return value has the same type as `images` if `method` is `ResizeMethod.NEAREST_NEIGHBOR`. It will also have the same type as `images` if the size of `images` can be statically determined to be the same as `size`, because `images` is returned in this case. Otherwise, the return value has type `float32`. Args: images: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The new size for the images. method: ResizeMethod. Defaults to `ResizeMethod.BILINEAR`. align_corners: bool. If True, the centers of the 4 corner pixels of the input and output tensors are aligned, preserving the values at the corner pixels. Defaults to `False`. preserve_aspect_ratio: Whether to preserve the aspect ratio. If this is set, then `images` will be resized to a size that fits in `size` while preserving the aspect ratio of the original image. Scales up the image if `size` is bigger than the current size of the `image`. Defaults to False. name: A name for this operation (optional). 
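Usage Example (a minimal sketch of this v1 endpoint; assumes eager execution and arbitrary sizes):

```python
import tensorflow as tf

images = tf.random.uniform([4, 100, 50, 3])
resized = tf.compat.v1.image.resize_images(
    images, size=[64, 64], preserve_aspect_ratio=True)
print(resized.shape)  # (4, 64, 32, 3); output dtype is float32
```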
Raises: ValueError: if the shape of `images` is incompatible with the shape arguments to this function ValueError: if `size` has invalid shape or type. ValueError: if an unsupported resize method is specified. Returns: If `images` was 4-D, a 4-D float Tensor of shape `[batch, new_height, new_width, channels]`. If `images` was 3-D, a 3-D float Tensor of shape `[new_height, new_width, channels]`. """ def resize_fn(images_t, new_size): """Legacy resize core function, passed to _resize_images_common.""" if method == ResizeMethodV1.BILINEAR or method == ResizeMethod.BILINEAR: return gen_image_ops.resize_bilinear( images_t, new_size, align_corners=align_corners) elif (method == ResizeMethodV1.NEAREST_NEIGHBOR or method == ResizeMethod.NEAREST_NEIGHBOR): return gen_image_ops.resize_nearest_neighbor( images_t, new_size, align_corners=align_corners) elif method == ResizeMethodV1.BICUBIC or method == ResizeMethod.BICUBIC: return gen_image_ops.resize_bicubic( images_t, new_size, align_corners=align_corners) elif method == ResizeMethodV1.AREA or method == ResizeMethod.AREA: return gen_image_ops.resize_area( images_t, new_size, align_corners=align_corners) else: raise ValueError('Resize method is not implemented.') return _resize_images_common( images, resize_fn, size, preserve_aspect_ratio=preserve_aspect_ratio, name=name, skip_resize_if_same=True) @tf_export('image.resize', v1=[]) def resize_images_v2(images, size, method=ResizeMethod.BILINEAR, preserve_aspect_ratio=False, antialias=False, name=None): """Resize `images` to `size` using the specified `method`. Resized images will be distorted if their original aspect ratio is not the same as `size`. To avoid distortions see `tf.image.resize_with_pad`. When 'antialias' is true, the sampling filter will anti-alias the input image as well as interpolate. When downsampling an image with [anti-aliasing]( https://en.wikipedia.org/wiki/Spatial_anti-aliasing) the sampling filter kernel is scaled in order to properly anti-alias the input image signal. 'antialias' has no effect when upsampling an image. * <b>`bilinear`</b>: [Bilinear interpolation.]( https://en.wikipedia.org/wiki/Bilinear_interpolation) If 'antialias' is true, becomes a hat/tent filter function with radius 1 when downsampling. * <b>`lanczos3`</b>: [Lanczos kernel]( https://en.wikipedia.org/wiki/Lanczos_resampling) with radius 3. High-quality practical filter but may have some ringing especially on synthetic images. * <b>`lanczos5`</b>: [Lanczos kernel] ( https://en.wikipedia.org/wiki/Lanczos_resampling) with radius 5. Very-high-quality filter but may have stronger ringing. * <b>`bicubic`</b>: [Cubic interpolant]( https://en.wikipedia.org/wiki/Bicubic_interpolation) of Keys. Equivalent to Catmull-Rom kernel. Reasonably good quality and faster than Lanczos3Kernel, particularly when upsampling. * <b>`gaussian`</b>: [Gaussian kernel]( https://en.wikipedia.org/wiki/Gaussian_filter) with radius 3, sigma = 1.5 / 3.0. * <b>`nearest`</b>: [Nearest neighbor interpolation.]( https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation) 'antialias' has no effect when used with nearest neighbor interpolation. * <b>`area`</b>: Anti-aliased resampling with area interpolation. 'antialias' has no effect when used with area interpolation; it always anti-aliases. * <b>`mitchellcubic`</b>: Mitchell-Netravali Cubic non-interpolating filter. For synthetic images (especially those lacking proper prefiltering), less ringing than Keys cubic kernel but less sharp. 
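For example, a minimal sketch (arbitrary sizes; assumes eager execution):

```python
import tensorflow as tf

image = tf.random.uniform([1, 256, 256, 3])
small = tf.image.resize(image, [64, 64], method='lanczos3', antialias=True)
print(small.shape)  # (1, 64, 64, 3), dtype float32
```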
Note that near image edges the filtering kernel may be partially outside the image boundaries. For these pixels, only input pixels inside the image will be included in the filter sum, and the output value will be appropriately normalized. The return value has the same type as `images` if `method` is `ResizeMethod.NEAREST_NEIGHBOR`. Otherwise, the return value has type `float32`. Args: images: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The new size for the images. method: ResizeMethod. Defaults to `bilinear`. preserve_aspect_ratio: Whether to preserve the aspect ratio. If this is set, then `images` will be resized to a size that fits in `size` while preserving the aspect ratio of the original image. Scales up the image if `size` is bigger than the current size of the `image`. Defaults to False. antialias: Whether to use an anti-aliasing filter when downsampling an image. name: A name for this operation (optional). Raises: ValueError: if the shape of `images` is incompatible with the shape arguments to this function ValueError: if `size` has invalid shape or type. ValueError: if an unsupported resize method is specified. Returns: If `images` was 4-D, a 4-D float Tensor of shape `[batch, new_height, new_width, channels]`. If `images` was 3-D, a 3-D float Tensor of shape `[new_height, new_width, channels]`. """ def resize_fn(images_t, new_size): """Resize core function, passed to _resize_images_common.""" scale_and_translate_methods = [ ResizeMethod.LANCZOS3, ResizeMethod.LANCZOS5, ResizeMethod.GAUSSIAN, ResizeMethod.MITCHELLCUBIC ] def resize_with_scale_and_translate(method): scale = ( math_ops.cast(new_size, dtype=dtypes.float32) / math_ops.cast(array_ops.shape(images_t)[1:3], dtype=dtypes.float32)) return gen_image_ops.scale_and_translate( images_t, new_size, scale, array_ops.zeros([2]), kernel_type=method, antialias=antialias) if method == ResizeMethod.BILINEAR: if antialias: return resize_with_scale_and_translate('triangle') else: return gen_image_ops.resize_bilinear( images_t, new_size, half_pixel_centers=True) elif method == ResizeMethod.NEAREST_NEIGHBOR: return gen_image_ops.resize_nearest_neighbor( images_t, new_size, half_pixel_centers=True) elif method == ResizeMethod.BICUBIC: if antialias: return resize_with_scale_and_translate('keyscubic') else: return gen_image_ops.resize_bicubic( images_t, new_size, half_pixel_centers=True) elif method == ResizeMethod.AREA: return gen_image_ops.resize_area(images_t, new_size) elif method in scale_and_translate_methods: return resize_with_scale_and_translate(method) else: raise ValueError('Resize method is not implemented.') return _resize_images_common( images, resize_fn, size, preserve_aspect_ratio=preserve_aspect_ratio, name=name, skip_resize_if_same=False) def _resize_image_with_pad_common(image, target_height, target_width, resize_fn): """Core functionality for v1 and v2 resize_image_with_pad functions.""" with ops.name_scope(None, 'resize_image_with_pad', [image]): image = ops.convert_to_tensor(image, name='image') image_shape = image.get_shape() is_batch = True if image_shape.ndims == 3: is_batch = False image = array_ops.expand_dims(image, 0) elif image_shape.ndims is None: is_batch = False image = array_ops.expand_dims(image, 0) image.set_shape([None] * 4) elif image_shape.ndims != 4: raise ValueError('\'image\' must have either 3 or 4 dimensions.') assert_ops = _CheckAtLeast3DImage(image, require_static=False) 
assert_ops += _assert(target_width > 0, ValueError, 'target_width must be > 0.') assert_ops += _assert(target_height > 0, ValueError, 'target_height must be > 0.') image = control_flow_ops.with_dependencies(assert_ops, image) def max_(x, y): if _is_tensor(x) or _is_tensor(y): return math_ops.maximum(x, y) else: return max(x, y) _, height, width, _ = _ImageDimensions(image, rank=4) # convert values to float, to ease divisions f_height = math_ops.cast(height, dtype=dtypes.float64) f_width = math_ops.cast(width, dtype=dtypes.float64) f_target_height = math_ops.cast(target_height, dtype=dtypes.float64) f_target_width = math_ops.cast(target_width, dtype=dtypes.float64) # Find the ratio by which the image must be adjusted # to fit within the target ratio = max_(f_width / f_target_width, f_height / f_target_height) resized_height_float = f_height / ratio resized_width_float = f_width / ratio resized_height = math_ops.cast( math_ops.floor(resized_height_float), dtype=dtypes.int32) resized_width = math_ops.cast( math_ops.floor(resized_width_float), dtype=dtypes.int32) padding_height = (f_target_height - resized_height_float) / 2 padding_width = (f_target_width - resized_width_float) / 2 f_padding_height = math_ops.floor(padding_height) f_padding_width = math_ops.floor(padding_width) p_height = max_(0, math_ops.cast(f_padding_height, dtype=dtypes.int32)) p_width = max_(0, math_ops.cast(f_padding_width, dtype=dtypes.int32)) # Resize first, then pad to meet requested dimensions resized = resize_fn(image, [resized_height, resized_width]) padded = pad_to_bounding_box(resized, p_height, p_width, target_height, target_width) if padded.get_shape().ndims is None: raise ValueError('padded contains no shape.') _ImageDimensions(padded, rank=4) if not is_batch: padded = array_ops.squeeze(padded, axis=[0]) return padded @tf_export(v1=['image.resize_image_with_pad']) def resize_image_with_pad_v1(image, target_height, target_width, method=ResizeMethodV1.BILINEAR, align_corners=False): """Resizes and pads an image to a target width and height. Resizes an image to a target width and height by keeping the aspect ratio the same without distortion. If the target dimensions don't match the image dimensions, the image is resized and then padded with zeroes to match requested dimensions. Args: image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. target_height: Target height. target_width: Target width. method: Method to use for resizing image. See `resize_images()` align_corners: bool. If True, the centers of the 4 corner pixels of the input and output tensors are aligned, preserving the values at the corner pixels. Defaults to `False`. Raises: ValueError: if `target_height` or `target_width` are zero or negative. Returns: Resized and padded image. If `images` was 4-D, a 4-D float Tensor of shape `[batch, new_height, new_width, channels]`. If `images` was 3-D, a 3-D float Tensor of shape `[new_height, new_width, channels]`. """ def _resize_fn(im, new_size): return resize_images(im, new_size, method, align_corners=align_corners) return _resize_image_with_pad_common(image, target_height, target_width, _resize_fn) @tf_export('image.resize_with_pad', v1=[]) def resize_image_with_pad_v2(image, target_height, target_width, method=ResizeMethod.BILINEAR, antialias=False): """Resizes and pads an image to a target width and height. Resizes an image to a target width and height by keeping the aspect ratio the same without distortion. 
If the target dimensions don't match the image dimensions, the image is resized and then padded with zeroes to match requested dimensions. Args: image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. target_height: Target height. target_width: Target width. method: Method to use for resizing image. See `image.resize()` antialias: Whether to use anti-aliasing when resizing. See 'image.resize()'. Raises: ValueError: if `target_height` or `target_width` are zero or negative. Returns: Resized and padded image. If `images` was 4-D, a 4-D float Tensor of shape `[batch, new_height, new_width, channels]`. If `images` was 3-D, a 3-D float Tensor of shape `[new_height, new_width, channels]`. """ def _resize_fn(im, new_size): return resize_images_v2(im, new_size, method, antialias=antialias) return _resize_image_with_pad_common(image, target_height, target_width, _resize_fn) @tf_export('image.per_image_standardization') def per_image_standardization(image): """Linearly scales each image in `image` to have mean 0 and variance 1. For each 3-D image `x` in `image`, computes `(x - mean) / adjusted_stddev`, where - `mean` is the average of all values in `x` - `adjusted_stddev = max(stddev, 1.0/sqrt(N))` is capped away from 0 to protect against division by 0 when handling uniform images - `N` is the number of elements in `x` - `stddev` is the standard deviation of all values in `x` Args: image: An n-D Tensor with at least 3 dimensions, the last 3 of which are the dimensions of each image. Returns: A `Tensor` with same shape and dtype as `image`. Raises: ValueError: if the shape of 'image' is incompatible with this function. """ with ops.name_scope(None, 'per_image_standardization', [image]) as scope: image = ops.convert_to_tensor(image, name='image') image = _AssertAtLeast3DImage(image) # Remember original dtype to so we can convert back if needed orig_dtype = image.dtype if orig_dtype not in [dtypes.float16, dtypes.float32]: image = convert_image_dtype(image, dtypes.float32) num_pixels = math_ops.reduce_prod(array_ops.shape(image)[-3:]) image_mean = math_ops.reduce_mean(image, axis=[-1, -2, -3], keepdims=True) # Apply a minimum normalization that protects us against uniform images. stddev = math_ops.reduce_std(image, axis=[-1, -2, -3], keepdims=True) min_stddev = math_ops.rsqrt(math_ops.cast(num_pixels, image.dtype)) adjusted_stddev = math_ops.maximum(stddev, min_stddev) image -= image_mean image = math_ops.div(image, adjusted_stddev, name=scope) return convert_image_dtype(image, orig_dtype, saturate=True) @tf_export('image.random_brightness') def random_brightness(image, max_delta, seed=None): """Adjust the brightness of images by a random factor. Equivalent to `adjust_brightness()` using a `delta` randomly picked in the interval `[-max_delta, max_delta)`. Args: image: An image or images to adjust. max_delta: float, must be non-negative. seed: A Python integer. Used to create a random seed. See `tf.compat.v1.set_random_seed` for behavior. Returns: The brightness-adjusted image(s). Raises: ValueError: if `max_delta` is negative. """ if max_delta < 0: raise ValueError('max_delta must be non-negative.') delta = random_ops.random_uniform([], -max_delta, max_delta, seed=seed) return adjust_brightness(image, delta) @tf_export('image.random_contrast') def random_contrast(image, lower, upper, seed=None): """Adjust the contrast of an image or images by a random factor. 
Equivalent to `adjust_contrast()` but uses a `contrast_factor` randomly picked in the interval `[lower, upper]`. Args: image: An image tensor with 3 or more dimensions. lower: float. Lower bound for the random contrast factor. upper: float. Upper bound for the random contrast factor. seed: A Python integer. Used to create a random seed. See `tf.compat.v1.set_random_seed` for behavior. Returns: The contrast-adjusted image(s). Raises: ValueError: if `upper <= lower` or if `lower < 0`. """ if upper <= lower: raise ValueError('upper must be > lower.') if lower < 0: raise ValueError('lower must be non-negative.') # Generate an a float in [lower, upper] contrast_factor = random_ops.random_uniform([], lower, upper, seed=seed) return adjust_contrast(image, contrast_factor) @tf_export('image.adjust_brightness') def adjust_brightness(image, delta): """Adjust the brightness of RGB or Grayscale images. This is a convenience method that converts RGB images to float representation, adjusts their brightness, and then converts them back to the original data type. If several adjustments are chained, it is advisable to minimize the number of redundant conversions. The value `delta` is added to all components of the tensor `image`. `image` is converted to `float` and scaled appropriately if it is in fixed-point representation, and `delta` is converted to the same data type. For regular images, `delta` should be in the range `[0,1)`, as it is added to the image in floating point representation, where pixel values are in the `[0,1)` range. Args: image: RGB image or images to adjust. delta: A scalar. Amount to add to the pixel values. Returns: A brightness-adjusted tensor of the same shape and type as `image`. """ with ops.name_scope(None, 'adjust_brightness', [image, delta]) as name: image = ops.convert_to_tensor(image, name='image') # Remember original dtype to so we can convert back if needed orig_dtype = image.dtype if orig_dtype in [dtypes.float16, dtypes.float32]: flt_image = image else: flt_image = convert_image_dtype(image, dtypes.float32) adjusted = math_ops.add( flt_image, math_ops.cast(delta, flt_image.dtype), name=name) return convert_image_dtype(adjusted, orig_dtype, saturate=True) @tf_export('image.adjust_contrast') def adjust_contrast(images, contrast_factor): """Adjust contrast of RGB or grayscale images. This is a convenience method that converts RGB images to float representation, adjusts their contrast, and then converts them back to the original data type. If several adjustments are chained, it is advisable to minimize the number of redundant conversions. `images` is a tensor of at least 3 dimensions. The last 3 dimensions are interpreted as `[height, width, channels]`. The other dimensions only represent a collection of images, such as `[batch, height, width, channels].` Contrast is adjusted independently for each channel of each image. For each channel, this Op computes the mean of the image pixels in the channel and then adjusts each component `x` of each pixel to `(x - mean) * contrast_factor + mean`. Args: images: Images to adjust. At least 3-D. contrast_factor: A float multiplier for adjusting contrast. Returns: The contrast-adjusted image or images. 
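Usage Example (a minimal sketch; the contrast factor is arbitrary):

```python
import tensorflow as tf

images = tf.constant([[[0.2, 0.2, 0.2], [0.8, 0.8, 0.8]]])  # shape [1, 2, 3]
high_contrast = tf.image.adjust_contrast(images, contrast_factor=2.0)
# Each channel mean is 0.5, so values become (x - 0.5) * 2.0 + 0.5, i.e.
# -0.1 and 1.1 (float inputs are not clipped).
```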
""" with ops.name_scope(None, 'adjust_contrast', [images, contrast_factor]) as name: images = ops.convert_to_tensor(images, name='images') # Remember original dtype to so we can convert back if needed orig_dtype = images.dtype if orig_dtype in (dtypes.float16, dtypes.float32): flt_images = images else: flt_images = convert_image_dtype(images, dtypes.float32) adjusted = gen_image_ops.adjust_contrastv2( flt_images, contrast_factor=contrast_factor, name=name) return convert_image_dtype(adjusted, orig_dtype, saturate=True) @tf_export('image.adjust_gamma') def adjust_gamma(image, gamma=1, gain=1): """Performs Gamma Correction on the input image. Also known as Power Law Transform. This function converts the input images at first to float representation, then transforms them pixelwise according to the equation `Out = gain * In**gamma`, and then converts the back to the original data type. Args: image : RGB image or images to adjust. gamma : A scalar or tensor. Non negative real number. gain : A scalar or tensor. The constant multiplier. Returns: A Tensor. A Gamma-adjusted tensor of the same shape and type as `image`. Usage Example: ```python >> import tensorflow as tf >> x = tf.random.normal(shape=(256, 256, 3)) >> tf.image.adjust_gamma(x, 0.2) ``` Raises: ValueError: If gamma is negative. Notes: For gamma greater than 1, the histogram will shift towards left and the output image will be darker than the input image. For gamma less than 1, the histogram will shift towards right and the output image will be brighter than the input image. References: [1] http://en.wikipedia.org/wiki/Gamma_correction """ with ops.name_scope(None, 'adjust_gamma', [image, gamma, gain]) as name: image = ops.convert_to_tensor(image, name='image') # Remember original dtype to so we can convert back if needed orig_dtype = image.dtype if orig_dtype in [dtypes.float16, dtypes.float32]: flt_image = image else: flt_image = convert_image_dtype(image, dtypes.float32) assert_op = _assert(gamma >= 0, ValueError, 'Gamma should be a non-negative real number.') if assert_op: gamma = control_flow_ops.with_dependencies(assert_op, gamma) # According to the definition of gamma correction. adjusted_img = gain * flt_image**gamma return convert_image_dtype(adjusted_img, orig_dtype, saturate=True) @tf_export('image.convert_image_dtype') def convert_image_dtype(image, dtype, saturate=False, name=None): """Convert `image` to `dtype`, scaling its values if needed. Images that are represented using floating point values are expected to have values in the range [0,1). Image data stored in integer data types are expected to have values in the range `[0,MAX]`, where `MAX` is the largest positive representable number for the data type. This op converts between data types, scaling the values appropriately before casting. Note that converting from floating point inputs to integer types may lead to over/underflow problems. Set saturate to `True` to avoid such problem in problematic conversions. If enabled, saturation will clip the output into the allowed range before performing a potentially dangerous cast (and only before performing such a cast, i.e., when casting from a floating point to an integer type, and when casting from a signed to an unsigned type; `saturate` has no effect on casts between floats, or on casts that increase the type's range). Args: image: An image. dtype: A `DType` to convert `image` to. saturate: If `True`, clip the input before casting (if necessary). name: A name for this operation (optional). 
Returns: `image`, converted to `dtype`. """ image = ops.convert_to_tensor(image, name='image') if dtype == image.dtype: return array_ops.identity(image, name=name) with ops.name_scope(name, 'convert_image', [image]) as name: # Both integer: use integer multiplication in the larger range if image.dtype.is_integer and dtype.is_integer: scale_in = image.dtype.max scale_out = dtype.max if scale_in > scale_out: # Scaling down, scale first, then cast. The scaling factor will # cause in.max to be mapped to above out.max but below out.max+1, # so that the output is safely in the supported range. scale = (scale_in + 1) // (scale_out + 1) scaled = math_ops.div(image, scale) if saturate: return math_ops.saturate_cast(scaled, dtype, name=name) else: return math_ops.cast(scaled, dtype, name=name) else: # Scaling up, cast first, then scale. The scale will not map in.max to # out.max, but converting back and forth should result in no change. if saturate: cast = math_ops.saturate_cast(image, dtype) else: cast = math_ops.cast(image, dtype) scale = (scale_out + 1) // (scale_in + 1) return math_ops.multiply(cast, scale, name=name) elif image.dtype.is_floating and dtype.is_floating: # Both float: Just cast, no possible overflows in the allowed ranges. # Note: We're ignoring float overflows. If your image dynamic range # exceeds float range, you're on your own. return math_ops.cast(image, dtype, name=name) else: if image.dtype.is_integer: # Converting to float: first cast, then scale. No saturation possible. cast = math_ops.cast(image, dtype) scale = 1. / image.dtype.max return math_ops.multiply(cast, scale, name=name) else: # Converting from float: first scale, then cast scale = dtype.max + 0.5 # avoid rounding problems in the cast scaled = math_ops.multiply(image, scale) if saturate: return math_ops.saturate_cast(scaled, dtype, name=name) else: return math_ops.cast(scaled, dtype, name=name) @tf_export('image.rgb_to_grayscale') def rgb_to_grayscale(images, name=None): """Converts one or more images from RGB to Grayscale. Outputs a tensor of the same `DType` and rank as `images`. The size of the last dimension of the output is 1, containing the Grayscale value of the pixels. Args: images: The RGB tensor to convert. Last dimension must have size 3 and should contain RGB values. name: A name for the operation (optional). Returns: The converted grayscale image(s). """ with ops.name_scope(name, 'rgb_to_grayscale', [images]) as name: images = ops.convert_to_tensor(images, name='images') # Remember original dtype to so we can convert back if needed orig_dtype = images.dtype flt_image = convert_image_dtype(images, dtypes.float32) # Reference for converting between RGB and grayscale. # https://en.wikipedia.org/wiki/Luma_%28video%29 rgb_weights = [0.2989, 0.5870, 0.1140] gray_float = math_ops.tensordot(flt_image, rgb_weights, [-1, -1]) gray_float = array_ops.expand_dims(gray_float, -1) return convert_image_dtype(gray_float, orig_dtype, name=name) @tf_export('image.grayscale_to_rgb') def grayscale_to_rgb(images, name=None): """Converts one or more images from Grayscale to RGB. Outputs a tensor of the same `DType` and rank as `images`. The size of the last dimension of the output is 3, containing the RGB value of the pixels. The input images' last dimension must be size 1. Args: images: The Grayscale tensor to convert. Last dimension must be size 1. name: A name for the operation (optional). Returns: The converted grayscale image(s). 
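Usage Example (a minimal sketch; the input size is arbitrary):

```python
import tensorflow as tf

gray = tf.random.uniform([64, 64, 1])
rgb = tf.image.grayscale_to_rgb(gray)
print(rgb.shape)  # (64, 64, 3); the single channel is tiled across R, G and B
```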
""" with ops.name_scope(name, 'grayscale_to_rgb', [images]) as name: images = _AssertGrayscaleImage(images) images = ops.convert_to_tensor(images, name='images') rank_1 = array_ops.expand_dims(array_ops.rank(images) - 1, 0) shape_list = ([array_ops.ones(rank_1, dtype=dtypes.int32)] + [array_ops.expand_dims(3, 0)]) multiples = array_ops.concat(shape_list, 0) rgb = array_ops.tile(images, multiples, name=name) rgb.set_shape(images.get_shape()[:-1].concatenate([3])) return rgb # pylint: disable=invalid-name @tf_export('image.random_hue') def random_hue(image, max_delta, seed=None): """Adjust the hue of RGB images by a random factor. Equivalent to `adjust_hue()` but uses a `delta` randomly picked in the interval `[-max_delta, max_delta]`. `max_delta` must be in the interval `[0, 0.5]`. Args: image: RGB image or images. Size of the last dimension must be 3. max_delta: float. Maximum value for the random delta. seed: An operation-specific seed. It will be used in conjunction with the graph-level seed to determine the real seeds that will be used in this operation. Please see the documentation of set_random_seed for its interaction with the graph-level random seed. Returns: Adjusted image(s), same shape and DType as `image`. Raises: ValueError: if `max_delta` is invalid. """ if max_delta > 0.5: raise ValueError('max_delta must be <= 0.5.') if max_delta < 0: raise ValueError('max_delta must be non-negative.') delta = random_ops.random_uniform([], -max_delta, max_delta, seed=seed) return adjust_hue(image, delta) @tf_export('image.adjust_hue') def adjust_hue(image, delta, name=None): """Adjust hue of RGB images. This is a convenience method that converts an RGB image to float representation, converts it to HSV, add an offset to the hue channel, converts back to RGB and then back to the original data type. If several adjustments are chained it is advisable to minimize the number of redundant conversions. `image` is an RGB image. The image hue is adjusted by converting the image(s) to HSV and rotating the hue channel (H) by `delta`. The image is then converted back to RGB. `delta` must be in the interval `[-1, 1]`. Args: image: RGB image or images. Size of the last dimension must be 3. delta: float. How much to add to the hue channel. name: A name for this operation (optional). Returns: Adjusted image(s), same shape and DType as `image`. Usage Example: ```python >> import tensorflow as tf >> x = tf.random.normal(shape=(256, 256, 3)) >> tf.image.adjust_hue(x, 0.2) ``` """ with ops.name_scope(name, 'adjust_hue', [image]) as name: image = ops.convert_to_tensor(image, name='image') # Remember original dtype to so we can convert back if needed orig_dtype = image.dtype if orig_dtype in (dtypes.float16, dtypes.float32): flt_image = image else: flt_image = convert_image_dtype(image, dtypes.float32) rgb_altered = gen_image_ops.adjust_hue(flt_image, delta) return convert_image_dtype(rgb_altered, orig_dtype) # pylint: disable=invalid-name @tf_export('image.random_jpeg_quality') def random_jpeg_quality(image, min_jpeg_quality, max_jpeg_quality, seed=None): """Randomly changes jpeg encoding quality for inducing jpeg noise. `min_jpeg_quality` must be in the interval `[0, 100]` and less than `max_jpeg_quality`. `max_jpeg_quality` must be in the interval `[0, 100]`. Args: image: RGB image or images. Size of the last dimension must be 3. min_jpeg_quality: Minimum jpeg encoding quality to use. max_jpeg_quality: Maximum jpeg encoding quality to use. seed: An operation-specific seed. 
It will be used in conjunction with the graph-level seed to determine the real seeds that will be used in this operation. Please see the documentation of set_random_seed for its interaction with the graph-level random seed. Returns: Adjusted image(s), same shape and DType as `image`. Raises: ValueError: if `min_jpeg_quality` or `max_jpeg_quality` is invalid. """ if (min_jpeg_quality < 0 or max_jpeg_quality < 0 or min_jpeg_quality > 100 or max_jpeg_quality > 100): raise ValueError('jpeg encoding range must be between 0 and 100.') if min_jpeg_quality >= max_jpeg_quality: raise ValueError('`min_jpeg_quality` must be less than `max_jpeg_quality`.') if compat.forward_compatible(2019, 4, 4): jpeg_quality = random_ops.random_uniform([], min_jpeg_quality, max_jpeg_quality, seed=seed, dtype=dtypes.int32) else: np.random.seed(seed) jpeg_quality = np.random.randint(min_jpeg_quality, max_jpeg_quality) return adjust_jpeg_quality(image, jpeg_quality) @tf_export('image.adjust_jpeg_quality') def adjust_jpeg_quality(image, jpeg_quality, name=None): """Adjust jpeg encoding quality of an RGB image. This is a convenience method that adjusts jpeg encoding quality of an RGB image. `image` is an RGB image. The image's encoding quality is adjusted to `jpeg_quality`. `jpeg_quality` must be in the interval `[0, 100]`. Args: image: RGB image or images. Size of the last dimension must be 3. jpeg_quality: Python int or Tensor of type int32. jpeg encoding quality. name: A name for this operation (optional). Returns: Adjusted image(s), same shape and DType as `image`. """ with ops.name_scope(name, 'adjust_jpeg_quality', [image]) as name: image = ops.convert_to_tensor(image, name='image') # Remember original dtype to so we can convert back if needed orig_dtype = image.dtype # Convert to uint8 image = convert_image_dtype(image, dtypes.uint8) # Encode image to jpeg with given jpeg quality if compat.forward_compatible(2019, 4, 4): if not _is_tensor(jpeg_quality): # If jpeg_quality is a int (not tensor). jpeg_quality = ops.convert_to_tensor(jpeg_quality, dtype=dtypes.int32) image = gen_image_ops.encode_jpeg_variable_quality(image, jpeg_quality) else: image = gen_image_ops.encode_jpeg(image, quality=jpeg_quality) # Decode jpeg image image = gen_image_ops.decode_jpeg(image) # Convert back to original dtype and return return convert_image_dtype(image, orig_dtype) @tf_export('image.random_saturation') def random_saturation(image, lower, upper, seed=None): """Adjust the saturation of RGB images by a random factor. Equivalent to `adjust_saturation()` but uses a `saturation_factor` randomly picked in the interval `[lower, upper]`. Args: image: RGB image or images. Size of the last dimension must be 3. lower: float. Lower bound for the random saturation factor. upper: float. Upper bound for the random saturation factor. seed: An operation-specific seed. It will be used in conjunction with the graph-level seed to determine the real seeds that will be used in this operation. Please see the documentation of set_random_seed for its interaction with the graph-level random seed. Returns: Adjusted image(s), same shape and DType as `image`. Raises: ValueError: if `upper <= lower` or if `lower < 0`. 
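
  Usage Example (a minimal sketch; the bounds shown are arbitrary):

  ```python
  import tensorflow as tf

  # Randomly rescale the saturation of an RGB image by a factor in [0.5, 1.5].
  image = tf.zeros([256, 256, 3])
  saturated = tf.image.random_saturation(image, lower=0.5, upper=1.5)
  ```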
""" if upper <= lower: raise ValueError('upper must be > lower.') if lower < 0: raise ValueError('lower must be non-negative.') # Pick a float in [lower, upper] saturation_factor = random_ops.random_uniform([], lower, upper, seed=seed) return adjust_saturation(image, saturation_factor) @tf_export('image.adjust_saturation') def adjust_saturation(image, saturation_factor, name=None): """Adjust saturation of RGB images. This is a convenience method that converts RGB images to float representation, converts them to HSV, add an offset to the saturation channel, converts back to RGB and then back to the original data type. If several adjustments are chained it is advisable to minimize the number of redundant conversions. `image` is an RGB image or images. The image saturation is adjusted by converting the images to HSV and multiplying the saturation (S) channel by `saturation_factor` and clipping. The images are then converted back to RGB. Args: image: RGB image or images. Size of the last dimension must be 3. saturation_factor: float. Factor to multiply the saturation by. name: A name for this operation (optional). Returns: Adjusted image(s), same shape and DType as `image`. """ with ops.name_scope(name, 'adjust_saturation', [image]) as name: image = ops.convert_to_tensor(image, name='image') # Remember original dtype to so we can convert back if needed orig_dtype = image.dtype if orig_dtype in (dtypes.float16, dtypes.float32): flt_image = image else: flt_image = convert_image_dtype(image, dtypes.float32) adjusted = gen_image_ops.adjust_saturation(flt_image, saturation_factor) return convert_image_dtype(adjusted, orig_dtype) @tf_export('io.is_jpeg', 'image.is_jpeg', v1=['io.is_jpeg', 'image.is_jpeg']) def is_jpeg(contents, name=None): r"""Convenience function to check if the 'contents' encodes a JPEG image. Args: contents: 0-D `string`. The encoded image bytes. name: A name for the operation (optional) Returns: A scalar boolean tensor indicating if 'contents' may be a JPEG image. is_jpeg is susceptible to false positives. """ # Normal JPEGs start with \xff\xd8\xff\xe0 # JPEG with EXIF starts with \xff\xd8\xff\xe1 # Use \xff\xd8\xff to cover both. with ops.name_scope(name, 'is_jpeg'): substr = string_ops.substr(contents, 0, 3) return math_ops.equal(substr, b'\xff\xd8\xff', name=name) def _is_png(contents, name=None): r"""Convenience function to check if the 'contents' encodes a PNG image. Args: contents: 0-D `string`. The encoded image bytes. name: A name for the operation (optional) Returns: A scalar boolean tensor indicating if 'contents' may be a PNG image. is_png is susceptible to false positives. 
""" with ops.name_scope(name, 'is_png'): substr = string_ops.substr(contents, 0, 3) return math_ops.equal(substr, b'\211PN', name=name) tf_export( 'io.decode_and_crop_jpeg', 'image.decode_and_crop_jpeg', v1=['io.decode_and_crop_jpeg', 'image.decode_and_crop_jpeg'])( gen_image_ops.decode_and_crop_jpeg) tf_export( 'io.decode_bmp', 'image.decode_bmp', v1=['io.decode_bmp', 'image.decode_bmp'])( gen_image_ops.decode_bmp) tf_export( 'io.decode_gif', 'image.decode_gif', v1=['io.decode_gif', 'image.decode_gif'])( gen_image_ops.decode_gif) tf_export( 'io.decode_jpeg', 'image.decode_jpeg', v1=['io.decode_jpeg', 'image.decode_jpeg'])( gen_image_ops.decode_jpeg) tf_export( 'io.decode_png', 'image.decode_png', v1=['io.decode_png', 'image.decode_png'])( gen_image_ops.decode_png) tf_export( 'io.encode_jpeg', 'image.encode_jpeg', v1=['io.encode_jpeg', 'image.encode_jpeg'])( gen_image_ops.encode_jpeg) tf_export( 'io.extract_jpeg_shape', 'image.extract_jpeg_shape', v1=['io.extract_jpeg_shape', 'image.extract_jpeg_shape'])( gen_image_ops.extract_jpeg_shape) @tf_export( 'io.decode_image', 'image.decode_image', v1=['io.decode_image', 'image.decode_image']) def decode_image(contents, channels=None, dtype=dtypes.uint8, name=None, expand_animations=True): """Function for `decode_bmp`, `decode_gif`, `decode_jpeg`, and `decode_png`. Detects whether an image is a BMP, GIF, JPEG, or PNG, and performs the appropriate operation to convert the input bytes `string` into a `Tensor` of type `dtype`. Note: `decode_gif` returns a 4-D array `[num_frames, height, width, 3]`, as opposed to `decode_bmp`, `decode_jpeg` and `decode_png`, which return 3-D arrays `[height, width, num_channels]`. Make sure to take this into account when constructing your graph if you are intermixing GIF files with BMP, JPEG, and/or PNG files. Alternately, set the `expand_animations` argument of this function to `False`, in which case the op will return 3-dimensional tensors and will truncate animated GIF files to the first frame. Args: contents: 0-D `string`. The encoded image bytes. channels: An optional `int`. Defaults to `0`. Number of color channels for the decoded image. dtype: The desired DType of the returned `Tensor`. name: A name for the operation (optional) expand_animations: Controls the shape of the returned op's output. If `True`, the returned op will produce a 3-D tensor for PNG, JPEG, and BMP files; and a 4-D tensor for all GIFs, whether animated or not. If, `False`, the returned op will produce a 3-D tensor for all file types and will truncate animated GIFs to the first frame. Returns: `Tensor` with type `dtype` and a 3- or 4-dimensional shape, depending on the file type and the value of the `expand_animations` parameter. Raises: ValueError: On incorrect number of channels. 
""" with ops.name_scope(name, 'decode_image'): if channels not in (None, 0, 1, 3, 4): raise ValueError('channels must be in (None, 0, 1, 3, 4)') substr = string_ops.substr(contents, 0, 3) def _bmp(): """Decodes a BMP image.""" signature = string_ops.substr(contents, 0, 2) # Create assert op to check that bytes are BMP decodable is_bmp = math_ops.equal(signature, 'BM', name='is_bmp') decode_msg = 'Unable to decode bytes as JPEG, PNG, GIF, or BMP' assert_decode = control_flow_ops.Assert(is_bmp, [decode_msg]) bmp_channels = 0 if channels is None else channels good_channels = math_ops.not_equal(bmp_channels, 1, name='check_channels') channels_msg = 'Channels must be in (None, 0, 3) when decoding BMP images' assert_channels = control_flow_ops.Assert(good_channels, [channels_msg]) with ops.control_dependencies([assert_decode, assert_channels]): return convert_image_dtype(gen_image_ops.decode_bmp(contents), dtype) def _gif(): """Decodes a GIF image.""" # Create assert to make sure that channels is not set to 1 # Already checked above that channels is in (None, 0, 1, 3) gif_channels = 0 if channels is None else channels good_channels = math_ops.logical_and( math_ops.not_equal(gif_channels, 1, name='check_gif_channels'), math_ops.not_equal(gif_channels, 4, name='check_gif_channels')) channels_msg = 'Channels must be in (None, 0, 3) when decoding GIF images' assert_channels = control_flow_ops.Assert(good_channels, [channels_msg]) with ops.control_dependencies([assert_channels]): result = convert_image_dtype(gen_image_ops.decode_gif(contents), dtype) if not expand_animations: # For now we decode animated GIFs fully and toss out all but the # first frame when expand_animations is False result = array_ops.gather(result, 0) return result def check_gif(): # Create assert op to check that bytes are GIF decodable is_gif = math_ops.equal(substr, b'\x47\x49\x46', name='is_gif') return control_flow_ops.cond(is_gif, _gif, _bmp, name='cond_gif') def _png(): """Decodes a PNG image.""" return convert_image_dtype( gen_image_ops.decode_png( contents, channels, dtype=dtypes.uint8 if dtype == dtypes.uint8 else dtypes.uint16), dtype) def check_png(): """Checks if an image is PNG.""" return control_flow_ops.cond( _is_png(contents), _png, check_gif, name='cond_png') def _jpeg(): """Decodes a jpeg image.""" jpeg_channels = 0 if channels is None else channels good_channels = math_ops.not_equal( jpeg_channels, 4, name='check_jpeg_channels') channels_msg = ('Channels must be in (None, 0, 1, 3) when decoding JPEG ' 'images') assert_channels = control_flow_ops.Assert(good_channels, [channels_msg]) with ops.control_dependencies([assert_channels]): return convert_image_dtype( gen_image_ops.decode_jpeg(contents, channels), dtype) # Decode normal JPEG images (start with \xff\xd8\xff\xe0) # as well as JPEG images with EXIF data (start with \xff\xd8\xff\xe1). return control_flow_ops.cond( is_jpeg(contents), _jpeg, check_png, name='cond_jpeg') @tf_export('image.total_variation') def total_variation(images, name=None): """Calculate and return the total variation for one or more images. The total variation is the sum of the absolute differences for neighboring pixel-values in the input images. This measures how much noise is in the images. This can be used as a loss-function during optimization so as to suppress noise in images. 
If you have a batch of images, then you should calculate the scalar loss-value as the sum: `loss = tf.reduce_sum(tf.image.total_variation(images))` This implements the anisotropic 2-D version of the formula described here: https://en.wikipedia.org/wiki/Total_variation_denoising Args: images: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. name: A name for the operation (optional). Raises: ValueError: if images.shape is not a 3-D or 4-D vector. Returns: The total variation of `images`. If `images` was 4-D, return a 1-D float Tensor of shape `[batch]` with the total variation for each image in the batch. If `images` was 3-D, return a scalar float with the total variation for that image. """ with ops.name_scope(name, 'total_variation'): ndims = images.get_shape().ndims if ndims == 3: # The input is a single image with shape [height, width, channels]. # Calculate the difference of neighboring pixel-values. # The images are shifted one pixel along the height and width by slicing. pixel_dif1 = images[1:, :, :] - images[:-1, :, :] pixel_dif2 = images[:, 1:, :] - images[:, :-1, :] # Sum for all axis. (None is an alias for all axis.) sum_axis = None elif ndims == 4: # The input is a batch of images with shape: # [batch, height, width, channels]. # Calculate the difference of neighboring pixel-values. # The images are shifted one pixel along the height and width by slicing. pixel_dif1 = images[:, 1:, :, :] - images[:, :-1, :, :] pixel_dif2 = images[:, :, 1:, :] - images[:, :, :-1, :] # Only sum for the last 3 axis. # This results in a 1-D tensor with the total variation for each image. sum_axis = [1, 2, 3] else: raise ValueError('\'images\' must be either 3 or 4-dimensional.') # Calculate the total variation by taking the absolute value of the # pixel-differences and summing over the appropriate axis. tot_var = ( math_ops.reduce_sum(math_ops.abs(pixel_dif1), axis=sum_axis) + math_ops.reduce_sum(math_ops.abs(pixel_dif2), axis=sum_axis)) return tot_var @tf_export('image.sample_distorted_bounding_box', v1=[]) def sample_distorted_bounding_box_v2(image_size, bounding_boxes, seed=0, min_object_covered=0.1, aspect_ratio_range=None, area_range=None, max_attempts=None, use_image_if_no_bounding_boxes=None, name=None): """Generate a single randomly distorted bounding box for an image. Bounding box annotations are often supplied in addition to ground-truth labels in image recognition or object localization tasks. A common technique for training such a system is to randomly distort an image while preserving its content, i.e. *data augmentation*. This Op outputs a randomly distorted localization of an object, i.e. bounding box, given an `image_size`, `bounding_boxes` and a series of constraints. The output of this Op is a single bounding box that may be used to crop the original image. The output is returned as 3 tensors: `begin`, `size` and `bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the image. The latter may be supplied to `tf.image.draw_bounding_boxes` to visualize what the bounding box looks like. Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. The bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and height of the underlying image. For example, ```python # Generate a single distorted bounding box. 
begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box( tf.shape(image), bounding_boxes=bounding_boxes, min_object_covered=0.1) # Draw the bounding box in an image summary. image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0), bbox_for_draw) tf.compat.v1.summary.image('images_with_box', image_with_box) # Employ the bounding box to distort the image. distorted_image = tf.slice(image, begin, size) ``` Note that if no bounding box information is available, setting `use_image_if_no_bounding_boxes = true` will assume there is a single implicit bounding box covering the whole image. If `use_image_if_no_bounding_boxes` is false and no bounding boxes are supplied, an error is raised. Args: image_size: A `Tensor`. Must be one of the following types: `uint8`, `int8`, `int16`, `int32`, `int64`. 1-D, containing `[height, width, channels]`. bounding_boxes: A `Tensor` of type `float32`. 3-D with shape `[batch, N, 4]` describing the N bounding boxes associated with the image. seed: An optional `int`. Defaults to `0`. If `seed` is set to non-zero, the random number generator is seeded by the given `seed`. Otherwise, it is seeded by a random seed. min_object_covered: A Tensor of type `float32`. Defaults to `0.1`. The cropped area of the image must contain at least this fraction of any bounding box supplied. The value of this parameter should be non-negative. In the case of 0, the cropped area does not need to overlap any of the bounding boxes supplied. aspect_ratio_range: An optional list of `floats`. Defaults to `[0.75, 1.33]`. The cropped area of the image must have an aspect `ratio = width / height` within this range. area_range: An optional list of `floats`. Defaults to `[0.05, 1]`. The cropped area of the image must contain a fraction of the supplied image within this range. max_attempts: An optional `int`. Defaults to `100`. Number of attempts at generating a cropped region of the image of the specified constraints. After `max_attempts` failures, return the entire image. use_image_if_no_bounding_boxes: An optional `bool`. Defaults to `False`. Controls behavior if no bounding boxes supplied. If true, assume an implicit bounding box covering the whole input. If false, raise an error. name: A name for the operation (optional). Returns: A tuple of `Tensor` objects (begin, size, bboxes). begin: A `Tensor`. Has the same type as `image_size`. 1-D, containing `[offset_height, offset_width, 0]`. Provide as input to `tf.slice`. size: A `Tensor`. Has the same type as `image_size`. 1-D, containing `[target_height, target_width, -1]`. Provide as input to `tf.slice`. bboxes: A `Tensor` of type `float32`. 3-D with shape `[1, 1, 4]` containing the distorted bounding box. Provide as input to `tf.image.draw_bounding_boxes`. """ seed1, seed2 = random_seed.get_seed(seed) if seed else (0, 0) return sample_distorted_bounding_box(image_size, bounding_boxes, seed1, seed2, min_object_covered, aspect_ratio_range, area_range, max_attempts, use_image_if_no_bounding_boxes, name) @tf_export(v1=['image.sample_distorted_bounding_box']) @deprecation.deprecated( date=None, instructions='`seed2` arg is deprecated.' 'Use sample_distorted_bounding_box_v2 instead.') def sample_distorted_bounding_box(image_size, bounding_boxes, seed=None, seed2=None, min_object_covered=0.1, aspect_ratio_range=None, area_range=None, max_attempts=None, use_image_if_no_bounding_boxes=None, name=None): """Generate a single randomly distorted bounding box for an image. 
Bounding box annotations are often supplied in addition to ground-truth labels in image recognition or object localization tasks. A common technique for training such a system is to randomly distort an image while preserving its content, i.e. *data augmentation*. This Op outputs a randomly distorted localization of an object, i.e. bounding box, given an `image_size`, `bounding_boxes` and a series of constraints. The output of this Op is a single bounding box that may be used to crop the original image. The output is returned as 3 tensors: `begin`, `size` and `bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the image. The latter may be supplied to `tf.image.draw_bounding_boxes` to visualize what the bounding box looks like. Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. The bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and height of the underlying image. For example, ```python # Generate a single distorted bounding box. begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box( tf.shape(image), bounding_boxes=bounding_boxes, min_object_covered=0.1) # Draw the bounding box in an image summary. image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0), bbox_for_draw) tf.compat.v1.summary.image('images_with_box', image_with_box) # Employ the bounding box to distort the image. distorted_image = tf.slice(image, begin, size) ``` Note that if no bounding box information is available, setting `use_image_if_no_bounding_boxes = True` will assume there is a single implicit bounding box covering the whole image. If `use_image_if_no_bounding_boxes` is false and no bounding boxes are supplied, an error is raised. Args: image_size: A `Tensor`. Must be one of the following types: `uint8`, `int8`, `int16`, `int32`, `int64`. 1-D, containing `[height, width, channels]`. bounding_boxes: A `Tensor` of type `float32`. 3-D with shape `[batch, N, 4]` describing the N bounding boxes associated with the image. seed: An optional `int`. Defaults to `0`. If either `seed` or `seed2` are set to non-zero, the random number generator is seeded by the given `seed`. Otherwise, it is seeded by a random seed. seed2: An optional `int`. Defaults to `0`. A second seed to avoid seed collision. min_object_covered: A Tensor of type `float32`. Defaults to `0.1`. The cropped area of the image must contain at least this fraction of any bounding box supplied. The value of this parameter should be non-negative. In the case of 0, the cropped area does not need to overlap any of the bounding boxes supplied. aspect_ratio_range: An optional list of `floats`. Defaults to `[0.75, 1.33]`. The cropped area of the image must have an aspect ratio = width / height within this range. area_range: An optional list of `floats`. Defaults to `[0.05, 1]`. The cropped area of the image must contain a fraction of the supplied image within this range. max_attempts: An optional `int`. Defaults to `100`. Number of attempts at generating a cropped region of the image of the specified constraints. After `max_attempts` failures, return the entire image. use_image_if_no_bounding_boxes: An optional `bool`. Defaults to `False`. Controls behavior if no bounding boxes supplied. If true, assume an implicit bounding box covering the whole input. If false, raise an error. name: A name for the operation (optional). Returns: A tuple of `Tensor` objects (begin, size, bboxes). begin: A `Tensor`. Has the same type as `image_size`. 
1-D, containing `[offset_height, offset_width, 0]`. Provide as input to `tf.slice`. size: A `Tensor`. Has the same type as `image_size`. 1-D, containing `[target_height, target_width, -1]`. Provide as input to `tf.slice`. bboxes: A `Tensor` of type `float32`. 3-D with shape `[1, 1, 4]` containing the distorted bounding box. Provide as input to `tf.image.draw_bounding_boxes`. """ with ops.name_scope(name, 'sample_distorted_bounding_box'): return gen_image_ops.sample_distorted_bounding_box_v2( image_size, bounding_boxes, seed=seed, seed2=seed2, min_object_covered=min_object_covered, aspect_ratio_range=aspect_ratio_range, area_range=area_range, max_attempts=max_attempts, use_image_if_no_bounding_boxes=use_image_if_no_bounding_boxes, name=name) @tf_export('image.non_max_suppression') def non_max_suppression(boxes, scores, max_output_size, iou_threshold=0.5, score_threshold=float('-inf'), name=None): """Greedily selects a subset of bounding boxes in descending order of score. Prunes away boxes that have high intersection-over-union (IOU) overlap with previously selected boxes. Bounding boxes are supplied as `[y1, x1, y2, x2]`, where `(y1, x1)` and `(y2, x2)` are the coordinates of any diagonal pair of box corners and the coordinates can be provided as normalized (i.e., lying in the interval `[0, 1]`) or absolute. Note that this algorithm is agnostic to where the origin is in the coordinate system. Note that this algorithm is invariant to orthogonal transformations and translations of the coordinate system; thus translating or reflections of the coordinate system result in the same boxes being selected by the algorithm. The output of this operation is a set of integers indexing into the input collection of bounding boxes representing the selected boxes. The bounding box coordinates corresponding to the selected indices can then be obtained using the `tf.gather` operation. For example: ```python selected_indices = tf.image.non_max_suppression( boxes, scores, max_output_size, iou_threshold) selected_boxes = tf.gather(boxes, selected_indices) ``` Args: boxes: A 2-D float `Tensor` of shape `[num_boxes, 4]`. scores: A 1-D float `Tensor` of shape `[num_boxes]` representing a single score corresponding to each box (each row of boxes). max_output_size: A scalar integer `Tensor` representing the maximum number of boxes to be selected by non max suppression. iou_threshold: A float representing the threshold for deciding whether boxes overlap too much with respect to IOU. score_threshold: A float representing the threshold for deciding when to remove boxes based on score. name: A name for the operation (optional). Returns: selected_indices: A 1-D integer `Tensor` of shape `[M]` representing the selected indices from the boxes tensor, where `M <= max_output_size`. """ with ops.name_scope(name, 'non_max_suppression'): iou_threshold = ops.convert_to_tensor(iou_threshold, name='iou_threshold') score_threshold = ops.convert_to_tensor( score_threshold, name='score_threshold') return gen_image_ops.non_max_suppression_v3(boxes, scores, max_output_size, iou_threshold, score_threshold) @tf_export('image.non_max_suppression_with_scores') def non_max_suppression_with_scores(boxes, scores, max_output_size, iou_threshold=0.5, score_threshold=float('-inf'), soft_nms_sigma=0.0, name=None): """Greedily selects a subset of bounding boxes in descending order of score. Prunes away boxes that have high intersection-over-union (IOU) overlap with previously selected boxes. 
  Bounding boxes are supplied as `[y1, x1, y2, x2]`, where `(y1, x1)` and
  `(y2, x2)` are the coordinates of any diagonal pair of box corners and the
  coordinates can be provided as normalized (i.e., lying in the interval
  `[0, 1]`) or absolute. Note that this algorithm is agnostic to where the
  origin is in the coordinate system. Note that this algorithm is invariant to
  orthogonal transformations and translations of the coordinate system; thus
  translating or reflecting the coordinate system results in the same boxes
  being selected by the algorithm.

  The output of this operation is a set of integers indexing into the input
  collection of bounding boxes representing the selected boxes. The bounding
  box coordinates corresponding to the selected indices can then be obtained
  using the `tf.gather` operation. For example:

  ```python
  selected_indices, selected_scores = tf.image.non_max_suppression_with_scores(
      boxes, scores, max_output_size, iou_threshold=1.0, score_threshold=0.1,
      soft_nms_sigma=0.5)
  selected_boxes = tf.gather(boxes, selected_indices)
  ```

  This function generalizes the `tf.image.non_max_suppression` op by also
  supporting a Soft-NMS (with Gaussian weighting) mode (c.f.
  Bodla et al, https://arxiv.org/abs/1704.04503) where boxes reduce the score
  of other overlapping boxes instead of directly causing them to be pruned.
  Consequently, in contrast to `tf.image.non_max_suppression`,
  `tf.image.non_max_suppression_with_scores` returns the new scores of each
  input box in the second output, `selected_scores`.

  To enable this Soft-NMS mode, set the `soft_nms_sigma` parameter to be larger
  than 0. When `soft_nms_sigma` equals 0, the behavior of
  `tf.image.non_max_suppression_with_scores` is identical to that of
  `tf.image.non_max_suppression` (except for the extra output) both in function
  and in running time.

  Args:
    boxes: A 2-D float `Tensor` of shape `[num_boxes, 4]`.
    scores: A 1-D float `Tensor` of shape `[num_boxes]` representing a single
      score corresponding to each box (each row of boxes).
    max_output_size: A scalar integer `Tensor` representing the maximum number
      of boxes to be selected by non max suppression.
    iou_threshold: A float representing the threshold for deciding whether
      boxes overlap too much with respect to IOU.
    score_threshold: A float representing the threshold for deciding when to
      remove boxes based on score.
    soft_nms_sigma: A scalar float representing the Soft NMS sigma parameter;
      see Bodla et al, https://arxiv.org/abs/1704.04503. When
      `soft_nms_sigma=0.0` (which is the default), we fall back to standard
      (hard) NMS.
    name: A name for the operation (optional).

  Returns:
    selected_indices: A 1-D integer `Tensor` of shape `[M]` representing the
      selected indices from the boxes tensor, where `M <= max_output_size`.
    selected_scores: A 1-D float tensor of shape `[M]` representing the
      corresponding scores for each selected box, where `M <= max_output_size`.
      Scores only differ from corresponding input scores when using Soft NMS
      (i.e.
when `soft_nms_sigma>0`) """ with ops.name_scope(name, 'non_max_suppression_with_scores'): iou_threshold = ops.convert_to_tensor(iou_threshold, name='iou_threshold') score_threshold = ops.convert_to_tensor( score_threshold, name='score_threshold') soft_nms_sigma = ops.convert_to_tensor( soft_nms_sigma, name='soft_nms_sigma') (selected_indices, selected_scores, _ ) = gen_image_ops.non_max_suppression_v5( boxes, scores, max_output_size, iou_threshold, score_threshold, soft_nms_sigma, pad_to_max_output_size=False) return selected_indices, selected_scores @tf_export('image.non_max_suppression_padded') def non_max_suppression_padded(boxes, scores, max_output_size, iou_threshold=0.5, score_threshold=float('-inf'), pad_to_max_output_size=False, name=None): """Greedily selects a subset of bounding boxes in descending order of score. Performs algorithmically equivalent operation to tf.image.non_max_suppression, with the addition of an optional parameter which zero-pads the output to be of size `max_output_size`. The output of this operation is a tuple containing the set of integers indexing into the input collection of bounding boxes representing the selected boxes and the number of valid indices in the index set. The bounding box coordinates corresponding to the selected indices can then be obtained using the `tf.slice` and `tf.gather` operations. For example: ```python selected_indices_padded, num_valid = tf.image.non_max_suppression_padded( boxes, scores, max_output_size, iou_threshold, score_threshold, pad_to_max_output_size=True) selected_indices = tf.slice( selected_indices_padded, tf.constant([0]), num_valid) selected_boxes = tf.gather(boxes, selected_indices) ``` Args: boxes: A 2-D float `Tensor` of shape `[num_boxes, 4]`. scores: A 1-D float `Tensor` of shape `[num_boxes]` representing a single score corresponding to each box (each row of boxes). max_output_size: A scalar integer `Tensor` representing the maximum number of boxes to be selected by non max suppression. iou_threshold: A float representing the threshold for deciding whether boxes overlap too much with respect to IOU. score_threshold: A float representing the threshold for deciding when to remove boxes based on score. pad_to_max_output_size: bool. If True, size of `selected_indices` output is padded to `max_output_size`. name: A name for the operation (optional). Returns: selected_indices: A 1-D integer `Tensor` of shape `[M]` representing the selected indices from the boxes tensor, where `M <= max_output_size`. valid_outputs: A scalar integer `Tensor` denoting how many elements in `selected_indices` are valid. Valid elements occur first, then padding. """ with ops.name_scope(name, 'non_max_suppression_padded'): iou_threshold = ops.convert_to_tensor(iou_threshold, name='iou_threshold') score_threshold = ops.convert_to_tensor( score_threshold, name='score_threshold') if compat.forward_compatible(2018, 8, 7) or pad_to_max_output_size: return gen_image_ops.non_max_suppression_v4(boxes, scores, max_output_size, iou_threshold, score_threshold, pad_to_max_output_size) else: return gen_image_ops.non_max_suppression_v3(boxes, scores, max_output_size, iou_threshold, score_threshold) @tf_export('image.non_max_suppression_overlaps') def non_max_suppression_with_overlaps(overlaps, scores, max_output_size, overlap_threshold=0.5, score_threshold=float('-inf'), name=None): """Greedily selects a subset of bounding boxes in descending order of score. Prunes away boxes that have high overlap with previously selected boxes. 
N-by-n overlap values are supplied as square matrix. The output of this operation is a set of integers indexing into the input collection of bounding boxes representing the selected boxes. The bounding box coordinates corresponding to the selected indices can then be obtained using the `tf.gather` operation. For example: ```python selected_indices = tf.image.non_max_suppression_overlaps( overlaps, scores, max_output_size, iou_threshold) selected_boxes = tf.gather(boxes, selected_indices) ``` Args: overlaps: A 2-D float `Tensor` of shape `[num_boxes, num_boxes]`. scores: A 1-D float `Tensor` of shape `[num_boxes]` representing a single score corresponding to each box (each row of boxes). max_output_size: A scalar integer `Tensor` representing the maximum number of boxes to be selected by non max suppression. overlap_threshold: A float representing the threshold for deciding whether boxes overlap too much with respect to the provided overlap values. score_threshold: A float representing the threshold for deciding when to remove boxes based on score. name: A name for the operation (optional). Returns: selected_indices: A 1-D integer `Tensor` of shape `[M]` representing the selected indices from the overlaps tensor, where `M <= max_output_size`. """ with ops.name_scope(name, 'non_max_suppression_overlaps'): overlap_threshold = ops.convert_to_tensor( overlap_threshold, name='overlap_threshold') # pylint: disable=protected-access return gen_image_ops.non_max_suppression_with_overlaps( overlaps, scores, max_output_size, overlap_threshold, score_threshold) # pylint: enable=protected-access _rgb_to_yiq_kernel = [[0.299, 0.59590059, 0.2115], [0.587, -0.27455667, -0.52273617], [0.114, -0.32134392, 0.31119955]] @tf_export('image.rgb_to_yiq') def rgb_to_yiq(images): """Converts one or more images from RGB to YIQ. Outputs a tensor of the same shape as the `images` tensor, containing the YIQ value of the pixels. The output is only well defined if the value in images are in [0,1]. Args: images: 2-D or higher rank. Image data to convert. Last dimension must be size 3. Returns: images: tensor with the same shape as `images`. """ images = ops.convert_to_tensor(images, name='images') kernel = ops.convert_to_tensor( _rgb_to_yiq_kernel, dtype=images.dtype, name='kernel') ndims = images.get_shape().ndims return math_ops.tensordot(images, kernel, axes=[[ndims - 1], [0]]) _yiq_to_rgb_kernel = [[1, 1, 1], [0.95598634, -0.27201283, -1.10674021], [0.6208248, -0.64720424, 1.70423049]] @tf_export('image.yiq_to_rgb') def yiq_to_rgb(images): """Converts one or more images from YIQ to RGB. Outputs a tensor of the same shape as the `images` tensor, containing the RGB value of the pixels. The output is only well defined if the Y value in images are in [0,1], I value are in [-0.5957,0.5957] and Q value are in [-0.5226,0.5226]. Args: images: 2-D or higher rank. Image data to convert. Last dimension must be size 3. Returns: images: tensor with the same shape as `images`. """ images = ops.convert_to_tensor(images, name='images') kernel = ops.convert_to_tensor( _yiq_to_rgb_kernel, dtype=images.dtype, name='kernel') ndims = images.get_shape().ndims return math_ops.tensordot(images, kernel, axes=[[ndims - 1], [0]]) _rgb_to_yuv_kernel = [[0.299, -0.14714119, 0.61497538], [0.587, -0.28886916, -0.51496512], [0.114, 0.43601035, -0.10001026]] @tf_export('image.rgb_to_yuv') def rgb_to_yuv(images): """Converts one or more images from RGB to YUV. 
Outputs a tensor of the same shape as the `images` tensor, containing the YUV value of the pixels. The output is only well defined if the value in images are in [0,1]. Args: images: 2-D or higher rank. Image data to convert. Last dimension must be size 3. Returns: images: tensor with the same shape as `images`. """ images = ops.convert_to_tensor(images, name='images') kernel = ops.convert_to_tensor( _rgb_to_yuv_kernel, dtype=images.dtype, name='kernel') ndims = images.get_shape().ndims return math_ops.tensordot(images, kernel, axes=[[ndims - 1], [0]]) _yuv_to_rgb_kernel = [[1, 1, 1], [0, -0.394642334, 2.03206185], [1.13988303, -0.58062185, 0]] @tf_export('image.yuv_to_rgb') def yuv_to_rgb(images): """Converts one or more images from YUV to RGB. Outputs a tensor of the same shape as the `images` tensor, containing the RGB value of the pixels. The output is only well defined if the Y value in images are in [0,1], U and V value are in [-0.5,0.5]. Args: images: 2-D or higher rank. Image data to convert. Last dimension must be size 3. Returns: images: tensor with the same shape as `images`. """ images = ops.convert_to_tensor(images, name='images') kernel = ops.convert_to_tensor( _yuv_to_rgb_kernel, dtype=images.dtype, name='kernel') ndims = images.get_shape().ndims return math_ops.tensordot(images, kernel, axes=[[ndims - 1], [0]]) def _verify_compatible_image_shapes(img1, img2): """Checks if two image tensors are compatible for applying SSIM or PSNR. This function checks if two sets of images have ranks at least 3, and if the last three dimensions match. Args: img1: Tensor containing the first image batch. img2: Tensor containing the second image batch. Returns: A tuple containing: the first tensor shape, the second tensor shape, and a list of control_flow_ops.Assert() ops implementing the checks. Raises: ValueError: When static shape check fails. """ shape1 = img1.get_shape().with_rank_at_least(3) shape2 = img2.get_shape().with_rank_at_least(3) shape1[-3:].assert_is_compatible_with(shape2[-3:]) if shape1.ndims is not None and shape2.ndims is not None: for dim1, dim2 in zip( reversed(shape1.dims[:-3]), reversed(shape2.dims[:-3])): if not (dim1 == 1 or dim2 == 1 or dim1.is_compatible_with(dim2)): raise ValueError('Two images are not compatible: %s and %s' % (shape1, shape2)) # Now assign shape tensors. shape1, shape2 = array_ops.shape_n([img1, img2]) # TODO(sjhwang): Check if shape1[:-3] and shape2[:-3] are broadcastable. checks = [] checks.append( control_flow_ops.Assert( math_ops.greater_equal(array_ops.size(shape1), 3), [shape1, shape2], summarize=10)) checks.append( control_flow_ops.Assert( math_ops.reduce_all(math_ops.equal(shape1[-3:], shape2[-3:])), [shape1, shape2], summarize=10)) return shape1, shape2, checks @tf_export('image.psnr') def psnr(a, b, max_val, name=None): """Returns the Peak Signal-to-Noise Ratio between a and b. This is intended to be used on signals (or images). Produces a PSNR value for each image in batch. The last three dimensions of input are expected to be [height, width, depth]. Example: ```python # Read images from file. im1 = tf.decode_png('path/to/im1.png') im2 = tf.decode_png('path/to/im2.png') # Compute PSNR over tf.uint8 Tensors. psnr1 = tf.image.psnr(im1, im2, max_val=255) # Compute PSNR over tf.float32 Tensors. im1 = tf.image.convert_image_dtype(im1, tf.float32) im2 = tf.image.convert_image_dtype(im2, tf.float32) psnr2 = tf.image.psnr(im1, im2, max_val=1.0) # psnr1 and psnr2 both have type tf.float32 and are almost equal. 
``` Arguments: a: First set of images. b: Second set of images. max_val: The dynamic range of the images (i.e., the difference between the maximum the and minimum allowed values). name: Namespace to embed the computation in. Returns: The scalar PSNR between a and b. The returned tensor has type `tf.float32` and shape [batch_size, 1]. """ with ops.name_scope(name, 'PSNR', [a, b]): # Need to convert the images to float32. Scale max_val accordingly so that # PSNR is computed correctly. max_val = math_ops.cast(max_val, a.dtype) max_val = convert_image_dtype(max_val, dtypes.float32) a = convert_image_dtype(a, dtypes.float32) b = convert_image_dtype(b, dtypes.float32) mse = math_ops.reduce_mean(math_ops.squared_difference(a, b), [-3, -2, -1]) psnr_val = math_ops.subtract( 20 * math_ops.log(max_val) / math_ops.log(10.0), np.float32(10 / np.log(10)) * math_ops.log(mse), name='psnr') _, _, checks = _verify_compatible_image_shapes(a, b) with ops.control_dependencies(checks): return array_ops.identity(psnr_val) def _ssim_helper(x, y, reducer, max_val, compensation=1.0, k1=0.01, k2=0.03): r"""Helper function for computing SSIM. SSIM estimates covariances with weighted sums. The default parameters use a biased estimate of the covariance: Suppose `reducer` is a weighted sum, then the mean estimators are \mu_x = \sum_i w_i x_i, \mu_y = \sum_i w_i y_i, where w_i's are the weighted-sum weights, and covariance estimator is cov_{xy} = \sum_i w_i (x_i - \mu_x) (y_i - \mu_y) with assumption \sum_i w_i = 1. This covariance estimator is biased, since E[cov_{xy}] = (1 - \sum_i w_i ^ 2) Cov(X, Y). For SSIM measure with unbiased covariance estimators, pass as `compensation` argument (1 - \sum_i w_i ^ 2). Arguments: x: First set of images. y: Second set of images. reducer: Function that computes 'local' averages from set of images. For non-convolutional version, this is usually tf.reduce_mean(x, [1, 2]), and for convolutional version, this is usually tf.nn.avg_pool2d or tf.nn.conv2d with weighted-sum kernel. max_val: The dynamic range (i.e., the difference between the maximum possible allowed value and the minimum allowed value). compensation: Compensation factor. See above. k1: Default value 0.01 k2: Default value 0.03 (SSIM is less sensitivity to K2 for lower values, so it would be better if we taken the values in range of 0< K2 <0.4). Returns: A pair containing the luminance measure, and the contrast-structure measure. """ c1 = (k1 * max_val)**2 c2 = (k2 * max_val)**2 # SSIM luminance measure is # (2 * mu_x * mu_y + c1) / (mu_x ** 2 + mu_y ** 2 + c1). mean0 = reducer(x) mean1 = reducer(y) num0 = mean0 * mean1 * 2.0 den0 = math_ops.square(mean0) + math_ops.square(mean1) luminance = (num0 + c1) / (den0 + c1) # SSIM contrast-structure measure is # (2 * cov_{xy} + c2) / (cov_{xx} + cov_{yy} + c2). # Note that `reducer` is a weighted sum with weight w_k, \sum_i w_i = 1, then # cov_{xy} = \sum_i w_i (x_i - \mu_x) (y_i - \mu_y) # = \sum_i w_i x_i y_i - (\sum_i w_i x_i) (\sum_j w_j y_j). num1 = reducer(x * y) * 2.0 den1 = reducer(math_ops.square(x) + math_ops.square(y)) c2 *= compensation cs = (num1 - num0 + c2) / (den1 - den0 + c2) # SSIM score is the product of the luminance and contrast-structure measures. 
return luminance, cs def _fspecial_gauss(size, sigma): """Function to mimic the 'fspecial' gaussian MATLAB function.""" size = ops.convert_to_tensor(size, dtypes.int32) sigma = ops.convert_to_tensor(sigma) coords = math_ops.cast(math_ops.range(size), sigma.dtype) coords -= math_ops.cast(size - 1, sigma.dtype) / 2.0 g = math_ops.square(coords) g *= -0.5 / math_ops.square(sigma) g = array_ops.reshape(g, shape=[1, -1]) + array_ops.reshape(g, shape=[-1, 1]) g = array_ops.reshape(g, shape=[1, -1]) # For tf.nn.softmax(). g = nn_ops.softmax(g) return array_ops.reshape(g, shape=[size, size, 1, 1]) def _ssim_per_channel(img1, img2, max_val=1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03): """Computes SSIM index between img1 and img2 per color channel. This function matches the standard SSIM implementation from: Wang, Z., Bovik, A. C., Sheikh, H. R., & Simoncelli, E. P. (2004). Image quality assessment: from error visibility to structural similarity. IEEE transactions on image processing. Details: - 11x11 Gaussian filter of width 1.5 is used. - k1 = 0.01, k2 = 0.03 as in the original paper. Args: img1: First image batch. img2: Second image batch. max_val: The dynamic range of the images (i.e., the difference between the maximum the and minimum allowed values). filter_size: Default value 11 (size of gaussian filter). filter_sigma: Default value 1.5 (width of gaussian filter). k1: Default value 0.01 k2: Default value 0.03 (SSIM is less sensitivity to K2 for lower values, so it would be better if we taken the values in range of 0< K2 <0.4). Returns: A pair of tensors containing and channel-wise SSIM and contrast-structure values. The shape is [..., channels]. """ filter_size = constant_op.constant(filter_size, dtype=dtypes.int32) filter_sigma = constant_op.constant(filter_sigma, dtype=img1.dtype) shape1, shape2 = array_ops.shape_n([img1, img2]) checks = [ control_flow_ops.Assert( math_ops.reduce_all( math_ops.greater_equal(shape1[-3:-1], filter_size)), [shape1, filter_size], summarize=8), control_flow_ops.Assert( math_ops.reduce_all( math_ops.greater_equal(shape2[-3:-1], filter_size)), [shape2, filter_size], summarize=8) ] # Enforce the check to run before computation. with ops.control_dependencies(checks): img1 = array_ops.identity(img1) # TODO(sjhwang): Try to cache kernels and compensation factor. kernel = _fspecial_gauss(filter_size, filter_sigma) kernel = array_ops.tile(kernel, multiples=[1, 1, shape1[-1], 1]) # The correct compensation factor is `1.0 - tf.reduce_sum(tf.square(kernel))`, # but to match MATLAB implementation of MS-SSIM, we use 1.0 instead. compensation = 1.0 # TODO(sjhwang): Try FFT. # TODO(sjhwang): Gaussian kernel is separable in space. Consider applying # 1-by-n and n-by-1 Gaussain filters instead of an n-by-n filter. def reducer(x): shape = array_ops.shape(x) x = array_ops.reshape(x, shape=array_ops.concat([[-1], shape[-3:]], 0)) y = nn.depthwise_conv2d(x, kernel, strides=[1, 1, 1, 1], padding='VALID') return array_ops.reshape( y, array_ops.concat([shape[:-3], array_ops.shape(y)[1:]], 0)) luminance, cs = _ssim_helper(img1, img2, reducer, max_val, compensation, k1, k2) # Average over the second and the third from the last: height, width. axes = constant_op.constant([-3, -2], dtype=dtypes.int32) ssim_val = math_ops.reduce_mean(luminance * cs, axes) cs = math_ops.reduce_mean(cs, axes) return ssim_val, cs @tf_export('image.ssim') def ssim(img1, img2, max_val, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03): """Computes SSIM index between img1 and img2. 
This function is based on the standard SSIM implementation from: Wang, Z., Bovik, A. C., Sheikh, H. R., & Simoncelli, E. P. (2004). Image quality assessment: from error visibility to structural similarity. IEEE transactions on image processing. Note: The true SSIM is only defined on grayscale. This function does not perform any colorspace transform. (If input is already YUV, then it will compute YUV SSIM average.) Details: - 11x11 Gaussian filter of width 1.5 is used. - k1 = 0.01, k2 = 0.03 as in the original paper. The image sizes must be at least 11x11 because of the filter size. Example: ```python # Read images from file. im1 = tf.decode_png('path/to/im1.png') im2 = tf.decode_png('path/to/im2.png') # Compute SSIM over tf.uint8 Tensors. ssim1 = tf.image.ssim(im1, im2, max_val=255, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03) # Compute SSIM over tf.float32 Tensors. im1 = tf.image.convert_image_dtype(im1, tf.float32) im2 = tf.image.convert_image_dtype(im2, tf.float32) ssim2 = tf.image.ssim(im1, im2, max_val=1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03) # ssim1 and ssim2 both have type tf.float32 and are almost equal. ``` Args: img1: First image batch. img2: Second image batch. max_val: The dynamic range of the images (i.e., the difference between the maximum the and minimum allowed values). filter_size: Default value 11 (size of gaussian filter). filter_sigma: Default value 1.5 (width of gaussian filter). k1: Default value 0.01 k2: Default value 0.03 (SSIM is less sensitivity to K2 for lower values, so it would be better if we taken the values in range of 0< K2 <0.4). Returns: A tensor containing an SSIM value for each image in batch. Returned SSIM values are in range (-1, 1], when pixel values are non-negative. Returns a tensor with shape: broadcast(img1.shape[:-3], img2.shape[:-3]). """ _, _, checks = _verify_compatible_image_shapes(img1, img2) with ops.control_dependencies(checks): img1 = array_ops.identity(img1) # Need to convert the images to float32. Scale max_val accordingly so that # SSIM is computed correctly. max_val = math_ops.cast(max_val, img1.dtype) max_val = convert_image_dtype(max_val, dtypes.float32) img1 = convert_image_dtype(img1, dtypes.float32) img2 = convert_image_dtype(img2, dtypes.float32) ssim_per_channel, _ = _ssim_per_channel(img1, img2, max_val, filter_size, filter_sigma, k1, k2) # Compute average over color channels. return math_ops.reduce_mean(ssim_per_channel, [-1]) # Default values obtained by Wang et al. _MSSSIM_WEIGHTS = (0.0448, 0.2856, 0.3001, 0.2363, 0.1333) @tf_export('image.ssim_multiscale') def ssim_multiscale(img1, img2, max_val, power_factors=_MSSSIM_WEIGHTS, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03): """Computes the MS-SSIM between img1 and img2. This function assumes that `img1` and `img2` are image batches, i.e. the last three dimensions are [height, width, channels]. Note: The true SSIM is only defined on grayscale. This function does not perform any colorspace transform. (If input is already YUV, then it will compute YUV SSIM average.) Original paper: Wang, Zhou, Eero P. Simoncelli, and Alan C. Bovik. "Multiscale structural similarity for image quality assessment." Signals, Systems and Computers, 2004. Arguments: img1: First image batch. img2: Second image batch. Must have the same rank as img1. max_val: The dynamic range of the images (i.e., the difference between the maximum the and minimum allowed values). power_factors: Iterable of weights for each of the scales. 
The number of scales used is the length of the list. Index 0 is the unscaled resolution's weight and each increasing scale corresponds to the image being downsampled by 2. Defaults to (0.0448, 0.2856, 0.3001, 0.2363, 0.1333), which are the values obtained in the original paper. filter_size: Default value 11 (size of gaussian filter). filter_sigma: Default value 1.5 (width of gaussian filter). k1: Default value 0.01 k2: Default value 0.03 (SSIM is less sensitivity to K2 for lower values, so it would be better if we taken the values in range of 0< K2 <0.4). Returns: A tensor containing an MS-SSIM value for each image in batch. The values are in range [0, 1]. Returns a tensor with shape: broadcast(img1.shape[:-3], img2.shape[:-3]). """ with ops.name_scope(None, 'MS-SSIM', [img1, img2]): # Convert to tensor if needed. img1 = ops.convert_to_tensor(img1, name='img1') img2 = ops.convert_to_tensor(img2, name='img2') # Shape checking. shape1, shape2, checks = _verify_compatible_image_shapes(img1, img2) with ops.control_dependencies(checks): img1 = array_ops.identity(img1) # Need to convert the images to float32. Scale max_val accordingly so that # SSIM is computed correctly. max_val = math_ops.cast(max_val, img1.dtype) max_val = convert_image_dtype(max_val, dtypes.float32) img1 = convert_image_dtype(img1, dtypes.float32) img2 = convert_image_dtype(img2, dtypes.float32) imgs = [img1, img2] shapes = [shape1, shape2] # img1 and img2 are assumed to be a (multi-dimensional) batch of # 3-dimensional images (height, width, channels). `heads` contain the batch # dimensions, and `tails` contain the image dimensions. heads = [s[:-3] for s in shapes] tails = [s[-3:] for s in shapes] divisor = [1, 2, 2, 1] divisor_tensor = constant_op.constant(divisor[1:], dtype=dtypes.int32) def do_pad(images, remainder): padding = array_ops.expand_dims(remainder, -1) padding = array_ops.pad(padding, [[1, 0], [1, 0]]) return [array_ops.pad(x, padding, mode='SYMMETRIC') for x in images] mcs = [] for k in range(len(power_factors)): with ops.name_scope(None, 'Scale%d' % k, imgs): if k > 0: # Avg pool takes rank 4 tensors. Flatten leading dimensions. flat_imgs = [ array_ops.reshape(x, array_ops.concat([[-1], t], 0)) for x, t in zip(imgs, tails) ] remainder = tails[0] % divisor_tensor need_padding = math_ops.reduce_any(math_ops.not_equal(remainder, 0)) # pylint: disable=cell-var-from-loop padded = control_flow_ops.cond(need_padding, lambda: do_pad(flat_imgs, remainder), lambda: flat_imgs) # pylint: enable=cell-var-from-loop downscaled = [ nn_ops.avg_pool( x, ksize=divisor, strides=divisor, padding='VALID') for x in padded ] tails = [x[1:] for x in array_ops.shape_n(downscaled)] imgs = [ array_ops.reshape(x, array_ops.concat([h, t], 0)) for x, h, t in zip(downscaled, heads, tails) ] # Overwrite previous ssim value since we only need the last one. ssim_per_channel, cs = _ssim_per_channel( *imgs, max_val=max_val, filter_size=filter_size, filter_sigma=filter_sigma, k1=k1, k2=k2) mcs.append(nn_ops.relu(cs)) # Remove the cs score for the last scale. In the MS-SSIM calculation, # we use the l(p) at the highest scale. l(p) * cs(p) is ssim(p). mcs.pop() # Remove the cs score for the last scale. mcs_and_ssim = array_ops.stack( mcs + [nn_ops.relu(ssim_per_channel)], axis=-1) # Take weighted geometric mean across the scale axis. ms_ssim = math_ops.reduce_prod( math_ops.pow(mcs_and_ssim, power_factors), [-1]) return math_ops.reduce_mean(ms_ssim, [-1]) # Avg over color channels. 
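

# Example (an illustrative sketch, not part of the module API): computing
# MS-SSIM between two image batches with the default power factors. The shapes
# below are arbitrary, provided each of the five default scales stays at least
# as large as the 11x11 filter.
#
#   import tensorflow as tf
#   img1 = tf.random.uniform([4, 256, 256, 3])
#   img2 = tf.random.uniform([4, 256, 256, 3])
#   msssim = tf.image.ssim_multiscale(img1, img2, max_val=1.0)
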
@tf_export('image.image_gradients')
def image_gradients(image):
  """Returns image gradients (dy, dx) for each color channel.

  Both output tensors have the same shape as the input: [batch_size, h, w, d].
  The gradient values are organized so that [I(x+1, y) - I(x, y)] is in
  location (x, y). That means that dy will always have zeros in the last row,
  and dx will always have zeros in the last column.

  Arguments:
    image: Tensor with shape [batch_size, h, w, d].

  Returns:
    Pair of tensors (dy, dx) holding the vertical and horizontal image
    gradients (1-step finite difference).

  Raises:
    ValueError: If `image` is not a 4D tensor.
  """
  if image.get_shape().ndims != 4:
    raise ValueError('image_gradients expects a 4D tensor '
                     '[batch_size, h, w, d], not %s.' % image.get_shape())
  image_shape = array_ops.shape(image)
  batch_size, height, width, depth = array_ops.unstack(image_shape)
  dy = image[:, 1:, :, :] - image[:, :-1, :, :]
  dx = image[:, :, 1:, :] - image[:, :, :-1, :]

  # Return tensors with same size as original image by concatenating
  # zeros. Place the gradient [I(x+1,y) - I(x,y)] on the base pixel (x, y).
  shape = array_ops.stack([batch_size, 1, width, depth])
  dy = array_ops.concat([dy, array_ops.zeros(shape, image.dtype)], 1)
  dy = array_ops.reshape(dy, image_shape)

  shape = array_ops.stack([batch_size, height, 1, depth])
  dx = array_ops.concat([dx, array_ops.zeros(shape, image.dtype)], 2)
  dx = array_ops.reshape(dx, image_shape)

  return dy, dx


@tf_export('image.sobel_edges')
def sobel_edges(image):
  """Returns a tensor holding Sobel edge maps.

  Arguments:
    image: Image tensor with shape [batch_size, h, w, d] and type float32 or
      float64. The image(s) must be 2x2 or larger.

  Returns:
    Tensor holding edge maps for each channel. Returns a tensor with shape
    [batch_size, h, w, d, 2] where the last two dimensions hold
    [[dy[0], dx[0]], [dy[1], dx[1]], ..., [dy[d-1], dx[d-1]]] calculated using
    the Sobel filter.
  """
  # Define vertical and horizontal Sobel filters.
  static_image_shape = image.get_shape()
  image_shape = array_ops.shape(image)
  kernels = [[[-1, -2, -1], [0, 0, 0], [1, 2, 1]],
             [[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]]]
  num_kernels = len(kernels)
  kernels = np.transpose(np.asarray(kernels), (1, 2, 0))
  kernels = np.expand_dims(kernels, -2)
  kernels_tf = constant_op.constant(kernels, dtype=image.dtype)

  kernels_tf = array_ops.tile(
      kernels_tf, [1, 1, image_shape[-1], 1], name='sobel_filters')

  # Use depth-wise convolution to calculate edge maps per channel.
  pad_sizes = [[0, 0], [1, 1], [1, 1], [0, 0]]
  padded = array_ops.pad(image, pad_sizes, mode='REFLECT')

  # Output tensor has shape [batch_size, h, w, d * num_kernels].
  strides = [1, 1, 1, 1]
  output = nn.depthwise_conv2d(padded, kernels_tf, strides, 'VALID')

  # Reshape to [batch_size, h, w, d, num_kernels].
shape = array_ops.concat([image_shape, [num_kernels]], 0) output = array_ops.reshape(output, shape=shape) output.set_shape(static_image_shape.concatenate([num_kernels])) return output def resize_bicubic(images, size, align_corners=False, name=None, half_pixel_centers=False): return gen_image_ops.resize_bicubic( images=images, size=size, align_corners=align_corners, half_pixel_centers=half_pixel_centers, name=name) def resize_bilinear(images, size, align_corners=False, name=None, half_pixel_centers=False): return gen_image_ops.resize_bilinear( images=images, size=size, align_corners=align_corners, half_pixel_centers=half_pixel_centers, name=name) def resize_nearest_neighbor(images, size, align_corners=False, name=None, half_pixel_centers=False): return gen_image_ops.resize_nearest_neighbor( images=images, size=size, align_corners=align_corners, half_pixel_centers=half_pixel_centers, name=name) resize_area_deprecation = deprecation.deprecated( date=None, instructions=( 'Use `tf.image.resize(...method=ResizeMethod.AREA...)` instead.')) tf_export(v1=['image.resize_area'])( resize_area_deprecation(gen_image_ops.resize_area)) resize_bicubic_deprecation = deprecation.deprecated( date=None, instructions=( 'Use `tf.image.resize(...method=ResizeMethod.BICUBIC...)` instead.')) tf_export(v1=['image.resize_bicubic'])( resize_bicubic_deprecation(resize_bicubic)) resize_bilinear_deprecation = deprecation.deprecated( date=None, instructions=( 'Use `tf.image.resize(...method=ResizeMethod.BILINEAR...)` instead.')) tf_export(v1=['image.resize_bilinear'])( resize_bilinear_deprecation(resize_bilinear)) resize_nearest_neighbor_deprecation = deprecation.deprecated( date=None, instructions=( 'Use `tf.image.resize(...method=ResizeMethod.NEAREST_NEIGHBOR...)` ' 'instead.')) tf_export(v1=['image.resize_nearest_neighbor'])( resize_nearest_neighbor_deprecation(resize_nearest_neighbor)) @tf_export('image.crop_and_resize', v1=[]) def crop_and_resize_v2(image, boxes, box_indices, crop_size, method='bilinear', extrapolation_value=0, name=None): """Extracts crops from the input image tensor and resizes them. Extracts crops from the input image tensor and resizes them using bilinear sampling or nearest neighbor sampling (possibly with aspect ratio change) to a common output size specified by `crop_size`. This is more general than the `crop_to_bounding_box` op which extracts a fixed size slice from the input image and does not allow resizing or aspect ratio change. Returns a tensor with `crops` from the input `image` at positions defined at the bounding box locations in `boxes`. The cropped boxes are all resized (with bilinear or nearest neighbor interpolation) to a fixed `size = [crop_height, crop_width]`. The result is a 4-D tensor `[num_boxes, crop_height, crop_width, depth]`. The resizing is corner aligned. In particular, if `boxes = [[0, 0, 1, 1]]`, the method will give identical results to using `tf.compat.v1.image.resize_bilinear()` or `tf.compat.v1.image.resize_nearest_neighbor()`(depends on the `method` argument) with `align_corners=True`. Args: image: A 4-D tensor of shape `[batch, image_height, image_width, depth]`. Both `image_height` and `image_width` need to be positive. boxes: A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor specifies the coordinates of a box in the `box_ind[i]` image and is specified in normalized coordinates `[y1, x1, y2, x2]`. 
A normalized coordinate value of `y` is mapped to the image coordinate at `y * (image_height - 1)`, so as the `[0, 1]` interval of normalized image height is mapped to `[0, image_height - 1]` in image height coordinates. We do allow `y1` > `y2`, in which case the sampled crop is an up-down flipped version of the original image. The width dimension is treated similarly. Normalized coordinates outside the `[0, 1]` range are allowed, in which case we use `extrapolation_value` to extrapolate the input image values. box_indices: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`. The value of `box_ind[i]` specifies the image that the `i`-th box refers to. crop_size: A 1-D tensor of 2 elements, `size = [crop_height, crop_width]`. All cropped image patches are resized to this size. The aspect ratio of the image content is not preserved. Both `crop_height` and `crop_width` need to be positive. method: An optional string specifying the sampling method for resizing. It can be either `"bilinear"` or `"nearest"` and default to `"bilinear"`. Currently two sampling methods are supported: Bilinear and Nearest Neighbor. extrapolation_value: An optional `float`. Defaults to `0`. Value used for extrapolation, when applicable. name: A name for the operation (optional). Returns: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`. """ return gen_image_ops.crop_and_resize(image, boxes, box_indices, crop_size, method, extrapolation_value, name) @tf_export(v1=['image.crop_and_resize']) @deprecation.deprecated_args(None, 'box_ind is deprecated, use box_indices instead', 'box_ind') def crop_and_resize_v1( # pylint: disable=missing-docstring image, boxes, box_ind=None, crop_size=None, method='bilinear', extrapolation_value=0, name=None, box_indices=None): box_ind = deprecation.deprecated_argument_lookup('box_indices', box_indices, 'box_ind', box_ind) return gen_image_ops.crop_and_resize(image, boxes, box_ind, crop_size, method, extrapolation_value, name) crop_and_resize_v1.__doc__ = gen_image_ops.crop_and_resize.__doc__ @tf_export(v1=['image.extract_glimpse']) def extract_glimpse( input, # pylint: disable=redefined-builtin size, offsets, centered=True, normalized=True, uniform_noise=True, name=None): """Extracts a glimpse from the input tensor. Returns a set of windows called glimpses extracted at location `offsets` from the input tensor. If the windows only partially overlaps the inputs, the non overlapping areas will be filled with random noise. The result is a 4-D tensor of shape `[batch_size, glimpse_height, glimpse_width, channels]`. The channels and batch dimensions are the same as that of the input tensor. The height and width of the output windows are specified in the `size` parameter. The argument `normalized` and `centered` controls how the windows are built: * If the coordinates are normalized but not centered, 0.0 and 1.0 correspond to the minimum and maximum of each height and width dimension. * If the coordinates are both normalized and centered, they range from -1.0 to 1.0. The coordinates (-1.0, -1.0) correspond to the upper left corner, the lower right corner is located at (1.0, 1.0) and the center is at (0, 0). * If the coordinates are not normalized they are interpreted as numbers of pixels. Args: input: A `Tensor` of type `float32`. A 4-D float tensor of shape `[batch_size, height, width, channels]`. size: A `Tensor` of type `int32`. A 1-D tensor of 2 elements containing the size of the glimpses to extract. 
The glimpse height must be specified first, following by the glimpse width. offsets: A `Tensor` of type `float32`. A 2-D integer tensor of shape `[batch_size, 2]` containing the y, x locations of the center of each window. centered: An optional `bool`. Defaults to `True`. indicates if the offset coordinates are centered relative to the image, in which case the (0, 0) offset is relative to the center of the input images. If false, the (0,0) offset corresponds to the upper left corner of the input images. normalized: An optional `bool`. Defaults to `True`. indicates if the offset coordinates are normalized. uniform_noise: An optional `bool`. Defaults to `True`. indicates if the noise should be generated using a uniform distribution or a Gaussian distribution. name: A name for the operation (optional). Returns: A `Tensor` of type `float32`. """ return gen_image_ops.extract_glimpse( input=input, size=size, offsets=offsets, centered=centered, normalized=normalized, uniform_noise=uniform_noise, name=name) @tf_export('image.extract_glimpse', v1=[]) def extract_glimpse_v2( input, # pylint: disable=redefined-builtin size, offsets, centered=True, normalized=True, noise='uniform', name=None): """Extracts a glimpse from the input tensor. Returns a set of windows called glimpses extracted at location `offsets` from the input tensor. If the windows only partially overlaps the inputs, the non overlapping areas will be filled with random noise. The result is a 4-D tensor of shape `[batch_size, glimpse_height, glimpse_width, channels]`. The channels and batch dimensions are the same as that of the input tensor. The height and width of the output windows are specified in the `size` parameter. The argument `normalized` and `centered` controls how the windows are built: * If the coordinates are normalized but not centered, 0.0 and 1.0 correspond to the minimum and maximum of each height and width dimension. * If the coordinates are both normalized and centered, they range from -1.0 to 1.0. The coordinates (-1.0, -1.0) correspond to the upper left corner, the lower right corner is located at (1.0, 1.0) and the center is at (0, 0). * If the coordinates are not normalized they are interpreted as numbers of pixels. Args: input: A `Tensor` of type `float32`. A 4-D float tensor of shape `[batch_size, height, width, channels]`. size: A `Tensor` of type `int32`. A 1-D tensor of 2 elements containing the size of the glimpses to extract. The glimpse height must be specified first, following by the glimpse width. offsets: A `Tensor` of type `float32`. A 2-D integer tensor of shape `[batch_size, 2]` containing the y, x locations of the center of each window. centered: An optional `bool`. Defaults to `True`. indicates if the offset coordinates are centered relative to the image, in which case the (0, 0) offset is relative to the center of the input images. If false, the (0,0) offset corresponds to the upper left corner of the input images. normalized: An optional `bool`. Defaults to `True`. indicates if the offset coordinates are normalized. noise: An optional `string`. Defaults to `uniform`. indicates if the noise should be `uniform` (uniform distribution), `gaussian` (gaussian distribution), or `zero` (zero padding). name: A name for the operation (optional). Returns: A `Tensor` of type `float32`. 
""" return gen_image_ops.extract_glimpse( input=input, size=size, offsets=offsets, centered=centered, normalized=normalized, noise=noise, uniform_noise=False, name=name) @tf_export('image.combined_non_max_suppression') def combined_non_max_suppression(boxes, scores, max_output_size_per_class, max_total_size, iou_threshold=0.5, score_threshold=float('-inf'), pad_per_class=False, clip_boxes=True, name=None): """Greedily selects a subset of bounding boxes in descending order of score. This operation performs non_max_suppression on the inputs per batch, across all classes. Prunes away boxes that have high intersection-over-union (IOU) overlap with previously selected boxes. Bounding boxes are supplied as [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any diagonal pair of box corners and the coordinates can be provided as normalized (i.e., lying in the interval [0, 1]) or absolute. Note that this algorithm is agnostic to where the origin is in the coordinate system. Also note that this algorithm is invariant to orthogonal transformations and translations of the coordinate system; thus translating or reflections of the coordinate system result in the same boxes being selected by the algorithm. The output of this operation is the final boxes, scores and classes tensor returned after performing non_max_suppression. Args: boxes: A 4-D float `Tensor` of shape `[batch_size, num_boxes, q, 4]`. If `q` is 1 then same boxes are used for all classes otherwise, if `q` is equal to number of classes, class-specific boxes are used. scores: A 3-D float `Tensor` of shape `[batch_size, num_boxes, num_classes]` representing a single score corresponding to each box (each row of boxes). max_output_size_per_class: A scalar integer `Tensor` representing the maximum number of boxes to be selected by non max suppression per class max_total_size: A scalar representing maximum number of boxes retained over all classes. iou_threshold: A float representing the threshold for deciding whether boxes overlap too much with respect to IOU. score_threshold: A float representing the threshold for deciding when to remove boxes based on score. pad_per_class: If false, the output nmsed boxes, scores and classes are padded/clipped to `max_total_size`. If true, the output nmsed boxes, scores and classes are padded to be of length `max_size_per_class`*`num_classes`, unless it exceeds `max_total_size` in which case it is clipped to `max_total_size`. Defaults to false. clip_boxes: If true, the coordinates of output nmsed boxes will be clipped to [0, 1]. If false, output the box coordinates as it is. Defaults to true. name: A name for the operation (optional). Returns: 'nmsed_boxes': A [batch_size, max_detections, 4] float32 tensor containing the non-max suppressed boxes. 'nmsed_scores': A [batch_size, max_detections] float32 tensor containing the scores for the boxes. 'nmsed_classes': A [batch_size, max_detections] float32 tensor containing the class for boxes. 'valid_detections': A [batch_size] int32 tensor indicating the number of valid detections per batch item. Only the top valid_detections[i] entries in nms_boxes[i], nms_scores[i] and nms_class[i] are valid. The rest of the entries are zero paddings. 
""" with ops.name_scope(name, 'combined_non_max_suppression'): iou_threshold = ops.convert_to_tensor( iou_threshold, dtype=dtypes.float32, name='iou_threshold') score_threshold = ops.convert_to_tensor( score_threshold, dtype=dtypes.float32, name='score_threshold') return gen_image_ops.combined_non_max_suppression( boxes, scores, max_output_size_per_class, max_total_size, iou_threshold, score_threshold, pad_per_class, clip_boxes) @tf_export('image.draw_bounding_boxes', v1=[]) def draw_bounding_boxes_v2(images, boxes, colors, name=None): """Draw bounding boxes on a batch of images. Outputs a copy of `images` but draws on top of the pixels zero or more bounding boxes specified by the locations in `boxes`. The coordinates of the each bounding box in `boxes` are encoded as `[y_min, x_min, y_max, x_max]`. The bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and height of the underlying image. For example, if an image is 100 x 200 pixels (height x width) and the bounding box is `[0.1, 0.2, 0.5, 0.9]`, the upper-left and bottom-right coordinates of the bounding box will be `(40, 10)` to `(180, 50)` (in (x,y) coordinates). Parts of the bounding box may fall outside the image. Args: images: A `Tensor`. Must be one of the following types: `float32`, `half`. 4-D with shape `[batch, height, width, depth]`. A batch of images. boxes: A `Tensor` of type `float32`. 3-D with shape `[batch, num_bounding_boxes, 4]` containing bounding boxes. colors: A `Tensor` of type `float32`. 2-D. A list of RGBA colors to cycle through for the boxes. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `images`. """ if colors is None: return gen_image_ops.draw_bounding_boxes(images, boxes, name) return gen_image_ops.draw_bounding_boxes_v2(images, boxes, colors, name) @tf_export(v1=['image.draw_bounding_boxes']) def draw_bounding_boxes(images, boxes, name=None, colors=None): """Draw bounding boxes on a batch of images. Outputs a copy of `images` but draws on top of the pixels zero or more bounding boxes specified by the locations in `boxes`. The coordinates of the each bounding box in `boxes` are encoded as `[y_min, x_min, y_max, x_max]`. The bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and height of the underlying image. For example, if an image is 100 x 200 pixels (height x width) and the bounding box is `[0.1, 0.2, 0.5, 0.9]`, the upper-left and bottom-right coordinates of the bounding box will be `(40, 10)` to `(180, 50)` (in (x,y) coordinates). Parts of the bounding box may fall outside the image. Args: images: A `Tensor`. Must be one of the following types: `float32`, `half`. 4-D with shape `[batch, height, width, depth]`. A batch of images. boxes: A `Tensor` of type `float32`. 3-D with shape `[batch, num_bounding_boxes, 4]` containing bounding boxes. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `images`. """ return draw_bounding_boxes_v2(images, boxes, colors, name)
tensorflow-master
tensorflow/python/ops/image_ops_impl.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Clustering Operations.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import random_seed as random_seed_ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import gen_clustering_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn_impl from tensorflow.python.ops import random_ops from tensorflow.python.ops import state_ops from tensorflow.python.ops import variable_scope from tensorflow.python.ops.embedding_ops import embedding_lookup # go/tf-wildcard-import # pylint: disable=wildcard-import from tensorflow.python.ops.gen_clustering_ops import * # pylint: enable=wildcard-import # Euclidean distance between vectors U and V is defined as \\(||U - V||_F\\) # which is the square root of the sum of the absolute squares of the elements # difference. SQUARED_EUCLIDEAN_DISTANCE = 'squared_euclidean' # Cosine distance between vectors U and V is defined as # \\(1 - (U \dot V) / (||U||_F ||V||_F)\\) COSINE_DISTANCE = 'cosine' RANDOM_INIT = 'random' KMEANS_PLUS_PLUS_INIT = 'kmeans_plus_plus' KMC2_INIT = 'kmc2' # The name of the variable holding the cluster centers. Used by the Estimator. CLUSTERS_VAR_NAME = 'clusters' class KMeans(object): """Creates the graph for k-means clustering.""" def __init__(self, inputs, num_clusters, initial_clusters=RANDOM_INIT, distance_metric=SQUARED_EUCLIDEAN_DISTANCE, use_mini_batch=False, mini_batch_steps_per_iteration=1, random_seed=0, kmeans_plus_plus_num_retries=2, kmc2_chain_length=200): """Creates an object for generating KMeans clustering graph. This class implements the following variants of K-means algorithm: If use_mini_batch is False, it runs standard full batch K-means. Each step runs a single iteration of K-Means. This step can be run sharded across multiple workers by passing a list of sharded inputs to this class. Note however that a single step needs to process the full input at once. If use_mini_batch is True, it runs a generalization of the mini-batch K-means algorithm. It runs multiple iterations, where each iteration is composed of mini_batch_steps_per_iteration steps. Two copies of cluster centers are maintained: one that is updated at the end of each iteration, and one that is updated every step. The first copy is used to compute cluster allocations for each step, and for inference, while the second copy is the one updated each step using the mini-batch update rule. After each iteration is complete, this second copy is copied back the first copy. 
Note that for use_mini_batch=True, when mini_batch_steps_per_iteration=1, the algorithm reduces to the standard mini-batch algorithm. Also by setting mini_batch_steps_per_iteration = num_inputs / batch_size, the algorithm becomes an asynchronous version of the full-batch algorithm. Note however that there is no guarantee by this implementation that each input is seen exactly once per iteration. Also, different updates are applied asynchronously without locking. So this asynchronous version may not behave exactly like a full-batch version. Args: inputs: An input tensor or list of input tensors. It is assumed that the data points have been previously randomly permuted. num_clusters: An integer tensor specifying the number of clusters. This argument is ignored if initial_clusters is a tensor or numpy array. initial_clusters: Specifies the clusters used during initialization. One of the following: - a tensor or numpy array with the initial cluster centers. - a function f(inputs, k) that returns up to k centers from `inputs`. - "random": Choose centers randomly from `inputs`. - "kmeans_plus_plus": Use kmeans++ to choose centers from `inputs`. - "kmc2": Use the fast k-MC2 algorithm to choose centers from `inputs`. In the last three cases, one batch of `inputs` may not yield `num_clusters` centers, in which case initialization will require multiple batches until enough centers are chosen. In the case of "random" or "kmeans_plus_plus", if the input size is <= `num_clusters` then the entire batch is chosen to be cluster centers. distance_metric: Distance metric used for clustering. Supported options: "squared_euclidean", "cosine". use_mini_batch: If true, use the mini-batch k-means algorithm. Else assume full batch. mini_batch_steps_per_iteration: Number of steps after which the updated cluster centers are synced back to a master copy. random_seed: Seed for PRNG used to initialize seeds. kmeans_plus_plus_num_retries: For each point that is sampled during kmeans++ initialization, this parameter specifies the number of additional points to draw from the current distribution before selecting the best. If a negative value is specified, a heuristic is used to sample O(log(num_to_sample)) additional points. kmc2_chain_length: Determines how many candidate points are used by the k-MC2 algorithm to produce one new cluster centers. If a (mini-)batch contains less points, one new cluster center is generated from the (mini-)batch. Raises: ValueError: An invalid argument was passed to initial_clusters or distance_metric. """ if isinstance(initial_clusters, str) and initial_clusters not in [ RANDOM_INIT, KMEANS_PLUS_PLUS_INIT, KMC2_INIT ]: raise ValueError( "Unsupported initialization algorithm '%s'" % initial_clusters) if distance_metric not in [SQUARED_EUCLIDEAN_DISTANCE, COSINE_DISTANCE]: raise ValueError("Unsupported distance metric '%s'" % distance_metric) self._inputs = inputs if isinstance(inputs, list) else [inputs] self._num_clusters = num_clusters self._initial_clusters = initial_clusters self._distance_metric = distance_metric self._use_mini_batch = use_mini_batch self._mini_batch_steps_per_iteration = int(mini_batch_steps_per_iteration) self._seed = random_seed_ops.get_seed(random_seed)[0] self._kmeans_plus_plus_num_retries = kmeans_plus_plus_num_retries self._kmc2_chain_length = kmc2_chain_length @classmethod def _distance_graph(cls, inputs, clusters, distance_metric): """Computes distance between each input and each cluster center. Args: inputs: list of input Tensors. clusters: cluster Tensor. 
distance_metric: distance metric used for clustering Returns: list of Tensors, where each element corresponds to each element in inputs. The value is the distance of each row to all the cluster centers. Currently only Euclidean distance and cosine distance are supported. """ assert isinstance(inputs, list) if distance_metric == SQUARED_EUCLIDEAN_DISTANCE: return cls._compute_euclidean_distance(inputs, clusters) elif distance_metric == COSINE_DISTANCE: return cls._compute_cosine_distance( inputs, clusters, inputs_normalized=True) else: assert False, str(distance_metric) @classmethod def _compute_euclidean_distance(cls, inputs, clusters): """Computes Euclidean distance between each input and each cluster center. Args: inputs: list of input Tensors. clusters: cluster Tensor. Returns: list of Tensors, where each element corresponds to each element in inputs. The value is the distance of each row to all the cluster centers. """ output = [] for inp in inputs: with ops.colocate_with(inp, ignore_existing=True): # Computes Euclidean distance. Note the first and third terms are # broadcast additions. squared_distance = ( math_ops.reduce_sum(math_ops.square(inp), 1, keepdims=True) - 2 * math_ops.matmul(inp, clusters, transpose_b=True) + array_ops.transpose( math_ops.reduce_sum( math_ops.square(clusters), 1, keepdims=True))) output.append(squared_distance) return output @classmethod def _compute_cosine_distance(cls, inputs, clusters, inputs_normalized=True): """Computes cosine distance between each input and each cluster center. Args: inputs: list of input Tensor. clusters: cluster Tensor inputs_normalized: if True, it assumes that inp and clusters are normalized and computes the dot product which is equivalent to the cosine distance. Else it L2 normalizes the inputs first. Returns: list of Tensors, where each element corresponds to each element in inp. The value is the distance of each row to all the cluster centers. """ output = [] if not inputs_normalized: with ops.colocate_with(clusters, ignore_existing=True): clusters = nn_impl.l2_normalize(clusters, dim=1) for inp in inputs: with ops.colocate_with(inp, ignore_existing=True): if not inputs_normalized: inp = nn_impl.l2_normalize(inp, dim=1) output.append(1 - math_ops.matmul(inp, clusters, transpose_b=True)) return output def _infer_graph(self, inputs, clusters): """Maps input to closest cluster and the score. Args: inputs: list of input Tensors. clusters: Tensor of cluster centers. Returns: List of tuple, where each value in tuple corresponds to a value in inp. The tuple has following three elements: all_scores: distance of each input to each cluster center. score: distance of each input to closest cluster center. cluster_idx: index of cluster center closest to the corresponding input. """ assert isinstance(inputs, list) # Pairwise distances are used only by transform(). In all other cases, this # sub-graph is not evaluated. scores = self._distance_graph(inputs, clusters, self._distance_metric) output = [] if (self._distance_metric == COSINE_DISTANCE and not self._clusters_l2_normalized()): # The cosine distance between normalized vectors x and y is the same as # 2 * squared_euclidean_distance. We are using this fact and reusing the # nearest_neighbors op. # TODO(ands): Support COSINE distance in nearest_neighbors and remove # this. 
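      # For unit-norm vectors x and y, ||x - y||^2 = 2 - 2 * (x . y)
      # = 2 * (1 - x . y), i.e. the squared Euclidean distance is exactly twice
      # the cosine distance. This is why the nearest-neighbor distances returned
      # below are halved (`distances *= 0.5`) when using cosine distance.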
with ops.colocate_with(clusters, ignore_existing=True): clusters = nn_impl.l2_normalize(clusters, dim=1) for inp, score in zip(inputs, scores): with ops.colocate_with(inp, ignore_existing=True): (indices, distances) = gen_clustering_ops.nearest_neighbors( inp, clusters, 1) if self._distance_metric == COSINE_DISTANCE: distances *= 0.5 output.append((score, array_ops.squeeze(distances, [-1]), array_ops.squeeze(indices, [-1]))) return zip(*output) def _clusters_l2_normalized(self): """Returns True if clusters centers are kept normalized.""" return (self._distance_metric == COSINE_DISTANCE and (not self._use_mini_batch or self._mini_batch_steps_per_iteration > 1)) def _create_variables(self, num_clusters): """Creates variables. Args: num_clusters: an integer Tensor providing the number of clusters. Returns: Tuple with following elements: - cluster_centers: a Tensor for storing cluster centers - cluster_centers_initialized: bool Variable indicating whether clusters are initialized. - cluster_counts: a Tensor for storing counts of points assigned to this cluster. This is used by mini-batch training. - cluster_centers_updated: Tensor representing copy of cluster centers that are updated every step. - update_in_steps: numbers of steps left before we sync cluster_centers_updated back to cluster_centers. """ init_value = array_ops.placeholder_with_default([], shape=None) cluster_centers = variable_scope.variable( init_value, name=CLUSTERS_VAR_NAME, validate_shape=False) cluster_centers_initialized = variable_scope.variable( False, dtype=dtypes.bool, name='initialized') if self._use_mini_batch and self._mini_batch_steps_per_iteration > 1: # Copy of cluster centers actively updated each step according to # mini-batch update rule. cluster_centers_updated = variable_scope.variable( init_value, name='clusters_updated', validate_shape=False) # How many steps till we copy the updated clusters to cluster_centers. update_in_steps = variable_scope.variable( self._mini_batch_steps_per_iteration, dtype=dtypes.int64, name='update_in_steps') # Count of points assigned to cluster_centers_updated. cluster_counts = variable_scope.variable( array_ops.zeros([num_clusters], dtype=dtypes.int64)) else: cluster_centers_updated = cluster_centers update_in_steps = None cluster_counts = ( variable_scope.variable( array_ops.ones([num_clusters], dtype=dtypes.int64)) if self._use_mini_batch else None) return (cluster_centers, cluster_centers_initialized, cluster_counts, cluster_centers_updated, update_in_steps) @classmethod def _l2_normalize_data(cls, inputs): """Normalized the input data.""" output = [] for inp in inputs: with ops.colocate_with(inp, ignore_existing=True): output.append(nn_impl.l2_normalize(inp, dim=1)) return output def training_graph(self): """Generate a training graph for kmeans algorithm. This returns, among other things, an op that chooses initial centers (init_op), a boolean variable that is set to True when the initial centers are chosen (cluster_centers_initialized), and an op to perform either an entire Lloyd iteration or a mini-batch of a Lloyd iteration (training_op). The caller should use these components as follows. A single worker should execute init_op multiple times until cluster_centers_initialized becomes True. Then multiple workers may execute training_op any number of times. Returns: A tuple consisting of: all_scores: A matrix (or list of matrices) of dimensions (num_input, num_clusters) where the value is the distance of an input vector and a cluster center. 
cluster_idx: A vector (or list of vectors). Each element in the vector corresponds to an input row in 'inp' and specifies the cluster id corresponding to the input. scores: Similar to cluster_idx but specifies the distance to the assigned cluster instead. cluster_centers_initialized: scalar indicating whether clusters have been initialized. init_op: an op to initialize the clusters. training_op: an op that runs an iteration of training. """ # Implementation of kmeans. if (isinstance(self._initial_clusters, str) or callable(self._initial_clusters)): initial_clusters = self._initial_clusters num_clusters = ops.convert_to_tensor(self._num_clusters) else: initial_clusters = ops.convert_to_tensor(self._initial_clusters) num_clusters = array_ops.shape(initial_clusters)[0] inputs = self._inputs (cluster_centers_var, cluster_centers_initialized, total_counts, cluster_centers_updated, update_in_steps) = self._create_variables(num_clusters) init_op = _InitializeClustersOpFactory( self._inputs, num_clusters, initial_clusters, self._distance_metric, self._seed, self._kmeans_plus_plus_num_retries, self._kmc2_chain_length, cluster_centers_var, cluster_centers_updated, cluster_centers_initialized).op() cluster_centers = cluster_centers_var if self._distance_metric == COSINE_DISTANCE: inputs = self._l2_normalize_data(inputs) if not self._clusters_l2_normalized(): cluster_centers = nn_impl.l2_normalize(cluster_centers, dim=1) all_scores, scores, cluster_idx = self._infer_graph(inputs, cluster_centers) if self._use_mini_batch: sync_updates_op = self._mini_batch_sync_updates_op( update_in_steps, cluster_centers_var, cluster_centers_updated, total_counts) assert sync_updates_op is not None with ops.control_dependencies([sync_updates_op]): training_op = self._mini_batch_training_op( inputs, cluster_idx, cluster_centers_updated, total_counts) else: assert cluster_centers == cluster_centers_var training_op = self._full_batch_training_op( inputs, num_clusters, cluster_idx, cluster_centers_var) return (all_scores, cluster_idx, scores, cluster_centers_initialized, init_op, training_op) def _mini_batch_sync_updates_op(self, update_in_steps, cluster_centers_var, cluster_centers_updated, total_counts): if self._use_mini_batch and self._mini_batch_steps_per_iteration > 1: assert update_in_steps is not None with ops.colocate_with(update_in_steps, ignore_existing=True): def _f(): # Note that there is a race condition here, so we do a best effort # updates here. We reset update_in_steps first so that other workers # don't duplicate the updates. Also we update cluster_center_vars # before resetting total_counts to avoid large updates to # cluster_centers_updated based on partially updated # cluster_center_vars. 
with ops.control_dependencies([ state_ops.assign(update_in_steps, self._mini_batch_steps_per_iteration - 1) ]): with ops.colocate_with( cluster_centers_updated, ignore_existing=True): if self._distance_metric == COSINE_DISTANCE: cluster_centers = nn_impl.l2_normalize( cluster_centers_updated, dim=1) else: cluster_centers = cluster_centers_updated with ops.colocate_with(cluster_centers_var, ignore_existing=True): with ops.control_dependencies( [state_ops.assign(cluster_centers_var, cluster_centers)]): with ops.colocate_with(None, ignore_existing=True): with ops.control_dependencies([ state_ops.assign(total_counts, array_ops.zeros_like(total_counts)) ]): return array_ops.identity(update_in_steps) return control_flow_ops.cond( update_in_steps <= 0, _f, lambda: state_ops.assign_sub(update_in_steps, 1)) else: return control_flow_ops.no_op() def _mini_batch_training_op(self, inputs, cluster_idx_list, cluster_centers, total_counts): """Creates an op for training for mini batch case. Args: inputs: list of input Tensors. cluster_idx_list: A vector (or list of vectors). Each element in the vector corresponds to an input row in 'inp' and specifies the cluster id corresponding to the input. cluster_centers: Tensor Ref of cluster centers. total_counts: Tensor Ref of cluster counts. Returns: An op for doing an update of mini-batch k-means. """ update_ops = [] for inp, cluster_idx in zip(inputs, cluster_idx_list): with ops.colocate_with(inp, ignore_existing=True): assert total_counts is not None cluster_idx = array_ops.reshape(cluster_idx, [-1]) # Dedupe the unique ids of cluster_centers being updated so that updates # can be locally aggregated. unique_ids, unique_idx = array_ops.unique(cluster_idx) num_unique_cluster_idx = array_ops.size(unique_ids) # Fetch the old values of counts and cluster_centers. with ops.colocate_with(total_counts, ignore_existing=True): old_counts = array_ops.gather(total_counts, unique_ids) # TODO(agarwal): This colocation seems to run into problems. Fix it. with ops.colocate_with(cluster_centers, ignore_existing=True): old_cluster_centers = array_ops.gather(cluster_centers, unique_ids) # Locally aggregate the increment to counts. count_updates = math_ops.unsorted_segment_sum( array_ops.ones_like(unique_idx, dtype=total_counts.dtype), unique_idx, num_unique_cluster_idx) # Locally compute the sum of inputs mapped to each id. # For a cluster with old cluster value x, old count n, and with data # d_1,...d_k newly assigned to it, we recompute the new value as # \\(x += (sum_i(d_i) - k * x) / (n + k)\\). # Compute \\(sum_i(d_i)\\), see comment above. cluster_center_updates = math_ops.unsorted_segment_sum( inp, unique_idx, num_unique_cluster_idx) # Shape to enable broadcasting count_updates and learning_rate to inp. # It extends the shape with 1's to match the rank of inp. broadcast_shape = array_ops.concat([ array_ops.reshape(num_unique_cluster_idx, [1]), array_ops.ones( array_ops.reshape(array_ops.rank(inp) - 1, [1]), dtype=dtypes.int32) ], 0) # Subtract k * x, see comment above. cluster_center_updates -= math_ops.cast( array_ops.reshape(count_updates, broadcast_shape), inp.dtype) * old_cluster_centers learning_rate = math_ops.reciprocal( math_ops.cast(old_counts + count_updates, inp.dtype)) learning_rate = array_ops.reshape(learning_rate, broadcast_shape) # scale by 1 / (n + k), see comment above. cluster_center_updates *= learning_rate # Apply the updates. 
update_counts = state_ops.scatter_add(total_counts, unique_ids, count_updates) update_cluster_centers = state_ops.scatter_add( cluster_centers, unique_ids, cluster_center_updates) update_ops.extend([update_counts, update_cluster_centers]) return control_flow_ops.group(*update_ops) def _full_batch_training_op(self, inputs, num_clusters, cluster_idx_list, cluster_centers): """Creates an op for training for full batch case. Args: inputs: list of input Tensors. num_clusters: an integer Tensor providing the number of clusters. cluster_idx_list: A vector (or list of vectors). Each element in the vector corresponds to an input row in 'inp' and specifies the cluster id corresponding to the input. cluster_centers: Tensor Ref of cluster centers. Returns: An op for doing an update of mini-batch k-means. """ cluster_sums = [] cluster_counts = [] epsilon = constant_op.constant(1e-6, dtype=inputs[0].dtype) for inp, cluster_idx in zip(inputs, cluster_idx_list): with ops.colocate_with(inp, ignore_existing=True): cluster_sums.append( math_ops.unsorted_segment_sum(inp, cluster_idx, num_clusters)) cluster_counts.append( math_ops.unsorted_segment_sum( array_ops.reshape( array_ops.ones( array_ops.reshape(array_ops.shape(inp)[0], [-1])), [-1, 1]), cluster_idx, num_clusters)) with ops.colocate_with(cluster_centers, ignore_existing=True): new_clusters_centers = math_ops.add_n(cluster_sums) / ( math_ops.cast(math_ops.add_n(cluster_counts), cluster_sums[0].dtype) + epsilon) if self._clusters_l2_normalized(): new_clusters_centers = nn_impl.l2_normalize(new_clusters_centers, dim=1) return state_ops.assign(cluster_centers, new_clusters_centers) class _InitializeClustersOpFactory(object): """Internal class to create the op to initialize the clusters. The op performs this algorithm (see constructor args): num_remaining = num_clusters - length(cluster_centers) if num_remaining == 0: assert that cluster_centers_initialized is true else: assert that num_remaining > 0 new_centers = choose up to num_remaining initial centers l2-normalize new_centers if using cosine distance all_centers = concat(cluster_centers, new_centers) cluster_centers := all_centers if there is a cluster_centers_updated variable: cluster_centers_updated := cluster_centers num_now_remaining = num_clusters - length(cluster_centers) if num_now_remaining == 0: cluster_centers_initialized := true """ # TODO(ccolby): Refactor this class so that kmc2 isn't so much a special case. def __init__(self, inputs, num_clusters, initial_clusters, distance_metric, random_seed, kmeans_plus_plus_num_retries, kmc2_chain_length, cluster_centers, cluster_centers_updated, cluster_centers_initialized): """Creates an op factory. Args: inputs: See KMeans constructor. num_clusters: An integer Tensor providing the number of clusters. initial_clusters: See KMeans constructor. distance_metric: See KMeans constructor. random_seed: See KMeans constructor. kmeans_plus_plus_num_retries: See KMeans constructor. kmc2_chain_length: See KMeans constructor. cluster_centers: The TF variable holding the initial centers. It may already contain some centers when the op is executed. cluster_centers_updated: A second TF variable to hold a copy of the initial centers, used for full-batch mode. In mini-batch mode, cluster_centers_updated is the same variable as cluster_centers. cluster_centers_initialized: A boolean TF variable that will be set to true when all the initial centers have been chosen. """ # All of these instance variables are constants. 
self._inputs = inputs self._num_clusters = num_clusters self._initial_clusters = initial_clusters self._distance_metric = distance_metric self._seed = random_seed self._kmeans_plus_plus_num_retries = kmeans_plus_plus_num_retries self._kmc2_chain_length = kmc2_chain_length self._cluster_centers = cluster_centers self._cluster_centers_updated = cluster_centers_updated self._cluster_centers_initialized = cluster_centers_initialized self._num_selected = array_ops.shape(self._cluster_centers)[0] self._num_remaining = self._num_clusters - self._num_selected self._num_data = math_ops.add_n( [array_ops.shape(i)[0] for i in self._inputs]) def _random(self): indices = random_ops.random_uniform( array_ops.reshape(self._num_remaining, [-1]), minval=0, maxval=math_ops.cast(self._num_data, dtypes.int64), seed=self._seed, dtype=dtypes.int64) return embedding_lookup(self._inputs, indices, partition_strategy='div') def _kmeans_plus_plus(self): # Points from only the first shard are used for initializing centers. # TODO(ands): Use all points. inp = self._inputs[0] if self._distance_metric == COSINE_DISTANCE: inp = nn_impl.l2_normalize(inp, dim=1) return gen_clustering_ops.kmeans_plus_plus_initialization( inp, math_ops.cast(self._num_remaining, dtypes.int64), self._seed, self._kmeans_plus_plus_num_retries) def _kmc2_multiple_centers(self): """Adds new initial cluster centers using the k-MC2 algorithm. In each call to the op, the provided batch is split into subsets based on the specified `kmc2_chain_length`. On each subset, a single Markov chain of the k-MC2 algorithm is used to add *one* new center cluster center. If there are less than `kmc2_chain_length` points in the subset, a single center is added using one Markov chain on the full input. It is assumed that the provided batch has previously been randomly permuted. Otherwise, k-MC2 may return suboptimal centers. Returns: An op that adds new cluster centers. """ # The op only operates on the first shard of data. first_shard = self._inputs[0] # Number of points in the input that can be used. batch_size = array_ops.shape(first_shard)[0] # Maximum number of subsets such that the size of each subset is at least # `kmc2_chain_length`. Final subsets may be larger. max_to_sample = math_ops.cast( batch_size / self._kmc2_chain_length, dtype=dtypes.int32) # We sample at least one new center and at most all remaining centers. num_to_sample = math_ops.maximum( math_ops.minimum(self._num_remaining, max_to_sample), 1) def _cond(i, _): """Stopping condition for the while loop.""" return math_ops.less(i, num_to_sample) def _body(i, _): """Body that adds a single new center based on a subset.""" def _sample_random(): """Returns a random point as a cluster center.""" # By assumption the batch is reshuffled and _sample_random is always # called for i=0. Hence, we simply return the first point. new_center = array_ops.reshape(first_shard[0], [1, -1]) if self._distance_metric == COSINE_DISTANCE: new_center = nn_impl.l2_normalize(new_center, dim=1) return new_center def _sample_kmc2_chain(): """Returns previous centers as well as a new center sampled using k-MC2. """ # Extract the subset from the underlying batch. start = i * self._kmc2_chain_length end = start + self._kmc2_chain_length subset = first_shard[start:end] # Compute the distances from points in the subset to previous centers. _, distances = gen_clustering_ops.nearest_neighbors( subset, self._cluster_centers, 1) # Sample index of new center using k-MC2 Markov chain. 
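        # The chain-initialization op runs a short Markov chain over this
        # subset whose target distribution approximates the D^2 weighting used
        # by k-means++ seeding, so a new center is proposed without a full pass
        # over the data.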
new_center_index = gen_clustering_ops.kmc2_chain_initialization( array_ops.squeeze(distances), self._seed) # Extract actual new center. newly_sampled_center = array_ops.reshape(subset[new_center_index], [1, -1]) # Return concatenation with previously sampled centers. if self._distance_metric == COSINE_DISTANCE: newly_sampled_center = nn_impl.l2_normalize( newly_sampled_center, dim=1) return array_ops.concat([self._cluster_centers, newly_sampled_center], 0) # Obtain a random point if there are no previously sampled centers. # Otherwise, construct a k-MC2 Markov chain. new_centers = control_flow_ops.cond( math_ops.equal(self._num_selected, 0), _sample_random, _sample_kmc2_chain) # Assign new cluster centers to underlying variable. assigned_centers = state_ops.assign( self._cluster_centers, new_centers, validate_shape=False) if self._cluster_centers_updated is not self._cluster_centers: assigned_centers = state_ops.assign( self._cluster_centers_updated, assigned_centers, validate_shape=False) return i + 1, self._num_clusters - array_ops.shape(assigned_centers)[0] # Add num_to_sample new data points. _, num_remaining = control_flow_ops.while_loop(_cond, _body, [0, 0]) return num_remaining def _greedy_batch_sampler(self, sampler): # If the input dataset size is smaller than the number of centers # remaining, choose the entire input dataset as centers. This can happen # with mini-batch. Otherwise, sample the batch according to the provided # sampler. return control_flow_ops.cond(self._num_data <= self._num_remaining, lambda: array_ops.concat(self._inputs, 0), sampler) def _single_batch_sampler(self, sampler): # Enforce that there are at least as many data points as centers # remaining. This gives the provided sampler the chance to select all # remaining centers from a single batch. with ops.control_dependencies( [check_ops.assert_greater_equal(self._num_data, self._num_remaining)]): return sampler() def _choose_initial_centers(self): if isinstance(self._initial_clusters, str): if self._initial_clusters == RANDOM_INIT: return self._greedy_batch_sampler(self._random) else: # self._initial_clusters == KMEANS_PLUS_PLUS_INIT return self._single_batch_sampler(self._kmeans_plus_plus) elif callable(self._initial_clusters): return self._initial_clusters(self._inputs, self._num_remaining) else: with ops.control_dependencies([ check_ops.assert_equal(self._num_remaining, array_ops.shape(self._initial_clusters)[0]) ]): return self._initial_clusters def _add_new_centers(self): """Adds some centers and returns the number of centers remaining.""" new_centers = self._choose_initial_centers() if self._distance_metric == COSINE_DISTANCE: new_centers = nn_impl.l2_normalize(new_centers, dim=1) # If cluster_centers is empty, it doesn't have the right shape for concat. all_centers = control_flow_ops.cond( math_ops.equal(self._num_selected, 0), lambda: new_centers, lambda: array_ops.concat([self._cluster_centers, new_centers], 0)) # TODO(ccolby): De-dupe all_centers? 
a = state_ops.assign( self._cluster_centers, all_centers, validate_shape=False) if self._cluster_centers_updated is not self._cluster_centers: a = state_ops.assign( self._cluster_centers_updated, a, validate_shape=False) return self._num_clusters - array_ops.shape(a)[0] def _initialize(self): with ops.control_dependencies([ check_ops.assert_positive(self._num_remaining), ]): if self._initial_clusters == KMC2_INIT: num_now_remaining = self._kmc2_multiple_centers() else: num_now_remaining = self._add_new_centers() return control_flow_ops.cond( math_ops.equal(num_now_remaining, 0), lambda: state_ops.assign(self._cluster_centers_initialized, True), control_flow_ops.no_op) def op(self): """Returns the cluster initializer op.""" return control_flow_ops.cond( math_ops.equal(self._num_remaining, 0), lambda: check_ops.assert_equal(self._cluster_centers_initialized, True), self._initialize)
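# Hedged usage sketch (added for illustration, not part of the original file):
# wiring the KMeans graph builder above into a minimal TF1-style training loop,
# following the protocol described in `training_graph()`. The data shape,
# cluster count, and iteration counts are assumptions; the import path is the
# internal module shown in this record's file_path.
import numpy as np
import tensorflow as tf
from tensorflow.python.ops.clustering_ops import (KMeans,
                                                  SQUARED_EUCLIDEAN_DISTANCE)

# Under TF 2.x, graph mode must be enabled before building the graph.
tf.compat.v1.disable_eager_execution()

example_points = tf.constant(np.random.rand(100, 2), dtype=tf.float32)
example_kmeans = KMeans(example_points, num_clusters=3,
                        initial_clusters='kmeans_plus_plus',
                        distance_metric=SQUARED_EUCLIDEAN_DISTANCE)
(_, example_cluster_idx, _, example_initialized,
 example_init_op, example_train_op) = example_kmeans.training_graph()

with tf.compat.v1.Session() as sess:
  sess.run(tf.compat.v1.global_variables_initializer())
  # A single worker runs init_op until the centers are marked initialized.
  while not sess.run(example_initialized):
    sess.run(example_init_op)
  # Then any number of full-batch Lloyd iterations may be run.
  for _ in range(10):
    sess.run(example_train_op)
  example_assignments = sess.run(example_cluster_idx)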
tensorflow-master
tensorflow/python/ops/clustering_ops.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for control_flow_ops.py.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections from absl.testing import parameterized import numpy as np from tensorflow.core.framework import graph_pb2 from tensorflow.core.framework import node_def_pb2 from tensorflow.core.protobuf import config_pb2 from tensorflow.python import tf2 from tensorflow.python.client import session from tensorflow.python.eager import backprop from tensorflow.python.eager import context from tensorflow.python.eager import def_function from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import embedding_ops from tensorflow.python.ops import gradients_impl from tensorflow.python.ops import init_ops from tensorflow.python.ops import linalg_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import random_ops from tensorflow.python.ops import state_ops from tensorflow.python.ops import tensor_array_ops from tensorflow.python.ops import variable_scope from tensorflow.python.ops import variables import tensorflow.python.ops.tensor_array_grad # pylint: disable=unused-import from tensorflow.python.platform import googletest from tensorflow.python.training import momentum from tensorflow.python.util import nest TestTuple = collections.namedtuple("TestTuple", "a b") SingletonTestTuple = collections.namedtuple("SingletonTestTuple", "a") class GroupTestCase(test_util.TensorFlowTestCase): def _StripNode(self, nd): snode = node_def_pb2.NodeDef(name=nd.name, op=nd.op, input=nd.input) if nd.device: snode.device = nd.device return snode def _StripGraph(self, gd): """Copy gd keeping only, node.name, node.op, node.input, and node.device.""" return graph_pb2.GraphDef(node=[self._StripNode(nd) for nd in gd.node]) def testGroup_NoDevices(self): with ops.Graph().as_default() as g: a = constant_op.constant(0, name="a") b = constant_op.constant(0, name="b") c = constant_op.constant(0, name="c") control_flow_ops.group(a.op, b.op, c.op, name="root") gd = g.as_graph_def() self.assertProtoEquals(""" node { name: "a" op: "Const"} node { name: "b" op: "Const"} node { name: "c" op: "Const"} node { name: "root" op: "NoOp" input: "^a" input: "^b" input: "^c" } """, self._StripGraph(gd)) def testGroup_OneDevice(self): with ops.Graph().as_default() as g: with g.device("/task:0"): a = constant_op.constant(0, name="a") b = constant_op.constant(0, name="b") 
control_flow_ops.group(a.op, b.op, name="root") gd = g.as_graph_def() self.assertProtoEquals(""" node { name: "a" op: "Const" device: "/task:0" } node { name: "b" op: "Const" device: "/task:0" } node { name: "root" op: "NoOp" input: "^a" input: "^b" device: "/task:0" } """, self._StripGraph(gd)) def testGroup_MultiDevice(self): with ops.Graph().as_default() as g: with g.device("/task:0"): a = constant_op.constant(0, name="a") b = constant_op.constant(0, name="b") with g.device("/task:1"): c = constant_op.constant(0, name="c") d = constant_op.constant(0, name="d") with g.device("/task:2"): control_flow_ops.group(a.op, b.op, c.op, d.op, name="root") gd = g.as_graph_def() self.assertProtoEquals(""" node { name: "a" op: "Const" device: "/task:0"} node { name: "b" op: "Const" device: "/task:0"} node { name: "c" op: "Const" device: "/task:1"} node { name: "d" op: "Const" device: "/task:1"} node { name: "root/NoOp" op: "NoOp" input: "^a" input: "^b" device: "/task:0" } node { name: "root/NoOp_1" op: "NoOp" input: "^c" input: "^d" device: "/task:1" } node { name: "root" op: "NoOp" input: "^root/NoOp" input: "^root/NoOp_1" device: "/task:2" } """, self._StripGraph(gd)) def testPassingList(self): with ops.Graph().as_default() as g: a = constant_op.constant(0, name="a") b = constant_op.constant(0, name="b") control_flow_ops.group([a.op, b.op], name="root") gd = g.as_graph_def() self.assertProtoEquals(""" node { name: "a" op: "Const"} node { name: "b" op: "Const"} node { name: "root" op: "NoOp" input: "^a" input: "^b" } """, self._StripGraph(gd)) @test_util.run_deprecated_v1 def testPassingNonTensors(self): with self.assertRaises(TypeError): control_flow_ops.group(1, 2) class ShapeTestCase(test_util.TensorFlowTestCase): def testShape(self): tensor = constant_op.constant([1.0, 2.0]) self.assertEquals([2], tensor.get_shape()) self.assertEquals([2], control_flow_ops.with_dependencies( [constant_op.constant(1.0)], tensor).get_shape()) class WithDependenciesTestCase(test_util.TensorFlowTestCase): @test_util.run_deprecated_v1 def testTupleDependencies(self): counter = variable_scope.get_variable( "my_counter", shape=[], initializer=init_ops.zeros_initializer()) increment_counter = state_ops.assign_add(counter, 1) const_with_dep = control_flow_ops.with_dependencies( (increment_counter, constant_op.constant(42)), constant_op.constant(7)) self.evaluate(variables.global_variables_initializer()) self.assertEquals(0, self.evaluate(counter)) self.assertEquals(7, self.evaluate(const_with_dep)) self.assertEquals(1, self.evaluate(counter)) @test_util.run_deprecated_v1 def testListDependencies(self): counter = variable_scope.get_variable( "my_counter", shape=[], initializer=init_ops.zeros_initializer()) increment_counter = state_ops.assign_add(counter, 1) const_with_dep = control_flow_ops.with_dependencies( [increment_counter, constant_op.constant(42)], constant_op.constant(7)) self.evaluate(variables.global_variables_initializer()) self.assertEquals(0, self.evaluate(counter)) self.assertEquals(7, self.evaluate(const_with_dep)) self.assertEquals(1, self.evaluate(counter)) class SwitchTestCase(test_util.TensorFlowTestCase): @test_util.run_deprecated_v1 def testIndexedSlicesWithDenseShape(self): with self.cached_session(): data = ops.IndexedSlices( constant_op.constant([1, 2, 3]), constant_op.constant([0, 1, 2]), dense_shape=constant_op.constant([3])) zero = constant_op.constant(0) one = constant_op.constant(1) less_op = math_ops.less(zero, one) _, switch_true = control_flow_ops.switch(data, less_op) 
self.assertAllEqual([1, 2, 3], switch_true.values.eval()) self.assertAllEqual([0, 1, 2], switch_true.indices.eval()) @test_util.run_deprecated_v1 def testIndexedSlicesGradient(self): embedding_matrix = variable_scope.get_variable( "embedding_matrix", [5, 5], initializer=init_ops.random_normal_initializer()) def cond(it, _): return it < 5 def body(it, cost): embedding = embedding_ops.embedding_lookup(embedding_matrix + 0.0, [0]) cost += math_ops.reduce_sum(embedding) return it + 1, cost _, cost = control_flow_ops.while_loop( cond, body, [constant_op.constant(0), constant_op.constant(0.0)]) optimizer = momentum.MomentumOptimizer(0.1, 0.9) train_op = optimizer.minimize(cost) with self.cached_session(): self.evaluate(variables.global_variables_initializer()) for _ in range(10): self.evaluate([train_op]) def testResourceReadInLoop(self): embedding_matrix = variable_scope.get_variable( "embedding_matrix", initializer=[[2.0], [3.0]], use_resource=True) def cond(it, _): return it < 5 def body(it, cost): embedding = embedding_ops.embedding_lookup(embedding_matrix, [0]) cost += math_ops.reduce_sum(embedding) return it + 1, cost _, cost = control_flow_ops.while_loop( cond, body, [constant_op.constant(0), constant_op.constant(0.0)]) with self.cached_session(): self.evaluate(variables.global_variables_initializer()) self.assertAllEqual(10.0, self.evaluate(cost)) def doTestIndexedSlicesGradientInCondInWhileLoop(self, use_resource=False): embedding_matrix = variable_scope.get_variable( "embedding_matrix", [5, 5], initializer=init_ops.random_normal_initializer(), use_resource=use_resource) def cond(it, _): return it < 5 def body(it, cost): embedding = embedding_ops.embedding_lookup(embedding_matrix, [0]) cost = control_flow_ops.cond( math_ops.equal(it, 3), lambda: math_ops.square(cost), (lambda: cost + math_ops.reduce_sum(embedding))) return it + 1, cost _, cost = control_flow_ops.while_loop( cond, body, [constant_op.constant(0), constant_op.constant(0.0)]) dynamic_grads = gradients_impl.gradients(cost, [embedding_matrix])[0] dynamic_grads = math_ops.segment_sum(dynamic_grads.values, dynamic_grads.indices) embedding = embedding_ops.embedding_lookup(embedding_matrix, [0]) static = math_ops.square( math_ops.reduce_sum(embedding) + math_ops.reduce_sum(embedding) + math_ops.reduce_sum(embedding)) + math_ops.reduce_sum(embedding) static_grads = gradients_impl.gradients(static, [embedding_matrix])[0] static_grads = math_ops.segment_sum(static_grads.values, static_grads.indices) with self.cached_session(): self.evaluate(variables.global_variables_initializer()) self.assertAllEqual(*self.evaluate([static_grads, dynamic_grads])) def testIndexedSlicesGradientInCondInWhileLoop(self): self.doTestIndexedSlicesGradientInCondInWhileLoop(use_resource=False) def testIndexedSlicesGradientInCondInWhileLoopResource(self): self.doTestIndexedSlicesGradientInCondInWhileLoop(use_resource=True) @test_util.run_v1_only("b/120545219") def testIndexedSlicesWithShapeGradientInWhileLoop(self): for dtype in [dtypes.float32, dtypes.float64]: with self.cached_session() as sess: num_steps = 9 inputs = array_ops.placeholder(dtype=dtype, shape=[num_steps]) initial_outputs = tensor_array_ops.TensorArray( dtype=dtype, size=num_steps) initial_i = constant_op.constant(0, dtype=dtypes.int32) def cond(i, _): return i < num_steps # pylint: disable=cell-var-from-loop def body(i, outputs): x = array_ops.gather(inputs, i) # pylint: disable=cell-var-from-loop outputs = outputs.write(i, x) return i + 1, outputs _, outputs = 
control_flow_ops.while_loop(cond, body, [initial_i, initial_outputs]) outputs = math_ops.reduce_sum(outputs.stack()) r = gradients_impl.gradients([outputs], [inputs])[0] grad_wr_inputs = ops.convert_to_tensor(r) o, grad = sess.run([outputs, grad_wr_inputs], feed_dict={inputs: [4, 6, 0, 7, 0, 0, 1, 2, 0]}) self.assertEquals(o, 20) self.assertAllEqual(grad, [1] * num_steps) @test_util.run_v1_only("b/120545219") def testIndexedSlicesWithDynamicShapeGradientInWhileLoop(self): for dtype in [dtypes.float32, dtypes.float64]: with self.cached_session() as sess: inputs = array_ops.placeholder(dtype=dtype) initial_outputs = tensor_array_ops.TensorArray( dtype=dtype, dynamic_size=True, size=1) initial_i = constant_op.constant(0, dtype=dtypes.int32) def cond(i, _): return i < array_ops.size(inputs) # pylint: disable=cell-var-from-loop def body(i, outputs): x = array_ops.gather(inputs, i) # pylint: disable=cell-var-from-loop outputs = outputs.write(i, x) return i + 1, outputs _, outputs = control_flow_ops.while_loop(cond, body, [initial_i, initial_outputs]) outputs = math_ops.reduce_sum(outputs.stack()) r = gradients_impl.gradients([outputs], [inputs])[0] grad_wr_inputs = ops.convert_to_tensor(r) o, grad = sess.run([outputs, grad_wr_inputs], feed_dict={inputs: [1, 3, 2]}) self.assertEquals(o, 6) self.assertAllEqual(grad, [1] * 3) @test_util.run_deprecated_v1 def testGradientThroughSingleBranchOutsideOfContext(self): x = constant_op.constant(2.) s = constant_op.constant(True) x_false, x_true = control_flow_ops.switch(x, s) grad_x_true = gradients_impl.gradients(x_true, x)[0] grad_x_false = gradients_impl.gradients(x_false, x)[0] self.assertEquals(self.evaluate(grad_x_true), 1.) self.assertEquals(self.evaluate(grad_x_false), 0.) class CondTest(test_util.TensorFlowTestCase): def testCondTrue(self): x = constant_op.constant(2) y = constant_op.constant(5) z = control_flow_ops.cond( math_ops.less( x, y), lambda: math_ops.multiply(x, 17), lambda: math_ops.add(y, 23)) self.assertEquals(self.evaluate(z), 34) def testCondFalse(self): x = constant_op.constant(2) y = constant_op.constant(1) z = control_flow_ops.cond( math_ops.less( x, y), lambda: math_ops.multiply(x, 17), lambda: math_ops.add(y, 23)) self.assertEquals(self.evaluate(z), 24) def testCondTrueLegacy(self): x = constant_op.constant(2) y = constant_op.constant(5) z = control_flow_ops.cond( math_ops.less(x, y), fn1=lambda: math_ops.multiply(x, 17), fn2=lambda: math_ops.add(y, 23)) self.assertEquals(self.evaluate(z), 34) def testCondFalseLegacy(self): x = constant_op.constant(2) y = constant_op.constant(1) z = control_flow_ops.cond( math_ops.less(x, y), fn1=lambda: math_ops.multiply(x, 17), fn2=lambda: math_ops.add(y, 23)) self.assertEquals(self.evaluate(z), 24) @test_util.run_deprecated_v1 def testCondModifyBoolPred(self): # This test in particular used to fail only when running in GPU, hence # use_gpu=True. 
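    # The true branch assigns False to bool_var, so the first evaluation below
    # returns False and flips the variable, and the second evaluation takes
    # the false branch and returns True.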
with test_util.use_gpu(): bool_var = variable_scope.get_variable( "bool_var", dtype=dtypes.bool, initializer=True) cond_on_bool_var = control_flow_ops.cond( pred=bool_var, true_fn=lambda: state_ops.assign(bool_var, False), false_fn=lambda: True) self.evaluate(bool_var.initializer) self.assertEquals(self.evaluate(cond_on_bool_var), False) self.assertEquals(self.evaluate(cond_on_bool_var), True) def testCondMissingArg1(self): x = constant_op.constant(1) with self.assertRaises(TypeError): control_flow_ops.cond(True, false_fn=lambda: x) def testCondMissingArg2(self): x = constant_op.constant(1) with self.assertRaises(TypeError): control_flow_ops.cond(True, lambda: x) def testCondDuplicateArg1(self): x = constant_op.constant(1) with self.assertRaises(TypeError): control_flow_ops.cond(True, lambda: x, lambda: x, fn1=lambda: x) def testCondDuplicateArg2(self): x = constant_op.constant(1) with self.assertRaises(TypeError): control_flow_ops.cond(True, lambda: x, lambda: x, fn2=lambda: x) @test_util.enable_control_flow_v2 @test_util.run_in_graph_and_eager_modes def testCond_gradient(self): true_in, false_in = array_ops.constant(1.), array_ops.constant(5.) with backprop.GradientTape(persistent=True) as tape: tape.watch(true_in) tape.watch(false_in) cond_true = control_flow_ops.cond( array_ops.constant(True), lambda: true_in**2., lambda: false_in**2.) cond_false = control_flow_ops.cond( array_ops.constant(False), lambda: true_in**2., lambda: false_in**2.) grads_true = tape.gradient( cond_true, [true_in, false_in], output_gradients=3.) grads_false = tape.gradient( cond_false, [true_in, false_in], output_gradients=3.) self.assertEqual(3. * 2. * 1., self.evaluate(grads_true[0])) self.assertEqual(None if context.executing_eagerly() else 0., self.evaluate(grads_true[1])) self.assertEqual(3. * 2. * 5., self.evaluate(grads_false[1])) self.assertEqual(None if context.executing_eagerly() else 0., self.evaluate(grads_false[0])) class ContextTest(test_util.TensorFlowTestCase): @test_util.run_deprecated_v1 def testCondContext(self): with self.cached_session() as sess: x = constant_op.constant(2) y = constant_op.constant(5) control_flow_ops.cond( math_ops.less(x, y), lambda: math_ops.multiply(x, 17), lambda: math_ops.add(y, 23)) for op in sess.graph.get_operations(): c = op._get_control_flow_context() if c: self.assertProtoEquals( c.to_proto(), control_flow_ops.CondContext.from_proto(c.to_proto()).to_proto()) def _testWhileContextHelper(self, maximum_iterations=None): with self.cached_session() as sess: i = constant_op.constant(0) c = lambda i: math_ops.less(i, 10) b = lambda i: math_ops.add(i, 1) control_flow_ops.while_loop( c, b, [i], maximum_iterations=maximum_iterations) for op in sess.graph.get_operations(): control_flow_context = op._get_control_flow_context() if control_flow_context: self.assertProtoEquals( control_flow_context.to_proto(), control_flow_ops.WhileContext.from_proto( control_flow_context.to_proto()).to_proto()) @test_util.run_deprecated_v1 def testWhileContext(self): self._testWhileContextHelper() @test_util.run_deprecated_v1 def testWhileContextWithMaximumIterations(self): self._testWhileContextHelper(maximum_iterations=10) @test_util.run_deprecated_v1 def testControlContextImportScope(self): class NoABCControlFlowContext(control_flow_ops.ControlFlowContext): """A noop wrapper around `ControlFlowContext`. `ControlFlowContext` is an ABC and therefore cannot be instantiated. 
""" # pylint: disable=useless-super-delegation def to_control_flow_context_def(self, context_def, export_scope=None): super(NoABCControlFlowContext, self).to_control_flow_context_def( context_def, export_scope) with self.cached_session(): constant_op.constant(0, name="a") constant_op.constant(2, name="test_scope/a") b1 = constant_op.constant(1, name="b") b2 = constant_op.constant(3, name="test_scope/b") c = NoABCControlFlowContext() c._values = ["a", "b"] c._external_values = {"a": b1} c_with_scope = NoABCControlFlowContext( values_def=c._to_values_def(), import_scope="test_scope") # _values and _external_values should be have scope prepended. self.assertEquals( c_with_scope._values, set(["test_scope/a", "test_scope/b"])) self.assertEquals( c_with_scope._external_values, {"test_scope/a": b2}) # Calling _to_proto() with export_scope should remove "test_scope". self.assertProtoEquals( c._to_values_def(), c_with_scope._to_values_def(export_scope="test_scope")) def _get_nested_shape(nested): def _get_shape(tensor): if isinstance(tensor, tensor_array_ops.TensorArray): return tensor_array_ops.TensorArray elif isinstance(tensor, ops.IndexedSlices): return tensor.dense_shape else: return tensor.get_shape() return nest.map_structure(_get_shape, nested) def _create_tensor_array(size, shape): ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=size, clear_after_read=False) for i in range(size): ta = ta.write(i, array_ops.zeros(shape)) return ta def _raw_nested_shape(nested_shape): def _raw_shape(shape): if isinstance(shape, tensor_shape.TensorShape) and shape.ndims is not None: return [x.value for x in shape.dims] else: return None return nest.map_structure(_raw_shape, nested_shape) # TODO(yori): Add tests for indexed slices. class DataTypesTest(test_util.TensorFlowTestCase): def assertAllEqualNested(self, a, b): if isinstance(a, (list, tuple)): for entry_a, entry_b in zip(a, b): self.assertAllEqualNested(entry_a, entry_b) else: self.assertAllEqual(a, b) def _testShape(self, fn_true, fn_false, expected_shape, strict=False): condition = array_ops.placeholder(dtypes.bool) output_cond = control_flow_ops.cond(condition, fn_true, fn_false, strict=strict) self.assertEqual( _raw_nested_shape(_get_nested_shape(output_cond)), _raw_nested_shape(expected_shape)) output_case = control_flow_ops.case([(condition, fn_true)], fn_false, strict=strict) self.assertEqual( _raw_nested_shape(_get_nested_shape(output_case)), _raw_nested_shape(expected_shape)) def _testReturnValues(self, fn_true, fn_false, expected_value_true, expected_value_false, strict=False, check_cond=True, feed_dict=None): if feed_dict is None: feed_dict = {} condition = array_ops.placeholder(dtypes.bool) output_cond = control_flow_ops.cond(condition, fn_true, fn_false, strict=strict) output_case = control_flow_ops.case([(condition, fn_true)], fn_false, strict=strict) with self.cached_session() as sess: self.evaluate(variables.global_variables_initializer()) true_feed_dict = {condition: True} true_feed_dict.update(feed_dict) result_cond, result_case = sess.run([output_cond, output_case], feed_dict=true_feed_dict) self.assertAllEqualNested(result_cond, expected_value_true) if check_cond: self.assertAllEqualNested(result_case, expected_value_true) false_feed_dict = {condition: False} false_feed_dict.update(feed_dict) result_cond, result_case = sess.run([output_cond, output_case], feed_dict=false_feed_dict) self.assertAllEqualNested(result_cond, expected_value_false) if check_cond: self.assertAllEqualNested(result_case, expected_value_false) 
@test_util.run_deprecated_v1 def test_int(self): shape = tensor_shape.TensorShape([]) fn_true = lambda: 1 fn_false = lambda: 2 self._testShape(fn_true, fn_false, shape) self._testReturnValues(fn_true, fn_false, 1, 2) self._testShape(fn_true, fn_false, shape, strict=True) self._testReturnValues(fn_true, fn_false, 1, 2, strict=True) @test_util.run_deprecated_v1 def test_float(self): shape = tensor_shape.TensorShape([]) fn_true = lambda: 1.0 fn_false = lambda: 2.0 self._testShape(fn_true, fn_false, shape) self._testReturnValues(fn_true, fn_false, 1.0, 2.0) @test_util.run_deprecated_v1 def test_noop(self): shape = tensor_shape.TensorShape(None) self._testShape(control_flow_ops.no_op, control_flow_ops.no_op, shape) self._testReturnValues(control_flow_ops.no_op, control_flow_ops.no_op, True, False, check_cond=False) @test_util.run_deprecated_v1 def test_string(self): shape = tensor_shape.TensorShape([]) fn_true = lambda: "abc" fn_false = lambda: "xyz" self._testShape(fn_true, fn_false, shape) self._testReturnValues(fn_true, fn_false, b"abc", b"xyz") @test_util.run_deprecated_v1 def test_variable(self): shape = tensor_shape.TensorShape([]) fn_true = lambda: variables.Variable(3.0) fn_false = lambda: variables.Variable(4.0) self._testShape(fn_true, fn_false, shape) self._testReturnValues(fn_true, fn_false, 3.0, 4.0) @test_util.run_v1_only("b/120553181") def test_none(self): fn_none = lambda: None fn_tensor = lambda: constant_op.constant(1) with self.assertRaises(ValueError): control_flow_ops.cond(constant_op.constant(True), fn_none, fn_tensor) with self.assertRaises(ValueError): control_flow_ops.cond(constant_op.constant(True), fn_tensor, fn_none) @test_util.run_deprecated_v1 def test_tensors(self): def _build_true_branch(dtype): def _build(): return (array_ops.zeros([2, 2], dtype=dtype), array_ops.ones([3, 3], dtype=dtype)) return _build def _build_false_branch(dtype): def _build(): return (array_ops.ones([2, 2], dtype=dtype), array_ops.zeros([3, 3], dtype=dtype)) return _build for dtype in (dtypes.float16, dtypes.int8, dtypes.int32, dtypes.uint8): shape = (tensor_shape.TensorShape([2, 2]), tensor_shape.TensorShape([3, 3])) fn_true = _build_true_branch(dtype) fn_false = _build_false_branch(dtype) self._testShape(fn_true, fn_false, shape) self._testReturnValues(fn_true, fn_false, (np.zeros([2, 2]), np.ones([3, 3])), (np.ones([2, 2]), np.zeros([3, 3]))) @test_util.run_deprecated_v1 def test_tensors_unknown_shape(self): def _build_true_branch(dtype): tensor = array_ops.placeholder(dtype=dtype, shape=None) def _build(): return tensor return _build, tensor def _build_false_branch(dtype): tensor = array_ops.placeholder(dtype=dtype, shape=None) def _build(): return tensor return _build, tensor for dtype in (dtypes.float16, dtypes.int8, dtypes.int32, dtypes.uint8): shape = tensor_shape.TensorShape(None) fn_true, true_tensor = _build_true_branch(dtype) fn_false, false_tensor = _build_false_branch(dtype) self._testShape(fn_true, fn_false, shape) self._testReturnValues(fn_true, fn_false, np.zeros([2, 2]), np.ones([2, 2]), feed_dict={true_tensor: np.zeros([2, 2]), false_tensor: np.ones([2, 2])}) @test_util.run_deprecated_v1 def test_sparse_tensors(self): shape = tensor_shape.TensorShape([None, None]) def true_fn(): return [sparse_tensor.SparseTensor(indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])] def false_fn(): return [sparse_tensor.SparseTensor(indices=[[0, 0], [2, 1]], values=[3, 4], dense_shape=[3, 4])] value1 = sparse_tensor.SparseTensorValue(indices=[[0, 0], [1, 2]], values=[1, 2], 
dense_shape=[3, 4]) value2 = sparse_tensor.SparseTensorValue(indices=[[0, 0], [2, 1]], values=[3, 4], dense_shape=[3, 4]) # Non-strict cond is only available in v1 if not tf2.enabled(): self._testShape(true_fn, false_fn, shape) self._testReturnValues(true_fn, false_fn, value1, value2) self._testShape(true_fn, false_fn, [shape], strict=True) self._testReturnValues(true_fn, false_fn, [value1], [value2], strict=True) @test_util.run_deprecated_v1 def test_tensors_with_partially_specified_shapes(self): def _build_branch(dtype, shape): a = array_ops.placeholder(dtype=dtype, shape=shape[0]) b = array_ops.placeholder(dtype=dtype, shape=shape[1]) c = array_ops.placeholder(dtype=dtype, shape=shape[2]) def _build(): return a, b, c return _build, (a, b, c) for dtype in (dtypes.float16, dtypes.int8, dtypes.int32, dtypes.uint8): shape = (tensor_shape.TensorShape([None, 2]), tensor_shape.TensorShape([None]), tensor_shape.TensorShape([3, None])) fn_true, true_tensors = _build_branch(dtype, shape) fn_false, false_tensors = _build_branch(dtype, shape) self._testShape(fn_true, fn_false, shape) self._testReturnValues(fn_true, fn_false, (np.zeros([2, 2]), np.zeros(5), np.ones([3, 3])), (np.zeros([2, 2]), np.zeros(5), np.ones([3, 3])), feed_dict={true_tensors[0]: np.zeros([2, 2]), false_tensors[0]: np.zeros([2, 2]), true_tensors[1]: np.zeros([5]), false_tensors[1]: np.zeros([5]), true_tensors[2]: np.ones([3, 3]), false_tensors[2]: np.ones([3, 3])}) @test_util.run_deprecated_v1 def test_tensor_arrays(self): element_shape = tensor_shape.TensorShape([2]) ta1 = _create_tensor_array(4, element_shape) ta2 = _create_tensor_array(4, element_shape) shape = tensor_array_ops.TensorArray fn_true = lambda: ta1 fn_false = lambda: ta2 self._testShape(fn_true, fn_false, shape) @test_util.run_deprecated_v1 def test_tensor_array_reads(self): shape = tensor_shape.TensorShape([2]) ta = _create_tensor_array(4, shape) fn_true = lambda: ta.read(0) fn_false = lambda: ta.read(1) self._testShape(fn_true, fn_false, shape) @test_util.run_deprecated_v1 def test_list(self): shape = [tensor_shape.TensorShape([]), tensor_shape.TensorShape([]), tensor_shape.TensorShape([])] fn_true = lambda: [constant_op.constant(1), 2, variables.Variable(3.0)] fn_false = lambda: [constant_op.constant(3), 4, variables.Variable(5.0)] self._testShape(fn_true, fn_false, shape) self._testReturnValues(fn_true, fn_false, [1, 2, 3.0], [3, 4, 5.0]) @test_util.run_v1_only("Non-strict cond is only available in v1") def test_non_strict(self): shape = tensor_shape.TensorShape([]) fn_tensor = lambda: constant_op.constant(1) fn_list = lambda: [constant_op.constant(2)] fn_tuple = lambda: (constant_op.constant(3),) self._testShape(fn_tensor, fn_list, shape) self._testShape(fn_tensor, fn_tuple, shape) self._testShape(fn_list, fn_tuple, shape) self._testReturnValues(fn_tensor, fn_list, 1, 2) self._testReturnValues(fn_tensor, fn_tuple, 1, 3) self._testReturnValues(fn_list, fn_tuple, 2, 3) @test_util.run_v1_only("b/120553181") def test_singleton_strict(self): fn_tensor = lambda: constant_op.constant(1) fn_list = lambda: [constant_op.constant(2)] fn_tuple = lambda: (constant_op.constant(3),) with self.assertRaises(ValueError): control_flow_ops.cond(constant_op.constant(True), fn_tensor, fn_list, strict=True) with self.assertRaises(TypeError): control_flow_ops.cond(constant_op.constant(True), fn_list, fn_tuple, strict=True) with self.assertRaises(ValueError): control_flow_ops.case([(constant_op.constant(True), fn_tensor)], fn_list, strict=True) with self.assertRaises(TypeError): 
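      # With strict=True singleton structures are not unpacked, so pairing a
      # list branch with a tuple branch is a structure mismatch (TypeError).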
control_flow_ops.case([(constant_op.constant(True), fn_list)], fn_tuple, strict=True) @test_util.run_deprecated_v1 def test_singleton_list(self): shape = tensor_shape.TensorShape([]) fn_true = lambda: [constant_op.constant(1)] fn_false = lambda: [constant_op.constant(3)] # Non-strict cond is only available in v1 if not tf2.enabled(): self._testShape(fn_true, fn_false, shape) self._testReturnValues(fn_true, fn_false, 1, 3) self._testShape(fn_true, fn_false, [shape], strict=True) self._testReturnValues(fn_true, fn_false, [1], [3], strict=True) @test_util.run_deprecated_v1 def test_singleton_tuple(self): shape = tensor_shape.TensorShape([]) fn_true = lambda: (constant_op.constant(1),) fn_false = lambda: (constant_op.constant(3),) # Non-strict cond is only available in v1 if not tf2.enabled(): self._testShape(fn_true, fn_false, shape) self._testReturnValues(fn_true, fn_false, 1, 3) self._testShape(fn_true, fn_false, (shape,), strict=True) self._testReturnValues(fn_true, fn_false, (1,), (3,), strict=True) @test_util.run_deprecated_v1 def test_singleton_namedtuple(self): shape = tensor_shape.TensorShape([]) fn_true = lambda: SingletonTestTuple(constant_op.constant(1)) fn_false = lambda: SingletonTestTuple(constant_op.constant(3)) # Non-strict cond is only available in v1 if not tf2.enabled(): self._testShape(fn_true, fn_false, shape) self._testReturnValues(fn_true, fn_false, 1, 3) self._testShape(fn_true, fn_false, SingletonTestTuple(shape), strict=True) self._testReturnValues(fn_true, fn_false, SingletonTestTuple(1), SingletonTestTuple(3), strict=True) @test_util.run_deprecated_v1 def test_tuple(self): shape = (tensor_shape.TensorShape([]), tensor_shape.TensorShape([])) fn_true = lambda: (constant_op.constant(1), 2) fn_false = lambda: (constant_op.constant(3), 4) self._testShape(fn_true, fn_false, shape) self._testReturnValues(fn_true, fn_false, (1, 2), (3, 4)) @test_util.run_deprecated_v1 def test_namedtuple(self): shape = TestTuple(tensor_shape.TensorShape([]), tensor_shape.TensorShape([])) fn_true = lambda: TestTuple(constant_op.constant(1), 2) fn_false = lambda: TestTuple(constant_op.constant(3), 4) self._testShape(fn_true, fn_false, shape) self._testReturnValues(fn_true, fn_false, TestTuple(1, 2), TestTuple(3, 4)) @test_util.run_deprecated_v1 def test_nested(self): shape = [tensor_shape.TensorShape([]), TestTuple(tensor_shape.TensorShape([]), [tensor_shape.TensorShape([]), tensor_shape.TensorShape([])]), tensor_shape.TensorShape([5, 5]), tensor_shape.TensorShape([])] def true_fn(): return [constant_op.constant(1), TestTuple(constant_op.constant(2), [3, 4]), array_ops.zeros([5, 5]), 6] def false_fn(): return [constant_op.constant(11), TestTuple(constant_op.constant(12), [13, 14]), array_ops.ones([5, 5]), 16] self._testShape(true_fn, false_fn, shape) self._testReturnValues( true_fn, false_fn, [1, TestTuple(2, [3, 4]), np.zeros([5, 5]), 6], [11, TestTuple(12, [13, 14]), np.ones([5, 5]), 16]) @test_util.run_deprecated_v1 def test_cond_inside_while_loop(self): def body(i, matrix): result_tuple, unused_matrix = control_flow_ops.cond( constant_op.constant(True), lambda: (TestTuple(matrix * 2, matrix * 4), matrix), lambda: (TestTuple(matrix * 4, matrix * 2), matrix)) return [i+1, result_tuple.a] iteration, matrix = control_flow_ops.while_loop( lambda i, matrix: i < 10, body, loop_vars=[constant_op.constant(0), array_ops.ones([2, 2])]) self.assertEqual(iteration.get_shape(), tensor_shape.TensorShape([])) self.assertEqual(matrix.get_shape(), tensor_shape.TensorShape([2, 2])) 
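# The tests below exercise control_flow_ops.switch_case, which dispatches on an
# integer branch index instead of boolean predicates.  A minimal standalone
# sketch (hypothetical helper, not called by any test) of its use:
def _switch_case_sketch():
  branch_fns = {0: lambda: constant_op.constant(1.),
                1: lambda: constant_op.constant(10.)}
  # branch_index == 1 selects the second callable, so this yields 10.0.
  return control_flow_ops.switch_case(constant_op.constant(1), branch_fns)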
@test_util.run_all_in_graph_and_eager_modes class IndexedCaseTest(test_util.TensorFlowTestCase, parameterized.TestCase): def make_name(self): return self.id().split(".")[-1].replace("(", "_").replace(")", "") def disabled_testCase_ticklesGpuVsHostMemoryIssueWithInt32(self): nbranches = 5 def make_func(bi): return lambda: array_ops.constant(bi * 10, name="br{}_out".format(bi)) branches = [(i, make_func(i)) for i in range(nbranches)] for bi in range(nbranches): branch_index = array_ops.placeholder_with_default(bi, []) case_out = control_flow_ops.switch_case(branch_index, branches) self.assertEqual(bi * 10, self.evaluate(case_out)) @parameterized.parameters((0,), (2,), (3,)) def testCase(self, bi): nbranches = 5 def make_func(bi): return lambda: array_ops.constant(bi * 10., name="br{}_out".format(bi)) branches = [(i, make_func(i)) for i in range(nbranches)] branch_index = array_ops.placeholder_with_default(bi, []) case_out = control_flow_ops.switch_case( branch_index, branches, name=self.make_name()) self.assertEqual(bi * 10., self.evaluate(case_out)) @parameterized.parameters((-1,), (2,), (4,), (5,), (6,)) def testCase_withDefault(self, bi): nbranches = 5 def make_func(bi): return lambda: array_ops.constant(bi * 10., name="br{}_out".format(bi)) branches = [(i, make_func(i)) for i in range(nbranches)] branch_index = array_ops.placeholder_with_default(bi, []) case_out = control_flow_ops.switch_case( branch_index, branches, default=make_func(6), name=self.make_name()) if bi < 0 or bi >= nbranches: expected = 60. else: expected = bi * 10. self.assertEqual(expected, self.evaluate(case_out)) @parameterized.parameters((-1,), (0,), (3,), (5,)) def testCase_dictWithDefault(self, bi): nbranches = 5 def make_func(bi): return lambda: array_ops.constant(bi * 10., name="br{}_out".format(bi)) branches = {i: make_func(i) for i in range(nbranches)} branch_index = array_ops.placeholder_with_default(bi, []) case_out = control_flow_ops.switch_case( branch_index, branches, default=make_func(6), name=self.make_name()) if bi < 0 or bi >= nbranches: expected = 60. else: expected = bi * 10. self.assertEqual(expected, self.evaluate(case_out)) @parameterized.parameters((-1,), (1,), (4,), (5,)) def testCase_gradient(self, bi): nbranches = 5 inputs = [ array_ops.constant(float(bi), name="br{}_in".format(bi)) for bi in range(nbranches) ] def make_func(bi): return lambda: inputs[bi]**2. branches = {bi: make_func(bi) for bi in range(nbranches)} branch_index = array_ops.placeholder_with_default(bi, []) with backprop.GradientTape() as tape: for x in inputs: tape.watch(x) case_out = control_flow_ops.switch_case(branch_index, branches) out_grad = 3. actual_grads = tape.gradient(case_out, inputs, output_gradients=out_grad) expected_grads = [None if context.executing_eagerly() else 0.] * nbranches used_branch_idx = nbranches - 1 if bi < 0 or bi >= nbranches - 1 else bi expected_grads[used_branch_idx] = out_grad * 2. 
* used_branch_idx self.assertEqual(len(expected_grads), len(actual_grads)) for expected, actual in zip(expected_grads, actual_grads): self.assertEqual(expected, self.evaluate(actual)) @parameterized.parameters((-2,), (2,), (5,)) def testCase_gradient_diffShapedIntermediates(self, bi): nbranches = 5 inputs = [ array_ops.constant( float(bi), shape=[bi + 1], name="br{}_in".format(bi)) for bi in range(nbranches) ] def make_func(bi): def f(): x = inputs[bi]**2 * inputs[bi][:bi + 1, None] return math_ops.reduce_sum(x) return f branches = {bi: make_func(bi) for bi in range(nbranches)} branch_index = array_ops.placeholder_with_default(bi, []) with backprop.GradientTape() as tape: for x in inputs: tape.watch(x) case_out = control_flow_ops.switch_case( branch_index, branches, name=self.make_name()) out_grad = 3. actual_grads = tape.gradient(case_out, inputs, output_gradients=out_grad) used_bi = (nbranches - 1) if (bi < 0 or bi >= nbranches - 1) else bi expected_grads = [] for input_idx in range(nbranches): if used_bi == input_idx: with backprop.GradientTape() as tape: tape.watch(inputs[used_bi]) y = make_func(used_bi)() expected_grads.append( self.evaluate( tape.gradient(y, inputs[used_bi], output_gradients=out_grad))) else: expected_grads.append(None if context.executing_eagerly() else [0.] * (input_idx + 1)) self.assertEqual(len(expected_grads), len(actual_grads)) for expected, actual in zip(expected_grads, actual_grads): if expected is None: self.assertIsNone(actual) else: self.assertAllEqual(expected, self.evaluate(actual)) @test_util.run_gpu_only @test_util.disable_xla("Wants RunMetadata") def testParallelExecution(self): """Verify disjoint branches across while iterations are run in parallel.""" with ops.Graph().as_default() as g: nbranches = 7 matrices = array_ops.unstack( # Ensure all are ready before while. 
array_ops.matrix_diag( random_ops.random_uniform([nbranches, 8, 512]) + 1e-3)) def make_branch(i, mat, name): def branch_fn(): next_i = i + 1 with ops.device("gpu:0"): return next_i, math_ops.reduce_sum( linalg_ops.cholesky(mat, name=name + "_Cholesky")) return branch_fn def make_branches(i): return [make_branch(i, matrices[bi], "br{}".format(bi)) for bi in range(nbranches)] def cond(i, _): return i < nbranches def body(i, result): with ops.device("cpu:0"): next_i, branch_out = control_flow_ops.switch_case(i, make_branches(i)) return next_i, result + branch_out _, result = control_flow_ops.while_loop(cond, body, [0, 0.]) run_metadata = config_pb2.RunMetadata() run_options = config_pb2.RunOptions( trace_level=config_pb2.RunOptions.FULL_TRACE) config = config_pb2.ConfigProto( allow_soft_placement=False, log_device_placement=True) with session.Session(config=config, graph=g) as sess: _ = sess.run(result, options=run_options, run_metadata=run_metadata) chol_node_stats = [] for dev_stats in run_metadata.step_stats.dev_stats: for node_stats in dev_stats.node_stats: if (node_stats.node_name.endswith("Cholesky") and node_stats.all_start_nanos > 0): chol_node_stats.append(node_stats) self.assertLen(chol_node_stats, nbranches) chol_node_stats = sorted(chol_node_stats, key=lambda stats: stats.node_name) op_start_nanos = [ stats.all_start_nanos for stats in chol_node_stats ] op_end_nanos = [ stats.all_start_nanos + stats.op_end_rel_nanos for stats in chol_node_stats ] def overlap(range1, range2): s1, e1 = range1 s2, e2 = range2 if s1 < s2: return 0 if s2 > e1 else e1 - s2 return 0 if s1 > e2 else e2 - s1 timespans = list(zip(op_start_nanos, op_end_nanos)) overlaps_chol0 = [overlap(timespans[0], r2) for r2 in timespans[1:]] # There are nbranches-1 overlaps, sometimes all nonzero, but we # conservatively check for at least one here, to avoid test flakiness. 
self.assertGreater(np.count_nonzero(overlaps_chol0), 0) def testCase_validateIndicesContiguous(self): def make_func(bi): return lambda: array_ops.constant(bi * 10., name="br{}_out".format(bi)) branches = {i: make_func(i) for i in range(0, 6, 2)} with self.assertRaisesRegexp(ValueError, "must form contiguous"): control_flow_ops.switch_case(array_ops.constant(0), branches) def testCase_validateIndicesDup(self): def make_func(bi): return lambda: array_ops.constant(bi * 10., name="br{}_out".format(bi)) branches = [(i, make_func(i)) for i in range(0, 6, 2)] branches.append((0, make_func(7))) with self.assertRaisesRegexp(ValueError, "must form contiguous"): control_flow_ops.switch_case(array_ops.constant(0), branches) def testCase_validateBranchIndex(self): def make_func(bi): return lambda: array_ops.constant(bi * 10., name="br{}_out".format(bi)) branches = {i: make_func(i) for i in range(5)} with self.assertRaisesRegexp(TypeError, "branch_index.*Tensor"): control_flow_ops.switch_case(1, branches) def testCase_validateNonIntKeys(self): def make_func(bi): return lambda: array_ops.constant(bi * 10., name="br{}_out".format(bi)) branches = {array_ops.constant(i): make_func(i) for i in range(5)} with self.assertRaisesRegexp(TypeError, "must be a Python `int`"): control_flow_ops.switch_case(array_ops.constant(1), branches) class CaseTest(test_util.TensorFlowTestCase): @test_util.run_deprecated_v1 def testCase_withDefault(self): x = array_ops.placeholder(dtype=dtypes.int32, shape=[]) conditions = [(math_ops.equal(x, 1), lambda: constant_op.constant(2)), (math_ops.equal(x, 2), lambda: constant_op.constant(4))] default = lambda: constant_op.constant(6) output = control_flow_ops.case(conditions, default, exclusive=True) with self.cached_session() as sess: self.assertEqual(sess.run(output, feed_dict={x: 1}), 2) self.assertEqual(sess.run(output, feed_dict={x: 2}), 4) self.assertEqual(sess.run(output, feed_dict={x: 3}), 6) @test_util.run_deprecated_v1 def testCase_multiple_matches_exclusive(self): x = array_ops.placeholder(dtype=dtypes.int32, shape=[]) conditions = [(math_ops.equal(x, 1), lambda: constant_op.constant(2)), (math_ops.equal(x, 2), lambda: constant_op.constant(4)), (math_ops.equal(x, 2), lambda: constant_op.constant(6))] default = lambda: constant_op.constant(8) output = control_flow_ops.case(conditions, default, exclusive=True) with self.cached_session() as sess: self.assertEqual(sess.run(output, feed_dict={x: 1}), 2) self.assertEqual(sess.run(output, feed_dict={x: 3}), 8) with self.assertRaisesRegexp(errors.InvalidArgumentError, "Input error:"): sess.run(output, feed_dict={x: 2}) @test_util.run_deprecated_v1 def testCase_multiple_matches_non_exclusive(self): x = array_ops.placeholder(dtype=dtypes.int32, shape=[]) conditions = [(math_ops.equal(x, 1), lambda: constant_op.constant(2)), (math_ops.equal(x, 2), lambda: constant_op.constant(4)), (math_ops.equal(x, 2), lambda: constant_op.constant(6))] default = lambda: constant_op.constant(8) output = control_flow_ops.case(conditions, default, exclusive=False) with self.cached_session() as sess: self.assertEqual(sess.run(output, feed_dict={x: 1}), 2) self.assertEqual(sess.run(output, feed_dict={x: 2}), 4) self.assertEqual(sess.run(output, feed_dict={x: 3}), 8) @test_util.run_deprecated_v1 def testCase_withoutDefault(self): x = array_ops.placeholder(dtype=dtypes.int32, shape=[]) conditions = [(math_ops.equal(x, 1), lambda: constant_op.constant(2)), (math_ops.equal(x, 2), lambda: constant_op.constant(4)), (math_ops.equal(x, 3), lambda: 
constant_op.constant(6))] output = control_flow_ops.case(conditions, exclusive=True) with self.cached_session() as sess: self.assertEqual(sess.run(output, feed_dict={x: 1}), 2) self.assertEqual(sess.run(output, feed_dict={x: 2}), 4) self.assertEqual(sess.run(output, feed_dict={x: 3}), 6) with self.assertRaisesRegexp(errors.InvalidArgumentError, "Input error:"): sess.run(output, feed_dict={x: 4}) @test_util.run_deprecated_v1 def testCase_withoutDefault_oneCondition(self): x = array_ops.placeholder(dtype=dtypes.int32, shape=[]) conditions = [(math_ops.equal(x, 1), lambda: constant_op.constant(2))] output = control_flow_ops.case(conditions, exclusive=True) with self.cached_session() as sess: self.assertEqual(sess.run(output, feed_dict={x: 1}), 2) with self.assertRaisesRegexp(errors.InvalidArgumentError, "Input error:"): sess.run(output, feed_dict={x: 4}) @test_util.run_in_graph_and_eager_modes def testCase_dict(self): x = constant_op.constant(2) conditions = { math_ops.equal(x, 1): lambda: constant_op.constant(2), math_ops.equal(x, 2): lambda: constant_op.constant(4) } output = control_flow_ops.case(conditions, exclusive=True) self.assertEqual(4, self.evaluate(output)) class WhileLoopTestCase(test_util.TensorFlowTestCase): @test_util.run_in_graph_and_eager_modes def testWhileLoopWithSingleVariable(self): i = constant_op.constant(0) c = lambda i: math_ops.less(i, 10) b = lambda i: math_ops.add(i, 1) r = control_flow_ops.while_loop(c, b, [i]) self.assertEqual(self.evaluate(r), 10) @test_util.run_in_graph_and_eager_modes def testEagerWhileLoopWithSingleVariable_bodyReturnsTuple(self): i = constant_op.constant(0) c = lambda i: math_ops.less(i, 10) b = lambda i: (math_ops.add(i, 1),) r = control_flow_ops.while_loop(c, b, [i]) # Expect a tuple since that is what the body returns. self.assertEqual(self.evaluate(r), (10,)) @test_util.run_deprecated_v1 def testWhileLoopSameReturnShape_False(self): i = constant_op.constant(0) c = lambda i, _: math_ops.less(i, 10) # Body returns a [tensor, []] b = lambda i, _: [math_ops.add(i, 1), []] # Should only return the tensor. r = control_flow_ops.while_loop(c, b, [i, []]) self.assertEqual(self.evaluate(r), 10) def testWhileLoopSameReturnShape_True(self): i = constant_op.constant(0) c = lambda i, _: math_ops.less(i, 10) # Body returns a [tensor, []] b = lambda i, _: [math_ops.add(i, 1), []] # Should only return the original structure. r = control_flow_ops.while_loop(c, b, [i, []], return_same_structure=True) self.assertEqual(self.evaluate(r), [10, []]) class AssertTest(test_util.TensorFlowTestCase): @test_util.run_deprecated_v1 def testAssert(self): i = constant_op.constant(0) c = control_flow_ops.Assert(i < 10, [i, [10], [i + 1]]) self.evaluate(c) i = constant_op.constant(10) c = control_flow_ops.Assert(i < 10, [i, [10], [i + 1]]) with self.assertRaises(errors.InvalidArgumentError): self.evaluate(c) @test_util.run_in_graph_and_eager_modes def testAssertInFunction(self): @def_function.function def whiny(value): control_flow_ops.Assert(value, ["Raised false"]) return constant_op.constant(5) with self.assertRaises(errors.InvalidArgumentError): self.evaluate(whiny(False)) self.assertAllEqual(whiny(True), 5) if __name__ == "__main__": googletest.main()
tensorflow-master
tensorflow/python/ops/control_flow_ops_test.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # pylint: disable=g-short-docstring-punctuation """Asserts and Boolean Checks.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.eager import context from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import tensor_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.util import compat from tensorflow.python.util import deprecation from tensorflow.python.util.tf_export import tf_export NUMERIC_TYPES = frozenset( [dtypes.float32, dtypes.float64, dtypes.int8, dtypes.int16, dtypes.int32, dtypes.int64, dtypes.uint8, dtypes.qint8, dtypes.qint32, dtypes.quint8, dtypes.complex64]) __all__ = [ 'assert_negative', 'assert_positive', 'assert_proper_iterable', 'assert_non_negative', 'assert_non_positive', 'assert_equal', 'assert_none_equal', 'assert_near', 'assert_integer', 'assert_less', 'assert_less_equal', 'assert_greater', 'assert_greater_equal', 'assert_rank', 'assert_rank_at_least', 'assert_rank_in', 'assert_same_float_dtype', 'assert_scalar', 'assert_type', 'assert_shapes', 'is_non_decreasing', 'is_numeric_tensor', 'is_strictly_increasing', ] def _maybe_constant_value_string(t): if not isinstance(t, ops.Tensor): return str(t) const_t = tensor_util.constant_value(t) if const_t is not None: return str(const_t) return t def _assert_static(condition, data): """Raises a InvalidArgumentError with as much information as possible.""" if not condition: data_static = [_maybe_constant_value_string(x) for x in data] raise errors.InvalidArgumentError(node_def=None, op=None, message='\n'.join(data_static)) def _shape_and_dtype_str(tensor): """Returns a string containing tensor's shape and dtype.""" return 'shape=%s dtype=%s' % (tensor.shape, tensor.dtype.name) @tf_export( 'debugging.assert_proper_iterable', v1=['debugging.assert_proper_iterable', 'assert_proper_iterable']) @deprecation.deprecated_endpoints('assert_proper_iterable') def assert_proper_iterable(values): """Static assert that values is a "proper" iterable. `Ops` that expect iterables of `Tensor` can call this to validate input. Useful since `Tensor`, `ndarray`, byte/text type are all iterables themselves. Args: values: Object to be checked. Raises: TypeError: If `values` is not iterable or is one of `Tensor`, `SparseTensor`, `np.array`, `tf.compat.bytes_or_text_types`. 
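
  For example (illustrative):

  ```python
  tf.debugging.assert_proper_iterable([tf.constant(1), tf.constant(2)])  # A list: passes.
  tf.debugging.assert_proper_iterable(tf.constant([1, 2]))  # A Tensor: raises TypeError.
  ```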
""" unintentional_iterables = ( (ops.Tensor, sparse_tensor.SparseTensor, np.ndarray) + compat.bytes_or_text_types ) if isinstance(values, unintentional_iterables): raise TypeError( 'Expected argument "values" to be a "proper" iterable. Found: %s' % type(values)) if not hasattr(values, '__iter__'): raise TypeError( 'Expected argument "values" to be iterable. Found: %s' % type(values)) @tf_export('debugging.assert_negative', v1=[]) def assert_negative_v2(x, message=None, summarize=None, name=None): """Assert the condition `x < 0` holds element-wise. This Op checks that `x[i] < 0` holds for every element of `x`. If `x` is empty, this is trivially satisfied. If `x` is not negative everywhere, `message`, as well as the first `summarize` entries of `x` are printed, and `InvalidArgumentError` is raised. Args: x: Numeric `Tensor`. message: A string to prefix to the default message. summarize: Print this many entries of each tensor. name: A name for this operation (optional). Defaults to "assert_negative". Returns: Op raising `InvalidArgumentError` unless `x` is all negative. This can be used with `tf.control_dependencies` inside of `tf.function`s to block followup computation until the check has executed. @compatibility(eager) returns None @end_compatibility Raises: InvalidArgumentError: if the check can be performed immediately and `x[i] < 0` is False. The check can be performed immediately during eager execution or if `x` is statically known. """ return assert_negative(x=x, message=message, summarize=summarize, name=name) @tf_export(v1=['debugging.assert_negative', 'assert_negative']) @deprecation.deprecated_endpoints('assert_negative') def assert_negative(x, data=None, summarize=None, message=None, name=None): """Assert the condition `x < 0` holds element-wise. Example of adding a dependency to an operation: ```python with tf.control_dependencies([tf.compat.v1.assert_negative(x)]): output = tf.reduce_sum(x) ``` Negative means, for every element `x[i]` of `x`, we have `x[i] < 0`. If `x` is empty this is trivially satisfied. Args: x: Numeric `Tensor`. data: The tensors to print out if the condition is False. Defaults to error message and first few entries of `x`. summarize: Print this many entries of each tensor. message: A string to prefix to the default message. name: A name for this operation (optional). Defaults to "assert_negative". Returns: Op raising `InvalidArgumentError` unless `x` is all negative. """ message = message or '' with ops.name_scope(name, 'assert_negative', [x, data]): x = ops.convert_to_tensor(x, name='x') if data is None: if context.executing_eagerly(): name = _shape_and_dtype_str(x) else: name = x.name data = [ message, 'Condition x < 0 did not hold element-wise:', 'x (%s) = ' % name, x] zero = ops.convert_to_tensor(0, dtype=x.dtype) return assert_less(x, zero, data=data, summarize=summarize) @tf_export('debugging.assert_positive', v1=[]) def assert_positive_v2(x, message=None, summarize=None, name=None): """Assert the condition `x > 0` holds element-wise. This Op checks that `x[i] > 0` holds for every element of `x`. If `x` is empty, this is trivially satisfied. If `x` is not positive everywhere, `message`, as well as the first `summarize` entries of `x` are printed, and `InvalidArgumentError` is raised. Args: x: Numeric `Tensor`. message: A string to prefix to the default message. summarize: Print this many entries of each tensor. name: A name for this operation (optional). Defaults to "assert_positive". 
Returns: Op raising `InvalidArgumentError` unless `x` is all positive. This can be used with `tf.control_dependencies` inside of `tf.function`s to block followup computation until the check has executed. @compatibility(eager) returns None @end_compatibility Raises: InvalidArgumentError: if the check can be performed immediately and `x[i] > 0` is False. The check can be performed immediately during eager execution or if `x` is statically known. """ return assert_positive(x=x, summarize=summarize, message=message, name=name) @tf_export(v1=['debugging.assert_positive', 'assert_positive']) @deprecation.deprecated_endpoints('assert_positive') def assert_positive(x, data=None, summarize=None, message=None, name=None): """Assert the condition `x > 0` holds element-wise. Example of adding a dependency to an operation: ```python with tf.control_dependencies([tf.compat.v1.assert_positive(x)]): output = tf.reduce_sum(x) ``` Positive means, for every element `x[i]` of `x`, we have `x[i] > 0`. If `x` is empty this is trivially satisfied. Args: x: Numeric `Tensor`. data: The tensors to print out if the condition is False. Defaults to error message and first few entries of `x`. summarize: Print this many entries of each tensor. message: A string to prefix to the default message. name: A name for this operation (optional). Defaults to "assert_positive". Returns: Op raising `InvalidArgumentError` unless `x` is all positive. """ message = message or '' with ops.name_scope(name, 'assert_positive', [x, data]): x = ops.convert_to_tensor(x, name='x') if data is None: if context.executing_eagerly(): name = _shape_and_dtype_str(x) else: name = x.name data = [ message, 'Condition x > 0 did not hold element-wise:', 'x (%s) = ' % name, x] zero = ops.convert_to_tensor(0, dtype=x.dtype) return assert_less(zero, x, data=data, summarize=summarize) @tf_export('debugging.assert_non_negative', v1=[]) def assert_non_negative_v2(x, message=None, summarize=None, name=None): """Assert the condition `x >= 0` holds element-wise. This Op checks that `x[i] >= 0` holds for every element of `x`. If `x` is empty, this is trivially satisfied. If `x` is not >= 0 everywhere, `message`, as well as the first `summarize` entries of `x` are printed, and `InvalidArgumentError` is raised. Args: x: Numeric `Tensor`. message: A string to prefix to the default message. summarize: Print this many entries of each tensor. name: A name for this operation (optional). Defaults to "assert_non_negative". Returns: Op raising `InvalidArgumentError` unless `x` is all non-negative. This can be used with `tf.control_dependencies` inside of `tf.function`s to block followup computation until the check has executed. @compatibility(eager) returns None @end_compatibility Raises: InvalidArgumentError: if the check can be performed immediately and `x[i] >= 0` is False. The check can be performed immediately during eager execution or if `x` is statically known. """ return assert_non_negative(x=x, summarize=summarize, message=message, name=name) @tf_export(v1=['debugging.assert_non_negative', 'assert_non_negative']) @deprecation.deprecated_endpoints('assert_non_negative') def assert_non_negative(x, data=None, summarize=None, message=None, name=None): """Assert the condition `x >= 0` holds element-wise. Example of adding a dependency to an operation: ```python with tf.control_dependencies([tf.compat.v1.assert_non_negative(x)]): output = tf.reduce_sum(x) ``` Non-negative means, for every element `x[i]` of `x`, we have `x[i] >= 0`. 
If `x` is empty this is trivially satisfied. Args: x: Numeric `Tensor`. data: The tensors to print out if the condition is False. Defaults to error message and first few entries of `x`. summarize: Print this many entries of each tensor. message: A string to prefix to the default message. name: A name for this operation (optional). Defaults to "assert_non_negative". Returns: Op raising `InvalidArgumentError` unless `x` is all non-negative. """ message = message or '' with ops.name_scope(name, 'assert_non_negative', [x, data]): x = ops.convert_to_tensor(x, name='x') if data is None: if context.executing_eagerly(): name = _shape_and_dtype_str(x) else: name = x.name data = [ message, 'Condition x >= 0 did not hold element-wise:', 'x (%s) = ' % name, x] zero = ops.convert_to_tensor(0, dtype=x.dtype) return assert_less_equal(zero, x, data=data, summarize=summarize) @tf_export('debugging.assert_non_positive', v1=[]) def assert_non_positive_v2(x, message=None, summarize=None, name=None): """Assert the condition `x <= 0` holds element-wise. This Op checks that `x[i] <= 0` holds for every element of `x`. If `x` is empty, this is trivially satisfied. If `x` is not <= 0 everywhere, `message`, as well as the first `summarize` entries of `x` are printed, and `InvalidArgumentError` is raised. Args: x: Numeric `Tensor`. message: A string to prefix to the default message. summarize: Print this many entries of each tensor. name: A name for this operation (optional). Defaults to "assert_non_positive". Returns: Op raising `InvalidArgumentError` unless `x` is all non-positive. This can be used with `tf.control_dependencies` inside of `tf.function`s to block followup computation until the check has executed. @compatibility(eager) returns None @end_compatibility Raises: InvalidArgumentError: if the check can be performed immediately and `x[i] <= 0` is False. The check can be performed immediately during eager execution or if `x` is statically known. """ return assert_non_positive(x=x, summarize=summarize, message=message, name=name) @tf_export(v1=['debugging.assert_non_positive', 'assert_non_positive']) @deprecation.deprecated_endpoints('assert_non_positive') def assert_non_positive(x, data=None, summarize=None, message=None, name=None): """Assert the condition `x <= 0` holds element-wise. Example of adding a dependency to an operation: ```python with tf.control_dependencies([tf.compat.v1.assert_non_positive(x)]): output = tf.reduce_sum(x) ``` Non-positive means, for every element `x[i]` of `x`, we have `x[i] <= 0`. If `x` is empty this is trivially satisfied. Args: x: Numeric `Tensor`. data: The tensors to print out if the condition is False. Defaults to error message and first few entries of `x`. summarize: Print this many entries of each tensor. message: A string to prefix to the default message. name: A name for this operation (optional). Defaults to "assert_non_positive". Returns: Op raising `InvalidArgumentError` unless `x` is all non-positive. 
""" message = message or '' with ops.name_scope(name, 'assert_non_positive', [x, data]): x = ops.convert_to_tensor(x, name='x') if data is None: if context.executing_eagerly(): name = _shape_and_dtype_str(x) else: name = x.name data = [ message, 'Condition x <= 0 did not hold element-wise:' 'x (%s) = ' % name, x] zero = ops.convert_to_tensor(0, dtype=x.dtype) return assert_less_equal(x, zero, data=data, summarize=summarize) @tf_export('debugging.assert_equal', 'assert_equal', v1=[]) def assert_equal_v2(x, y, message=None, summarize=None, name=None): """Assert the condition `x == y` holds element-wise. This Op checks that `x[i] == y[i]` holds for every pair of (possibly broadcast) elements of `x` and `y`. If both `x` and `y` are empty, this is trivially satisfied. If `x` and `y` are not equal, `message`, as well as the first `summarize` entries of `x` and `y` are printed, and `InvalidArgumentError` is raised. Args: x: Numeric `Tensor`. y: Numeric `Tensor`, same dtype as and broadcastable to `x`. message: A string to prefix to the default message. summarize: Print this many entries of each tensor. name: A name for this operation (optional). Defaults to "assert_equal". Returns: Op that raises `InvalidArgumentError` if `x == y` is False. This can be used with `tf.control_dependencies` inside of `tf.function`s to block followup computation until the check has executed. @compatibility(eager) returns None @end_compatibility Raises: InvalidArgumentError: if the check can be performed immediately and `x == y` is False. The check can be performed immediately during eager execution or if `x` and `y` are statically known. """ return assert_equal(x=x, y=y, summarize=summarize, message=message, name=name) @tf_export(v1=['debugging.assert_equal', 'assert_equal']) def assert_equal(x, y, data=None, summarize=None, message=None, name=None): """Assert the condition `x == y` holds element-wise. Example of adding a dependency to an operation: ```python with tf.control_dependencies([tf.compat.v1.assert_equal(x, y)]): output = tf.reduce_sum(x) ``` This condition holds if for every pair of (possibly broadcast) elements `x[i]`, `y[i]`, we have `x[i] == y[i]`. If both `x` and `y` are empty, this is trivially satisfied. Args: x: Numeric `Tensor`. y: Numeric `Tensor`, same dtype as and broadcastable to `x`. data: The tensors to print out if the condition is False. Defaults to error message and first few entries of `x`, `y`. summarize: Print this many entries of each tensor. message: A string to prefix to the default message. name: A name for this operation (optional). Defaults to "assert_equal". Returns: Op that raises `InvalidArgumentError` if `x == y` is False. @compatibility(eager) returns None @end_compatibility Raises: InvalidArgumentError: if the check can be performed immediately and `x == y` is False. The check can be performed immediately during eager execution or if `x` and `y` are statically known. """ message = message or '' with ops.name_scope(name, 'assert_equal', [x, y, data]): x = ops.convert_to_tensor(x, name='x') y = ops.convert_to_tensor(y, name='y') if context.executing_eagerly(): eq = math_ops.equal(x, y) condition = math_ops.reduce_all(eq) if not condition: # Prepare a message with first elements of x and y. summary_msg = '' # Default to printing 3 elements like control_flow_ops.Assert (used # by graph mode) does. summarize = 3 if summarize is None else summarize if summarize: # reshape((-1,)) is the fastest way to get a flat array view. 
x_np = x.numpy().reshape((-1,)) y_np = y.numpy().reshape((-1,)) x_sum = min(x_np.size, summarize) y_sum = min(y_np.size, summarize) summary_msg = ('First %d elements of x:\n%s\n' 'First %d elements of y:\n%s\n' % (x_sum, x_np[:x_sum], y_sum, y_np[:y_sum])) index_and_values_str = '' if x.shape == y.shape and x.shape.as_list(): # If the shapes of x and y are the same (and not scalars), # Get the values that actually differed and their indices. # If shapes are different this information is more confusing # than useful. mask = math_ops.logical_not(eq) indices = array_ops.where(mask) indices_np = indices.numpy() x_vals = array_ops.boolean_mask(x, mask) y_vals = array_ops.boolean_mask(y, mask) summarize = min(summarize, indices_np.shape[0]) index_and_values_str = ( 'Indices of first %s different values:\n%s\n' 'Corresponding x values:\n%s\n' 'Corresponding y values:\n%s\n' % (summarize, indices_np[:summarize], x_vals.numpy().reshape((-1,))[:summarize], y_vals.numpy().reshape((-1,))[:summarize])) raise errors.InvalidArgumentError( node_def=None, op=None, message=('%s\nCondition x == y did not hold.\n%s%s' % (message or '', index_and_values_str, summary_msg))) return if data is None: data = [ message, 'Condition x == y did not hold element-wise:', 'x (%s) = ' % x.name, x, 'y (%s) = ' % y.name, y ] condition = math_ops.reduce_all(math_ops.equal(x, y)) x_static = tensor_util.constant_value(x) y_static = tensor_util.constant_value(y) if x_static is not None and y_static is not None: condition_static = (x_static == y_static).all() _assert_static(condition_static, data) return control_flow_ops.Assert(condition, data, summarize=summarize) @tf_export('debugging.assert_none_equal', v1=[]) def assert_none_equal_v2(x, y, summarize=None, message=None, name=None): """Assert the condition `x != y` holds for all elements. This Op checks that `x[i] != y[i]` holds for every pair of (possibly broadcast) elements of `x` and `y`. If both `x` and `y` are empty, this is trivially satisfied. If any elements of `x` and `y` are equal, `message`, as well as the first `summarize` entries of `x` and `y` are printed, and `InvalidArgumentError` is raised. Args: x: Numeric `Tensor`. y: Numeric `Tensor`, same dtype as and broadcastable to `x`. summarize: Print this many entries of each tensor. message: A string to prefix to the default message. name: A name for this operation (optional). Defaults to "assert_none_equal". Returns: Op that raises `InvalidArgumentError` if `x != y` is ever False. This can be used with `tf.control_dependencies` inside of `tf.function`s to block followup computation until the check has executed. @compatibility(eager) returns None @end_compatibility Raises: InvalidArgumentError: if the check can be performed immediately and `x != y` is False for any pair of elements in `x` and `y`. The check can be performed immediately during eager execution or if `x` and `y` are statically known. """ return assert_none_equal(x=x, y=y, summarize=summarize, message=message, name=name) @tf_export(v1=['debugging.assert_none_equal', 'assert_none_equal']) @deprecation.deprecated_endpoints('assert_none_equal') def assert_none_equal( x, y, data=None, summarize=None, message=None, name=None): """Assert the condition `x != y` holds for all elements. Example of adding a dependency to an operation: ```python with tf.control_dependencies([tf.compat.v1.assert_none_equal(x, y)]): output = tf.reduce_sum(x) ``` This condition holds if for every pair of (possibly broadcast) elements `x[i]`, `y[i]`, we have `x[i] != y[i]`. 
If both `x` and `y` are empty, this is trivially satisfied. Args: x: Numeric `Tensor`. y: Numeric `Tensor`, same dtype as and broadcastable to `x`. data: The tensors to print out if the condition is False. Defaults to error message and first few entries of `x`, `y`. summarize: Print this many entries of each tensor. message: A string to prefix to the default message. name: A name for this operation (optional). Defaults to "assert_none_equal". Returns: Op that raises `InvalidArgumentError` if `x != y` is ever False. """ message = message or '' with ops.name_scope(name, 'assert_none_equal', [x, y, data]): x = ops.convert_to_tensor(x, name='x') y = ops.convert_to_tensor(y, name='y') if context.executing_eagerly(): x_name = _shape_and_dtype_str(x) y_name = _shape_and_dtype_str(y) else: x_name = x.name y_name = y.name if data is None: data = [ message, 'Condition x != y did not hold for every single element:', 'x (%s) = ' % x_name, x, 'y (%s) = ' % y_name, y ] condition = math_ops.reduce_all(math_ops.not_equal(x, y)) return control_flow_ops.Assert(condition, data, summarize=summarize) @tf_export('debugging.assert_near', v1=[]) def assert_near_v2(x, y, rtol=None, atol=None, message=None, summarize=None, name=None): """Assert the condition `x` and `y` are close element-wise. This Op checks that `x[i] - y[i] < atol + rtol * tf.abs(y[i])` holds for every pair of (possibly broadcast) elements of `x` and `y`. If both `x` and `y` are empty, this is trivially satisfied. If any elements of `x` and `y` are not close, `message`, as well as the first `summarize` entries of `x` and `y` are printed, and `InvalidArgumentError` is raised. The default `atol` and `rtol` is `10 * eps`, where `eps` is the smallest representable positive number such that `1 + eps != 1`. This is about `1.2e-6` in `32bit`, `2.22e-15` in `64bit`, and `0.00977` in `16bit`. See `numpy.finfo`. Args: x: Float or complex `Tensor`. y: Float or complex `Tensor`, same dtype as and broadcastable to `x`. rtol: `Tensor`. Same `dtype` as, and broadcastable to, `x`. The relative tolerance. Default is `10 * eps`. atol: `Tensor`. Same `dtype` as, and broadcastable to, `x`. The absolute tolerance. Default is `10 * eps`. message: A string to prefix to the default message. summarize: Print this many entries of each tensor. name: A name for this operation (optional). Defaults to "assert_near". Returns: Op that raises `InvalidArgumentError` if `x` and `y` are not close enough. This can be used with `tf.control_dependencies` inside of `tf.function`s to block followup computation until the check has executed. @compatibility(eager) returns None @end_compatibility Raises: InvalidArgumentError: if the check can be performed immediately and `x != y` is False for any pair of elements in `x` and `y`. The check can be performed immediately during eager execution or if `x` and `y` are statically known. @compatibility(numpy) Similar to `numpy.assert_allclose`, except tolerance depends on data type. This is due to the fact that `TensorFlow` is often used with `32bit`, `64bit`, and even `16bit` data. @end_compatibility """ return assert_near(x=x, y=y, rtol=rtol, atol=atol, summarize=summarize, message=message, name=name) @tf_export(v1=['debugging.assert_near', 'assert_near']) @deprecation.deprecated_endpoints('assert_near') def assert_near( x, y, rtol=None, atol=None, data=None, summarize=None, message=None, name=None): """Assert the condition `x` and `y` are close element-wise. 
Example of adding a dependency to an operation: ```python with tf.control_dependencies([tf.compat.v1.assert_near(x, y)]): output = tf.reduce_sum(x) ``` This condition holds if for every pair of (possibly broadcast) elements `x[i]`, `y[i]`, we have ```tf.abs(x[i] - y[i]) <= atol + rtol * tf.abs(y[i])```. If both `x` and `y` are empty, this is trivially satisfied. The default `atol` and `rtol` is `10 * eps`, where `eps` is the smallest representable positive number such that `1 + eps != 1`. This is about `1.2e-6` in `32bit`, `2.22e-15` in `64bit`, and `0.00977` in `16bit`. See `numpy.finfo`. Args: x: Float or complex `Tensor`. y: Float or complex `Tensor`, same `dtype` as, and broadcastable to, `x`. rtol: `Tensor`. Same `dtype` as, and broadcastable to, `x`. The relative tolerance. Default is `10 * eps`. atol: `Tensor`. Same `dtype` as, and broadcastable to, `x`. The absolute tolerance. Default is `10 * eps`. data: The tensors to print out if the condition is False. Defaults to error message and first few entries of `x`, `y`. summarize: Print this many entries of each tensor. message: A string to prefix to the default message. name: A name for this operation (optional). Defaults to "assert_near". Returns: Op that raises `InvalidArgumentError` if `x` and `y` are not close enough. @compatibility(numpy) Similar to `numpy.assert_allclose`, except tolerance depends on data type. This is due to the fact that `TensorFlow` is often used with `32bit`, `64bit`, and even `16bit` data. @end_compatibility """ message = message or '' with ops.name_scope(name, 'assert_near', [x, y, rtol, atol, data]): x = ops.convert_to_tensor(x, name='x') y = ops.convert_to_tensor(y, name='y', dtype=x.dtype) eps = np.finfo(x.dtype.as_numpy_dtype).eps rtol = 10 * eps if rtol is None else rtol atol = 10 * eps if atol is None else atol rtol = ops.convert_to_tensor(rtol, name='rtol', dtype=x.dtype) atol = ops.convert_to_tensor(atol, name='atol', dtype=x.dtype) if context.executing_eagerly(): x_name = _shape_and_dtype_str(x) y_name = _shape_and_dtype_str(y) else: x_name = x.name y_name = y.name if data is None: data = [ message, 'x and y not equal to tolerance rtol = %s, atol = %s' % (rtol, atol), 'x (%s) = ' % x_name, x, 'y (%s) = ' % y_name, y ] tol = atol + rtol * math_ops.abs(y) diff = math_ops.abs(x - y) condition = math_ops.reduce_all(math_ops.less(diff, tol)) return control_flow_ops.Assert(condition, data, summarize=summarize) @tf_export('debugging.assert_less', 'assert_less', v1=[]) def assert_less_v2(x, y, message=None, summarize=None, name=None): """Assert the condition `x < y` holds element-wise. This Op checks that `x[i] < y[i]` holds for every pair of (possibly broadcast) elements of `x` and `y`. If both `x` and `y` are empty, this is trivially satisfied. If `x` is not less than `y` element-wise, `message`, as well as the first `summarize` entries of `x` and `y` are printed, and `InvalidArgumentError` is raised. Args: x: Numeric `Tensor`. y: Numeric `Tensor`, same dtype as and broadcastable to `x`. message: A string to prefix to the default message. summarize: Print this many entries of each tensor. name: A name for this operation (optional). Defaults to "assert_less". Returns: Op that raises `InvalidArgumentError` if `x < y` is False. This can be used with `tf.control_dependencies` inside of `tf.function`s to block followup computation until the check has executed. @compatibility(eager) returns None @end_compatibility Raises: InvalidArgumentError: if the check can be performed immediately and `x < y` is False. 
The check can be performed immediately during eager execution or if `x` and `y` are statically known. """ return assert_less(x=x, y=y, summarize=summarize, message=message, name=name) @tf_export(v1=['debugging.assert_less', 'assert_less']) def assert_less(x, y, data=None, summarize=None, message=None, name=None): """Assert the condition `x < y` holds element-wise. Example of adding a dependency to an operation: ```python with tf.control_dependencies([tf.compat.v1.assert_less(x, y)]): output = tf.reduce_sum(x) ``` This condition holds if for every pair of (possibly broadcast) elements `x[i]`, `y[i]`, we have `x[i] < y[i]`. If both `x` and `y` are empty, this is trivially satisfied. Args: x: Numeric `Tensor`. y: Numeric `Tensor`, same dtype as and broadcastable to `x`. data: The tensors to print out if the condition is False. Defaults to error message and first few entries of `x`, `y`. summarize: Print this many entries of each tensor. message: A string to prefix to the default message. name: A name for this operation (optional). Defaults to "assert_less". Returns: Op that raises `InvalidArgumentError` if `x < y` is False. """ message = message or '' with ops.name_scope(name, 'assert_less', [x, y, data]): x = ops.convert_to_tensor(x, name='x') y = ops.convert_to_tensor(y, name='y') if context.executing_eagerly(): x_name = _shape_and_dtype_str(x) y_name = _shape_and_dtype_str(y) else: x_name = x.name y_name = y.name if data is None: data = [ message, 'Condition x < y did not hold element-wise:', 'x (%s) = ' % x_name, x, 'y (%s) = ' % y_name, y ] condition = math_ops.reduce_all(math_ops.less(x, y)) return control_flow_ops.Assert(condition, data, summarize=summarize) @tf_export('debugging.assert_less_equal', v1=[]) def assert_less_equal_v2(x, y, message=None, summarize=None, name=None): """Assert the condition `x <= y` holds element-wise. This Op checks that `x[i] <= y[i]` holds for every pair of (possibly broadcast) elements of `x` and `y`. If both `x` and `y` are empty, this is trivially satisfied. If `x` is not less or equal than `y` element-wise, `message`, as well as the first `summarize` entries of `x` and `y` are printed, and `InvalidArgumentError` is raised. Args: x: Numeric `Tensor`. y: Numeric `Tensor`, same dtype as and broadcastable to `x`. message: A string to prefix to the default message. summarize: Print this many entries of each tensor. name: A name for this operation (optional). Defaults to "assert_less_equal". Returns: Op that raises `InvalidArgumentError` if `x <= y` is False. This can be used with `tf.control_dependencies` inside of `tf.function`s to block followup computation until the check has executed. @compatibility(eager) returns None @end_compatibility Raises: InvalidArgumentError: if the check can be performed immediately and `x <= y` is False. The check can be performed immediately during eager execution or if `x` and `y` are statically known. """ return assert_less_equal(x=x, y=y, summarize=summarize, message=message, name=name) @tf_export(v1=['debugging.assert_less_equal', 'assert_less_equal']) @deprecation.deprecated_endpoints('assert_less_equal') def assert_less_equal(x, y, data=None, summarize=None, message=None, name=None): """Assert the condition `x <= y` holds element-wise. Example of adding a dependency to an operation: ```python with tf.control_dependencies([tf.compat.v1.assert_less_equal(x, y)]): output = tf.reduce_sum(x) ``` This condition holds if for every pair of (possibly broadcast) elements `x[i]`, `y[i]`, we have `x[i] <= y[i]`. 
If both `x` and `y` are empty, this is trivially satisfied. Args: x: Numeric `Tensor`. y: Numeric `Tensor`, same dtype as and broadcastable to `x`. data: The tensors to print out if the condition is False. Defaults to error message and first few entries of `x`, `y`. summarize: Print this many entries of each tensor. message: A string to prefix to the default message. name: A name for this operation (optional). Defaults to "assert_less_equal" Returns: Op that raises `InvalidArgumentError` if `x <= y` is False. """ message = message or '' with ops.name_scope(name, 'assert_less_equal', [x, y, data]): x = ops.convert_to_tensor(x, name='x') y = ops.convert_to_tensor(y, name='y') if context.executing_eagerly(): x_name = _shape_and_dtype_str(x) y_name = _shape_and_dtype_str(y) else: x_name = x.name y_name = y.name if data is None: data = [ message, 'Condition x <= y did not hold element-wise:' 'x (%s) = ' % x_name, x, 'y (%s) = ' % y_name, y ] condition = math_ops.reduce_all(math_ops.less_equal(x, y)) return control_flow_ops.Assert(condition, data, summarize=summarize) @tf_export('debugging.assert_greater', 'assert_greater', v1=[]) def assert_greater_v2(x, y, message=None, summarize=None, name=None): """Assert the condition `x > y` holds element-wise. This Op checks that `x[i] > y[i]` holds for every pair of (possibly broadcast) elements of `x` and `y`. If both `x` and `y` are empty, this is trivially satisfied. If `x` is not greater than `y` element-wise, `message`, as well as the first `summarize` entries of `x` and `y` are printed, and `InvalidArgumentError` is raised. Args: x: Numeric `Tensor`. y: Numeric `Tensor`, same dtype as and broadcastable to `x`. message: A string to prefix to the default message. summarize: Print this many entries of each tensor. name: A name for this operation (optional). Defaults to "assert_greater". Returns: Op that raises `InvalidArgumentError` if `x > y` is False. This can be used with `tf.control_dependencies` inside of `tf.function`s to block followup computation until the check has executed. @compatibility(eager) returns None @end_compatibility Raises: InvalidArgumentError: if the check can be performed immediately and `x > y` is False. The check can be performed immediately during eager execution or if `x` and `y` are statically known. """ return assert_greater(x=x, y=y, summarize=summarize, message=message, name=name) @tf_export(v1=['debugging.assert_greater', 'assert_greater']) def assert_greater(x, y, data=None, summarize=None, message=None, name=None): """Assert the condition `x > y` holds element-wise. Example of adding a dependency to an operation: ```python with tf.control_dependencies([tf.compat.v1.assert_greater(x, y)]): output = tf.reduce_sum(x) ``` This condition holds if for every pair of (possibly broadcast) elements `x[i]`, `y[i]`, we have `x[i] > y[i]`. If both `x` and `y` are empty, this is trivially satisfied. Args: x: Numeric `Tensor`. y: Numeric `Tensor`, same dtype as and broadcastable to `x`. data: The tensors to print out if the condition is False. Defaults to error message and first few entries of `x`, `y`. summarize: Print this many entries of each tensor. message: A string to prefix to the default message. name: A name for this operation (optional). Defaults to "assert_greater". Returns: Op that raises `InvalidArgumentError` if `x > y` is False. 
""" message = message or '' with ops.name_scope(name, 'assert_greater', [x, y, data]): x = ops.convert_to_tensor(x, name='x') y = ops.convert_to_tensor(y, name='y') if context.executing_eagerly(): x_name = _shape_and_dtype_str(x) y_name = _shape_and_dtype_str(y) else: x_name = x.name y_name = y.name if data is None: data = [ message, 'Condition x > y did not hold element-wise:' 'x (%s) = ' % x_name, x, 'y (%s) = ' % y_name, y ] condition = math_ops.reduce_all(math_ops.greater(x, y)) return control_flow_ops.Assert(condition, data, summarize=summarize) @tf_export('debugging.assert_greater_equal', v1=[]) def assert_greater_equal_v2(x, y, message=None, summarize=None, name=None): """Assert the condition `x >= y` holds element-wise. This Op checks that `x[i] >= y[i]` holds for every pair of (possibly broadcast) elements of `x` and `y`. If both `x` and `y` are empty, this is trivially satisfied. If `x` is not greater or equal to `y` element-wise, `message`, as well as the first `summarize` entries of `x` and `y` are printed, and `InvalidArgumentError` is raised. Args: x: Numeric `Tensor`. y: Numeric `Tensor`, same dtype as and broadcastable to `x`. message: A string to prefix to the default message. summarize: Print this many entries of each tensor. name: A name for this operation (optional). Defaults to "assert_greater_equal". Returns: Op that raises `InvalidArgumentError` if `x >= y` is False. This can be used with `tf.control_dependencies` inside of `tf.function`s to block followup computation until the check has executed. @compatibility(eager) returns None @end_compatibility Raises: InvalidArgumentError: if the check can be performed immediately and `x >= y` is False. The check can be performed immediately during eager execution or if `x` and `y` are statically known. """ return assert_greater_equal(x=x, y=y, summarize=summarize, message=message, name=name) @tf_export(v1=['debugging.assert_greater_equal', 'assert_greater_equal']) @deprecation.deprecated_endpoints('assert_greater_equal') def assert_greater_equal(x, y, data=None, summarize=None, message=None, name=None): """Assert the condition `x >= y` holds element-wise. Example of adding a dependency to an operation: ```python with tf.control_dependencies([tf.compat.v1.assert_greater_equal(x, y)]): output = tf.reduce_sum(x) ``` This condition holds if for every pair of (possibly broadcast) elements `x[i]`, `y[i]`, we have `x[i] >= y[i]`. If both `x` and `y` are empty, this is trivially satisfied. Args: x: Numeric `Tensor`. y: Numeric `Tensor`, same dtype as and broadcastable to `x`. data: The tensors to print out if the condition is False. Defaults to error message and first few entries of `x`, `y`. summarize: Print this many entries of each tensor. message: A string to prefix to the default message. name: A name for this operation (optional). Defaults to "assert_greater_equal" Returns: Op that raises `InvalidArgumentError` if `x >= y` is False. 
""" message = message or '' with ops.name_scope(name, 'assert_greater_equal', [x, y, data]): x = ops.convert_to_tensor(x, name='x') y = ops.convert_to_tensor(y, name='y') if context.executing_eagerly(): x_name = _shape_and_dtype_str(x) y_name = _shape_and_dtype_str(y) else: x_name = x.name y_name = y.name if data is None: data = [ message, 'Condition x >= y did not hold element-wise:' 'x (%s) = ' % x_name, x, 'y (%s) = ' % y_name, y ] condition = math_ops.reduce_all(math_ops.greater_equal(x, y)) return control_flow_ops.Assert(condition, data, summarize=summarize) def _assert_rank_condition( x, rank, static_condition, dynamic_condition, data, summarize): """Assert `x` has a rank that satisfies a given condition. Args: x: Numeric `Tensor`. rank: Scalar `Tensor`. static_condition: A python function that takes `[actual_rank, given_rank]` and returns `True` if the condition is satisfied, `False` otherwise. dynamic_condition: An `op` that takes [actual_rank, given_rank] and return `True` if the condition is satisfied, `False` otherwise. data: The tensors to print out if the condition is false. Defaults to error message and first few entries of `x`. summarize: Print this many entries of each tensor. Returns: Op raising `InvalidArgumentError` if `x` fails dynamic_condition. Raises: ValueError: If static checks determine `x` fails static_condition. """ assert_type(rank, dtypes.int32) # Attempt to statically defined rank. rank_static = tensor_util.constant_value(rank) if rank_static is not None: if rank_static.ndim != 0: raise ValueError('Rank must be a scalar.') x_rank_static = x.get_shape().ndims if x_rank_static is not None: if not static_condition(x_rank_static, rank_static): raise ValueError( 'Static rank condition failed', x_rank_static, rank_static) return control_flow_ops.no_op(name='static_checks_determined_all_ok') condition = dynamic_condition(array_ops.rank(x), rank) # Add the condition that `rank` must have rank zero. Prevents the bug where # someone does assert_rank(x, [n]), rather than assert_rank(x, n). if rank_static is None: this_data = ['Rank must be a scalar. Received rank: ', rank] rank_check = assert_rank(rank, 0, data=this_data) condition = control_flow_ops.with_dependencies([rank_check], condition) return control_flow_ops.Assert(condition, data, summarize=summarize) @tf_export('debugging.assert_rank', 'assert_rank', v1=[]) def assert_rank_v2(x, rank, message=None, name=None): """Assert that `x` has rank equal to `rank`. This Op checks that the rank of `x` is equal to `rank`. If `x` has a different rank, `message`, as well as the shape of `x` are printed, and `InvalidArgumentError` is raised. Args: x: `Tensor`. rank: Scalar integer `Tensor`. message: A string to prefix to the default message. name: A name for this operation (optional). Defaults to "assert_rank". Returns: Op raising `InvalidArgumentError` unless `x` has specified rank. If static checks determine `x` has correct rank, a `no_op` is returned. This can be used with `tf.control_dependencies` inside of `tf.function`s to block followup computation until the check has executed. @compatibility(eager) returns None @end_compatibility Raises: InvalidArgumentError: if the check can be performed immediately and `x` does not have rank `rank`. The check can be performed immediately during eager execution or if the shape of `x` is statically known. 
""" return assert_rank(x=x, rank=rank, message=message, name=name) @tf_export(v1=['debugging.assert_rank', 'assert_rank']) def assert_rank(x, rank, data=None, summarize=None, message=None, name=None): """Assert `x` has rank equal to `rank`. Example of adding a dependency to an operation: ```python with tf.control_dependencies([tf.compat.v1.assert_rank(x, 2)]): output = tf.reduce_sum(x) ``` Args: x: Numeric `Tensor`. rank: Scalar integer `Tensor`. data: The tensors to print out if the condition is False. Defaults to error message and the shape of `x`. summarize: Print this many entries of each tensor. message: A string to prefix to the default message. name: A name for this operation (optional). Defaults to "assert_rank". Returns: Op raising `InvalidArgumentError` unless `x` has specified rank. If static checks determine `x` has correct rank, a `no_op` is returned. Raises: ValueError: If static checks determine `x` has wrong rank. """ with ops.name_scope(name, 'assert_rank', (x, rank) + tuple(data or [])): x = ops.convert_to_tensor(x, name='x') rank = ops.convert_to_tensor(rank, name='rank') message = message or '' static_condition = lambda actual_rank, given_rank: actual_rank == given_rank dynamic_condition = math_ops.equal if context.executing_eagerly(): name = '' else: name = x.name if data is None: data = [ message, 'Tensor %s must have rank' % name, rank, 'Received shape: ', array_ops.shape(x) ] try: assert_op = _assert_rank_condition(x, rank, static_condition, dynamic_condition, data, summarize) except ValueError as e: if e.args[0] == 'Static rank condition failed': raise ValueError( '%s. Tensor %s must have rank %d. Received rank %d, shape %s' % (message, name, e.args[2], e.args[1], x.get_shape())) else: raise return assert_op @tf_export('debugging.assert_rank_at_least', v1=[]) def assert_rank_at_least_v2(x, rank, message=None, name=None): """Assert that `x` has rank of at least `rank`. This Op checks that the rank of `x` is greater or equal to `rank`. If `x` has a rank lower than `rank`, `message`, as well as the shape of `x` are printed, and `InvalidArgumentError` is raised. Args: x: `Tensor`. rank: Scalar integer `Tensor`. message: A string to prefix to the default message. name: A name for this operation (optional). Defaults to "assert_rank_at_least". Returns: Op raising `InvalidArgumentError` unless `x` has specified rank or higher. If static checks determine `x` has correct rank, a `no_op` is returned. This can be used with `tf.control_dependencies` inside of `tf.function`s to block followup computation until the check has executed. @compatibility(eager) returns None @end_compatibility Raises: InvalidArgumentError: `x` does not have rank at least `rank`, but the rank cannot be statically determined. ValueError: If static checks determine `x` has mismatched rank. """ return assert_rank_at_least(x=x, rank=rank, message=message, name=name) @tf_export(v1=['debugging.assert_rank_at_least', 'assert_rank_at_least']) @deprecation.deprecated_endpoints('assert_rank_at_least') def assert_rank_at_least( x, rank, data=None, summarize=None, message=None, name=None): """Assert `x` has rank equal to `rank` or higher. Example of adding a dependency to an operation: ```python with tf.control_dependencies([tf.compat.v1.assert_rank_at_least(x, 2)]): output = tf.reduce_sum(x) ``` Args: x: Numeric `Tensor`. rank: Scalar `Tensor`. data: The tensors to print out if the condition is False. Defaults to error message and first few entries of `x`. summarize: Print this many entries of each tensor. 
message: A string to prefix to the default message. name: A name for this operation (optional). Defaults to "assert_rank_at_least". Returns: Op raising `InvalidArgumentError` unless `x` has specified rank or higher. If static checks determine `x` has correct rank, a `no_op` is returned. Raises: ValueError: If static checks determine `x` has wrong rank. """ with ops.name_scope( name, 'assert_rank_at_least', (x, rank) + tuple(data or [])): x = ops.convert_to_tensor(x, name='x') rank = ops.convert_to_tensor(rank, name='rank') message = message or '' static_condition = lambda actual_rank, given_rank: actual_rank >= given_rank dynamic_condition = math_ops.greater_equal if context.executing_eagerly(): name = '' else: name = x.name if data is None: data = [ message, 'Tensor %s must have rank at least' % name, rank, 'Received shape: ', array_ops.shape(x) ] try: assert_op = _assert_rank_condition(x, rank, static_condition, dynamic_condition, data, summarize) except ValueError as e: if e.args[0] == 'Static rank condition failed': raise ValueError( '%s. Tensor %s must have rank at least %d. Received rank %d, ' 'shape %s' % (message, name, e.args[2], e.args[1], x.get_shape())) else: raise return assert_op def _static_rank_in(actual_rank, given_ranks): return actual_rank in given_ranks def _dynamic_rank_in(actual_rank, given_ranks): if len(given_ranks) < 1: return ops.convert_to_tensor(False) result = math_ops.equal(given_ranks[0], actual_rank) for given_rank in given_ranks[1:]: result = math_ops.logical_or( result, math_ops.equal(given_rank, actual_rank)) return result def _assert_ranks_condition( x, ranks, static_condition, dynamic_condition, data, summarize): """Assert `x` has a rank that satisfies a given condition. Args: x: Numeric `Tensor`. ranks: Scalar `Tensor`. static_condition: A python function that takes `[actual_rank, given_ranks]` and returns `True` if the condition is satisfied, `False` otherwise. dynamic_condition: An `op` that takes [actual_rank, given_ranks] and return `True` if the condition is satisfied, `False` otherwise. data: The tensors to print out if the condition is false. Defaults to error message and first few entries of `x`. summarize: Print this many entries of each tensor. Returns: Op raising `InvalidArgumentError` if `x` fails dynamic_condition. Raises: ValueError: If static checks determine `x` fails static_condition. """ for rank in ranks: assert_type(rank, dtypes.int32) # Attempt to statically defined rank. ranks_static = tuple([tensor_util.constant_value(rank) for rank in ranks]) if not any(r is None for r in ranks_static): for rank_static in ranks_static: if rank_static.ndim != 0: raise ValueError('Rank must be a scalar.') x_rank_static = x.get_shape().ndims if x_rank_static is not None: if not static_condition(x_rank_static, ranks_static): raise ValueError( 'Static rank condition failed', x_rank_static, ranks_static) return control_flow_ops.no_op(name='static_checks_determined_all_ok') condition = dynamic_condition(array_ops.rank(x), ranks) # Add the condition that `rank` must have rank zero. Prevents the bug where # someone does assert_rank(x, [n]), rather than assert_rank(x, n). for rank, rank_static in zip(ranks, ranks_static): if rank_static is None: this_data = ['Rank must be a scalar. 
Received rank: ', rank] rank_check = assert_rank(rank, 0, data=this_data) condition = control_flow_ops.with_dependencies([rank_check], condition) return control_flow_ops.Assert(condition, data, summarize=summarize) @tf_export('debugging.assert_rank_in', v1=[]) def assert_rank_in_v2(x, ranks, message=None, name=None): """Assert that `x` has a rank in `ranks`. This Op checks that the rank of `x` is in `ranks`. If `x` has a different rank, `message`, as well as the shape of `x` are printed, and `InvalidArgumentError` is raised. Args: x: `Tensor`. ranks: `Iterable` of scalar `Tensor` objects. message: A string to prefix to the default message. name: A name for this operation (optional). Defaults to "assert_rank_in". Returns: Op raising `InvalidArgumentError` unless rank of `x` is in `ranks`. If static checks determine `x` has matching rank, a `no_op` is returned. This can be used with `tf.control_dependencies` inside of `tf.function`s to block followup computation until the check has executed. @compatibility(eager) returns None @end_compatibility Raises: InvalidArgumentError: `x` does not have rank in `ranks`, but the rank cannot be statically determined. ValueError: If static checks determine `x` has mismatched rank. """ return assert_rank_in(x=x, ranks=ranks, message=message, name=name) @tf_export(v1=['debugging.assert_rank_in', 'assert_rank_in']) @deprecation.deprecated_endpoints('assert_rank_in') def assert_rank_in( x, ranks, data=None, summarize=None, message=None, name=None): """Assert `x` has rank in `ranks`. Example of adding a dependency to an operation: ```python with tf.control_dependencies([tf.compat.v1.assert_rank_in(x, (2, 4))]): output = tf.reduce_sum(x) ``` Args: x: Numeric `Tensor`. ranks: Iterable of scalar `Tensor` objects. data: The tensors to print out if the condition is False. Defaults to error message and first few entries of `x`. summarize: Print this many entries of each tensor. message: A string to prefix to the default message. name: A name for this operation (optional). Defaults to "assert_rank_in". Returns: Op raising `InvalidArgumentError` unless rank of `x` is in `ranks`. If static checks determine `x` has matching rank, a `no_op` is returned. Raises: ValueError: If static checks determine `x` has mismatched rank. """ with ops.name_scope( name, 'assert_rank_in', (x,) + tuple(ranks) + tuple(data or [])): x = ops.convert_to_tensor(x, name='x') ranks = tuple([ops.convert_to_tensor(rank, name='rank') for rank in ranks]) message = message or '' if context.executing_eagerly(): name = '' else: name = x.name if data is None: data = [ message, 'Tensor %s must have rank in' % name ] + list(ranks) + [ 'Received shape: ', array_ops.shape(x) ] try: assert_op = _assert_ranks_condition(x, ranks, _static_rank_in, _dynamic_rank_in, data, summarize) except ValueError as e: if e.args[0] == 'Static rank condition failed': raise ValueError( '%s. Tensor %s must have rank in %s. Received rank %d, ' 'shape %s' % (message, name, e.args[2], e.args[1], x.get_shape())) else: raise return assert_op @tf_export('debugging.assert_integer', v1=[]) def assert_integer_v2(x, message=None, name=None): """Assert that `x` is of integer dtype. If `x` has a non-integer type, `message`, as well as the dtype of `x` are printed, and `InvalidArgumentError` is raised. This can always be checked statically, so this method returns nothing. Args: x: A `Tensor`. message: A string to prefix to the default message. name: A name for this operation (optional). Defaults to "assert_integer". 
Raises: TypeError: If `x.dtype` is not a non-quantized integer type. """ assert_integer(x=x, message=message, name=name) @tf_export(v1=['debugging.assert_integer', 'assert_integer']) @deprecation.deprecated_endpoints('assert_integer') def assert_integer(x, message=None, name=None): """Assert that `x` is of integer dtype. Example of adding a dependency to an operation: ```python with tf.control_dependencies([tf.compat.v1.assert_integer(x)]): output = tf.reduce_sum(x) ``` Args: x: `Tensor` whose basetype is integer and is not quantized. message: A string to prefix to the default message. name: A name for this operation (optional). Defaults to "assert_integer". Raises: TypeError: If `x.dtype` is anything other than non-quantized integer. Returns: A `no_op` that does nothing. Type can be determined statically. """ message = message or '' with ops.name_scope(name, 'assert_integer', [x]): x = ops.convert_to_tensor(x, name='x') if not x.dtype.is_integer: if context.executing_eagerly(): name = 'tensor' else: name = x.name err_msg = ( '%s Expected "x" to be integer type. Found: %s of dtype %s' % (message, name, x.dtype)) raise TypeError(err_msg) return control_flow_ops.no_op('statically_determined_was_integer') @tf_export('debugging.assert_type', v1=[]) def assert_type_v2(tensor, tf_type, message=None, name=None): """Asserts that the given `Tensor` is of the specified type. This can always be checked statically, so this method returns nothing. Args: tensor: A `Tensor`. tf_type: A tensorflow type (`dtypes.float32`, `tf.int64`, `dtypes.bool`, etc). message: A string to prefix to the default message. name: A name for this operation. Defaults to "assert_type" Raises: TypeError: If the tensor's data type doesn't match `tf_type`. """ assert_type(tensor=tensor, tf_type=tf_type, message=message, name=name) @tf_export(v1=['debugging.assert_type', 'assert_type']) @deprecation.deprecated_endpoints('assert_type') def assert_type(tensor, tf_type, message=None, name=None): """Statically asserts that the given `Tensor` is of the specified type. Args: tensor: A `Tensor`. tf_type: A tensorflow type (`dtypes.float32`, `tf.int64`, `dtypes.bool`, etc). message: A string to prefix to the default message. name: A name to give this `Op`. Defaults to "assert_type" Raises: TypeError: If the tensors data type doesn't match `tf_type`. Returns: A `no_op` that does nothing. Type can be determined statically. """ message = message or '' with ops.name_scope(name, 'assert_type', [tensor]): tensor = ops.convert_to_tensor(tensor, name='tensor') if tensor.dtype != tf_type: if context.executing_eagerly(): raise TypeError('%s tensor must be of type %s' % (message, tf_type)) else: raise TypeError('%s %s must be of type %s' % (message, tensor.name, tf_type)) return control_flow_ops.no_op('statically_determined_correct_type') def _dimension_sizes(x): """Gets the dimension sizes of a tensor `x`. If a size can be determined statically it is returned as an integer, otherwise as a tensor. If `x` is a scalar it is treated as rank 1 size 1. Args: x: A `Tensor`. Returns: Dimension sizes. 
""" dynamic_shape = array_ops.shape(x) rank = x.get_shape().rank rank_is_known = rank is not None if rank_is_known and rank == 0: return tuple([1]) if rank_is_known and rank > 0: static_shape = x.get_shape().as_list() sizes = [ int(size) if size is not None else dynamic_shape[i] for i, size in enumerate(static_shape) ] return sizes has_rank_zero = math_ops.equal(array_ops.rank(x), 0) return control_flow_ops.cond( has_rank_zero, lambda: array_ops.constant([1]), lambda: dynamic_shape) def _symbolic_dimension_sizes(symbolic_shape): if len(symbolic_shape) == 0: return tuple([1]) return symbolic_shape def _has_known_value(dimension_size): not_none = dimension_size is not None try: int(dimension_size) can_be_parsed_as_int = True except (ValueError, TypeError): can_be_parsed_as_int = False return not_none and can_be_parsed_as_int def _is_symbol_for_any_size(symbol): return symbol in [None, '.'] def _is_symbol_for_unspecified_dims(symbol): return symbol in [Ellipsis, '*'] @tf_export('debugging.assert_shapes', v1=[]) def assert_shapes_v2(shapes, data=None, summarize=None, message=None, name=None): """Assert tensor shapes and dimension size relationships between tensors. This Op checks that a collection of tensors shape relationships satisfies given constraints. Example: ```python tf.assert_shapes({ x: ('N', 'Q'), y: ('N', 'D'), param: ('Q',), scalar: () }) ``` If `x`, `y`, `param` or `scalar` does not have a shape that satisfies all specified constraints, `message`, as well as the first `summarize` entries of the first encountered violating tensor are printed, and `InvalidArgumentError` is raised. Size entries in the specified shapes are checked against other entries by their __hash__, except: - a size entry is interpreted as an explicit size if it can be parsed as an integer primitive. - a size entry is interpreted as *any* size if it is None or '.'. If the first entry of a shape is `...` (type `Ellipsis`) or '*' that indicates a variable number of outer dimensions of unspecified size, i.e. the constraint applies to the inner-most dimensions only. Scalar tensors and specified shapes of length zero (excluding the 'inner-most' prefix) are both treated as having a single dimension of size one. Args: shapes: dictionary with (`Tensor` to shape) items. A shape must be an iterable. data: The tensors to print out if the condition is False. Defaults to error message and first few entries of the violating tensor. summarize: Print this many entries of the tensor. message: A string to prefix to the default message. name: A name for this operation (optional). Defaults to "assert_shapes". Raises: ValueError: If static checks determine any shape constraint is violated. """ assert_shapes( shapes, data=data, summarize=summarize, message=message, name=name) @tf_export(v1=['debugging.assert_shapes']) def assert_shapes(shapes, data=None, summarize=None, message=None, name=None): """Assert tensor shapes and dimension size relationships between tensors. This Op checks that a collection of tensors shape relationships satisfies given constraints. 
Example: ```python tf.assert_shapes({ x: ('N', 'Q'), y: ('N', 'D'), param: ('Q',), scalar: () }) ``` Example of adding a dependency to an operation: ```python with tf.control_dependencies([tf.assert_shapes(shapes)]): output = tf.matmul(x, y, transpose_a=True) ``` If `x`, `y`, `param` or `scalar` does not have a shape that satisfies all specified constraints, `message`, as well as the first `summarize` entries of the first encountered violating tensor are printed, and `InvalidArgumentError` is raised. Size entries in the specified shapes are checked against other entries by their __hash__, except: - a size entry is interpreted as an explicit size if it can be parsed as an integer primitive. - a size entry is interpreted as *any* size if it is None or '.'. If the first entry of a shape is `...` (type `Ellipsis`) or '*' that indicates a variable number of outer dimensions of unspecified size, i.e. the constraint applies to the inner-most dimensions only. Scalar tensors and specified shapes of length zero (excluding the 'inner-most' prefix) are both treated as having a single dimension of size one. Args: shapes: dictionary with (`Tensor` to shape) items. A shape must be an iterable. data: The tensors to print out if the condition is False. Defaults to error message and first few entries of the violating tensor. summarize: Print this many entries of the tensor. message: A string to prefix to the default message. name: A name for this operation (optional). Defaults to "assert_shapes". Returns: Op raising `InvalidArgumentError` unless all shape constraints are satisfied. If static checks determine all constraints are satisfied, a `no_op` is returned. Raises: ValueError: If static checks determine any shape constraint is violated. """ message = message or '' with ops.name_scope(name, 'assert_shapes', [shapes, data]): # Shape specified as None implies no constraint shapes = {x: shapes[x] for x in shapes if shapes[x] is not None} shapes = {ops.convert_to_tensor(x): shapes[x] for x in shapes} executing_eagerly = context.executing_eagerly() def tensor_name(x): if executing_eagerly: return _shape_and_dtype_str(x) return x.name for x in shapes: symbolic_shape = shapes[x] is_iterable = ( hasattr(symbolic_shape, '__iter__') or hasattr(symbolic_shape, '__getitem__') # For Python 2 compat. ) if not is_iterable: raise ValueError( '%s. ' 'Tensor %s. Specified shape must be an iterable. ' 'An iterable has the attribute `__iter__` or `__getitem__`. ' 'Received specified shape: %s' % (message, tensor_name(x), symbolic_shape)) shapes[x] = tuple(shapes[x]) tensors_specified_innermost = set() for x in shapes: symbolic_shape = shapes[x] for i, symbol in enumerate(symbolic_shape): if not _is_symbol_for_unspecified_dims(symbol): continue if i != 0: raise ValueError( '%s. ' 'Tensor %s specified shape index %d. ' 'Symbol `...` or `*` for a variable number of ' 'unspecified dimensions is only allowed as the first entry' % (message, tensor_name(x), i)) tensors_specified_innermost.add(x) actual_sizes_by_tensor = {x: _dimension_sizes(x) for x in shapes} specified_sizes_by_tensor = { x: _symbolic_dimension_sizes( # Ignoring innermost prefix shapes[x][1:] if x in tensors_specified_innermost else shapes[x]) for x in shapes } rank_assertions = [] for x in shapes.keys(): symbolic_sizes = specified_sizes_by_tensor[x] rank = len(symbolic_sizes) rank_zero_or_one = rank in [0, 1] if x in tensors_specified_innermost: if rank_zero_or_one: # No assertion of rank needed as `x` only need to have rank at least 0. 
# See elif rank_zero_or_one case comment. continue assertion = assert_rank_at_least( x=x, rank=rank, data=data, summarize=summarize, message=message, name=name) elif rank_zero_or_one: # Rank 0 is treated as rank 1 size 1, i.e. there is # no distinction between the two in terms of rank. # See _dimension_sizes. assertion = assert_rank_in( x=x, ranks=[0, 1], data=data, summarize=summarize, message=message, name=name) else: assertion = assert_rank( x=x, rank=rank, data=data, summarize=summarize, message=message, name=name) rank_assertions.append(assertion) size_assertions = [] size_specifications = {} for x in shapes.keys(): actual_sizes = actual_sizes_by_tensor[x] symbolic_sizes = specified_sizes_by_tensor[x] innermost_dims = x in tensors_specified_innermost for i, size_symbol in enumerate(symbolic_sizes): if _is_symbol_for_any_size(size_symbol): # Size specified as any implies no constraint continue if innermost_dims: tensor_dim = i - len(symbolic_sizes) else: tensor_dim = i if size_symbol in size_specifications or _has_known_value(size_symbol): if _has_known_value(size_symbol): specified_size = int(size_symbol) size_check_message = 'Specified explicitly' else: specified_size, specified_by_y, specified_at_dim = \ size_specifications[size_symbol] size_check_message = ( 'Specified by tensor %s dimension %d' % (tensor_name(specified_by_y), specified_at_dim)) actual_size = actual_sizes[tensor_dim] if _has_known_value(actual_size) and _has_known_value(specified_size): if int(actual_size) != int(specified_size): raise ValueError( '%s. %s. Tensor %s dimension %s must have size %d. ' 'Received size %d, shape %s' % (message, size_check_message, tensor_name(x), tensor_dim, specified_size, actual_size, x.get_shape())) # No dynamic assertion needed continue condition = math_ops.equal( ops.convert_to_tensor(actual_size), ops.convert_to_tensor(specified_size)) data_ = data if data is None: data_ = [ message, size_check_message, 'Tensor %s dimension' % tensor_name(x), tensor_dim, 'must have size', specified_size, 'Received shape: ', array_ops.shape(x) ] size_assertions.append( control_flow_ops.Assert(condition, data_, summarize=summarize)) else: size = actual_sizes[tensor_dim] size_specifications[size_symbol] = (size, x, tensor_dim) with ops.control_dependencies(rank_assertions): shapes_assertion = control_flow_ops.group(size_assertions) return shapes_assertion # pylint: disable=line-too-long def _get_diff_for_monotonic_comparison(x): """Gets the difference x[1:] - x[:-1].""" x = array_ops.reshape(x, [-1]) if not is_numeric_tensor(x): raise TypeError('Expected x to be numeric, instead found: %s' % x) # If x has less than 2 elements, there is nothing to compare. So return []. is_shorter_than_two = math_ops.less(array_ops.size(x), 2) short_result = lambda: ops.convert_to_tensor([], dtype=x.dtype) # With 2 or more elements, return x[1:] - x[:-1] s_len = array_ops.shape(x) - 1 diff = lambda: array_ops.strided_slice(x, [1], [1] + s_len)- array_ops.strided_slice(x, [0], s_len) return control_flow_ops.cond(is_shorter_than_two, short_result, diff) @tf_export( 'debugging.is_numeric_tensor', v1=['debugging.is_numeric_tensor', 'is_numeric_tensor']) @deprecation.deprecated_endpoints('is_numeric_tensor') def is_numeric_tensor(tensor): """Returns `True` if the elements of `tensor` are numbers. 
Specifically, returns `True` if the dtype of `tensor` is one of the following: * `tf.float32` * `tf.float64` * `tf.int8` * `tf.int16` * `tf.int32` * `tf.int64` * `tf.uint8` * `tf.qint8` * `tf.qint32` * `tf.quint8` * `tf.complex64` Returns `False` if `tensor` is of a non-numeric type or if `tensor` is not a `tf.Tensor` object. """ return isinstance(tensor, ops.Tensor) and tensor.dtype in NUMERIC_TYPES @tf_export( 'math.is_non_decreasing', v1=[ 'math.is_non_decreasing', 'debugging.is_non_decreasing', 'is_non_decreasing' ]) @deprecation.deprecated_endpoints('debugging.is_non_decreasing', 'is_non_decreasing') def is_non_decreasing(x, name=None): """Returns `True` if `x` is non-decreasing. Elements of `x` are compared in row-major order. The tensor `[x[0],...]` is non-decreasing if for every adjacent pair we have `x[i] <= x[i+1]`. If `x` has less than two elements, it is trivially non-decreasing. See also: `is_strictly_increasing` Args: x: Numeric `Tensor`. name: A name for this operation (optional). Defaults to "is_non_decreasing" Returns: Boolean `Tensor`, equal to `True` iff `x` is non-decreasing. Raises: TypeError: if `x` is not a numeric tensor. """ with ops.name_scope(name, 'is_non_decreasing', [x]): diff = _get_diff_for_monotonic_comparison(x) # When len(x) = 1, diff = [], less_equal = [], and reduce_all([]) = True. zero = ops.convert_to_tensor(0, dtype=diff.dtype) return math_ops.reduce_all(math_ops.less_equal(zero, diff)) @tf_export( 'math.is_strictly_increasing', v1=[ 'math.is_strictly_increasing', 'debugging.is_strictly_increasing', 'is_strictly_increasing' ]) @deprecation.deprecated_endpoints('debugging.is_strictly_increasing', 'is_strictly_increasing') def is_strictly_increasing(x, name=None): """Returns `True` if `x` is strictly increasing. Elements of `x` are compared in row-major order. The tensor `[x[0],...]` is strictly increasing if for every adjacent pair we have `x[i] < x[i+1]`. If `x` has less than two elements, it is trivially strictly increasing. See also: `is_non_decreasing` Args: x: Numeric `Tensor`. name: A name for this operation (optional). Defaults to "is_strictly_increasing" Returns: Boolean `Tensor`, equal to `True` iff `x` is strictly increasing. Raises: TypeError: if `x` is not a numeric tensor. """ with ops.name_scope(name, 'is_strictly_increasing', [x]): diff = _get_diff_for_monotonic_comparison(x) # When len(x) = 1, diff = [], less = [], and reduce_all([]) = True. zero = ops.convert_to_tensor(0, dtype=diff.dtype) return math_ops.reduce_all(math_ops.less(zero, diff)) def _assert_same_base_type(items, expected_type=None): r"""Asserts all items are of the same base type. Args: items: List of graph items (e.g., `Variable`, `Tensor`, `SparseTensor`, `Operation`, or `IndexedSlices`). Can include `None` elements, which will be ignored. expected_type: Expected type. If not specified, assert all items are of the same base type. Returns: Validated type, or none if neither expected_type nor items provided. Raises: ValueError: If any types do not match. """ original_expected_type = expected_type mismatch = False for item in items: if item is not None: item_type = item.dtype.base_dtype if not expected_type: expected_type = item_type elif expected_type != item_type: mismatch = True break if mismatch: # Loop back through and build up an informative error message (this is very # slow, so we don't do it unless we found an error above). 
expected_type = original_expected_type original_item_str = None for item in items: if item is not None: item_type = item.dtype.base_dtype if not expected_type: expected_type = item_type original_item_str = item.name if hasattr(item, 'name') else str(item) elif expected_type != item_type: raise ValueError('%s, type=%s, must be of the same type (%s)%s.' % ( item.name if hasattr(item, 'name') else str(item), item_type, expected_type, (' as %s' % original_item_str) if original_item_str else '')) return expected_type # Should be unreachable else: return expected_type @tf_export( 'debugging.assert_same_float_dtype', v1=['debugging.assert_same_float_dtype', 'assert_same_float_dtype']) @deprecation.deprecated_endpoints('assert_same_float_dtype') def assert_same_float_dtype(tensors=None, dtype=None): """Validate and return float type based on `tensors` and `dtype`. For ops such as matrix multiplication, inputs and weights must be of the same float type. This function validates that all `tensors` are the same type, validates that type is `dtype` (if supplied), and returns the type. Type must be a floating point type. If neither `tensors` nor `dtype` is supplied, the function will return `dtypes.float32`. Args: tensors: Tensors of input values. Can include `None` elements, which will be ignored. dtype: Expected type. Returns: Validated type. Raises: ValueError: if neither `tensors` nor `dtype` is supplied, or result is not float, or the common type of the inputs is not a floating point type. """ if tensors: dtype = _assert_same_base_type(tensors, dtype) if not dtype: dtype = dtypes.float32 elif not dtype.is_floating: raise ValueError('Expected floating point type, got %s.' % dtype) return dtype @tf_export('debugging.assert_scalar', v1=[]) def assert_scalar_v2(tensor, message=None, name=None): """Asserts that the given `tensor` is a scalar. This function raises `ValueError` unless it can be certain that the given `tensor` is a scalar. `ValueError` is also raised if the shape of `tensor` is unknown. This is always checked statically, so this method returns nothing. Args: tensor: A `Tensor`. message: A string to prefix to the default message. name: A name for this operation. Defaults to "assert_scalar" Raises: ValueError: If the tensor is not scalar (rank 0), or if its shape is unknown. """ assert_scalar(tensor=tensor, message=message, name=name) @tf_export(v1=['debugging.assert_scalar', 'assert_scalar']) @deprecation.deprecated_endpoints('assert_scalar') def assert_scalar(tensor, name=None, message=None): """Asserts that the given `tensor` is a scalar (i.e. zero-dimensional). This function raises `ValueError` unless it can be certain that the given `tensor` is a scalar. `ValueError` is also raised if the shape of `tensor` is unknown. Args: tensor: A `Tensor`. name: A name for this operation. Defaults to "assert_scalar" message: A string to prefix to the default message. Returns: The input tensor (potentially converted to a `Tensor`). Raises: ValueError: If the tensor is not scalar (rank 0), or if its shape is unknown. """ with ops.name_scope(name, 'assert_scalar', [tensor]) as name_scope: tensor = ops.convert_to_tensor(tensor, name=name_scope) shape = tensor.get_shape() if shape.ndims != 0: if context.executing_eagerly(): raise ValueError('%sExpected scalar shape, saw shape: %s.' % (message or '', shape,)) else: raise ValueError('%sExpected scalar shape for %s, saw shape: %s.' 
% (message or '', tensor.name, shape)) return tensor @tf_export('ensure_shape') def ensure_shape(x, shape, name=None): """Updates the shape of a tensor and checks at runtime that the shape holds. For example: ```python x = tf.compat.v1.placeholder(tf.int32) print(x.shape) ==> TensorShape(None) y = x * 2 print(y.shape) ==> TensorShape(None) y = tf.ensure_shape(y, (None, 3, 3)) print(y.shape) ==> TensorShape([Dimension(None), Dimension(3), Dimension(3)]) with tf.compat.v1.Session() as sess: # Raises tf.errors.InvalidArgumentError, because the shape (3,) is not # compatible with the shape (None, 3, 3) sess.run(y, feed_dict={x: [1, 2, 3]}) ``` NOTE: This differs from `Tensor.set_shape` in that it sets the static shape of the resulting tensor and enforces it at runtime, raising an error if the tensor's runtime shape is incompatible with the specified shape. `Tensor.set_shape` sets the static shape of the tensor without enforcing it at runtime, which may result in inconsistencies between the statically-known shape of tensors and the runtime value of tensors. Args: x: A `Tensor`. shape: A `TensorShape` representing the shape of this tensor, a `TensorShapeProto`, a list, a tuple, or None. name: A name for this operation (optional). Defaults to "EnsureShape". Returns: A `Tensor`. Has the same type and contents as `x`. At runtime, raises a `tf.errors.InvalidArgumentError` if `shape` is incompatible with the shape of `x`. """ if not isinstance(shape, tensor_shape.TensorShape): shape = tensor_shape.TensorShape(shape) return array_ops.ensure_shape(x, shape, name=name) @ops.RegisterGradient('EnsureShape') def _ensure_shape_grad(op, grad): del op # Unused. return grad
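# Illustrative sketch only: a minimal, assumed-typical example of chaining the
# checks defined above in front of a computation, mirroring the docstring
# examples. `_example_usage_sketch` is a hypothetical helper added purely for
# illustration; it is not part of the module's API and is never called by
# library code.
def _example_usage_sketch():
  """Sketch of typical usage of the assertions and `ensure_shape` above."""
  x = ops.convert_to_tensor([[1., 2.], [3., 4.]])
  y = ops.convert_to_tensor([[1., 2.], [3., 4.]])
  # In eager mode each check runs (and raises) immediately; when building a
  # graph, the returned ops gate the computation below via control
  # dependencies, as in the docstring examples.
  with ops.control_dependencies([assert_rank(x, 2), assert_near(x, y)]):
    total = math_ops.reduce_sum(x)
  # ensure_shape pins the static scalar shape of `total` and re-checks it at
  # runtime.
  return ensure_shape(total, ())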
tensorflow-master
tensorflow/python/ops/check_ops.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Module for constructing RNN Cells.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function # go/tf-wildcard-import # pylint: disable=wildcard-import from tensorflow.python.ops.rnn_cell_impl import * # pylint: enable=wildcard-import
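# Illustrative sketch only (assumed-typical usage, not part of this module's
# code): the wildcard import above re-exports the cell classes defined in
# rnn_cell_impl (e.g. `BasicRNNCell`, `GRUCell`, `LSTMCell`), so they can be
# used directly from this module:
#
#   from tensorflow.python.ops import rnn_cell
#   cell = rnn_cell.LSTMCell(num_units=64)
#   # `cell` maps (inputs, state) -> (output, new_state) and can be driven
#   # step by step or handed to tf.compat.v1.nn.dynamic_rnn.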
tensorflow-master
tensorflow/python/ops/rnn_cell.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Gradients for operators defined in array_ops.py.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import warnings from tensorflow.python import pywrap_tensorflow from tensorflow.python.eager import context from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import tensor_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import control_flow_util from tensorflow.python.ops import gen_array_ops from tensorflow.python.ops import gen_resource_variable_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import sparse_ops @ops.RegisterGradient("Pack") def _PackGrad(op, grad): """Gradient for pack op.""" return array_ops.unstack(grad, num=op.get_attr("N"), axis=op.get_attr("axis")) @ops.RegisterGradient("Unpack") def _UnpackGrad(op, *grads): """Gradient for unpack op.""" return array_ops.stack(grads, axis=op.get_attr("axis")) def _ConcatGradHelper(op, grad, start_value_index, end_value_index, dim_index): """Gradient for concat op. Args: op: An operation. grad: `Tensor` or `IndexedSlices` representing the gradients with respect to each output of the op. start_value_index: An integer index of the first value in the op.inputs. end_value_index: An integer index of the last value in the op.inputs. dim_index: An interger index of concat_dim or axis parameter in op.inputs. Returns: Tensors representing the partial gradients with respect to each input of the op. Raises: ValueError: if concat_dim/axis is not statically known. """ def _CreateDenseMaskAndBegin(sizes, concat_dim): """Create variables for iteratively slicing a dense gradients tensor.""" # Since shape is 1-D, shape_of_shape = [rank-of-inputs] shape_of_shape = array_ops.shape(sizes[0]) # Make a vector of length equal to the input's dimensions, # with 0's everywhere and 1 in the concat dim position. # Note: Can't use sparse_to_dense since it isn't GPU-capable (for now) mask = array_ops.concat([ array_ops.fill(array_ops.expand_dims(concat_dim, 0), 0), [1], array_ops.fill(shape_of_shape - concat_dim - 1, 0) ], 0) begin = array_ops.fill(shape_of_shape, 0) return mask, begin def _ExtractInputShapes(inputs): """Extract the shapes of a set of input tensors.""" if context.executing_eagerly(): return array_ops.shape_n(inputs) sizes = [] fully_known = True for x in inputs: input_shape = array_ops.shape(x) if not isinstance(input_shape, ops.Tensor) or input_shape.op.type != "Const": fully_known = False break sizes.append(input_shape) if fully_known: return sizes else: return array_ops.shape_n(inputs) # Degenerate concatenation, just return grad. 
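  # (Degenerate means a single value plus the concat_dim/axis input.) The lone
  # value's gradient is just `grad`, and the extra None slot corresponds to the
  # non-differentiable axis input, which precedes the values for Concat
  # (dim_index=0) and follows them for ConcatV2 (dim_index=-1).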
if len(op.inputs) == 2: return grad + [None] if end_value_index <= dim_index else [None] + grad concat_dim = op.inputs[dim_index] input_values = op.inputs[start_value_index:end_value_index] out_grads = [] if isinstance(grad, ops.Tensor): if context.executing_eagerly() or isinstance(concat_dim, ops.EagerTensor): # Using mod here for convenience since concat_dim is already verified # in concat implementation to be within the allowed [-rank, rank) range. non_neg_concat_dim = ( concat_dim._numpy().item(0) % input_values[0]._rank()) # pylint: disable=protected-access # All inputs are guaranteed to be EagerTensors in eager mode sizes = pywrap_tensorflow.TFE_Py_TensorShapeSlice(input_values, non_neg_concat_dim) out_grads = array_ops.split(grad, sizes, non_neg_concat_dim) else: if constant_op.is_constant(concat_dim): # If concat_dim is a constant defined in a different context, # then we duplicate it in the current context to avoid passing it # through an Enter node. # This is a small optimization in general, but it is required when # compiling with XLA, as XLA needs the concat input to be folded into a # constant. grad_context = control_flow_util.GetOutputContext(grad.op) dim_context = control_flow_util.GetOutputContext(concat_dim.op) if dim_context != grad_context: value = tensor_util.constant_value(concat_dim) concat_dim = constant_op.constant(value=value, dtype=concat_dim.dtype) # Using mod here for convenience since concat_dim is already verified # in concat implementation to be within the allowed [-rank, rank) range. non_neg_concat_dim = concat_dim % array_ops.rank(input_values[0]) # Get the inputs' tensor shapes sizes = _ExtractInputShapes(input_values) # The magic number of 16 was found through benchmarking a range of sizes # on CPUs and a Maxwell TitanX. A speedup was seen in a large majority of # cases when switching implementations at N=16, but it is possible that # there will be a small number of performance regressions. if len(sizes) > 16: # extract the size of each input along the concat dimension sizes = array_ops.squeeze( array_ops.slice( array_ops.stack(sizes, axis=1), [non_neg_concat_dim, 0], [1, -1])) out_grads = array_ops.split(grad, sizes, non_neg_concat_dim) else: offset = gen_array_ops.concat_offset(non_neg_concat_dim, sizes) for (begin, size) in zip(offset, sizes): out_grads.append(array_ops.slice(grad, begin, size)) elif isinstance(grad, ops.IndexedSlices): # Using mod here for convenience since concat_dim is already verified # in concat implementation to be within the allowed [-rank, rank) range. non_neg_concat_dim = concat_dim % array_ops.rank(input_values[0]) concat_dim_static = tensor_util.constant_value(concat_dim) if concat_dim_static is None: raise ValueError("Can only compute IndexedSlices gradient with " "statically-known concat_dim") if concat_dim_static < 0: rank = tensor_util.constant_value(array_ops.rank(input_values[0])) if rank is None: raise ValueError("Can only compute IndexedSlices gradient with " "negative concat_dim when first value rank is " "statically-known.") concat_dim_static %= rank # Get the inputs' tensor shapes sizes = [array_ops.shape(x) for x in input_values] if concat_dim_static > 0: # IndexedSlices, non_neg_concat_dim > 0. Each input gets IndexedSlices # gradients with all the indices, but with grad.values sliced accordingly. # This is like the Tensor case, except shape(grad.values)[0] is not equal # to shape(sizes[i])[0], since only a subset of the dim-0 values are # stored. 
mask, begin = _CreateDenseMaskAndBegin(sizes, non_neg_concat_dim) for size in sizes: new_values = array_ops.slice( grad.values, begin, array_ops.concat([[-1], array_ops.slice(size, [1], [-1])], 0)) out_grads.append(ops.IndexedSlices(new_values, grad.indices, size)) # Lint complains begin = begin + ... begin = math_ops.add(begin, size * mask) else: # IndexedSlices, concat_dim == 0. Each input gets IndexedSlices gradients # only for the relevant indices. start = constant_op.constant(0, dtype=grad.indices.dtype) for size in sizes: size_concat_dim = array_ops.gather(size, non_neg_concat_dim) if size_concat_dim.dtype != grad.indices.dtype: size_concat_dim = math_ops.cast( size_concat_dim, dtype=grad.indices.dtype) end = start + size_concat_dim # Compute the 1-D Tensor of indices relevant for this input. indices_to_select = array_ops.squeeze( array_ops.where( math_ops.logical_and(grad.indices >= start, grad.indices < end)), axis=[1]) new_indices = array_ops.gather(grad.indices, indices_to_select) - start new_values = array_ops.gather(grad.values, indices_to_select) out_grads.append(ops.IndexedSlices(new_values, new_indices, size)) start = end else: raise TypeError("Expected Tensor or IndexedSlices, got %s" % type(grad)) return (out_grads + [None] if end_value_index <= dim_index else [None] + out_grads) @ops.RegisterGradient("Concat") def _ConcatGrad(op, grad): return _ConcatGradHelper( op, grad, start_value_index=1, end_value_index=len(op.inputs), dim_index=0) @ops.RegisterGradient("ConcatV2") def _ConcatGradV2(op, grad): return _ConcatGradHelper( op, grad, start_value_index=0, end_value_index=-1, dim_index=-1) ops.NotDifferentiable("ConcatOffset") @ops.RegisterGradient("Slice") def _SliceGrad(op, grad): """Gradient for Slice op.""" # Create an Nx2 padding where the first column represents how many # zeros are to be prepended for each dimension, and the second # column indicates how many zeros are appended. # # The number of zeros to append is the shape of the input # elementwise-subtracted by both the begin vector and sizes vector. # # Some more reshaping is needed to assemble this tensor with the # right dimensions. input_vec = op.inputs[0] begin_vec = op.inputs[1] input_rank = array_ops.rank(input_vec) slice_size = array_ops.shape(op.outputs[0]) shape = array_ops.stack([input_rank, 1]) before_pad = array_ops.reshape(begin_vec, shape) after_pad = array_ops.reshape( array_ops.shape(input_vec) - slice_size - begin_vec, shape) paddings = array_ops.concat([before_pad, after_pad], 1) return array_ops.pad(grad, paddings), None, None @ops.RegisterGradient("StridedSlice") def _StridedSliceGrad(op, grad): """Gradient for StridedSlice op.""" begin = op.inputs[1] end = op.inputs[2] strides = op.inputs[3] # StridedSliceGrad requires `x`, `begin`, `end` and `strides` to be of the # same dtype so we build a shape of the same type as other args. # Note that the choice of `begin` for specifying `out_type` is arbitrary. # We could choose any of {begin|end|strides}.dtype since they are required to # be the same. 
x = array_ops.shape(op.inputs[0], out_type=begin.dtype) return array_ops.strided_slice_grad( x, begin, end, strides, grad, begin_mask=op.get_attr("begin_mask"), end_mask=op.get_attr("end_mask"), ellipsis_mask=op.get_attr("ellipsis_mask"), new_axis_mask=op.get_attr("new_axis_mask"), shrink_axis_mask=op.get_attr("shrink_axis_mask")), None, None, None @ops.RegisterGradient("StridedSliceGrad") def _StridedSliceGradGrad(op, grad): """Gradient for StridedSliceGrad op.""" begin = op.inputs[1] end = op.inputs[2] strides = op.inputs[3] return None, None, None, None, array_ops.strided_slice( grad, begin, end, strides, begin_mask=op.get_attr("begin_mask"), end_mask=op.get_attr("end_mask"), ellipsis_mask=op.get_attr("ellipsis_mask"), new_axis_mask=op.get_attr("new_axis_mask"), shrink_axis_mask=op.get_attr("shrink_axis_mask")) @ops.RegisterGradient("Split") def _SplitGrad(op, *grads): return None, array_ops.concat(list(grads), op.inputs[0]) @ops.RegisterGradient("SplitV") def _SplitVGrad(op, *grads): returnval = array_ops.concat(list(grads), op.inputs[2]) returnval = [returnval] + [ None, ] * ( len(op.inputs) - 1) return returnval ops.NotDifferentiable("Const") @ops.RegisterGradient("Diag") def _DiagGrad(_, grad): return array_ops.diag_part(grad) @ops.RegisterGradient("DiagPart") def _DiagPartGrad(_, grad): return array_ops.diag(grad) @ops.RegisterGradient("MatrixDiag") def _MatrixDiagGrad(_, grad): return array_ops.matrix_diag_part(grad) @ops.RegisterGradient("MatrixDiagV2") def _MatrixDiagV2Grad(op, grad): return array_ops.matrix_diag_part( grad, k=op.inputs[1]), None, None, None, None @ops.RegisterGradient("MatrixDiagPart") def _MatrixDiagPartGrad(op, grad): matrix_shape = op.inputs[0].get_shape()[-2:] if matrix_shape.is_fully_defined() and matrix_shape[0] == matrix_shape[1]: return array_ops.matrix_diag(grad) else: return array_ops.matrix_set_diag(array_ops.zeros_like(op.inputs[0]), grad) @ops.RegisterGradient("MatrixDiagPartV2") def _MatrixDiagPartV2Grad(op, grad): """Gradient for MatrixDiagPartV2.""" matrix_shape = op.inputs[0].get_shape()[-2:] if matrix_shape.is_fully_defined(): return array_ops.matrix_diag( grad, k=op.inputs[1], num_rows=matrix_shape[0], num_cols=matrix_shape[1]), None, None else: return array_ops.matrix_set_diag( array_ops.zeros_like(op.inputs[0]), grad, k=op.inputs[1]), None, None @ops.RegisterGradient("MatrixSetDiag") def _MatrixSetDiagGrad(op, grad): """Gradient for MatrixSetDiag.""" input_shape = op.inputs[0].get_shape().merge_with(grad.get_shape()) diag_shape = op.inputs[1].get_shape() batch_shape = input_shape[:-2].merge_with(diag_shape[:-1]) matrix_shape = input_shape[-2:] if batch_shape.is_fully_defined() and matrix_shape.is_fully_defined(): diag_shape = batch_shape.as_list() + [min(matrix_shape.as_list())] else: with ops.colocate_with(grad): grad_shape = array_ops.shape(grad) grad_rank = array_ops.rank(grad) batch_shape = array_ops.slice(grad_shape, [0], [grad_rank - 2]) matrix_shape = array_ops.slice(grad_shape, [grad_rank - 2], [2]) min_dim = math_ops.reduce_min(matrix_shape) diag_shape = array_ops.concat([batch_shape, [min_dim]], 0) grad_input = array_ops.matrix_set_diag(grad, array_ops.zeros( diag_shape, dtype=grad.dtype)) grad_diag = array_ops.matrix_diag_part(grad) return (grad_input, grad_diag) @ops.RegisterGradient("MatrixSetDiagV2") def _MatrixSetDiagGradV2(op, grad): """Gradient for MatrixSetDiag.""" diag_shape = op.inputs[1].get_shape() if not diag_shape.is_fully_defined(): # Need to know the values of `d_lower` and `d_upper` to infer diag_shape. 
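    # Illustrative note (hypothetical values, not from the original code): for
    # a [..., 4, 5] matrix with k = (-1, 1), the code below yields
    # y_offset = 0, x_offset = 0, max_diag_len = min(4, 5) = 4, and a
    # diag_shape of [..., d_upper - d_lower + 1, max_diag_len] = [..., 3, 4],
    # matching the shape matrix_diag_part would produce for the same band.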
grad_shape = array_ops.shape(grad) batch_shape = grad_shape[:-2] matrix_shape = grad_shape[-2:] diag_index = array_ops.reshape(op.inputs[2], [-1]) # Converts to vector. d_lower = diag_index[0] d_upper = diag_index[-1] # Works both when len(diag_index) is 1 and 2. y_offset = control_flow_ops.cond( math_ops.less(d_upper, 0), lambda: d_upper, lambda: 0) x_offset = control_flow_ops.cond( math_ops.greater(d_lower, 0), lambda: -d_lower, lambda: 0) max_diag_len = math_ops.minimum(matrix_shape[0] + y_offset, matrix_shape[1] + x_offset) # pylint: disable=g-long-lambda # pyformat: disable postfix = control_flow_ops.cond( math_ops.equal(d_lower, d_upper), lambda: ops.convert_to_tensor([max_diag_len]), lambda: ops.convert_to_tensor([d_upper - d_lower + 1, max_diag_len])) # pyformat: enable # pylint: enable=g-long-lambda diag_shape = array_ops.concat([batch_shape, postfix], 0) grad_input = array_ops.matrix_set_diag( grad, array_ops.zeros(diag_shape, dtype=grad.dtype), k=op.inputs[2]) grad_diag = array_ops.matrix_diag_part(grad, k=op.inputs[2]) return (grad_input, grad_diag, None) @ops.RegisterGradient("MatrixBandPart") def _MatrixBandPartGrad(op, grad): num_lower = op.inputs[1] num_upper = op.inputs[2] return (array_ops.matrix_band_part(grad, num_lower, num_upper), None, None) # Edit Distance has no gradient (but can be used to eval seq2seq or CTC). ops.NotDifferentiable("EditDistance") @ops.RegisterGradient("Fill") def _FillGrad(_, grad): return None, math_ops.reduce_sum(grad) ops.NotDifferentiable("ZerosLike") ops.NotDifferentiable("OnesLike") @ops.RegisterGradient("PreventGradient") def _PreventGradientGrad(op, _): raise LookupError( "Gradient explicitly disabled. Reason: %s" % op.get_attr("message")) @ops.RegisterGradient("Gather") def _GatherGrad(op, grad): """Gradient for Gather op.""" # params can be large, so colocate the shape calculation with it. # # params can be very large for sparse model, array_ops.shape raises # exception on the Windows platform when any dimension is larger than # int32. params_shape is not used in optimizer apply_sparse gradients, # so it's fine to convert it back to int32 regardless of truncation. params = op.inputs[0] with ops.colocate_with(params): params_shape = array_ops.shape(params, out_type=ops.dtypes.int64) params_shape = math_ops.cast(params_shape, dtypes.int32) # Build appropriately shaped IndexedSlices indices = op.inputs[1] size = array_ops.expand_dims(array_ops.size(indices), 0) values_shape = array_ops.concat([size, params_shape[1:]], 0) with warnings.catch_warnings(): warnings.filterwarnings( "ignore", message="Converting sparse IndexedSlices to a dense Tensor.*") values = array_ops.reshape(grad, values_shape) indices = array_ops.reshape(indices, size) return [ops.IndexedSlices(values, indices, params_shape), None] @ops.RegisterGradient("GatherV2") def _GatherV2Grad(op, grad): """Gradient for GatherV2 op.""" # params can be large, so colocate the shape calculation with it. # # params can be very large for sparse model, array_ops.shape raises # exception on the Windows platform when any dimension is larger than # int32. params_shape is not used in optimizer apply_sparse gradients, # so it's fine to convert it back to int32 regardless of truncation. 
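  # Illustrative note (hypothetical shapes, not from the original code): for
  # params of shape [10, 4], indices [7, 2, 7] and axis 0, `grad` has shape
  # [3, 4] and the axis-0 fast path below returns
  # IndexedSlices(values=grad, indices=[7, 2, 7], dense_shape=[10, 4]);
  # duplicate indices are summed later when the slices are applied.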
params = op.inputs[0] with ops.colocate_with(params): params_shape = array_ops.shape(params, out_type=ops.dtypes.int64) params_shape = math_ops.cast(params_shape, dtypes.int32) indices = op.inputs[1] indices_size = array_ops.expand_dims(array_ops.size(indices), 0) axis = op.inputs[2] axis_static = tensor_util.constant_value(axis) # For axis 0 gathers, build an appropriately shaped IndexedSlices. if axis_static == 0: if context.executing_eagerly(): params_tail_shape = params_shape.cpu()[1:] else: params_tail_shape = params_shape[1:] values_shape = array_ops.concat([indices_size, params_tail_shape], 0) with warnings.catch_warnings(): warnings.filterwarnings( "ignore", message="Converting sparse IndexedSlices to a dense Tensor.*") values = array_ops.reshape(grad, values_shape) indices = array_ops.reshape(indices, indices_size) return [ops.IndexedSlices(values, indices, params_shape), None, None] outer_shape = params_shape[:axis] outer_dims = array_ops.size(outer_shape) inner_shape = params_shape[axis:][1:] inner_dims = array_ops.size(inner_shape) outer_axes_indices = math_ops.range(outer_dims) inner_axes_indices = math_ops.range(outer_dims + 1, outer_dims + 1 + inner_dims) values_shape = array_ops.concat([outer_shape, indices_size, inner_shape], 0) with warnings.catch_warnings(): warnings.filterwarnings( "ignore", message="Converting sparse IndexedSlices to a dense Tensor.*") values = array_ops.reshape(grad, values_shape) indices = array_ops.reshape(indices, indices_size) # We need to sum up every slice `values[..., i, ....]` corresponding to # `params[..., indices[i], ...]`. Since `unsorted_segment_sum` does not # support an axis parameter, we transpose the gather dimension to the front, # then use `unsorted_segment_sum` to build a # [gather_axis, outer_axes, inner_axes] tensor with all the gradients # affecting each index in `gather_axis` summed up. transpose_dims = array_ops.concat( [[outer_dims], outer_axes_indices, inner_axes_indices], 0) values_transpose = array_ops.transpose(values, transpose_dims) num_segments = params_shape[axis] params_grad = math_ops.unsorted_segment_sum(values_transpose, indices, num_segments) # Inverts the above transpose by moving dimension 0 back to its original # position. invert_transpose_dims = array_ops.concat( [outer_axes_indices + 1, [0], inner_axes_indices], 0) params_grad = array_ops.transpose(params_grad, invert_transpose_dims) return [params_grad, None, None] @ops.RegisterGradient("GatherNd") def _GatherNdGrad(op, grad): ref = op.inputs[0] indices = op.inputs[1] ref_shape = array_ops.shape(ref, out_type=indices.dtype) if indices.shape.ndims == 2 and indices.shape.dims[-1].value == 1: ref_grad = ops.IndexedSlices(grad, array_ops.squeeze(indices, axis=-1), ref_shape) else: ref_grad = array_ops.scatter_nd(indices, grad, ref_shape) return [ref_grad, None] @ops.RegisterGradient("ResourceGatherNd") def _ResourceGatherNdGrad(op, grad): # pylint: disable=missing-docstring ref = op.inputs[0] indices = op.inputs[1] ref_shape = gen_resource_variable_ops.variable_shape(ref, indices.dtype) if indices.shape.ndims == 2 and indices.shape.dims[-1].value == 1: ref_grad = ops.IndexedSlices(grad, array_ops.squeeze(indices, axis=-1), ref_shape) else: ref_grad = array_ops.scatter_nd(indices, grad, ref_shape) return [ref_grad, None] @ops.RegisterGradient("CheckNumerics") def _CheckNumericsGrad(op, grad): """Gradient for check_numerics op.""" return array_ops.check_numerics( grad, "Not a number (NaN) or infinity (Inf) values detected in gradient. 
%s" % op.get_attr("message")) @ops.RegisterGradient("PlaceholderWithDefault") @ops.RegisterGradient("Identity") def _IdGrad(_, grad): return grad @ops.RegisterGradient("RefIdentity") def _RefIdGrad(_, grad): return grad @ops.RegisterGradient("IdentityN") def _IdNGrad(_, *grad): return grad ops.NotDifferentiable("StopGradient") @ops.RegisterGradient("Reshape") def _ReshapeGrad(op, grad): with warnings.catch_warnings(): warnings.filterwarnings( "ignore", message="Converting sparse IndexedSlices to a dense Tensor.*") return [array_ops.reshape(grad, array_ops.shape(op.inputs[0])), None] ops.NotDifferentiable("InvertPermutation") def _ReshapeToInput(op, grad): """Reshapes the gradient to the shape of the original input.""" with warnings.catch_warnings(): warnings.filterwarnings( "ignore", message="Converting sparse IndexedSlices to a dense Tensor.*") return array_ops.reshape(grad, array_ops.shape(op.inputs[0])) @ops.RegisterGradient("ExpandDims") def _ExpandDimsGrad(op, grad): return [_ReshapeToInput(op, grad), None] @ops.RegisterGradient("Squeeze") def _SqueezeGrad(op, grad): return _ReshapeToInput(op, grad) @ops.RegisterGradient("Transpose") def _TransposeGrad(op, grad): """Returns unshuffle(grad).""" p = op.inputs[1] return [array_ops.transpose(grad, array_ops.invert_permutation(p)), None] @ops.RegisterGradient("ConjugateTranspose") def _ConjugateTransposeGrad(op, grad): """Returns conj(unshuffle(grad)).""" p = op.inputs[1] return [ array_ops.transpose( grad, array_ops.invert_permutation(p), conjugate=True), None ] ops.NotDifferentiable("Shape") ops.NotDifferentiable("ShapeN") ops.NotDifferentiable("Rank") ops.NotDifferentiable("Size") @ops.RegisterGradient("Tile") def _TileGrad(op, grad): """Sum reduces grad along the tiled dimensions.""" input_shape = array_ops.shape(op.inputs[0], out_type=op.inputs[1].dtype) # We interleave multiples and input_shape to get split_shape, # reshape grad to split_shape, and reduce along all even # dimensions (the tiled dimensions) to get the result # with shape input_shape. For example # input_shape = [20, 30, 40] # multiples = [2, 3, 4] # split_shape = [2, 20, 3, 30, 4, 40] # axes = [0, 2, 4] split_shape = array_ops.reshape( array_ops.transpose(array_ops.stack([op.inputs[1], input_shape])), [-1]) axes = math_ops.range(0, array_ops.size(split_shape), 2) # Sum reduces grad along the first dimension for IndexedSlices if isinstance(grad, ops.IndexedSlices): input_shape_0 = math_ops.cast(input_shape[0], grad.indices.dtype) grad = math_ops.unsorted_segment_sum( grad.values, math_ops.mod(grad.indices, input_shape_0), input_shape_0) split_shape = array_ops.concat([[1], split_shape[1:]], axis=0) input_grad = math_ops.reduce_sum(array_ops.reshape(grad, split_shape), axes) # Fix shape inference if not context.executing_eagerly(): input_grad.set_shape(op.inputs[0].get_shape()) return [input_grad, None] ops.NotDifferentiable("BroadcastGradientArgs") def _PadGrad(op, grad): """Gradient for Pad.""" # Pad introduces values around the original tensor, so the gradient function # slices the original shape out of the gradient.""" x = op.inputs[0] a = op.inputs[1] # [Rank(x), 2] # Takes a slice of a. The 1st column. [Rank(x), 1]. pad_before = array_ops.slice(a, [0, 0], array_ops.stack([array_ops.rank(x), 1])) # Make it a 1-D tensor. 
begin = array_ops.reshape(pad_before, [-1]) sizes = array_ops.shape(x) x_grad = array_ops.slice(grad, begin, sizes) if len(op.inputs) == 3: return x_grad, None, None else: return x_grad, None ops.RegisterGradient("Pad")(_PadGrad) ops.RegisterGradient("PadV2")(_PadGrad) # ReverseSequence is just a permutation. The gradient permutes back. @ops.RegisterGradient("ReverseSequence") def _ReverseSequenceGrad(op, grad): seq_lengths = op.inputs[1] return [ array_ops.reverse_sequence( grad, batch_axis=op.get_attr("batch_dim"), seq_axis=op.get_attr("seq_dim"), seq_lengths=seq_lengths), None ] @ops.RegisterGradient("Reverse") def _ReverseGrad(op, grad): reverse_dims = op.inputs[1] return gen_array_ops.reverse(grad, reverse_dims), None @ops.RegisterGradient("ReverseV2") def _ReverseV2Grad(op, grad): axis = op.inputs[1] return array_ops.reverse_v2(grad, axis), None @ops.RegisterGradient("SpaceToBatch") def _SpaceToBatchGrad(op, grad): # Its gradient is the opposite op: BatchToSpace. block_size = op.get_attr("block_size") return [ array_ops.batch_to_space(grad, op.inputs[1], block_size=block_size), None ] @ops.RegisterGradient("SpaceToBatchND") def _SpaceToBatchNDGrad(op, grad): # Its gradient is the opposite op: BatchToSpaceND. return [ array_ops.batch_to_space_nd(grad, op.inputs[1], op.inputs[2]), None, None ] @ops.RegisterGradient("BatchToSpace") def _BatchToSpaceGrad(op, grad): # Its gradient is the opposite op: SpaceToBatch. block_size = op.get_attr("block_size") return [ array_ops.space_to_batch(grad, op.inputs[1], block_size=block_size), None ] @ops.RegisterGradient("BatchToSpaceND") def _BatchToSpaceNDGrad(op, grad): # Its gradient is the opposite op: SpaceToBatchND. return [ array_ops.space_to_batch_nd(grad, op.inputs[1], op.inputs[2]), None, None ] @ops.RegisterGradient("SpaceToDepth") def _SpaceToDepthGrad(op, grad): # Its gradient is the opposite op: DepthToSpace. block_size = op.get_attr("block_size") data_format = op.get_attr("data_format") if data_format == "NCHW_VECT_C": raise ValueError("Cannot compute SpaceToDepth gradient with NCHW_VECT_C. " "NCHW_VECT_C requires qint8 data type.") return array_ops.depth_to_space(grad, block_size, data_format=data_format) @ops.RegisterGradient("DepthToSpace") def _DepthToSpaceGrad(op, grad): # Its gradient is the opposite op: SpaceToDepth. block_size = op.get_attr("block_size") data_format = op.get_attr("data_format") if data_format == "NCHW_VECT_C": raise ValueError("Cannot compute DepthToSpace gradient with NCHW_VECT_C. " "NCHW_VECT_C requires qint8 data type.") return array_ops.space_to_depth(grad, block_size, data_format=data_format) ops.NotDifferentiable("OneHot") @ops.RegisterGradient("MirrorPad") def _MirrorPadGrad(op, grad): mode = op.get_attr("mode") return [gen_array_ops.mirror_pad_grad(grad, op.inputs[1], mode=mode), None] @ops.RegisterGradient("MirrorPadGrad") def _MirrorPadGradGrad(op, grad): mode = op.get_attr("mode") return [gen_array_ops.mirror_pad(grad, op.inputs[1], mode=mode), None] @ops.RegisterGradient("QuantizeAndDequantize") def _QuantizeAndDequantizeGrad(_, grad): return grad @ops.RegisterGradient("QuantizeAndDequantizeV2") def _QuantizeAndDequantizeV2Grad(_, grad): return [grad, None, None] @ops.RegisterGradient("QuantizeAndDequantizeV3") def _QuantizeAndDequantizeV3Grad(_, grad): # Only propagate the gradient for the unquantized input. 
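  # The op's remaining inputs are input_min, input_max and num_bits, hence the
  # three Nones returned alongside the pass-through gradient.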
return [grad, None, None, None] @ops.RegisterGradient("ExtractImagePatches") def _ExtractImagePatchesGrad(op, grad): batch_size, rows_in, cols_in, channels = [ dim.value for dim in op.inputs[0].shape.dims ] input_bhwc = array_ops.shape(op.inputs[0]) batch_size = input_bhwc[0] channels = input_bhwc[3] # Create indices matrix for input tensor. # Note that 0 is preserved for padding location, # so indices for input start from 1 to 1 + rows_in * cols_in. input_indices_num = 1 + rows_in * cols_in input_idx = array_ops.reshape(math_ops.range(1, input_indices_num, dtype=ops.dtypes.int64), (1, rows_in, cols_in, 1)) input_idx_patched = gen_array_ops.extract_image_patches( input_idx, op.get_attr("ksizes"), op.get_attr("strides"), op.get_attr("rates"), op.get_attr("padding")) # Create indices matrix for output tensor. _, rows_out, cols_out, _ = [dim.value for dim in op.outputs[0].shape.dims] _, ksize_r, ksize_c, _ = op.get_attr("ksizes") # Indices for output start from 0. output_indices_num = rows_out * cols_out * ksize_r * ksize_c output_idx = array_ops.reshape(math_ops.range(output_indices_num, dtype=ops.dtypes.int64), (1, rows_out, cols_out, ksize_r * ksize_c)) # Construct mapping table for indices: (input -> output). idx_matrix = array_ops.concat( [array_ops.expand_dims(input_idx_patched, axis=-1), array_ops.expand_dims(output_idx, axis=-1)], axis=-1) idx_map = array_ops.reshape(idx_matrix, (-1, 2)) sp_shape = (input_indices_num, output_indices_num) sp_mat_full = sparse_tensor.SparseTensor( idx_map, array_ops.ones([output_indices_num], dtype=grad.dtype), sp_shape) # Remove all padding locations [0, :]. sp_mat = sparse_ops.sparse_slice(sp_mat_full, (1, 0), (input_indices_num - 1, output_indices_num)) with warnings.catch_warnings(): warnings.filterwarnings( "ignore", message="Converting sparse IndexedSlices to a dense Tensor.*") grad_expanded = array_ops.transpose( array_ops.reshape( grad, (batch_size, rows_out, cols_out, ksize_r, ksize_c, channels)), (1, 2, 3, 4, 0, 5)) grad_flat = array_ops.reshape(grad_expanded, (-1, batch_size * channels)) jac = sparse_ops.sparse_tensor_dense_matmul(sp_mat, grad_flat) grad_out = array_ops.reshape(jac, (rows_in, cols_in, batch_size, channels)) grad_out = array_ops.transpose(grad_out, (2, 0, 1, 3)) return [grad_out] @ops.RegisterGradient("ExtractVolumePatches") def _ExtractVolumePatchesGrad(op, grad): batch_size, planes_in, rows_in, cols_in, channels = [ dim.value for dim in op.inputs[0].shape.dims ] input_bphwc = array_ops.shape(op.inputs[0]) batch_size = input_bphwc[0] channels = input_bphwc[4] # Create indices matrix for input tensor. # Note that 0 is preserved for padding location, # so indices for input start from 1 to 1 + rows_in * cols_in. input_indices_num = 1 + planes_in * rows_in * cols_in input_idx = array_ops.reshape( math_ops.range(1, input_indices_num, dtype=ops.dtypes.int64), (1, planes_in, rows_in, cols_in, 1)) input_idx_patched = gen_array_ops.extract_volume_patches( input_idx, op.get_attr("ksizes"), op.get_attr("strides"), op.get_attr("padding")) # Create indices matrix for output tensor. _, planes_out, rows_out, cols_out, _ = [ dim.value for dim in op.outputs[0].shape.dims ] _, ksize_p, ksize_r, ksize_c, _ = op.get_attr("ksizes") # Indices for output start from 0. 
prc_indices_num = planes_out * rows_out * cols_out output_indices_num = prc_indices_num * ksize_p * ksize_r * ksize_c output_idx = array_ops.reshape( math_ops.range(output_indices_num, dtype=ops.dtypes.int64), (1, planes_out, rows_out, cols_out, ksize_p * ksize_r * ksize_c)) # Construct mapping table for indices: (input -> output). idx_matrix = array_ops.concat([ array_ops.expand_dims(input_idx_patched, axis=-1), array_ops.expand_dims(output_idx, axis=-1) ], axis=-1) idx_map = array_ops.reshape(idx_matrix, (-1, 2)) sp_shape = (input_indices_num, output_indices_num) sp_mat_full = sparse_tensor.SparseTensor( idx_map, array_ops.ones([output_indices_num], dtype=grad.dtype), sp_shape) # Remove all padding locations [0, :]. sp_mat = sparse_ops.sparse_slice(sp_mat_full, (1, 0), (input_indices_num - 1, output_indices_num)) with warnings.catch_warnings(): warnings.filterwarnings( "ignore", message="Converting sparse IndexedSlices to a dense Tensor.*") grad_expanded = array_ops.transpose( array_ops.reshape(grad, (batch_size, planes_out, rows_out, cols_out, ksize_p, ksize_r, ksize_c, channels)), (1, 2, 3, 4, 5, 6, 0, 7)) grad_flat = array_ops.reshape(grad_expanded, (-1, batch_size * channels)) jac = sparse_ops.sparse_tensor_dense_matmul(sp_mat, grad_flat) grad_out = array_ops.reshape( jac, (planes_in, rows_in, cols_in, batch_size, channels)) grad_out = array_ops.transpose(grad_out, (3, 0, 1, 2, 4)) return [grad_out] @ops.RegisterGradient("ScatterNd") def _ScatterNdGrad(op, grad): indices = op.inputs[0] updates_grad = array_ops.gather_nd(grad, indices) return [None, updates_grad, None] @ops.RegisterGradient("TensorScatterUpdate") def _TensorScatterUpdateGrad(op, grad): indices = op.inputs[1] updates_grad = array_ops.gather_nd(grad, indices) tensor_grad = array_ops.tensor_scatter_update( array_ops.identity(grad), indices, array_ops.zeros_like(op.inputs[2], dtype=grad.dtype)) return [tensor_grad, None, updates_grad] @ops.RegisterGradient("TensorScatterAdd") def _TensorScatterAddGrad(op, grad): indices = op.inputs[1] updates_grad = array_ops.gather_nd(grad, indices) tensor_grad = array_ops.identity(grad) return [tensor_grad, None, updates_grad] @ops.RegisterGradient("TensorScatterSub") def _TensorScatterSubGrad(op, grad): indices = op.inputs[1] updates_grad = array_ops.gather_nd(grad, indices) tensor_grad = array_ops.identity(grad) return [tensor_grad, None, -updates_grad] @ops.RegisterGradient("ScatterNdNonAliasingAdd") def _ScatterNdNonAliasingAddGrad(op, grad): indices = op.inputs[1] updates_grad = array_ops.gather_nd(grad, indices) return [grad, None, updates_grad] @ops.RegisterGradient("BroadcastTo") def _BroadcastToGrad(op, grad): input_value = op.inputs[0] broadcast_shape = op.inputs[1] input_value_shape = array_ops.shape(input_value) _, reduction_axes = gen_array_ops.broadcast_gradient_args(broadcast_shape, input_value_shape) updates_grad_reshaped = math_ops.reduce_sum(grad, axis=reduction_axes, keepdims=True) updates_grad = array_ops.reshape(updates_grad_reshaped, input_value_shape) return [updates_grad, None]
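

# Illustrative usage sketch (not part of the original module): the gradient
# functions registered above are looked up automatically by autodiff. For
# example, the `BroadcastTo` gradient sums over the broadcast axes:
#
#   import tensorflow as tf
#   x = tf.constant([1.0, 2.0, 3.0])
#   with tf.GradientTape() as tape:
#     tape.watch(x)
#     y = tf.reduce_sum(tf.broadcast_to(x, [4, 3]))
#   print(tape.gradient(y, x))  # -> [4.0, 4.0, 4.0], summed over the 4 rows.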
tensorflow-master
tensorflow/python/ops/array_grad.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """CTC (Connectionist Temporal Classification) Operations.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.eager import context from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import function from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import tensor_shape from tensorflow.python.ops import array_ops from tensorflow.python.ops import custom_gradient from tensorflow.python.ops import functional_ops from tensorflow.python.ops import gen_ctc_ops from tensorflow.python.ops import inplace_ops from tensorflow.python.ops import linalg_ops from tensorflow.python.ops import map_fn from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn_ops from tensorflow.python.ops import sparse_ops from tensorflow.python.ops.nn_grad import _BroadcastMul from tensorflow.python.util import deprecation from tensorflow.python.util import nest from tensorflow.python.util.tf_export import tf_export # pylint: disable=protected-access, invalid-name @tf_export(v1=["nn.ctc_loss"]) def ctc_loss(labels, inputs=None, sequence_length=None, preprocess_collapse_repeated=False, ctc_merge_repeated=True, ignore_longer_outputs_than_inputs=False, time_major=True, logits=None): """Computes the CTC (Connectionist Temporal Classification) Loss. This op implements the CTC loss as presented in the article: [A. Graves, S. Fernandez, F. Gomez, J. Schmidhuber. Connectionist Temporal Classification: Labeling Unsegmented Sequence Data with Recurrent Neural Networks. ICML 2006, Pittsburgh, USA, pp. 369-376.](http://www.cs.toronto.edu/~graves/icml_2006.pdf) Input requirements: ``` sequence_length(b) <= time for all b max(labels.indices(labels.indices[:, 1] == b, 2)) <= sequence_length(b) for all b. ``` Notes: This class performs the softmax operation for you, so inputs should be e.g. linear projections of outputs by an LSTM. The `inputs` Tensor's innermost dimension size, `num_classes`, represents `num_labels + 1` classes, where num_labels is the number of true labels, and the largest value `(num_classes - 1)` is reserved for the blank label. For example, for a vocabulary containing 3 labels `[a, b, c]`, `num_classes = 4` and the labels indexing is `{a: 0, b: 1, c: 2, blank: 3}`. Regarding the arguments `preprocess_collapse_repeated` and `ctc_merge_repeated`: If `preprocess_collapse_repeated` is True, then a preprocessing step runs before loss calculation, wherein repeated labels passed to the loss are merged into single labels. This is useful if the training labels come from, e.g., forced alignments and therefore have unnecessary repetitions. 
  If `ctc_merge_repeated` is set False, then deep within the CTC calculation,
  repeated non-blank labels will not be merged and are interpreted as
  individual labels. This is a simplified (non-standard) version of CTC.

  Here is a table of the (roughly) expected first order behavior:

  * `preprocess_collapse_repeated=False`, `ctc_merge_repeated=True`

    Classical CTC behavior: Outputs true repeated classes with blanks in
    between, and can also output repeated classes with no blanks in
    between that need to be collapsed by the decoder.

  * `preprocess_collapse_repeated=True`, `ctc_merge_repeated=False`

    Never learns to output repeated classes, as they are collapsed
    in the input labels before training.

  * `preprocess_collapse_repeated=False`, `ctc_merge_repeated=False`

    Outputs repeated classes with blanks in between, but generally does not
    require the decoder to collapse/merge repeated classes.

  * `preprocess_collapse_repeated=True`, `ctc_merge_repeated=True`

    Untested. Very likely will not learn to output repeated classes.

  The `ignore_longer_outputs_than_inputs` option allows you to specify the
  behavior of the CTCLoss when dealing with sequences that have longer outputs
  than inputs. If true, the CTCLoss will simply return zero gradient for those
  items, otherwise an InvalidArgument error is returned, stopping training.

  Args:
    labels: An `int32` `SparseTensor`.
      `labels.indices[i, :] == [b, t]` means `labels.values[i]` stores the id
      for (batch b, time t). `labels.values[i]` must take on values in
      `[0, num_labels)`. See `core/ops/ctc_ops.cc` for more details.
    inputs: 3-D `float` `Tensor`.
      If time_major == False, this will be a `Tensor` shaped:
        `[batch_size, max_time, num_classes]`.
      If time_major == True (default), this will be a `Tensor` shaped:
        `[max_time, batch_size, num_classes]`. The logits.
    sequence_length: 1-D `int32` vector, size `[batch_size]`. The sequence
      lengths.
    preprocess_collapse_repeated: Boolean. Default: False. If True, repeated
      labels are collapsed prior to the CTC calculation.
    ctc_merge_repeated: Boolean. Default: True.
    ignore_longer_outputs_than_inputs: Boolean. Default: False. If True,
      sequences with longer outputs than inputs will be ignored.
    time_major: The shape format of the `inputs` Tensors. If True, these
      `Tensors` must be shaped `[max_time, batch_size, num_classes]`. If False,
      these `Tensors` must be shaped `[batch_size, max_time, num_classes]`.
      Using `time_major = True` (default) is a bit more efficient because it
      avoids transposes at the beginning of the ctc_loss calculation. However,
      most TensorFlow data is batch-major, so this function also accepts
      inputs in batch-major form.
    logits: Alias for inputs.

  Returns:
    A 1-D `float` `Tensor`, size `[batch]`, containing the negative log
    probabilities.

  Raises:
    TypeError: if labels is not a `SparseTensor`.
  """
  # The second, third, etc. output tensors contain the gradients. We use it in
  # _CTCLossGrad() below.
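  # Illustrative call sketch (assumed shapes, not from the original code): with
  # `logits` of shape [max_time=50, batch_size=8, num_classes=30] and `labels`
  # an int32 SparseTensor whose values lie in [0, 29),
  #   loss = tf.compat.v1.nn.ctc_loss(labels, inputs=logits,
  #                                   sequence_length=[50] * 8)
  # returns a length-8 vector of negative log probabilities.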
if not isinstance(labels, sparse_tensor.SparseTensor): raise TypeError("Expected labels (first argument) to be a SparseTensor") # For internal calculations, we transpose to [time, batch, num_classes] inputs = deprecation.deprecated_argument_lookup("logits", logits, "inputs", inputs) if not time_major: inputs = array_ops.transpose(inputs, [1, 0, 2]) # (B,T,N) => (T,B,N) loss, _ = gen_ctc_ops.ctc_loss( inputs, labels.indices, labels.values, sequence_length, preprocess_collapse_repeated=preprocess_collapse_repeated, ctc_merge_repeated=ctc_merge_repeated, ignore_longer_outputs_than_inputs=ignore_longer_outputs_than_inputs) return loss # pylint: disable=unused-argument @ops.RegisterGradient("CTCLoss") def _CTCLossGrad(op, grad_loss, _): """The derivative provided by CTC Loss. Args: op: the CTCLoss op. grad_loss: The backprop for cost. Returns: The CTC Loss gradient. """ # Outputs are: loss, grad # # Currently there is no way to take the second derivative of this op # due to the fused implementation's interaction with tf.gradients(), # so we make sure we prevent silently incorrect results by raising # an error if the second derivative is requested via prevent_gradient. grad_without_gradient = array_ops.prevent_gradient( op.outputs[1], message="Currently there is no way to take the second " " derivative of ctc_loss due to the fused implementation's interaction " " with tf.gradients()") # Return gradient for inputs and None for # labels_indices, labels_values and sequence_length return [_BroadcastMul(grad_loss, grad_without_gradient), None, None, None] @tf_export("nn.ctc_greedy_decoder") def ctc_greedy_decoder(inputs, sequence_length, merge_repeated=True): """Performs greedy decoding on the logits given in input (best path). Note: Regardless of the value of merge_repeated, if the maximum index of a given time and batch corresponds to the blank index `(num_classes - 1)`, no new element is emitted. If `merge_repeated` is `True`, merge repeated classes in output. This means that if consecutive logits' maximum indices are the same, only the first of these is emitted. The sequence `A B B * B * B` (where '*' is the blank label) becomes * `A B B B` if `merge_repeated=True`. * `A B B B B` if `merge_repeated=False`. Args: inputs: 3-D `float` `Tensor` sized `[max_time, batch_size, num_classes]`. The logits. sequence_length: 1-D `int32` vector containing sequence lengths, having size `[batch_size]`. merge_repeated: Boolean. Default: True. Returns: A tuple `(decoded, neg_sum_logits)` where decoded: A single-element list. `decoded[0]` is an `SparseTensor` containing the decoded outputs s.t.: `decoded.indices`: Indices matrix `(total_decoded_outputs, 2)`. The rows store: `[batch, time]`. `decoded.values`: Values vector, size `(total_decoded_outputs)`. The vector stores the decoded classes. `decoded.dense_shape`: Shape vector, size `(2)`. The shape values are: `[batch_size, max_decoded_length]` neg_sum_logits: A `float` matrix `(batch_size x 1)` containing, for the sequence found, the negative of the sum of the greatest logit at each timeframe. 
""" outputs = gen_ctc_ops.ctc_greedy_decoder( inputs, sequence_length, merge_repeated=merge_repeated) (decoded_ix, decoded_val, decoded_shape, log_probabilities) = outputs return ([sparse_tensor.SparseTensor(decoded_ix, decoded_val, decoded_shape)], log_probabilities) @tf_export(v1=["nn.ctc_beam_search_decoder"]) def ctc_beam_search_decoder(inputs, sequence_length, beam_width=100, top_paths=1, merge_repeated=True): """Performs beam search decoding on the logits given in input. **Note** The `ctc_greedy_decoder` is a special case of the `ctc_beam_search_decoder` with `top_paths=1` and `beam_width=1` (but that decoder is faster for this special case). If `merge_repeated` is `True`, merge repeated classes in the output beams. This means that if consecutive entries in a beam are the same, only the first of these is emitted. That is, when the sequence is `A B B * B * B` (where '*' is the blank label), the return value is: * `A B` if `merge_repeated = True`. * `A B B B` if `merge_repeated = False`. Args: inputs: 3-D `float` `Tensor`, size `[max_time x batch_size x num_classes]`. The logits. sequence_length: 1-D `int32` vector containing sequence lengths, having size `[batch_size]`. beam_width: An int scalar >= 0 (beam search beam width). top_paths: An int scalar >= 0, <= beam_width (controls output size). merge_repeated: Boolean. Default: True. Returns: A tuple `(decoded, log_probabilities)` where decoded: A list of length top_paths, where `decoded[j]` is a `SparseTensor` containing the decoded outputs: `decoded[j].indices`: Indices matrix `(total_decoded_outputs[j] x 2)` The rows store: [batch, time]. `decoded[j].values`: Values vector, size `(total_decoded_outputs[j])`. The vector stores the decoded classes for beam j. `decoded[j].dense_shape`: Shape vector, size `(2)`. The shape values are: `[batch_size, max_decoded_length[j]]`. log_probability: A `float` matrix `(batch_size x top_paths)` containing sequence log-probabilities. """ decoded_ixs, decoded_vals, decoded_shapes, log_probabilities = ( gen_ctc_ops.ctc_beam_search_decoder( inputs, sequence_length, beam_width=beam_width, top_paths=top_paths, merge_repeated=merge_repeated)) return ([ sparse_tensor.SparseTensor(ix, val, shape) for (ix, val, shape) in zip(decoded_ixs, decoded_vals, decoded_shapes) ], log_probabilities) @tf_export("nn.ctc_beam_search_decoder", v1=["nn.ctc_beam_search_decoder_v2"]) def ctc_beam_search_decoder_v2(inputs, sequence_length, beam_width=100, top_paths=1): """Performs beam search decoding on the logits given in input. **Note** The `ctc_greedy_decoder` is a special case of the `ctc_beam_search_decoder` with `top_paths=1` and `beam_width=1` (but that decoder is faster for this special case). Args: inputs: 3-D `float` `Tensor`, size `[max_time, batch_size, num_classes]`. The logits. sequence_length: 1-D `int32` vector containing sequence lengths, having size `[batch_size]`. beam_width: An int scalar >= 0 (beam search beam width). top_paths: An int scalar >= 0, <= beam_width (controls output size). Returns: A tuple `(decoded, log_probabilities)` where decoded: A list of length top_paths, where `decoded[j]` is a `SparseTensor` containing the decoded outputs: `decoded[j].indices`: Indices matrix `[total_decoded_outputs[j], 2]`; The rows store: `[batch, time]`. `decoded[j].values`: Values vector, size `[total_decoded_outputs[j]]`. The vector stores the decoded classes for beam `j`. `decoded[j].dense_shape`: Shape vector, size `(2)`. The shape values are: `[batch_size, max_decoded_length[j]]`. 
log_probability: A `float` matrix `[batch_size, top_paths]` containing sequence log-probabilities. """ # Note, merge_repeated is an invalid optimization that is removed from the # public API: it returns low probability paths. return ctc_beam_search_decoder( inputs, sequence_length=sequence_length, beam_width=beam_width, top_paths=top_paths, merge_repeated=False) ops.NotDifferentiable("CTCGreedyDecoder") ops.NotDifferentiable("CTCBeamSearchDecoder") def _ctc_state_trans(label_seq): """Compute CTC alignment model transition matrix. Args: label_seq: tensor of shape [batch_size, max_seq_length] Returns: tensor of shape [batch_size, states, states] with a state transition matrix computed for each sequence of the batch. """ with ops.name_scope("ctc_state_trans"): label_seq = ops.convert_to_tensor(label_seq, name="label_seq") batch_size = _get_dim(label_seq, 0) num_labels = _get_dim(label_seq, 1) num_label_states = num_labels + 1 num_states = 2 * num_label_states label_states = math_ops.range(num_label_states) blank_states = label_states + num_label_states # Start state to first label. start_to_label = [[1, 0]] # Blank to label transitions. blank_to_label = array_ops.stack([label_states[1:], blank_states[:-1]], 1) # Label to blank transitions. label_to_blank = array_ops.stack([blank_states, label_states], 1) # Scatter transitions that don't depend on sequence. indices = array_ops.concat([start_to_label, blank_to_label, label_to_blank], 0) values = array_ops.ones([_get_dim(indices, 0)]) trans = array_ops.scatter_nd( indices, values, shape=[num_states, num_states]) trans += linalg_ops.eye(num_states) # Self-loops. # Label to label transitions. Disallow transitions between repeated labels # with no blank state in between. batch_idx = array_ops.zeros_like(label_states[2:]) indices = array_ops.stack([batch_idx, label_states[2:], label_states[1:-1]], 1) indices = array_ops.tile( array_ops.expand_dims(indices, 0), [batch_size, 1, 1]) batch_idx = array_ops.expand_dims(math_ops.range(batch_size), 1) * [1, 0, 0] indices += array_ops.expand_dims(batch_idx, 1) repeats = math_ops.equal(label_seq[:, :-1], label_seq[:, 1:]) values = 1.0 - math_ops.cast(repeats, dtypes.float32) batched_shape = [batch_size, num_states, num_states] label_to_label = array_ops.scatter_nd(indices, values, batched_shape) return array_ops.expand_dims(trans, 0) + label_to_label def ctc_state_log_probs(seq_lengths, max_seq_length): """Computes CTC alignment initial and final state log probabilities. Create the initial/final state values directly as log values to avoid having to take a float64 log on tpu (which does not exist). Args: seq_lengths: int tensor of shape [batch_size], seq lengths in the batch. max_seq_length: int, max sequence length possible. 
Returns: initial_state_log_probs, final_state_log_probs """ batch_size = _get_dim(seq_lengths, 0) num_label_states = max_seq_length + 1 num_duration_states = 2 num_states = num_duration_states * num_label_states log_0 = math_ops.cast( math_ops.log(math_ops.cast(0, dtypes.float64) + 1e-307), dtypes.float32) initial_state_log_probs = array_ops.one_hot( indices=array_ops.zeros([batch_size], dtype=dtypes.int32), depth=num_states, on_value=0.0, off_value=log_0, axis=1) label_final_state_mask = array_ops.one_hot( seq_lengths, depth=num_label_states, axis=0) duration_final_state_mask = array_ops.ones( [num_duration_states, 1, batch_size]) final_state_mask = duration_final_state_mask * label_final_state_mask final_state_log_probs = (1.0 - final_state_mask) * log_0 final_state_log_probs = array_ops.reshape(final_state_log_probs, [num_states, batch_size]) return initial_state_log_probs, array_ops.transpose(final_state_log_probs) def _ilabel_to_state(labels, num_labels, ilabel_log_probs): """Project ilabel log probs to state log probs.""" num_label_states = _get_dim(labels, 1) blank = ilabel_log_probs[:, :, :1] blank = array_ops.tile(blank, [1, 1, num_label_states + 1]) one_hot = array_ops.one_hot(labels, depth=num_labels) one_hot = array_ops.expand_dims(one_hot, axis=0) ilabel_log_probs = array_ops.expand_dims(ilabel_log_probs, axis=2) state_log_probs = math_ops.reduce_sum(ilabel_log_probs * one_hot, axis=3) state_log_probs = array_ops.concat([state_log_probs, blank], axis=2) return array_ops.pad( state_log_probs, [[0, 0], [0, 0], [1, 0]], constant_values=math_ops.log(0.0)) def _state_to_olabel(labels, num_labels, states): """Sum state log probs to ilabel log probs.""" num_label_states = _get_dim(labels, 1) + 1 label_states = states[:, :, 1:num_label_states] blank_states = states[:, :, num_label_states:] one_hot = array_ops.one_hot( labels - 1, depth=(num_labels - 1), on_value=0.0, off_value=math_ops.log(0.0)) one_hot = array_ops.expand_dims(one_hot, axis=0) label_states = array_ops.expand_dims(label_states, axis=3) label_olabels = math_ops.reduce_logsumexp(label_states + one_hot, axis=2) blank_olabels = math_ops.reduce_logsumexp(blank_states, axis=2, keepdims=True) return array_ops.concat([blank_olabels, label_olabels], axis=-1) # pylint: disable=redefined-outer-name def _state_to_olabel_unique(labels, num_labels, states, unique): """Sum state log probs to ilabel log probs using unique label indices.""" num_label_states = _get_dim(labels, 1) + 1 label_states = states[:, :, 1:num_label_states] blank_states = states[:, :, num_label_states:] unique_y, unique_idx = unique mul_reduce = _sum_states(unique_idx, label_states) num_frames = states.shape[0] batch_size = states.shape[1] num_states = num_label_states - 1 batch_state_major = array_ops.transpose(mul_reduce, perm=[1, 2, 0]) batch_state_major = array_ops.reshape(batch_state_major, [batch_size * num_states, num_frames]) batch_offset = math_ops.range(batch_size, dtype=unique_y.dtype) * num_labels indices = unique_y + array_ops.expand_dims(batch_offset, axis=-1) indices = array_ops.reshape(indices, [-1, 1]) scatter = array_ops.scatter_nd( indices=indices, updates=batch_state_major, shape=[batch_size * num_labels, num_frames]) scatter = array_ops.reshape(scatter, [batch_size, num_labels, num_frames]) scatter = array_ops.where( math_ops.equal(scatter, 0.0), array_ops.fill(array_ops.shape(scatter), math_ops.log(0.0)), scatter) label_olabels = array_ops.transpose(scatter, [2, 0, 1]) label_olabels = label_olabels[:, :, 1:] blank_olabels = 
math_ops.reduce_logsumexp(blank_states, axis=2, keepdims=True) return array_ops.concat([blank_olabels, label_olabels], axis=-1) def ctc_loss_and_grad(logits, labels, label_length, logit_length, unique=None): """Computes the CTC loss and gradients. Most users will want fwd_bwd.ctc_loss This function returns the computed gradient, it does not have a gradient of its own defined. Args: logits: tensor of shape [frames, batch_size, num_labels] labels: tensor of shape [batch_size, max_label_seq_length] label_length: tensor of shape [batch_size] Length of reference label sequence in labels. logit_length: tensor of shape [batch_size] Length of input sequence in logits. unique: (optional) unique label indices as computed by unique(labels) If supplied, enables an implementation that is faster and more memory efficient on TPU. Returns: loss: tensor of shape [batch_size] gradient: tensor of shape [frames, batch_size, num_labels] """ num_labels = _get_dim(logits, 2) max_label_seq_length = _get_dim(labels, 1) ilabel_log_probs = nn_ops.log_softmax(logits) state_log_probs = _ilabel_to_state(labels, num_labels, ilabel_log_probs) state_trans_probs = _ctc_state_trans(labels) initial_state_log_probs, final_state_log_probs = ctc_state_log_probs( label_length, max_label_seq_length) fwd_bwd_log_probs, log_likelihood = _forward_backward_log( state_trans_log_probs=math_ops.log(state_trans_probs), initial_state_log_probs=initial_state_log_probs, final_state_log_probs=final_state_log_probs, observed_log_probs=state_log_probs, sequence_length=logit_length) if unique: olabel_log_probs = _state_to_olabel_unique(labels, num_labels, fwd_bwd_log_probs, unique) else: olabel_log_probs = _state_to_olabel(labels, num_labels, fwd_bwd_log_probs) grad = math_ops.exp(ilabel_log_probs) - math_ops.exp(olabel_log_probs) loss = -log_likelihood return loss, grad def _ctc_loss_grad(op, grad_loss, _): grad = op.outputs[1] grad = [array_ops.reshape(grad_loss, [1, -1, 1]) * grad] grad += [None] * (len(op.inputs) - len(grad)) return grad def _ctc_loss_shape(op): return [op.inputs[2].get_shape(), op.inputs[0].get_shape()] @tf_export("nn.ctc_loss", v1=["nn.ctc_loss_v2"]) def ctc_loss_v2(labels, logits, label_length, logit_length, logits_time_major=True, unique=None, blank_index=None, name=None): """Computes CTC (Connectionist Temporal Classification) loss. This op implements the CTC loss as presented in the article: [A. Graves, S. Fernandez, F. Gomez, J. Schmidhuber. Connectionist Temporal Classification: Labeling Unsegmented Sequence Data with Recurrent Neural Networks. ICML 2006, Pittsburgh, USA, pp. 369-376.](http://www.cs.toronto.edu/~graves/icml_2006.pdf) Notes: - Same as the "Classic CTC" in TensorFlow 1.x's tf.compat.v1.nn.ctc_loss setting of preprocess_collapse_repeated=False, ctc_merge_repeated=True - Labels may be supplied as either a dense, zero-padded tensor with a vector of label sequence lengths OR as a SparseTensor. - On TPU and GPU: - Only dense padded labels are supported. - On CPU: - Caller may use SparseTensor or dense padded labels but calling with a SparseTensor will be significantly faster. - Default blank label is 0 rather num_classes - 1, unless overridden by blank_index. Args: labels: tensor of shape [batch_size, max_label_seq_length] or SparseTensor logits: tensor of shape [frames, batch_size, num_labels], if logits_time_major == False, shape is [batch_size, frames, num_labels]. label_length: tensor of shape [batch_size], None if labels is SparseTensor Length of reference label sequence in labels. 
    logit_length: tensor of shape [batch_size] Length of input sequence in
      logits.
    logits_time_major: (optional) If True (default), logits is shaped [time,
      batch, logits]. If False, shape is [batch, time, logits].
    unique: (optional) Unique label indices as computed by
      ctc_unique_labels(labels). If supplied, enables a faster, more
      memory-efficient implementation on TPU.
    blank_index: (optional) Set the class index to use for the blank label.
      Negative values will start from num_classes, i.e., -1 will reproduce the
      ctc_loss behavior of using num_classes - 1 for the blank symbol. There is
      some memory/performance overhead to switching from the default of 0 as an
      additional shifted copy of the logits may be created.
    name: A name for this `Op`. Defaults to "ctc_loss_dense".

  Returns:
    loss: tensor of shape [batch_size], negative log probabilities.
  """
  if isinstance(labels, sparse_tensor.SparseTensor):
    if blank_index is None:
      raise ValueError(
          "blank_index must be given when using SparseTensor labels.")

    if blank_index < 0:
      blank_index += _get_dim(logits, 2)

    if blank_index != _get_dim(logits, 2) - 1:
      logits = array_ops.concat([
          logits[:, :, :blank_index],
          logits[:, :, blank_index + 1:],
          logits[:, :, blank_index:blank_index + 1],
      ], axis=2)
      labels = sparse_tensor.SparseTensor(
          labels.indices,
          array_ops.where(labels.values < blank_index, labels.values,
                          labels.values - 1), labels.dense_shape)

    return ctc_loss(
        labels=labels,
        inputs=logits,
        sequence_length=logit_length,
        time_major=logits_time_major)

  if blank_index is None:
    blank_index = 0

  return ctc_loss_dense(
      labels=labels,
      logits=logits,
      label_length=label_length,
      logit_length=logit_length,
      logits_time_major=logits_time_major,
      unique=unique,
      blank_index=blank_index,
      name=name)


def ctc_loss_dense(labels,
                   logits,
                   label_length,
                   logit_length,
                   logits_time_major=True,
                   unique=None,
                   blank_index=0,
                   name=None):
  """Computes CTC (Connectionist Temporal Classification) loss.

  This op implements the CTC loss as presented in the article:

  [A. Graves, S. Fernandez, F. Gomez, J. Schmidhuber.
  Connectionist Temporal Classification: Labeling Unsegmented Sequence Data
  with Recurrent Neural Networks. ICML 2006, Pittsburgh, USA,
  pp. 369-376.](http://www.cs.toronto.edu/~graves/icml_2006.pdf)

  Using the batched forward backward algorithm described in:

  [Sim, K. C., Narayanan, A., Bagby, T., Sainath, T. N., & Bacchiani, M.
  Improving the efficiency of forward-backward algorithm using batched
  computation in TensorFlow.
  Automatic Speech Recognition and Understanding Workshop (ASRU),
  2017 IEEE (pp. 258-264).
  ](https://ieeexplore.ieee.org/iel7/8260578/8268903/08268944.pdf)

  Notes:
    Significant differences from tf.compat.v1.nn.ctc_loss:
      Supports GPU and TPU (tf.compat.v1.nn.ctc_loss supports CPU only):
        For batched operations, GPU and TPU are significantly faster than using
        ctc_loss on CPU.
        This implementation runs on CPU, but significantly slower than
        ctc_loss.
      Blank label is 0 rather than num_classes - 1, unless overridden by
      blank_index.
      Logits and labels are dense arrays with padding rather than SparseTensor.
      The only mode supported is the same as:
        preprocess_collapse_repeated=False, ctc_merge_repeated=True
        To collapse labels, the caller can preprocess label sequences first.

    The dense implementation supports both CPU, GPU and TPU. A fast path is
    provided that significantly improves memory use for large vocabulary if the
    caller preprocesses label sequences to get unique label indices on the CPU
    (e.g. in the data input pipeline) using ctc_ops.unique and supplies this in
    the optional "unique" kwarg.
    This is especially useful for TPU and GPU but also works if used on CPU.

  Args:
    labels: tensor of shape [batch_size, max_label_seq_length]
    logits: tensor of shape [frames, batch_size, num_labels], if
      logits_time_major == False, shape is [batch_size, frames, num_labels].
    label_length: tensor of shape [batch_size] Length of reference label
      sequence in labels.
    logit_length: tensor of shape [batch_size] Length of input sequence in
      logits.
    logits_time_major: (optional) If True (default), logits is shaped [time,
      batch, logits]. If False, shape is [batch, time, logits].
    unique: (optional) Unique label indices as computed by unique(labels). If
      supplied, enables a faster, more memory-efficient implementation on TPU.
    blank_index: (optional) Set the class index to use for the blank label.
      Negative values will start from num_classes, i.e., -1 will reproduce the
      ctc_loss behavior of using num_classes - 1 for the blank symbol. There is
      some memory/performance overhead to switching from the default of 0 as an
      additional shifted copy of the logits may be created.
    name: A name for this `Op`. Defaults to "ctc_loss_dense".

  Returns:
    loss: tensor of shape [batch_size], negative log probabilities.
  """
  with ops.name_scope(name, "ctc_loss_dense",
                      [logits, labels, label_length, logit_length]):
    logits = ops.convert_to_tensor(logits, name="logits")
    labels = ops.convert_to_tensor(labels, name="labels")
    label_length = ops.convert_to_tensor(label_length, name="label_length")
    logit_length = ops.convert_to_tensor(logit_length, name="logit_length")

    if not logits_time_major:
      logits = array_ops.transpose(logits, perm=[1, 0, 2])

    if blank_index != 0:
      if blank_index < 0:
        blank_index += _get_dim(logits, 2)
      logits = array_ops.concat([
          logits[:, :, blank_index:blank_index + 1],
          logits[:, :, :blank_index],
          logits[:, :, blank_index + 1:],
      ], axis=2)
      labels = array_ops.where(labels < blank_index, labels + 1, labels)

    args = [logits, labels, label_length, logit_length]

    if unique:
      unique_y, unique_idx = unique
      args.extend([unique_y, unique_idx])

    @custom_gradient.custom_gradient
    def compute_ctc_loss(logits_t, labels_t, label_length_t, logit_length_t,
                         *unique_t):
      """Compute CTC loss."""
      logits_t.set_shape(logits.shape)
      labels_t.set_shape(labels.shape)
      label_length_t.set_shape(label_length.shape)
      logit_length_t.set_shape(logit_length.shape)
      kwargs = dict(
          logits=logits_t,
          labels=labels_t,
          label_length=label_length_t,
          logit_length=logit_length_t)
      if unique_t:
        kwargs["unique"] = unique_t
      result = ctc_loss_and_grad(**kwargs)

      def grad(grad_loss):
        grad = [array_ops.reshape(grad_loss, [1, -1, 1]) * result[1]]
        grad += [None] * (len(args) - len(grad))
        return grad

      return result[0], grad

    return compute_ctc_loss(*args)


@tf_export("nn.collapse_repeated")
def collapse_repeated(labels, seq_length, name=None):
  """Merge repeated labels into single labels.

  Args:
    labels: Tensor of shape [batch, max value in seq_length]
    seq_length: Tensor of shape [batch], sequence length of each batch element.
    name: A name for this `Op`. Defaults to "collapse_repeated_labels".

  Returns:
    A tuple `(collapsed_labels, new_seq_length)` where

    collapsed_labels: Tensor of shape [batch, max_seq_length] with repeated
      labels collapsed and padded to max_seq_length, e.g.
      `[[A, A, B, B, A], [A, B, C, D, E]] => [[A, B, A, 0, 0], [A, B, C, D, E]]`
    new_seq_length: int tensor of shape [batch] with new sequence lengths.
""" with ops.name_scope(name, "collapse_repeated_labels", [labels, seq_length]): labels = ops.convert_to_tensor(labels, name="labels") seq_length = ops.convert_to_tensor(seq_length, name="seq_length") # Mask labels that don't equal previous label. label_mask = array_ops.concat([ array_ops.ones_like(labels[:, :1], dtypes.bool), math_ops.not_equal(labels[:, 1:], labels[:, :-1]) ], axis=1) # Filter labels that aren't in the original sequence. maxlen = _get_dim(labels, 1) seq_mask = array_ops.sequence_mask(seq_length, maxlen=maxlen) label_mask = math_ops.logical_and(label_mask, seq_mask) # Count masks for new sequence lengths. new_seq_len = math_ops.reduce_sum( math_ops.cast(label_mask, dtypes.int32), axis=1) # Mask indexes based on sequence length mask. new_maxlen = math_ops.reduce_max(new_seq_len) idx_mask = array_ops.sequence_mask(new_seq_len, maxlen=new_maxlen) # Flatten everything and mask out labels to keep and sparse indices. flat_labels = array_ops.reshape(labels, [-1]) flat_label_mask = array_ops.reshape(label_mask, [-1]) flat_idx_mask = array_ops.reshape(idx_mask, [-1]) idx = math_ops.range(_get_dim(flat_idx_mask, 0)) # Scatter to flat shape. flat = array_ops.scatter_nd( indices=array_ops.expand_dims( array_ops.boolean_mask(idx, flat_idx_mask), axis=1), updates=array_ops.boolean_mask(flat_labels, flat_label_mask), shape=array_ops.shape(flat_idx_mask)) # Reshape back to square batch. batch_size = _get_dim(labels, 0) new_shape = [batch_size, new_maxlen] return (array_ops.reshape(flat, new_shape), math_ops.cast(new_seq_len, seq_length.dtype)) def dense_labels_to_sparse(dense, length): """Convert dense labels with sequence lengths to sparse tensor. Args: dense: tensor of shape [batch, max_length] length: int tensor of shape [batch] The length of each sequence in dense. Returns: tf.SparseTensor with values only for the valid elements of sequences. """ flat_values = array_ops.reshape(dense, [-1]) flat_indices = math_ops.range( array_ops.shape(flat_values, out_type=dtypes.int64)[0]) mask = array_ops.sequence_mask(length, maxlen=array_ops.shape(dense)[1]) flat_mask = array_ops.reshape(mask, [-1]) indices = array_ops.expand_dims( array_ops.boolean_mask(flat_indices, flat_mask), 1) values = array_ops.boolean_mask(flat_values, flat_mask) sparse = sparse_tensor.SparseTensor( indices=indices, values=math_ops.cast(values, dtypes.int32), dense_shape=array_ops.shape(flat_values, out_type=dtypes.int64)) reshaped = sparse_ops.sparse_reshape(sparse, array_ops.shape(dense)) max_length = math_ops.reduce_max(length) return sparse_tensor.SparseTensor( indices=reshaped.indices, values=reshaped.values, dense_shape=[ math_ops.cast(reshaped.dense_shape[0], dtypes.int64), math_ops.cast(max_length, dtypes.int64) ]) @tf_export("nn.ctc_unique_labels") def ctc_unique_labels(labels, name=None): """Get unique labels and indices for batched labels for `tf.nn.ctc_loss`. For use with `tf.nn.ctc_loss` optional argument `unique`: This op can be used to preprocess labels in input pipeline to for better speed/memory use computing the ctc loss on TPU. Example: ctc_unique_labels([[3, 4, 4, 3]]) -> unique labels padded with 0: [[3, 4, 0, 0]] indices of original labels in unique: [0, 1, 1, 0] Args: labels: tensor of shape [batch_size, max_label_length] padded with 0. name: A name for this `Op`. Defaults to "ctc_unique_labels". 
  Returns:
    tuple of
      - unique labels, tensor of shape `[batch_size, max_label_length]`
      - indices into unique labels, shape `[batch_size, max_label_length]`
  """
  with ops.name_scope(name, "ctc_unique_labels", [labels]):
    labels = ops.convert_to_tensor(labels, name="labels")

    def _unique(x):
      u = array_ops.unique(x)
      y = array_ops.pad(u.y, [[0, _get_dim(u.idx, 0) - _get_dim(u.y, 0)]])
      y = math_ops.cast(y, dtypes.int64)
      return [y, u.idx]

    return map_fn.map_fn(_unique, labels, dtype=[dtypes.int64, dtypes.int32])


def _sum_states(idx, states):
  """Take logsumexp for each unique state out of all label states.

  Args:
    idx: tensor of shape [batch, label_length] For each sequence, indices into
      a set of unique labels as computed by calling unique.
    states: tensor of shape [frames, batch, label_length] Log probabilities for
      each label state.

  Returns:
    tensor of shape [frames, batch_size, label_length], log probabilities
      summed for each unique label of the sequence.
  """
  with ops.name_scope("sum_states"):
    idx = ops.convert_to_tensor(idx, name="idx")
    num_states = _get_dim(states, 2)
    states = array_ops.expand_dims(states, axis=2)
    one_hot = array_ops.one_hot(
        idx,
        depth=num_states,
        on_value=0.0,
        off_value=math_ops.log(0.0),
        axis=1)
    return math_ops.reduce_logsumexp(states + one_hot, axis=-1)


def _forward_backward_log(state_trans_log_probs, initial_state_log_probs,
                          final_state_log_probs, observed_log_probs,
                          sequence_length):
  """Forward-backward algorithm computed in log domain.

  Args:
    state_trans_log_probs: tensor of shape [states, states] or if different
      transition matrix per batch [batch_size, states, states]
    initial_state_log_probs: tensor of shape [batch_size, states]
    final_state_log_probs: tensor of shape [batch_size, states]
    observed_log_probs: tensor of shape [frames, batch_size, states]
    sequence_length: tensor of shape [batch_size]

  Returns:
    forward backward log probabilities: tensor of shape [frames, batch, states]
    log_likelihood: tensor of shape [batch_size]

  Raises:
    ValueError: If state_trans_log_probs has unknown or incorrect rank.
  """
  if state_trans_log_probs.shape.ndims == 2:
    perm = [1, 0]
  elif state_trans_log_probs.shape.ndims == 3:
    perm = [0, 2, 1]
  else:
    raise ValueError(
        "state_trans_log_probs rank must be known and == 2 or 3, is: %s" %
        state_trans_log_probs.shape.ndims)

  bwd_state_trans_log_probs = array_ops.transpose(state_trans_log_probs, perm)
  batch_size = _get_dim(observed_log_probs, 1)

  def _forward(state_log_prob, obs_log_prob):
    state_log_prob = array_ops.expand_dims(state_log_prob, axis=1)  # Broadcast.
    state_log_prob += state_trans_log_probs
    state_log_prob = math_ops.reduce_logsumexp(state_log_prob, axis=-1)
    state_log_prob += obs_log_prob
    log_prob_sum = math_ops.reduce_logsumexp(
        state_log_prob, axis=-1, keepdims=True)
    state_log_prob -= log_prob_sum
    return state_log_prob

  fwd = _scan(
      _forward, observed_log_probs, initial_state_log_probs, inclusive=True)

  def _backward(accs, elems):
    """Calculate log probs and cumulative sum masked for sequence length."""
    state_log_prob, cum_log_sum = accs
    obs_log_prob, mask = elems
    state_log_prob += obs_log_prob
    state_log_prob = array_ops.expand_dims(state_log_prob, axis=1)  # Broadcast.
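    # Shapes at this point: state_log_prob is [batch, 1, states]; adding the
    # [states, states] (or [batch, states, states]) transition matrix
    # broadcasts to [batch, states, states], and the logsumexp below reduces
    # over the last axis back to [batch, states].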
state_log_prob += bwd_state_trans_log_probs state_log_prob = math_ops.reduce_logsumexp(state_log_prob, axis=-1) log_prob_sum = math_ops.reduce_logsumexp( state_log_prob, axis=-1, keepdims=True) state_log_prob -= log_prob_sum cum_log_sum += array_ops.squeeze(log_prob_sum) * mask batched_mask = array_ops.expand_dims(mask, axis=1) out = state_log_prob * batched_mask out += final_state_log_probs * (1.0 - batched_mask) return out, cum_log_sum zero_log_sum = array_ops.zeros([batch_size]) maxlen = _get_dim(observed_log_probs, 0) mask = array_ops.sequence_mask(sequence_length, maxlen, dtypes.float32) mask = array_ops.transpose(mask, perm=[1, 0]) bwd, cum_log_sum = _scan( _backward, (observed_log_probs, mask), (final_state_log_probs, zero_log_sum), reverse=True, inclusive=True) fwd_bwd_log_probs = fwd[1:] + bwd[1:] fwd_bwd_log_probs_sum = math_ops.reduce_logsumexp( fwd_bwd_log_probs, axis=2, keepdims=True) fwd_bwd_log_probs -= fwd_bwd_log_probs_sum fwd_bwd_log_probs += math_ops.log(array_ops.expand_dims(mask, axis=2)) log_likelihood = bwd[0, :, 0] + cum_log_sum[0] return fwd_bwd_log_probs, log_likelihood # TODO(tombagby): This is currently faster for the ctc implementation than using # functional_ops.scan, but could be replaced by that or something similar if # things change. def _scan(fn, elems, initial, reverse=False, inclusive=False, final_only=False): """Repeatedly applies callable `fn` to a sequence of elements. Implemented by functional_ops.While, tpu friendly, no gradient. This is similar to functional_ops.scan but significantly faster on tpu/gpu for the forward backward use case. Examples: scan(lambda a, e: a + e, [1.0, 2.0, 3.0], 1.0) => [2.0, 4.0, 7.0] Multiple accumulators: scan(lambda a, e: (a[0] + e, a[1] * e), [1.0, 2.0, 3.0], (0.0, 1.0)) Multiple inputs: scan(lambda a, e: a + (e[0] * e[1]), (elems1, elems2), 0.0) Args: fn: callable, fn(accumulators, element) return new accumulator values. The (possibly nested) sequence of accumulators is the same as `initial` and the return value must have the same structure. elems: A (possibly nested) tensor which will be unpacked along the first dimension. The resulting slices will be the second argument to fn. The first dimension of all nested input tensors must be the same. initial: A tensor or (possibly nested) sequence of tensors with initial values for the accumulators. reverse: (optional) True enables scan and output elems in reverse order. inclusive: (optional) True includes the initial accumulator values in the output. Length of output will be len(elem sequence) + 1. Not meaningful if final_only is True. final_only: (optional) When True, return only the final accumulated values, not the concatenation of accumulated values for each input. Returns: A (possibly nested) sequence of tensors with the results of applying fn to tensors unpacked from elems and previous accumulator values. """ flat_elems = [ops.convert_to_tensor(x) for x in nest.flatten(elems)] num_elems = array_ops.shape(flat_elems[0])[0] pack_elems = lambda x: nest.pack_sequence_as(structure=elems, flat_sequence=x) flat_initial = [ops.convert_to_tensor(x) for x in nest.flatten(initial)] pack = lambda x: nest.pack_sequence_as(structure=initial, flat_sequence=x) accum_dtypes = [x.dtype for x in flat_initial] num_accums = len(flat_initial) # Types for counter, [outputs], [accumulators] loop arguments. 
if final_only: loop_dtypes = [dtypes.int32, dtypes.int32] + accum_dtypes else: loop_dtypes = [dtypes.int32, dtypes.int32] + accum_dtypes + accum_dtypes # TODO(tombagby): Update to tfe.defun def cond(i, num_elems, *args): del args return i >= 0 if reverse else i < num_elems # The loop *args are [output tensors] + [accumulator tensors] which must # be paired. Each output corresponds to one accumulator. def body(i, num_elems, *args): """Loop body.""" i.set_shape([]) if final_only: accum = args else: out, accum = args[:num_accums], args[num_accums:] slices = [array_ops.gather(e, i) for e in flat_elems] accum = fn(pack(accum), pack_elems(slices)) flat_accum = nest.flatten(accum) if final_only: new_out = [] else: update_i = i + 1 if inclusive and not reverse else i new_out = [ inplace_ops.alias_inplace_update(x, update_i, y) for x, y in zip(out, flat_accum) ] i = i - 1 if reverse else i + 1 return [i, num_elems] + new_out + flat_accum init_i = ( array_ops.shape(flat_elems[0])[0] - 1 if reverse else constant_op.constant(0, dtype=dtypes.int32)) outputs = [] if not final_only: num_outputs = array_ops.shape(flat_elems[0])[0] + (1 if inclusive else 0) for initial_accum in flat_initial: out_shape = array_ops.concat( [[num_outputs], array_ops.shape(initial_accum)], 0) out = inplace_ops.empty(out_shape, dtype=initial_accum.dtype, init=True) if inclusive: out = inplace_ops.alias_inplace_add(out, init_i + (1 if reverse else 0), initial_accum) outputs.append(out) loop_in = [init_i, num_elems] + outputs + flat_initial hostmem = [ i for i, x in enumerate(loop_in) if x.dtype.base_dtype in (dtypes.int32, dtypes.int64) ] if context.executing_eagerly(): loop_results = loop_in while cond(*loop_results): loop_results = body(*loop_results) else: # TODO(tombagby): Update to while_v2. cond = function.Defun(*loop_dtypes)(cond) body = function.Defun(*loop_dtypes)(body) loop_results = functional_ops.While(loop_in, cond, body, hostmem=hostmem) out = loop_results[2:num_accums + 2] return pack(out) def _get_dim(tensor, i): """Get value of tensor shape[i] preferring static value if available.""" return tensor_shape.dimension_value( tensor.shape[i]) or array_ops.shape(tensor)[i]
tensorflow-master
tensorflow/python/ops/ctc_ops.py
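A brief illustrative sketch (not part of ctc_ops.py above) of calling the tf.nn.ctc_unique_labels helper that file exports; it assumes eager execution and simply mirrors the example given in the docstring.

import tensorflow as tf

# Batched labels, padded with 0, as described in the ctc_unique_labels docstring.
labels = tf.constant([[3, 4, 4, 3]], dtype=tf.int64)
unique_labels, unique_indices = tf.nn.ctc_unique_labels(labels)
# Per the docstring above:
#   unique_labels  -> [[3, 4, 0, 0]]  (unique labels, padded with 0)
#   unique_indices -> [[0, 1, 1, 0]]  (index of each original label in the unique set)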
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Stateless random ops which take seed as a tensor input.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.ops import gen_stateless_random_ops from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import random_ops from tensorflow.python.ops import math_ops from tensorflow.python.util import deprecation from tensorflow.python.util.tf_export import tf_export ops.NotDifferentiable("StatelessMultinomial") ops.NotDifferentiable("StatelessRandomNormal") ops.NotDifferentiable("StatelessRandomUniform") ops.NotDifferentiable("StatelessRandomUniformInt") ops.NotDifferentiable("StatelessTruncatedNormal") @tf_export("random.stateless_uniform") def stateless_random_uniform(shape, seed, minval=0, maxval=None, dtype=dtypes.float32, name=None): """Outputs deterministic pseudorandom values from a uniform distribution. This is a stateless version of `tf.random.uniform`: if run twice with the same seeds, it will produce the same pseudorandom numbers. The output is consistent across multiple runs on the same hardware (and between CPU and GPU), but may change between versions of TensorFlow or on non-CPU/GPU hardware. The generated values follow a uniform distribution in the range `[minval, maxval)`. The lower bound `minval` is included in the range, while the upper bound `maxval` is excluded. For floats, the default range is `[0, 1)`. For ints, at least `maxval` must be specified explicitly. In the integer case, the random integers are slightly biased unless `maxval - minval` is an exact power of two. The bias is small for values of `maxval - minval` significantly smaller than the range of the output (either `2**32` or `2**64`). Args: shape: A 1-D integer Tensor or Python array. The shape of the output tensor. seed: A shape [2] integer Tensor of seeds to the random number generator. minval: A 0-D Tensor or Python value of type `dtype`. The lower bound on the range of random values to generate. Defaults to 0. maxval: A 0-D Tensor or Python value of type `dtype`. The upper bound on the range of random values to generate. Defaults to 1 if `dtype` is floating point. dtype: The type of the output: `float16`, `float32`, `float64`, `int32`, or `int64`. name: A name for the operation (optional). Returns: A tensor of the specified shape filled with random uniform values. Raises: ValueError: If `dtype` is integral and `maxval` is not specified. 
""" dtype = dtypes.as_dtype(dtype) if dtype not in (dtypes.float16, dtypes.bfloat16, dtypes.float32, dtypes.float64, dtypes.int32, dtypes.int64): raise ValueError("Invalid dtype %r" % dtype) if maxval is None: if dtype.is_integer: raise ValueError("Must specify maxval for integer dtype %r" % dtype) maxval = 1 with ops.name_scope(name, "stateless_random_uniform", [shape, seed, minval, maxval]) as name: shape = random_ops._ShapeTensor(shape) # pylint: disable=protected-access minval = ops.convert_to_tensor(minval, dtype=dtype, name="min") maxval = ops.convert_to_tensor(maxval, dtype=dtype, name="max") if dtype.is_integer: return gen_stateless_random_ops.stateless_random_uniform_int( shape, seed=seed, minval=minval, maxval=maxval, name=name) else: rnd = gen_stateless_random_ops.stateless_random_uniform( shape, seed=seed, dtype=dtype) return math_ops.add(rnd * (maxval - minval), minval, name=name) @tf_export("random.stateless_normal") def stateless_random_normal(shape, seed, mean=0.0, stddev=1.0, dtype=dtypes.float32, name=None): """Outputs deterministic pseudorandom values from a normal distribution. This is a stateless version of `tf.random.normal`: if run twice with the same seeds, it will produce the same pseudorandom numbers. The output is consistent across multiple runs on the same hardware (and between CPU and GPU), but may change between versions of TensorFlow or on non-CPU/GPU hardware. Args: shape: A 1-D integer Tensor or Python array. The shape of the output tensor. seed: A shape [2] integer Tensor of seeds to the random number generator. mean: A 0-D Tensor or Python value of type `dtype`. The mean of the normal distribution. stddev: A 0-D Tensor or Python value of type `dtype`. The standard deviation of the normal distribution. dtype: The type of the output. name: A name for the operation (optional). Returns: A tensor of the specified shape filled with random normal values. """ with ops.name_scope(name, "stateless_random_normal", [shape, seed, mean, stddev]) as name: shape = random_ops._ShapeTensor(shape) # pylint: disable=protected-access mean = ops.convert_to_tensor(mean, dtype=dtype, name="mean") stddev = ops.convert_to_tensor(stddev, dtype=dtype, name="stddev") rnd = gen_stateless_random_ops.stateless_random_normal(shape, seed, dtype) return math_ops.add(rnd * stddev, mean, name=name) @tf_export("random.stateless_truncated_normal") def stateless_truncated_normal(shape, seed, mean=0.0, stddev=1.0, dtype=dtypes.float32, name=None): """Outputs deterministic pseudorandom values, truncated normally distributed. This is a stateless version of `tf.random.truncated_normal`: if run twice with the same seeds, it will produce the same pseudorandom numbers. The output is consistent across multiple runs on the same hardware (and between CPU and GPU), but may change between versions of TensorFlow or on non-CPU/GPU hardware. The generated values follow a normal distribution with specified mean and standard deviation, except that values whose magnitude is more than 2 standard deviations from the mean are dropped and re-picked. Args: shape: A 1-D integer Tensor or Python array. The shape of the output tensor. seed: A shape [2] integer Tensor of seeds to the random number generator. mean: A 0-D Tensor or Python value of type `dtype`. The mean of the truncated normal distribution. stddev: A 0-D Tensor or Python value of type `dtype`. The standard deviation of the normal distribution, before truncation. dtype: The type of the output. name: A name for the operation (optional). 
Returns: A tensor of the specified shape filled with random truncated normal values. """ with ops.name_scope(name, "stateless_truncated_normal", [shape, seed, mean, stddev]) as name: shape = random_ops._ShapeTensor(shape) # pylint: disable=protected-access mean = ops.convert_to_tensor(mean, dtype=dtype, name="mean") stddev = ops.convert_to_tensor(stddev, dtype=dtype, name="stddev") rnd = gen_stateless_random_ops.stateless_truncated_normal( shape, seed, dtype) return math_ops.add(rnd * stddev, mean, name=name) @tf_export(v1=["random.stateless_multinomial"]) @deprecation.deprecated( date=None, instructions="Use `tf.random.stateless_categorical` instead.") def stateless_multinomial(logits, num_samples, seed, output_dtype=dtypes.int64, name=None): """Draws deterministic pseudorandom samples from a multinomial distribution. This is a stateless version of `tf.random.categorical`: if run twice with the same seeds, it will produce the same pseudorandom numbers. The output is consistent across multiple runs on the same hardware (and between CPU and GPU), but may change between versions of TensorFlow or on non-CPU/GPU hardware. Example: ```python # samples has shape [1, 5], where each value is either 0 or 1 with equal # probability. samples = tf.random.stateless_categorical( tf.math.log([[10., 10.]]), 5, seed=[7, 17]) ``` Args: logits: 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, :]` represents the unnormalized log-probabilities for all classes. num_samples: 0-D. Number of independent samples to draw for each row slice. seed: A shape [2] integer Tensor of seeds to the random number generator. output_dtype: integer type to use for the output. Defaults to int64. name: Optional name for the operation. Returns: The drawn samples of shape `[batch_size, num_samples]`. """ with ops.name_scope(name, "stateless_multinomial", [logits, seed]): return stateless_multinomial_categorical_impl(logits, num_samples, output_dtype, seed) @tf_export("random.stateless_categorical") def stateless_categorical(logits, num_samples, seed, dtype=dtypes.int64, name=None): """Draws deterministic pseudorandom samples from a categorical distribution. This is a stateless version of `tf.categorical`: if run twice with the same seeds, it will produce the same pseudorandom numbers. The output is consistent across multiple runs on the same hardware (and between CPU and GPU), but may change between versions of TensorFlow or on non-CPU/GPU hardware. Example: ```python # samples has shape [1, 5], where each value is either 0 or 1 with equal # probability. samples = tf.random.stateless_categorical( tf.math.log([[10., 10.]]), 5, seed=[7, 17]) ``` Args: logits: 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, :]` represents the unnormalized log-probabilities for all classes. num_samples: 0-D. Number of independent samples to draw for each row slice. seed: A shape [2] integer Tensor of seeds to the random number generator. dtype: integer type to use for the output. Defaults to int64. name: Optional name for the operation. Returns: The drawn samples of shape `[batch_size, num_samples]`. 
""" with ops.name_scope(name, "stateless_categorical", [logits, seed]): return stateless_multinomial_categorical_impl(logits, num_samples, dtype, seed) def stateless_multinomial_categorical_impl(logits, num_samples, dtype, seed): """Implementation for stateless multinomial/categorical ops (v1/v2).""" logits = ops.convert_to_tensor(logits, name="logits") return gen_stateless_random_ops.stateless_multinomial( logits, num_samples, seed, output_dtype=dtype)
tensorflow-master
tensorflow/python/ops/stateless_random_ops.py
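A brief illustrative sketch (not part of stateless_random_ops.py above) of the stateless RNG ops it exports; with the same shape-[2] integer seed, repeated calls are expected to return identical values.

import tensorflow as tf

seed = [42, 7]  # stateless ops take an explicit shape-[2] integer seed
u1 = tf.random.stateless_uniform([2, 3], seed=seed)        # float32 in [0, 1)
u2 = tf.random.stateless_uniform([2, 3], seed=seed)        # same values as u1
ints = tf.random.stateless_uniform(
    [4], seed=seed, minval=0, maxval=10, dtype=tf.int32)   # maxval is required for integer dtypes
norm = tf.random.stateless_normal([3], seed=seed, mean=1.0, stddev=2.0)
cats = tf.random.stateless_categorical(
    tf.math.log([[0.5, 0.5]]), num_samples=5, seed=seed)   # shape [1, 5], dtype int64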
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Gradients for CuudnnRNN operators.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import ops from tensorflow.python.ops import gen_cudnn_rnn_ops @ops.RegisterGradient("CudnnRNN") def _cudnn_rnn_backward(op, *grads): """Gradients for the CudnnRNN op.""" if not op.get_attr("is_training"): raise ValueError( "To use CudnnRNN in gradients, is_training must be set to True.") return gen_cudnn_rnn_ops.cudnn_rnn_backprop( input=op.inputs[0], input_h=op.inputs[1], input_c=op.inputs[2], params=op.inputs[3], output=op.outputs[0], output_h=op.outputs[1], output_c=op.outputs[2], output_backprop=grads[0], output_h_backprop=grads[1], output_c_backprop=grads[2], reserve_space=op.outputs[3], dropout=op.get_attr("dropout"), seed=op.get_attr("seed"), seed2=op.get_attr("seed2"), rnn_mode=op.get_attr("rnn_mode"), input_mode=op.get_attr("input_mode"), direction=op.get_attr("direction")) @ops.RegisterGradient("CudnnRNNV2") def _cudnn_rnn_backward_v2(op, *grad): if not op.get_attr("is_training"): raise ValueError( "To use CudnnRNNV2 in gradients, is_training must be set to True.") return gen_cudnn_rnn_ops.cudnn_rnn_backprop_v2( input=op.inputs[0], input_h=op.inputs[1], input_c=op.inputs[2], params=op.inputs[3], output=op.outputs[0], output_h=op.outputs[1], output_c=op.outputs[2], output_backprop=grad[0], output_h_backprop=grad[1], output_c_backprop=grad[2], reserve_space=op.outputs[3], host_reserved=op.outputs[4], dropout=op.get_attr("dropout"), seed=op.get_attr("seed"), seed2=op.get_attr("seed2"), rnn_mode=op.get_attr("rnn_mode"), input_mode=op.get_attr("input_mode"), direction=op.get_attr("direction")) @ops.RegisterGradient("CudnnRNNV3") def _cudnn_rnn_backwardv3(op, *grads): """Gradients for the CudnnRNNV3 op.""" if not op.get_attr("is_training"): raise ValueError( "To use CudnnRNNV3 in gradients, is_training must be set to" " True.") return gen_cudnn_rnn_ops.cudnn_rnn_backprop_v3( input=op.inputs[0], input_h=op.inputs[1], input_c=op.inputs[2], params=op.inputs[3], sequence_lengths=op.inputs[4], output=op.outputs[0], output_h=op.outputs[1], output_c=op.outputs[2], output_backprop=grads[0], output_h_backprop=grads[1], output_c_backprop=grads[2], reserve_space=op.outputs[3], host_reserved=op.outputs[4], dropout=op.get_attr("dropout"), seed=op.get_attr("seed"), seed2=op.get_attr("seed2"), time_major=op.get_attr("time_major"), num_proj=op.get_attr("num_proj"), rnn_mode=op.get_attr("rnn_mode"), input_mode=op.get_attr("input_mode"), direction=op.get_attr("direction")) + (None,)
tensorflow-master
tensorflow/python/ops/cudnn_rnn_grad.py
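A minimal sketch (not part of cudnn_rnn_grad.py above) of the ops.RegisterGradient pattern that file uses; "MyMulLikeOp" and its gradient math are hypothetical and for illustration only.

from tensorflow.python.framework import ops

@ops.RegisterGradient("MyMulLikeOp")  # hypothetical op type, illustration only
def _my_mul_like_grad(op, grad):
  # A registered gradient function receives the forward op and the gradients of
  # its outputs, and returns one gradient per op input.
  x, y = op.inputs
  return grad * y, grad * x

The registrations above follow the same shape: they read op.inputs, op.outputs and op.get_attr(...), and return one gradient per input, appending None for non-differentiable inputs (as the CudnnRNNV3 gradient does with "+ (None,)").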
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Control Flow Operations. See the [autograph](https://www.tensorflow.org/guide/autographs) guide. """ # pylint: disable=g-bad-name from __future__ import absolute_import from __future__ import division from __future__ import print_function import abc import collections import functools import six from tensorflow.core.framework import attr_value_pb2 from tensorflow.core.protobuf import control_flow_pb2 from tensorflow.python.eager import context from tensorflow.python.framework import composite_tensor from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import tensor_spec from tensorflow.python.framework import tensor_util from tensorflow.python.framework import type_spec from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_util as util from tensorflow.python.ops import gen_array_ops from tensorflow.python.ops import gen_control_flow_ops from tensorflow.python.ops import gen_logging_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import tensor_array_ops # go/tf-wildcard-import # pylint: disable=wildcard-import,undefined-variable from tensorflow.python.ops.gen_control_flow_ops import * # pylint: enable=wildcard-import from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util import compat from tensorflow.python.util import deprecation from tensorflow.python.util import nest from tensorflow.python.util import tf_should_use from tensorflow.python.util.lazy_loader import LazyLoader from tensorflow.python.util.tf_export import tf_export # This is to avoid a circular dependency: # cond_v2 -> gradients_util -> control_flow_ops cond_v2 = LazyLoader("cond_v2", globals(), "tensorflow.python.ops.cond_v2") # This is to avoid circular dependencies: # while_v2 -> control_flow_ops # while_v2 -> gradients_util -> control_flow_ops while_v2 = LazyLoader("while_v2", globals(), "tensorflow.python.ops.while_v2") # We override the 'tuple' for a control flow op, so we keep python's # existing 'tuple' for later use in this module. _basetuple = tuple def _summarize_eager(tensor, summarize=None): """Returns a summarized string representation of eager `tensor`. 
Args: tensor: EagerTensor to summarize summarize: Include these many first elements of `array` """ # Emulate the behavior of Tensor::SummarizeValue() if summarize is None: summarize = 3 elif summarize < 0: summarize = array_ops.size(tensor) # reshape((-1,)) is the fastest way to get a flat array view if tensor._rank(): # pylint: disable=protected-access flat = tensor.numpy().reshape((-1,)) lst = [str(x) for x in flat[:summarize]] if len(lst) < flat.size: lst.append("...") else: # tensor.numpy() returns a scalar for zero dimensional arrays if summarize != 0: lst = [str(tensor.numpy())] else: lst = [] return ", ".join(lst) # pylint: disable=protected-access # Assert and Print are special symbols in python, so we must # use an upper-case version of them. @tf_export("debugging.Assert", "Assert") @tf_should_use.should_use_result def Assert(condition, data, summarize=None, name=None): """Asserts that the given condition is true. If `condition` evaluates to false, print the list of tensors in `data`. `summarize` determines how many entries of the tensors to print. NOTE: In graph mode, to ensure that Assert executes, one usually attaches a dependency: ```python # Ensure maximum element of x is smaller or equal to 1 assert_op = tf.Assert(tf.less_equal(tf.reduce_max(x), 1.), [x]) with tf.control_dependencies([assert_op]): ... code using x ... ``` Args: condition: The condition to evaluate. data: The tensors to print out when condition is false. summarize: Print this many entries of each tensor. name: A name for this operation (optional). Returns: assert_op: An `Operation` that, when executed, raises a `tf.errors.InvalidArgumentError` if `condition` is not true. @compatibility(eager) returns None @end_compatibility Raises: @compatibility(eager) `tf.errors.InvalidArgumentError` if `condition` is not true @end_compatibility """ if context.executing_eagerly(): if not condition: xs = ops.convert_n_to_tensor(data) data_str = [_summarize_eager(x, summarize) for x in xs] raise errors.InvalidArgumentError( node_def=None, op=None, message="Expected '%s' to be true. Summarized data: %s" % (condition, "\n".join(data_str))) return with ops.name_scope(name, "Assert", [condition, data]) as name: xs = ops.convert_n_to_tensor(data) if all(x.dtype in {dtypes.string, dtypes.int32} for x in xs): # As a simple heuristic, we assume that string and int32 are # on host to avoid the need to use cond. If it is not case, # we will pay the price copying the tensor to host memory. return gen_logging_ops._assert(condition, data, summarize, name="Assert") else: condition = ops.convert_to_tensor(condition, name="Condition") def true_assert(): return gen_logging_ops._assert( condition, data, summarize, name="Assert") guarded_assert = cond(condition, no_op, true_assert, name="AssertGuard") if context.executing_eagerly(): return return guarded_assert.op def _Identity(data, name=None): """Return a tensor with the same shape and contents as the input tensor. Args: data: A Tensor. name: A name for this operation (optional). Returns: A Tensor with the same type and value as the input Tensor. 
""" data = ops.internal_convert_to_tensor_or_composite(data, as_ref=True) if isinstance(data, ops.Tensor): if data.dtype._is_ref_dtype: # pylint: disable=protected-access return gen_array_ops.ref_identity(data, name=name) else: return array_ops.identity(data, name=name) elif isinstance(data, composite_tensor.CompositeTensor): return nest.map_structure(_Identity, data, expand_composites=True) else: raise TypeError("Type %s not supported" % type(data)) def _NextIteration(data, name=None): data = ops.internal_convert_to_tensor_or_composite(data, as_ref=True) if isinstance(data, ops.Tensor): if data.dtype._is_ref_dtype: # pylint: disable=protected-access return ref_next_iteration(data, name=name) else: return next_iteration(data, name=name) elif isinstance(data, composite_tensor.CompositeTensor): return nest.map_structure(_NextIteration, data, expand_composites=True) else: raise TypeError("Type %s not supported" % type(data)) def _Enter(data, frame_name, is_constant=False, parallel_iterations=10, use_ref=True, use_input_shape=True, name=None): """Creates or finds a child frame, and makes `data` available to it. The unique `frame_name` is used by the `Executor` to identify frames. If `is_constant` is true, `data` is a constant in the child frame; otherwise it may be changed in the child frame. At most `parallel_iterations` iterations are run in parallel in the child frame. Args: data: The tensor to be made available to the child frame. frame_name: The name of the child frame. is_constant: If true, the output is constant within the child frame. parallel_iterations: The number of iterations allowed to run in parallel. use_ref: If true, use ref_enter if data is of ref type. use_input_shape: If true, set the result's shape based on data's shape. name: A name for this operation (optional). Returns: The same tensor as `data`. """ data = ops.internal_convert_to_tensor_or_composite(data, as_ref=True) if isinstance(data, ops.Tensor): if data.dtype._is_ref_dtype and use_ref: # pylint: disable=protected-access result = gen_control_flow_ops.ref_enter( data, frame_name, is_constant, parallel_iterations, name=name) else: result = gen_control_flow_ops.enter( data, frame_name, is_constant, parallel_iterations, name=name) if use_input_shape: result.set_shape(data.get_shape()) return result elif isinstance(data, composite_tensor.CompositeTensor): def enter_component(t): return _Enter(t, frame_name, is_constant, parallel_iterations, use_ref, use_input_shape) return nest.map_structure(enter_component, data, expand_composites=True) else: raise TypeError("Type %s not supported" % type(data)) def exit(data, name=None): # pylint: disable=redefined-builtin """Exits the current frame to its parent frame. Exit makes its input `data` available to the parent frame. Args: data: The tensor to be made available to the parent frame. name: A name for this operation (optional). Returns: The same tensor as `data`. """ data = ops.internal_convert_to_tensor_or_composite(data, as_ref=True) if isinstance(data, ops.Tensor): if data.dtype._is_ref_dtype: # pylint: disable=protected-access return gen_control_flow_ops.ref_exit(data, name) else: return gen_control_flow_ops._exit(data, name) elif isinstance(data, composite_tensor.CompositeTensor): return nest.map_structure(exit, data, expand_composites=True) else: raise TypeError("Type %s not supported" % type(data)) def switch(data, pred, dtype=None, name=None): """Forwards `data` to an output determined by `pred`. If `pred` is false, the `data` input is forwarded to the first output. 
Otherwise, the data goes to the second output. This op handles `Tensor`s and `IndexedSlices`. Args: data: The tensor to be forwarded to the appropriate output. pred: A scalar that specifies which output port will receive data. dtype: Optional element type for the returned tensor. If missing, the type is inferred from the type of `value`. name: A name for this operation (optional). Returns: `(output_false, output_true)`: If `pred` is true, data will be forwarded to `output_true`, otherwise it goes to `output_false`. """ with ops.name_scope(name, "Switch", [data, pred]) as name: data = ops.internal_convert_to_tensor_or_composite( data, dtype=dtype, name="data", as_ref=True) pred = ops.convert_to_tensor(pred, name="pred") if isinstance(data, ops.Tensor): return gen_control_flow_ops.switch(data, pred, name=name) else: if not isinstance(data, composite_tensor.CompositeTensor): raise TypeError("Type %s not supported" % type(data)) tensors = nest.flatten(data, expand_composites=True) mapped = [gen_control_flow_ops.switch(tensor, pred) for tensor in tensors] mapped_f, mapped_t = zip(*mapped) return (nest.pack_sequence_as(data, mapped_f, expand_composites=True), nest.pack_sequence_as(data, mapped_t, expand_composites=True)) def _SwitchRefOrTensor(data, pred, name="Switch"): """Forwards `data` to an output determined by `pred`. If `pred` is false, the `data` input is forwarded to the first output. Otherwise, the data goes to the second output. This op handles `Tensor`s and `IndexedSlices`. Args: data: The tensor to be forwarded to the appropriate output. pred: A scalar that specifies which output port will receive data. name: A name for this operation (optional). Returns: `(output_false, output_true)`: If `pred` is true, data will be forwarded to `output_true`, otherwise it goes to `output_false`. Raises: TypeError: if data is not a Tensor or IndexedSlices """ data = ops.convert_to_tensor_or_composite(data, name="data") # NOTE(vrv): ops.colocate_with(data, ignore_existing=True) below # addresses the following scenario. # # Assume you execute Optimizer.apply_gradients() in a branch of a cond(). # # 1. The update op is created inside a `with ops.colocate(var):` block # # 2. Some tensor `data` is captured and a switch is created in a # `with ops.colocate_with(data):` block. # # with ops.colocate_with(var): # with ops.colocate_with(data): # op = ... # # var and data may be pinned to different devices, so we want to ops # created within ops.colocate_with(data) to ignore the existing stack. with ops.colocate_with(data, ignore_existing=True): if isinstance(data, ops.Tensor): if data.dtype._is_ref_dtype: # pylint: disable=protected-access return ref_switch(data, pred, name=name) return switch(data, pred, name=name) def merge(inputs, name=None): """Returns the value of an available element of `inputs`. This op tests each of the tensors in `inputs` in turn to determine if any of them is available. If it finds an available tensor, it returns it and its index in `inputs`. It is an error if more than one tensor in `inputs` is available. If no tensor in `inputs` is available, the returned tensor and index are not set. This op handles both `Tensor`s and `IndexedSlices`. If inputs has a mix of `Tensor`s and `IndexedSlices`, all inputs are converted to IndexedSlices before merging. Args: inputs: The input tensors, at most one of which is available. name: A name for this operation (optional). Returns: A tuple containing the chosen input tensor and its index in `inputs`. 
Raises: ValueError: If any of the inputs is None, or inputs are IndexedSlices and some but not all have a dense_shape property. """ if any(inp is None for inp in inputs): raise ValueError("At least one of the merge inputs is None: %s" % inputs) with ops.name_scope(name, "Merge", inputs) as name: inputs = [ ops.internal_convert_to_tensor_or_composite(inp, as_ref=True) for inp in inputs ] if all(isinstance(v, ops.Tensor) for v in inputs): if all(v.dtype._is_ref_dtype for v in inputs): # pylint: disable=protected-access return gen_control_flow_ops.ref_merge(inputs, name) else: return gen_control_flow_ops.merge(inputs, name) else: # If there is a mix of tensors and indexed slices, then convert the # tensors to indexed slices. if all(isinstance(v, (ops.IndexedSlices, ops.Tensor)) for v in inputs): inputs = math_ops._as_indexed_slices_list(inputs, optimize=False) for v in inputs: if not isinstance(v, composite_tensor.CompositeTensor): raise TypeError("Type %s not supported" % type(v)) for v in inputs[1:]: nest.assert_same_structure(inputs[0], v, expand_composites=True) flat_inputs = [nest.flatten(v, expand_composites=True) for v in inputs] merged_results = [ gen_control_flow_ops.merge(component) for component in zip(*flat_inputs) ] flat_merged = [tensor for (tensor, _) in merged_results] chosen_index = merged_results[0][1] merged_inputs = nest.pack_sequence_as( inputs[0], flat_merged, expand_composites=True) return (merged_inputs, chosen_index) # pylint: enable=protected-access def _convert_tensorarray_to_flow(tensor_or_tensor_array): if isinstance(tensor_or_tensor_array, tensor_array_ops.TensorArray): return tensor_or_tensor_array.flow else: return tensor_or_tensor_array def _make_tensor_array(ta, t_or_flow): # pylint: disable=protected-access new_ta = tensor_array_ops.TensorArray( dtype=ta.dtype, handle=ta.handle, flow=t_or_flow, infer_shape=ta._infer_shape, colocate_with_first_write_call=ta._colocate_with_first_write_call) new_ta._colocate_with = ta._colocate_with new_ta._element_shape = ta._element_shape # pylint: enable=protected-access return new_ta def _convert_flows_to_tensorarrays(tensors_or_tensorarrays, tensors_or_flows): if len(tensors_or_tensorarrays) != len(tensors_or_flows): raise ValueError( "Lengths of original Tensor list and new list do not match: %d vs. %d" % (len(tensors_or_tensorarrays), len(tensors_or_flows))) return [ _make_tensor_array(ta, t_or_flow) if isinstance( ta, tensor_array_ops.TensorArray) else t_or_flow for (ta, t_or_flow) in zip(tensors_or_tensorarrays, tensors_or_flows) ] def _ShapeLessThanOrEqual(shape1, shape2): if shape2.dims is None: return True if shape1.ndims != shape2.ndims: return False for dim1, dim2 in zip(shape1.dims, shape2.dims): if dim2.value is not None and dim1.value != dim2.value: return False return True def _get_shape_invariant(var, shape=None): """Returns shape invariant(s) for the given variable. Args: var: The tensor whose shape is described. shape: The shape invariant for the tensor. If not specified, then a default shape invariant for `var` is returned. Returns: `TensorShape` or `list` of `TensorShape`: The shape invariant for `var` (if it is a `Tensor`), or the shape invariants for the components that comprise `var` (if it is a `CompositeTensor`). """ if isinstance(var, composite_tensor.CompositeTensor): # Get a TypeSpec for `var`. 
if shape is None: spec = var._type_spec # pylint: disable=protected-access else: spec = _shape_invariant_to_type_spec(var, shape) tensor_specs = nest.flatten(spec, expand_composites=True) return [tspec.shape for tspec in tensor_specs] elif shape is None: return var.shape else: return shape def _shape_invariant_to_type_spec(var, shape): """Converts a shape invariant to a TypeSpec. Args: var: The tensor whose shape is described by the shape invariant. shape: A `TypeSpec` or `TensorShape`. If `shape` is already a `TypeSpec`, then it is simply returned as-is. Returns: A `TypeSpec` for `var`, consistent with the given shape. """ if isinstance(shape, type_spec.TypeSpec): return shape elif not isinstance(shape, tensor_shape.TensorShape): raise TypeError("Expected shape to be a TypeSpec or TensorShape, got %r" % shape) if isinstance(var, ops.Tensor): return tensor_spec.TensorSpec(shape, var.dtype) elif isinstance(var, composite_tensor.CompositeTensor): try: return var._shape_invariant_to_type_spec(shape) # pylint: disable=protected-access except NotImplementedError: raise TypeError( "To describe or constrain a %s, use a %s instead of a TensorShape." % (type(var).__name__, type(var._type_spec).__name__)) # pylint: disable=protected-access else: raise TypeError("Expected var to be a Tensor or CompositeTensor, got %s" % var) def _SetShapeInvariants(input_vars, enter_vars, shapes): """Set the shapes of the tensors in `enter_vars` to `shapes`. Args: input_vars: A list of tensors that are inputs to `enter_vars`. enter_vars: A list of tensors whose shapes will be set. shapes: A (possibly nested) list of shapes. Raises: ValueError: If any tensor in `enter_vars` has a less specific shape than its corresponding shape in `shapes`. """ if shapes is None: return flat_shapes = nest.flatten(shapes) if not all(isinstance(s, tensor_shape.TensorShape) for s in flat_shapes): raise ValueError("`shapes` must be a (possibly nested) list of shapes.") # Check that the shapes of the inputs are less than the shape invariants, # and set the shapes of `enter_vars` to the shape invariants. for inp, var, shape in zip(input_vars, enter_vars, flat_shapes): if isinstance(var, ops.Tensor): if not _ShapeLessThanOrEqual(inp.get_shape(), shape): raise ValueError( "The shape invariant specified for %s is not compatible with " "the initial shape of the loop variable. It enters the loop " "with shape %s, but the specified shape invariant is %s." % (inp.name, inp.get_shape(), shape)) var.set_shape(shape) else: raise TypeError("Type %s not supported" % type(var)) def _EnforceShapeInvariant(merge_var, next_var): """Check if the shapes of the loops variables are invariants. Args: merge_var: The list of tensors representing the initial values of the loop variables. next_var: The list of tensors representing the values of the loop variables after one loop iteration. Raises: ValueError: If any tensor in `merge_var` has a more specific shape than its correspnding tensor in `next_var`. """ if isinstance(merge_var, ops.Tensor): m_shape = merge_var.get_shape() n_shape = next_var.get_shape() if not _ShapeLessThanOrEqual(n_shape, m_shape): enter = merge_var.op.inputs[0].op assert util.IsLoopEnter(enter) input_t = enter.inputs[0] raise ValueError( "Input tensor '%s' enters the loop with shape %s, but has shape %s " "after one iteration. To allow the shape to vary across iterations, " "use the `shape_invariants` argument of tf.while_loop to specify a " "less-specific shape." 
% (input_t.name, input_t.shape, n_shape)) else: raise TypeError("Type %s not supported" % type(merge_var)) def _AddNextAndBackEdge(m, v, enforce_shape_invariant=True): """Add NextIteration and back edge from v to m.""" if isinstance(m, ops.Tensor): v = ops.convert_to_tensor(v) v = _NextIteration(v) if enforce_shape_invariant: # Make sure the shapes of loop outputs are correct. We do this before # calling _update_input, which will raise a less-helpful error message if # the types don't match. # TODO(skyewm): call this for other cases below (needs testing) _EnforceShapeInvariant(m, v) m.op._update_input(1, v) # pylint: disable=protected-access elif isinstance(m, composite_tensor.CompositeTensor): # pylint: disable=protected-access def update_component(m_component, v_component): m_component.op._update_input(1, v_component) if isinstance(m, ops.IndexedSlices): v = math_ops._as_indexed_slices(v, optimize=False) # pylint: enable=protected-access v = _NextIteration(v) return nest.map_structure(update_component, m, v, expand_composites=True) else: raise TypeError("Type %s not supported" % type(m)) return v @six.add_metaclass(abc.ABCMeta) class ControlFlowContext(object): """The base class for control flow context. The usage pattern is a sequence of (Enter, Exit) followed by a final ExitResult. We maintain the following state for control flow contexts during graph construction: 1. graph has _control_flow_context: the current context used to construct new nodes. Changed by ctxt.Enter() and ctxt.Exit() 2. op has _control_flow_context: the context to which the op belongs. Set at the time the op is created. Immutable. 3. A ControlFlowContext has _outer_context: the context in which this context is created. Set at the time a context is created. Immutable. 4. A ControlFlowContext has _context_stack. Pushed and popped by ctxt.Enter() and ctxt.Exit() """ def __init__(self, values_def=None, import_scope=None): self._nested_contexts = [] self._outer_context = ops.get_default_graph()._get_control_flow_context() if self._outer_context: self._outer_context._nested_contexts.append(self) # pylint: disable=protected-access self._context_stack = [] if values_def: self._init_values_from_proto(values_def, import_scope=import_scope) else: # The names of tensors that have been already seen in this context. self._values = set() # The keys are the names of tensors referenced by but external to this # context. Each value is the Tensor that should be used by this context to # access the key value (e.g. a switch output guarding a cond input value). self._external_values = {} def _init_values_from_proto(self, values_def, import_scope=None): """Initializes values and external_values from `ValuesDef` protocol buffer. Args: values_def: `ValuesDef` protocol buffer. import_scope: Optional `string`. Name scope to add. 
""" assert isinstance(values_def, control_flow_pb2.ValuesDef) self._values = set( ops.prepend_name_scope(value, import_scope) for value in values_def.values) g = ops.get_default_graph() self._external_values = {} for k, v in values_def.external_values.items(): k = ops.prepend_name_scope(k, import_scope) self._external_values[k] = g.as_graph_element( ops.prepend_name_scope(v, import_scope)) op_names = set([ op.split(":")[0] for op in self._values - set(self._external_values.keys()) ]) for op in op_names: # pylint: disable=protected-access g.as_graph_element(op)._set_control_flow_context(self) # pylint: enable=protected-access @property def name(self): return self._name @property def outer_context(self): """Return the context containing this context.""" return self._outer_context @property def grad_state(self): raise NotImplementedError("Abstract method") @property def back_prop(self): raise NotImplementedError("Abstract method") @abc.abstractmethod def to_control_flow_context_def(self, context_def, export_scope=None): """Serializes this into `context_def`. Args: context_def: a `ControlFlowContextDef` protocol buffer. export_scope: Optional `string`. Name scope to remove. """ raise NotImplementedError("Abstract method") def _to_values_def(self, export_scope=None): """Converts the values to a `ValuesDef` protocol buffer. Args: export_scope: Optional `string`. Name scope to remove. Returns: A `ValuesDef` protocol buffer. """ values_def = control_flow_pb2.ValuesDef() values_def.values.extend( [ops.strip_name_scope(v, export_scope) for v in sorted(self._values)]) for k, v in self._external_values.items(): k = ops.strip_name_scope(k, export_scope) values_def.external_values[k] = ops.strip_name_scope(v.name, export_scope) return values_def def AddName(self, name): self._values.add(name) # pylint: disable=protected-access def Enter(self): """Enter this control flow context.""" graph = ops.get_default_graph() self._context_stack.append(graph._get_control_flow_context()) graph._set_control_flow_context(self) def Exit(self): """Exit this control flow context.""" graph = ops.get_default_graph() last_context = self._context_stack.pop() graph._set_control_flow_context(last_context) def EnterGradientColocation(self, op, gradient_uid): """Start building a gradient colocated with an op.""" if self._outer_context: self._outer_context.EnterGradientColocation(op, gradient_uid) def ExitGradientColocation(self, op, gradient_uid): """Start building a gradient colocated with an op.""" if self._outer_context: self._outer_context.ExitGradientColocation(op, gradient_uid) def ExitResult(self, result): """Make a list of tensors available in the outer context.""" if self._outer_context: nest.map_structure( lambda x: self._outer_context.AddName(x.name), result, expand_composites=True) def GetWhileContext(self): """Return the while context containing this context.""" if self._outer_context: return self._outer_context.GetWhileContext() return None def _IsInOuterContext(self, op): op_ctxt = util.GetOutputContext(op) outer_ctxt = self.outer_context while outer_ctxt != op_ctxt: if outer_ctxt is None: return False outer_ctxt = outer_ctxt.outer_context return True def _RemoveExternalControlEdges(self, op): """Remove any external control dependency on this op.""" while_ctxt = self.GetWhileContext() # A control input of `op` is internal if it is in the same while # loop context as the enclosing while loop context of self. 
if while_ctxt is None: internal_control_inputs = op.control_inputs else: internal_control_inputs = [] for x in op.control_inputs: ctxt = util.GetOutputContext(x) if ctxt is not None and ctxt.GetWhileContext() == while_ctxt: internal_control_inputs.append(x) external_control_inputs = [] if len(internal_control_inputs) != len(op.control_inputs): external_control_inputs = list( set(op.control_inputs) - set(internal_control_inputs)) op._remove_all_control_inputs() op._add_control_inputs(internal_control_inputs) return internal_control_inputs, external_control_inputs # pylint: enable=protected-access def AddInnerOp(self, op): """Notifies a scope about an operator added to an inner scope.""" if self._outer_context: self._outer_context.AddInnerOp(op) def GetControlPivot(self): """Returns the pivot node for this context, or None.""" return None def IsWhileContext(self): return False def IsCondContext(self): return False def IsXLAContext(self): return False def __str__(self): return self.name class CondContext(ControlFlowContext): """The context for the conditional construct.""" def __init__(self, pred=None, pivot=None, branch=None, name="cond_text", context_def=None, import_scope=None): """Creates a `CondContext`. Args: pred: The `boolean` tensor for the conditional predicate. pivot: The predicate tensor in this branch. branch: 0 or 1 representing this branch. name: Name of the `CondContext` python object. context_def: Optional `ContextDef` protocol buffer to initialize the `CondContext` object from. import_scope: Optional `string`. Name scope to add. Only used when initialing from protocol buffer. """ self._name = ops.get_default_graph().unique_name(name) if context_def: self._init_from_proto(context_def, import_scope=import_scope) else: # Initializes the default fields. ControlFlowContext.__init__(self) self._pred = pred # The boolean tensor for the cond predicate self._pivot = pivot # The predicate tensor in this branch self._branch = branch # 0 or 1 representing this branch # Values considered to have been already seen in this context. pred is not # included in this context. self._values.add(pred.name) self._external_values[pred.name] = pred self._values.add(pivot.name) pivot.op._set_control_flow_context(self) # pylint: disable=protected-access def _init_from_proto(self, context_def, import_scope=None): """Creates a new `CondContext` from protocol buffer. Args: context_def: `CondContextDef` protocol buffer. import_scope: Optional `string`. Name scope to add. """ assert isinstance(context_def, control_flow_pb2.CondContextDef) # Create from context_def. g = ops.get_default_graph() self._name = ops.prepend_name_scope(context_def.context_name, import_scope) self._pred = g.as_graph_element( ops.prepend_name_scope(context_def.pred_name, import_scope)) self._pivot = g.as_graph_element( ops.prepend_name_scope(context_def.pivot_name, import_scope)) self._branch = context_def.branch super(CondContext, self).__init__( values_def=context_def.values_def, import_scope=import_scope) @property def pred(self): return self._pred @property def pivot(self): return self._pivot @property def branch(self): return self._branch @property def grad_state(self): if self.GetWhileContext(): return self.GetWhileContext().grad_state return None @property def back_prop(self): if self.GetWhileContext(): self.GetWhileContext().back_prop return False def GetControlPivot(self): return self._pivot def to_proto(self, export_scope=None): """Converts a `CondContext` to a `CondContextDef` protocol buffer. 
Args: export_scope: Optional `string`. Name scope to remove. Returns: A `CondContextDef` protocol buffer. """ if (export_scope is None or self.name.startswith(export_scope)): context_def = control_flow_pb2.CondContextDef() context_def.context_name = ops.strip_name_scope(self.name, export_scope) context_def.pred_name = ops.strip_name_scope(self._pred.name, export_scope) context_def.pivot_name = ops.strip_name_scope(self._pivot.name, export_scope) context_def.branch = self._branch context_def.values_def.MergeFrom( super(CondContext, self)._to_values_def(export_scope)) for nested in self._nested_contexts: nested_def = context_def.nested_contexts.add() nested.to_control_flow_context_def(nested_def) return context_def else: return None @staticmethod def from_proto(context_def, import_scope=None): """Returns a `CondContext` object created from `context_def`.""" ret = CondContext(context_def=context_def, import_scope=import_scope) ret.Enter() for nested_def in context_def.nested_contexts: from_control_flow_context_def(nested_def, import_scope=import_scope) ret.Exit() return ret def to_control_flow_context_def(self, context_def, export_scope=None): context_def.cond_ctxt.CopyFrom(self.to_proto(export_scope=export_scope)) def AddValue(self, val): """Add `val` to the current context and its outer context recursively.""" if val.name in self._values: # Use the real value if it comes from outer context. This is needed in # particular for nested conds. result = self._external_values.get(val.name) result = val if result is None else result else: result = val self._values.add(val.name) if self._outer_context: result = self._outer_context.AddValue(val) self._values.add(result.name) self._external_values[result.name] = result with ops.control_dependencies(None): result = _SwitchRefOrTensor(result, self._pred)[self._branch] if self._outer_context: self._outer_context.AddInnerOp(result.op) result.op.graph.prevent_fetching(result.op) # pylint: disable=protected-access result.op._set_control_flow_context(self) # pylint: enable=protected-access # Mark Switch output as seen by this context and any outer contexts, # just like what we do for normal op outputs in _AddOpInternal() below. ctxt = self while ctxt is not None: # pylint: disable=protected-access ctxt._values.add(result.name) ctxt = ctxt._outer_context # pylint: enable=protected-access self._external_values[val.name] = result return result def AddOp(self, op): self._AddOpInternal(op) def _AddOpInternal(self, op): """Add `op` to the current context.""" if not op.inputs: # If we're in a while loop, remove any control inputs from outside the # loop. self._RemoveExternalControlEdges(op) if not any( util.OpInContext(input_op, self) for input_op in op.control_inputs): # pylint: disable=protected-access op._add_control_input(self._pivot.op) # pylint: enable=protected-access else: # Make each input to 'op' available in this CondContext. If an input is # already part of this context there's nothing to do, but if it's # external, AddValue() will handle adding the appropriate Switch node and # other bookkeeping. for index in range(len(op.inputs)): x = op.inputs[index] if op.type == "Merge" and x.op.type == "NextIteration": # Edge case: if we're importing a while loop inside this CondContext, # AddValue() will not correctly handle the NextIteration inputs to # Merge node. 
The problem is that the NextIteration should also be # part of this context, but if we're importing it won't have been # processed and added to the context yet, so AddValue() will try to # add a Switch which results in an invalid graph. Instead, we use the # NextIteration input as-is here, and it will eventually be added to # the context via AddOp(). real_x = x else: real_x = self.AddValue(x) if real_x != x: # pylint: disable=protected-access op._update_input(index, real_x) # pylint: enable=protected-access # Remove any external control dependency on this op. self._RemoveExternalControlEdges(op) # pylint: disable=protected-access if op.graph._is_function(op.type) or op.type == "SymbolicGradient": op._add_control_input(self._pivot.op) # pylint: enable=protected-access # Mark op's outputs as seen by this context and any outer contexts. output_names = [x.name for x in op.outputs] ctxt = self while ctxt is not None: # pylint: disable=protected-access ctxt._values.update(output_names) ctxt = ctxt._outer_context # pylint: enable=protected-access if self._outer_context or not util.IsLoopExit(op): op.graph.prevent_fetching(op) if self._outer_context: self._outer_context.AddInnerOp(op) def _ProcessOutputTensor(self, val): """Process an output tensor of a conditional branch.""" real_val = val if val.name not in self._values: # Handle the special case of lambda: x self._values.add(val.name) if self._outer_context: real_val = self._outer_context.AddValue(val) self._values.add(real_val.name) self._external_values[real_val.name] = real_val real_val = _SwitchRefOrTensor(real_val, self._pred)[self._branch] self._external_values[val.name] = real_val else: external_val = self._external_values.get(val.name) if external_val is not None: real_val = external_val return real_val def _BuildCondTensor(self, v): if isinstance(v, ops.Operation): # Use pivot as the proxy for this op. 
return with_dependencies([v], self._pivot) else: v = nest.map_structure( _convert_tensorarray_to_flow, v, expand_composites=True) return self._ProcessOutputTensor(ops.convert_to_tensor(v)) def BuildCondBranch(self, fn): """Add the subgraph defined by fn() to the graph.""" pre_summaries = ops.get_collection(ops.GraphKeys._SUMMARY_COLLECTION) # pylint: disable=protected-access original_result = fn() post_summaries = ops.get_collection(ops.GraphKeys._SUMMARY_COLLECTION) # pylint: disable=protected-access if len(post_summaries) > len(pre_summaries): new_summaries = post_summaries[len(pre_summaries):] summary_ref = ops.get_collection_ref(ops.GraphKeys._SUMMARY_COLLECTION) # pylint: disable=protected-access summary_ref[:] = pre_summaries with ops.control_dependencies(new_summaries): if original_result is None: return no_op(), None else: original_result = nest.map_structure( array_ops.identity, original_result, expand_composites=True) if original_result is None: return None, None result = nest.map_structure( self._BuildCondTensor, original_result, expand_composites=True) if not isinstance(result, (list, _basetuple)): result = [result] return original_result, result def IsCondContext(self): return True def _UnpackIfSingleton(res): if isinstance(res, (list, _basetuple)) and len(res) == 1: return res[0] else: return res # pylint: disable=redefined-outer-name # pylint: disable=g-doc-args @tf_export(v1=["cond"]) @deprecation.deprecated_args( None, "fn1/fn2 are deprecated in favor of the true_fn/false_fn arguments.", "fn1", "fn2") def cond(pred, true_fn=None, false_fn=None, strict=False, name=None, fn1=None, fn2=None): """Return `true_fn()` if the predicate `pred` is true else `false_fn()`. `true_fn` and `false_fn` both return lists of output tensors. `true_fn` and `false_fn` must have the same non-zero number and type of outputs. **WARNING**: Any Tensors or Operations created outside of `true_fn` and `false_fn` will be executed regardless of which branch is selected at runtime. Although this behavior is consistent with the dataflow model of TensorFlow, it has frequently surprised users who expected a lazier semantics. Consider the following simple program: ```python z = tf.multiply(a, b) result = tf.cond(x < y, lambda: tf.add(x, z), lambda: tf.square(y)) ``` If `x < y`, the `tf.add` operation will be executed and `tf.square` operation will not be executed. Since `z` is needed for at least one branch of the `cond`, the `tf.multiply` operation is always executed, unconditionally. Note that `cond` calls `true_fn` and `false_fn` *exactly once* (inside the call to `cond`, and not at all during `Session.run()`). `cond` stitches together the graph fragments created during the `true_fn` and `false_fn` calls with some additional graph nodes to ensure that the right branch gets executed depending on the value of `pred`. `tf.cond` supports nested structures as implemented in `tensorflow.python.util.nest`. Both `true_fn` and `false_fn` must return the same (possibly nested) value structure of lists, tuples, and/or named tuples. Singleton lists and tuples form the only exceptions to this: when returned by `true_fn` and/or `false_fn`, they are implicitly unpacked to single values. This behavior is disabled by passing `strict=True`. Args: pred: A scalar determining whether to return the result of `true_fn` or `false_fn`. true_fn: The callable to be performed if pred is true. false_fn: The callable to be performed if pred is false. strict: A boolean that enables/disables 'strict' mode; see above. 
name: Optional name prefix for the returned tensors. Returns: Tensors returned by the call to either `true_fn` or `false_fn`. If the callables return a singleton list, the element is extracted from the list. Raises: TypeError: if `true_fn` or `false_fn` is not callable. ValueError: if `true_fn` and `false_fn` do not return the same number of tensors, or return tensors of different types. Example: ```python x = tf.constant(2) y = tf.constant(5) def f1(): return tf.multiply(x, 17) def f2(): return tf.add(y, 23) r = tf.cond(tf.less(x, y), f1, f2) # r is set to f1(). # Operations in f2 (e.g., tf.add) are not executed. ``` """ # Always enable control flow v2 if building a function, regardless of toggle. if (util.EnableControlFlowV2(ops.get_default_graph()) and not context.executing_eagerly()): return cond_v2.cond_v2(pred, true_fn, false_fn, name) # We needed to make true_fn/false_fn keyword arguments for # backwards-compatibility. This check exists so that we can convert back to # having them be positional arguments. # TODO(josh11b): Make `true_fn` and `false_fn` positional arguments after # `fn1` and `fn2` are deleted. if fn1 is not None: if true_fn is not None: raise TypeError("cond(): true_fn and fn1 may not be set simultaneously.") true_fn = fn1 elif true_fn is None: raise TypeError("cond(): true_fn argument required") if fn2 is not None: if false_fn is not None: raise TypeError("cond(): false_fn and fn2 may not be set simultaneously.") false_fn = fn2 elif false_fn is None: raise TypeError("cond(): false_fn argument required") if not callable(true_fn): raise TypeError("true_fn must be callable.") if not callable(false_fn): raise TypeError("false_fn must be callable.") with ops.name_scope(name, "cond", [pred]): if context.executing_eagerly(): if pred: result = true_fn() else: result = false_fn() if not strict: result = _UnpackIfSingleton(result) return result # Add the Switch to the graph. if isinstance(pred, bool): raise TypeError("pred must not be a Python bool") p_2, p_1 = switch(pred, pred) pivot_1 = array_ops.identity(p_1, name="switch_t") pivot_2 = array_ops.identity(p_2, name="switch_f") pred = array_ops.identity(pred, name="pred_id") # Disable the fetching of tensors that are only on one branch of cond. for tensor in [p_1, p_2, pivot_1, pivot_2, pred]: tensor.op.graph.prevent_fetching(tensor.op) # Build the graph for the true branch in a new context. context_t = CondContext(pred, pivot_1, branch=1) try: context_t.Enter() orig_res_t, res_t = context_t.BuildCondBranch(true_fn) if orig_res_t is None: raise ValueError("true_fn must have a return value.") context_t.ExitResult(res_t) finally: context_t.Exit() # Build the graph for the false branch in a new context. context_f = CondContext(pred, pivot_2, branch=0) try: context_f.Enter() orig_res_f, res_f = context_f.BuildCondBranch(false_fn) if orig_res_f is None: raise ValueError("false_fn must have a return value.") context_f.ExitResult(res_f) finally: context_f.Exit() if not strict: orig_res_t = _UnpackIfSingleton(orig_res_t) orig_res_f = _UnpackIfSingleton(orig_res_f) # Check that the return values of the two branches have the same structure. 
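    # A structure mismatch at this point may simply be two IndexedSlices whose
    # `indices` dtypes differ (int32 vs. int64) across the branches; cast both
    # to int64 and retry before reporting an incompatibility.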
try: nest.assert_same_structure(orig_res_t, orig_res_f, expand_composites=True) except (TypeError, ValueError): nest.map_structure(_cast_indexed_slice_indices, orig_res_t, orig_res_f) nest.map_structure(_cast_indexed_slice_indices, res_t, res_f) try: nest.assert_same_structure(orig_res_t, orig_res_f, expand_composites=True) except TypeError as e: raise TypeError( "Incompatible return types of true_fn and false_fn: {}".format(e)) except ValueError as e: raise ValueError( "Incompatible return values of true_fn and false_fn: {}".format(e)) # Add the final merge to the graph. if not res_t: raise ValueError("true_fn and false_fn must return at least one result.") res_t_flat = nest.flatten(res_t, expand_composites=True) res_f_flat = nest.flatten(res_f, expand_composites=True) for (x, y) in zip(res_t_flat, res_f_flat): assert isinstance(x, ops.Tensor) and isinstance(y, ops.Tensor) if x.dtype.base_dtype != y.dtype.base_dtype: raise ValueError( "Outputs of true_fn and false_fn must have the same type: " "%s, %s" % (x.dtype.name, y.dtype.name)) merges = [merge(pair)[0] for pair in zip(res_f_flat, res_t_flat)] merges = _convert_flows_to_tensorarrays( nest.flatten(orig_res_t, expand_composites=True), merges) # Only add non-nested conds to the collection. Any nested control flow will # be encapsulated in the root context. assert context_t.outer_context == context_f.outer_context if context_t.outer_context is None: ops.add_to_collection(ops.GraphKeys.COND_CONTEXT, context_t) ops.add_to_collection(ops.GraphKeys.COND_CONTEXT, context_f) merges = nest.pack_sequence_as( structure=orig_res_t, flat_sequence=merges, expand_composites=True) # Singleton lists and tuples are automatically unpacked if strict == False. if not strict: merges = _UnpackIfSingleton(merges) return merges def _cast_indexed_slice_indices(a, b): """Cast IndexedSlice.indices from int32 to int64 where necessary. If `a` and `b` are both IndexedSlices, and their indices have different dtypes, then cast both their dtypes to `int64` (modifies `a` and `b` in-place). Otherwise, does nothing. Args: a: A value, which may be an IndexedSlices. b: A value, which may be an IndexedSlices. """ if (isinstance(a, ops.IndexedSlices) and isinstance(b, ops.IndexedSlices) and a.indices.dtype != b.indices.dtype): # pylint: disable=protected-access a._indices = math_ops.cast(a.indices, dtypes.int64) b._indices = math_ops.cast(b.indices, dtypes.int64) # pylint: enable=g-doc-args # pylint: enable=redefined-outer-name @tf_export("cond", v1=[]) def cond_for_tf_v2(pred, true_fn=None, false_fn=None, name=None): """Return `true_fn()` if the predicate `pred` is true else `false_fn()`. `true_fn` and `false_fn` both return lists of output tensors. `true_fn` and `false_fn` must have the same non-zero number and type of outputs. **WARNING**: Any Tensors or Operations created outside of `true_fn` and `false_fn` will be executed regardless of which branch is selected at runtime. Although this behavior is consistent with the dataflow model of TensorFlow, it has frequently surprised users who expected a lazier semantics. Consider the following simple program: ```python z = tf.multiply(a, b) result = tf.cond(x < y, lambda: tf.add(x, z), lambda: tf.square(y)) ``` If `x < y`, the `tf.add` operation will be executed and `tf.square` operation will not be executed. Since `z` is needed for at least one branch of the `cond`, the `tf.multiply` operation is always executed, unconditionally. 
Note that `cond` calls `true_fn` and `false_fn` *exactly once* (inside the call to `cond`, and not at all during `Session.run()`). `cond` stitches together the graph fragments created during the `true_fn` and `false_fn` calls with some additional graph nodes to ensure that the right branch gets executed depending on the value of `pred`. `tf.cond` supports nested structures as implemented in `tensorflow.python.util.nest`. Both `true_fn` and `false_fn` must return the same (possibly nested) value structure of lists, tuples, and/or named tuples. Singleton lists and tuples form the only exceptions to this: when returned by `true_fn` and/or `false_fn`, they are implicitly unpacked to single values. Note: It is illegal to "directly" use tensors created inside a cond branch outside it, e.g. by storing a reference to a branch tensor in the python state. If you need to use a tensor created in a branch function you should return it as an output of the branch function and use the output from `tf.cond` instead. Args: pred: A scalar determining whether to return the result of `true_fn` or `false_fn`. true_fn: The callable to be performed if pred is true. false_fn: The callable to be performed if pred is false. name: Optional name prefix for the returned tensors. Returns: Tensors returned by the call to either `true_fn` or `false_fn`. If the callables return a singleton list, the element is extracted from the list. Raises: TypeError: if `true_fn` or `false_fn` is not callable. ValueError: if `true_fn` and `false_fn` do not return the same number of tensors, or return tensors of different types. Example: ```python x = tf.constant(2) y = tf.constant(5) def f1(): return tf.multiply(x, 17) def f2(): return tf.add(y, 23) r = tf.cond(tf.less(x, y), f1, f2) # r is set to f1(). # Operations in f2 (e.g., tf.add) are not executed. ``` """ return cond(pred, true_fn=true_fn, false_fn=false_fn, strict=True, name=name) def _resource_safe_shape(t): """Returns the shape of t or the variable it points to.""" if t.dtype == dtypes.resource: while t.op.inputs: t = t.op.inputs[0] return tensor_shape.TensorShape(t.op.get_attr("shape")) return array_ops.shape_internal(t, optimize=False) # TODO(yuanbyu): Consider having a unified notion of context for # not only conditionals and loops but also control dependency and # subgraphs. class WhileContext(ControlFlowContext): """The context for the loop construct.""" def __init__(self, maximum_iterations=None, parallel_iterations=10, back_prop=True, swap_memory=False, name="while_context", grad_state=None, context_def=None, import_scope=None): """"Creates a `WhileContext`. Args: maximum_iterations: Optional upper bound on number of loop iterations. parallel_iterations: The number of iterations allowed to run in parallel. back_prop: Whether backprop is enabled for this while loop. swap_memory: Whether GPU-CPU memory swap is enabled for this loop. name: Optional name prefix for the returned tensors. grad_state: The gradient loop state. context_def: Optional `WhileContextDef` protocol buffer to initialize the `Whilecontext` python object from. import_scope: Optional `string`. Name scope to add. Only used when initialing from protocol buffer. """ if context_def: self._init_from_proto(context_def, import_scope=import_scope) else: ControlFlowContext.__init__(self) self._init_from_args(maximum_iterations, parallel_iterations, back_prop, swap_memory, name) # The gradient loop state. 
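    # `grad_state` is expected to be non-None only when this context is the
    # backprop loop created for another while loop; it links this loop to its
    # forward counterpart.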
self._grad_state = grad_state def _init_from_args(self, maximum_iterations, parallel_iterations, back_prop, swap_memory, name): """Creates a new `WhileContext` from arguments. Args: maximum_iterations: Optional upper bound on number of loop iterations. parallel_iterations: The number of iterations allowed to run in parallel. back_prop: Whether backprop is enabled for this while loop. swap_memory: Whether GPU-CPU memory swap is enabled for this loop. name: Optional name prefix for the returned tensors. Raises: ValueError: If `parallel_iterations` has invalid value. """ if not isinstance(parallel_iterations, int) or (parallel_iterations <= 0): raise ValueError("`parallel_iterations` must be a positive integer: " "%s" % parallel_iterations) self._name = ops.get_default_graph().unique_name(name) self._maximum_iterations = maximum_iterations self._parallel_iterations = parallel_iterations self._back_prop = back_prop self._swap_memory = swap_memory # We use this node to control constants created by the pred lambda. self._pivot_for_pred = None # We use this node to control constants created by the body lambda. self._pivot_for_body = None # The boolean tensor for loop termination condition. Used in code # generation for gradient computation self._pivot = None # The list of exit tensors for loop variables. self._loop_exits = [] # The list of enter tensors for loop variables. self._loop_enters = [] self._graph = ops.get_default_graph() def _init_from_proto(self, context_def, import_scope=None): """Creates a new `WhileContext` from protocol buffer. Args: context_def: `WhileContextDef` protocol buffer. import_scope: Optional `string`. Name scope to add. """ assert isinstance(context_def, control_flow_pb2.WhileContextDef) # Create from context_def. g = ops.get_default_graph() self._name = ops.prepend_name_scope(context_def.context_name, import_scope) if context_def.maximum_iterations_name: self._maximum_iterations = g.as_graph_element( ops.prepend_name_scope(context_def.maximum_iterations_name, import_scope)) else: self._maximum_iterations = None self._parallel_iterations = context_def.parallel_iterations self._back_prop = context_def.back_prop self._swap_memory = context_def.swap_memory self._pivot_for_pred = g.as_graph_element( ops.prepend_name_scope(context_def.pivot_for_pred_name, import_scope)) # We use this node to control constants created by the body lambda. self._pivot_for_body = g.as_graph_element( ops.prepend_name_scope(context_def.pivot_for_body_name, import_scope)) # The boolean tensor for loop termination condition. Used in code # generation for gradient computation. self._pivot = g.as_graph_element( ops.prepend_name_scope(context_def.pivot_name, import_scope)) # The list of exit tensors for loop variables. self._loop_exits = [ g.as_graph_element(ops.prepend_name_scope(exit_name, import_scope)) for exit_name in context_def.loop_exit_names ] # The list of enter tensors for loop variables. self._loop_enters = [ g.as_graph_element(ops.prepend_name_scope(enter_name, import_scope)) for enter_name in context_def.loop_enter_names ] super(WhileContext, self).__init__( values_def=context_def.values_def, import_scope=import_scope) # import_scope causes self.name to be different from the original serialized # context's name. Rewrite "frame_name" attrs with the new name. 
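    # Each Enter op records its frame name in a "frame_name" attr, so the attr
    # on every loop-variable Enter in this context has to be rewritten to the
    # renamed frame.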
if import_scope: for tensor_name in self._values: op = g.as_graph_element(tensor_name).op if util.IsLoopEnter(op): # pylint: disable=protected-access op._set_attr("frame_name", attr_value_pb2.AttrValue(s=compat.as_bytes(self.name))) # pylint: enable=protected-access self._graph = ops.get_default_graph() @property def maximum_iterations(self): """The maximum number of iterations that will be executed.""" return self._maximum_iterations @property def parallel_iterations(self): """The number of iterations allowed to run in parallel.""" return self._parallel_iterations @property def back_prop(self): """True iff backprop is enabled for this while loop.""" return self._back_prop @property def swap_memory(self): """True iff GPU-CPU memory swap is enabled for this while loop.""" return self._swap_memory @property def pivot(self): """The boolean tensor representing the loop termination condition.""" return self._pivot @property def loop_enters(self): """The list of enter tensors for loop variables.""" return self._loop_enters @property def loop_exits(self): """The list of exit tensors for loop variables.""" return self._loop_exits @property def grad_state(self): """The gradient loop state.""" return self._grad_state def to_proto(self, export_scope=None): """Converts a `WhileContext` to a `WhileContextDef` protocol buffer. Args: export_scope: Optional `string`. Name scope to remove. Returns: A `WhileContextDef` protocol buffer. """ if (export_scope is None or self.name.startswith(export_scope)): context_def = control_flow_pb2.WhileContextDef() context_def.context_name = ops.strip_name_scope(self.name, export_scope) context_def.parallel_iterations = self._parallel_iterations if self._maximum_iterations is not None: context_def.maximum_iterations_name = ops.strip_name_scope( self._maximum_iterations.name, export_scope) context_def.back_prop = self._back_prop context_def.swap_memory = self._swap_memory context_def.pivot_for_pred_name = ops.strip_name_scope( self._pivot_for_pred.name, export_scope) context_def.pivot_for_body_name = ops.strip_name_scope( self._pivot_for_body.name, export_scope) context_def.pivot_name = ops.strip_name_scope(self._pivot.name, export_scope) context_def.loop_exit_names.extend([ ops.strip_name_scope(l.name, export_scope) for l in self._loop_exits ]) context_def.loop_enter_names.extend([ ops.strip_name_scope(l.name, export_scope) for l in self._loop_enters ]) context_def.values_def.MergeFrom( super(WhileContext, self)._to_values_def(export_scope=export_scope)) for nested in self._nested_contexts: nested_def = context_def.nested_contexts.add() nested.to_control_flow_context_def(nested_def) return context_def else: return None def to_control_flow_context_def(self, context_def, export_scope=None): context_def.while_ctxt.CopyFrom(self.to_proto(export_scope=export_scope)) @staticmethod def from_proto(context_def, import_scope=None): """Returns a `WhileContext` object created from `context_def`. Args: context_def: A `WhileContextDef` protocol buffer. import_scope: Optional `string`. Name scope to add. Returns: A `WhileContext` Python object. 
""" ret = WhileContext(context_def=context_def, import_scope=import_scope) ret.Enter() for nested_def in context_def.nested_contexts: from_control_flow_context_def(nested_def, import_scope=import_scope) ret.Exit() return ret def GetWhileContext(self): return self def GetControlPivot(self): if self._pivot_for_body is not None: return self._pivot_for_body return self._pivot_for_pred def AddValue(self, val): """Add `val` to the current context and its outer context recursively.""" result = val new_value = val.name not in self._values # Don't treat ops in this context as new values. Usually all known values # are in self._values, except when we're importing a while loop inside this # WhileContext. Since there's a cycle in this case, `val` may be part of the # imported while loop but not yet processed by this context and added to # self._values in _AddOpInternal. We only want to process external input # tensors to the while loop here. new_value &= val.op._control_flow_context is not self # pylint: disable=protected-access if new_value: self._values.add(val.name) # If we are in a grad context and val is from its forward context, # use GetRealValue(), which adds the logic to save the history of # val in forward. grad_ctxt = ops.get_default_graph()._get_control_flow_context() if grad_ctxt: grad_ctxt = grad_ctxt.GetWhileContext() if grad_ctxt.grad_state: forward_ctxt = util.GetWhileContext(val.op) if util.IsLoopExit(val.op): forward_ctxt = forward_ctxt.outer_context if forward_ctxt: forward_ctxt = forward_ctxt.GetWhileContext() if forward_ctxt == grad_ctxt.grad_state.forward_context: real_val = grad_ctxt.grad_state.GetRealValue(val) self._external_values[val.name] = real_val return real_val if self._outer_context is not None: result = self._outer_context.AddValue(val) # Create an Enter to make `result` known to this loop context. with ops.control_dependencies(None): enter = _Enter( result, self._name, is_constant=True, parallel_iterations=self._parallel_iterations) enter.graph.prevent_feeding(enter) if self._outer_context: self._outer_context.AddInnerOp(enter.op) # Fix the control inputs and control flow context of these enter ops. self._FixControlInputsAndContext([enter]) # Add `enter` in this context. self._values.add(enter.name) self._external_values[val.name] = enter result = enter else: actual_val = self._external_values.get(val.name) if actual_val is not None: result = actual_val return result def AddOp(self, op): """Add `op` to the current context.""" # For a reduction op, if op is in a grad context and its input is from # its forward context, moving op to the forward context means we would # store the tensor after the reduction as opposed to the tensor before # reduction, and therefore could significantly reduce memory consumption. # For now, we do this only for a few ops. # # If in XLA context, do not move constant ops to forward pass as pushing to # and popping from a stack removes the constant property of an op and breaks # XLA compilation, which requires certain inputs to be constant for certain # ops. 
if not util.IsInXLAContext(op) and op.type in {"Shape", "Size", "Rank"}: grad_ctxt = ops.get_default_graph()._get_control_flow_context() if grad_ctxt: grad_ctxt = grad_ctxt.GetWhileContext() if grad_ctxt.grad_state: op_input_forward_ctxt = util.GetWhileContext(op.inputs[0].op) if op_input_forward_ctxt == grad_ctxt.grad_state.forward_context: op_input_ctxt = op.inputs[0].op._get_control_flow_context() op._set_control_flow_context(op_input_ctxt) op_input_ctxt._AddOpInternal(op) return self._AddOpInternal(op) def _AddOpInternal(self, op): """Add `op` to the current context. We move any external control dependencies of the op to the loop pivot, to ensure they get executed. """ if not op.inputs: # Remove any external control dependency on this op control_inputs, external_inputs = self._RemoveExternalControlEdges(op) # Add a control edge from the control pivot to this op. if not control_inputs: # pylint: disable=protected-access op._add_control_input(self.GetControlPivot().op) # pylint: enable=protected-access for x in op.outputs: self._values.add(x.name) else: for index in range(len(op.inputs)): x = op.inputs[index] real_x = self.AddValue(x) if real_x != x: op._update_input(index, real_x) # pylint: disable=protected-access # Remove any external control dependency on this op. _, external_inputs = self._RemoveExternalControlEdges(op) # Add a control dependency to prevent loop invariants from # enabling ops that should not be executed. self._MaybeAddControlDependency(op) for x in op.outputs: self._values.add(x.name) if external_inputs: # Use an identity to pull control inputs as data inputs. Note that we # ignore ops which don't have outputs. TODO(apassos): fix that with ops.control_dependencies(None): self.Enter() external_inputs = [ array_ops.identity(x.outputs[0]).op for x in external_inputs if x.outputs ] self.Exit() op._add_control_inputs(external_inputs) # pylint: disable=protected-access if self._outer_context or not util.IsLoopExit(op): op.graph.prevent_fetching(op) for x in op.outputs: op.graph.prevent_feeding(x) if self._outer_context: self._outer_context.AddInnerOp(op) def _MaybeAddControlDependency(self, op): """Add a control input to the op if it only depends on loop invariants.""" def _IsOpFree(op): """Determines if `op` needs a control dependency.""" if op.control_inputs: return False # pylint: disable=protected-access if op.graph._is_function(op.type) or op.type == "SymbolicGradient": return True # pylint: enable=protected-access for x in op.inputs: if not util.IsLoopConstantEnter(x.op): return False return True if _IsOpFree(op): # pylint: disable=protected-access op._add_control_input(self.GetControlPivot().op) # pylint: enable=protected-access def AddForwardLoopCounter(self, outer_grad_state): """Adds a loop that counts the number of iterations. This is added to the forward loop at the time when we start to create the loop for backprop gradient computation. Called in the outer context of this forward context. The pseudocode is: `n = 0; while (_pivot) { n++; }` Note that a control dependency is added to `n` to ensure the correct execution order of stack push ops. Args: outer_grad_state: The outer grad state. None if not nested. Returns: The number of iterations taken by the forward loop and the loop index. """ n = constant_op.constant(0, name="f_count") if outer_grad_state is not None: # Force the stack pushes of i-th execution of an inner loop to be ordered # before the pushes of (i+1)-th execution of the same inner loop. 
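      # `forward_index` is the NextIteration of the outer loop's counter, so
      # its op input is the Add that bumps that counter once per outer
      # iteration.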
outer_add_op = outer_grad_state.forward_index.op.inputs[0].op n.op._add_control_input(outer_add_op) # pylint: disable=protected-access self.Enter() self.AddName(n.name) enter_n = _Enter( n, self._name, is_constant=False, parallel_iterations=self._parallel_iterations, name="f_count") self.loop_enters.append(enter_n) merge_n = merge([enter_n, enter_n])[0] switch_n = switch(merge_n, self._pivot) index = math_ops.add(switch_n[1], 1) next_n = _NextIteration(index) merge_n.op._update_input(1, next_n) total_iterations = exit(switch_n[0], name="f_count") self.loop_exits.append(total_iterations) self.ExitResult([total_iterations]) self.Exit() return total_iterations, next_n def AddBackpropLoopCounter(self, count, outer_grad_state): """Add the backprop loop that controls the iterations. This is added to the backprop loop. It is used to control the loop termination of the backprop loop. Called in the outer context of this grad context. The pseudocode is: `n = count; while (n >= 1) { n--; }` Note that a control dependency is added to `final_zero` to ensure the correct execution order of stack pop ops. Args: count: The number of iterations for backprop. outer_grad_state: The outer grad state. None if not nested. Returns: The loop index. """ in_separate_functions = count.graph is not ops.get_default_graph() if in_separate_functions: # Brings the count into this graph count = array_ops.identity(count) else: # TODO(apassos) XLA expects this constant to be created outside the loop, # so doing that for now. one = constant_op.constant(1, name="b_count") self.Enter() self.AddName(count.name) enter_count = _Enter( count, self._name, is_constant=False, parallel_iterations=self._parallel_iterations, name="b_count") self.loop_enters.append(enter_count) merge_count = merge([enter_count, enter_count])[0] self._pivot_for_pred = merge_count if in_separate_functions: one = constant_op.constant(1, name="b_count") pred = math_ops.greater_equal(merge_count, one) self._pivot = loop_cond(pred, name="b_count") switch_count = switch(merge_count, self._pivot) index = math_ops.subtract(switch_count[1], one) self._pivot_for_body = index next_count = _NextIteration(index) merge_count.op._update_input(1, next_count) final_zero = exit(switch_count[0], name="b_count") self.loop_exits.append(final_zero) if outer_grad_state is not None: # Force the stack pops of i-th execution of an inner loop to be ordered # before the pops of (i+1)-th execution of the same inner loop. # pylint: disable=protected-access outer_grad_state.grad_sync._add_control_input(final_zero.op) # pylint: enable=protected-access self.ExitResult([final_zero]) self.Exit() return next_count def AddBackpropAccumulator(self, op, grad): """Add an accumulation loop for every loop invariant. This is added to the backprop loop. It is used to accumulate partial gradients within each loop iteration. Called when in the gradient while context. The pseudocode is: ``` acc = 0.0; while (_pivot) { acc += grad; } ``` Args: op: The Enter op for a loop invariant. grad: The partial gradient of an iteration for a loop invariant. Returns: The gradient for a loop invariant. """ self.Exit() # Create a zeros tensor with the right shape for acc. If we don't # know the full shape statically, we will have to get the shape # dynamically from the forward inference. Getting the shape right # for the zeros is only needed for the base case when the loop exits # without running any iterations. 
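    # Three cases below: a fully defined static shape becomes a constant of
    # zeros; a nested gradient loop reads the dynamic shape back from the
    # forward loop through a shape accumulator; otherwise the shape is taken
    # dynamically from the forward value.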
shape = grad.get_shape() if shape.is_fully_defined(): if self.outer_context: self.outer_context.Enter() acc = constant_op.constant(0, grad.dtype, shape=shape, name="b_acc") if self.outer_context: self.outer_context.Exit() else: value = op.inputs[0] if (isinstance(self.outer_context, WhileContext) and self.outer_context.grad_state is not None): # We are in a nested while loop. forward_ctxt = self.grad_state.forward_context forward_ctxt.outer_context.Enter() zeros_shape = array_ops.shape_internal(value, optimize=False) forward_ctxt.outer_context.Exit() outer_grad_state = self.grad_state.outer_grad_state history_zeros_shape = outer_grad_state.AddForwardAccumulator( zeros_shape) self.outer_context.Enter() real_shape = outer_grad_state.AddBackpropAccumulatedValue( history_zeros_shape, zeros_shape) acc = array_ops.zeros(real_shape, grad.dtype) self.outer_context.Exit() else: if self.outer_context: self.outer_context.Enter() zeros_shape = array_ops.shape_internal(value, optimize=False) acc = array_ops.zeros(zeros_shape, grad.dtype) if self.outer_context: self.outer_context.Exit() self.Enter() self.AddName(acc.name) enter_acc = _Enter( acc, self._name, is_constant=False, parallel_iterations=self._parallel_iterations, name="b_acc") self.loop_enters.append(enter_acc) merge_acc = merge([enter_acc, enter_acc], name="b_acc")[0] switch_acc_false, switch_acc_true = switch(merge_acc, self._pivot) add_acc = math_ops.add(switch_acc_true, grad) next_acc = _NextIteration(add_acc) merge_acc.op._update_input(1, next_acc) # pylint: disable=protected-access result_acc = exit(switch_acc_false, name="b_acc") self.loop_exits.append(result_acc) self.ExitResult([result_acc]) return result_acc def AddBackpropIndexedSlicesAccumulator(self, op, grad): """This is used for accumulating gradients that are IndexedSlices. This is essentially the equivalent of AddBackpropAccumulator but optimized for things like updating embeddings from within a while loop. Args: op: The Enter op for a loop invariant. grad: The partial gradients represented as an IndexedSlices. Returns: The accumulated IndexedSlices gradient of the loop invariant. 
""" values = grad.values indices = grad.indices dense_shape = grad.dense_shape self.Exit() if self.outer_context: self.outer_context.Enter() if values.get_shape().is_fully_defined(): values_shape = tensor_shape.TensorShape([tensor_shape.Dimension(1)] + values.get_shape().dims[1:]) if self.outer_context: self.outer_context.Enter() values_acc = constant_op.constant( 0, values.dtype, shape=values_shape, name="b_acc") if self.outer_context: self.outer_context.Exit() else: values_shape = _resource_safe_shape(op.inputs[0])[1:] values_shape = array_ops.concat([[1], values_shape], 0) values_acc = array_ops.zeros(values_shape, dtype=values.dtype) indices_acc = constant_op.constant([0], indices.dtype) shape_acc = None if dense_shape is not None: if dense_shape.get_shape().is_fully_defined(): if self.outer_context: self.outer_context.Enter() shape_acc = constant_op.constant( 0, dense_shape.dtype, shape=dense_shape.get_shape()) if self.outer_context: self.outer_context.Exit() else: shape_acc = array_ops.zeros_like( array_ops.shape_internal( op.inputs[0], optimize=False, out_type=dense_shape.dtype), optimize=False) if self.outer_context: self.outer_context.Exit() self.Enter() self.AddName(values_acc.name) self.AddName(indices_acc.name) init_acc = [indices_acc, values_acc] if shape_acc is not None: self.AddName(shape_acc.name) init_acc.append(shape_acc) # Set use_input_shape=False since the accumulator tensors will grow in # size. If use_input_shape=True, the _update_input call below will result in # incompatible shapes. enter_acc = [ _Enter( x, self._name, is_constant=False, parallel_iterations=self._parallel_iterations, use_input_shape=False, name="b_acc") for x in init_acc ] # Manually set appropriate partial shapes. enter_acc[0].set_shape([None]) if values_acc.shape.dims is not None: enter_acc[1].set_shape([None] + values_acc.shape.as_list()[1:]) self.loop_enters.extend(enter_acc) merge_acc = [merge([x, x], name="b_acc")[0] for x in enter_acc] switch_acc = [switch(x, self._pivot) for x in merge_acc] # The actual accumulation. acc_indexed_slices = [ array_ops.concat([xa[1], xv], 0) for xa, xv in zip(switch_acc[:2], [indices, values]) ] if shape_acc is not None: # For the shape we just keep the maximum acc_indexed_slices.append(math_ops.maximum(dense_shape, switch_acc[2][1])) next_acc = [_NextIteration(x) for x in acc_indexed_slices] for xm, xn in zip(merge_acc, next_acc): xm.op._update_input(1, xn) # pylint: disable=protected-access exit_acc = [exit(x[0], name="b_acc") for x in switch_acc] self.loop_exits.extend(exit_acc) self.ExitResult(exit_acc) return ops.IndexedSlices( indices=exit_acc[0], values=exit_acc[1], dense_shape=exit_acc[2] if shape_acc is not None else None) def _InitializeValues(self, values): """Makes the values known to this context.""" self._values = set() for x in values: if isinstance(x, ops.Tensor): self._values.add(x.name) else: raise TypeError("Type %s not supported" % type(x)) def _BuildLoop(self, pred, body, original_loop_vars, loop_vars, shape_invariants): """Core: Add the loop termination condition and body to the graph.""" flat_loop_vars = nest.flatten(original_loop_vars, expand_composites=True) # Let the context know the loop variables so the loop variables # would be added in the outer contexts properly. 
self._InitializeValues(loop_vars) real_vars = loop_vars if self._outer_context: real_vars = [self._outer_context.AddValue(x) for x in loop_vars] with ops.control_dependencies(None): enter_vars = [ _Enter( x, self._name, is_constant=False, parallel_iterations=self._parallel_iterations, use_input_shape=(shape_invariants is None)) for x in real_vars ] for x in enter_vars: x.graph.prevent_feeding(x) if self._outer_context: self._outer_context.AddInnerOp(x.op) # Finds the closest enclosing non-None control pivot. outer_context = self._outer_context control_pivot = None while outer_context is not None and control_pivot is None: control_pivot = outer_context.GetControlPivot() # pylint: disable=protected-access outer_context = outer_context._outer_context # pylint: enable=protected-access if control_pivot is not None: for var in enter_vars: if util.IsLoopConstantEnter(var.op.inputs[0].op): # pylint: disable=protected-access var.op._add_control_input(control_pivot.op) # pylint: enable=protected-access _SetShapeInvariants(real_vars, enter_vars, shape_invariants) # Fix the control inputs and control flow context of these enter ops. self._FixControlInputsAndContext(enter_vars) self._InitializeValues(enter_vars) self._loop_enters = enter_vars merge_vars = [merge([x, x])[0] for x in enter_vars] self._pivot_for_pred = merge_vars[0] # Build the graph for pred. merge_vars_with_tensor_arrays = ( _convert_flows_to_tensorarrays(flat_loop_vars, merge_vars)) packed_vars = nest.pack_sequence_as( structure=original_loop_vars, flat_sequence=merge_vars_with_tensor_arrays, expand_composites=True) c = ops.convert_to_tensor(pred(*packed_vars)) self._pivot = loop_cond(c, name="LoopCond") switch_vars = [_SwitchRefOrTensor(x, self._pivot) for x in merge_vars] # Build the graph for body. vars_for_body = [_Identity(x[1]) for x in switch_vars] self._pivot_for_body = vars_for_body[0] # Convert TensorArray flow variables inside the context back into # their associated TensorArrays for calling the body. vars_for_body_with_tensor_arrays = ( _convert_flows_to_tensorarrays(flat_loop_vars, vars_for_body)) packed_vars_for_body = nest.pack_sequence_as( structure=original_loop_vars, flat_sequence=vars_for_body_with_tensor_arrays, expand_composites=True) pre_summaries = ops.get_collection(ops.GraphKeys._SUMMARY_COLLECTION) # pylint: disable=protected-access body_result = body(*packed_vars_for_body) post_summaries = ops.get_collection(ops.GraphKeys._SUMMARY_COLLECTION) # pylint: disable=protected-access if not nest.is_sequence_or_composite(body_result): body_result = [body_result] if len(post_summaries) > len(pre_summaries): new_summaries = post_summaries[len(pre_summaries):] summary_ref = ops.get_collection_ref(ops.GraphKeys._SUMMARY_COLLECTION) # pylint: disable=protected-access summary_ref[:] = pre_summaries with ops.control_dependencies(new_summaries): def map_fn(x): # TODO(apassos) figure out how to trigger with tensor arrays as well if isinstance(x, tensor_array_ops.TensorArray): return x return array_ops.identity(x) body_result = nest.map_structure( map_fn, body_result, expand_composites=True) # Compare the structure types of input and output of body. # For backwards compatibility, the first layer is forced to a list # during this comparison, because inputs are typically lists and # outputs of the body are typically tuples. 
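    # For example, a body that returns the tuple (i + 1, x) is accepted when
    # `loop_vars` was given as the list [i, x].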
nest.assert_same_structure( list(packed_vars_for_body), list(body_result), expand_composites=True) # Store body_result to keep track of TensorArrays returned by body original_body_result = body_result # Convert TensorArrays returned by body into their flow variables result = nest.map_structure( _convert_tensorarray_to_flow, nest.flatten(body_result, expand_composites=True), expand_composites=True) result = ops.convert_n_to_tensor_or_composite(result) # Add NextIteration and the back edges to complete the loop. if len(merge_vars) != len(result): raise ValueError("Number of inputs and outputs of body must match " "loop_vars: %d, %d" % (len(merge_vars), len(result))) next_vars = [] for m, v in zip(merge_vars, result): next_vars.append(_AddNextAndBackEdge(m, v)) # Add the exit ops. exit_vars = [exit(x[0]) for x in switch_vars] self._loop_exits = exit_vars # Exit the loop. self.ExitResult(exit_vars) return original_body_result, exit_vars def BuildLoop(self, pred, body, loop_vars, shape_invariants, return_same_structure): """Add the loop termination condition and body to the graph.""" # Keep original_loop_vars to identify which are TensorArrays original_loop_vars = loop_vars # Convert TensorArrays to their flow variables loop_vars = nest.map_structure( _convert_tensorarray_to_flow, nest.flatten(loop_vars, expand_composites=False), expand_composites=True) loop_vars = ops.convert_n_to_tensor_or_composite(loop_vars) if shape_invariants is None: shape_invariants = nest.map_structure( _get_shape_invariant, loop_vars, expand_composites=False) loop_vars = nest.flatten(loop_vars, expand_composites=True) try: self.Enter() # _BuildLoop calls _update_input in several places. _mutation_lock() # ensures a Session.run call cannot occur between creating and mutating # new ops. with ops.get_default_graph()._mutation_lock(): # pylint: disable=protected-access original_body_result, exit_vars = self._BuildLoop( pred, body, original_loop_vars, loop_vars, shape_invariants) finally: self.Exit() flat_result = nest.flatten(original_body_result, expand_composites=True) # Convert TensorArray flow variables outside the context back into # their associated TensorArrays for returning to caller. exit_vars_with_tensor_arrays = ( _convert_flows_to_tensorarrays(flat_result, exit_vars)) packed_exit_vars = nest.pack_sequence_as( structure=original_body_result, flat_sequence=exit_vars_with_tensor_arrays, expand_composites=True) if return_same_structure: return packed_exit_vars else: return packed_exit_vars[0] if len(exit_vars) == 1 else packed_exit_vars def _FixControlInputsAndContext(self, enters): graph = ops.get_default_graph() # pylint: disable=protected-access for e in enters: if isinstance(e, ops.Tensor): xs = [e] else: raise TypeError("Type %s not supported" % type(e)) for x in xs: inp_op = x.op.inputs[0].op control_inputs = graph._control_dependencies_for_inputs([inp_op]) outer_control_inputs = [ op for op in control_inputs if self._IsInOuterContext(op) ] x.op._set_control_flow_context(self) x.op._add_control_inputs(outer_control_inputs) graph._record_op_seen_by_control_dependencies(x.op) # pylint: enable=protected-access def IsWhileContext(self): return True # @TODO(b/133606651) Replace "shape_invariants" with "loop_vars_signature". # pylint: disable=redefined-outer-name @tf_export("while_loop", v1=[]) def while_loop_v2(cond, body, loop_vars, shape_invariants=None, parallel_iterations=10, back_prop=True, swap_memory=False, maximum_iterations=None, name=None): """Repeat `body` while the condition `cond` is true. 
`cond` is a callable returning a boolean scalar tensor. `body` is a callable returning a (possibly nested) tuple, namedtuple or list of tensors of the same arity (length and structure) and types as `loop_vars`. `loop_vars` is a (possibly nested) tuple, namedtuple or list of tensors that is passed to both `cond` and `body`. `cond` and `body` both take as many arguments as there are `loop_vars`. In addition to regular Tensors or IndexedSlices, the body may accept and return TensorArray objects. The flows of the TensorArray objects will be appropriately forwarded between loops and during gradient calculations. Note that `while_loop` calls `cond` and `body` *exactly once* (inside the call to `while_loop`, and not at all during `Session.run()`). `while_loop` stitches together the graph fragments created during the `cond` and `body` calls with some additional graph nodes to create the graph flow that repeats `body` until `cond` returns false. For correctness, `tf.while_loop()` strictly enforces shape invariants for the loop variables. A shape invariant is a (possibly partial) shape that is unchanged across the iterations of the loop. An error will be raised if the shape of a loop variable after an iteration is determined to be more general than or incompatible with its shape invariant. For example, a shape of [11, None] is more general than a shape of [11, 17], and [11, 21] is not compatible with [11, 17]. By default (if the argument `shape_invariants` is not specified), it is assumed that the initial shape of each tensor in `loop_vars` is the same in every iteration. The `shape_invariants` argument allows the caller to specify a less specific shape invariant for each loop variable, which is needed if the shape varies between iterations. The `tf.Tensor.set_shape` function may also be used in the `body` function to indicate that the output loop variable has a particular shape. The shape invariant for SparseTensor and IndexedSlices are treated specially as follows: a) If a loop variable is a SparseTensor, the shape invariant must be TensorShape([r]) where r is the rank of the dense tensor represented by the sparse tensor. It means the shapes of the three tensors of the SparseTensor are ([None], [None, r], [r]). NOTE: The shape invariant here is the shape of the SparseTensor.dense_shape property. It must be the shape of a vector. b) If a loop variable is an IndexedSlices, the shape invariant must be a shape invariant of the values tensor of the IndexedSlices. It means the shapes of the three tensors of the IndexedSlices are (shape, [shape[0]], [shape.ndims]). `while_loop` implements non-strict semantics, enabling multiple iterations to run in parallel. The maximum number of parallel iterations can be controlled by `parallel_iterations`, which gives users some control over memory consumption and execution order. For correct programs, `while_loop` should return the same result for any parallel_iterations > 0. For training, TensorFlow stores the tensors that are produced in the forward inference and are needed in back propagation. These tensors are a main source of memory consumption and often cause OOM errors when training on GPUs. When the flag swap_memory is true, we swap out these tensors from GPU to CPU. This for example allows us to train RNN models with very long sequences and large batches. Args: cond: A callable that represents the termination condition of the loop. body: A callable that represents the loop body. 
loop_vars: A (possibly nested) tuple, namedtuple or list of numpy array, `Tensor`, and `TensorArray` objects. shape_invariants: The shape invariants for the loop variables. parallel_iterations: The number of iterations allowed to run in parallel. It must be a positive integer. back_prop: Whether backprop is enabled for this while loop. swap_memory: Whether GPU-CPU memory swap is enabled for this loop. maximum_iterations: Optional maximum number of iterations of the while loop to run. If provided, the `cond` output is AND-ed with an additional condition ensuring the number of iterations executed is no greater than `maximum_iterations`. name: Optional name prefix for the returned tensors. Returns: The output tensors for the loop variables after the loop. The return value has the same structure as `loop_vars`. Raises: TypeError: if `cond` or `body` is not callable. ValueError: if `loop_vars` is empty. Example: ```python i = tf.constant(0) c = lambda i: tf.less(i, 10) b = lambda i: tf.add(i, 1) r = tf.while_loop(c, b, [i]) ``` Example with nesting and a namedtuple: ```python import collections Pair = collections.namedtuple('Pair', 'j, k') ijk_0 = (tf.constant(0), Pair(tf.constant(1), tf.constant(2))) c = lambda i, p: i < 10 b = lambda i, p: (i + 1, Pair((p.j + p.k), (p.j - p.k))) ijk_final = tf.while_loop(c, b, ijk_0) ``` Example using shape_invariants: ```python i0 = tf.constant(0) m0 = tf.ones([2, 2]) c = lambda i, m: i < 10 b = lambda i, m: [i+1, tf.concat([m, m], axis=0)] tf.while_loop( c, b, loop_vars=[i0, m0], shape_invariants=[i0.get_shape(), tf.TensorShape([None, 2])]) ``` Example which demonstrates non-strict semantics: In the following example, the final value of the counter `i` does not depend on `x`. So the `while_loop` can increment the counter parallel to updates of `x`. However, because the loop counter at one loop iteration depends on the value at the previous iteration, the loop counter itself cannot be incremented in parallel. Hence if we just want the final value of the counter (which we print on the line `print(sess.run(i))`), then `x` will never be incremented, but the counter will be updated on a single thread. Conversely, if we want the value of the output (which we print on the line `print(sess.run(out).shape)`), then the counter may be incremented on its own thread, while `x` can be incremented in parallel on a separate thread. In the extreme case, it is conceivable that the thread incrementing the counter runs until completion before `x` is incremented even a single time. The only thing that can never happen is that the thread updating `x` can never get ahead of the counter thread because the thread incrementing `x` depends on the value of the counter. ```python import tensorflow as tf n = 10000 x = tf.constant(list(range(n))) c = lambda i, x: i < n b = lambda i, x: (tf.compat.v1.Print(i + 1, [i]), tf.compat.v1.Print(x + 1, [i], "x:")) i, out = tf.while_loop(c, b, (0, x)) with tf.compat.v1.Session() as sess: print(sess.run(i)) # prints [0] ... [9999] # The following line may increment the counter and x in parallel. # The counter thread may get ahead of the other thread, but not the # other way around. 
So you may see things like # [9996] x:[9987] # meaning that the counter thread is on iteration 9996, # while the other thread is on iteration 9987 print(sess.run(out).shape) ``` """ return while_loop( cond=cond, body=body, loop_vars=loop_vars, shape_invariants=shape_invariants, parallel_iterations=parallel_iterations, back_prop=back_prop, swap_memory=swap_memory, name=name, maximum_iterations=maximum_iterations, return_same_structure=True) # pylint: disable=redefined-outer-name @tf_export(v1=["while_loop"]) def while_loop(cond, body, loop_vars, shape_invariants=None, parallel_iterations=10, back_prop=True, swap_memory=False, name=None, maximum_iterations=None, return_same_structure=False): """Repeat `body` while the condition `cond` is true. `cond` is a callable returning a boolean scalar tensor. `body` is a callable returning a (possibly nested) tuple, namedtuple or list of tensors of the same arity (length and structure) and types as `loop_vars`. `loop_vars` is a (possibly nested) tuple, namedtuple or list of tensors that is passed to both `cond` and `body`. `cond` and `body` both take as many arguments as there are `loop_vars`. In addition to regular Tensors or IndexedSlices, the body may accept and return TensorArray objects. The flows of the TensorArray objects will be appropriately forwarded between loops and during gradient calculations. Note that `while_loop` calls `cond` and `body` *exactly once* (inside the call to `while_loop`, and not at all during `Session.run()`). `while_loop` stitches together the graph fragments created during the `cond` and `body` calls with some additional graph nodes to create the graph flow that repeats `body` until `cond` returns false. For correctness, `tf.while_loop()` strictly enforces shape invariants for the loop variables. A shape invariant is a (possibly partial) shape that is unchanged across the iterations of the loop. An error will be raised if the shape of a loop variable after an iteration is determined to be more general than or incompatible with its shape invariant. For example, a shape of [11, None] is more general than a shape of [11, 17], and [11, 21] is not compatible with [11, 17]. By default (if the argument `shape_invariants` is not specified), it is assumed that the initial shape of each tensor in `loop_vars` is the same in every iteration. The `shape_invariants` argument allows the caller to specify a less specific shape invariant for each loop variable, which is needed if the shape varies between iterations. The `tf.Tensor.set_shape` function may also be used in the `body` function to indicate that the output loop variable has a particular shape. The shape invariant for SparseTensor and IndexedSlices are treated specially as follows: a) If a loop variable is a SparseTensor, the shape invariant must be TensorShape([r]) where r is the rank of the dense tensor represented by the sparse tensor. It means the shapes of the three tensors of the SparseTensor are ([None], [None, r], [r]). NOTE: The shape invariant here is the shape of the SparseTensor.dense_shape property. It must be the shape of a vector. b) If a loop variable is an IndexedSlices, the shape invariant must be a shape invariant of the values tensor of the IndexedSlices. It means the shapes of the three tensors of the IndexedSlices are (shape, [shape[0]], [shape.ndims]). `while_loop` implements non-strict semantics, enabling multiple iterations to run in parallel. 
The maximum number of parallel iterations can be controlled by `parallel_iterations`, which gives users some control over memory consumption and execution order. For correct programs, `while_loop` should return the same result for any parallel_iterations > 0. For training, TensorFlow stores the tensors that are produced in the forward inference and are needed in back propagation. These tensors are a main source of memory consumption and often cause OOM errors when training on GPUs. When the flag swap_memory is true, we swap out these tensors from GPU to CPU. This for example allows us to train RNN models with very long sequences and large batches. Args: cond: A callable that represents the termination condition of the loop. body: A callable that represents the loop body. loop_vars: A (possibly nested) tuple, namedtuple or list of numpy array, `Tensor`, and `TensorArray` objects. shape_invariants: The shape invariants for the loop variables. parallel_iterations: The number of iterations allowed to run in parallel. It must be a positive integer. back_prop: Whether backprop is enabled for this while loop. swap_memory: Whether GPU-CPU memory swap is enabled for this loop. name: Optional name prefix for the returned tensors. maximum_iterations: Optional maximum number of iterations of the while loop to run. If provided, the `cond` output is AND-ed with an additional condition ensuring the number of iterations executed is no greater than `maximum_iterations`. return_same_structure: If True, output has same structure as `loop_vars`. If eager execution is enabled, this is ignored (and always treated as True). Returns: The output tensors for the loop variables after the loop. If `return_same_structure` is True, the return value has the same structure as `loop_vars`. If `return_same_structure` is False, the return value is a Tensor, TensorArray or IndexedSlice if the length of `loop_vars` is 1, or a list otherwise. Raises: TypeError: if `cond` or `body` is not callable. ValueError: if `loop_vars` is empty. Example: ```python i = tf.constant(0) c = lambda i: tf.less(i, 10) b = lambda i: tf.add(i, 1) r = tf.while_loop(c, b, [i]) ``` Example with nesting and a namedtuple: ```python import collections Pair = collections.namedtuple('Pair', 'j, k') ijk_0 = (tf.constant(0), Pair(tf.constant(1), tf.constant(2))) c = lambda i, p: i < 10 b = lambda i, p: (i + 1, Pair((p.j + p.k), (p.j - p.k))) ijk_final = tf.while_loop(c, b, ijk_0) ``` Example using shape_invariants: ```python i0 = tf.constant(0) m0 = tf.ones([2, 2]) c = lambda i, m: i < 10 b = lambda i, m: [i+1, tf.concat([m, m], axis=0)] tf.while_loop( c, b, loop_vars=[i0, m0], shape_invariants=[i0.get_shape(), tf.TensorShape([None, 2])]) ``` Example which demonstrates non-strict semantics: In the following example, the final value of the counter `i` does not depend on `x`. So the `while_loop` can increment the counter parallel to updates of `x`. However, because the loop counter at one loop iteration depends on the value at the previous iteration, the loop counter itself cannot be incremented in parallel. Hence if we just want the final value of the counter (which we print on the line `print(sess.run(i))`), then `x` will never be incremented, but the counter will be updated on a single thread. Conversely, if we want the value of the output (which we print on the line `print(sess.run(out).shape)`), then the counter may be incremented on its own thread, while `x` can be incremented in parallel on a separate thread. 
In the extreme case, it is conceivable that the thread incrementing the counter runs until completion before `x` is incremented even a single time. The only thing that can never happen is that the thread updating `x` can never get ahead of the counter thread because the thread incrementing `x` depends on the value of the counter. ```python import tensorflow as tf n = 10000 x = tf.constant(list(range(n))) c = lambda i, x: i < n b = lambda i, x: (tf.compat.v1.Print(i + 1, [i]), tf.compat.v1.Print(x + 1, [i], "x:")) i, out = tf.while_loop(c, b, (0, x)) with tf.compat.v1.Session() as sess: print(sess.run(i)) # prints [0] ... [9999] # The following line may increment the counter and x in parallel. # The counter thread may get ahead of the other thread, but not the # other way around. So you may see things like # [9996] x:[9987] # meaning that the counter thread is on iteration 9996, # while the other thread is on iteration 9987 print(sess.run(out).shape) ``` """ # Always enable control flow v2 if building a function, regardless of toggle. executing_eagerly = context.executing_eagerly() if (util.EnableControlFlowV2(ops.get_default_graph()) and not executing_eagerly): return while_v2.while_loop( cond, body, loop_vars, shape_invariants=shape_invariants, parallel_iterations=parallel_iterations, maximum_iterations=maximum_iterations, name=name, return_same_structure=return_same_structure) with ops.name_scope(name, "while", loop_vars): if not loop_vars: raise ValueError("No loop variables provided") if not callable(cond): raise TypeError("cond must be callable.") if not callable(body): raise TypeError("body must be callable.") if parallel_iterations < 1: raise TypeError("parallel_iterations must be a positive integer.") if maximum_iterations is not None: maximum_iterations = ops.convert_to_tensor( maximum_iterations, name="maximum_iterations") if maximum_iterations.shape.ndims != 0: raise ValueError("maximum_iterations must be a scalar, saw shape: %s" % maximum_iterations.shape) if executing_eagerly: counter = 0 maximum_iterations = int(maximum_iterations.numpy()) else: counter = constant_op.constant( 0, dtype=maximum_iterations.dtype, name="iteration_counter") orig_cond = cond orig_body = body if len(loop_vars) == 1: loop_vars = (counter, loop_vars[0]) cond = lambda i, lv: ( # pylint: disable=g-long-lambda math_ops.logical_and(i < maximum_iterations, orig_cond(lv))) body = lambda i, lv: (i + 1, orig_body(lv)) else: loop_vars = (counter, loop_vars) cond = lambda i, lv: ( # pylint: disable=g-long-lambda math_ops.logical_and(i < maximum_iterations, orig_cond(*lv))) body = lambda i, lv: (i + 1, orig_body(*lv)) if executing_eagerly: try_to_pack = len(loop_vars) == 1 packed = False # whether the body result was packed into a 1-item tuple loop_var_structure = nest.map_structure(type_spec.type_spec_from_value, list(loop_vars)) while cond(*loop_vars): loop_vars = body(*loop_vars) if try_to_pack and not isinstance(loop_vars, (list, _basetuple)): packed = True loop_vars = (loop_vars,) nest.assert_same_structure(loop_var_structure, list(loop_vars)) def convert(x): if isinstance(x, tensor_array_ops.TensorArray): return x return ops.convert_to_tensor(x) loop_vars = nest.map_structure(convert, loop_vars, expand_composites=True) if maximum_iterations is not None: return loop_vars[1] else: return loop_vars[0] if packed else loop_vars if shape_invariants is not None: if maximum_iterations is not None: shape_invariants = (tensor_shape.TensorShape([]), shape_invariants) nest.assert_same_structure( loop_vars, 
shape_invariants, expand_composites=False) shape_invariants = nest.map_structure( _get_shape_invariant, loop_vars, shape_invariants, expand_composites=False) loop_context = WhileContext( maximum_iterations=maximum_iterations, parallel_iterations=parallel_iterations, back_prop=back_prop, swap_memory=swap_memory) # Only add non-nested loops to the collection. Any nested control flow will # be encapsulated in the root context. if loop_context.outer_context is None: ops.add_to_collection(ops.GraphKeys.WHILE_CONTEXT, loop_context) result = loop_context.BuildLoop(cond, body, loop_vars, shape_invariants, return_same_structure) if maximum_iterations is not None: return result[1] else: return result # pylint: enable=redefined-outer-name def _AsTensorList(x, p): """Return x as a list of Tensors or IndexedSlices. For entries of `x` that are Operations, this returns an Identity of `p` with a dependency on the operation. Args: x: A Tensor/IndexedSlices/Operation or a list or tuple of them. p: A Tensor to return for entries in `x` that are Operations. Returns: A list of Tensors or IndexedSlices. """ if not isinstance(x, (list, _basetuple)): x = [x] l = [] for v in x: if isinstance(v, ops.Operation): v = with_dependencies([v], p) v = ops.convert_to_tensor_or_composite(v) if isinstance(v, ops.Tensor): l.append(array_ops.identity(v)) else: l.append( ops.IndexedSlices( array_ops.identity(v.values), array_ops.identity(v.indices))) return l def _CheckResults(a, b): assert len(a) == len(b), ( "Values returned by a() and b() must have the same length.") for x, y in zip(a, b): assert x.dtype == y.dtype, ( "Values returned by a() [%s] and b() [%s] must have " "the same type: %s, %s." % (x.name, y.name, x.dtype.name, y.dtype.name)) def with_dependencies(dependencies, output_tensor, name=None): """Produces the content of `output_tensor` only after `dependencies`. In some cases, a user may want the output of an operation to be consumed externally only after some other dependencies have run first. This function ensures returns `output_tensor`, but only after all operations in `dependencies` have run. Note that this means that there is no guarantee that `output_tensor` will be evaluated after any `dependencies` have run. See also `tf.tuple` and `tf.group`. Args: dependencies: Iterable of operations to run before this op finishes. output_tensor: A `Tensor` or `IndexedSlices` that will be returned. name: (Optional) A name for this operation. Returns: Same as `output_tensor`. Raises: TypeError: if `output_tensor` is not a `Tensor` or `IndexedSlices`. """ if context.executing_eagerly(): return output_tensor with ops.name_scope(name, "control_dependency", list(dependencies) + [output_tensor]) as name: with ops.colocate_with(output_tensor): with ops.control_dependencies(dependencies): output_tensor = ops.convert_to_tensor_or_composite(output_tensor) if isinstance(output_tensor, ops.Tensor): return _Identity(output_tensor, name=name) else: return ops.IndexedSlices( _Identity(output_tensor.values, name=name), output_tensor.indices, output_tensor.dense_shape) def _GroupControlDeps(dev, deps, name=None): with ops.control_dependencies(deps): if dev is None: return no_op(name=name) else: with ops.device(dev): return no_op(name=name) # TODO(touts): Accept "inputs" as a list. @tf_export("group") def group(*inputs, **kwargs): """Create an op that groups multiple operations. When this op finishes, all ops in `inputs` have finished. This op has no output. See also `tf.tuple` and `tf.control_dependencies`. 
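  For example, a small sketch (the variables here are only illustrative):

  ```python
    a = tf.Variable(1.0)
    b = tf.Variable(2.0)
    updates = tf.group(a.assign(3.0), b.assign_add(1.0))
    with tf.compat.v1.Session() as sess:
      sess.run(tf.compat.v1.global_variables_initializer())
      sess.run(updates)  # Both assignments have run; `updates` has no output.
  ```
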
Args: *inputs: Zero or more tensors to group. name: A name for this operation (optional). Returns: An Operation that executes all its inputs. Raises: ValueError: If an unknown keyword argument is provided. """ if context.executing_eagerly(): return None name = kwargs.pop("name", None) if kwargs: raise ValueError("Unknown keyword arguments: " + ", ".join(kwargs.keys())) with ops.name_scope(name, "group_deps", inputs) as name: # Grouping no inputs means do nothing if not inputs: return no_op(name=name) # Sorts *inputs according to their devices. ops_on_device = {} # device -> operations specified on the device. for inp in nest.flatten(inputs, expand_composites=True): if not hasattr(inp, "device"): raise TypeError("Expected tf.group() expected Tensor arguments not " "'%s' with type '%s'" % (inp, type(inp))) dev = inp.device if dev in ops_on_device: ops_on_device[dev].append(inp) else: ops_on_device[dev] = [inp] if len(ops_on_device) == 1: # 1-level tree. The root node is the returned NoOp node. (dev, deps), = ops_on_device.items() return _GroupControlDeps(dev, deps, name=name) # 2-level tree. The root node is the returned NoOp node. # deps contains 1 NoOp node for each device. deps = [] def device_key(dev): """A sort key that allows None to be compared to strings.""" return "" if dev is None else dev for dev in sorted(ops_on_device, key=device_key): deps.append(_GroupControlDeps(dev, ops_on_device[dev])) with ops.control_dependencies(deps): return no_op(name=name) @tf_export("tuple", v1=[]) def tuple_v2(tensors, control_inputs=None, name=None): """Group tensors together. This creates a tuple of tensors with the same values as the `tensors` argument, except that the value of each tensor is only returned after the values of all tensors have been computed. `control_inputs` contains additional ops that have to finish before this op finishes, but whose outputs are not returned. This can be used as a "join" mechanism for parallel computations: all the argument tensors can be computed in parallel, but the values of any tensor returned by `tuple` are only available after all the parallel computations are done. See also `tf.group` and `tf.control_dependencies`. Args: tensors: A list of `Tensor`s or `IndexedSlices`, some entries can be `None`. control_inputs: List of additional ops to finish before returning. name: (optional) A name to use as a `name_scope` for the operation. Returns: Same as `tensors`. Raises: ValueError: If `tensors` does not contain any `Tensor` or `IndexedSlices`. TypeError: If `control_inputs` is not a list of `Operation` or `Tensor` objects. """ return tuple(tensors=tensors, name=name, control_inputs=control_inputs) # pylint: disable=redefined-builtin @tf_export(v1=["tuple"]) def tuple(tensors, name=None, control_inputs=None): # pylint: disable=redefined-builtin """Group tensors together. This creates a tuple of tensors with the same values as the `tensors` argument, except that the value of each tensor is only returned after the values of all tensors have been computed. `control_inputs` contains additional ops that have to finish before this op finishes, but whose outputs are not returned. This can be used as a "join" mechanism for parallel computations: all the argument tensors can be computed in parallel, but the values of any tensor returned by `tuple` are only available after all the parallel computations are done. See also `tf.group` and `tf.control_dependencies`. Args: tensors: A list of `Tensor`s or `IndexedSlices`, some entries can be `None`. 
name: (optional) A name to use as a `name_scope` for the operation. control_inputs: List of additional ops to finish before returning. Returns: Same as `tensors`. Raises: ValueError: If `tensors` does not contain any `Tensor` or `IndexedSlices`. TypeError: If `control_inputs` is not a list of `Operation` or `Tensor` objects. """ if context.executing_eagerly(): return tensors with ops.name_scope(name, "tuple", tensors) as name: tensors = [ t if (isinstance(t, ops.Operation) or tensor_util.is_tensor(t) or t is None) else ops.convert_to_tensor(t) for t in tensors ] gating_ops = [ t if isinstance(t, ops.Operation) else t.op for t in tensors if t is not None ] if control_inputs: for c in control_inputs: if isinstance(c, ops.Tensor): c = c.op elif not isinstance(c, ops.Operation): raise TypeError("Control input must be Operation or Tensor: %s" % c) gating_ops.append(c) # Note that in order to ensure ordering in the pbtxt, we must take care to # ensure the order here. gating_ops = sorted(set(gating_ops), key=lambda op: op._id) # Uniquify ops. if not gating_ops: raise ValueError("Must have at least one Tensor: %s" % tensors) gate = group(*gating_ops) tpl = [] for t in tensors: if tensor_util.is_tensor(t): tpl.append(with_dependencies([gate], t)) elif isinstance(t, ops.Operation): with ops.control_dependencies([gate]): tpl.append(group(t)) else: tpl.append(None) return tpl def _assert_at_most_n_true(predicates, n, msg): """Returns an Assert op that checks that at most n predicates are True. Args: predicates: list of bool scalar tensors. n: maximum number of true predicates allowed. msg: Error message. """ preds_c = array_ops.stack(predicates, name="preds_c") num_true_conditions = math_ops.reduce_sum( math_ops.cast(preds_c, dtypes.int32), name="num_true_conds") condition = math_ops.less_equal(num_true_conditions, constant_op.constant(n, name="n_true_conds")) preds_names = ", ".join(getattr(p, "name", "?") for p in predicates) error_msg = [ "%s: more than %d conditions (%s) evaluated as True:" % (msg, n, preds_names), preds_c ] return Assert(condition, data=error_msg, summarize=len(predicates)) def _case_create_default_action(predicates, actions): """Creates default action for a list of actions and their predicates. It uses the input actions to select an arbitrary as default and makes sure that corresponding predicates have valid values. Args: predicates: a list of bool scalar tensors actions: a list of callable objects which return tensors. Returns: a callable """ k = len(predicates) - 1 # could pick any predicate, action = predicates[k], actions[k] other_predicates, other_actions = predicates[:k], actions[:k] def default_action(): others_msg = ("Implementation error: " "selected default action #%d was called, but some of other " "predicates are True: " % k) default_msg = ("Input error: " "None of conditions evaluated as True:", array_ops.stack(predicates, name="preds_c")) with ops.control_dependencies([ _assert_at_most_n_true(other_predicates, n=0, msg=others_msg), Assert(predicate, data=default_msg) ]): return action() return default_action, other_predicates, other_actions def _case_verify_and_canonicalize_args(pred_fn_pairs, exclusive, name, allow_python_preds): """Verifies input arguments for the case function. Args: pred_fn_pairs: Dict or list of pairs of a boolean scalar tensor, and a callable which returns a list of tensors. exclusive: True iff at most one predicate is allowed to evaluate to `True`. name: A name for the case operation. 
allow_python_preds: if true, pred_fn_pairs may contain Python bools in addition to boolean Tensors Raises: TypeError: If `pred_fn_pairs` is not a list/dictionary. TypeError: If `pred_fn_pairs` is a list but does not contain 2-tuples. TypeError: If `fns[i]` is not callable for any i, or `default` is not callable. Returns: a tuple <list of scalar bool tensors, list of callables>. """ if not isinstance(pred_fn_pairs, (list, _basetuple, dict)): raise TypeError("fns must be a list, tuple, or dict") if isinstance(pred_fn_pairs, collections.OrderedDict): pred_fn_pairs = pred_fn_pairs.items() elif isinstance(pred_fn_pairs, dict): if context.executing_eagerly(): # No name to sort on in eager mode. Use dictionary traversal order, # which is nondeterministic in versions of Python < 3.6 if not exclusive: raise ValueError("Unordered dictionaries are not supported for the " "`pred_fn_pairs` argument when `exclusive=False` and " "eager mode is enabled.") pred_fn_pairs = list(pred_fn_pairs.items()) else: pred_fn_pairs = sorted( pred_fn_pairs.items(), key=lambda item: item[0].name) if not exclusive: logging.warn( "%s: An unordered dictionary of predicate/fn pairs was " "provided, but exclusive=False. The order of conditional " "tests is deterministic but not guaranteed.", name) for pred_fn_pair in pred_fn_pairs: if not isinstance(pred_fn_pair, _basetuple) or len(pred_fn_pair) != 2: raise TypeError("Each entry in pred_fn_pairs must be a 2-tuple") pred, fn = pred_fn_pair if isinstance(pred, ops.Tensor): if pred.dtype != dtypes.bool: raise TypeError("pred must be Tensor of type bool: %s" % pred.name) elif not allow_python_preds: raise TypeError("pred must be a Tensor, got: %s" % pred) elif not isinstance(pred, bool): raise TypeError("pred must be a Tensor or bool, got: %s" % pred) if not callable(fn): raise TypeError("fn for pred %s must be callable." % pred.name) predicates, actions = zip(*pred_fn_pairs) return predicates, actions def _case_helper(cond_fn, pred_fn_pairs, default, exclusive, name, allow_python_preds=False, **cond_kwargs): """Implementation of case that allows for different cond functions. Args: cond_fn: method that has signature and semantics of `cond` above. pred_fn_pairs: Dict or list of pairs of a boolean scalar tensor, and a callable which returns a list of tensors. default: Optional callable that returns a list of tensors. exclusive: True iff at most one predicate is allowed to evaluate to `True`. name: A name for this operation (optional). allow_python_preds: if true, pred_fn_pairs may contain Python bools in addition to boolean Tensors **cond_kwargs: keyword arguments that will be passed to `cond_fn`. Returns: The tensors returned by the first pair whose predicate evaluated to True, or those returned by `default` if none does. Raises: TypeError: If `pred_fn_pairs` is not a list/dictionary. TypeError: If `pred_fn_pairs` is a list but does not contain 2-tuples. TypeError: If `fns[i]` is not callable for any i, or `default` is not callable. 
""" predicates, actions = _case_verify_and_canonicalize_args( pred_fn_pairs, exclusive, name, allow_python_preds) with ops.name_scope(name, "case", [predicates]): if default is None: default, predicates, actions = _case_create_default_action( predicates, actions) fn = default # To eval conditions in direct order we create nested conditions in reverse: # cond_fn(c[0], true_fn=.., false_fn=cond_fn(c[1], ...)) for predicate, action in reversed(list(zip(predicates, actions))): fn = functools.partial( cond_fn, predicate, true_fn=action, false_fn=fn, **cond_kwargs) if exclusive: with ops.control_dependencies([ _assert_at_most_n_true( predicates, n=1, msg="Input error: exclusive=True") ]): return fn() else: return fn() def _indexed_case_verify_and_canonicalize_args(branch_fns, default, branch_index): """Verifies input arguments for the case function. Args: branch_fns: Dict or list of pairs of an `int` and a callable which returns a list of tensors. default: Optional callable that returns a list of tensors. branch_index: Optional int `Tensor`, which selects for the corresponding pred_fn_pair. Raises: TypeError: If `branch_fns` is not a list/dictionary. TypeError: If `branch_fns` is a list but does not contain 2-tuples or callables. TypeError: If `fns[i]` is not callable for any i, or `default` is not callable. Returns: branch_fns: validated list of callables for each branch (default last). """ if not isinstance(branch_index, ops.Tensor): raise TypeError("branch_index must a Tensor, got {}".format( type(branch_index))) if not branch_index.dtype.is_integer: raise TypeError("branch_index must an integer Tensor, got {}".format( branch_index.dtype)) if not branch_fns: raise ValueError("Must provide at least one item in branch_fns") if not isinstance(branch_fns, (list, _basetuple, dict)): raise TypeError("branch_fns must be a list, tuple, or dict") if isinstance(branch_fns, dict): branch_fns = branch_fns.items() if all(callable(fn) for fn in branch_fns): branch_fns = list(enumerate(branch_fns)) for key_fn_pair in branch_fns: if not isinstance(key_fn_pair, _basetuple) or len(key_fn_pair) != 2: raise TypeError("Each entry in branch_fns must be a 2-tuple") key, branch_fn = key_fn_pair if not isinstance(key, int): raise TypeError("key must be a Python `int`, got {}".format(type(key))) if not callable(branch_fn): raise TypeError("fn for key {} must be callable.".format(key)) keys = [p[0] for p in branch_fns] if min(keys) < 0 or max(keys) >= len(keys) or len(set(keys)) != len(keys): raise ValueError( "branch indices (keys) must form contiguous range of [0 to {}) but " "found {{{}}}".format(len(keys), ",".join(map(str, sorted(keys))))) actions = [p[1] for p in sorted(branch_fns)] if default is not None: actions.append(default) return actions def _indexed_case_helper(branch_fns, default, branch_index, name): """Implementation of case that emits the n-way indexed Case op. Args: branch_fns: Dict or list of pairs of a boolean scalar tensor, and a callable which returns a list of tensors. default: Optional callable that returns a list of tensors. branch_index: Optional int `Tensor`, which selects for the corresponding pred_fn_pair. name: A name for this operation (optional). Returns: The tensors returned by the pair whose key matched branch_index, or those returned by `default` if none does. Raises: TypeError: If `branch_fns` is not a list/dictionary. TypeError: If `branch_fns` is a list but does not contain 2-tuples or callables. TypeError: If `fns[i]` is not callable for any i, or `default` is not callable. 
""" branch_fns = _indexed_case_verify_and_canonicalize_args( branch_fns, default, branch_index) with ops.name_scope(name, "case", [branch_index]): if context.executing_eagerly(): branch_index = array_ops.where( math_ops.less(branch_index, 0) | math_ops.greater_equal(branch_index, len(branch_fns)), len(branch_fns) - 1, branch_index) return branch_fns[int(branch_index)]() return cond_v2.indexed_case(branch_index, branch_fns) @tf_export("case") def case(pred_fn_pairs, default=None, exclusive=False, strict=False, name="case"): """Create a case operation. See also `tf.switch_case`. The `pred_fn_pairs` parameter is a dict or list of pairs of size N. Each pair contains a boolean scalar tensor and a python callable that creates the tensors to be returned if the boolean evaluates to True. `default` is a callable generating a list of tensors. All the callables in `pred_fn_pairs` as well as `default` (if provided) should return the same number and types of tensors. If `exclusive==True`, all predicates are evaluated, and an exception is thrown if more than one of the predicates evaluates to `True`. If `exclusive==False`, execution stops at the first predicate which evaluates to True, and the tensors generated by the corresponding function are returned immediately. If none of the predicates evaluate to True, this operation returns the tensors generated by `default`. `tf.case` supports nested structures as implemented in `tf.contrib.framework.nest`. All of the callables must return the same (possibly nested) value structure of lists, tuples, and/or named tuples. Singleton lists and tuples form the only exceptions to this: when returned by a callable, they are implicitly unpacked to single values. This behavior is disabled by passing `strict=True`. If an unordered dictionary is used for `pred_fn_pairs`, the order of the conditional tests is not guaranteed. However, the order is guaranteed to be deterministic, so that variables created in conditional branches are created in fixed order across runs. @compatibility(eager) Unordered dictionaries are not supported in eager mode when `exclusive=False`. Use a list of tuples instead. @end_compatibility **Example 1:** Pseudocode: ``` if (x < y) return 17; else return 23; ``` Expressions: ```python f1 = lambda: tf.constant(17) f2 = lambda: tf.constant(23) r = tf.case([(tf.less(x, y), f1)], default=f2) ``` **Example 2:** Pseudocode: ``` if (x < y && x > z) raise OpError("Only one predicate may evaluate to True"); if (x < y) return 17; else if (x > z) return 23; else return -1; ``` Expressions: ```python def f1(): return tf.constant(17) def f2(): return tf.constant(23) def f3(): return tf.constant(-1) r = tf.case({tf.less(x, y): f1, tf.greater(x, z): f2}, default=f3, exclusive=True) ``` Args: pred_fn_pairs: Dict or list of pairs of a boolean scalar tensor and a callable which returns a list of tensors. default: Optional callable that returns a list of tensors. exclusive: True iff at most one predicate is allowed to evaluate to `True`. strict: A boolean that enables/disables 'strict' mode; see above. name: A name for this operation (optional). Returns: The tensors returned by the first pair whose predicate evaluated to True, or those returned by `default` if none does. Raises: TypeError: If `pred_fn_pairs` is not a list/dictionary. TypeError: If `pred_fn_pairs` is a list but does not contain 2-tuples. TypeError: If `fns[i]` is not callable for any i, or `default` is not callable. 
""" return _case_helper( cond, pred_fn_pairs, default, exclusive, name, allow_python_preds=False, strict=strict) @tf_export("switch_case") def switch_case(branch_index, branch_fns, default=None, name="switch_case"): """Create a switch/case operation, i.e. an integer-indexed conditional. See also `tf.case`. This op can be substantially more efficient than `tf.case` when exactly one branch will be selected. `tf.switch_case` is more like a C++ switch/case statement than `tf.case`, which is more like an if/elif/elif/else chain. The `branch_fns` parameter is either a dict from `int` to callables, or list of (`int, callable) pairs, or simply a list of callables (in which case the index is implicitly the key). The `branch_index` `Tensor` is used to select an element in `branch_fns` with matching `int` key, falling back to `default` if none match, or `max(keys)` if no `default` is provided. The keys must form a contiguous set from `0` to `len(branch_fns) - 1`. `tf.switch_case` supports nested structures as implemented in `tf.nest`. All callables must return the same (possibly nested) value structure of lists, tuples, and/or named tuples. **Example:** Pseudocode: ```c++ switch (branch_index) { // c-style switch case 0: return 17; case 1: return 31; default: return -1; } ``` or ```python branches = {0: lambda: 17, 1: lambda: 31} branches.get(branch_index, lambda: -1)() ``` Expressions: ```python def f1(): return tf.constant(17) def f2(): return tf.constant(31) def f3(): return tf.constant(-1) r = tf.switch_case(branch_index, branch_fns={0: f1, 1: f2}, default=f3) # Equivalent: tf.switch_case(branch_index, branch_fns={0: f1, 1: f2, 2: f3}) ``` Args: branch_index: An int Tensor specifying which of `branch_fns` should be executed. branch_fns: A `dict` mapping `int`s to callables, or a `list` of (`int, callable) pairs, or simply a list of callables (in which case the index serves as the key). Each callable must return a matching structure of tensors. default: Optional callable that returns a structure of tensors. name: A name for this operation (optional). Returns: The tensors returned by the callable identified by `branch_index`, or those returned by `default` if no key matches and `default` was provided, or those returned by the max-keyed `branch_fn` if no `default` is provided. Raises: TypeError: If `branch_fns` is not a list/dictionary. TypeError: If `branch_fns` is a list but does not contain 2-tuples or callables. TypeError: If `fns[i]` is not callable for any i, or `default` is not callable. """ return _indexed_case_helper(branch_fns, default, branch_index, name) class XLAControlFlowContext(ControlFlowContext): """Base class for XLA and TPU control flow contexts.""" def __init__(self): super(XLAControlFlowContext, self).__init__() self._name = "XLAControlFlowContext" def to_control_flow_context_def(self, context_def, export_scope=None): # pylint: disable=useless-super-delegation # NOTE(slebedev): the method is required by `ControlFlowContext`. super(XLAControlFlowContext, self).to_control_flow_context_def(context_def, export_scope) def IsXLAContext(self): return True def AddOp(self, _): pass def AddValue(self, x): return x def from_control_flow_context_def(context_def, import_scope=None): """Deserializes `context_def` into the appropriate ControlFlowContext. Args: context_def: ControlFlowContextDef proto import_scope: Optional `string`. Name scope to add. 
Returns: A ControlFlowContext subclass """ if context_def.HasField("cond_ctxt"): return CondContext.from_proto( context_def.cond_ctxt, import_scope=import_scope) if context_def.HasField("while_ctxt"): return WhileContext.from_proto( context_def.while_ctxt, import_scope=import_scope) raise NotImplementedError("Unknown ControlFlowContextDef field: %s" % context_def.WhichOneof("ctxt")) ops.register_proto_function( ops.GraphKeys.COND_CONTEXT, proto_type=control_flow_pb2.CondContextDef, to_proto=CondContext.to_proto, from_proto=CondContext.from_proto) ops.register_proto_function( ops.GraphKeys.WHILE_CONTEXT, proto_type=control_flow_pb2.WhileContextDef, to_proto=WhileContext.to_proto, from_proto=WhileContext.from_proto)
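# --- Illustrative sketch (documentation only; never called by library code) ---
# The `branch_fns` argument of `switch_case` may also be a plain list of
# callables, in which case the list index is the key. The helper below is a
# hypothetical usage sketch; the branch values are made up for illustration.
def _switch_case_list_sketch():
  """Evaluates to 20: index 2 selects the third callable."""
  branch_index = constant_op.constant(2)
  branches = [lambda: constant_op.constant(0),
              lambda: constant_op.constant(10),
              lambda: constant_op.constant(20)]
  default = lambda: constant_op.constant(-1)  # used if branch_index is out of range
  return switch_case(branch_index, branch_fns=branches, default=default)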
tensorflow-master
tensorflow/python/ops/control_flow_ops.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #============================================================================== """Contains utility functions used by summary ops.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import contextlib import re from tensorflow.python.framework import ops from tensorflow.python.platform import tf_logging def collect(val, collections, default_collections): """Adds keys to a collection. Args: val: The value to add per each key. collections: A collection of keys to add. default_collections: Used if collections is None. """ if collections is None: collections = default_collections for key in collections: ops.add_to_collection(key, val) _INVALID_TAG_CHARACTERS = re.compile(r'[^-/\w\.]') def clean_tag(name): """Cleans a tag. Removes illegal characters for instance. Args: name: The original tag name to be processed. Returns: The cleaned tag name. """ # In the past, the first argument to summary ops was a tag, which allowed # arbitrary characters. Now we are changing the first argument to be the node # name. This has a number of advantages (users of summary ops now can # take advantage of the tf name scope system) but risks breaking existing # usage, because a much smaller set of characters are allowed in node names. # This function replaces all illegal characters with _s, and logs a warning. # It also strips leading slashes from the name. if name is not None: new_name = _INVALID_TAG_CHARACTERS.sub('_', name) new_name = new_name.lstrip('/') # Remove leading slashes if new_name != name: tf_logging.info('Summary name %s is illegal; using %s instead.' % (name, new_name)) name = new_name return name @contextlib.contextmanager def summary_scope(name, family=None, default_name=None, values=None): """Enters a scope used for the summary and yields both the name and tag. To ensure that the summary tag name is always unique, we create a name scope based on `name` and use the full scope name in the tag. If `family` is set, then the tag name will be '<family>/<scope_name>', where `scope_name` is `<outer_scope>/<family>/<name>`. This ensures that `family` is always the prefix of the tag (and unmodified), while ensuring the scope respects the outer scope from this summary was created. Args: name: A name for the generated summary node. family: Optional; if provided, used as the prefix of the summary tag name. default_name: Optional; if provided, used as default name of the summary. values: Optional; passed as `values` parameter to name_scope. Yields: A tuple `(tag, scope)`, both of which are unique and should be used for the tag and the scope for the summary to output. """ name = clean_tag(name) family = clean_tag(family) # Use family name in the scope to ensure uniqueness of scope/tag. 
scope_base_name = name if family is None else '{}/{}'.format(family, name) with ops.name_scope(scope_base_name, default_name, values) as scope: if family is None: tag = scope.rstrip('/') else: # Prefix our scope with family again so it displays in the right tab. tag = '{}/{}'.format(family, scope.rstrip('/')) # Note: tag is not 100% unique if the user explicitly enters a scope with # the same name as family, then later enter it again before summaries. # This is very contrived though, and we opt here to let it be a runtime # exception if tags do indeed collide. yield (tag, scope)
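# --- Illustrative sketch (documentation only; never called by library code) ---
# A hypothetical example of how clean_tag and summary_scope fit together. The
# names below are made up for illustration.
def _summary_scope_sketch():
  # 'my summary!' contains characters that are illegal in node names, so
  # clean_tag substitutes '_' for them and logs the substitution.
  name = clean_tag('my summary!')  # -> 'my_summary_'
  with summary_scope(name, family='losses') as (tag, scope):
    # Per the docstring above, `tag` is '<family>/<scope_name>' and `scope`
    # is the full name scope (ending in '/') to build the summary op under.
    return tag, scope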
tensorflow-master
tensorflow/python/ops/summary_op_util.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Functional tests for quantized operations.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.ops import array_ops from tensorflow.python.platform import test class QuantizedOpsTest(test.TestCase): def __init__(self, method_name="runTest"): super(QuantizedOpsTest, self).__init__(method_name) def testQuantizeOp(self): expected_output = [1, 1, 2, 127, 255, 255] with self.session(use_gpu=False) as sess: x = constant_op.constant( [1.0, 1.25, 1.75, 127.0, 255.0, 500.0], shape=[6], dtype=dtypes.float32) x_min = 0.0 x_max = 255.0 op = array_ops.quantize(x, x_min, x_max, dtypes.quint8, mode="MIN_FIRST") value = self.evaluate(op) self.assertArrayNear(expected_output, value.output, 0.1) def testDequantizeOp(self): expected_output = [1.0, 2.0, 4.0, 8.0, 16.0, 255.0] inp = np.array([1, 2, 4, 8, 16, 255]).astype(np.uint8) with self.session(use_gpu=False) as sess: x = constant_op.constant(inp, shape=[6], dtype=dtypes.quint8) x_min = 0.0 x_max = 255.0 op = array_ops.dequantize(x, x_min, x_max, mode="MIN_FIRST") value = self.evaluate(op) self.assertArrayNear(expected_output, value, 0.1) if __name__ == "__main__": test.main()
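# --- Illustrative sketch (documentation only) ---------------------------------
# A rough numpy approximation of what testQuantizeOp above expects from
# MIN_FIRST quantization to quint8 over [0, 255]: scale into the 8-bit range,
# round, and clamp. This is back-of-the-envelope intuition only -- the kernel's
# exact rounding may differ slightly, which is why the test uses a tolerance.
def _approx_min_first_quantize(x, x_min=0.0, x_max=255.0):
  scaled = (np.asarray(x, dtype=np.float32) - x_min) * 255.0 / (x_max - x_min)
  return np.clip(np.round(scaled), 0, 255).astype(np.uint8)

# _approx_min_first_quantize([1.0, 1.25, 1.75, 127.0, 255.0, 500.0])
# -> array([  1,   1,   2, 127, 255, 255], dtype=uint8)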
tensorflow-master
tensorflow/python/ops/quantized_ops_test.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for tensorflow.python.ops.special_math_ops.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.client import session from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import special_math_ops from tensorflow.python.platform import test from tensorflow.python.platform import tf_logging class LBetaTest(test.TestCase): @test_util.run_in_graph_and_eager_modes def test_one_dimensional_arg(self): # Should evaluate to 1 and 1/2. x_one = [1, 1.] x_one_half = [2, 1.] with self.session(use_gpu=True): self.assertAllClose( 1, self.evaluate(math_ops.exp(special_math_ops.lbeta(x_one)))) self.assertAllClose( 0.5, self.evaluate(math_ops.exp(special_math_ops.lbeta(x_one_half)))) self.assertEqual([], special_math_ops.lbeta(x_one).get_shape()) @test_util.run_deprecated_v1 def test_one_dimensional_arg_dynamic(self): # Should evaluate to 1 and 1/2. x_one = [1, 1.] x_one_half = [2, 1.] with self.session(use_gpu=True): ph = array_ops.placeholder(dtypes.float32) beta_ph = math_ops.exp(special_math_ops.lbeta(ph)) self.assertAllClose(1, beta_ph.eval(feed_dict={ph: x_one})) self.assertAllClose(0.5, beta_ph.eval(feed_dict={ph: x_one_half})) @test_util.run_deprecated_v1 def test_four_dimensional_arg_with_partial_shape_dynamic(self): x_ = np.ones((3, 2, 3, 4)) # Gamma(1) = 0! = 1 # Gamma(1 + 1 + 1 + 1) = Gamma(4) = 3! = 6 # ==> Beta([1, 1, 1, 1]) # = Gamma(1) * Gamma(1) * Gamma(1) * Gamma(1) / Gamma(1 + 1 + 1 + 1) # = 1 / 6 expected_beta_x = 1 / 6 * np.ones((3, 2, 3)) with self.session(use_gpu=True): x_ph = array_ops.placeholder(dtypes.float32, [3, 2, 3, None]) beta_ph = math_ops.exp(special_math_ops.lbeta(x_ph)) self.assertAllClose(expected_beta_x, beta_ph.eval(feed_dict={x_ph: x_})) @test_util.run_in_graph_and_eager_modes def test_two_dimensional_arg(self): # Should evaluate to 1/2. x_one_half = [[2, 1.], [2, 1.]] with self.session(use_gpu=True): self.assertAllClose( [0.5, 0.5], self.evaluate(math_ops.exp(special_math_ops.lbeta(x_one_half)))) self.assertEqual((2,), special_math_ops.lbeta(x_one_half).get_shape()) @test_util.run_deprecated_v1 def test_two_dimensional_arg_dynamic(self): # Should evaluate to 1/2. 
x_one_half = [[2, 1.], [2, 1.]] with self.session(use_gpu=True): ph = array_ops.placeholder(dtypes.float32) beta_ph = math_ops.exp(special_math_ops.lbeta(ph)) self.assertAllClose([0.5, 0.5], beta_ph.eval(feed_dict={ph: x_one_half})) @test_util.run_in_graph_and_eager_modes def test_two_dimensional_proper_shape(self): # Should evaluate to 1/2. x_one_half = [[2, 1.], [2, 1.]] with self.session(use_gpu=True): self.assertAllClose( [0.5, 0.5], self.evaluate(math_ops.exp(special_math_ops.lbeta(x_one_half)))) self.assertEqual( (2,), self.evaluate(array_ops.shape(special_math_ops.lbeta(x_one_half)))) self.assertEqual( tensor_shape.TensorShape([2]), special_math_ops.lbeta(x_one_half).get_shape()) @test_util.run_in_graph_and_eager_modes def test_complicated_shape(self): with self.session(use_gpu=True): x = ops.convert_to_tensor(np.random.rand(3, 2, 2)) self.assertAllEqual( (3, 2), self.evaluate(array_ops.shape(special_math_ops.lbeta(x)))) self.assertEqual( tensor_shape.TensorShape([3, 2]), special_math_ops.lbeta(x).get_shape()) @test_util.run_in_graph_and_eager_modes def test_length_1_last_dimension_results_in_one(self): # If there is only one coefficient, the formula still works, and we get one # as the answer, always. x_a = [5.5] x_b = [0.1] with self.session(use_gpu=True): self.assertAllClose( 1, self.evaluate(math_ops.exp(special_math_ops.lbeta(x_a))), rtol=3e-6) self.assertAllClose( 1, self.evaluate(math_ops.exp(special_math_ops.lbeta(x_b)))) self.assertEqual((), special_math_ops.lbeta(x_a).get_shape()) @test_util.run_in_graph_and_eager_modes def test_empty_rank1_returns_negative_infinity(self): with self.session(use_gpu=True): x = constant_op.constant([], shape=[0]) lbeta_x = special_math_ops.lbeta(x) expected_result = constant_op.constant(-np.inf, shape=()) self.assertAllEqual(self.evaluate(expected_result), self.evaluate(lbeta_x)) self.assertEqual(expected_result.get_shape(), lbeta_x.get_shape()) @test_util.run_in_graph_and_eager_modes def test_empty_rank2_with_zero_last_dim_returns_negative_infinity(self): with self.session(use_gpu=True): event_size = 0 for batch_size in [0, 1, 2]: x = constant_op.constant([], shape=[batch_size, event_size]) lbeta_x = special_math_ops.lbeta(x) expected_result = constant_op.constant(-np.inf, shape=[batch_size]) self.assertAllEqual(self.evaluate(expected_result), self.evaluate(lbeta_x)) self.assertEqual(expected_result.get_shape(), lbeta_x.get_shape()) @test_util.run_in_graph_and_eager_modes def test_empty_rank2_with_zero_batch_dim_returns_empty(self): with self.session(use_gpu=True): batch_size = 0 for event_size in [0, 1, 2]: x = constant_op.constant([], shape=[batch_size, event_size]) lbeta_x = special_math_ops.lbeta(x) expected_result = constant_op.constant([], shape=[batch_size]) self.assertAllEqual(self.evaluate(expected_result), self.evaluate(lbeta_x)) self.assertEqual(expected_result.get_shape(), lbeta_x.get_shape()) class BesselTest(test.TestCase): @test_util.run_in_graph_and_eager_modes def test_bessel_i0(self): x_single = np.arange(-3, 3).reshape(1, 3, 2).astype(np.float32) x_double = np.arange(-3, 3).reshape(1, 3, 2).astype(np.float64) try: from scipy import special # pylint: disable=g-import-not-at-top self.assertAllClose(special.i0(x_single), self.evaluate(special_math_ops.bessel_i0(x_single))) self.assertAllClose(special.i0(x_double), self.evaluate(special_math_ops.bessel_i0(x_double))) except ImportError as e: tf_logging.warn('Cannot test special functions: %s' % str(e)) @test_util.run_in_graph_and_eager_modes def test_bessel_i1(self): x_single 
= np.arange(-3, 3).reshape(1, 3, 2).astype(np.float32) x_double = np.arange(-3, 3).reshape(1, 3, 2).astype(np.float64) try: from scipy import special # pylint: disable=g-import-not-at-top self.assertAllClose(special.i1(x_single), self.evaluate(special_math_ops.bessel_i1(x_single))) self.assertAllClose(special.i1(x_double), self.evaluate(special_math_ops.bessel_i1(x_double))) except ImportError as e: tf_logging.warn('Cannot test special functions: %s' % str(e)) class EinsumTest(test.TestCase): simple_cases = [ 'ij,jk->ik', 'ijk,jklm->il', 'ij,jk,kl->il', 'ijk->i', 'ijk->kji', 'ji,kj->ik', 'ikl,kji->kl', 'klj,lki->ij', 'ijk,ilj->kli', 'kij,mkb->ijmb', 'ijk,ijl,ikl->i', 'i,ijk,j->k', 'ij,ij,jk,kl->il', 'ij,kj,il,jm->ml', 'a,ab,abc->abc', 'a,b,ab->ab', 'ab,ab,c->', 'ab,ab,c->c', 'ab,ab,cd,cd->', 'ab,ab,cd,cd->ac', 'ab,ab,cd,cd->cd', 'ab,ab,cd,cd,ef,ef->', 'ab,cd,ef->abcdef', 'ab,cd,ef->acdf', 'ab,cd,de->abcde', 'ab,cd,de->be', 'ab,bcd,cd->abcd', 'ab,bcd,cd->abd', 'eb,cb,fb->cef', 'abcd,ad', 'bd,db,eac->ace', 'ba,ac,da->bcd', 'ab,ab', 'ab,ba', 'abc,abc', 'abc,bac', 'abc,cba', 'dba,ead,cad->bce', 'aef,fbc,dca->bde', 'iJ,Jk->ik', 'iJ,Ki->JK', 'iJk,Jklm->Jk', 'ij, jk, kl -> il', 'a, ab, abc -> abc', 'ab, ab, cd, cd, ef, ef -> ', 'abc, bac', 'iJ, Ki -> JK', 'iJk, Jklm -> Jk', 'ii', 'ijji' ] long_cases = [ 'bca,cdb,dbf,afc->', 'efc,dbc,acf,fd->abe', 'ea,fb,gc,hd,abcd->efgh', 'ea,fb,abcd,gc,hd->efgh', 'abhe,hidj,jgba,hiab,gab', 'efc, dbc, acf, fd -> abe', 'abhe, hidj, jgba, hiab, gab', ] invalid_cases = [ # bad formats '', 'ijk ijk', 'ij.jk->ik', '...ij...,jk...->ik...', '...ij,jk...->...ik...', 'i$...', 'ij...,jk...->ik.', 'ij.,jk...->ik...', 'ij,k ->kji', 'ij,k-> kji', # axis in output that does not exist 'ij,jk->im', # incorrect number of dimensions 'ij,jkl->kl', # this is allowed in numpy but not implemented here yet 'iij,jk', 'a...a->a...', ] ellipsis_cases = [ ['...ij,...jk->...ik', 'abij,abjk->abik'], # batch mat-mat multiply ['...ij,...jk->...ik', 'abij,bjk->abik'], ['...ij,...jk->...ik', 'abij,jk->abik'], ['...ij,...jk->...ik', 'ij,jk->ik'], ['...ij,...jk->...ik', 'abij,abjk->'], ['...ij,...jk->', 'bij,bjk->'], ['...ij,...jk', 'bij,bjk'], ['...XY,...YZ', 'bXY,bYZ'], ['...ik,...i->...i', 'abik,abi->abi'], # batch mat-vec multiply ['...ij->...ji', 'aij->aji'], # batch matrix transpose ['...ij->...', 'aij->a'], # batch sum ['...,...->...', 'ij,ij->ij'], # hadamard product ['...i,...j->...ij', 'abi,abj->abij'], # outer product ['i...,j...->...ij', 'iab,jab->abij'], # ['i...,j...->ij...', 'iab,jab->ijab'], # ['...ik,...jkl,...il->...ij', 'aik,ajkl,ail->aij'], ['...', 'abc'], # copy ['...ca,cd...,d...f,afc->', 'bca,cdb,dbf,afc->'], # Tests based on https://github.com/dask/dask/pull/3412/files ['ab...,bc...->ac...', 'abxy,bcxy->acxy'], ['a...a', 'aba'], ['a...a', 'aa'], ['abc...->cba...', 'abcxy->cbaxy'], ['...ab->...a', 'xyab->xya'], ['...abc,...abcd->...d', 'xyabc,xabcd->xyd'], ['ab...,b->ab...', 'abxy,b->abxy'], ] dim_mismatch_cases = [('ijk,jkl->il', [(2, 3, 4), (3, 5, 6)]), ('...ij,...jk->...ik', [(3), (3, 4)]), (('...abcdefghijklmnopqrstuvwxyz,' '...ABCDEFGHIJKLMNOPQRSTUVWXYZ'), [tuple([1] * 27), tuple([1] * 27)])] def test_simple(self): for case in self.simple_cases: self.run_test(case) def test_long(self): for case in self.long_cases: self.run_test(case) def test_ellipsis(self): for (axes, expanded_axes) in self.ellipsis_cases: self.run_test(axes, expanded_axes) @test_util.run_deprecated_v1 def test_invalid(self): for axes in self.invalid_cases: inputs = [ 
array_ops.placeholder(dtypes.float32, shape=(3, 4)), array_ops.placeholder(dtypes.float32, shape=(3, 4)), ] with self.assertRaises(ValueError): _ = special_math_ops.einsum(axes, *inputs) @test_util.run_deprecated_v1 def test_invalid_keyword_arguments(self): m0 = array_ops.placeholder(dtypes.int32, shape=(1, None)) m1 = array_ops.placeholder(dtypes.int32, shape=(None, 1)) with self.assertRaisesRegexp( TypeError, 'invalid keyword arguments for this function: invalid1, invalid2'): _ = special_math_ops.einsum( 'ij,jk->ik', m0, m1, name='name', invalid1='value1', invalid2='value2') @test_util.run_deprecated_v1 def test_repeated_axis_single_input(self): x = array_ops.placeholder(dtypes.float32, shape=[2, 2]) with self.assertRaises(ValueError): _ = special_math_ops.einsum('ii->', x) @test_util.run_deprecated_v1 def test_dim_mismatch(self): for axes, input_shapes in self.dim_mismatch_cases: inputs = [ array_ops.placeholder(dtypes.float32, shape=shape) for shape in input_shapes ] with self.assertRaises(ValueError): _ = special_math_ops.einsum(axes, *inputs) def run_test(self, axes, expanded_axes=None): expanded_axes = expanded_axes if expanded_axes is not None else axes all_axes = {ax: np.random.randint(4, 12) for ax in expanded_axes if ax.isalpha()} input_vals = [] input_axes, _, _ = axes.partition('->') for idx in input_axes.split(','): shape = [all_axes[ax] for ax in idx if ax.isalpha()] input_vals.append(np.random.random(shape)) input_tensors = [constant_op.constant(val) for val in input_vals] output_tensor = special_math_ops.einsum(axes, *input_tensors) with self.session(use_gpu=True): output_value = self.evaluate(output_tensor) correct_value = 0 if axes == 'ijji': output = math_ops.trace(*input_tensors) correct_value = self.evaluate(output) else: correct_value = np.einsum(axes, *input_vals) err = np.abs(correct_value - output_value).max() self.assertLess(err, 1e-8) def test_input_is_placeholder(self): with ops.Graph().as_default(): m0 = array_ops.placeholder(dtypes.int32, shape=(1, None)) m1 = array_ops.placeholder(dtypes.int32, shape=(None, 1)) out = special_math_ops.einsum('ij,jk->ik', m0, m1) with session.Session() as sess: feed_dict = { m0: [[1, 2, 3]], m1: [[2], [1], [1]], } self.assertAllClose([[7]], sess.run(out, feed_dict=feed_dict)) with ops.Graph().as_default(): m0 = array_ops.placeholder(dtypes.int32, shape=(None, 3)) m1 = array_ops.placeholder(dtypes.int32, shape=(3,)) out = special_math_ops.einsum('ij,j->i', m0, m1) with session.Session() as sess: feed_dict = { m0: [[1, 2, 3]], m1: [2, 1, 1], } self.assertAllClose([7], sess.run(out, feed_dict=feed_dict)) # Tests for placeholders which have two or more None values with ops.Graph().as_default(): m0 = array_ops.placeholder(dtypes.int32, shape=(None, None, 2)) m1 = array_ops.placeholder(dtypes.int32, shape=(2, 1)) out = special_math_ops.einsum('ijk,kl->ijl', m0, m1) with session.Session() as sess: feed_dict = { m0: [[[1, 2]]], m1: [[3], [2]], } self.assertAllClose([[[7]]], sess.run(out, feed_dict=feed_dict)) with ops.Graph().as_default(): m0 = array_ops.placeholder(dtypes.int32, shape=(2, 1)) m1 = array_ops.placeholder(dtypes.int32, shape=(None, None, 2)) out = special_math_ops.einsum('kl,ijk->ijl', m0, m1) with session.Session() as sess: feed_dict = { m0: [[3], [2]], m1: [[[1, 2]]], } self.assertAllClose([[[7]]], sess.run(out, feed_dict=feed_dict)) with ops.Graph().as_default(): m0 = array_ops.placeholder(dtypes.int32, shape=(None, None, 2)) m1 = array_ops.placeholder(dtypes.int32, shape=(2,)) out = 
special_math_ops.einsum('ijk,k->ij', m0, m1) with session.Session() as sess: feed_dict = { m0: [[[1, 2]]], m1: [3, 2], } self.assertAllClose([[7]], sess.run(out, feed_dict=feed_dict)) with ops.Graph().as_default(): m0 = array_ops.placeholder(dtypes.int32, shape=(None, 2, None, 2)) m1 = array_ops.placeholder(dtypes.int32, shape=(None, 2)) out = special_math_ops.einsum('ijkl,ij->ikl', m0, m1) with session.Session() as sess: feed_dict = { m0: [[[[1, 2]], [[2, 1]]]], m1: [[3, 2]], } self.assertAllClose([[[7, 8]]], sess.run(out, feed_dict=feed_dict)) @test_util.run_in_graph_and_eager_modes def test_multiple_ellipses(self): m0 = array_ops.placeholder_with_default([[[[1, 2]], [[2, 1]]]], shape=(None, 2, None, 2)) m1 = array_ops.placeholder_with_default([[3, 2]], shape=(None, 2)) out = special_math_ops.einsum('...jkl,...j->...kl', m0, m1) self.assertAllClose([[[7, 8]]], self.evaluate(out)) def test_ellipses_with_unknown_input_dim(self): with ops.Graph().as_default(): m0 = array_ops.placeholder(dtypes.float32) m1 = array_ops.placeholder_with_default([[3, 2]], shape=(None, 2)) with self.assertRaises(ValueError): _ = special_math_ops.einsum('...jkl,...j->...kl', m0, m1) if __name__ == '__main__': test.main()
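# --- Illustrative sketch (documentation only; not invoked by the tests) -------
# The core check performed by run_test above, spelled out for one case: build
# special_math_ops.einsum on constant inputs and compare against np.einsum.
# The shapes are arbitrary; evaluate the returned tensor (in a session or
# eagerly) to compare against the numpy result.
def _einsum_matmul_sketch():
  a = np.random.random((3, 4))
  b = np.random.random((4, 5))
  tf_out = special_math_ops.einsum('ij,jk->ik',
                                   constant_op.constant(a),
                                   constant_op.constant(b))
  np_out = np.einsum('ij,jk->ik', a, b)
  return tf_out, np_out  # tf_out should match np_out up to float error.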
tensorflow-master
tensorflow/python/ops/special_math_ops_test.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Gradient checker for any ops, graphs. The gradient checker verifies numerically that an op/graph properly computes the gradients """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import gradients from tensorflow.python.ops import math_ops from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util import deprecation from tensorflow.python.util.tf_export import tf_export def _product(t): if isinstance(t, int): return t else: y = 1 for x in t: y *= x return y def _extra_feeds(extra_feed_dict, new_feeds): if not extra_feed_dict: return new_feeds r = {} r.update(extra_feed_dict) r.update(new_feeds) return r def _compute_theoretical_jacobian(x, x_shape, x_data, dy, dy_shape, dx, extra_feed_dict): """Computes the theoretical Jacobian for dy/dx. Computes the theoretical Jacobian using the ops generated by compute_gradient(). Args: x: the tensor "x". x_shape: the dimensions of x as a tuple or an array of ints. x_data: a numpy parray as the input data for x dy: the tensor "dy". dy_shape: the dimensions of dy as a tuple or an array of ints. dx: Tensor or IndexedSlices representing dx extra_feed_dict: dict that allows fixing specified tensor values during the jacobian calculation. Returns: A 2-d numpy array representing the Jacobian for dy/dx. It has "x_size" rows and "dy_size" columns where "x_size" is the number of elements in x and "dy_size" is the number of elements in dy. Raises: ValueError: If `dy` is empty but the gradient is nonzero. """ # Complex vectors are treated as vectors of twice as many reals. if x.dtype.is_complex: x_shape = tuple(x_shape) + (2,) dy_factor = 2 if dy.dtype.is_complex else 1 # To compute the jacobian, we treat x and y as one-dimensional vectors. x_size = _product(x_shape) x_val_size = _product(x_shape[1:]) # This is used for sparse gradients dy_size = _product(dy_shape) * dy_factor # Allocate 2-D Jacobian, with x dimensions smashed into the first # dimension and y dimensions smashed into the second. jacobian = np.zeros((x_size, dy_size), dtype=x.dtype.real_dtype.as_numpy_dtype) # For each of the entry of dy, we set this to be 1 and # everything else to be 0 and compute the backprop -- this will give us one # one column of the Jacobian matrix. 
dy_data = np.zeros(dy_shape, dtype=dy.dtype.as_numpy_dtype) dy_data_flat = dy_data.ravel().view(dy.dtype.real_dtype.as_numpy_dtype) sess = ops.get_default_session() for col in range(dy_size): dy_data_flat[col] = 1 if isinstance(dx, ops.IndexedSlices): backprop_indices, backprop_values = sess.run( [dx.indices, dx.values], feed_dict=_extra_feeds(extra_feed_dict, {x: x_data, dy: dy_data})) for i, v in zip(backprop_indices, backprop_values): r_begin = i * x_val_size r_end = r_begin + x_val_size jacobian[r_begin:r_end, col] += v.flat else: assert isinstance(dx, ops.Tensor), "dx = " + str(dx) backprop = sess.run( dx, feed_dict=_extra_feeds(extra_feed_dict, {x: x_data, dy: dy_data})) jacobian[:, col] = backprop.ravel().view(jacobian.dtype) dy_data_flat[col] = 0 # If the output is empty, run the gradients at least once and make sure # they produce zeros. if not dy_size: backprop = sess.run( dx, feed_dict=_extra_feeds(extra_feed_dict, {x: x_data, dy: dy_data})) if backprop.shape != x_data.shape: raise ValueError("Empty gradient has wrong shape: expected %s, got %s" % (x_data.shape, backprop.shape)) if np.any(backprop): raise ValueError("Empty tensor with nonzero gradients") logging.vlog(1, "Theoretical Jacobian =\n%s", jacobian) return jacobian def _compute_numeric_jacobian(x, x_shape, x_data, y, y_shape, delta, extra_feed_dict): """Computes the numeric Jacobian for dy/dx. Computes the numeric Jacobian by slightly perturbing the inputs and measuring the differences on the output. Args: x: the tensor "x". x_shape: the dimensions of x as a tuple or an array of ints. x_data: a numpy array as the input data for x y: the tensor "y". y_shape: the dimensions of y as a tuple or an array of ints. delta: the amount of perturbation we give to the input extra_feed_dict: dict that allows fixing specified tensor values during the jacobian calculation. Returns: A 2-d numpy array representing the Jacobian for dy/dx. It has "x_size" rows and "y_size" columns where "x_size" is the number of elements in x and "y_size" is the number of elements in y. """ # bfloat16 doesn't have enough bits to represent high precision numbers such # as delta. Convert to float32 here. Since numeric_jacobian is expected to # be the groundtruth to compare against, it shouldn't lose any information. if x.dtype == dtypes.bfloat16: x = math_ops.cast(x, dtypes.float32) # TODO(wangpeng): Now that the new x # is an output of the old x, isn't feeding to the new x a mistake? if y.dtype == dtypes.bfloat16: y = math_ops.cast(y, dtypes.float32) if x_data.dtype == dtypes.bfloat16.as_numpy_dtype: x_data = x_data.astype(np.float32) # To compute the jacobian, we treat x and y as one-dimensional vectors x_size = _product(x_shape) * (2 if x.dtype.is_complex else 1) y_size = _product(y_shape) * (2 if y.dtype.is_complex else 1) x_dtype = x.dtype.real_dtype.as_numpy_dtype y_dtype = y.dtype.real_dtype.as_numpy_dtype # Make sure we have the right types x_data = np.asarray(x_data, dtype=x.dtype.as_numpy_dtype) scale = np.asarray(2 * delta, dtype=y_dtype)[()] jacobian = np.zeros((x_size, y_size), dtype=x_dtype) # For each of the entry of x, we slightly perturbs this by adding and # subtracting a delta and then compute difference between the outputs. This # will give us one row of the Jacobian matrix. 
for row in range(x_size): x_pos = x_data.copy() x_neg = x_data.copy() x_pos.ravel().view(x_dtype)[row] += delta y_pos = y.eval(feed_dict=_extra_feeds(extra_feed_dict, {x: x_pos})) x_neg.ravel().view(x_dtype)[row] -= delta y_neg = y.eval(feed_dict=_extra_feeds(extra_feed_dict, {x: x_neg})) diff = (y_pos - y_neg) / scale jacobian[row, :] = diff.ravel().view(y_dtype) logging.vlog(1, "Numeric Jacobian =\n%s", jacobian) return jacobian def _compute_dx_and_dy(x, y, y_shape): """Returns a node to compute gradient of y wrt x.""" # We make up a dy so that we can compute the gradients. We don't really use # the value of dy -- we will always feed it. We need to add an identity node # so that we can always feed it properly. Otherwise, for the Add operation, # dx is the same as dy and we cannot fetch the tensor that we are feeding. with x.graph.as_default(): dy_orig = constant_op.constant(1.0, shape=y_shape, dtype=y.dtype) dy = array_ops.identity(dy_orig) # We compute the gradients for y wrt. x grads = gradients.gradients(y, x, dy) assert len(grads) == 1 return grads[0], dy_orig def _compute_gradient(x, x_shape, dx, y, y_shape, dy, x_init_value=None, delta=1e-3, extra_feed_dict=None): """Computes the theoretical and numerical jacobian.""" t = dtypes.as_dtype(x.dtype) allowed_types = [dtypes.float16, dtypes.bfloat16, dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128] assert t.base_dtype in allowed_types, "Don't support type %s for x" % t.name t2 = dtypes.as_dtype(y.dtype) assert t2.base_dtype in allowed_types, "Don't support type %s for y" % t2.name if x_init_value is not None: i_shape = list(x_init_value.shape) assert(list(x_shape) == i_shape), "x_shape = %s, init_data shape = %s" % ( x_shape, i_shape) x_data = x_init_value else: x_data = np.random.random_sample(x_shape).astype(t.as_numpy_dtype) if t.is_complex: x_data.imag = np.random.random_sample(x_shape) jacob_t = _compute_theoretical_jacobian( x, x_shape, x_data, dy, y_shape, dx, extra_feed_dict=extra_feed_dict) jacob_n = _compute_numeric_jacobian( x, x_shape, x_data, y, y_shape, delta, extra_feed_dict=extra_feed_dict) return jacob_t, jacob_n def _compute_gradient_list(x, x_shape, y, y_shape, x_init_value=None, delta=1e-3, init_targets=None, extra_feed_dict=None): """Compute gradients for a list of x values.""" assert isinstance(x, list) dx, dy = zip(*[_compute_dx_and_dy(xi, y, y_shape) for xi in x]) if init_targets is not None: assert isinstance(init_targets, (list, tuple)) for init in init_targets: init.run() if x_init_value is None: x_init_value = [None] * len(x) # pylint: disable=g-complex-comprehension ret = [_compute_gradient(xi, x_shapei, dxi, y, y_shape, dyi, x_init_valuei, delta, extra_feed_dict=extra_feed_dict) for xi, x_shapei, dxi, dyi, x_init_valuei in zip(x, x_shape, dx, dy, x_init_value)] return ret @tf_export(v1=["test.compute_gradient"]) @deprecation.deprecated( date=None, instructions="Use tf.test.compute_gradient in 2.0, which has better " "support for functions. Note that the two versions have different usage, " "so code change is needed.") def compute_gradient(x, x_shape, y, y_shape, x_init_value=None, delta=1e-3, init_targets=None, extra_feed_dict=None): """Computes and returns the theoretical and numerical Jacobian. If `x` or `y` is complex, the Jacobian will still be real but the corresponding Jacobian dimension(s) will be twice as large. 
This is required even if both input and output is complex since TensorFlow graphs are not necessarily holomorphic, and may have gradients not expressible as complex numbers. For example, if `x` is complex with shape `[m]` and `y` is complex with shape `[n]`, each Jacobian `J` will have shape `[m * 2, n * 2]` with J[:m, :n] = d(Re y)/d(Re x) J[:m, n:] = d(Im y)/d(Re x) J[m:, :n] = d(Re y)/d(Im x) J[m:, n:] = d(Im y)/d(Im x) Args: x: a tensor or list of tensors x_shape: the dimensions of x as a tuple or an array of ints. If x is a list, then this is the list of shapes. y: a tensor y_shape: the dimensions of y as a tuple or an array of ints. x_init_value: (optional) a numpy array of the same shape as "x" representing the initial value of x. If x is a list, this should be a list of numpy arrays. If this is none, the function will pick a random tensor as the initial value. delta: (optional) the amount of perturbation. init_targets: list of targets to run to initialize model params. extra_feed_dict: dict that allows fixing specified tensor values during the Jacobian calculation. Returns: Two 2-d numpy arrays representing the theoretical and numerical Jacobian for dy/dx. Each has "x_size" rows and "y_size" columns where "x_size" is the number of elements in x and "y_size" is the number of elements in y. If x is a list, returns a list of two numpy arrays. """ # TODO(mrry): remove argument `init_targets` if extra_feed_dict is None: extra_feed_dict = {} if isinstance(x, list): return _compute_gradient_list(x, x_shape, y, y_shape, x_init_value, delta, init_targets, extra_feed_dict=extra_feed_dict) else: if init_targets is not None: assert isinstance(init_targets, (list, tuple)) for init in init_targets: init.run() dx, dy = _compute_dx_and_dy(x, y, y_shape) ret = _compute_gradient(x, x_shape, dx, y, y_shape, dy, x_init_value, delta, extra_feed_dict=extra_feed_dict) return ret def _compute_error(grad): if isinstance(grad, tuple): grad = [grad] error = 0 for j_t, j_n in grad: if j_t.size or j_n.size: # Handle zero size tensors correctly error = np.maximum(error, np.fabs(j_t - j_n).max()) return error @tf_export(v1=["test.compute_gradient_error"]) @deprecation.deprecated( date=None, instructions="Use tf.test.compute_gradient in 2.0, which has better " "support for functions. Note that the two versions have different usage, " "so code change is needed.") def compute_gradient_error(x, x_shape, y, y_shape, x_init_value=None, delta=1e-3, init_targets=None, extra_feed_dict=None): """Computes the gradient error. Computes the maximum error for dy/dx between the computed Jacobian and the numerically estimated Jacobian. This function will modify the tensors passed in as it adds more operations and hence changing the consumers of the operations of the input tensors. This function adds operations to the current session. To compute the error using a particular device, such as a GPU, use the standard methods for setting a device (e.g. using with sess.graph.device() or setting a device function in the session constructor). Args: x: a tensor or list of tensors x_shape: the dimensions of x as a tuple or an array of ints. If x is a list, then this is the list of shapes. y: a tensor y_shape: the dimensions of y as a tuple or an array of ints. x_init_value: (optional) a numpy array of the same shape as "x" representing the initial value of x. If x is a list, this should be a list of numpy arrays. If this is none, the function will pick a random tensor as the initial value. delta: (optional) the amount of perturbation. 
init_targets: list of targets to run to initialize model params. extra_feed_dict: dict that allows fixing specified tensor values during the Jacobian calculation. Returns: The maximum error between the two Jacobians. """ grad = compute_gradient(x, x_shape, y, y_shape, x_init_value, delta, init_targets, extra_feed_dict=extra_feed_dict) return _compute_error(grad)
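# --- Illustrative usage sketch (documentation only; never called here) --------
# compute_gradient_error adds nodes to the current graph and needs a default
# session (e.g. call this helper inside `with tf.compat.v1.Session():`). The
# helper name and shapes below are hypothetical, added purely for illustration.
def _gradient_check_sketch():
  """Checks d(x**2)/dx numerically vs. symbolically; returns the max error."""
  x_shape = (2, 3)
  x = constant_op.constant(np.random.random_sample(x_shape),
                           dtype=dtypes.float64)
  y = math_ops.square(x)  # dy/dx = 2x, so both Jacobians should agree closely.
  # Expected to be very small (well under 1e-6) for float64 inputs.
  return compute_gradient_error(x, x_shape, y, x_shape)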
tensorflow-master
tensorflow/python/ops/gradient_checker.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Benchmark for Transpose op.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import time import numpy as np from tensorflow.python.client import session as session_lib from tensorflow.python.framework import constant_op from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import variables from tensorflow.python.platform import test def build_graph(device, input_shape, perm, datatype, num_iters): """builds a graph containing a sequence of conv2d operations. Args: device: String, the device to run on. input_shape: Shape of the input tensor. perm: A list of ints with the same length as input tensor's dimension. datatype: numpy data type of the input tensor. num_iters: number of iterations to run transpose. Returns: An array of tensors to run() """ with ops.device("/%s:0" % device): total_size = np.prod(input_shape) inp = np.arange(1, total_size + 1, dtype=datatype).reshape(input_shape) t = constant_op.constant(inp, shape=input_shape) outputs = [] transpose_op = array_ops.transpose(t, perm) outputs.append(transpose_op) for _ in range(1, num_iters): with ops.control_dependencies([transpose_op]): transpose_op = array_ops.transpose(t, perm) outputs.append(transpose_op) return control_flow_ops.group(*outputs) class TransposeBenchmark(test.Benchmark): """Benchmark transpose!""" def _run_graph(self, device, input_shape, perm, num_iters, datatype): """runs the graph and print its execution time. Args: device: String, the device to run on. input_shape: Shape of the input tensor. perm: A list of ints with the same length as input tensor's dimension. num_iters: Number of iterations to run the benchmark. datatype: numpy data type of the input tensor. Returns: The duration of the run in seconds. """ graph = ops.Graph() with graph.as_default(): outputs = build_graph(device, input_shape, perm, datatype, num_iters) with session_lib.Session(graph=graph) as session: variables.global_variables_initializer().run() # warmup runs session.run(outputs) start_time = time.time() session.run(outputs) duration = (time.time() - start_time) / num_iters throughput = np.prod( np.array(input_shape)) * datatype().itemsize * 2 / duration / 1e9 print("%s %s inputshape:%s perm:%s %d %.6fsec, %.4fGB/s." 
% (device, str(datatype), str(input_shape).replace(" ", ""), str(perm).replace(" ", ""), num_iters, duration, throughput)) name_template = ( "transpose_{device}_{dtype}_input_shape_{inputshape}_perm_{perm}") self.report_benchmark( name=name_template.format( device=device, dtype=str(datatype).replace(" ", ""), inputshape=str(input_shape).replace(" ", ""), perm=str(perm).replace(" ", "")).replace(" ", ""), iters=num_iters, wall_time=duration) return duration def benchmark_transpose(self): print("transpose benchmark:") datatypes = [np.complex128, np.float64, np.float32, np.float16, np.int8] small_shapes = [[2, 20, 20, 20, 16], [2, 16, 20, 20, 20]] * 2 small_shapes += [[2, 100, 100, 16], [2, 16, 100, 100]] * 2 small_shapes += [[2, 5000, 16], [2, 16, 5000]] * 2 small_perms = [[0, 4, 1, 2, 3], [0, 2, 3, 4, 1]] + [[4, 1, 2, 3, 0]] * 2 small_perms += [[0, 3, 1, 2], [0, 2, 3, 1]] + [[3, 1, 2, 0]] * 2 small_perms += [[0, 2, 1]] * 2 + [[2, 1, 0]] * 2 large_shapes = [[2, 40, 40, 40, 32], [2, 40, 40, 40, 64]] * 2 + [[ 2, 300, 300, 32 ], [2, 300, 300, 64]] * 2 + [[2, 100000, 32], [2, 100000, 64]] * 2 large_perms = [[0, 4, 1, 2, 3], [0, 2, 3, 4, 1]] + [[4, 1, 2, 3, 0]] * 2 + [ [0, 3, 1, 2], [0, 2, 3, 1] ] + [[3, 1, 2, 0]] * 2 + [[0, 2, 1]] * 2 + [[2, 1, 0]] * 2 num_iters = 40 for datatype in datatypes: for ishape, perm in zip(small_shapes, small_perms): self._run_graph("gpu", ishape, perm, num_iters, datatype) if datatype is not np.complex128: if datatype is not np.float16: for ishape, perm in zip(large_shapes, large_perms): self._run_graph("gpu", ishape, perm, num_iters, datatype) small_dim_large_shapes = [[2, 10000, 3], [2, 3, 10000], [2, 10000, 8], [2, 8, 10000]] small_dim_small_shapes = [[2, 5000, 3], [2, 3, 5000], [2, 5000, 8], [2, 8, 5000]] small_dim_perms = [[0, 2, 1]] * 4 num_iters = 320 small_dim_large_shape_datatypes = [np.float64, np.float32, np.int8] for datatype in small_dim_large_shape_datatypes: for ishape, perm in zip(small_dim_large_shapes, small_dim_perms): self._run_graph("gpu", ishape, perm, num_iters, datatype) small_dim_small_shape_datatypes = [np.complex128, np.float16] for datatype in small_dim_small_shape_datatypes: for ishape, perm in zip(small_dim_small_shapes, small_dim_perms): self._run_graph("gpu", ishape, perm, num_iters, datatype) if __name__ == "__main__": test.main()
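# --- Hedged arithmetic sketch (not part of transpose_benchmark.py) --------
# Shows how the GB/s figure printed by _run_graph is derived: one read plus
# one write of the full tensor per transpose, divided by the per-iteration
# wall time. The 2.1 ms duration is a made-up placeholder.
import numpy as np

input_shape = [2, 300, 300, 32]
itemsize = np.float32().itemsize                  # 4 bytes per element
duration = 2.1e-3                                 # hypothetical seconds/iter
throughput = np.prod(np.array(input_shape)) * itemsize * 2 / duration / 1e9
print("%.4f GB/s" % throughput)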
tensorflow-master
tensorflow/python/ops/transpose_benchmark.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tensor Handle Operations.""" # pylint: disable=g-bad-name from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.core.framework import resource_handle_pb2 from tensorflow.python import pywrap_tensorflow_internal from tensorflow.python.framework import device as pydev from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import gen_data_flow_ops from tensorflow.python.util import compat from tensorflow.python.util.tf_export import tf_export def encode_resource_handle(resource_handle): """Encode a ResourceHandle proto as custom numpy struct type.""" return np.asarray(bytearray(resource_handle.SerializeToString()), dtype=dtypes.np_resource) class TensorHandle(object): """Represents a handle for a live tensor in a session.""" def __init__(self, handle, dtype, session): """Constructs a new tensor handle. A tensor handle for a persistent tensor is a python string that has the form of "tensor_name;unique_id;device_name". Args: handle: A tensor handle. dtype: The data type of the tensor represented by `handle`. session: The session in which the tensor is produced. """ self._handle = compat.as_str_any(handle) self._resource_handle = None self._dtype = dtype self._session = session self._auto_gc_enabled = True def __del__(self): if self._auto_gc_enabled: self._session._register_dead_handle(self.handle) def __str__(self): return self._handle def _get_resource_handle(self): """The ResourceHandle representation of this handle.""" if not self._resource_handle: self._resource_handle = resource_handle_pb2.ResourceHandleProto() self._resource_handle.device = self._handle.split(";")[-1] self._resource_handle.container = ( pywrap_tensorflow_internal.TENSOR_HANDLE_KEY) self._resource_handle.name = self._handle return self._resource_handle def to_numpy_array(self): """Convert a TensorHandle object to a feedable numpy value. Returns: A numpy array of a custom struct type that can be used as a feed value to run(). """ return encode_resource_handle(self._get_resource_handle()) @property def handle(self): """The string representation of this handle.""" return self._handle def eval(self): """Return the value of the tensor represented by this handle.""" if not self._auto_gc_enabled: raise TypeError("Persistent tensor %s may have already been deleted." % self.handle) holder, reader = _get_handle_reader(self._session.graph, self._handle, self._dtype) return self._session.run(reader, feed_dict={holder: self._handle}) def delete(self): """Force the deletion of this persistent tensor.""" if not self._auto_gc_enabled: raise TypeError("Persistent tensor %s may have already been deleted." 
% self.handle) self._auto_gc_enabled = False holder, deleter = _get_handle_deleter(self._session.graph, 0, self._handle) self._session.run(deleter, feed_dict={holder: self.handle}) def get_raw_handle(self): """Return the raw handle of the tensor. Note that the method disables the automatic garbage collection of this persistent tensor. The caller is now responsible for managing the life time of the tensor. """ self._auto_gc_enabled = False return self._handle @staticmethod def _get_device_name(handle): """The device name encoded in the handle.""" handle_str = compat.as_str_any(handle) return pydev.canonical_name(handle_str.split(";")[-1]) @staticmethod def _get_reader_key(handle): """The graph key for reader.""" handle_parts = str(handle).split(";") return handle_parts[0] + ";" + handle_parts[-1] @staticmethod def _get_mover_key(feeder, handle): """The graph key for mover.""" return feeder.op.name + ";" + TensorHandle._get_reader_key(handle) @tf_export(v1=["get_session_handle"]) def get_session_handle(data, name=None): """Return the handle of `data`. This is EXPERIMENTAL and subject to change. Keep `data` "in-place" in the runtime and create a handle that can be used to retrieve `data` in a subsequent run(). Combined with `get_session_tensor`, we can keep a tensor produced in one run call in place, and use it as the input in a future run call. Args: data: A tensor to be stored in the session. name: Optional name prefix for the return tensor. Returns: A scalar string tensor representing a unique handle for `data`. Raises: TypeError: if `data` is not a Tensor. Example: ```python c = tf.multiply(a, b) h = tf.compat.v1.get_session_handle(c) h = sess.run(h) p, a = tf.compat.v1.get_session_tensor(h.handle, tf.float32) b = tf.multiply(a, 10) c = sess.run(b, feed_dict={p: h.handle}) ``` """ if not isinstance(data, ops.Tensor): raise TypeError("`data` must be of type Tensor.") # Colocate this operation with data. with ops.colocate_with(data): return gen_data_flow_ops.get_session_handle(data, name=name) @tf_export(v1=["get_session_tensor"]) def get_session_tensor(handle, dtype, name=None): """Get the tensor of type `dtype` by feeding a tensor handle. This is EXPERIMENTAL and subject to change. Get the value of the tensor from a tensor handle. The tensor is produced in a previous run() and stored in the state of the session. Args: handle: The string representation of a persistent tensor handle. dtype: The type of the output tensor. name: Optional name prefix for the return tensor. Returns: A pair of tensors. The first is a placeholder for feeding a tensor handle and the second is the tensor in the session state keyed by the tensor handle. Example: ```python c = tf.multiply(a, b) h = tf.compat.v1.get_session_handle(c) h = sess.run(h) p, a = tf.compat.v1.get_session_tensor(h.handle, tf.float32) b = tf.multiply(a, 10) c = sess.run(b, feed_dict={p: h.handle}) ``` """ handle_device = TensorHandle._get_device_name(handle) with ops.device(handle_device): holder = array_ops.placeholder(dtypes.string) _register_handle_feeder(holder.graph, holder, dtype) tensor = gen_data_flow_ops.get_session_tensor(holder, dtype, name=name) return (holder, tensor) @tf_export(v1=["delete_session_tensor"]) def delete_session_tensor(handle, name=None): """Delete the tensor for the given tensor handle. This is EXPERIMENTAL and subject to change. Delete the tensor of a given tensor handle. The tensor is produced in a previous run() and stored in the state of the session. 
Args: handle: The string representation of a persistent tensor handle. name: Optional name prefix for the return tensor. Returns: A pair of graph elements. The first is a placeholder for feeding a tensor handle and the second is a deletion operation. """ handle_device = TensorHandle._get_device_name(handle) with ops.device(handle_device): holder = array_ops.placeholder(dtypes.string) deleter = gen_data_flow_ops.delete_session_tensor(holder, name=name) return (holder, deleter) def _register_handle_feeder(graph, feeder, dtype): graph._handle_feeders[feeder.op.name] = dtype def _get_handle_feeder(graph, feeder): return graph._handle_feeders.get(feeder.op.name) def _get_handle_reader(graph, handle, dtype): """Return a read subgraph for this handle.""" graph_key = TensorHandle._get_reader_key(handle) result = graph._handle_readers.get(graph_key) if result is None: # Create reader if we haven't done it. handle_device = TensorHandle._get_device_name(handle) with graph.as_default(), graph.device(handle_device): holder = array_ops.placeholder(dtypes.string) _register_handle_feeder(holder.graph, holder, dtype) reader = gen_data_flow_ops.get_session_tensor(holder, dtype) result = (holder, reader) graph._handle_readers[graph_key] = result return result def _get_handle_mover(graph, feeder, handle): """Return a move subgraph for this pair of feeder and handle.""" dtype = _get_handle_feeder(graph, feeder) if dtype is None: return None handle_device = TensorHandle._get_device_name(handle) if feeder.op.device == handle_device: return None # Now we know we have to move the tensor. graph_key = TensorHandle._get_mover_key(feeder, handle) result = graph._handle_movers.get(graph_key) if result is None: # Create mover if we haven't done it. holder, reader = _get_handle_reader(graph, handle, dtype) with graph.as_default(), graph.device(feeder.op.device): mover = gen_data_flow_ops.get_session_handle(reader) result = (holder, mover) graph._handle_movers[graph_key] = result return result def _get_handle_deleter(graph, deleter_key, handle): """Return a deletion subgraph for this handle.""" result = graph._handle_deleters.get(deleter_key) if result is None: # Create deleter if we haven't done it. handle_device = TensorHandle._get_device_name(handle) with graph.as_default(), graph.device(handle_device): holder = array_ops.placeholder(dtypes.string) deleter = gen_data_flow_ops.delete_session_tensor(holder) result = (holder, deleter) graph._handle_deleters[deleter_key] = result return result
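# --- Hedged usage sketch (not part of session_ops.py) ---------------------
# Ties together get_session_handle, get_session_tensor and
# delete_session_tensor defined above (TF 1.x graph mode); the placeholders
# `a` and `b` are invented for the example.
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

a = tf.placeholder(tf.float32, shape=[])
b = tf.placeholder(tf.float32, shape=[])
c = tf.multiply(a, b)
handle_op = tf.get_session_handle(c)
with tf.Session() as sess:
  h = sess.run(handle_op, feed_dict={a: 2.0, b: 3.0})
  p, x = tf.get_session_tensor(h.handle, tf.float32)
  print(sess.run(x * 10.0, feed_dict={p: h.handle}))  # 60.0
  # Explicitly free the persistent tensor held by the session.
  dp, delete_op = tf.delete_session_tensor(h.handle)
  sess.run(delete_op, feed_dict={dp: h.handle})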
tensorflow-master
tensorflow/python/ops/session_ops.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Implementation of tf.sets.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.ops import gen_set_ops from tensorflow.python.util.tf_export import tf_export _VALID_DTYPES = set([ dtypes.int8, dtypes.int16, dtypes.int32, dtypes.int64, dtypes.uint8, dtypes.uint16, dtypes.string]) @tf_export("sets.size", v1=["sets.size", "sets.set_size"]) def set_size(a, validate_indices=True): """Compute number of unique elements along last dimension of `a`. Args: a: `SparseTensor`, with indices sorted in row-major order. validate_indices: Whether to validate the order and range of sparse indices in `a`. Returns: `int32` `Tensor` of set sizes. For `a` ranked `n`, this is a `Tensor` with rank `n-1`, and the same 1st `n-1` dimensions as `a`. Each value is the number of unique elements in the corresponding `[0...n-1]` dimension of `a`. Raises: TypeError: If `a` is an invalid types. """ a = sparse_tensor.convert_to_tensor_or_sparse_tensor(a, name="a") if not isinstance(a, sparse_tensor.SparseTensor): raise TypeError("Expected `SparseTensor`, got %s." % a) if a.values.dtype.base_dtype not in _VALID_DTYPES: raise TypeError("Invalid dtype %s." % a.values.dtype) # pylint: disable=protected-access return gen_set_ops.set_size( a.indices, a.values, a.dense_shape, validate_indices) ops.NotDifferentiable("SetSize") ops.NotDifferentiable("DenseToDenseSetOperation") ops.NotDifferentiable("DenseToSparseSetOperation") ops.NotDifferentiable("SparseToSparseSetOperation") def _convert_to_tensors_or_sparse_tensors(a, b): """Convert to tensor types, and flip order if necessary. Args: a: `Tensor` or `SparseTensor` of the same type as `b`. b: `Tensor` or `SparseTensor` of the same type as `a`. Returns: Tuple of `(a, b, flipped)`, where `a` and `b` have been converted to `Tensor` or `SparseTensor`, and `flipped` indicates whether the order has been flipped to make it dense,sparse instead of sparse,dense (since the set ops do not support the latter). """ a = sparse_tensor.convert_to_tensor_or_sparse_tensor(a, name="a") if a.dtype.base_dtype not in _VALID_DTYPES: raise TypeError("'a' invalid dtype %s." % a.dtype) b = sparse_tensor.convert_to_tensor_or_sparse_tensor(b, name="b") if b.dtype.base_dtype != a.dtype.base_dtype: raise TypeError("Types don't match, %s vs %s." % (a.dtype, b.dtype)) if (isinstance(a, sparse_tensor.SparseTensor) and not isinstance(b, sparse_tensor.SparseTensor)): return b, a, True return a, b, False def _set_operation(a, b, set_operation, validate_indices=True): """Compute set operation of elements in last dimension of `a` and `b`. All but the last dimension of `a` and `b` must match. 
Args: a: `Tensor` or `SparseTensor` of the same type as `b`. If sparse, indices must be sorted in row-major order. b: `Tensor` or `SparseTensor` of the same type as `a`. Must be `SparseTensor` if `a` is `SparseTensor`. If sparse, indices must be sorted in row-major order. set_operation: String indicating set operation. See SetOperationOp::SetOperationFromContext for valid values. validate_indices: Whether to validate the order and range of sparse indices in `a` and `b`. Returns: A `SparseTensor` with the same rank as `a` and `b`, and all but the last dimension the same. Elements along the last dimension contain the results of the set operation. Raises: TypeError: If inputs are invalid types. ValueError: If `a` is sparse and `b` is dense. """ if isinstance(a, sparse_tensor.SparseTensor): if isinstance(b, sparse_tensor.SparseTensor): indices, values, shape = gen_set_ops.sparse_to_sparse_set_operation( a.indices, a.values, a.dense_shape, b.indices, b.values, b.dense_shape, set_operation, validate_indices) else: raise ValueError("Sparse,Dense is not supported, but Dense,Sparse is. " "Please flip the order of your inputs.") elif isinstance(b, sparse_tensor.SparseTensor): indices, values, shape = gen_set_ops.dense_to_sparse_set_operation( a, b.indices, b.values, b.dense_shape, set_operation, validate_indices) else: indices, values, shape = gen_set_ops.dense_to_dense_set_operation( a, b, set_operation, validate_indices) return sparse_tensor.SparseTensor(indices, values, shape) @tf_export( "sets.intersection", v1=["sets.intersection", "sets.set_intersection"]) def set_intersection(a, b, validate_indices=True): """Compute set intersection of elements in last dimension of `a` and `b`. All but the last dimension of `a` and `b` must match. Example: ```python import tensorflow as tf import collections # Represent the following array of sets as a sparse tensor: # a = np.array([[{1, 2}, {3}], [{4}, {5, 6}]]) a = collections.OrderedDict([ ((0, 0, 0), 1), ((0, 0, 1), 2), ((0, 1, 0), 3), ((1, 0, 0), 4), ((1, 1, 0), 5), ((1, 1, 1), 6), ]) a = tf.SparseTensor(list(a.keys()), list(a.values()), dense_shape=[2,2,2]) # b = np.array([[{1}, {}], [{4}, {5, 6, 7, 8}]]) b = collections.OrderedDict([ ((0, 0, 0), 1), ((1, 0, 0), 4), ((1, 1, 0), 5), ((1, 1, 1), 6), ((1, 1, 2), 7), ((1, 1, 3), 8), ]) b = tf.SparseTensor(list(b.keys()), list(b.values()), dense_shape=[2, 2, 4]) # `tf.sets.intersection` is applied to each aligned pair of sets. tf.sets.intersection(a, b) # The result will be equivalent to either of: # # np.array([[{1}, {}], [{4}, {5, 6}]]) # # collections.OrderedDict([ # ((0, 0, 0), 1), # ((1, 0, 0), 4), # ((1, 1, 0), 5), # ((1, 1, 1), 6), # ]) ``` Args: a: `Tensor` or `SparseTensor` of the same type as `b`. If sparse, indices must be sorted in row-major order. b: `Tensor` or `SparseTensor` of the same type as `a`. If sparse, indices must be sorted in row-major order. validate_indices: Whether to validate the order and range of sparse indices in `a` and `b`. Returns: A `SparseTensor` whose shape is the same rank as `a` and `b`, and all but the last dimension the same. Elements along the last dimension contain the intersections. """ a, b, _ = _convert_to_tensors_or_sparse_tensors(a, b) return _set_operation(a, b, "intersection", validate_indices) @tf_export( "sets.difference", v1=["sets.difference", "sets.set_difference"]) def set_difference(a, b, aminusb=True, validate_indices=True): """Compute set difference of elements in last dimension of `a` and `b`. All but the last dimension of `a` and `b` must match. 
Example: ```python import tensorflow as tf import collections # Represent the following array of sets as a sparse tensor: # a = np.array([[{1, 2}, {3}], [{4}, {5, 6}]]) a = collections.OrderedDict([ ((0, 0, 0), 1), ((0, 0, 1), 2), ((0, 1, 0), 3), ((1, 0, 0), 4), ((1, 1, 0), 5), ((1, 1, 1), 6), ]) a = tf.SparseTensor(list(a.keys()), list(a.values()), dense_shape=[2, 2, 2]) # np.array([[{1, 3}, {2}], [{4, 5}, {5, 6, 7, 8}]]) b = collections.OrderedDict([ ((0, 0, 0), 1), ((0, 0, 1), 3), ((0, 1, 0), 2), ((1, 0, 0), 4), ((1, 0, 1), 5), ((1, 1, 0), 5), ((1, 1, 1), 6), ((1, 1, 2), 7), ((1, 1, 3), 8), ]) b = tf.SparseTensor(list(b.keys()), list(b.values()), dense_shape=[2, 2, 4]) # `set_difference` is applied to each aligned pair of sets. tf.sets.difference(a, b) # The result will be equivalent to either of: # # np.array([[{2}, {3}], [{}, {}]]) # # collections.OrderedDict([ # ((0, 0, 0), 2), # ((0, 1, 0), 3), # ]) ``` Args: a: `Tensor` or `SparseTensor` of the same type as `b`. If sparse, indices must be sorted in row-major order. b: `Tensor` or `SparseTensor` of the same type as `a`. If sparse, indices must be sorted in row-major order. aminusb: Whether to subtract `b` from `a`, vs vice versa. validate_indices: Whether to validate the order and range of sparse indices in `a` and `b`. Returns: A `SparseTensor` whose shape is the same rank as `a` and `b`, and all but the last dimension the same. Elements along the last dimension contain the differences. """ a, b, flipped = _convert_to_tensors_or_sparse_tensors(a, b) if flipped: aminusb = not aminusb return _set_operation(a, b, "a-b" if aminusb else "b-a", validate_indices) @tf_export( "sets.union", v1=["sets.union", "sets.set_union"]) def set_union(a, b, validate_indices=True): """Compute set union of elements in last dimension of `a` and `b`. All but the last dimension of `a` and `b` must match. Example: ```python import tensorflow as tf import collections # [[{1, 2}, {3}], [{4}, {5, 6}]] a = collections.OrderedDict([ ((0, 0, 0), 1), ((0, 0, 1), 2), ((0, 1, 0), 3), ((1, 0, 0), 4), ((1, 1, 0), 5), ((1, 1, 1), 6), ]) a = tf.SparseTensor(list(a.keys()), list(a.values()), dense_shape=[2, 2, 2]) # [[{1, 3}, {2}], [{4, 5}, {5, 6, 7, 8}]] b = collections.OrderedDict([ ((0, 0, 0), 1), ((0, 0, 1), 3), ((0, 1, 0), 2), ((1, 0, 0), 4), ((1, 0, 1), 5), ((1, 1, 0), 5), ((1, 1, 1), 6), ((1, 1, 2), 7), ((1, 1, 3), 8), ]) b = tf.SparseTensor(list(b.keys()), list(b.values()), dense_shape=[2, 2, 4]) # `set_union` is applied to each aligned pair of sets. tf.sets.union(a, b) # The result will be a equivalent to either of: # # np.array([[{1, 2, 3}, {2, 3}], [{4, 5}, {5, 6, 7, 8}]]) # # collections.OrderedDict([ # ((0, 0, 0), 1), # ((0, 0, 1), 2), # ((0, 0, 2), 3), # ((0, 1, 0), 2), # ((0, 1, 1), 3), # ((1, 0, 0), 4), # ((1, 0, 1), 5), # ((1, 1, 0), 5), # ((1, 1, 1), 6), # ((1, 1, 2), 7), # ((1, 1, 3), 8), # ]) ``` Args: a: `Tensor` or `SparseTensor` of the same type as `b`. If sparse, indices must be sorted in row-major order. b: `Tensor` or `SparseTensor` of the same type as `a`. If sparse, indices must be sorted in row-major order. validate_indices: Whether to validate the order and range of sparse indices in `a` and `b`. Returns: A `SparseTensor` whose shape is the same rank as `a` and `b`, and all but the last dimension the same. Elements along the last dimension contain the unions. """ a, b, _ = _convert_to_tensors_or_sparse_tensors(a, b) return _set_operation(a, b, "union", validate_indices)
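# --- Hedged usage sketch (not part of sets_impl.py) -----------------------
# The docstring examples above use SparseTensor inputs; this shows the
# dense,dense path through _set_operation with made-up values. Assumes eager
# execution (TF 2.x) so the prints show concrete results.
import tensorflow as tf

a = tf.constant([[1, 2, 3], [4, 5, 6]], dtype=tf.int64)
b = tf.constant([[1, 7, 8], [4, 5, 9]], dtype=tf.int64)
inter = tf.sets.intersection(a, b)    # row-wise: {1} and {4, 5}
diff = tf.sets.difference(a, b)       # row-wise: {2, 3} and {6}
# Results are SparseTensors; densify (zero-padded) for inspection.
print(tf.sparse.to_dense(inter))
print(tf.sparse.to_dense(diff))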
tensorflow-master
tensorflow/python/ops/sets_impl.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for the currently experimental in-graph batch ops.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import threading import time from tensorflow.python.eager import context from tensorflow.python.framework import dtypes from tensorflow.python.framework import function from tensorflow.python.framework import test_util from tensorflow.python.framework.errors import InvalidArgumentError from tensorflow.python.ops import array_ops from tensorflow.python.ops import batch_ops from tensorflow.python.ops import gen_batch_ops from tensorflow.python.ops import script_ops from tensorflow.python.platform import test def delayed_plus1(x): """Sleeps for 100ms then returns x+1.""" time.sleep(0.1) return x + 1 @test_util.run_all_in_graph_and_eager_modes class BatchOpsTest(test.TestCase): """Tests for batch_ops.{un,}batch.""" # Test for only non eager mode as batching in eager context as a functionality # is TBD. def testBasicBatch(self): """Tests that a single batched tensor executes together and only once.""" if context.executing_eagerly(): return with self.cached_session() as sess: inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1]) batched, index, _ = batch_ops.batch( [inp], num_batch_threads=1, max_batch_size=2, batch_timeout_micros=36000000, grad_timeout_micros=0, batching_queue="") thread_results = [] def worker(): thread_results.extend( sess.run([batched, index], feed_dict={inp: [1]})) worker_thread = threading.Thread(target=worker) worker_thread.start() main_results = sess.run([batched, index], feed_dict={inp: [2]}) worker_thread.join() # At this point either the thread or the main did the batch and the other # should have empty results. if list(thread_results[0][0]): batch_t = thread_results[0][0] index_t = thread_results[1] empty_b = main_results[0][0] empty_m = main_results[1] else: batch_t = main_results[0][0] index_t = main_results[1] empty_b = thread_results[0][0] empty_m = thread_results[1] # Check that both the inputs made it out exactly once. self.assertAllEqual(sorted(batch_t), (1, 2)) # Check that we get 2 rows in the index tensor. self.assertEqual(len(index_t), 2) # Check that the other ones are empty. 
self.assertEqual(len(empty_b), 0) self.assertEqual(len(empty_m), 0) def testBatchWithPadding(self): """Test that batching with padding up to an allowed batch size works.""" if context.executing_eagerly(): return with self.cached_session() as sess: inp = array_ops.placeholder(dtype=dtypes.int32, shape=[2]) batched, index, _ = batch_ops.batch( [inp], num_batch_threads=1, max_batch_size=10, batch_timeout_micros=100000, # 100ms allowed_batch_sizes=[5, 10], grad_timeout_micros=0, batching_queue="") thread_results = [] def worker(): thread_results.extend( sess.run([batched, index], feed_dict={inp: [1, 3]})) worker_thread = threading.Thread(target=worker) worker_thread.start() main_results = sess.run([batched, index], feed_dict={inp: [2, 4]}) worker_thread.join() # At this point either the thread or the main did the batch and the other # should have empty results. if list(thread_results[0][0]): batch_t = thread_results[0][0] else: batch_t = main_results[0][0] # Check that the batch tensor incorporates the padding. self.assertEqual(len(batch_t), 5) def testMultipleBatch(self): """Tests that multiple batched tensors execute together.""" if context.executing_eagerly(): return with self.cached_session() as sess: inp0 = array_ops.placeholder(dtype=dtypes.int32, shape=[1]) inp1 = array_ops.placeholder(dtype=dtypes.int32, shape=[1]) batched, _, _ = batch_ops.batch( [inp0, inp1], num_batch_threads=1, max_batch_size=2, batch_timeout_micros=36000000, grad_timeout_micros=0, batching_queue="") thread_results = [] def worker(): thread_results.extend( sess.run([batched], feed_dict={inp0: [1], inp1: [2]})) worker_thread = threading.Thread(target=worker) worker_thread.start() main_results = sess.run([batched], feed_dict={inp0: [2], inp1: [3]}) worker_thread.join() # At this point either the thread or the main did the batch and the other # should have empty results. if list(thread_results[0][0]): batch_t = thread_results[0] empty_t = main_results[0] else: batch_t = main_results[0] empty_t = thread_results[0] # Assert that the tensors were batched together. 
self.assertAllEqual(sorted(batch_t[0]), [1, 2]) self.assertAllEqual(sorted(batch_t[1]), [2, 3]) self.assertAllEqual(empty_t[0], []) self.assertAllEqual(empty_t[1], []) def testIllegalBatchDifferentDim0Sizes(self): """Tests illegally feeding tensors with different dim0 sizes.""" if context.executing_eagerly(): return with self.cached_session() as sess: inp0 = array_ops.placeholder(dtype=dtypes.int32, shape=[1]) inp1 = array_ops.placeholder(dtype=dtypes.int32, shape=[2]) batched, index, _ = batch_ops.batch( [inp0, inp1], num_batch_threads=1, max_batch_size=2, batch_timeout_micros=0, grad_timeout_micros=0, batching_queue="") with self.assertRaises(Exception) as raised: _ = sess.run([batched, index], feed_dict={inp0: [0], inp1: [1, 2]}) self.assertGreater( raised.exception.message.find("must have equal 0th-dimension size"), 0) def testBasicUnbatch(self): """Tests that batch and unbatch work together.""" if context.executing_eagerly(): return with self.cached_session() as sess: inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1]) batched, index, id_t = batch_ops.batch( [inp], num_batch_threads=1, max_batch_size=10, batch_timeout_micros=100000, # 100ms allowed_batch_sizes=[3, 10], grad_timeout_micros=0, batching_queue="") computation = batched[0] + 1 result = batch_ops.unbatch(computation, index, id_t, timeout_micros=1000000, shared_name="unbatch") thread_results = [] def worker(): thread_results.extend(sess.run([result], feed_dict={inp: [1]})) worker_thread = threading.Thread(target=worker) worker_thread.start() main_results = sess.run([result], feed_dict={inp: [2]}) worker_thread.join() self.assertEqual(thread_results[0], [2]) self.assertEqual(main_results[0], [3]) def testBasicUnbatchDecorated(self): """Tests that the batch_function decorator works.""" if context.executing_eagerly(): return with self.cached_session() as sess: # TODO(apassos): Removing this line causes test flakiness! Ideally should # be investigated. 
default_inp = array_ops.placeholder_with_default(2, shape=[]) # pylint: disable=unused-variable @batch_ops.batch_function(1, 10, 100000) def computation(in_t): self.assertTrue(in_t.shape is not None) return in_t + 1 inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1]) result = computation(inp) thread_results = [] def worker(): thread_results.extend(sess.run([result], feed_dict={inp: [1]})) worker_thread = threading.Thread(target=worker) worker_thread.start() main_results = sess.run([result], feed_dict={inp: [2]}) worker_thread.join() self.assertEqual(thread_results[0], [2]) self.assertEqual(main_results[0], [3]) def testBatchDecoratedWithCapturedInput(self): """Tests that the batch_function decorator works.""" if context.executing_eagerly(): return with self.cached_session() as sess: captured_inp0 = array_ops.placeholder_with_default(2, shape=[]) captured_inp1 = array_ops.placeholder_with_default(1, shape=[]) @batch_ops.batch_function(1, 10, 100000) def computation(in_t): return in_t + captured_inp0 - captured_inp1 inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1]) result = computation(inp) thread_results = [] def worker(): thread_results.extend(sess.run([result], feed_dict={inp: [1]})) worker_thread = threading.Thread(target=worker) worker_thread.start() main_results = sess.run([result], feed_dict={inp: [2]}) worker_thread.join() self.assertEqual(thread_results[0], [2]) self.assertEqual(main_results[0], [3]) def testBatchFunctionOp(self): """Tests that the batch_function op works.""" if context.executing_eagerly(): return with self.cached_session() as sess: @function.Defun(dtypes.int32) def computation(in_t): return in_t + 1 inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1]) result = gen_batch_ops.batch_function( [inp], num_batch_threads=1, max_batch_size=10, batch_timeout_micros=100000, Tout=[dtypes.int32], f=computation, captured_tensors=computation.captured_inputs) thread_results = [] def worker(): thread_results.extend(sess.run([result], feed_dict={inp: [1]})) worker_thread = threading.Thread(target=worker) worker_thread.start() main_results = sess.run([result], feed_dict={inp: [2]}) worker_thread.join() self.assertEqual(thread_results[0], [2]) self.assertEqual(main_results[0], [3]) def testBatchFunctionOpWithCapturedInput(self): """Tests that batch_function op works with captured input.""" if context.executing_eagerly(): return with self.cached_session() as sess: captured_inp0 = array_ops.placeholder_with_default(2, shape=[]) captured_inp1 = array_ops.placeholder_with_default(1, shape=[]) inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1]) @function.Defun(dtypes.int32) def computation(inp): return inp + captured_inp0 - captured_inp1 result = gen_batch_ops.batch_function( num_batch_threads=1, max_batch_size=10, batch_timeout_micros=100000, # 100ms allowed_batch_sizes=[3, 10], batching_queue="", f=computation, in_tensors=[inp], captured_tensors=computation.captured_inputs, Tout=[o.type for o in computation.definition.signature.output_arg]) thread_results = [] def worker(): thread_results.extend(sess.run([result], feed_dict={inp: [1]})) worker_thread = threading.Thread(target=worker) worker_thread.start() main_results = sess.run([result], feed_dict={inp: [2]}) worker_thread.join() self.assertEqual(thread_results[0], [2]) self.assertEqual(main_results[0], [3]) def testBatchFunctionOpWithInputError(self): """Tests that batch_function op works with error in the inputs.""" if context.executing_eagerly(): return with self.cached_session() as sess: inp = 
array_ops.placeholder(dtype=dtypes.int32, shape=[1]) @function.Defun(dtypes.int32, dtypes.int32) def computation(in0, in1): return in0 + in1 result = gen_batch_ops.batch_function( [inp], # computation actually expects 2 inputs. num_batch_threads=1, max_batch_size=10, batch_timeout_micros=100000, # 100ms batching_queue="", f=computation, captured_tensors=computation.captured_inputs, Tout=[o.type for o in computation.definition.signature.output_arg]) with self.assertRaisesRegexp(InvalidArgumentError, ".*2 arguments.*but 1.*"): sess.run([result], feed_dict={inp: [2]}) def testBasicUnbatchDecoratedWithReshape(self): """Tests that the batch_function decorator works.""" if context.executing_eagerly(): return with self.cached_session() as sess: @batch_ops.batch_function(1, 10, 100000) def computation(in_t): return array_ops.reshape(in_t, [-1]) + 1 inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1, 1]) result = computation(inp) thread_results = [] def worker(): thread_results.extend(sess.run([result], feed_dict={inp: [[1]]})) worker_thread = threading.Thread(target=worker) worker_thread.start() main_results = sess.run([result], feed_dict={inp: [[2]]}) worker_thread.join() self.assertEqual(thread_results[0], [2]) self.assertEqual(main_results[0], [3]) def testUnbatchTimeout(self): """Tests that the unbatch timeout works.""" if context.executing_eagerly(): return with self.cached_session() as sess: inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1]) batched, index, id_t = batch_ops.batch( [inp], num_batch_threads=1, max_batch_size=2, batch_timeout_micros=36000000, grad_timeout_micros=0, batching_queue="") computation = batched[0] + 1 timeout_micros = 10 result = batch_ops.unbatch(computation, index, id_t, timeout_micros, shared_name="shared_unbatch") # Set up a parallel pipeline that delays the computation, but uses the # same unbatch resource object as the non-delayed pipeline. computation_delayed = script_ops.py_func(delayed_plus1, [batched[0]], dtypes.int32) result_delayed = batch_ops.unbatch(computation_delayed, index, id_t, timeout_micros, shared_name="shared_unbatch") thread_results = [] def worker(): # A first call using the non-delayed pipeline. The batcher will send an # empty tensor along the non-delayed pipeline. thread_results.extend(sess.run([result], feed_dict={inp: [1]})) worker_thread = threading.Thread(target=worker) worker_thread.start() time.sleep(0.1) # Ensure the thread's call starts first. # A second call using the delayed pipeline. The batcher will send the # batched tensor along the delayed pipeline, thus delaying the arrival of # the batched tensor at the unbatch op, relative to the empty tensor. # # TODO(olston, apassos): Avoid relying on the order in which the batch op # emits the empty tensor versus the batched one. _ = sess.run([result_delayed], feed_dict={inp: [2]}) worker_thread.join() # The thread's call should hit the timeout, and thus get 0 results. self.assertEqual(len(thread_results), 0) if __name__ == "__main__": test.main()
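# --- Hedged standalone sketch (not one of the tests above) ----------------
# The batch_function decorator pattern exercised by testBasicUnbatchDecorated,
# outside the test harness; the thread count, batch size and 100 ms timeout
# are illustrative.
import tensorflow.compat.v1 as tf
from tensorflow.python.ops import batch_ops

tf.disable_eager_execution()

@batch_ops.batch_function(1, 10, 100000)  # threads, max batch, timeout (us)
def plus_one(t):
  return t + 1

inp = tf.placeholder(dtype=tf.int32, shape=[1])
out = plus_one(inp)
with tf.Session() as sess:
  # A lone request simply waits out the 100 ms timeout and is then processed
  # as a batch of one; concurrent requests may be batched together.
  print(sess.run(out, feed_dict={inp: [2]}))  # [3]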
tensorflow-master
tensorflow/python/ops/batch_ops_test.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # pylint: disable=line-too-long """Inputs and Readers. See the [Inputs and Readers](https://tensorflow.org/api_guides/python/io_ops) guide. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.eager import context from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.lib.io import python_io from tensorflow.python.ops import gen_data_flow_ops from tensorflow.python.ops import gen_io_ops # go/tf-wildcard-import # pylint: disable=wildcard-import from tensorflow.python.ops.gen_io_ops import * # pylint: enable=wildcard-import from tensorflow.python.util import deprecation from tensorflow.python.util.tf_export import tf_export # pylint: disable=protected-access def _save(filename, tensor_names, tensors, tensor_slices=None, name="save"): """Save a list of tensors to a file with given names. Example usage without slice info: Save("/foo/bar", ["w", "b"], [w, b]) Example usage with slices: Save("/foo/bar", ["w", "w"], [slice0, slice1], tensor_slices=["4 10 0,2:-", "4 10 2,2:-"]) Args: filename: the file name of the sstable. tensor_names: a list of strings. tensors: the list of tensors to be saved. tensor_slices: Optional list of strings to specify the shape and slices of a larger virtual tensor that each tensor is a part of. If not specified each tensor is saved as a full slice. name: string. Optional name for the op. Requires: The length of tensors should match the size of tensor_names and of tensor_slices. Returns: An Operation that saves the tensors. """ if tensor_slices is None: return gen_io_ops.save(filename, tensor_names, tensors, name=name) else: return gen_io_ops.save_slices(filename, tensor_names, tensor_slices, tensors, name=name) def _restore_slice(file_pattern, tensor_name, shape_and_slice, tensor_type, name="restore_slice", preferred_shard=-1): """Restore a tensor slice from a set of files with a given pattern. Example usage: RestoreSlice("/foo/bar-?????-of-?????", "w", "10 10 0,2:-", DT_FLOAT) Args: file_pattern: the file pattern used to match a set of checkpoint files. tensor_name: the name of the tensor to restore. shape_and_slice: the shape-and-slice spec of the slice. tensor_type: the type of the tensor to restore. name: string. Optional name for the op. preferred_shard: Int. Optional shard to open first in the checkpoint file. Returns: A tensor of type "tensor_type". """ base_type = dtypes.as_dtype(tensor_type).base_dtype return gen_io_ops.restore_slice( file_pattern, tensor_name, shape_and_slice, base_type, preferred_shard, name=name) @tf_export(v1=["ReaderBase"]) class ReaderBase(object): """Base class for different Reader types, that produce a record every step. Conceptually, Readers convert string 'work units' into records (key, value pairs). 
Typically the 'work units' are filenames and the records are extracted from the contents of those files. We want a single record produced per step, but a work unit can correspond to many records. Therefore we introduce some decoupling using a queue. The queue contains the work units and the Reader dequeues from the queue when it is asked to produce a record (via Read()) but it has finished the last work unit. @compatibility(eager) Readers are not compatible with eager execution. Instead, please use `tf.data` to get data into your model. @end_compatibility """ def __init__(self, reader_ref, supports_serialize=False): """Creates a new ReaderBase. Args: reader_ref: The operation that implements the reader. supports_serialize: True if the reader implementation can serialize its state. Raises: RuntimeError: If eager execution is enabled. """ if context.executing_eagerly(): raise RuntimeError( "Readers are not supported when eager execution is enabled. " "Instead, please use tf.data to get data into your model.") self._reader_ref = reader_ref self._supports_serialize = supports_serialize @property def reader_ref(self): """Op that implements the reader.""" return self._reader_ref def read(self, queue, name=None): """Returns the next record (key, value) pair produced by a reader. Will dequeue a work unit from queue if necessary (e.g. when the Reader needs to start reading from a new file since it has finished with the previous file). Args: queue: A Queue or a mutable string Tensor representing a handle to a Queue, with string work items. name: A name for the operation (optional). Returns: A tuple of Tensors (key, value). key: A string scalar Tensor. value: A string scalar Tensor. """ if isinstance(queue, ops.Tensor): queue_ref = queue else: queue_ref = queue.queue_ref if self._reader_ref.dtype == dtypes.resource: return gen_io_ops.reader_read_v2(self._reader_ref, queue_ref, name=name) else: # For compatibility with pre-resource queues, create a ref(string) tensor # which can be looked up as the same queue by a resource manager. old_queue_op = gen_data_flow_ops.fake_queue(queue_ref) return gen_io_ops.reader_read(self._reader_ref, old_queue_op, name=name) def read_up_to(self, queue, num_records, # pylint: disable=invalid-name name=None): """Returns up to num_records (key, value) pairs produced by a reader. Will dequeue a work unit from queue if necessary (e.g., when the Reader needs to start reading from a new file since it has finished with the previous file). It may return less than num_records even before the last batch. Args: queue: A Queue or a mutable string Tensor representing a handle to a Queue, with string work items. num_records: Number of records to read. name: A name for the operation (optional). Returns: A tuple of Tensors (keys, values). keys: A 1-D string Tensor. values: A 1-D string Tensor. """ if isinstance(queue, ops.Tensor): queue_ref = queue else: queue_ref = queue.queue_ref if self._reader_ref.dtype == dtypes.resource: return gen_io_ops.reader_read_up_to_v2(self._reader_ref, queue_ref, num_records, name=name) else: # For compatibility with pre-resource queues, create a ref(string) tensor # which can be looked up as the same queue by a resource manager. old_queue_op = gen_data_flow_ops.fake_queue(queue_ref) return gen_io_ops.reader_read_up_to(self._reader_ref, old_queue_op, num_records, name=name) def num_records_produced(self, name=None): """Returns the number of records this reader has produced. This is the same as the number of Read executions that have succeeded. 
Args: name: A name for the operation (optional). Returns: An int64 Tensor. """ if self._reader_ref.dtype == dtypes.resource: return gen_io_ops.reader_num_records_produced_v2(self._reader_ref, name=name) else: return gen_io_ops.reader_num_records_produced(self._reader_ref, name=name) def num_work_units_completed(self, name=None): """Returns the number of work units this reader has finished processing. Args: name: A name for the operation (optional). Returns: An int64 Tensor. """ if self._reader_ref.dtype == dtypes.resource: return gen_io_ops.reader_num_work_units_completed_v2(self._reader_ref, name=name) else: return gen_io_ops.reader_num_work_units_completed(self._reader_ref, name=name) def serialize_state(self, name=None): """Produce a string tensor that encodes the state of a reader. Not all Readers support being serialized, so this can produce an Unimplemented error. Args: name: A name for the operation (optional). Returns: A string Tensor. """ if self._reader_ref.dtype == dtypes.resource: return gen_io_ops.reader_serialize_state_v2(self._reader_ref, name=name) else: return gen_io_ops.reader_serialize_state(self._reader_ref, name=name) def restore_state(self, state, name=None): """Restore a reader to a previously saved state. Not all Readers support being restored, so this can produce an Unimplemented error. Args: state: A string Tensor. Result of a SerializeState of a Reader with matching type. name: A name for the operation (optional). Returns: The created Operation. """ if self._reader_ref.dtype == dtypes.resource: return gen_io_ops.reader_restore_state_v2( self._reader_ref, state, name=name) else: return gen_io_ops.reader_restore_state(self._reader_ref, state, name=name) @property def supports_serialize(self): """Whether the Reader implementation can serialize its state.""" return self._supports_serialize def reset(self, name=None): """Restore a reader to its initial clean state. Args: name: A name for the operation (optional). Returns: The created Operation. """ if self._reader_ref.dtype == dtypes.resource: return gen_io_ops.reader_reset_v2(self._reader_ref, name=name) else: return gen_io_ops.reader_reset(self._reader_ref, name=name) ops.NotDifferentiable("ReaderRead") ops.NotDifferentiable("ReaderReadUpTo") ops.NotDifferentiable("ReaderNumRecordsProduced") ops.NotDifferentiable("ReaderNumWorkUnitsCompleted") ops.NotDifferentiable("ReaderSerializeState") ops.NotDifferentiable("ReaderRestoreState") ops.NotDifferentiable("ReaderReset") @tf_export(v1=["WholeFileReader"]) class WholeFileReader(ReaderBase): """A Reader that outputs the entire contents of a file as a value. To use, enqueue filenames in a Queue. The output of Read will be a filename (key) and the contents of that file (value). See ReaderBase for supported methods. @compatibility(eager) Readers are not compatible with eager execution. Instead, please use `tf.data` to get data into your model. @end_compatibility """ @deprecation.deprecated( None, "Queue-based input pipelines have been replaced by `tf.data`. Use " "`tf.data.Dataset.map(tf.read_file)`.") def __init__(self, name=None): """Create a WholeFileReader. Args: name: A name for the operation (optional). """ rr = gen_io_ops.whole_file_reader_v2(name=name) super(WholeFileReader, self).__init__(rr, supports_serialize=True) ops.NotDifferentiable("WholeFileReader") @tf_export(v1=["TextLineReader"]) class TextLineReader(ReaderBase): """A Reader that outputs the lines of a file delimited by newlines. Newlines are stripped from the output. 
See ReaderBase for supported methods. @compatibility(eager) Readers are not compatible with eager execution. Instead, please use `tf.data` to get data into your model. @end_compatibility """ # TODO(josh11b): Support serializing and restoring state. @deprecation.deprecated( None, "Queue-based input pipelines have been replaced by `tf.data`. Use " "`tf.data.TextLineDataset`.") def __init__(self, skip_header_lines=None, name=None): """Create a TextLineReader. Args: skip_header_lines: An optional int. Defaults to 0. Number of lines to skip from the beginning of every file. name: A name for the operation (optional). """ rr = gen_io_ops.text_line_reader_v2(skip_header_lines=skip_header_lines, name=name) super(TextLineReader, self).__init__(rr) ops.NotDifferentiable("TextLineReader") @tf_export(v1=["FixedLengthRecordReader"]) class FixedLengthRecordReader(ReaderBase): """A Reader that outputs fixed-length records from a file. See ReaderBase for supported methods. @compatibility(eager) Readers are not compatible with eager execution. Instead, please use `tf.data` to get data into your model. @end_compatibility """ # TODO(josh11b): Support serializing and restoring state. @deprecation.deprecated( None, "Queue-based input pipelines have been replaced by `tf.data`. Use " "`tf.data.FixedLengthRecordDataset`.") def __init__(self, record_bytes, header_bytes=None, footer_bytes=None, hop_bytes=None, name=None, encoding=None): """Create a FixedLengthRecordReader. Args: record_bytes: An int. header_bytes: An optional int. Defaults to 0. footer_bytes: An optional int. Defaults to 0. hop_bytes: An optional int. Defaults to 0. name: A name for the operation (optional). encoding: The type of encoding for the file. Defaults to none. """ rr = gen_io_ops.fixed_length_record_reader_v2( record_bytes=record_bytes, header_bytes=header_bytes, footer_bytes=footer_bytes, hop_bytes=hop_bytes, encoding=encoding, name=name) super(FixedLengthRecordReader, self).__init__(rr) ops.NotDifferentiable("FixedLengthRecordReader") @tf_export(v1=["TFRecordReader"]) class TFRecordReader(ReaderBase): """A Reader that outputs the records from a TFRecords file. See ReaderBase for supported methods. @compatibility(eager) Readers are not compatible with eager execution. Instead, please use `tf.data` to get data into your model. @end_compatibility """ # TODO(josh11b): Support serializing and restoring state. @deprecation.deprecated( None, "Queue-based input pipelines have been replaced by `tf.data`. Use " "`tf.data.TFRecordDataset`.") def __init__(self, name=None, options=None): """Create a TFRecordReader. Args: name: A name for the operation (optional). options: A TFRecordOptions object (optional). """ compression_type = python_io.TFRecordOptions.get_compression_type_string( options) rr = gen_io_ops.tf_record_reader_v2( name=name, compression_type=compression_type) super(TFRecordReader, self).__init__(rr) ops.NotDifferentiable("TFRecordReader") @tf_export(v1=["LMDBReader"]) class LMDBReader(ReaderBase): """A Reader that outputs the records from a LMDB file. See ReaderBase for supported methods. @compatibility(eager) Readers are not compatible with eager execution. Instead, please use `tf.data` to get data into your model. @end_compatibility """ @deprecation.deprecated( None, "Queue-based input pipelines have been replaced by `tf.data`. Use " "`tf.contrib.data.LMDBDataset`.") def __init__(self, name=None, options=None): """Create a LMDBReader. Args: name: A name for the operation (optional). options: A LMDBRecordOptions object (optional). 
""" del options rr = gen_io_ops.lmdb_reader(name=name) super(LMDBReader, self).__init__(rr) ops.NotDifferentiable("LMDBReader") @tf_export(v1=["IdentityReader"]) class IdentityReader(ReaderBase): """A Reader that outputs the queued work as both the key and value. To use, enqueue strings in a Queue. Read will take the front work string and output (work, work). See ReaderBase for supported methods. @compatibility(eager) Readers are not compatible with eager execution. Instead, please use `tf.data` to get data into your model. @end_compatibility """ @deprecation.deprecated( None, "Queue-based input pipelines have been replaced by `tf.data`. Use " "`tf.data.Dataset.map(...)`.") def __init__(self, name=None): """Create a IdentityReader. Args: name: A name for the operation (optional). """ rr = gen_io_ops.identity_reader_v2(name=name) super(IdentityReader, self).__init__(rr, supports_serialize=True) ops.NotDifferentiable("IdentityReader")
tensorflow-master
tensorflow/python/ops/io_ops.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Clip Operations.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import constant_op from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.ops import clip_ops from tensorflow.python.ops import numerics from tensorflow.python.platform import test class ClipOpsTest(test.TestCase): def __init__(self, method_name="runTest"): super(ClipOpsTest, self).__init__(method_name) def _testClipTensorByNorm(self, inputs, max_norm, expected): with self.cached_session() as sess: input_op = constant_op.constant(inputs) clipped = clip_ops.clip_by_norm(input_op, max_norm) check_op = numerics.add_check_numerics_ops() result, _ = self.evaluate([clipped, check_op]) self.assertAllClose(result, expected) def _testClipIndexedSlicesByNorm(self, values, indices, shape, max_norm, axes): with self.cached_session() as sess: values = constant_op.constant(values) indices = constant_op.constant(indices) shape = constant_op.constant(shape) # IndexedSlices mode indixed_slices = ops.IndexedSlices(values, indices, shape) clipped = clip_ops.clip_by_norm(indixed_slices, max_norm, axes) # clipped should be IndexedSlices self.assertIsInstance(clipped, ops.IndexedSlices) clipped = ops.convert_to_tensor(clipped) # Tensor mode dense_tensor = ops.convert_to_tensor(indixed_slices) dense_clipped = clip_ops.clip_by_norm(dense_tensor, max_norm, axes) result, expected = self.evaluate([clipped, dense_clipped]) self.assertAllClose(result, expected) @test_util.run_deprecated_v1 def testClipTensorByNorm(self): # Simple example self._testClipTensorByNorm([[-3.0, 0.0, 0.0], [4.0, 0.0, 0.0]], 4.0, [[-2.4, 0.0, 0.0], [3.2, 0.0, 0.0]]) # Zero norm self._testClipTensorByNorm([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], 4.0, [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]) def testClipIndexedSlicesByNorm(self): values = [[[-3.0, 0.0, 0.0], [4.0, 0.0, 0.0]], [[0.0, 2.0, 0.0], [0.0, 0.0, -1.0]]] indices = [2, 6] shape = [10, 2, 3] # Axes == None self._testClipIndexedSlicesByNorm(values, indices, shape, 4.0, None) # Axes == 0 self._testClipIndexedSlicesByNorm(values, indices, shape, 4.0, 0) # Axes == 1 self._testClipIndexedSlicesByNorm(values, indices, shape, 4.0, 1) # Axes == 2 self._testClipIndexedSlicesByNorm(values, indices, shape, 4.0, 1) # Axes == [0, 1] self._testClipIndexedSlicesByNorm(values, indices, shape, 4.0, [0, 1]) # Axes == [0, 1] self._testClipIndexedSlicesByNorm(values, indices, shape, 4.0, [0, 2]) # Axes == [0, 1] self._testClipIndexedSlicesByNorm(values, indices, shape, 4.0, [1, 2]) # Axes == [0, 1] self._testClipIndexedSlicesByNorm(values, indices, shape, 4.0, [0, 1, 2]) if __name__ == "__main__": test.main()
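# --- Hedged numeric sketch (not one of the tests above) -------------------
# Why the first testClipTensorByNorm case expects [[-2.4, 0, 0], [3.2, 0, 0]]:
# the global L2 norm of the input is 5, so clip_by_norm(x, 4.0) scales every
# element by 4 / 5 = 0.8.
import numpy as np

x = np.array([[-3.0, 0.0, 0.0], [4.0, 0.0, 0.0]])
max_norm = 4.0
l2_norm = np.sqrt(np.sum(x ** 2))     # 5.0
print(x * (max_norm / l2_norm))       # [[-2.4  0.   0. ] [ 3.2  0.   0. ]]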
tensorflow-master
tensorflow/python/ops/clip_ops_test.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Gradients for operators defined in data_flow_ops.py.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from six.moves import xrange # pylint: disable=redefined-builtin from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import data_flow_ops from tensorflow.python.ops import math_ops @ops.RegisterGradient("DynamicPartition") def _DynamicPartitionGrads(op, *grads): """Gradients for DynamicPartition.""" data = op.inputs[0] indices = op.inputs[1] num_partitions = op.get_attr("num_partitions") prefix_shape = array_ops.shape(indices) original_indices = array_ops.reshape( math_ops.range(math_ops.reduce_prod(prefix_shape)), prefix_shape) partitioned_indices = data_flow_ops.dynamic_partition( original_indices, indices, num_partitions) reconstructed = data_flow_ops.dynamic_stitch(partitioned_indices, grads) reconstructed = array_ops.reshape(reconstructed, array_ops.shape(data)) return [reconstructed, None] @ops.RegisterGradient("DynamicStitch") @ops.RegisterGradient("ParallelDynamicStitch") def _DynamicStitchGrads(op, grad): """Gradients for DynamicStitch and ParallelDynamicStitch.""" num_values = len(op.inputs) // 2 indices_grad = [None] * num_values def AsInt32(x): return (x if op.inputs[0].dtype == dtypes.int32 else math_ops.cast(x, dtypes.int32)) inputs = [AsInt32(op.inputs[i]) for i in xrange(num_values)] if isinstance(grad, ops.IndexedSlices): output_shape = array_ops.shape(op.outputs[0]) output_rows = output_shape[0] grad = math_ops.unsorted_segment_sum(grad.values, grad.indices, output_rows) values_grad = [array_ops.gather(grad, inp) for inp in inputs] return indices_grad + values_grad ops.NotDifferentiable("Queue") ops.NotDifferentiable("QueueEnqueue") ops.NotDifferentiable("QueueEnqueueMany") ops.NotDifferentiable("QueueDequeue") ops.NotDifferentiable("QueueDequeueMany") ops.NotDifferentiable("QueueDequeueUpTo") ops.NotDifferentiable("QueueClose") ops.NotDifferentiable("QueueSize") ops.NotDifferentiable("Stack") ops.NotDifferentiable("StackPush") ops.NotDifferentiable("StackPop") ops.NotDifferentiable("StackClose") ops.NotDifferentiable("GetSessionHandle") ops.NotDifferentiable("GetSessionHandleV2") ops.NotDifferentiable("GetSessionTensor") ops.NotDifferentiable("DeleteSessionTensor")
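# --- Hedged forward-pass sketch (not part of data_flow_grad.py) -----------
# Illustrates the trick _DynamicPartitionGrads relies on: partitioning the
# flat element indices alongside the data lets dynamic_stitch reassemble the
# partitioned pieces (or their gradients) in the original order. Values are
# illustrative; assumes eager execution (TF 2.x).
import tensorflow as tf

data = tf.constant([10., 20., 30., 40.])
partitions = tf.constant([0, 1, 0, 1])
parts = tf.dynamic_partition(data, partitions, num_partitions=2)
# parts[0] == [10., 30.], parts[1] == [20., 40.]
idx = tf.dynamic_partition(tf.range(4), partitions, num_partitions=2)
print(tf.dynamic_stitch(idx, parts))  # [10. 20. 30. 40.]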
tensorflow-master
tensorflow/python/ops/data_flow_grad.py
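The DynamicPartition gradient registered above recovers the data-ordered gradient by partitioning the flat original indices the same way the data was partitioned and then stitching the upstream gradients back. A small NumPy emulation of that bookkeeping (not the TensorFlow ops themselves), assuming two partitions:

import numpy as np

data = np.array([10., 20., 30., 40.])
partitions = np.array([0, 1, 0, 1])      # partition id of each element of `data`
upstream_grads = [np.array([1., 3.]),    # grad w.r.t. partition 0 output ([10., 30.])
                  np.array([2., 4.])]    # grad w.r.t. partition 1 output ([20., 40.])

# Partition the flat original indices exactly like the data was partitioned.
original_indices = np.arange(data.size)
partitioned_indices = [original_indices[partitions == p] for p in range(2)]

# dynamic_stitch: scatter each partition's gradient back into its original slot.
data_grad = np.zeros_like(data)
for idx, g in zip(partitioned_indices, upstream_grads):
  data_grad[idx] = g
print(data_grad)  # [1. 2. 3. 4.] -- the gradient in the original data order

The gradient with respect to `indices` is None, matching the `[reconstructed, None]` returned by _DynamicPartitionGrads.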
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """TensorFlow collective Ops.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import device from tensorflow.python.ops import gen_collective_ops def all_reduce(t, group_size, group_key, instance_key, merge_op, final_op, subdiv_offsets=(0,)): """Reduces tensors collectively, across devices. Args: t: the tensor to be reduced. group_size: the total number of tensors to be collectively reduced. Each must reside on a different device. group_key: an integer identifying the group of devices. instance_key: an integer identifying the participating group of Ops. merge_op: string naming the binary Op to be applied to compute each partial reduction. final_op: string naming the unary Op to be applied to each fully reduced value. Can be 'Id' for no operation. subdiv_offsets: a list of integer offsets into the tensor at which each independent subdivision should begin. Use [0] if no subdivision should be done. Returns: An Op implementing the distributed reduction. Raises: ValueError: if any of the input parameter constraints are not met. """ if not device.canonical_name(t.device): raise ValueError('Device assignment required for collective ops') if group_size <= 1: raise ValueError('Parameter group_size to all_reduce must be at least 2.') return gen_collective_ops.collective_reduce(t, group_size=group_size, group_key=group_key, instance_key=instance_key, merge_op=merge_op, final_op=final_op, subdiv_offsets=subdiv_offsets) def all_gather(t, group_size, group_key, instance_key): """Accumulates tensors collectively, across devices, along first dimension. Args: t: the tensor to participate in the accumulation. group_size: the total number of tensors to be collectively accumulated. Each must reside on a different device. group_key: an integer identifying the group of devices. instance_key: an integer identifying the participating group of Ops. Returns: An Op implementing the distributed operation. Raises: ValueError: if any of the input parameter constraints are not met. """ if not device.canonical_name(t.device): raise ValueError('Device assignment required for collective ops') if group_size <= 1: raise ValueError('Parameter group_size to all_gather must be at least 2.') return gen_collective_ops.collective_gather( t, shape=[0], group_size=group_size, group_key=group_key, instance_key=instance_key) def broadcast_send(t, shape, dtype, group_size, group_key, instance_key): """Broadcasts one tensor to a group of others, across devices. Args: t: the tensor to be sent. shape: the shape of the tensor being sent, which must agree with t. dtype: the type of the tensor being sent, which must agree with t. group_size: one plus the number of receiving tensors, i.e. the total number of devices participating. Each tensor must reside on a different device. 
group_key: an integer identifying the group of devices. instance_key: an integer identifying the participating group of Ops. Returns: An Op implementing the distributed broadcast send. Raises: ValueError: if any of the input parameter constraints are not met. Note that the shape and dtype arguments appear redundant since they should be obtainable from t. There are two reasons for including them. First, the shape and type of tensors passed via broadcast must be known ahead of time in their most specific form so that the receive side can allocate memory for the operation and shape/type inference can carry forward from there. Including the same declarations on the send side clarifies a commitment already made. Secondly, having nearly identical use syntax for send and receive sides may simplify tool-driven generation of broadcast. """ if not device.canonical_name(t.device): raise ValueError('Device assignment required for collective ops') if group_size <= 1: raise ValueError( 'Parameter group_size to broadcast_send must be at least 2.') if t.shape != shape: raise ValueError( 'Shape of broadcast_send tensor not equal to declared shape') if t.dtype != dtype: raise ValueError( 'Type of broadcast_send tensor not equal to declared type') return gen_collective_ops.collective_bcast_send(t, shape=shape, group_size=group_size, group_key=group_key, instance_key=instance_key) def broadcast_recv(shape, dtype, group_size, group_key, instance_key): """Receives a broadcasted tensor, across devices. Args: shape: Shape of the tensor to be received. dtype: Type of the tensor to be received. group_size: one plus the number of receiving tensors, i.e. the total number of devices participating. Each tensor must reside on a different device. group_key: an integer identifying the group of devices. instance_key: an integer identifying the participating group of Ops. Returns: An Op implementing the broadcast receive. Raises: ValueError: if any of the input parameter constraints are not met. """ if group_size <= 1: raise ValueError( 'Parameter group_size to broadcast_recv must be at least 2.') return gen_collective_ops.collective_bcast_recv(shape=shape, T=dtype, group_size=group_size, group_key=group_key, instance_key=instance_key)
tensorflow-master
tensorflow/python/ops/collective_ops.py
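To make the all_reduce contract above concrete, here is a rough NumPy emulation of its arithmetic for a three-member group, with merge_op 'Add' and final_op 'Id'; it only mimics the reduction semantics, since the real collective_reduce must be issued once per device with matching group_size, group_key and instance_key:

import numpy as np

group = [np.array([1., 2.]), np.array([3., 4.]), np.array([5., 6.])]  # one tensor per device

reduced = group[0]
for t in group[1:]:
  reduced = reduced + t           # merge_op = 'Add' combines partial values
final = reduced                   # final_op = 'Id' leaves the reduced value unchanged
results = [final for _ in group]  # every participant receives the same tensor
print(results[0])                 # [ 9. 12.]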
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilities for computing default gradients.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import dtypes from tensorflow.python.ops import resource_variable_ops def get_zeros_dtype(t): """Return the dtype for the default gradient for a Tensor.""" if t.dtype == dtypes.resource: handle_data = resource_variable_ops.get_eager_safe_handle_data(t) if (handle_data is None or not handle_data.is_set or len(handle_data.shape_and_type) != 1): # TODO(srbs): Ideally we should raise an error here but returning float32 # for backwards compatibility. return dtypes.float32 else: return handle_data.shape_and_type[0].dtype return t.dtype
tensorflow-master
tensorflow/python/ops/default_gradient.py
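The dtype fallback in get_zeros_dtype above can be sketched as a standalone function; FakeHandleData and FakeShapeAndType are illustrative stand-ins for the resource handle data, not TensorFlow types:

from collections import namedtuple

FakeHandleData = namedtuple("FakeHandleData", ["is_set", "shape_and_type"])
FakeShapeAndType = namedtuple("FakeShapeAndType", ["dtype"])

def zeros_dtype_for_resource(handle_data, default="float32"):
  # Mirrors the branch structure above: unusable handle data falls back to
  # float32 for backwards compatibility, otherwise the recorded dtype is used.
  if (handle_data is None or not handle_data.is_set or
      len(handle_data.shape_and_type) != 1):
    return default
  return handle_data.shape_and_type[0].dtype

print(zeros_dtype_for_resource(None))                                               # float32
print(zeros_dtype_for_resource(FakeHandleData(True, [FakeShapeAndType("int64")])))  # int64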
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Wrappers for primitive Neural Net (NN) Operations.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import numbers import numpy as np from tensorflow.python.eager import context from tensorflow.python.eager import monitoring from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors_impl from tensorflow.python.framework import graph_util from tensorflow.python.framework import ops from tensorflow.python.framework import random_seed from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import tensor_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import gen_nn_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import random_ops # go/tf-wildcard-import # pylint: disable=wildcard-import from tensorflow.python.ops.gen_nn_ops import * # pylint: enable=wildcard-import from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util import deprecation from tensorflow.python.util.deprecation import deprecated_args from tensorflow.python.util.deprecation import deprecated_argument_lookup from tensorflow.python.util.tf_export import tf_export # Aliases for some automatically-generated names. local_response_normalization = gen_nn_ops.lrn # pylint: disable=protected-access _ops_counter = monitoring.Counter("/tensorflow/api/ops/nn", "number of neural net ops in the graph.", "op_name") def _get_sequence(value, n, channel_index, name): """Formats a value input for gen_nn_ops.""" if value is None: value = [1] elif not isinstance(value, collections.Sized): value = [value] current_n = len(value) if current_n == n + 2: return value elif current_n == 1: value = list((value[0],) * n) elif current_n == n: value = list(value) else: raise ValueError("{} should be of length 1, {} or {} but was {}".format( name, n, n + 2, current_n)) if channel_index == 1: return [1, 1] + value else: return [1] + value + [1] def _non_atrous_convolution( input, # pylint: disable=redefined-builtin filter, # pylint: disable=redefined-builtin padding, data_format=None, # pylint: disable=redefined-builtin strides=None, name=None): """Computes sums of N-D convolutions (actually cross correlation). It is required that 1 <= N <= 3. This is used to implement the more generic `convolution` function, which extends the interface of this function with a `dilation_rate` parameter. Args: input: Rank N+2 tensor of type T of shape `[batch_size] + input_spatial_shape + [in_channels]` if `data_format` does not start with `"NC"`, or `[batch_size, in_channels] + input_spatial_shape` if `data_format` starts with `"NC"`. filter: Rank N+2 tensor of type T of shape `filter_spatial_shape + [in_channels, out_channels]`. 
Rank of either `input` or `filter` must be known. padding: Padding method to use, must be either "VALID" or "SAME". data_format: A string or None. Specifies whether the channel dimension of the `input` and output is the last dimension (default, or if `data_format` does not start with "NC"), or the second dimension (if `data_format` starts with "NC"). For N=1, the valid values are "NWC" (default) and "NCW". For N=2, the valid values are "NHWC" (default) and "NCHW". For N=3, the valid values are "NDHWC" (default) and "NCDHW". strides: Sequence of N positive integers, defaults to `[1] * N`. name: Name prefix to use. Returns: Rank N+2 tensor of type T of shape `[batch_size] + output_spatial_shape + [out_channels]`, where if padding == "SAME": output_spatial_shape = input_spatial_shape if padding == "VALID": output_spatial_shape = input_spatial_shape - filter_spatial_shape + 1. Raises: ValueError: if ranks are incompatible. """ with ops.name_scope(name, "non_atrous_convolution", [input, filter]) as scope: input = ops.convert_to_tensor(input, name="input") # pylint: disable=redefined-builtin input_shape = input.get_shape() filter = ops.convert_to_tensor(filter, name="filter") # pylint: disable=redefined-builtin filter_shape = filter.get_shape() op = _NonAtrousConvolution( input_shape, filter_shape=filter_shape, padding=padding, data_format=data_format, strides=strides, name=scope) return op(input, filter) class _NonAtrousConvolution(object): """Helper class for _non_atrous_convolution. Note that this class assumes that shapes of input and filter passed to __call__ are compatible with input_shape and filter_shape passed to the constructor. Arguments: input_shape: static input shape, i.e. input.get_shape(). filter_shape: static filter shape, i.e. filter.get_shape(). padding: see _non_atrous_convolution. data_format: see _non_atrous_convolution. strides: see _non_atrous_convolution. name: see _non_atrous_convolution. """ def __init__( self, input_shape, filter_shape, # pylint: disable=redefined-builtin padding, data_format=None, strides=None, name=None): filter_shape = filter_shape.with_rank(input_shape.ndims) self.padding = padding self.name = name input_shape = input_shape.with_rank(filter_shape.ndims) if input_shape.ndims is None: raise ValueError("Rank of convolution must be known") if input_shape.ndims < 3 or input_shape.ndims > 5: raise ValueError( "`input` and `filter` must have rank at least 3 and at most 5") conv_dims = input_shape.ndims - 2 if strides is None: strides = [1] * conv_dims elif len(strides) != conv_dims: raise ValueError("len(strides)=%d, but should be %d" % (len(strides), conv_dims)) if conv_dims == 1: # conv1d uses the 2-d data format names if data_format is None: data_format = "NWC" elif data_format not in {"NCW", "NWC", "NCHW", "NHWC"}: raise ValueError("data_format must be \"NWC\" or \"NCW\".") self.strides = strides[0] self.data_format = data_format self.conv_op = self._conv1d elif conv_dims == 2: if data_format is None or data_format == "NHWC": data_format = "NHWC" strides = [1] + list(strides) + [1] elif data_format == "NCHW": strides = [1, 1] + list(strides) else: raise ValueError("data_format must be \"NHWC\" or \"NCHW\".") self.strides = strides self.data_format = data_format self.conv_op = conv2d elif conv_dims == 3: if data_format is None or data_format == "NDHWC": strides = [1] + list(strides) + [1] elif data_format == "NCDHW": strides = [1, 1] + list(strides) else: raise ValueError("data_format must be \"NDHWC\" or \"NCDHW\". 
Have: %s" % data_format) self.strides = strides self.data_format = data_format self.conv_op = gen_nn_ops.conv3d # Note that we need this adapter since argument names for conv1d don't match # those for gen_nn_ops.conv2d and gen_nn_ops.conv3d. # pylint: disable=redefined-builtin def _conv1d(self, input, filter, strides, padding, data_format, name): return conv1d( value=input, filters=filter, stride=strides, padding=padding, data_format=data_format, name=name) # pylint: enable=redefined-builtin def __call__(self, inp, filter): # pylint: disable=redefined-builtin return self.conv_op( input=inp, filter=filter, strides=self.strides, padding=self.padding, data_format=self.data_format, name=self.name) @tf_export("nn.dilation2d", v1=[]) def dilation2d_v2( input, # pylint: disable=redefined-builtin filters, # pylint: disable=redefined-builtin strides, padding, data_format, dilations, name=None): """Computes the grayscale dilation of 4-D `input` and 3-D `filters` tensors. The `input` tensor has shape `[batch, in_height, in_width, depth]` and the `filters` tensor has shape `[filter_height, filter_width, depth]`, i.e., each input channel is processed independently of the others with its own structuring function. The `output` tensor has shape `[batch, out_height, out_width, depth]`. The spatial dimensions of the output tensor depend on the `padding` algorithm. We currently only support the default "NHWC" `data_format`. In detail, the grayscale morphological 2-D dilation is the max-sum correlation (for consistency with `conv2d`, we use unmirrored filters): output[b, y, x, c] = max_{dy, dx} input[b, strides[1] * y + rates[1] * dy, strides[2] * x + rates[2] * dx, c] + filters[dy, dx, c] Max-pooling is a special case when the filter has size equal to the pooling kernel size and contains all zeros. Note on duality: The dilation of `input` by the `filters` is equal to the negation of the erosion of `-input` by the reflected `filters`. Args: input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`. 4-D with shape `[batch, in_height, in_width, depth]`. filters: A `Tensor`. Must have the same type as `input`. 3-D with shape `[filter_height, filter_width, depth]`. strides: A list of `ints` that has length `>= 4`. The stride of the sliding window for each dimension of the input tensor. Must be: `[1, stride_height, stride_width, 1]`. padding: A `string` from: `"SAME", "VALID"`. The type of padding algorithm to use. data_format: A `string`, only `"NHWC"` is currently supported. dilations: A list of `ints` that has length `>= 4`. The input stride for atrous morphological dilation. Must be: `[1, rate_height, rate_width, 1]`. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `input`. 
""" if data_format != "NHWC": raise ValueError("Data formats other than NHWC are not yet supported") return gen_nn_ops.dilation2d(input=input, filter=filters, strides=strides, rates=dilations, padding=padding, name=name) @tf_export(v1=["nn.dilation2d"]) def dilation2d_v1( # pylint: disable=missing-docstring input, # pylint: disable=redefined-builtin filter=None, # pylint: disable=redefined-builtin strides=None, rates=None, padding=None, name=None, filters=None, dilations=None): filter = deprecated_argument_lookup("filters", filters, "filter", filter) rates = deprecated_argument_lookup("dilations", dilations, "rates", rates) return gen_nn_ops.dilation2d(input, filter, strides, rates, padding, name) dilation2d_v1.__doc__ = gen_nn_ops.dilation2d.__doc__ @tf_export("nn.with_space_to_batch") def with_space_to_batch( input, # pylint: disable=redefined-builtin dilation_rate, padding, op, filter_shape=None, spatial_dims=None, data_format=None): """Performs `op` on the space-to-batch representation of `input`. This has the effect of transforming sliding window operations into the corresponding "atrous" operation in which the input is sampled at the specified `dilation_rate`. In the special case that `dilation_rate` is uniformly 1, this simply returns: op(input, num_spatial_dims, padding) Otherwise, it returns: batch_to_space_nd( op(space_to_batch_nd(input, adjusted_dilation_rate, adjusted_paddings), num_spatial_dims, "VALID") adjusted_dilation_rate, adjusted_crops), where: adjusted_dilation_rate is an int64 tensor of shape [max(spatial_dims)], adjusted_{paddings,crops} are int64 tensors of shape [max(spatial_dims), 2] defined as follows: We first define two int64 tensors `paddings` and `crops` of shape `[num_spatial_dims, 2]` based on the value of `padding` and the spatial dimensions of the `input`: If `padding = "VALID"`, then: paddings, crops = required_space_to_batch_paddings( input_shape[spatial_dims], dilation_rate) If `padding = "SAME"`, then: dilated_filter_shape = filter_shape + (filter_shape - 1) * (dilation_rate - 1) paddings, crops = required_space_to_batch_paddings( input_shape[spatial_dims], dilation_rate, [(dilated_filter_shape - 1) // 2, dilated_filter_shape - 1 - (dilated_filter_shape - 1) // 2]) Because `space_to_batch_nd` and `batch_to_space_nd` assume that the spatial dimensions are contiguous starting at the second dimension, but the specified `spatial_dims` may not be, we must adjust `dilation_rate`, `paddings` and `crops` in order to be usable with these operations. For a given dimension, if the block size is 1, and both the starting and ending padding and crop amounts are 0, then space_to_batch_nd effectively leaves that dimension alone, which is what is needed for dimensions not part of `spatial_dims`. Furthermore, `space_to_batch_nd` and `batch_to_space_nd` handle this case efficiently for any number of leading and trailing dimensions. For 0 <= i < len(spatial_dims), we assign: adjusted_dilation_rate[spatial_dims[i] - 1] = dilation_rate[i] adjusted_paddings[spatial_dims[i] - 1, :] = paddings[i, :] adjusted_crops[spatial_dims[i] - 1, :] = crops[i, :] All unassigned values of `adjusted_dilation_rate` default to 1, while all unassigned values of `adjusted_paddings` and `adjusted_crops` default to 0. Note in the case that `dilation_rate` is not uniformly 1, specifying "VALID" padding is equivalent to specifying `padding = "SAME"` with a filter_shape of `[1]*N`. Advanced usage. 
Note the following optimization: A sequence of `with_space_to_batch` operations with identical (not uniformly 1) `dilation_rate` parameters and "VALID" padding net = with_space_to_batch(net, dilation_rate, "VALID", op_1) ... net = with_space_to_batch(net, dilation_rate, "VALID", op_k) can be combined into a single `with_space_to_batch` operation as follows: def combined_op(converted_input, num_spatial_dims, _): result = op_1(converted_input, num_spatial_dims, "VALID") ... result = op_k(result, num_spatial_dims, "VALID") net = with_space_to_batch(net, dilation_rate, "VALID", combined_op) This eliminates the overhead of `k-1` calls to `space_to_batch_nd` and `batch_to_space_nd`. Similarly, a sequence of `with_space_to_batch` operations with identical (not uniformly 1) `dilation_rate` parameters, "SAME" padding, and odd filter dimensions net = with_space_to_batch(net, dilation_rate, "SAME", op_1, filter_shape_1) ... net = with_space_to_batch(net, dilation_rate, "SAME", op_k, filter_shape_k) can be combined into a single `with_space_to_batch` operation as follows: def combined_op(converted_input, num_spatial_dims, _): result = op_1(converted_input, num_spatial_dims, "SAME") ... result = op_k(result, num_spatial_dims, "SAME") net = with_space_to_batch(net, dilation_rate, "VALID", combined_op) Args: input: Tensor of rank > max(spatial_dims). dilation_rate: int32 Tensor of *known* shape [num_spatial_dims]. padding: str constant equal to "VALID" or "SAME" op: Function that maps (input, num_spatial_dims, padding) -> output filter_shape: If padding = "SAME", specifies the shape of the convolution kernel/pooling window as an integer Tensor of shape [>=num_spatial_dims]. If padding = "VALID", filter_shape is ignored and need not be specified. spatial_dims: Monotonically increasing sequence of `num_spatial_dims` integers (which are >= 1) specifying the spatial dimensions of `input` and output. Defaults to: `range(1, num_spatial_dims+1)`. data_format: A string or None. Specifies whether the channel dimension of the `input` and output is the last dimension (default, or if `data_format` does not start with "NC"), or the second dimension (if `data_format` starts with "NC"). For N=1, the valid values are "NWC" (default) and "NCW". For N=2, the valid values are "NHWC" (default) and "NCHW". For N=3, the valid values are "NDHWC" (default) and "NCDHW". Returns: The output Tensor as described above, dimensions will vary based on the op provided. Raises: ValueError: if `padding` is invalid or the arguments are incompatible. ValueError: if `spatial_dims` are invalid. """ input = ops.convert_to_tensor(input, name="input") # pylint: disable=redefined-builtin input_shape = input.get_shape() def build_op(num_spatial_dims, padding): return lambda inp, _: op(inp, num_spatial_dims, padding) new_op = _WithSpaceToBatch( input_shape, dilation_rate, padding, build_op, filter_shape=filter_shape, spatial_dims=spatial_dims, data_format=data_format) return new_op(input, None) class _WithSpaceToBatch(object): """Helper class for with_space_to_batch. Note that this class assumes that shapes of input and filter passed to __call__ are compatible with input_shape and filter_shape passed to the constructor. Arguments input_shape: static shape of input. i.e. input.get_shape(). dilation_rate: see with_space_to_batch padding: see with_space_to_batch build_op: Function that maps (num_spatial_dims, paddings) -> (function that maps (input, filter) -> output). 
filter_shape: see with_space_to_batch spatial_dims: see with_space_to_batch data_format: see with_space_to_batch """ def __init__(self, input_shape, dilation_rate, padding, build_op, filter_shape=None, spatial_dims=None, data_format=None): """Helper class for _with_space_to_batch.""" dilation_rate = ops.convert_to_tensor( dilation_rate, dtypes.int32, name="dilation_rate") try: rate_shape = dilation_rate.get_shape().with_rank(1) except ValueError: raise ValueError("rate must be rank 1") if not dilation_rate.get_shape().is_fully_defined(): raise ValueError("rate must have known shape") num_spatial_dims = rate_shape.dims[0].value if data_format is not None and data_format.startswith("NC"): starting_spatial_dim = 2 else: starting_spatial_dim = 1 if spatial_dims is None: spatial_dims = range(starting_spatial_dim, num_spatial_dims + starting_spatial_dim) orig_spatial_dims = list(spatial_dims) spatial_dims = sorted(set(int(x) for x in orig_spatial_dims)) if spatial_dims != orig_spatial_dims or any(x < 1 for x in spatial_dims): raise ValueError( "spatial_dims must be a montonically increasing sequence of positive " "integers") if data_format is not None and data_format.startswith("NC"): expected_input_rank = spatial_dims[-1] else: expected_input_rank = spatial_dims[-1] + 1 try: input_shape.with_rank_at_least(expected_input_rank) except ValueError: raise ValueError( "input tensor must have rank %d at least" % (expected_input_rank)) const_rate = tensor_util.constant_value(dilation_rate) rate_or_const_rate = dilation_rate if const_rate is not None: rate_or_const_rate = const_rate if np.any(const_rate < 1): raise ValueError("dilation_rate must be positive") if np.all(const_rate == 1): self.call = build_op(num_spatial_dims, padding) return # We have two padding contributions. The first is used for converting "SAME" # to "VALID". The second is required so that the height and width of the # zero-padded value tensor are multiples of rate. # Padding required to reduce to "VALID" convolution if padding == "SAME": if filter_shape is None: raise ValueError("filter_shape must be specified for SAME padding") filter_shape = ops.convert_to_tensor(filter_shape, name="filter_shape") const_filter_shape = tensor_util.constant_value(filter_shape) if const_filter_shape is not None: filter_shape = const_filter_shape self.base_paddings = _with_space_to_batch_base_paddings( const_filter_shape, num_spatial_dims, rate_or_const_rate) else: self.num_spatial_dims = num_spatial_dims self.rate_or_const_rate = rate_or_const_rate self.base_paddings = None elif padding == "VALID": self.base_paddings = np.zeros([num_spatial_dims, 2], np.int32) else: raise ValueError("Invalid padding method %r" % padding) self.input_shape = input_shape self.spatial_dims = spatial_dims self.dilation_rate = dilation_rate self.data_format = data_format self.op = build_op(num_spatial_dims, "VALID") self.call = self._with_space_to_batch_call def _with_space_to_batch_call(self, inp, filter): # pylint: disable=redefined-builtin """Call functionality for with_space_to_batch.""" # Handle input whose shape is unknown during graph creation. 
input_spatial_shape = None input_shape = self.input_shape spatial_dims = self.spatial_dims if input_shape.ndims is not None: input_shape_list = input_shape.as_list() input_spatial_shape = [input_shape_list[i] for i in spatial_dims] if input_spatial_shape is None or None in input_spatial_shape: input_shape_tensor = array_ops.shape(inp) input_spatial_shape = array_ops.stack( [input_shape_tensor[i] for i in spatial_dims]) base_paddings = self.base_paddings if base_paddings is None: # base_paddings could not be computed at build time since static filter # shape was not fully defined. filter_shape = array_ops.shape(filter) base_paddings = _with_space_to_batch_base_paddings( filter_shape, self.num_spatial_dims, self.rate_or_const_rate) paddings, crops = array_ops.required_space_to_batch_paddings( input_shape=input_spatial_shape, base_paddings=base_paddings, block_shape=self.dilation_rate) dilation_rate = _with_space_to_batch_adjust(self.dilation_rate, 1, spatial_dims) paddings = _with_space_to_batch_adjust(paddings, 0, spatial_dims) crops = _with_space_to_batch_adjust(crops, 0, spatial_dims) input_converted = array_ops.space_to_batch_nd( input=inp, block_shape=dilation_rate, paddings=paddings) result = self.op(input_converted, filter) result_converted = array_ops.batch_to_space_nd( input=result, block_shape=dilation_rate, crops=crops) # Recover channel information for output shape if channels are not last. if self.data_format is not None and self.data_format.startswith("NC"): if not result_converted.shape.dims[1].value and filter is not None: output_shape = result_converted.shape.as_list() output_shape[1] = filter.shape[-1] result_converted.set_shape(output_shape) return result_converted def __call__(self, inp, filter): # pylint: disable=redefined-builtin return self.call(inp, filter) def _with_space_to_batch_base_paddings(filter_shape, num_spatial_dims, rate_or_const_rate): """Helper function to compute base_paddings.""" # Spatial dimensions of the filters and the upsampled filters in which we # introduce (rate - 1) zeros between consecutive filter values. filter_spatial_shape = filter_shape[:num_spatial_dims] dilated_filter_spatial_shape = ( filter_spatial_shape + (filter_spatial_shape - 1) * (rate_or_const_rate - 1)) pad_extra_shape = dilated_filter_spatial_shape - 1 # When full_padding_shape is odd, we pad more at end, following the same # convention as conv2d. pad_extra_start = pad_extra_shape // 2 pad_extra_end = pad_extra_shape - pad_extra_start base_paddings = array_ops.stack( [[pad_extra_start[i], pad_extra_end[i]] for i in range(num_spatial_dims)]) return base_paddings def _with_space_to_batch_adjust(orig, fill_value, spatial_dims): """Returns an `adjusted` version of `orig` based on `spatial_dims`. Tensor of the same type as `orig` and with shape `[max(spatial_dims), ...]` where: adjusted[spatial_dims[i] - 1, ...] = orig[i, ...] for 0 <= i < len(spatial_dims), and adjusted[j, ...] = fill_value for j != spatial_dims[i] - 1 for some i. If `orig` is a constant value, then the result will be a constant value. Args: orig: Tensor of rank > max(spatial_dims). fill_value: Numpy scalar (of same data type as `orig) specifying the fill value for non-spatial dimensions. spatial_dims: See with_space_to_batch. Returns: `adjusted` tensor. 
""" fill_dims = orig.get_shape().as_list()[1:] dtype = orig.dtype.as_numpy_dtype parts = [] const_orig = tensor_util.constant_value(orig) const_or_orig = const_orig if const_orig is not None else orig prev_spatial_dim = 0 i = 0 while i < len(spatial_dims): start_i = i start_spatial_dim = spatial_dims[i] if start_spatial_dim > 1: # Fill in any gap from the previous spatial dimension (or dimension 1 if # this is the first spatial dimension) with `fill_value`. parts.append( np.full( [start_spatial_dim - 1 - prev_spatial_dim] + fill_dims, fill_value, dtype=dtype)) # Find the largest value of i such that: # [spatial_dims[start_i], ..., spatial_dims[i]] # == [start_spatial_dim, ..., start_spatial_dim + i - start_i], # i.e. the end of a contiguous group of spatial dimensions. while (i + 1 < len(spatial_dims) and spatial_dims[i + 1] == spatial_dims[i] + 1): i += 1 parts.append(const_or_orig[start_i:i + 1]) prev_spatial_dim = spatial_dims[i] i += 1 if const_orig is not None: return np.concatenate(parts) else: return array_ops.concat(parts, 0) def _get_strides_and_dilation_rate(num_spatial_dims, strides, dilation_rate): """Helper function for verifying strides and dilation_rate arguments. This is used by `convolution` and `pool`. Args: num_spatial_dims: int strides: Optional. List of N ints >= 1. Defaults to [1]*N. If any value of strides is > 1, then all values of dilation_rate must be 1. dilation_rate: Optional. List of N ints >= 1. Defaults to [1]*N. If any value of dilation_rate is > 1, then all values of strides must be 1. Returns: Normalized (strides, dilation_rate) as int32 numpy arrays of shape [num_spatial_dims]. Raises: ValueError: if the parameters are invalid. """ if dilation_rate is None: dilation_rate = [1] * num_spatial_dims elif len(dilation_rate) != num_spatial_dims: raise ValueError("len(dilation_rate)=%d but should be %d" % (len(dilation_rate), num_spatial_dims)) dilation_rate = np.array(dilation_rate, dtype=np.int32) if np.any(dilation_rate < 1): raise ValueError("all values of dilation_rate must be positive") if strides is None: strides = [1] * num_spatial_dims elif len(strides) != num_spatial_dims: raise ValueError("len(strides)=%d but should be %d" % (len(strides), num_spatial_dims)) strides = np.array(strides, dtype=np.int32) if np.any(strides < 1): raise ValueError("all values of strides must be positive") if np.any(strides > 1) and np.any(dilation_rate > 1): raise ValueError( "strides > 1 not supported in conjunction with dilation_rate > 1") return strides, dilation_rate @tf_export(v1=["nn.convolution"]) def convolution( input, # pylint: disable=redefined-builtin filter, # pylint: disable=redefined-builtin padding, strides=None, dilation_rate=None, name=None, data_format=None, filters=None, dilations=None): """Computes sums of N-D convolutions (actually cross-correlation). This also supports either output striding via the optional `strides` parameter or atrous convolution (also known as convolution with holes or dilated convolution, based on the French word "trous" meaning holes in English) via the optional `dilation_rate` parameter. Currently, however, output striding is not supported for atrous convolutions. 
Specifically, in the case that `data_format` does not start with "NC", given a rank (N+2) `input` Tensor of shape [num_batches, input_spatial_shape[0], ..., input_spatial_shape[N-1], num_input_channels], a rank (N+2) `filter` Tensor of shape [spatial_filter_shape[0], ..., spatial_filter_shape[N-1], num_input_channels, num_output_channels], an optional `dilation_rate` tensor of shape [N] (defaulting to [1]*N) specifying the filter upsampling/input downsampling rate, and an optional list of N `strides` (defaulting [1]*N), this computes for each N-D spatial output position (x[0], ..., x[N-1]): ``` output[b, x[0], ..., x[N-1], k] = sum_{z[0], ..., z[N-1], q} filter[z[0], ..., z[N-1], q, k] * padded_input[b, x[0]*strides[0] + dilation_rate[0]*z[0], ..., x[N-1]*strides[N-1] + dilation_rate[N-1]*z[N-1], q] ``` where b is the index into the batch, k is the output channel number, q is the input channel number, and z is the N-D spatial offset within the filter. Here, `padded_input` is obtained by zero padding the input using an effective spatial filter shape of `(spatial_filter_shape-1) * dilation_rate + 1` and output striding `strides` as described in the [comment here](https://tensorflow.org/api_guides/python/nn#Convolution). In the case that `data_format` does start with `"NC"`, the `input` and output (but not the `filter`) are simply transposed as follows: convolution(input, data_format, **kwargs) = tf.transpose(convolution(tf.transpose(input, [0] + range(2,N+2) + [1]), **kwargs), [0, N+1] + range(1, N+1)) It is required that 1 <= N <= 3. Args: input: An (N+2)-D `Tensor` of type `T`, of shape `[batch_size] + input_spatial_shape + [in_channels]` if data_format does not start with "NC" (default), or `[batch_size, in_channels] + input_spatial_shape` if data_format starts with "NC". filter: An (N+2)-D `Tensor` with the same type as `input` and shape `spatial_filter_shape + [in_channels, out_channels]`. padding: A string, either `"VALID"` or `"SAME"`. The padding algorithm. strides: Optional. Sequence of N ints >= 1. Specifies the output stride. Defaults to [1]*N. If any value of strides is > 1, then all values of dilation_rate must be 1. dilation_rate: Optional. Sequence of N ints >= 1. Specifies the filter upsampling/input downsampling rate. In the literature, the same parameter is sometimes called `input stride` or `dilation`. The effective filter size used for the convolution will be `spatial_filter_shape + (spatial_filter_shape - 1) * (rate - 1)`, obtained by inserting (dilation_rate[i]-1) zeros between consecutive elements of the original filter in each spatial dimension i. If any value of dilation_rate is > 1, then all values of strides must be 1. name: Optional name for the returned tensor. data_format: A string or None. Specifies whether the channel dimension of the `input` and output is the last dimension (default, or if `data_format` does not start with "NC"), or the second dimension (if `data_format` starts with "NC"). For N=1, the valid values are "NWC" (default) and "NCW". For N=2, the valid values are "NHWC" (default) and "NCHW". For N=3, the valid values are "NDHWC" (default) and "NCDHW". filters: Alias of filter. dilations: Alias of dilation_rate. Returns: A `Tensor` with the same type as `input` of shape `[batch_size] + output_spatial_shape + [out_channels]` if data_format is None or does not start with "NC", or `[batch_size, out_channels] + output_spatial_shape` if data_format starts with "NC", where `output_spatial_shape` depends on the value of `padding`. 
If padding == "SAME": output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides[i]) If padding == "VALID": output_spatial_shape[i] = ceil((input_spatial_shape[i] - (spatial_filter_shape[i]-1) * dilation_rate[i]) / strides[i]). Raises: ValueError: If input/output depth does not match `filter` shape, if padding is other than `"VALID"` or `"SAME"`, or if data_format is invalid. """ filter = deprecated_argument_lookup("filters", filters, "filter", filter) dilation_rate = deprecated_argument_lookup( "dilations", dilations, "dilation_rate", dilation_rate) return convolution_internal( input, filter, strides=strides, padding=padding, data_format=data_format, dilations=dilation_rate, name=name) @tf_export("nn.convolution", v1=[]) def convolution_v2( input, # pylint: disable=redefined-builtin filters, strides=None, padding="VALID", data_format=None, dilations=None, name=None): return convolution_internal( input, # pylint: disable=redefined-builtin filters, strides=strides, padding=padding, data_format=data_format, dilations=dilations, name=name) convolution_v2.__doc__ = deprecation.rewrite_argument_docstring( deprecation.rewrite_argument_docstring( convolution.__doc__, "dilation_rate", "dilations"), "filter", "filters") def convolution_internal( input, # pylint: disable=redefined-builtin filters, strides=None, padding="VALID", data_format=None, dilations=None, name=None): """Internal function which performs rank agnostic convolution.""" with ops.name_scope(name, "convolution", [input, filters]) as name: if isinstance(input.shape, tensor_shape.TensorShape) and \ input.shape.rank is not None: n = len(input.shape) - 2 elif not isinstance(input.shape, tensor_shape.TensorShape) and \ input.shape is not None: n = len(input.shape) - 2 elif isinstance(filters.shape, tensor_shape.TensorShape) and \ filters.shape.rank is not None: n = len(filters.shape) - 2 elif not isinstance(filters.shape, tensor_shape.TensorShape) and \ filters.shape is not None: n = len(filters.shape) - 2 else: raise ValueError("rank of input or filter must be known") if not 1 <= n <= 3: raise ValueError( "Input tensor must be of rank 3, 4 or 5 but was {}.".format(n + 2)) if data_format is None: channel_index = n + 1 else: channel_index = 1 if data_format.startswith("NC") else n + 1 strides = _get_sequence(strides, n, channel_index, "strides") dilations = _get_sequence(dilations, n, channel_index, "dilations") conv_ops = {1: conv1d, 2: gen_nn_ops.conv2d, 3: gen_nn_ops.conv3d} if all(i == 1 for i in dilations): # fast path if no dilation as gradient only supported on GPU for dilations op = conv_ops[n] _ops_counter.get_cell("conv{}d".format(n)).increase_by(1) return op( input, filters, strides, padding=padding, data_format=data_format, dilations=dilations, name=name) else: if channel_index == 1: strides = strides[2:] dilations = dilations[2:] else: strides = strides[1:-1] dilations = dilations[1:-1] op = Convolution( tensor_shape.as_shape(input.shape), tensor_shape.as_shape(filters.shape), padding, strides=strides, dilation_rate=dilations, name=name, data_format=data_format) return op(input, filters) class Convolution(object): """Helper class for convolution. Note that this class assumes that shapes of input and filter passed to __call__ are compatible with input_shape and filter_shape passed to the constructor. Arguments input_shape: static shape of input. i.e. input.get_shape(). filter_shape: static shape of the filter. i.e. filter.get_shape(). padding: see convolution. strides: see convolution. dilation_rate: see convolution. 
name: see convolution. data_format: see convolution. """ def __init__(self, input_shape, filter_shape, padding, strides=None, dilation_rate=None, name=None, data_format=None): """Helper function for convolution.""" num_total_dims = filter_shape.ndims if num_total_dims is None: num_total_dims = input_shape.ndims if num_total_dims is None: raise ValueError("rank of input or filter must be known") num_spatial_dims = num_total_dims - 2 try: input_shape.with_rank(num_spatial_dims + 2) except ValueError: raise ValueError( "input tensor must have rank %d" % (num_spatial_dims + 2)) try: filter_shape.with_rank(num_spatial_dims + 2) except ValueError: raise ValueError( "filter tensor must have rank %d" % (num_spatial_dims + 2)) if data_format is None or not data_format.startswith("NC"): input_channels_dim = tensor_shape.dimension_at_index( input_shape, num_spatial_dims + 1) spatial_dims = range(1, num_spatial_dims + 1) else: input_channels_dim = tensor_shape.dimension_at_index(input_shape, 1) spatial_dims = range(2, num_spatial_dims + 2) if not input_channels_dim.is_compatible_with( filter_shape[num_spatial_dims]): raise ValueError( "number of input channels does not match corresponding dimension of " "filter, {} != {}".format(input_channels_dim, filter_shape[num_spatial_dims])) strides, dilation_rate = _get_strides_and_dilation_rate( num_spatial_dims, strides, dilation_rate) self.input_shape = input_shape self.filter_shape = filter_shape self.data_format = data_format self.strides = strides self.name = name self.conv_op = _WithSpaceToBatch( input_shape, dilation_rate=dilation_rate, padding=padding, build_op=self._build_op, filter_shape=filter_shape, spatial_dims=spatial_dims, data_format=data_format) def _build_op(self, _, padding): return _NonAtrousConvolution( self.input_shape, filter_shape=self.filter_shape, padding=padding, data_format=self.data_format, strides=self.strides, name=self.name) def __call__(self, inp, filter): # pylint: disable=redefined-builtin return self.conv_op(inp, filter) @tf_export(v1=["nn.pool"]) def pool( input, # pylint: disable=redefined-builtin window_shape, pooling_type, padding, dilation_rate=None, strides=None, name=None, data_format=None, dilations=None): """Performs an N-D pooling operation. In the case that `data_format` does not start with "NC", computes for 0 <= b < batch_size, 0 <= x[i] < output_spatial_shape[i], 0 <= c < num_channels: ``` output[b, x[0], ..., x[N-1], c] = REDUCE_{z[0], ..., z[N-1]} input[b, x[0] * strides[0] - pad_before[0] + dilation_rate[0]*z[0], ... x[N-1]*strides[N-1] - pad_before[N-1] + dilation_rate[N-1]*z[N-1], c], ``` where the reduction function REDUCE depends on the value of `pooling_type`, and pad_before is defined based on the value of `padding` as described in the "returns" section of `tf.nn.convolution` for details. The reduction never includes out-of-bounds positions. In the case that `data_format` starts with `"NC"`, the `input` and output are simply transposed as follows: ``` pool(input, data_format, **kwargs) = tf.transpose(pool(tf.transpose(input, [0] + range(2,N+2) + [1]), **kwargs), [0, N+1] + range(1, N+1)) ``` Args: input: Tensor of rank N+2, of shape `[batch_size] + input_spatial_shape + [num_channels]` if data_format does not start with "NC" (default), or `[batch_size, num_channels] + input_spatial_shape` if data_format starts with "NC". Pooling happens over the spatial dimensions only. window_shape: Sequence of N ints >= 1. pooling_type: Specifies pooling operation, must be "AVG" or "MAX". 
padding: The padding algorithm, must be "SAME" or "VALID". See the "returns" section of `tf.nn.convolution` for details. dilation_rate: Optional. Dilation rate. List of N ints >= 1. Defaults to [1]*N. If any value of dilation_rate is > 1, then all values of strides must be 1. strides: Optional. Sequence of N ints >= 1. Defaults to [1]*N. If any value of strides is > 1, then all values of dilation_rate must be 1. name: Optional. Name of the op. data_format: A string or None. Specifies whether the channel dimension of the `input` and output is the last dimension (default, or if `data_format` does not start with "NC"), or the second dimension (if `data_format` starts with "NC"). For N=1, the valid values are "NWC" (default) and "NCW". For N=2, the valid values are "NHWC" (default) and "NCHW". For N=3, the valid values are "NDHWC" (default) and "NCDHW". dilations: Alias for dilation_rate Returns: Tensor of rank N+2, of shape [batch_size] + output_spatial_shape + [num_channels] if data_format is None or does not start with "NC", or [batch_size, num_channels] + output_spatial_shape if data_format starts with "NC", where `output_spatial_shape` depends on the value of padding: If padding = "SAME": output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides[i]) If padding = "VALID": output_spatial_shape[i] = ceil((input_spatial_shape[i] - (window_shape[i] - 1) * dilation_rate[i]) / strides[i]). Raises: ValueError: if arguments are invalid. """ dilation_rate = deprecated_argument_lookup( "dilations", dilations, "dilation_rate", dilation_rate) # pylint: enable=line-too-long with ops.name_scope(name, "%s_pool" % (pooling_type.lower()), [input]) as scope: input = ops.convert_to_tensor(input, name="input") # pylint: disable=redefined-builtin num_spatial_dims = len(window_shape) if num_spatial_dims < 1 or num_spatial_dims > 3: raise ValueError("It is required that 1 <= num_spatial_dims <= 3.") input.get_shape().with_rank(num_spatial_dims + 2) strides, dilation_rate = _get_strides_and_dilation_rate( num_spatial_dims, strides, dilation_rate) if padding == "SAME" and np.any(dilation_rate > 1): raise ValueError( "pooling with SAME padding is not implemented for dilation_rate > 1") if np.any(strides > window_shape): raise ValueError( "strides > window_shape not supported due to inconsistency between " "CPU and GPU implementations") pooling_ops = { ("MAX", 1): max_pool, ("MAX", 2): max_pool, ("MAX", 3): max_pool3d, # pylint: disable=undefined-variable ("AVG", 1): avg_pool, ("AVG", 2): avg_pool, ("AVG", 3): avg_pool3d, # pylint: disable=undefined-variable } op_key = (pooling_type, num_spatial_dims) if op_key not in pooling_ops: raise ValueError("%d-D %s pooling is not supported." 
% (op_key[1], op_key[0])) if data_format is None or not data_format.startswith("NC"): adjusted_window_shape = [1] + list(window_shape) + [1] adjusted_strides = [1] + list(strides) + [1] spatial_dims = range(1, num_spatial_dims + 1) else: adjusted_window_shape = [1, 1] + list(window_shape) adjusted_strides = [1, 1] + list(strides) spatial_dims = range(2, num_spatial_dims + 2) if num_spatial_dims == 1: if data_format is None or data_format == "NWC": data_format_kwargs = dict(data_format="NHWC") elif data_format == "NCW": data_format_kwargs = dict(data_format="NCHW") else: raise ValueError("data_format must be either \"NWC\" or \"NCW\".") adjusted_window_shape = [1] + adjusted_window_shape adjusted_strides = [1] + adjusted_strides else: data_format_kwargs = dict(data_format=data_format) def op(converted_input, _, converted_padding): # pylint: disable=missing-docstring if num_spatial_dims == 1: converted_input = array_ops.expand_dims(converted_input, spatial_dims[0]) result = pooling_ops[op_key]( converted_input, adjusted_window_shape, adjusted_strides, converted_padding, name=scope, **data_format_kwargs) if num_spatial_dims == 1: result = array_ops.squeeze(result, [spatial_dims[0]]) return result return with_space_to_batch( input=input, dilation_rate=dilation_rate, padding=padding, op=op, spatial_dims=spatial_dims, filter_shape=window_shape) @tf_export("nn.pool", v1=[]) def pool_v2( input, # pylint: disable=redefined-builtin window_shape, pooling_type, strides=None, padding="VALID", data_format=None, dilations=None, name=None): # pylint: disable=line-too-long """Performs an N-D pooling operation. In the case that `data_format` does not start with "NC", computes for 0 <= b < batch_size, 0 <= x[i] < output_spatial_shape[i], 0 <= c < num_channels: ``` output[b, x[0], ..., x[N-1], c] = REDUCE_{z[0], ..., z[N-1]} input[b, x[0] * strides[0] - pad_before[0] + dilation_rate[0]*z[0], ... x[N-1]*strides[N-1] - pad_before[N-1] + dilation_rate[N-1]*z[N-1], c], ``` where the reduction function REDUCE depends on the value of `pooling_type`, and pad_before is defined based on the value of `padding` as described in the "returns" section of `tf.nn.convolution` for details. The reduction never includes out-of-bounds positions. In the case that `data_format` starts with `"NC"`, the `input` and output are simply transposed as follows: ``` pool(input, data_format, **kwargs) = tf.transpose(pool(tf.transpose(input, [0] + range(2,N+2) + [1]), **kwargs), [0, N+1] + range(1, N+1)) ``` Args: input: Tensor of rank N+2, of shape `[batch_size] + input_spatial_shape + [num_channels]` if data_format does not start with "NC" (default), or `[batch_size, num_channels] + input_spatial_shape` if data_format starts with "NC". Pooling happens over the spatial dimensions only. window_shape: Sequence of N ints >= 1. pooling_type: Specifies pooling operation, must be "AVG" or "MAX". strides: Optional. Sequence of N ints >= 1. Defaults to [1]*N. If any value of strides is > 1, then all values of dilation_rate must be 1. padding: The padding algorithm, must be "SAME" or "VALID". Defaults to "SAME". See the "returns" section of `tf.nn.convolution` for details. data_format: A string or None. Specifies whether the channel dimension of the `input` and output is the last dimension (default, or if `data_format` does not start with "NC"), or the second dimension (if `data_format` starts with "NC"). For N=1, the valid values are "NWC" (default) and "NCW". For N=2, the valid values are "NHWC" (default) and "NCHW". 
For N=3, the valid values are "NDHWC" (default) and "NCDHW". dilations: Optional. Dilation rate. List of N ints >= 1. Defaults to [1]*N. If any value of dilation_rate is > 1, then all values of strides must be 1. name: Optional. Name of the op. Returns: Tensor of rank N+2, of shape [batch_size] + output_spatial_shape + [num_channels] if data_format is None or does not start with "NC", or [batch_size, num_channels] + output_spatial_shape if data_format starts with "NC", where `output_spatial_shape` depends on the value of padding: If padding = "SAME": output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides[i]) If padding = "VALID": output_spatial_shape[i] = ceil((input_spatial_shape[i] - (window_shape[i] - 1) * dilation_rate[i]) / strides[i]). Raises: ValueError: if arguments are invalid. """ return pool( input=input, window_shape=window_shape, pooling_type=pooling_type, padding=padding, dilation_rate=dilations, strides=strides, name=name, data_format=data_format) @tf_export("nn.atrous_conv2d") def atrous_conv2d(value, filters, rate, padding, name=None): """Atrous convolution (a.k.a. convolution with holes or dilated convolution). This function is a simpler wrapper around the more general `tf.nn.convolution`, and exists only for backwards compatibility. You can use `tf.nn.convolution` to perform 1-D, 2-D, or 3-D atrous convolution. Computes a 2-D atrous convolution, also known as convolution with holes or dilated convolution, given 4-D `value` and `filters` tensors. If the `rate` parameter is equal to one, it performs regular 2-D convolution. If the `rate` parameter is greater than one, it performs convolution with holes, sampling the input values every `rate` pixels in the `height` and `width` dimensions. This is equivalent to convolving the input with a set of upsampled filters, produced by inserting `rate - 1` zeros between two consecutive values of the filters along the `height` and `width` dimensions, hence the name atrous convolution or convolution with holes (the French word trous means holes in English). More specifically: ``` output[batch, height, width, out_channel] = sum_{dheight, dwidth, in_channel} ( filters[dheight, dwidth, in_channel, out_channel] * value[batch, height + rate*dheight, width + rate*dwidth, in_channel] ) ``` Atrous convolution allows us to explicitly control how densely to compute feature responses in fully convolutional networks. Used in conjunction with bilinear interpolation, it offers an alternative to `conv2d_transpose` in dense prediction tasks such as semantic image segmentation, optical flow computation, or depth estimation. It also allows us to effectively enlarge the field of view of filters without increasing the number of parameters or the amount of computation. For a description of atrous convolution and how it can be used for dense feature extraction, please see: [Semantic Image Segmentation with Deep Convolutional Nets and Fully Connected CRFs](http://arxiv.org/abs/1412.7062). The same operation is investigated further in [Multi-Scale Context Aggregation by Dilated Convolutions](http://arxiv.org/abs/1511.07122). Previous works that effectively use atrous convolution in different ways are, among others, [OverFeat: Integrated Recognition, Localization and Detection using Convolutional Networks](http://arxiv.org/abs/1312.6229) and [Fast Image Scanning with Deep Max-Pooling Convolutional Neural Networks](http://arxiv.org/abs/1302.1700). 
Atrous convolution is also closely related to the so-called noble identities in multi-rate signal processing. There are many different ways to implement atrous convolution (see the refs above). The implementation here reduces ```python atrous_conv2d(value, filters, rate, padding=padding) ``` to the following three operations: ```python paddings = ... net = space_to_batch(value, paddings, block_size=rate) net = conv2d(net, filters, strides=[1, 1, 1, 1], padding="VALID") crops = ... net = batch_to_space(net, crops, block_size=rate) ``` Advanced usage. Note the following optimization: A sequence of `atrous_conv2d` operations with identical `rate` parameters, 'SAME' `padding`, and filters with odd heights/ widths: ```python net = atrous_conv2d(net, filters1, rate, padding="SAME") net = atrous_conv2d(net, filters2, rate, padding="SAME") ... net = atrous_conv2d(net, filtersK, rate, padding="SAME") ``` can be equivalently performed cheaper in terms of computation and memory as: ```python pad = ... # padding so that the input dims are multiples of rate net = space_to_batch(net, paddings=pad, block_size=rate) net = conv2d(net, filters1, strides=[1, 1, 1, 1], padding="SAME") net = conv2d(net, filters2, strides=[1, 1, 1, 1], padding="SAME") ... net = conv2d(net, filtersK, strides=[1, 1, 1, 1], padding="SAME") net = batch_to_space(net, crops=pad, block_size=rate) ``` because a pair of consecutive `space_to_batch` and `batch_to_space` ops with the same `block_size` cancel out when their respective `paddings` and `crops` inputs are identical. Args: value: A 4-D `Tensor` of type `float`. It needs to be in the default "NHWC" format. Its shape is `[batch, in_height, in_width, in_channels]`. filters: A 4-D `Tensor` with the same type as `value` and shape `[filter_height, filter_width, in_channels, out_channels]`. `filters`' `in_channels` dimension must match that of `value`. Atrous convolution is equivalent to standard convolution with upsampled filters with effective height `filter_height + (filter_height - 1) * (rate - 1)` and effective width `filter_width + (filter_width - 1) * (rate - 1)`, produced by inserting `rate - 1` zeros along consecutive elements across the `filters`' spatial dimensions. rate: A positive int32. The stride with which we sample input values across the `height` and `width` dimensions. Equivalently, the rate by which we upsample the filter values by inserting zeros across the `height` and `width` dimensions. In the literature, the same parameter is sometimes called `input stride` or `dilation`. padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. name: Optional name for the returned tensor. Returns: A `Tensor` with the same type as `value`. Output shape with `'VALID'` padding is: [batch, height - 2 * (filter_width - 1), width - 2 * (filter_height - 1), out_channels]. Output shape with `'SAME'` padding is: [batch, height, width, out_channels]. Raises: ValueError: If input/output depth does not match `filters`' shape, or if padding is other than `'VALID'` or `'SAME'`. """ return convolution( input=value, filter=filters, padding=padding, dilation_rate=np.broadcast_to(rate, (2,)), name=name) def _convert_padding(padding): """Converts Python padding to C++ padding for ops which take EXPLICIT padding. Args: padding: the `padding` argument for a Python op which supports EXPLICIT padding. Returns: (padding, explicit_paddings) pair, which should be passed as attributes to a C++ op. Raises: ValueError: If padding is invalid. 
""" explicit_paddings = [] if padding == "EXPLICIT": # Give a better error message if EXPLICIT is passed. raise ValueError('"EXPLICIT" is not a valid value for the padding ' "parameter. To use explicit padding, the padding " "parameter must be a list.") if isinstance(padding, (list, tuple)): for i, dim_paddings in enumerate(padding): if not isinstance(dim_paddings, (list, tuple)): raise ValueError("When padding is a list, each element of padding must " "be a list/tuple of size 2. Element with index %d of " "padding is not a list/tuple" % i) if len(dim_paddings) != 2: raise ValueError("When padding is a list, each element of padding must " "be a list/tuple of size 2. Element with index %d of " "padding has size %d" % (i, len(dim_paddings))) explicit_paddings.extend(dim_paddings) if len(padding) != 4: raise ValueError("When padding is a list, it must be of size 4. Got " "padding of size: %d" % len(padding)) padding = "EXPLICIT" return padding, explicit_paddings @tf_export(v1=["nn.conv1d"]) @deprecation.deprecated_arg_values( None, "`NCHW` for data_format is deprecated, use `NCW` instead", warn_once=True, data_format="NCHW") @deprecation.deprecated_arg_values( None, "`NHWC` for data_format is deprecated, use `NWC` instead", warn_once=True, data_format="NHWC") def conv1d( value=None, filters=None, stride=None, padding=None, use_cudnn_on_gpu=None, data_format=None, name=None, input=None, # pylint: disable=redefined-builtin dilations=None): r"""Computes a 1-D convolution given 3-D input and filter tensors. Given an input tensor of shape [batch, in_width, in_channels] if data_format is "NWC", or [batch, in_channels, in_width] if data_format is "NCW", and a filter / kernel tensor of shape [filter_width, in_channels, out_channels], this op reshapes the arguments to pass them to conv2d to perform the equivalent convolution operation. Internally, this op reshapes the input tensors and invokes `tf.nn.conv2d`. For example, if `data_format` does not start with "NC", a tensor of shape [batch, in_width, in_channels] is reshaped to [batch, 1, in_width, in_channels], and the filter is reshaped to [1, filter_width, in_channels, out_channels]. The result is then reshaped back to [batch, out_width, out_channels] \(where out_width is a function of the stride and padding as in conv2d\) and returned to the caller. Args: value: A 3D `Tensor`. Must be of type `float16`, `float32`, or `float64`. filters: A 3D `Tensor`. Must have the same type as `value`. stride: An int or list of `ints` that has length `1` or `3`. The number of entries by which the filter is moved right at each step. padding: 'SAME' or 'VALID' use_cudnn_on_gpu: An optional `bool`. Defaults to `True`. data_format: An optional `string` from `"NWC", "NCW"`. Defaults to `"NWC"`, the data is stored in the order of [batch, in_width, in_channels]. The `"NCW"` format stores data as [batch, in_channels, in_width]. name: A name for the operation (optional). input: Alias for value. dilations: An int or list of `ints` that has length `1` or `3` which defaults to 1. The dilation factor for each dimension of input. If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. Dilations in the batch and depth dimensions must be 1. Returns: A `Tensor`. Has the same type as input. Raises: ValueError: if `data_format` is invalid. 
""" value = deprecation.deprecated_argument_lookup("input", input, "value", value) with ops.name_scope(name, "conv1d", [value, filters]) as name: # Reshape the input tensor to [batch, 1, in_width, in_channels] if data_format is None or data_format == "NHWC" or data_format == "NWC": data_format = "NHWC" spatial_start_dim = 1 channel_index = 2 elif data_format == "NCHW" or data_format == "NCW": data_format = "NCHW" spatial_start_dim = 2 channel_index = 1 else: raise ValueError("data_format must be \"NWC\" or \"NCW\".") strides = [1] + _get_sequence(stride, 1, channel_index, "stride") dilations = [1] + _get_sequence(dilations, 1, channel_index, "dilations") value = array_ops.expand_dims(value, spatial_start_dim) filters = array_ops.expand_dims(filters, 0) result = gen_nn_ops.conv2d( value, filters, strides, padding, use_cudnn_on_gpu=use_cudnn_on_gpu, data_format=data_format, dilations=dilations, name=name) return array_ops.squeeze(result, [spatial_start_dim]) @tf_export("nn.conv1d", v1=[]) def conv1d_v2( input, # pylint: disable=redefined-builtin filters, stride, padding, data_format="NWC", dilations=None, name=None): r"""Computes a 1-D convolution given 3-D input and filter tensors. Given an input tensor of shape [batch, in_width, in_channels] if data_format is "NWC", or [batch, in_channels, in_width] if data_format is "NCW", and a filter / kernel tensor of shape [filter_width, in_channels, out_channels], this op reshapes the arguments to pass them to conv2d to perform the equivalent convolution operation. Internally, this op reshapes the input tensors and invokes `tf.nn.conv2d`. For example, if `data_format` does not start with "NC", a tensor of shape [batch, in_width, in_channels] is reshaped to [batch, 1, in_width, in_channels], and the filter is reshaped to [1, filter_width, in_channels, out_channels]. The result is then reshaped back to [batch, out_width, out_channels] \(where out_width is a function of the stride and padding as in conv2d\) and returned to the caller. Args: input: A 3D `Tensor`. Must be of type `float16`, `float32`, or `float64`. filters: A 3D `Tensor`. Must have the same type as `input`. stride: An int or list of `ints` that has length `1` or `3`. The number of entries by which the filter is moved right at each step. padding: 'SAME' or 'VALID' data_format: An optional `string` from `"NWC", "NCW"`. Defaults to `"NWC"`, the data is stored in the order of [batch, in_width, in_channels]. The `"NCW"` format stores data as [batch, in_channels, in_width]. dilations: An int or list of `ints` that has length `1` or `3` which defaults to 1. The dilation factor for each dimension of input. If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. Dilations in the batch and depth dimensions must be 1. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as input. Raises: ValueError: if `data_format` is invalid. """ return conv1d( input, # pylint: disable=redefined-builtin filters, stride, padding, use_cudnn_on_gpu=True, data_format=data_format, name=name, dilations=dilations) @tf_export("nn.conv1d_transpose") def conv1d_transpose( input, # pylint: disable=redefined-builtin filters, output_shape, strides, padding="SAME", data_format="NWC", dilations=None, name=None): """The transpose of `conv1d`. 
This operation is sometimes called "deconvolution" after [Deconvolutional Networks](https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf), but is really the transpose (gradient) of `conv1d` rather than an actual deconvolution. Args: input: A 3-D `Tensor` of type `float` and shape `[batch, in_width, in_channels]` for `NWC` data format or `[batch, in_channels, in_width]` for `NCW` data format. filters: A 3-D `Tensor` with the same type as `value` and shape `[filter_width, output_channels, in_channels]`. `filter`'s `in_channels` dimension must match that of `value`. output_shape: A 1-D `Tensor`, containing three elements, representing the output shape of the deconvolution op. strides: An int or list of `ints` that has length `1` or `3`. The number of entries by which the filter is moved right at each step. padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See the "returns" section of `tf.nn.convolution` for details. data_format: A string. `'NWC'` and `'NCW'` are supported. dilations: An int or list of `ints` that has length `1` or `3` which defaults to 1. The dilation factor for each dimension of input. If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. Dilations in the batch and depth dimensions must be 1. name: Optional name for the returned tensor. Returns: A `Tensor` with the same type as `value`. Raises: ValueError: If input/output depth does not match `filter`'s shape, if `output_shape` is not at 3-element vector, if `padding` is other than `'VALID'` or `'SAME'`, or if `data_format` is invalid. """ with ops.name_scope(name, "conv1d_transpose", [input, filters, output_shape]) as name: # The format could be either NWC or NCW, map to NHWC or NCHW if data_format is None or data_format == "NWC": data_format = "NHWC" spatial_start_dim = 1 channel_index = 2 elif data_format == "NCW": data_format = "NCHW" spatial_start_dim = 2 channel_index = 1 else: raise ValueError("data_format must be \"NWC\" or \"NCW\".") # Reshape the input tensor to [batch, 1, in_width, in_channels] strides = [1] + _get_sequence(strides, 1, channel_index, "stride") dilations = [1] + _get_sequence(dilations, 1, channel_index, "dilations") input = array_ops.expand_dims(input, spatial_start_dim) filters = array_ops.expand_dims(filters, 0) output_shape = list(output_shape) if not isinstance( output_shape, ops.Tensor) else output_shape output_shape = array_ops.concat([output_shape[: spatial_start_dim], [1], output_shape[spatial_start_dim:]], 0) result = gen_nn_ops.conv2d_backprop_input( input_sizes=output_shape, filter=filters, out_backprop=input, strides=strides, padding=padding, data_format=data_format, dilations=dilations, name=name) return array_ops.squeeze(result, spatial_start_dim) @tf_export("nn.conv2d", v1=[]) def conv2d_v2(input, # pylint: disable=redefined-builtin filters, strides, padding, data_format="NHWC", dilations=None, name=None): # pylint: disable=line-too-long r"""Computes a 2-D convolution given 4-D `input` and `filters` tensors. Given an input tensor of shape `[batch, in_height, in_width, in_channels]` and a filter / kernel tensor of shape `[filter_height, filter_width, in_channels, out_channels]`, this op performs the following: 1. Flattens the filter to a 2-D matrix with shape `[filter_height * filter_width * in_channels, output_channels]`. 2. Extracts image patches from the input tensor to form a *virtual* tensor of shape `[batch, out_height, out_width, filter_height * filter_width * in_channels]`. 3. 
For each patch, right-multiplies the filter matrix and the image patch vector. In detail, with the default NHWC format, output[b, i, j, k] = sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] * filter[di, dj, q, k] Must have `strides[0] = strides[3] = 1`. For the most common case of the same horizontal and vertices strides, `strides = [1, stride, stride, 1]`. Args: input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`. A 4-D tensor. The dimension order is interpreted according to the value of `data_format`, see below for details. filters: A `Tensor`. Must have the same type as `input`. A 4-D tensor of shape `[filter_height, filter_width, in_channels, out_channels]` strides: An int or list of `ints` that has length `1`, `2` or `4`. The stride of the sliding window for each dimension of `input`. If a single value is given it is replicated in the `H` and `W` dimension. By default the `N` and `C` dimensions are set to 1. The dimension order is determined by the value of `data_format`, see below for details. padding: Either the `string` `"SAME"` or `"VALID"` indicating the type of padding algorithm to use, or a list indicating the explicit paddings at the start and end of each dimension. When explicit padding is used and data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding used and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0], [pad_top, pad_bottom], [pad_left, pad_right]]`. data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`. Specify the data format of the input and output data. With the default format "NHWC", the data is stored in the order of: [batch, height, width, channels]. Alternatively, the format could be "NCHW", the data storage order of: [batch, channels, height, width]. dilations: An int or list of `ints` that has length `1`, `2` or `4`, defaults to 1. The dilation factor for each dimension of`input`. If a single value is given it is replicated in the `H` and `W` dimension. By default the `N` and `C` dimensions are set to 1. If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. The dimension order is determined by the value of `data_format`, see above for details. Dilations in the batch and depth dimensions if a 4-d tensor must be 1. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `input`. """ # pylint: enable=line-too-long return conv2d(input, # pylint: disable=redefined-builtin filters, strides, padding, use_cudnn_on_gpu=True, data_format=data_format, dilations=dilations, name=name) @tf_export(v1=["nn.conv2d"]) def conv2d( # pylint: disable=redefined-builtin,dangerous-default-value input, filter=None, strides=None, padding=None, use_cudnn_on_gpu=True, data_format="NHWC", dilations=[1, 1, 1, 1], name=None, filters=None): r"""Computes a 2-D convolution given 4-D `input` and `filter` tensors. Given an input tensor of shape `[batch, in_height, in_width, in_channels]` and a filter / kernel tensor of shape `[filter_height, filter_width, in_channels, out_channels]`, this op performs the following: 1. Flattens the filter to a 2-D matrix with shape `[filter_height * filter_width * in_channels, output_channels]`. 2. Extracts image patches from the input tensor to form a *virtual* tensor of shape `[batch, out_height, out_width, filter_height * filter_width * in_channels]`. 3. 
For each patch, right-multiplies the filter matrix and the image patch vector. In detail, with the default NHWC format, output[b, i, j, k] = sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] * filter[di, dj, q, k] Must have `strides[0] = strides[3] = 1`. For the most common case of the same horizontal and vertices strides, `strides = [1, stride, stride, 1]`. Args: input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`. A 4-D tensor. The dimension order is interpreted according to the value of `data_format`, see below for details. filter: A `Tensor`. Must have the same type as `input`. A 4-D tensor of shape `[filter_height, filter_width, in_channels, out_channels]` strides: An int or list of `ints` that has length `1`, `2` or `4`. The stride of the sliding window for each dimension of `input`. If a single value is given it is replicated in the `H` and `W` dimension. By default the `N` and `C` dimensions are set to 1. The dimension order is determined by the value of `data_format`, see below for details. padding: Either the `string` `"SAME"` or `"VALID"` indicating the type of padding algorithm to use, or a list indicating the explicit paddings at the start and end of each dimension. When explicit padding is used and data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding used and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0], [pad_top, pad_bottom], [pad_left, pad_right]]`. use_cudnn_on_gpu: An optional `bool`. Defaults to `True`. data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`. Specify the data format of the input and output data. With the default format "NHWC", the data is stored in the order of: [batch, height, width, channels]. Alternatively, the format could be "NCHW", the data storage order of: [batch, channels, height, width]. dilations: An int or list of `ints` that has length `1`, `2` or `4`, defaults to 1. The dilation factor for each dimension of`input`. If a single value is given it is replicated in the `H` and `W` dimension. By default the `N` and `C` dimensions are set to 1. If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. The dimension order is determined by the value of `data_format`, see above for details. Dilations in the batch and depth dimensions if a 4-d tensor must be 1. name: A name for the operation (optional). filters: Alias for filter. Returns: A `Tensor`. Has the same type as `input`. """ filter = deprecation.deprecated_argument_lookup( "filters", filters, "filter", filter) padding, explicit_paddings = _convert_padding(padding) if data_format is None: data_format = "NHWC" channel_index = 1 if data_format.startswith("NC") else 3 strides = _get_sequence(strides, 2, channel_index, "strides") dilations = _get_sequence(dilations, 2, channel_index, "dilations") return gen_nn_ops.conv2d(input, # pylint: disable=redefined-builtin filter, strides, padding, use_cudnn_on_gpu=use_cudnn_on_gpu, explicit_paddings=explicit_paddings, data_format=data_format, dilations=dilations, name=name) @tf_export(v1=["nn.conv2d_backprop_filter"]) def conv2d_backprop_filter( # pylint: disable=redefined-builtin,dangerous-default-value input, filter_sizes, out_backprop, strides, padding, use_cudnn_on_gpu=True, data_format="NHWC", dilations=[1, 1, 1, 1], name=None): r"""Computes the gradients of convolution with respect to the filter. 
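  A shape-level sketch (illustrative; assumes the TF 1.x export
  `tf.nn.conv2d_backprop_filter`, and in most code this gradient is obtained
  via `tf.gradients` on a `conv2d` instead of calling the op directly):

  ```python
  import tensorflow as tf

  x = tf.ones([1, 8, 8, 3])     # conv2d input
  dy = tf.ones([1, 8, 8, 16])   # gradient w.r.t. the conv2d output
  dw = tf.nn.conv2d_backprop_filter(
      x, filter_sizes=[3, 3, 3, 16], out_backprop=dy,
      strides=[1, 1, 1, 1], padding="SAME")
  print(dw.shape)  # (3, 3, 3, 16), i.e. the filter shape
  ```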
Args: input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`. 4-D with shape `[batch, in_height, in_width, in_channels]`. filter_sizes: A `Tensor` of type `int32`. An integer vector representing the tensor shape of `filter`, where `filter` is a 4-D `[filter_height, filter_width, in_channels, out_channels]` tensor. out_backprop: A `Tensor`. Must have the same type as `input`. 4-D with shape `[batch, out_height, out_width, out_channels]`. Gradients w.r.t. the output of the convolution. strides: A list of `ints`. The stride of the sliding window for each dimension of the input of the convolution. Must be in the same order as the dimension specified with format. padding: Either the `string `"SAME"` or `"VALID"` indicating the type of padding algorithm to use, or a list indicating the explicit paddings at the start and end of each dimension. When explicit padding is used and data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding used and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0], [pad_top, pad_bottom], [pad_left, pad_right]]`. use_cudnn_on_gpu: An optional `bool`. Defaults to `True`. data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`. Specify the data format of the input and output data. With the default format "NHWC", the data is stored in the order of: [batch, in_height, in_width, in_channels]. Alternatively, the format could be "NCHW", the data storage order of: [batch, in_channels, in_height, in_width]. dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`. 1-D tensor of length 4. The dilation factor for each dimension of `input`. If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. The dimension order is determined by the value of `data_format`, see above for details. Dilations in the batch and depth dimensions must be 1. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `input`. """ padding, explicit_paddings = _convert_padding(padding) return gen_nn_ops.conv2d_backprop_filter( input, filter_sizes, out_backprop, strides, padding, use_cudnn_on_gpu, explicit_paddings, data_format, dilations, name) @tf_export(v1=["nn.conv2d_backprop_input"]) def conv2d_backprop_input( # pylint: disable=redefined-builtin,dangerous-default-value input_sizes, filter=None, out_backprop=None, strides=None, padding=None, use_cudnn_on_gpu=True, data_format="NHWC", dilations=[1, 1, 1, 1], name=None, filters=None): r"""Computes the gradients of convolution with respect to the input. Args: input_sizes: A `Tensor` of type `int32`. An integer vector representing the shape of `input`, where `input` is a 4-D `[batch, height, width, channels]` tensor. filter: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`. 4-D with shape `[filter_height, filter_width, in_channels, out_channels]`. out_backprop: A `Tensor`. Must have the same type as `filter`. 4-D with shape `[batch, out_height, out_width, out_channels]`. Gradients w.r.t. the output of the convolution. strides: A list of `ints`. The stride of the sliding window for each dimension of the input of the convolution. Must be in the same order as the dimension specified with format. padding: Either the `string `"SAME"` or `"VALID"` indicating the type of padding algorithm to use, or a list indicating the explicit paddings at the start and end of each dimension. 
When explicit padding is used and data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding used and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0], [pad_top, pad_bottom], [pad_left, pad_right]]`. use_cudnn_on_gpu: An optional `bool`. Defaults to `True`. data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`. Specify the data format of the input and output data. With the default format "NHWC", the data is stored in the order of: [batch, in_height, in_width, in_channels]. Alternatively, the format could be "NCHW", the data storage order of: [batch, in_channels, in_height, in_width]. dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`. 1-D tensor of length 4. The dilation factor for each dimension of `input`. If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. The dimension order is determined by the value of `data_format`, see above for details. Dilations in the batch and depth dimensions must be 1. name: A name for the operation (optional). filters: Alias for filter. Returns: A `Tensor`. Has the same type as `filter`. """ filter = deprecation.deprecated_argument_lookup( "filters", filters, "filter", filter) padding, explicit_paddings = _convert_padding(padding) return gen_nn_ops.conv2d_backprop_input( input_sizes, filter, out_backprop, strides, padding, use_cudnn_on_gpu, explicit_paddings, data_format, dilations, name) @tf_export(v1=["nn.conv2d_transpose"]) def conv2d_transpose( value=None, filter=None, # pylint: disable=redefined-builtin output_shape=None, strides=None, padding="SAME", data_format="NHWC", name=None, input=None, # pylint: disable=redefined-builtin filters=None, dilations=None): """The transpose of `conv2d`. This operation is sometimes called "deconvolution" after [Deconvolutional Networks](https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf), but is really the transpose (gradient) of `conv2d` rather than an actual deconvolution. Args: value: A 4-D `Tensor` of type `float` and shape `[batch, height, width, in_channels]` for `NHWC` data format or `[batch, in_channels, height, width]` for `NCHW` data format. filter: A 4-D `Tensor` with the same type as `value` and shape `[height, width, output_channels, in_channels]`. `filter`'s `in_channels` dimension must match that of `value`. output_shape: A 1-D `Tensor` representing the output shape of the deconvolution op. strides: An int or list of `ints` that has length `1`, `2` or `4`. The stride of the sliding window for each dimension of `input`. If a single value is given it is replicated in the `H` and `W` dimension. By default the `N` and `C` dimensions are set to 0. The dimension order is determined by the value of `data_format`, see below for details. padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See the "returns" section of `tf.nn.convolution` for details. data_format: A string. 'NHWC' and 'NCHW' are supported. name: Optional name for the returned tensor. input: Alias for value. filters: Alias for filter. dilations: An int or list of `ints` that has length `1`, `2` or `4`, defaults to 1. The dilation factor for each dimension of`input`. If a single value is given it is replicated in the `H` and `W` dimension. By default the `N` and `C` dimensions are set to 1. If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. 
The dimension order is determined by the value of `data_format`, see above for details. Dilations in the batch and depth dimensions if a 4-d tensor must be 1. Returns: A `Tensor` with the same type as `value`. Raises: ValueError: If input/output depth does not match `filter`'s shape, or if padding is other than `'VALID'` or `'SAME'`. """ value = deprecated_argument_lookup("input", input, "value", value) filter = deprecated_argument_lookup("filters", filters, "filter", filter) with ops.name_scope(name, "conv2d_transpose", [value, filter, output_shape]) as name: return conv2d_transpose_v2( value, filter, output_shape, strides, padding=padding, data_format=data_format, dilations=dilations, name=name) @tf_export("nn.conv2d_transpose", v1=[]) def conv2d_transpose_v2( input, # pylint: disable=redefined-builtin filters, # pylint: disable=redefined-builtin output_shape, strides, padding="SAME", data_format="NHWC", dilations=None, name=None): """The transpose of `conv2d`. This operation is sometimes called "deconvolution" after [Deconvolutional Networks](http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf), but is actually the transpose (gradient) of `conv2d` rather than an actual deconvolution. Args: input: A 4-D `Tensor` of type `float` and shape `[batch, height, width, in_channels]` for `NHWC` data format or `[batch, in_channels, height, width]` for `NCHW` data format. filters: A 4-D `Tensor` with the same type as `input` and shape `[height, width, output_channels, in_channels]`. `filter`'s `in_channels` dimension must match that of `input`. output_shape: A 1-D `Tensor` representing the output shape of the deconvolution op. strides: An int or list of `ints` that has length `1`, `2` or `4`. The stride of the sliding window for each dimension of `input`. If a single value is given it is replicated in the `H` and `W` dimension. By default the `N` and `C` dimensions are set to 0. The dimension order is determined by the value of `data_format`, see below for details. padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See the "returns" section of `tf.nn.convolution` for details. data_format: A string. 'NHWC' and 'NCHW' are supported. dilations: An int or list of `ints` that has length `1`, `2` or `4`, defaults to 1. The dilation factor for each dimension of`input`. If a single value is given it is replicated in the `H` and `W` dimension. By default the `N` and `C` dimensions are set to 1. If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. The dimension order is determined by the value of `data_format`, see above for details. Dilations in the batch and depth dimensions if a 4-d tensor must be 1. name: Optional name for the returned tensor. Returns: A `Tensor` with the same type as `input`. Raises: ValueError: If input/output depth does not match `filter`'s shape, or if padding is other than `'VALID'` or `'SAME'`. 
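  For example, a minimal sketch (illustrative shapes; note the
  `[height, width, output_channels, in_channels]` filter layout):

  ```python
  import tensorflow as tf

  # Transposes a conv2d that maps [1, 8, 8, 4] -> [1, 4, 4, 16]
  # with strides 2 and "SAME" padding.
  y = tf.ones([1, 4, 4, 16])
  w = tf.ones([3, 3, 4, 16])  # [height, width, output_channels, in_channels]
  x_back = tf.nn.conv2d_transpose(
      y, w, output_shape=[1, 8, 8, 4], strides=[1, 2, 2, 1], padding="SAME")
  print(x_back.shape)  # (1, 8, 8, 4)
  ```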
""" with ops.name_scope(name, "conv2d_transpose", [input, filter, output_shape]) as name: if data_format is None: data_format = "NHWC" channel_index = 1 if data_format.startswith("NC") else 3 strides = _get_sequence(strides, 2, channel_index, "strides") dilations = _get_sequence(dilations, 2, channel_index, "dilations") return gen_nn_ops.conv2d_backprop_input( input_sizes=output_shape, filter=filters, out_backprop=input, strides=strides, padding=padding, data_format=data_format, dilations=dilations, name=name) @tf_export("nn.atrous_conv2d_transpose") def atrous_conv2d_transpose(value, filters, output_shape, rate, padding, name=None): """The transpose of `atrous_conv2d`. This operation is sometimes called "deconvolution" after [Deconvolutional Networks](https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf), but is really the transpose (gradient) of `atrous_conv2d` rather than an actual deconvolution. Args: value: A 4-D `Tensor` of type `float`. It needs to be in the default `NHWC` format. Its shape is `[batch, in_height, in_width, in_channels]`. filters: A 4-D `Tensor` with the same type as `value` and shape `[filter_height, filter_width, out_channels, in_channels]`. `filters`' `in_channels` dimension must match that of `value`. Atrous convolution is equivalent to standard convolution with upsampled filters with effective height `filter_height + (filter_height - 1) * (rate - 1)` and effective width `filter_width + (filter_width - 1) * (rate - 1)`, produced by inserting `rate - 1` zeros along consecutive elements across the `filters`' spatial dimensions. output_shape: A 1-D `Tensor` of shape representing the output shape of the deconvolution op. rate: A positive int32. The stride with which we sample input values across the `height` and `width` dimensions. Equivalently, the rate by which we upsample the filter values by inserting zeros across the `height` and `width` dimensions. In the literature, the same parameter is sometimes called `input stride` or `dilation`. padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. name: Optional name for the returned tensor. Returns: A `Tensor` with the same type as `value`. Raises: ValueError: If input/output depth does not match `filters`' shape, or if padding is other than `'VALID'` or `'SAME'`, or if the `rate` is less than one, or if the output_shape is not a tensor with 4 elements. """ with ops.name_scope(name, "atrous_conv2d_transpose", [value, filters, output_shape]) as name: value = ops.convert_to_tensor(value, name="value") filters = ops.convert_to_tensor(filters, name="filters") if not value.get_shape().dims[3].is_compatible_with(filters.get_shape()[3]): raise ValueError( "value's input channels does not match filters' input channels, " "{} != {}".format(value.get_shape()[3], filters.get_shape()[3])) if rate < 1: raise ValueError("rate {} cannot be less than one".format(rate)) if rate == 1: return conv2d_transpose( value, filters, output_shape, strides=[1, 1, 1, 1], padding=padding, data_format="NHWC") output_shape_ = ops.convert_to_tensor(output_shape, name="output_shape") if not output_shape_.get_shape().is_compatible_with(tensor_shape.vector(4)): raise ValueError("output_shape must have shape (4,), got {}".format( output_shape_.get_shape())) if isinstance(output_shape, tuple): output_shape = list(output_shape) if isinstance(output_shape, (list, np.ndarray)): # output_shape's shape should be == [4] if reached this point. 
if not filters.get_shape().dims[2].is_compatible_with(output_shape[3]): raise ValueError( "output_shape does not match filter's output channels, " "{} != {}".format(output_shape[3], filters.get_shape()[2])) # We have two padding contributions. The first is used for converting "SAME" # to "VALID". The second is required so that the height and width of the # zero-padded value tensor are multiples of rate. # Padding required to reduce to "VALID" convolution if padding == "SAME": # Handle filters whose shape is unknown during graph creation. if filters.get_shape().is_fully_defined(): filter_shape = filters.get_shape().as_list() else: filter_shape = array_ops.shape(filters) filter_height, filter_width = filter_shape[0], filter_shape[1] # Spatial dimensions of the filters and the upsampled filters in which we # introduce (rate - 1) zeros between consecutive filter values. filter_height_up = filter_height + (filter_height - 1) * (rate - 1) filter_width_up = filter_width + (filter_width - 1) * (rate - 1) pad_height = filter_height_up - 1 pad_width = filter_width_up - 1 # When pad_height (pad_width) is odd, we pad more to bottom (right), # following the same convention as conv2d(). pad_top = pad_height // 2 pad_bottom = pad_height - pad_top pad_left = pad_width // 2 pad_right = pad_width - pad_left elif padding == "VALID": pad_top = 0 pad_bottom = 0 pad_left = 0 pad_right = 0 else: raise ValueError("padding must be either VALID or SAME:" " {}".format(padding)) in_height = output_shape[1] + pad_top + pad_bottom in_width = output_shape[2] + pad_left + pad_right # More padding so that rate divides the height and width of the input. pad_bottom_extra = (rate - in_height % rate) % rate pad_right_extra = (rate - in_width % rate) % rate # The paddings argument to space_to_batch is just the extra padding # component. space_to_batch_pad = [[0, pad_bottom_extra], [0, pad_right_extra]] value = array_ops.space_to_batch( input=value, paddings=space_to_batch_pad, block_size=rate) input_sizes = [ rate * rate * output_shape[0], (in_height + pad_bottom_extra) // rate, (in_width + pad_right_extra) // rate, output_shape[3] ] value = gen_nn_ops.conv2d_backprop_input( input_sizes=input_sizes, filter=filters, out_backprop=value, strides=[1, 1, 1, 1], padding="VALID", data_format="NHWC") # The crops argument to batch_to_space includes both padding components. 
batch_to_space_crop = [[pad_top, pad_bottom + pad_bottom_extra], [pad_left, pad_right + pad_right_extra]] return array_ops.batch_to_space( input=value, crops=batch_to_space_crop, block_size=rate) @tf_export("nn.conv3d", v1=[]) def conv3d_v2(input, # pylint: disable=redefined-builtin,missing-docstring filters, strides, padding, data_format="NDHWC", dilations=None, name=None): if dilations is None: dilations = [1, 1, 1, 1, 1] return gen_nn_ops.conv3d(input, filters, strides, padding, data_format=data_format, dilations=dilations, name=name) @tf_export(v1=["nn.conv3d"]) def conv3d_v1( # pylint: disable=missing-docstring,dangerous-default-value input, # pylint: disable=redefined-builtin filter=None, # pylint: disable=redefined-builtin strides=None, padding=None, data_format="NDHWC", dilations=[1, 1, 1, 1, 1], name=None, filters=None): filter = deprecated_argument_lookup("filters", filters, "filter", filter) return gen_nn_ops.conv3d( input, filter, strides, padding, data_format, dilations, name) conv3d_v2.__doc__ = deprecation.rewrite_argument_docstring( gen_nn_ops.conv3d.__doc__, "filter", "filters") conv3d_v1.__doc__ = gen_nn_ops.conv3d.__doc__ @tf_export(v1=["nn.conv3d_transpose"]) def conv3d_transpose( value, filter=None, # pylint: disable=redefined-builtin output_shape=None, strides=None, padding="SAME", data_format="NDHWC", name=None, input=None, # pylint: disable=redefined-builtin filters=None, dilations=None): """The transpose of `conv3d`. This operation is sometimes called "deconvolution" after [Deconvolutional Networks](https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf), but is really the transpose (gradient) of `conv3d` rather than an actual deconvolution. Args: value: A 5-D `Tensor` of type `float` and shape `[batch, depth, height, width, in_channels]`. filter: A 5-D `Tensor` with the same type as `value` and shape `[depth, height, width, output_channels, in_channels]`. `filter`'s `in_channels` dimension must match that of `value`. output_shape: A 1-D `Tensor` representing the output shape of the deconvolution op. strides: A list of ints. The stride of the sliding window for each dimension of the input tensor. padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See the "returns" section of `tf.nn.convolution` for details. data_format: A string, either `'NDHWC'` or `'NCDHW`' specifying the layout of the input and output tensors. Defaults to `'NDHWC'`. name: Optional name for the returned tensor. input: Alias of value. filters: Alias of filter. dilations: An int or list of `ints` that has length `1`, `3` or `5`, defaults to 1. The dilation factor for each dimension of`input`. If a single value is given it is replicated in the `D`, `H` and `W` dimension. By default the `N` and `C` dimensions are set to 1. If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. The dimension order is determined by the value of `data_format`, see above for details. Dilations in the batch and depth dimensions if a 5-d tensor must be 1. Returns: A `Tensor` with the same type as `value`. Raises: ValueError: If input/output depth does not match `filter`'s shape, or if padding is other than `'VALID'` or `'SAME'`. 
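  For example, a minimal sketch (illustrative shapes, `NDHWC` layout):

  ```python
  import tensorflow as tf

  # Transposes a conv3d that maps [1, 8, 8, 8, 4] -> [1, 4, 4, 4, 16]
  # with stride 2 and "SAME" padding.
  y = tf.ones([1, 4, 4, 4, 16])
  w = tf.ones([3, 3, 3, 4, 16])  # [depth, height, width, out_channels, in_channels]
  x_back = tf.nn.conv3d_transpose(
      y, w, output_shape=[1, 8, 8, 8, 4], strides=[1, 2, 2, 2, 1],
      padding="SAME")
  print(x_back.shape)  # (1, 8, 8, 8, 4)
  ```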
""" filter = deprecated_argument_lookup("filters", filters, "filter", filter) value = deprecated_argument_lookup("input", input, "value", value) return conv3d_transpose_v2( value, filter, output_shape, strides, padding=padding, data_format=data_format, dilations=dilations, name=name) @tf_export("nn.conv3d_transpose", v1=[]) def conv3d_transpose_v2(input, # pylint: disable=redefined-builtin filters, output_shape, strides, padding="SAME", data_format="NDHWC", dilations=None, name=None): """The transpose of `conv3d`. This operation is sometimes called "deconvolution" after [Deconvolutional Networks](http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf), but is actually the transpose (gradient) of `conv2d` rather than an actual deconvolution. Args: input: A 5-D `Tensor` of type `float` and shape `[batch, height, width, in_channels]` for `NHWC` data format or `[batch, in_channels, height, width]` for `NCHW` data format. filters: A 5-D `Tensor` with the same type as `value` and shape `[height, width, output_channels, in_channels]`. `filter`'s `in_channels` dimension must match that of `value`. output_shape: A 1-D `Tensor` representing the output shape of the deconvolution op. strides: An int or list of `ints` that has length `1`, `3` or `5`. The stride of the sliding window for each dimension of `input`. If a single value is given it is replicated in the `D`, `H` and `W` dimension. By default the `N` and `C` dimensions are set to 0. The dimension order is determined by the value of `data_format`, see below for details. padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See the "returns" section of `tf.nn.convolution` for details. data_format: A string. 'NDHWC' and 'NCDHW' are supported. dilations: An int or list of `ints` that has length `1`, `3` or `5`, defaults to 1. The dilation factor for each dimension of`input`. If a single value is given it is replicated in the `D`, `H` and `W` dimension. By default the `N` and `C` dimensions are set to 1. If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. The dimension order is determined by the value of `data_format`, see above for details. Dilations in the batch and depth dimensions if a 5-d tensor must be 1. name: Optional name for the returned tensor. Returns: A `Tensor` with the same type as `value`. """ with ops.name_scope(name, "conv3d_transpose", [input, filter, output_shape]) as name: if data_format is None: data_format = "NDHWC" channel_index = 1 if data_format.startswith("NC") else 4 strides = _get_sequence(strides, 3, channel_index, "strides") dilations = _get_sequence(dilations, 3, channel_index, "dilations") return gen_nn_ops.conv3d_backprop_input_v2( input_sizes=output_shape, filter=filters, out_backprop=input, strides=strides, padding=padding, data_format=data_format, dilations=dilations, name=name) CONV_TRANSPOSE_OPS = ( conv1d_transpose, conv2d_transpose_v2, conv3d_transpose_v2, ) @tf_export("nn.conv_transpose") def conv_transpose(input, # pylint: disable=redefined-builtin filters, output_shape, strides, padding="SAME", data_format=None, dilations=None, name=None): """The transpose of `convolution`. This operation is sometimes called "deconvolution" after [Deconvolutional Networks](http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf), but is actually the transpose (gradient) of `convolution` rather than an actual deconvolution. 
Args: input: An N+2 dimensional `Tensor` of shape `[batch_size] + input_spatial_shape + [in_channels]` if data_format does not start with "NC" (default), or `[batch_size, in_channels] + input_spatial_shape` if data_format starts with "NC". It must be one of the following types: `half`, `bfloat16`, `float32`, `float64`. filters: An N+2 dimensional `Tensor` with the same type as `input` and shape `spatial_filter_shape + [in_channels, out_channels]`. output_shape: A 1-D `Tensor` representing the output shape of the deconvolution op. strides: An int or list of `ints` that has length `1`, `N` or `N+2`. The stride of the sliding window for each dimension of `input`. If a single value is given it is replicated in the spatial dimensions. By default the `N` and `C` dimensions are set to 0. The dimension order is determined by the value of `data_format`, see below for details. padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See the "returns" section of `tf.nn.convolution` for details. data_format: A string or None. Specifies whether the channel dimension of the `input` and output is the last dimension (default, or if `data_format` does not start with "NC"), or the second dimension (if `data_format` starts with "NC"). For N=1, the valid values are "NWC" (default) and "NCW". For N=2, the valid values are "NHWC" (default) and "NCHW". For N=3, the valid values are "NDHWC" (default) and "NCDHW". dilations: An int or list of `ints` that has length `1`, `N` or `N+2`, defaults to 1. The dilation factor for each dimension of`input`. If a single value is given it is replicated in the spatial dimensions. By default the `N` and `C` dimensions are set to 1. If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. The dimension order is determined by the value of `data_format`, see above for details. name: A name for the operation (optional). If not specified "conv_transpose" is used. Returns: A `Tensor` with the same type as `value`. """ with ops.name_scope(name, "conv_transpose", [input, filter, output_shape]) as name: if isinstance(output_shape, collections.Sized): n = len(output_shape) - 2 elif isinstance(output_shape, ops.Tensor): n = output_shape.shape[0] - 2 else: raise ValueError("output_shape must be a tensor or sized collection.") if not 1 <= n <= 3: raise ValueError( "output_shape must be of length 3, 4 or 5 but was {}.".format(n + 2)) op = CONV_TRANSPOSE_OPS[n-1] return op( input, filters, output_shape, strides, padding=padding, data_format=data_format, dilations=dilations, name=name) @tf_export("nn.bias_add") def bias_add(value, bias, data_format=None, name=None): """Adds `bias` to `value`. This is (mostly) a special case of `tf.add` where `bias` is restricted to 1-D. Broadcasting is supported, so `value` may have any number of dimensions. Unlike `tf.add`, the type of `bias` is allowed to differ from `value` in the case where both types are quantized. Args: value: A `Tensor` with type `float`, `double`, `int64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, or `complex128`. bias: A 1-D `Tensor` with size matching the channel dimension of `value`. Must be the same type as `value` unless `value` is a quantized type, in which case a different quantized type may be used. data_format: A string. 'N...C' and 'NC...' are supported. name: A name for the operation (optional). Returns: A `Tensor` with the same type as `value`. 
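  For example (illustrative values):

  ```python
  import tensorflow as tf

  x = tf.reshape(tf.range(6.0), [1, 2, 3])  # value with 3 channels
  b = tf.constant([10.0, 20.0, 30.0])       # one bias per channel
  y = tf.nn.bias_add(x, b)
  # y == [[[10., 21., 32.], [13., 24., 35.]]]
  ```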
""" with ops.name_scope(name, "BiasAdd", [value, bias]) as name: if data_format is not None: if data_format.startswith("NC"): data_format = "NCHW" elif data_format.startswith("N") and data_format.endswith("C"): data_format = "NHWC" else: raise ValueError("data_format must be of the form `N...C` or `NC...`") if not context.executing_eagerly(): value = ops.convert_to_tensor(value, name="input") bias = ops.convert_to_tensor(bias, dtype=value.dtype, name="bias") return gen_nn_ops.bias_add(value, bias, data_format=data_format, name=name) def bias_add_v1(value, bias, name=None): """Adds `bias` to `value`. This is a deprecated version of bias_add and will soon to be removed. This is (mostly) a special case of `tf.add` where `bias` is restricted to 1-D. Broadcasting is supported, so `value` may have any number of dimensions. Unlike `tf.add`, the type of `bias` is allowed to differ from `value` in the case where both types are quantized. Args: value: A `Tensor` with type `float`, `double`, `int64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, or `complex128`. bias: A 1-D `Tensor` with size matching the last dimension of `value`. Must be the same type as `value` unless `value` is a quantized type, in which case a different quantized type may be used. name: A name for the operation (optional). Returns: A `Tensor` with the same type as `value`. """ with ops.name_scope(name, "BiasAddV1", [value, bias]) as name: value = ops.convert_to_tensor(value, name="input") bias = ops.convert_to_tensor(bias, dtype=value.dtype, name="bias") return gen_nn_ops.bias_add_v1(value, bias, name=name) @tf_export(v1=["nn.crelu"]) def crelu(features, name=None, axis=-1): """Computes Concatenated ReLU. Concatenates a ReLU which selects only the positive part of the activation with a ReLU which selects only the *negative* part of the activation. Note that as a result this non-linearity doubles the depth of the activations. Source: [Understanding and Improving Convolutional Neural Networks via Concatenated Rectified Linear Units. W. Shang, et al.](https://arxiv.org/abs/1603.05201) Args: features: A `Tensor` with type `float`, `double`, `int32`, `int64`, `uint8`, `int16`, or `int8`. name: A name for the operation (optional). axis: The axis that the output values are concatenated along. Default is -1. Returns: A `Tensor` with the same type as `features`. """ with ops.name_scope(name, "CRelu", [features]) as name: features = ops.convert_to_tensor(features, name="features") c = array_ops.concat([features, -features], axis, name=name) return gen_nn_ops.relu(c) @tf_export("nn.crelu", v1=[]) def crelu_v2(features, axis=-1, name=None): return crelu(features, name=name, axis=axis) crelu_v2.__doc__ = crelu.__doc__ @tf_export("nn.relu6") def relu6(features, name=None): """Computes Rectified Linear 6: `min(max(features, 0), 6)`. Source: [Convolutional Deep Belief Networks on CIFAR-10. A. Krizhevsky](http://www.cs.utoronto.ca/~kriz/conv-cifar10-aug2010.pdf) Args: features: A `Tensor` with type `float`, `double`, `int32`, `int64`, `uint8`, `int16`, or `int8`. name: A name for the operation (optional). Returns: A `Tensor` with the same type as `features`. """ with ops.name_scope(name, "Relu6", [features]) as name: features = ops.convert_to_tensor(features, name="features") return gen_nn_ops.relu6(features, name=name) @tf_export("nn.leaky_relu") def leaky_relu(features, alpha=0.2, name=None): """Compute the Leaky ReLU activation function. Source: [Rectifier Nonlinearities Improve Neural Network Acoustic Models. 
AL Maas, AY Hannun, AY Ng - Proc. ICML, 2013](https://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf). Args: features: A `Tensor` representing preactivation values. Must be one of the following types: `float16`, `float32`, `float64`, `int32`, `int64`. alpha: Slope of the activation function at x < 0. name: A name for the operation (optional). Returns: The activation value. """ with ops.name_scope(name, "LeakyRelu", [features, alpha]) as name: features = ops.convert_to_tensor(features, name="features") if features.dtype.is_integer: features = math_ops.cast(features, dtypes.float32) if isinstance(alpha, np.ndarray): alpha = alpha.item() return gen_nn_ops.leaky_relu(features, alpha=alpha, name=name) def _flatten_outer_dims(logits): """Flattens logits' outer dimensions and keep its last dimension.""" rank = array_ops.rank(logits) last_dim_size = array_ops.slice( array_ops.shape(logits), [math_ops.subtract(rank, 1)], [1]) output = array_ops.reshape(logits, array_ops.concat([[-1], last_dim_size], 0)) # Set output shape if known. if not context.executing_eagerly(): shape = logits.get_shape() if shape is not None and shape.dims is not None: shape = shape.as_list() product = 1 product_valid = True for d in shape[:-1]: if d is None: product_valid = False break else: product *= d if product_valid: output_shape = [product, shape[-1]] output.set_shape(output_shape) return output def _softmax(logits, compute_op, dim=-1, name=None): """Helper function for softmax and log_softmax. It reshapes and transposes the input logits into a 2-D Tensor and then invokes the tf.nn._softmax or tf.nn._log_softmax function. The output would be transposed and reshaped back. Args: logits: A non-empty `Tensor`. Must be one of the following types: `half`, `float32`, `float64`. compute_op: Either gen_nn_ops.softmax or gen_nn_ops.log_softmax dim: The dimension softmax would be performed on. The default is -1 which indicates the last dimension. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `logits`. Same shape as `logits`. Raises: InvalidArgumentError: if `logits` is empty or `dim` is beyond the last dimension of `logits`. """ def _swap_axis(logits, dim_index, last_index, name=None): """Swaps logits's dim_index and last_index.""" return array_ops.transpose( logits, array_ops.concat([ math_ops.range(dim_index), [last_index], math_ops.range(dim_index + 1, last_index), [dim_index] ], 0), name=name) logits = ops.convert_to_tensor(logits) # We need its original shape for shape inference. shape = logits.get_shape() is_last_dim = (dim == -1) or (dim == shape.ndims - 1) if is_last_dim: return compute_op(logits, name=name) dim_val = dim if isinstance(dim, ops.Tensor): dim_val = tensor_util.constant_value(dim) if dim_val is not None and not -shape.ndims <= dim_val < shape.ndims: raise errors_impl.InvalidArgumentError( None, None, "Dimension (%d) must be in the range [%d, %d) where %d is the number of" " dimensions in the input." % (dim_val, -shape.ndims, shape.ndims, shape.ndims)) # If dim is not the last dimension, we have to do a transpose so that we can # still perform softmax on its last dimension. # In case dim is negative (and is not last dimension -1), add shape.ndims ndims = array_ops.rank(logits) if not isinstance(dim, ops.Tensor): if dim < 0: dim += ndims else: dim = array_ops.where(math_ops.less(dim, 0), dim + ndims, dim) # Swap logits' dimension of dim and its last dimension. 
input_rank = array_ops.rank(logits) dim_axis = dim % shape.ndims logits = _swap_axis(logits, dim_axis, math_ops.subtract(input_rank, 1)) # Do the actual softmax on its last dimension. output = compute_op(logits) output = _swap_axis( output, dim_axis, math_ops.subtract(input_rank, 1), name=name) # Make shape inference work since transpose may erase its static shape. output.set_shape(shape) return output @tf_export(v1=["nn.softmax", "math.softmax"]) @deprecation.deprecated_args(None, "dim is deprecated, use axis instead", "dim") def softmax(logits, axis=None, name=None, dim=None): """Computes softmax activations. This function performs the equivalent of softmax = tf.exp(logits) / tf.reduce_sum(tf.exp(logits), axis) Args: logits: A non-empty `Tensor`. Must be one of the following types: `half`, `float32`, `float64`. axis: The dimension softmax would be performed on. The default is -1 which indicates the last dimension. name: A name for the operation (optional). dim: Deprecated alias for `axis`. Returns: A `Tensor`. Has the same type and shape as `logits`. Raises: InvalidArgumentError: if `logits` is empty or `axis` is beyond the last dimension of `logits`. """ axis = deprecation.deprecated_argument_lookup("axis", axis, "dim", dim) if axis is None: axis = -1 return _softmax(logits, gen_nn_ops.softmax, axis, name) @tf_export("nn.softmax", "math.softmax", v1=[]) def softmax_v2(logits, axis=None, name=None): """Computes softmax activations. This function performs the equivalent of softmax = tf.exp(logits) / tf.reduce_sum(tf.exp(logits), axis) Args: logits: A non-empty `Tensor`. Must be one of the following types: `half`, `float32`, `float64`. axis: The dimension softmax would be performed on. The default is -1 which indicates the last dimension. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type and shape as `logits`. Raises: InvalidArgumentError: if `logits` is empty or `axis` is beyond the last dimension of `logits`. """ if axis is None: axis = -1 return _softmax(logits, gen_nn_ops.softmax, axis, name) @tf_export(v1=["nn.log_softmax", "math.log_softmax"]) @deprecation.deprecated_args(None, "dim is deprecated, use axis instead", "dim") def log_softmax(logits, axis=None, name=None, dim=None): """Computes log softmax activations. For each batch `i` and class `j` we have logsoftmax = logits - log(reduce_sum(exp(logits), axis)) Args: logits: A non-empty `Tensor`. Must be one of the following types: `half`, `float32`, `float64`. axis: The dimension softmax would be performed on. The default is -1 which indicates the last dimension. name: A name for the operation (optional). dim: Deprecated alias for `axis`. Returns: A `Tensor`. Has the same type as `logits`. Same shape as `logits`. Raises: InvalidArgumentError: if `logits` is empty or `axis` is beyond the last dimension of `logits`. """ axis = deprecation.deprecated_argument_lookup("axis", axis, "dim", dim) if axis is None: axis = -1 return _softmax(logits, gen_nn_ops.log_softmax, axis, name) @tf_export("nn.log_softmax", "math.log_softmax", v1=[]) def log_softmax_v2(logits, axis=None, name=None): """Computes log softmax activations. For each batch `i` and class `j` we have logsoftmax = logits - log(reduce_sum(exp(logits), axis)) Args: logits: A non-empty `Tensor`. Must be one of the following types: `half`, `float32`, `float64`. axis: The dimension softmax would be performed on. The default is -1 which indicates the last dimension. name: A name for the operation (optional). Returns: A `Tensor`. 
Has the same type as `logits`. Same shape as `logits`. Raises: InvalidArgumentError: if `logits` is empty or `axis` is beyond the last dimension of `logits`. """ if axis is None: axis = -1 return _softmax(logits, gen_nn_ops.log_softmax, axis, name) def _ensure_xent_args(name, sentinel, labels, logits): # Make sure that all arguments were passed as named arguments. if sentinel is not None: raise ValueError("Only call `%s` with " "named arguments (labels=..., logits=..., ...)" % name) if labels is None or logits is None: raise ValueError("Both labels and logits must be provided.") @tf_export("nn.softmax_cross_entropy_with_logits", v1=[]) def softmax_cross_entropy_with_logits_v2(labels, logits, axis=-1, name=None): """Computes softmax cross entropy between `logits` and `labels`. Measures the probability error in discrete classification tasks in which the classes are mutually exclusive (each entry is in exactly one class). For example, each CIFAR-10 image is labeled with one and only one label: an image can be a dog or a truck, but not both. **NOTE:** While the classes are mutually exclusive, their probabilities need not be. All that is required is that each row of `labels` is a valid probability distribution. If they are not, the computation of the gradient will be incorrect. If using exclusive `labels` (wherein one and only one class is true at a time), see `sparse_softmax_cross_entropy_with_logits`. **WARNING:** This op expects unscaled logits, since it performs a `softmax` on `logits` internally for efficiency. Do not call this op with the output of `softmax`, as it will produce incorrect results. A common use case is to have logits and labels of shape `[batch_size, num_classes]`, but higher dimensions are supported, with the `axis` argument specifying the class dimension. `logits` and `labels` must have the same dtype (either `float16`, `float32`, or `float64`). Backpropagation will happen into both `logits` and `labels`. To disallow backpropagation into `labels`, pass label tensors through `tf.stop_gradient` before feeding it to this function. **Note that to avoid confusion, it is required to pass only named arguments to this function.** Args: labels: Each vector along the class dimension should hold a valid probability distribution e.g. for the case in which labels are of shape `[batch_size, num_classes]`, each row of `labels[i]` must be a valid probability distribution. logits: Per-label activations, typically a linear output. These activation energies are interpreted as unnormalized log probabilities. axis: The class dimension. Defaulted to -1 which is the last dimension. name: A name for the operation (optional). Returns: A `Tensor` that contains the softmax cross entropy loss. Its type is the same as `logits` and its shape is the same as `labels` except that it does not have the last dimension of `labels`. """ return softmax_cross_entropy_with_logits_v2_helper( labels=labels, logits=logits, axis=axis, name=name) @tf_export(v1=["nn.softmax_cross_entropy_with_logits_v2"]) @deprecated_args(None, "dim is deprecated, use axis instead", "dim") def softmax_cross_entropy_with_logits_v2_helper( labels, logits, axis=None, name=None, dim=None): """Computes softmax cross entropy between `logits` and `labels`. Measures the probability error in discrete classification tasks in which the classes are mutually exclusive (each entry is in exactly one class). For example, each CIFAR-10 image is labeled with one and only one label: an image can be a dog or a truck, but not both. 
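  For example, a minimal sketch (named arguments, as required;
  `tf.nn.softmax_cross_entropy_with_logits_v2` is the TF 1.x export of this
  helper):

  ```python
  import tensorflow as tf

  logits = tf.constant([[2.0, 1.0, 0.1],
                        [0.5, 2.5, 0.3]])
  labels = tf.constant([[1.0, 0.0, 0.0],
                        [0.0, 1.0, 0.0]])
  loss = tf.nn.softmax_cross_entropy_with_logits_v2(
      labels=labels, logits=logits)
  print(loss.shape)  # (2,) -- one scalar loss per example row
  ```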
**NOTE:** While the classes are mutually exclusive, their probabilities need not be. All that is required is that each row of `labels` is a valid probability distribution. If they are not, the computation of the gradient will be incorrect. If using exclusive `labels` (wherein one and only one class is true at a time), see `sparse_softmax_cross_entropy_with_logits`. **WARNING:** This op expects unscaled logits, since it performs a `softmax` on `logits` internally for efficiency. Do not call this op with the output of `softmax`, as it will produce incorrect results. A common use case is to have logits and labels of shape `[batch_size, num_classes]`, but higher dimensions are supported, with the `axis` argument specifying the class dimension. `logits` and `labels` must have the same dtype (either `float16`, `float32`, or `float64`). Backpropagation will happen into both `logits` and `labels`. To disallow backpropagation into `labels`, pass label tensors through `tf.stop_gradient` before feeding it to this function. **Note that to avoid confusion, it is required to pass only named arguments to this function.** Args: labels: Each vector along the class dimension should hold a valid probability distribution e.g. for the case in which labels are of shape `[batch_size, num_classes]`, each row of `labels[i]` must be a valid probability distribution. logits: Unscaled log probabilities. axis: The class dimension. Defaulted to -1 which is the last dimension. name: A name for the operation (optional). dim: Deprecated alias for axis. Returns: A `Tensor` that contains the softmax cross entropy loss. Its type is the same as `logits` and its shape is the same as `labels` except that it does not have the last dimension of `labels`. """ # TODO(pcmurray) Raise an error when the labels do not sum to 1. Note: This # could break users who call this with bad labels, but disregard the bad # results. axis = deprecated_argument_lookup("axis", axis, "dim", dim) del dim if axis is None: axis = -1 with ops.name_scope(name, "softmax_cross_entropy_with_logits", [logits, labels]) as name: logits = ops.convert_to_tensor(logits, name="logits") labels = ops.convert_to_tensor(labels, name="labels") convert_to_float32 = ( logits.dtype == dtypes.float16 or logits.dtype == dtypes.bfloat16) precise_logits = math_ops.cast( logits, dtypes.float32) if convert_to_float32 else logits # labels and logits must be of the same type labels = math_ops.cast(labels, precise_logits.dtype) input_rank = array_ops.rank(precise_logits) # For shape inference. shape = logits.get_shape() # Move the dim to the end if dim is not the last dimension. if axis != -1: def _move_dim_to_end(tensor, dim_index, rank): return array_ops.transpose( tensor, array_ops.concat([ math_ops.range(dim_index), math_ops.range(dim_index + 1, rank), [dim_index] ], 0)) precise_logits = _move_dim_to_end(precise_logits, axis, input_rank) labels = _move_dim_to_end(labels, axis, input_rank) input_shape = array_ops.shape(precise_logits) # Make precise_logits and labels into matrices. precise_logits = _flatten_outer_dims(precise_logits) labels = _flatten_outer_dims(labels) # Do the actual op computation. # The second output tensor contains the gradients. We use it in # _CrossEntropyGrad() in nn_grad but not here. cost, unused_backprop = gen_nn_ops.softmax_cross_entropy_with_logits( precise_logits, labels, name=name) # The output cost shape should be the input minus axis. 
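    # The slice below keeps every entry of the dynamic input shape except the
    # last one, i.e. shape(precise_logits)[:-1]; since the class axis was moved
    # to the end above, this is exactly the per-example shape of the loss.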
output_shape = array_ops.slice(input_shape, [0], [math_ops.subtract(input_rank, 1)]) cost = array_ops.reshape(cost, output_shape) # Make shape inference work since reshape and transpose may erase its static # shape. if not context.executing_eagerly( ) and shape is not None and shape.dims is not None: shape = shape.as_list() del shape[axis] cost.set_shape(shape) if convert_to_float32: return math_ops.cast(cost, logits.dtype) else: return cost _XENT_DEPRECATION = """ Future major versions of TensorFlow will allow gradients to flow into the labels input on backprop by default. See `tf.nn.softmax_cross_entropy_with_logits_v2`. """ @tf_export(v1=["nn.softmax_cross_entropy_with_logits"]) @deprecation.deprecated(date=None, instructions=_XENT_DEPRECATION) def softmax_cross_entropy_with_logits( _sentinel=None, # pylint: disable=invalid-name labels=None, logits=None, dim=-1, name=None, axis=None): """Computes softmax cross entropy between `logits` and `labels`. Measures the probability error in discrete classification tasks in which the classes are mutually exclusive (each entry is in exactly one class). For example, each CIFAR-10 image is labeled with one and only one label: an image can be a dog or a truck, but not both. **NOTE:** While the classes are mutually exclusive, their probabilities need not be. All that is required is that each row of `labels` is a valid probability distribution. If they are not, the computation of the gradient will be incorrect. If using exclusive `labels` (wherein one and only one class is true at a time), see `sparse_softmax_cross_entropy_with_logits`. **WARNING:** This op expects unscaled logits, since it performs a `softmax` on `logits` internally for efficiency. Do not call this op with the output of `softmax`, as it will produce incorrect results. A common use case is to have logits and labels of shape `[batch_size, num_classes]`, but higher dimensions are supported, with the `dim` argument specifying the class dimension. Backpropagation will happen only into `logits`. To calculate a cross entropy loss that allows backpropagation into both `logits` and `labels`, see `tf.nn.softmax_cross_entropy_with_logits_v2`. **Note that to avoid confusion, it is required to pass only named arguments to this function.** Args: _sentinel: Used to prevent positional parameters. Internal, do not use. labels: Each vector along the class dimension should hold a valid probability distribution e.g. for the case in which labels are of shape `[batch_size, num_classes]`, each row of `labels[i]` must be a valid probability distribution. logits: Per-label activations, typically a linear output. These activation energies are interpreted as unnormalized log probabilities. dim: The class dimension. Defaulted to -1 which is the last dimension. name: A name for the operation (optional). axis: Alias for dim. Returns: A `Tensor` that contains the softmax cross entropy loss. Its type is the same as `logits` and its shape is the same as `labels` except that it does not have the last dimension of `labels`. 
""" dim = deprecated_argument_lookup("axis", axis, "dim", dim) _ensure_xent_args("softmax_cross_entropy_with_logits", _sentinel, labels, logits) with ops.name_scope(name, "softmax_cross_entropy_with_logits_sg", [logits, labels]) as name: labels = array_ops.stop_gradient(labels, name="labels_stop_gradient") return softmax_cross_entropy_with_logits_v2( labels=labels, logits=logits, axis=dim, name=name) @tf_export(v1=["nn.sparse_softmax_cross_entropy_with_logits"]) def sparse_softmax_cross_entropy_with_logits( _sentinel=None, # pylint: disable=invalid-name labels=None, logits=None, name=None): """Computes sparse softmax cross entropy between `logits` and `labels`. Measures the probability error in discrete classification tasks in which the classes are mutually exclusive (each entry is in exactly one class). For example, each CIFAR-10 image is labeled with one and only one label: an image can be a dog or a truck, but not both. **NOTE:** For this operation, the probability of a given label is considered exclusive. That is, soft classes are not allowed, and the `labels` vector must provide a single specific index for the true class for each row of `logits` (each minibatch entry). For soft softmax classification with a probability distribution for each entry, see `softmax_cross_entropy_with_logits_v2`. **WARNING:** This op expects unscaled logits, since it performs a `softmax` on `logits` internally for efficiency. Do not call this op with the output of `softmax`, as it will produce incorrect results. A common use case is to have logits of shape `[batch_size, num_classes]` and have labels of shape `[batch_size]`, but higher dimensions are supported, in which case the `dim`-th dimension is assumed to be of size `num_classes`. `logits` must have the dtype of `float16`, `float32`, or `float64`, and `labels` must have the dtype of `int32` or `int64`. **Note that to avoid confusion, it is required to pass only named arguments to this function.** Args: _sentinel: Used to prevent positional parameters. Internal, do not use. labels: `Tensor` of shape `[d_0, d_1, ..., d_{r-1}]` (where `r` is rank of `labels` and result) and dtype `int32` or `int64`. Each entry in `labels` must be an index in `[0, num_classes)`. Other values will raise an exception when this op is run on CPU, and return `NaN` for corresponding loss and gradient rows on GPU. logits: Per-label activations (typically a linear output) of shape `[d_0, d_1, ..., d_{r-1}, num_classes]` and dtype `float16`, `float32`, or `float64`. These activation energies are interpreted as unnormalized log probabilities. name: A name for the operation (optional). Returns: A `Tensor` of the same shape as `labels` and of the same type as `logits` with the softmax cross entropy loss. Raises: ValueError: If logits are scalars (need to have rank >= 1) or if the rank of the labels is not equal to the rank of the logits minus one. """ _ensure_xent_args("sparse_softmax_cross_entropy_with_logits", _sentinel, labels, logits) # TODO(pcmurray) Raise an error when the label is not an index in # [0, num_classes). Note: This could break users who call this with bad # labels, but disregard the bad results. # Reshape logits and labels to rank 2. with ops.name_scope(name, "SparseSoftmaxCrossEntropyWithLogits", [labels, logits]): labels = ops.convert_to_tensor(labels) logits = ops.convert_to_tensor(logits) precise_logits = math_ops.cast(logits, dtypes.float32) if (dtypes.as_dtype( logits.dtype) == dtypes.float16) else logits # Store label shape for result later. 
labels_static_shape = labels.get_shape() labels_shape = array_ops.shape(labels) static_shapes_fully_defined = ( labels_static_shape.is_fully_defined() and logits.get_shape()[:-1].is_fully_defined()) if logits.get_shape().ndims is not None and logits.get_shape().ndims == 0: raise ValueError( "Logits cannot be scalars - received shape %s." % logits.get_shape()) if logits.get_shape().ndims is not None and ( labels_static_shape.ndims is not None and labels_static_shape.ndims != logits.get_shape().ndims - 1): raise ValueError("Rank mismatch: Rank of labels (received %s) should " "equal rank of logits minus 1 (received %s)." % (labels_static_shape.ndims, logits.get_shape().ndims)) if (static_shapes_fully_defined and labels_static_shape != logits.get_shape()[:-1]): raise ValueError("Shape mismatch: The shape of labels (received %s) " "should equal the shape of logits except for the last " "dimension (received %s)." % (labels_static_shape, logits.get_shape())) # Check if no reshapes are required. if logits.get_shape().ndims == 2: cost, _ = gen_nn_ops.sparse_softmax_cross_entropy_with_logits( precise_logits, labels, name=name) if logits.dtype == dtypes.float16: return math_ops.cast(cost, dtypes.float16) else: return cost # Perform a check of the dynamic shapes if the static shapes are not fully # defined. shape_checks = [] if not static_shapes_fully_defined: shape_checks.append( check_ops.assert_equal( array_ops.shape(labels), array_ops.shape(logits)[:-1])) with ops.control_dependencies(shape_checks): # Reshape logits to 2 dim, labels to 1 dim. num_classes = array_ops.shape(logits)[array_ops.rank(logits) - 1] precise_logits = array_ops.reshape(precise_logits, [-1, num_classes]) labels = array_ops.reshape(labels, [-1]) # The second output tensor contains the gradients. We use it in # _CrossEntropyGrad() in nn_grad but not here. cost, _ = gen_nn_ops.sparse_softmax_cross_entropy_with_logits( precise_logits, labels, name=name) cost = array_ops.reshape(cost, labels_shape) cost.set_shape(labels_static_shape) if logits.dtype == dtypes.float16: return math_ops.cast(cost, dtypes.float16) else: return cost @tf_export("nn.sparse_softmax_cross_entropy_with_logits", v1=[]) def sparse_softmax_cross_entropy_with_logits_v2(labels, logits, name=None): """Computes sparse softmax cross entropy between `logits` and `labels`. Measures the probability error in discrete classification tasks in which the classes are mutually exclusive (each entry is in exactly one class). For example, each CIFAR-10 image is labeled with one and only one label: an image can be a dog or a truck, but not both. **NOTE:** For this operation, the probability of a given label is considered exclusive. That is, soft classes are not allowed, and the `labels` vector must provide a single specific index for the true class for each row of `logits` (each minibatch entry). For soft softmax classification with a probability distribution for each entry, see `softmax_cross_entropy_with_logits_v2`. **WARNING:** This op expects unscaled logits, since it performs a `softmax` on `logits` internally for efficiency. Do not call this op with the output of `softmax`, as it will produce incorrect results. A common use case is to have logits of shape `[batch_size, num_classes]` and have labels of shape `[batch_size]`, but higher dimensions are supported, in which case the `dim`-th dimension is assumed to be of size `num_classes`. `logits` must have the dtype of `float16`, `float32`, or `float64`, and `labels` must have the dtype of `int32` or `int64`. 
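  For example, a minimal illustrative call (assuming `import tensorflow as tf`;
  the values are arbitrary and the printed losses are approximate):

  ```python
  logits = [[4.0, 2.0, 1.0], [0.0, 5.0, 1.0]]
  labels = [0, 1]  # one class index per example row
  tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits)
  # ==> approximately [0.17, 0.02], the same losses that
  # `softmax_cross_entropy_with_logits` would give for the one-hot labels
  # [[1., 0., 0.], [0., 1., 0.]].
  ```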
**Note that to avoid confusion, it is required to pass only named arguments to this function.** Args: labels: `Tensor` of shape `[d_0, d_1, ..., d_{r-1}]` (where `r` is rank of `labels` and result) and dtype `int32` or `int64`. Each entry in `labels` must be an index in `[0, num_classes)`. Other values will raise an exception when this op is run on CPU, and return `NaN` for corresponding loss and gradient rows on GPU. logits: Unscaled log probabilities of shape `[d_0, d_1, ..., d_{r-1}, num_classes]` and dtype `float16`, `float32`, or `float64`. name: A name for the operation (optional). Returns: A `Tensor` of the same shape as `labels` and of the same type as `logits` with the softmax cross entropy loss. Raises: ValueError: If logits are scalars (need to have rank >= 1) or if the rank of the labels is not equal to the rank of the logits minus one. """ return sparse_softmax_cross_entropy_with_logits( labels=labels, logits=logits, name=name) @tf_export("nn.avg_pool", v1=["nn.avg_pool_v2"]) def avg_pool_v2(input, ksize, strides, padding, data_format=None, name=None): # pylint: disable=redefined-builtin """Performs the avg pooling on the input. Each entry in `output` is the mean of the corresponding size `ksize` window in `value`. Args: input: Tensor of rank N+2, of shape `[batch_size] + input_spatial_shape + [num_channels]` if `data_format` does not start with "NC" (default), or `[batch_size, num_channels] + input_spatial_shape` if data_format starts with "NC". Pooling happens over the spatial dimensions only. ksize: An int or list of `ints` that has length `1`, `N` or `N+2`. The size of the window for each dimension of the input tensor. strides: An int or list of `ints` that has length `1`, `N` or `N+2`. The stride of the sliding window for each dimension of the input tensor. padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See the "returns" section of `tf.nn.convolution` for details. data_format: A string. Specifies the channel dimension. For N=1 it can be either "NWC" (default) or "NCW", for N=2 it can be either "NHWC" (default) or "NCHW" and for N=3 either "NDHWC" (default) or "NCDHW". name: Optional name for the operation. Returns: A `Tensor` of format specified by `data_format`. The average pooled output tensor. """ if input.shape is not None: n = len(input.shape) - 2 elif data_format is not None: n = len(data_format) - 2 else: raise ValueError( "The input must have a rank or a data format must be given.") if not 1 <= n <= 3: raise ValueError( "Input tensor must be of rank 3, 4 or 5 but was {}.".format(n + 2)) if data_format is None: channel_index = n + 1 else: channel_index = 1 if data_format.startswith("NC") else n + 1 ksize = _get_sequence(ksize, n, channel_index, "ksize") strides = _get_sequence(strides, n, channel_index, "strides") avg_pooling_ops = { 1: avg_pool1d, 2: gen_nn_ops.avg_pool, 3: gen_nn_ops.avg_pool3d } op = avg_pooling_ops[n] return op( input, ksize=ksize, strides=strides, padding=padding, data_format=data_format, name=name) @tf_export(v1=["nn.avg_pool", "nn.avg_pool2d"]) def avg_pool(value, ksize, strides, padding, data_format="NHWC", name=None, input=None): # pylint: disable=redefined-builtin """Performs the average pooling on the input. Each entry in `output` is the mean of the corresponding size `ksize` window in `value`. Args: value: A 4-D `Tensor` of shape `[batch, height, width, channels]` and type `float32`, `float64`, `qint8`, `quint8`, or `qint32`. ksize: An int or list of `ints` that has length `1`, `2` or `4`. 
The size of the window for each dimension of the input tensor. strides: An int or list of `ints` that has length `1`, `2` or `4`. The stride of the sliding window for each dimension of the input tensor. padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See the "returns" section of `tf.nn.convolution` for details. data_format: A string. 'NHWC' and 'NCHW' are supported. name: Optional name for the operation. input: Alias for value. Returns: A `Tensor` with the same type as `value`. The average pooled output tensor. """ with ops.name_scope(name, "AvgPool", [value]) as name: value = deprecation.deprecated_argument_lookup( "input", input, "value", value) if data_format is None: data_format = "NHWC" channel_index = 1 if data_format.startswith("NC") else 3 ksize = _get_sequence(ksize, 2, channel_index, "ksize") strides = _get_sequence(strides, 2, channel_index, "strides") return gen_nn_ops.avg_pool( value, ksize=ksize, strides=strides, padding=padding, data_format=data_format, name=name) @tf_export("nn.avg_pool2d", v1=[]) def avg_pool2d(input, ksize, strides, padding, data_format="NHWC", name=None): # pylint: disable=redefined-builtin """Performs the average pooling on the input. Each entry in `output` is the mean of the corresponding size `ksize` window in `value`. Args: input: A 4-D `Tensor` of shape `[batch, height, width, channels]` and type `float32`, `float64`, `qint8`, `quint8`, or `qint32`. ksize: An int or list of `ints` that has length `1`, `2` or `4`. The size of the window for each dimension of the input tensor. strides: An int or list of `ints` that has length `1`, `2` or `4`. The stride of the sliding window for each dimension of the input tensor. padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See the "returns" section of `tf.nn.convolution` for details. data_format: A string. 'NHWC' and 'NCHW' are supported. name: Optional name for the operation. Returns: A `Tensor` with the same type as `value`. The average pooled output tensor. """ with ops.name_scope(name, "AvgPool2D", [input]) as name: if data_format is None: data_format = "NHWC" channel_index = 1 if data_format.startswith("NC") else 3 ksize = _get_sequence(ksize, 2, channel_index, "ksize") strides = _get_sequence(strides, 2, channel_index, "strides") return gen_nn_ops.avg_pool( input, ksize=ksize, strides=strides, padding=padding, data_format=data_format, name=name) @tf_export("nn.avg_pool1d") def avg_pool1d(input, ksize, strides, padding, data_format="NWC", name=None): # pylint: disable=redefined-builtin """Performs the average pooling on the input. Each entry in `output` is the mean of the corresponding size `ksize` window in `value`. Note internally this op reshapes and uses the underlying 2d operation. Args: input: A 3-D `Tensor` of the format specified by `data_format`. ksize: An int or list of `ints` that has length `1` or `3`. The size of the window for each dimension of the input tensor. strides: An int or list of `ints` that has length `1` or `3`. The stride of the sliding window for each dimension of the input tensor. padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See the "returns" section of `tf.nn.convolution` for details. data_format: An optional string from: "NWC", "NCW". Defaults to "NWC". name: A name for the operation (optional). Returns: A `Tensor` of format specified by `data_format`. The max pooled output tensor. 
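  For example, a minimal illustrative call (assuming `import tensorflow as tf`;
  the input values are arbitrary):

  ```python
  x = tf.constant([[[1.0], [2.0], [3.0], [4.0]]])  # shape [1, 4, 1], "NWC"
  tf.nn.avg_pool1d(x, ksize=2, strides=2, padding="VALID")
  # ==> [[[1.5], [3.5]]]: each window of two values is averaged.
  ```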
""" with ops.name_scope(name, "AvgPool1D", [input]) as name: if data_format is None: data_format = "NWC" channel_index = 1 if data_format.startswith("NC") else 2 ksize = [1] + _get_sequence(ksize, 1, channel_index, "ksize") strides = [1] + _get_sequence(strides, 1, channel_index, "strides") data_format = "NHWC" if data_format == "NWC" else "NCHW" expanding_dim = 1 if data_format == "NWC" else 2 input = array_ops.expand_dims_v2(input, expanding_dim) result = gen_nn_ops.avg_pool( input, ksize=ksize, strides=strides, padding=padding, data_format=data_format, name=name) return array_ops.squeeze(result, expanding_dim) @tf_export("nn.avg_pool3d") def avg_pool3d(input, ksize, strides, padding, data_format="NDHWC", name=None): # pylint: disable=redefined-builtin """Performs the average pooling on the input. Each entry in `output` is the mean of the corresponding size `ksize` window in `value`. Args: input: A 5-D `Tensor` of shape `[batch, height, width, channels]` and type `float32`, `float64`, `qint8`, `quint8`, or `qint32`. ksize: An int or list of `ints` that has length `1`, `3` or `5`. The size of the window for each dimension of the input tensor. strides: An int or list of `ints` that has length `1`, `3` or `5`. The stride of the sliding window for each dimension of the input tensor. padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See the "returns" section of `tf.nn.convolution` for details. data_format: A string. 'NDHWC' and 'NCDHW' are supported. name: Optional name for the operation. Returns: A `Tensor` with the same type as `value`. The average pooled output tensor. """ with ops.name_scope(name, "AvgPool3D", [input]) as name: if data_format is None: data_format = "NDHWC" channel_index = 1 if data_format.startswith("NC") else 3 ksize = _get_sequence(ksize, 3, channel_index, "ksize") strides = _get_sequence(strides, 3, channel_index, "strides") return gen_nn_ops.avg_pool3d( input, ksize=ksize, strides=strides, padding=padding, data_format=data_format, name=name) # pylint: disable=redefined-builtin @tf_export("nn.max_pool", v1=["nn.max_pool_v2"]) def max_pool_v2(input, ksize, strides, padding, data_format=None, name=None): """Performs the max pooling on the input. Args: input: Tensor of rank N+2, of shape `[batch_size] + input_spatial_shape + [num_channels]` if `data_format` does not start with "NC" (default), or `[batch_size, num_channels] + input_spatial_shape` if data_format starts with "NC". Pooling happens over the spatial dimensions only. ksize: An int or list of `ints` that has length `1`, `N` or `N+2`. The size of the window for each dimension of the input tensor. strides: An int or list of `ints` that has length `1`, `N` or `N+2`. The stride of the sliding window for each dimension of the input tensor. padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See the "returns" section of `tf.nn.convolution` for details. data_format: A string. Specifies the channel dimension. For N=1 it can be either "NWC" (default) or "NCW", for N=2 it can be either "NHWC" (default) or "NCHW" and for N=3 either "NDHWC" (default) or "NCDHW". name: Optional name for the operation. Returns: A `Tensor` of format specified by `data_format`. The max pooled output tensor. 
""" if input.shape is not None: n = len(input.shape) - 2 elif data_format is not None: n = len(data_format) - 2 else: raise ValueError( "The input must have a rank or a data format must be given.") if not 1 <= n <= 3: raise ValueError( "Input tensor must be of rank 3, 4 or 5 but was {}.".format(n + 2)) if data_format is None: channel_index = n + 1 else: channel_index = 1 if data_format.startswith("NC") else n + 1 ksize = _get_sequence(ksize, n, channel_index, "ksize") strides = _get_sequence(strides, n, channel_index, "strides") max_pooling_ops = { 1: max_pool1d, 2: gen_nn_ops.max_pool, 3: gen_nn_ops.max_pool3d } op = max_pooling_ops[n] return op( input, ksize=ksize, strides=strides, padding=padding, data_format=data_format, name=name) # pylint: enable=redefined-builtin @tf_export(v1=["nn.max_pool"]) def max_pool(value, ksize, strides, padding, data_format="NHWC", name=None, input=None): # pylint: disable=redefined-builtin """Performs the max pooling on the input. Args: value: A 4-D `Tensor` of the format specified by `data_format`. ksize: An int or list of `ints` that has length `1`, `2` or `4`. The size of the window for each dimension of the input tensor. strides: An int or list of `ints` that has length `1`, `2` or `4`. The stride of the sliding window for each dimension of the input tensor. padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See the "returns" section of `tf.nn.convolution` for details. data_format: A string. 'NHWC', 'NCHW' and 'NCHW_VECT_C' are supported. name: Optional name for the operation. input: Alias for value. Returns: A `Tensor` of format specified by `data_format`. The max pooled output tensor. """ value = deprecation.deprecated_argument_lookup("input", input, "value", value) with ops.name_scope(name, "MaxPool", [value]) as name: if data_format is None: data_format = "NHWC" channel_index = 1 if data_format.startswith("NC") else 3 ksize = _get_sequence(ksize, 2, channel_index, "ksize") strides = _get_sequence(strides, 2, channel_index, "strides") if ((np.isscalar(ksize) and ksize == 0) or (isinstance(ksize, (list, tuple, np.ndarray)) and any(v == 0 for v in ksize))): raise ValueError("ksize cannot be zero.") return gen_nn_ops.max_pool( value, ksize=ksize, strides=strides, padding=padding, data_format=data_format, name=name) # pylint: disable=redefined-builtin @tf_export("nn.max_pool1d") def max_pool1d(input, ksize, strides, padding, data_format="NWC", name=None): """Performs the max pooling on the input. Note internally this op reshapes and uses the underlying 2d operation. Args: input: A 3-D `Tensor` of the format specified by `data_format`. ksize: An int or list of `ints` that has length `1` or `3`. The size of the window for each dimension of the input tensor. strides: An int or list of `ints` that has length `1` or `3`. The stride of the sliding window for each dimension of the input tensor. padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See the "returns" section of `tf.nn.convolution` for details. data_format: An optional string from: "NWC", "NCW". Defaults to "NWC". name: A name for the operation (optional). Returns: A `Tensor` of format specified by `data_format`. The max pooled output tensor. 
""" with ops.name_scope(name, "MaxPool1d", [input]) as name: if data_format is None: data_format = "NWC" channel_index = 1 if data_format.startswith("NC") else 2 ksize = [1] + _get_sequence(ksize, 1, channel_index, "ksize") strides = [1] + _get_sequence(strides, 1, channel_index, "strides") data_format = "NHWC" if data_format == "NWC" else "NCHW" expanding_dim = 1 if data_format == "NWC" else 2 input = array_ops.expand_dims_v2(input, expanding_dim) result = gen_nn_ops.max_pool( input, ksize=ksize, strides=strides, padding=padding, data_format=data_format, name=name) return array_ops.squeeze(result, expanding_dim) # pylint: enable=redefined-builtin # pylint: disable=redefined-builtin @tf_export("nn.max_pool2d") def max_pool2d(input, ksize, strides, padding, data_format="NHWC", name=None): """Performs the max pooling on the input. Args: input: A 4-D `Tensor` of the format specified by `data_format`. ksize: An int or list of `ints` that has length `1`, `2` or `4`. The size of the window for each dimension of the input tensor. strides: An int or list of `ints` that has length `1`, `2` or `4`. The stride of the sliding window for each dimension of the input tensor. padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See the "returns" section of `tf.nn.convolution` for details. data_format: A string. 'NHWC', 'NCHW' and 'NCHW_VECT_C' are supported. name: Optional name for the operation. Returns: A `Tensor` of format specified by `data_format`. The max pooled output tensor. """ with ops.name_scope(name, "MaxPool2d", [input]) as name: if data_format is None: data_format = "NHWC" channel_index = 1 if data_format.startswith("NC") else 3 ksize = _get_sequence(ksize, 2, channel_index, "ksize") strides = _get_sequence(strides, 2, channel_index, "strides") return gen_nn_ops.max_pool( input, ksize=ksize, strides=strides, padding=padding, data_format=data_format, name=name) # pylint: enable=redefined-builtin # pylint: disable=redefined-builtin @tf_export("nn.max_pool3d") def max_pool3d(input, ksize, strides, padding, data_format="NDHWC", name=None): """Performs the max pooling on the input. Args: input: A 5-D `Tensor` of the format specified by `data_format`. ksize: An int or list of `ints` that has length `1`, `3` or `5`. The size of the window for each dimension of the input tensor. strides: An int or list of `ints` that has length `1`, `3` or `5`. The stride of the sliding window for each dimension of the input tensor. padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See the "returns" section of `tf.nn.convolution` for details. data_format: An optional string from: "NDHWC", "NCDHW". Defaults to "NDHWC". The data format of the input and output data. With the default format "NDHWC", the data is stored in the order of: [batch, in_depth, in_height, in_width, in_channels]. Alternatively, the format could be "NCDHW", the data storage order is: [batch, in_channels, in_depth, in_height, in_width]. name: A name for the operation (optional). Returns: A `Tensor` of format specified by `data_format`. The max pooled output tensor. 
""" with ops.name_scope(name, "MaxPool3D", [input]) as name: if data_format is None: data_format = "NDHWC" channel_index = 1 if data_format.startswith("NC") else 4 ksize = _get_sequence(ksize, 3, channel_index, "ksize") strides = _get_sequence(strides, 3, channel_index, "strides") return gen_nn_ops.max_pool3d( input, ksize=ksize, strides=strides, padding=padding, data_format=data_format, name=name) # pylint: enable=redefined-builtin @tf_export("nn.max_pool_with_argmax", v1=[]) def max_pool_with_argmax_v2( input, # pylint: disable=redefined-builtin ksize, strides, padding, data_format="NHWC", output_dtype=dtypes.int64, include_batch_in_index=False, name=None): """Performs max pooling on the input and outputs both max values and indices. The indices in `argmax` are flattened, so that a maximum value at position `[b, y, x, c]` becomes flattened index: `(y * width + x) * channels + c` if `include_batch_in_index` is False; `((b * height + y) * width + x) * channels + c` if `include_batch_in_index` is True. The indices returned are always in `[0, height) x [0, width)` before flattening, even if padding is involved and the mathematically correct answer is outside (either negative or too large). This is a bug, but fixing it is difficult to do in a safe backwards compatible way, especially due to flattening. Args: input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`. 4-D with shape `[batch, height, width, channels]`. Input to pool over. ksize: An int or list of `ints` that has length `1`, `2` or `4`. The size of the window for each dimension of the input tensor. strides: An int or list of `ints` that has length `1`, `2` or `4`. The stride of the sliding window for each dimension of the input tensor. padding: A `string` from: `"SAME", "VALID"`. The type of padding algorithm to use. data_format: An optional `string`, must be set to `"NHWC"`. Defaults to `"NHWC"`. Specify the data format of the input and output data. output_dtype: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int64`. The dtype of the returned argmax tensor. include_batch_in_index: An optional `boolean`. Defaults to `False`. Whether to include batch dimension in flattened index of `argmax`. name: A name for the operation (optional). Returns: A tuple of `Tensor` objects (output, argmax). output: A `Tensor`. Has the same type as `input`. argmax: A `Tensor` of type `output_dtype`. 
""" if data_format != "NHWC": raise ValueError("Data formats other than 'NHWC' are not yet supported") ksize = _get_sequence(ksize, 2, 3, "ksize") strides = _get_sequence(strides, 2, 3, "strides") return gen_nn_ops.max_pool_with_argmax( input=input, ksize=ksize, strides=strides, padding=padding, Targmax=output_dtype, include_batch_in_index=include_batch_in_index, name=name) @tf_export(v1=["nn.max_pool_with_argmax"]) def max_pool_with_argmax_v1( # pylint: disable=missing-docstring,invalid-name input, # pylint: disable=redefined-builtin ksize, strides, padding, data_format="NHWC", Targmax=None, name=None, output_dtype=None, include_batch_in_index=False): if data_format != "NHWC": raise ValueError("Data formats other than 'NHWC' are not yet supported") Targmax = deprecated_argument_lookup( "output_dtype", output_dtype, "Targmax", Targmax) if Targmax is None: Targmax = dtypes.int64 return gen_nn_ops.max_pool_with_argmax( input=input, ksize=ksize, strides=strides, padding=padding, Targmax=Targmax, include_batch_in_index=include_batch_in_index, name=name) max_pool_with_argmax_v1.__doc__ = gen_nn_ops.max_pool_with_argmax.__doc__ @ops.RegisterStatistics("Conv3D", "flops") def _calc_conv3d_flops(graph, node): """Calculates the compute resources needed for Conv3D.""" input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0]) input_shape.assert_is_fully_defined() filter_shape = graph_util.tensor_shape_from_node_def_name( graph, node.input[1]) filter_shape.assert_is_fully_defined() output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name) output_shape.assert_is_fully_defined() filter_time = int(filter_shape[0]) filter_height = int(filter_shape[1]) filter_width = int(filter_shape[2]) filter_in_depth = int(filter_shape[3]) output_count = np.prod(output_shape.as_list(), dtype=np.int64) return ops.OpStats("flops", (output_count * filter_in_depth * filter_time * filter_height * filter_width * 2)) @ops.RegisterStatistics("Conv2D", "flops") def _calc_conv_flops(graph, node): """Calculates the compute resources needed for Conv2D.""" input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0]) input_shape.assert_is_fully_defined() filter_shape = graph_util.tensor_shape_from_node_def_name( graph, node.input[1]) filter_shape.assert_is_fully_defined() output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name) output_shape.assert_is_fully_defined() filter_height = int(filter_shape[0]) filter_width = int(filter_shape[1]) filter_in_depth = int(filter_shape[2]) output_count = np.prod(output_shape.as_list(), dtype=np.int64) return ops.OpStats( "flops", (output_count * filter_in_depth * filter_height * filter_width * 2)) @ops.RegisterStatistics("DepthwiseConv2dNative", "flops") def _calc_depthwise_conv_flops(graph, node): """Calculates the compute resources needed for DepthwiseConv2dNative.""" input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0]) input_shape.assert_is_fully_defined() filter_shape = graph_util.tensor_shape_from_node_def_name( graph, node.input[1]) filter_shape.assert_is_fully_defined() output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name) output_shape.assert_is_fully_defined() filter_height = int(filter_shape[0]) filter_width = int(filter_shape[1]) output_count = np.prod(output_shape.as_list(), dtype=np.int64) return ops.OpStats("flops", (output_count * filter_height * filter_width * 2)) @ops.RegisterStatistics("BiasAdd", "flops") def _calc_bias_add_flops(graph, node): """Calculates 
the computing needed for BiasAdd.""" input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0]) input_shape.assert_is_fully_defined() input_count = np.prod(input_shape.as_list()) return ops.OpStats("flops", input_count) @tf_export(v1=["nn.xw_plus_b"]) def xw_plus_b(x, weights, biases, name=None): # pylint: disable=invalid-name """Computes matmul(x, weights) + biases. Args: x: a 2D tensor. Dimensions typically: batch, in_units weights: a 2D tensor. Dimensions typically: in_units, out_units biases: a 1D tensor. Dimensions: out_units name: A name for the operation (optional). If not specified "xw_plus_b" is used. Returns: A 2-D Tensor computing matmul(x, weights) + biases. Dimensions typically: batch, out_units. """ with ops.name_scope(name, "xw_plus_b", [x, weights, biases]) as name: x = ops.convert_to_tensor(x, name="x") weights = ops.convert_to_tensor(weights, name="weights") biases = ops.convert_to_tensor(biases, name="biases") mm = math_ops.matmul(x, weights) return bias_add(mm, biases, name=name) def xw_plus_b_v1(x, weights, biases, name=None): """Computes matmul(x, weights) + biases. This is a deprecated version of that will soon be removed. Args: x: a 2D tensor. Dimensions typically: batch, in_units weights: a 2D tensor. Dimensions typically: in_units, out_units biases: a 1D tensor. Dimensions: out_units name: A name for the operation (optional). If not specified "xw_plus_b_v1" is used. Returns: A 2-D Tensor computing matmul(x, weights) + biases. Dimensions typically: batch, out_units. """ with ops.name_scope(name, "xw_plus_b_v1", [x, weights, biases]) as name: x = ops.convert_to_tensor(x, name="x") weights = ops.convert_to_tensor(weights, name="weights") biases = ops.convert_to_tensor(biases, name="biases") mm = math_ops.matmul(x, weights) return bias_add_v1(mm, biases, name=name) def _get_noise_shape(x, noise_shape): # If noise_shape is none return immediately. if noise_shape is None: return array_ops.shape(x) try: # Best effort to figure out the intended shape. # If not possible, let the op to handle it. # In eager mode exception will show up. noise_shape_ = tensor_shape.as_shape(noise_shape) except (TypeError, ValueError): return noise_shape if x.shape.dims is not None and len(x.shape.dims) == len(noise_shape_.dims): new_dims = [] for i, dim in enumerate(x.shape.dims): if noise_shape_.dims[i].value is None and dim.value is not None: new_dims.append(dim.value) else: new_dims.append(noise_shape_.dims[i].value) return tensor_shape.TensorShape(new_dims) return noise_shape @tf_export(v1=["nn.dropout"]) @deprecation.deprecated_args(None, "Please use `rate` instead of `keep_prob`. " "Rate should be set to `rate = 1 - keep_prob`.", "keep_prob") def dropout(x, keep_prob=None, noise_shape=None, seed=None, name=None, rate=None): """Computes dropout. For each element of `x`, with probability `rate`, outputs `0`, and otherwise scales up the input by `1 / (1-rate)`. The scaling is such that the expected sum is unchanged. By default, each element is kept or dropped independently. If `noise_shape` is specified, it must be [broadcastable](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) to the shape of `x`, and only dimensions with `noise_shape[i] == shape(x)[i]` will make independent decisions. For example, if `shape(x) = [k, l, m, n]` and `noise_shape = [k, 1, 1, n]`, each batch and channel component will be kept independently and each row and column will be kept or not kept together. Args: x: A floating point tensor. 
keep_prob: (deprecated) A deprecated alias for `(1-rate)`. noise_shape: A 1-D `Tensor` of type `int32`, representing the shape for randomly generated keep/drop flags. seed: A Python integer. Used to create random seeds. See `tf.compat.v1.set_random_seed` for behavior. name: A name for this operation (optional). rate: A scalar `Tensor` with the same type as `x`. The probability that each element of `x` is discarded. Returns: A Tensor of the same shape of `x`. Raises: ValueError: If `rate` is not in `[0, 1)` or if `x` is not a floating point tensor. """ try: keep = 1. - keep_prob if keep_prob is not None else None except TypeError: raise ValueError("keep_prob must be a floating point number or Tensor " "(got %r)" % keep_prob) rate = deprecation.deprecated_argument_lookup( "rate", rate, "keep_prob", keep) if rate is None: raise ValueError("You must provide a rate to dropout.") return dropout_v2(x, rate, noise_shape=noise_shape, seed=seed, name=name) @tf_export("nn.dropout", v1=[]) def dropout_v2(x, rate, noise_shape=None, seed=None, name=None): """Computes dropout. With probability `rate`, drops elements of `x`. Input that are kept are scaled up by `1 / (1 - rate)`, otherwise outputs `0`. The scaling is so that the expected sum is unchanged. **Note:** The behavior of dropout has changed between TensorFlow 1.x and 2.x. When converting 1.x code, please use named arguments to ensure behavior stays consistent. By default, each element is kept or dropped independently. If `noise_shape` is specified, it must be [broadcastable](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) to the shape of `x`, and only dimensions with `noise_shape[i] == shape(x)[i]` will make independent decisions. For example, if `shape(x) = [k, l, m, n]` and `noise_shape = [k, 1, 1, n]`, each batch and channel component will be kept independently and each row and column will be kept or not kept together. Args: x: A floating point tensor. rate: A scalar `Tensor` with the same type as x. The probability that each element is dropped. For example, setting rate=0.1 would drop 10% of input elements. noise_shape: A 1-D `Tensor` of type `int32`, representing the shape for randomly generated keep/drop flags. seed: A Python integer. Used to create random seeds. See `tf.compat.v1.set_random_seed` for behavior. name: A name for this operation (optional). Returns: A Tensor of the same shape of `x`. Raises: ValueError: If `rate` is not in `(0, 1]` or if `x` is not a floating point tensor. """ with ops.name_scope(name, "dropout", [x]) as name: x = ops.convert_to_tensor(x, name="x") if not x.dtype.is_floating: raise ValueError("x has to be a floating point tensor since it's going to" " be scaled. Got a %s tensor instead." % x.dtype) if isinstance(rate, numbers.Real): if not (rate >= 0 and rate < 1): raise ValueError("rate must be a scalar tensor or a float in the " "range [0, 1), got %g" % rate) if rate > 0.5: logging.log_first_n( logging.WARN, "Large dropout rate: %g (>0.5). In TensorFlow " "2.x, dropout() uses dropout rate instead of keep_prob. " "Please ensure that this is intended.", 5, rate) # Early return if nothing needs to be dropped. 
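    # Three zero-rate cases are handled below: a plain Python number equal to
    # 0, an EagerTensor whose value is 0, and (in graph mode) a rate tensor
    # whose constant-folded value is 0. In each case `x` is returned unchanged.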
if isinstance(rate, numbers.Real) and rate == 0: return x if context.executing_eagerly(): if isinstance(rate, ops.EagerTensor): if rate.numpy() == 0: return x else: rate = ops.convert_to_tensor( rate, dtype=x.dtype, name="rate") rate.get_shape().assert_is_compatible_with(tensor_shape.scalar()) # Do nothing if we know rate == 0 if tensor_util.constant_value(rate) == 0: return x noise_shape = _get_noise_shape(x, noise_shape) # Sample a uniform distribution on [0.0, 1.0) and select values larger than # rate. # # NOTE: Random uniform actually can only generate 2^23 floats on [1.0, 2.0) # and subtract 1.0. random_tensor = random_ops.random_uniform( noise_shape, seed=seed, dtype=x.dtype) keep_prob = 1 - rate scale = 1 / keep_prob # NOTE: if (1.0 + rate) - 1 is equal to rate, then we want to consider that # float to be selected, hence we use a >= comparison. keep_mask = random_tensor >= rate ret = x * scale * math_ops.cast(keep_mask, x.dtype) if not context.executing_eagerly(): ret.set_shape(x.get_shape()) return ret @tf_export("math.top_k", "nn.top_k") def top_k(input, k=1, sorted=True, name=None): # pylint: disable=redefined-builtin """Finds values and indices of the `k` largest entries for the last dimension. If the input is a vector (rank=1), finds the `k` largest entries in the vector and outputs their values and indices as vectors. Thus `values[j]` is the `j`-th largest entry in `input`, and its index is `indices[j]`. For matrices (resp. higher rank input), computes the top `k` entries in each row (resp. vector along the last dimension). Thus, values.shape = indices.shape = input.shape[:-1] + [k] If two elements are equal, the lower-index element appears first. Args: input: 1-D or higher `Tensor` with last dimension at least `k`. k: 0-D `int32` `Tensor`. Number of top elements to look for along the last dimension (along each row for matrices). sorted: If true the resulting `k` elements will be sorted by the values in descending order. name: Optional name for the operation. Returns: values: The `k` largest elements along each last dimensional slice. indices: The indices of `values` within the last dimension of `input`. """ return gen_nn_ops.top_kv2(input, k=k, sorted=sorted, name=name) def nth_element(input, n, reverse=False, name=None): # pylint: disable=redefined-builtin r"""Finds values of the `n`-th smallest value for the last dimension. Note that n is zero-indexed. If the input is a vector (rank-1), finds the entries which is the nth-smallest value in the vector and outputs their values as scalar tensor. For matrices (resp. higher rank input), computes the entries which is the nth-smallest value in each row (resp. vector along the last dimension). Thus, values.shape = input.shape[:-1] Args: input: 1-D or higher `Tensor` with last dimension at least `n+1`. n: A `Tensor` of type `int32`. 0-D. Position of sorted vector to select along the last dimension (along each row for matrices). Valid range of n is `[0, input.shape[:-1])` reverse: An optional `bool`. Defaults to `False`. When set to True, find the nth-largest value in the vector and vice versa. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `input`. The `n`-th order statistic along each last dimensional slice. """ return gen_nn_ops.nth_element(input, n, reverse=reverse, name=name) @tf_export(v1=["nn.fractional_max_pool"]) @deprecation.deprecated(date=None, instructions="`seed2` and `deterministic` " "args are deprecated. 
Use fractional_max_pool_v2.") def fractional_max_pool(value, pooling_ratio, pseudo_random=False, overlapping=False, deterministic=False, seed=0, seed2=0, name=None): # pylint: disable=redefined-builtin r"""Performs fractional max pooling on the input. This is a deprecated version of `fractional_max_pool`. Fractional max pooling is slightly different than regular max pooling. In regular max pooling, you downsize an input set by taking the maximum value of smaller N x N subsections of the set (often 2x2), and try to reduce the set by a factor of N, where N is an integer. Fractional max pooling, as you might expect from the word "fractional", means that the overall reduction ratio N does not have to be an integer. The sizes of the pooling regions are generated randomly but are fairly uniform. For example, let's look at the height dimension, and the constraints on the list of rows that will be pool boundaries. First we define the following: 1. input_row_length : the number of rows from the input set 2. output_row_length : which will be smaller than the input 3. alpha = input_row_length / output_row_length : our reduction ratio 4. K = floor(alpha) 5. row_pooling_sequence : this is the result list of pool boundary rows Then, row_pooling_sequence should satisfy: 1. a[0] = 0 : the first value of the sequence is 0 2. a[end] = input_row_length : the last value of the sequence is the size 3. K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size 4. length(row_pooling_sequence) = output_row_length+1 For more details on fractional max pooling, see this paper: [Benjamin Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) Args: value: A `Tensor`. 4-D with shape `[batch, height, width, channels]`. pooling_ratio: A list of `floats` that has length >= 4. Pooling ratio for each dimension of `value`, currently only supports row and col dimension and should be >= 1.0. For example, a valid pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements must be 1.0 because we don't allow pooling on batch and channels dimensions. 1.44 and 1.73 are pooling ratio on height and width dimensions respectively. pseudo_random: An optional `bool`. Defaults to `False`. When set to `True`, generates the pooling sequence in a pseudorandom fashion, otherwise, in a random fashion. Check paper [Benjamin Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for difference between pseudorandom and random. overlapping: An optional `bool`. Defaults to `False`. When set to `True`, it means when pooling, the values at the boundary of adjacent pooling cells are used by both cells. For example: `index 0 1 2 3 4` `value 20 5 16 3 7` If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice. The result would be [20, 16] for fractional max pooling. deterministic: An optional `bool`. Deprecated; use `fractional_max_pool_v2` instead. seed: An optional `int`. Defaults to `0`. If set to be non-zero, the random number generator is seeded by the given seed. Otherwise it is seeded by a random seed. seed2: An optional `int`. Deprecated; use `fractional_max_pool_v2` instead. name: A name for the operation (optional). Returns: A tuple of `Tensor` objects (`output`, `row_pooling_sequence`, `col_pooling_sequence`). output: Output `Tensor` after fractional max pooling. Has the same type as `value`. row_pooling_sequence: A `Tensor` of type `int64`. col_pooling_sequence: A `Tensor` of type `int64`. 
""" return gen_nn_ops.fractional_max_pool(value, pooling_ratio, pseudo_random, overlapping, deterministic, seed, seed2, name) @tf_export("nn.fractional_max_pool", v1=[]) def fractional_max_pool_v2(value, pooling_ratio, pseudo_random=False, overlapping=False, seed=0, name=None): # pylint: disable=redefined-builtin r"""Performs fractional max pooling on the input. Fractional max pooling is slightly different than regular max pooling. In regular max pooling, you downsize an input set by taking the maximum value of smaller N x N subsections of the set (often 2x2), and try to reduce the set by a factor of N, where N is an integer. Fractional max pooling, as you might expect from the word "fractional", means that the overall reduction ratio N does not have to be an integer. The sizes of the pooling regions are generated randomly but are fairly uniform. For example, let's look at the height dimension, and the constraints on the list of rows that will be pool boundaries. First we define the following: 1. input_row_length : the number of rows from the input set 2. output_row_length : which will be smaller than the input 3. alpha = input_row_length / output_row_length : our reduction ratio 4. K = floor(alpha) 5. row_pooling_sequence : this is the result list of pool boundary rows Then, row_pooling_sequence should satisfy: 1. a[0] = 0 : the first value of the sequence is 0 2. a[end] = input_row_length : the last value of the sequence is the size 3. K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size 4. length(row_pooling_sequence) = output_row_length+1 For more details on fractional max pooling, see this paper: [Benjamin Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) Args: value: A `Tensor`. 4-D with shape `[batch, height, width, channels]`. pooling_ratio: An int or list of `ints` that has length `1`, `2` or `4`. Pooling ratio for each dimension of `value`, currently only supports row and col dimension and should be >= 1.0. For example, a valid pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements must be 1.0 because we don't allow pooling on batch and channels dimensions. 1.44 and 1.73 are pooling ratio on height and width dimensions respectively. pseudo_random: An optional `bool`. Defaults to `False`. When set to `True`, generates the pooling sequence in a pseudorandom fashion, otherwise, in a random fashion. Check paper [Benjamin Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for difference between pseudorandom and random. overlapping: An optional `bool`. Defaults to `False`. When set to `True`, it means when pooling, the values at the boundary of adjacent pooling cells are used by both cells. For example: `index 0 1 2 3 4` `value 20 5 16 3 7` If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice. The result would be [20, 16] for fractional max pooling. seed: An optional `int`. Defaults to `0`. If set to be non-zero, the random number generator is seeded by the given seed. Otherwise it is seeded by a random seed. name: A name for the operation (optional). Returns: A tuple of `Tensor` objects (`output`, `row_pooling_sequence`, `col_pooling_sequence`). output: Output `Tensor` after fractional max pooling. Has the same type as `value`. row_pooling_sequence: A `Tensor` of type `int64`. col_pooling_sequence: A `Tensor` of type `int64`. 
""" pooling_ratio = _get_sequence(pooling_ratio, 2, 3, "pooling_ratio") if seed == 0: return gen_nn_ops.fractional_max_pool(value, pooling_ratio, pseudo_random, overlapping, deterministic=False, seed=0, seed2=0, name=name) else: seed1, seed2 = random_seed.get_seed(seed) return gen_nn_ops.fractional_max_pool(value, pooling_ratio, pseudo_random, overlapping, deterministic=True, seed=seed1, seed2=seed2, name=name) @tf_export(v1=["nn.fractional_avg_pool"]) @deprecation.deprecated(date=None, instructions="`seed2` and `deterministic` " "args are deprecated. Use fractional_avg_pool_v2.") def fractional_avg_pool(value, pooling_ratio, pseudo_random=False, overlapping=False, deterministic=False, seed=0, seed2=0, name=None): # pylint: disable=redefined-builtin r"""Performs fractional average pooling on the input. This is a deprecated version of `fractional_avg_pool`. Fractional average pooling is similar to Fractional max pooling in the pooling region generation step. The only difference is that after pooling regions are generated, a mean operation is performed instead of a max operation in each pooling region. Args: value: A `Tensor`. 4-D with shape `[batch, height, width, channels]`. pooling_ratio: A list of `floats` that has length >= 4. Pooling ratio for each dimension of `value`, currently only supports row and col dimension and should be >= 1.0. For example, a valid pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements must be 1.0 because we don't allow pooling on batch and channels dimensions. 1.44 and 1.73 are pooling ratio on height and width dimensions respectively. pseudo_random: An optional `bool`. Defaults to `False`. When set to `True`, generates the pooling sequence in a pseudorandom fashion, otherwise, in a random fashion. Check paper [Benjamin Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for difference between pseudorandom and random. overlapping: An optional `bool`. Defaults to `False`. When set to `True`, it means when pooling, the values at the boundary of adjacent pooling cells are used by both cells. For example: `index 0 1 2 3 4` `value 20 5 16 3 7` If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice. The result would be [20, 16] for fractional avg pooling. deterministic: An optional `bool`. Deprecated; use `fractional_avg_pool_v2` instead. seed: An optional `int`. Defaults to `0`. If set to be non-zero, the random number generator is seeded by the given seed. Otherwise it is seeded by a random seed. seed2: An optional `int`. Deprecated; use `fractional_avg_pool_v2` instead. name: A name for the operation (optional). Returns: A tuple of `Tensor` objects (`output`, `row_pooling_sequence`, `col_pooling_sequence`). output: Output `Tensor` after fractional avg pooling. Has the same type as `value`. row_pooling_sequence: A `Tensor` of type `int64`. col_pooling_sequence: A `Tensor` of type `int64`. """ return gen_nn_ops.fractional_avg_pool(value, pooling_ratio, pseudo_random, overlapping, deterministic, seed, seed2, name=name) @tf_export("nn.fractional_avg_pool", v1=[]) def fractional_avg_pool_v2(value, pooling_ratio, pseudo_random=False, overlapping=False, seed=0, name=None): # pylint: disable=redefined-builtin r"""Performs fractional average pooling on the input. Fractional average pooling is similar to Fractional max pooling in the pooling region generation step. The only difference is that after pooling regions are generated, a mean operation is performed instead of a max operation in each pooling region. 
Args: value: A `Tensor`. 4-D with shape `[batch, height, width, channels]`. pooling_ratio: A list of `floats` that has length >= 4. Pooling ratio for each dimension of `value`, currently only supports row and col dimension and should be >= 1.0. For example, a valid pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements must be 1.0 because we don't allow pooling on batch and channels dimensions. 1.44 and 1.73 are pooling ratio on height and width dimensions respectively. pseudo_random: An optional `bool`. Defaults to `False`. When set to `True`, generates the pooling sequence in a pseudorandom fashion, otherwise, in a random fashion. Check paper [Benjamin Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for difference between pseudorandom and random. overlapping: An optional `bool`. Defaults to `False`. When set to `True`, it means when pooling, the values at the boundary of adjacent pooling cells are used by both cells. For example: `index 0 1 2 3 4` `value 20 5 16 3 7` If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice. The result would be [20, 16] for fractional avg pooling. seed: An optional `int`. Defaults to `0`. If set to be non-zero, the random number generator is seeded by the given seed. Otherwise it is seeded by a random seed. name: A name for the operation (optional). Returns: A tuple of `Tensor` objects (`output`, `row_pooling_sequence`, `col_pooling_sequence`). output: Output `Tensor` after fractional avg pooling. Has the same type as `value`. row_pooling_sequence: A `Tensor` of type `int64`. col_pooling_sequence: A `Tensor` of type `int64`. """ if seed == 0: return gen_nn_ops.fractional_avg_pool(value, pooling_ratio, pseudo_random, overlapping, deterministic=False, seed=0, seed2=0, name=name) else: seed1, seed2 = random_seed.get_seed(seed) return gen_nn_ops.fractional_avg_pool(value, pooling_ratio, pseudo_random, overlapping, deterministic=True, seed=seed1, seed2=seed2, name=name) @ops.RegisterStatistics("Dilation2D", "flops") def _calc_dilation2d_flops(graph, node): """Calculates the compute resources needed for Dilation2D.""" input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0]) input_shape.assert_is_fully_defined() filter_shape = graph_util.tensor_shape_from_node_def_name( graph, node.input[1]) filter_shape.assert_is_fully_defined() output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name) output_shape.assert_is_fully_defined() filter_height = int(filter_shape[0]) filter_width = int(filter_shape[1]) output_count = np.prod(output_shape.as_list(), dtype=np.int64) return ops.OpStats("flops", (output_count * filter_height * filter_width * 2)) @tf_export(v1=["nn.erosion2d"]) def erosion2d(value, kernel, strides, rates, padding, name=None): """Computes the grayscale erosion of 4-D `value` and 3-D `kernel` tensors. The `value` tensor has shape `[batch, in_height, in_width, depth]` and the `kernel` tensor has shape `[kernel_height, kernel_width, depth]`, i.e., each input channel is processed independently of the others with its own structuring function. The `output` tensor has shape `[batch, out_height, out_width, depth]`. The spatial dimensions of the output tensor depend on the `padding` algorithm. We currently only support the default "NHWC" `data_format`. 
In detail, the grayscale morphological 2-D erosion is given by: output[b, y, x, c] = min_{dy, dx} value[b, strides[1] * y - rates[1] * dy, strides[2] * x - rates[2] * dx, c] - kernel[dy, dx, c] Duality: The erosion of `value` by the `kernel` is equal to the negation of the dilation of `-value` by the reflected `kernel`. Args: value: A `Tensor`. 4-D with shape `[batch, in_height, in_width, depth]`. kernel: A `Tensor`. Must have the same type as `value`. 3-D with shape `[kernel_height, kernel_width, depth]`. strides: A list of `ints` that has length `>= 4`. 1-D of length 4. The stride of the sliding window for each dimension of the input tensor. Must be: `[1, stride_height, stride_width, 1]`. rates: A list of `ints` that has length `>= 4`. 1-D of length 4. The input stride for atrous morphological dilation. Must be: `[1, rate_height, rate_width, 1]`. padding: A `string` from: `"SAME", "VALID"`. The type of padding algorithm to use. name: A name for the operation (optional). If not specified "erosion2d" is used. Returns: A `Tensor`. Has the same type as `value`. 4-D with shape `[batch, out_height, out_width, depth]`. Raises: ValueError: If the `value` depth does not match `kernel`' shape, or if padding is other than `'VALID'` or `'SAME'`. """ with ops.name_scope(name, "erosion2d", [value, kernel]) as name: # Reduce erosion to dilation by duality. return math_ops.negative( gen_nn_ops.dilation2d( input=math_ops.negative(value), filter=array_ops.reverse_v2(kernel, [0, 1]), strides=strides, rates=rates, padding=padding, name=name)) @tf_export("nn.erosion2d", v1=[]) def erosion2d_v2(value, filters, strides, padding, data_format, dilations, name=None): """Computes the grayscale erosion of 4-D `value` and 3-D `filters` tensors. The `value` tensor has shape `[batch, in_height, in_width, depth]` and the `filters` tensor has shape `[filters_height, filters_width, depth]`, i.e., each input channel is processed independently of the others with its own structuring function. The `output` tensor has shape `[batch, out_height, out_width, depth]`. The spatial dimensions of the output tensor depend on the `padding` algorithm. We currently only support the default "NHWC" `data_format`. In detail, the grayscale morphological 2-D erosion is given by: output[b, y, x, c] = min_{dy, dx} value[b, strides[1] * y - dilations[1] * dy, strides[2] * x - dilations[2] * dx, c] - filters[dy, dx, c] Duality: The erosion of `value` by the `filters` is equal to the negation of the dilation of `-value` by the reflected `filters`. Args: value: A `Tensor`. 4-D with shape `[batch, in_height, in_width, depth]`. filters: A `Tensor`. Must have the same type as `value`. 3-D with shape `[filters_height, filters_width, depth]`. strides: A list of `ints` that has length `>= 4`. 1-D of length 4. The stride of the sliding window for each dimension of the input tensor. Must be: `[1, stride_height, stride_width, 1]`. padding: A `string` from: `"SAME", "VALID"`. The type of padding algorithm to use. data_format: A `string`, only `"NHWC"` is currently supported. dilations: A list of `ints` that has length `>= 4`. 1-D of length 4. The input stride for atrous morphological dilation. Must be: `[1, rate_height, rate_width, 1]`. name: A name for the operation (optional). If not specified "erosion2d" is used. Returns: A `Tensor`. Has the same type as `value`. 4-D with shape `[batch, out_height, out_width, depth]`. Raises: ValueError: If the `value` depth does not match `filters`' shape, or if padding is other than `'VALID'` or `'SAME'`. 
""" if data_format != "NHWC": raise ValueError("Data formats other than NHWC are not yet supported") with ops.name_scope(name, "erosion2d", [value, filters]) as name: # Reduce erosion to dilation by duality. return math_ops.negative( gen_nn_ops.dilation2d( input=math_ops.negative(value), filter=array_ops.reverse_v2(filters, [0, 1]), strides=strides, rates=dilations, padding=padding, name=name)) @tf_export(v1=["math.in_top_k", "nn.in_top_k"]) def in_top_k(predictions, targets, k, name=None): r"""Says whether the targets are in the top `K` predictions. This outputs a `batch_size` bool array, an entry `out[i]` is `true` if the prediction for the target class is finite (not inf, -inf, or nan) and among the top `k` predictions among all predictions for example `i`. Note that the behavior of `InTopK` differs from the `TopK` op in its handling of ties; if multiple classes have the same prediction value and straddle the top-`k` boundary, all of those classes are considered to be in the top `k`. More formally, let \\(predictions_i\\) be the predictions for all classes for example `i`, \\(targets_i\\) be the target class for example `i`, \\(out_i\\) be the output for example `i`, $$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$ Args: predictions: A `Tensor` of type `float32`. A `batch_size` x `classes` tensor. targets: A `Tensor`. Must be one of the following types: `int32`, `int64`. A `batch_size` vector of class ids. k: An `int`. Number of top elements to look at for computing precision. name: A name for the operation (optional). Returns: A `Tensor` of type `bool`. Computed Precision at `k` as a `bool Tensor`. """ with ops.name_scope(name, "in_top_k"): return gen_nn_ops.in_top_kv2(predictions, targets, k, name=name) @tf_export("math.in_top_k", "nn.in_top_k", v1=[]) def in_top_k_v2(targets, predictions, k, name=None): return in_top_k(predictions, targets, k, name) in_top_k_v2.__doc__ = in_top_k.__doc__ tf_export(v1=["nn.quantized_avg_pool"])(gen_nn_ops.quantized_avg_pool) tf_export(v1=["nn.quantized_conv2d"])(gen_nn_ops.quantized_conv2d) tf_export(v1=["nn.quantized_relu_x"])(gen_nn_ops.quantized_relu_x) tf_export(v1=["nn.quantized_max_pool"])(gen_nn_ops.quantized_max_pool)
tensorflow-master
tensorflow/python/ops/nn_ops.py
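A short usage sketch for the v2 fractional pooling wrapper defined in the nn_ops.py excerpt above (tensor values are illustrative; TF 2.x eager execution assumed). It exercises the seed handling visible in the source: seed=0 keeps the op non-deterministic, while a non-zero seed is split via random_seed.get_seed into the (seed1, seed2) pair passed to the kernel, so repeated calls should reuse the same pooling regions.

import tensorflow as tf

x = tf.reshape(tf.range(1.0, 26.0), [1, 5, 5, 1])  # [batch, height, width, channels]
ratio = [1.0, 1.44, 1.73, 1.0]  # no pooling on the batch and channels dimensions

# Non-zero seed: deterministic=True under the hood, so regions repeat across calls.
out_a, rows, cols = tf.nn.fractional_avg_pool(x, ratio, seed=7)
out_b, _, _ = tf.nn.fractional_avg_pool(x, ratio, seed=7)
print(bool(tf.reduce_all(out_a == out_b)))  # expected: True

# Default seed=0: pooling regions are drawn from a fresh random seed on each call.
out_c, _, _ = tf.nn.fractional_avg_pool(x, ratio)
print(out_c.shape)  # spatial dims shrink by roughly the pooling ratio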
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for miscellaneous functionality in tensorflow.ops.nn.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import math from absl.testing import parameterized import numpy as np from six.moves import xrange # pylint: disable=redefined-builtin from tensorflow.python.eager import def_function from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_spec from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import gradient_checker from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn_impl from tensorflow.python.ops import nn_ops from tensorflow.python.ops import partitioned_variables from tensorflow.python.ops import variable_scope from tensorflow.python.ops import variables import tensorflow.python.ops.nn_grad # pylint: disable=unused-import from tensorflow.python.ops.nn_impl import _compute_sampled_logits from tensorflow.python.platform import test as test_lib class ZeroFractionTest(test_lib.TestCase): def _ZeroFraction(self, x): assert x.shape total_elements = np.prod(x.shape) nonzeros = np.count_nonzero(x.flatten()) return 1.0 - nonzeros / total_elements @test_util.run_deprecated_v1 def testZeroFraction(self): x_shape = [5, 17] x_np = np.random.randint(0, 2, size=x_shape).astype(np.float32) y_np = self._ZeroFraction(x_np) x_tf = constant_op.constant(x_np) x_tf.set_shape(x_shape) y_tf = nn_impl.zero_fraction(x_tf) y_tf_np = self.evaluate(y_tf) eps = 1e-8 self.assertAllClose(y_tf_np, y_np, eps) @test_util.run_deprecated_v1 def testZeroFractionEmpty(self): x = np.zeros(0) y = self.evaluate(nn_impl.zero_fraction(x)) self.assertTrue(np.isnan(y)) @test_util.run_deprecated_v1 def testZeroFraction2_27Zeros(self): sparsity = nn_impl.zero_fraction( array_ops.zeros([int(2**27 * 1.01)], dtype=dtypes.int8)) self.assertAllClose(1.0, self.evaluate(sparsity)) @test_util.run_deprecated_v1 def testZeroFraction2_27Ones(self): sparsity = nn_impl.zero_fraction( array_ops.ones([int(2**27 * 1.01)], dtype=dtypes.int8)) self.assertAllClose(0.0, self.evaluate(sparsity)) @test_util.run_deprecated_v1 def testUnknownSize(self): value = array_ops.placeholder(dtype=dtypes.float32) sparsity = nn_impl.zero_fraction(value) with self.cached_session() as sess: self.assertAllClose( 0.25, sess.run(sparsity, {value: [[0., 1.], [0.3, 2.]]})) class SoftmaxTest(test_lib.TestCase, parameterized.TestCase): def _softmax(self, x): assert len(x.shape) == 2 m = x.max(1)[:, np.newaxis] u = np.exp(x - m) z = u.sum(1)[:, np.newaxis] return u / z @test_util.run_in_graph_and_eager_modes def testSoftmax(self): x_shape = [5, 10] x_np = np.random.randn(*x_shape).astype(np.float32) 
y_np = self._softmax(x_np) x_tf = constant_op.constant(x_np) y_tf = nn_ops.softmax_v2(x_tf) y_tf_last_dim = nn_ops.softmax_v2(x_tf, 1) y_tf_np = self.evaluate(y_tf) y_tf_last_dim_np = self.evaluate(y_tf_last_dim) eps = 1e-3 self.assertAllClose(y_tf_np, y_np, eps) self.assertAllClose(y_tf_last_dim_np, y_np, eps) def testSoftmaxAxes(self): arr = np.linspace(0., 1, 12).reshape(3, 4) x_neg_axis = nn_ops.softmax_v2(arr, axis=-2) y_pos_axis = nn_ops.softmax_v2(arr, axis=0) z_gt_axis = nn_ops.softmax_v2(arr, axis=0) x_neg_axis_tf = self.evaluate(x_neg_axis) y_pos_axis_tf = self.evaluate(y_pos_axis) z_gt_axis_tf = self.evaluate(z_gt_axis) eps = 1e-3 self.assertAllClose(x_neg_axis_tf, y_pos_axis_tf, eps) self.assertAllClose(y_pos_axis_tf, z_gt_axis_tf, eps) @parameterized.parameters(((5, 10),), ((2, 3, 4),)) @test_util.run_deprecated_v1 def testGradient(self, x_shape): x_np = np.random.randn(*x_shape).astype(np.float64) with self.cached_session(): x_tf = constant_op.constant(x_np) y_tf = nn_ops.softmax_v2(x_tf) err = gradient_checker.compute_gradient_error(x_tf, x_shape, y_tf, x_shape) eps = 2e-8 self.assertLess(err, eps) class LogPoissonLossTest(test_lib.TestCase): def _log_poisson_loss(self, x, z, compute_full_loss=False): lpl = np.exp(x) - z * x if compute_full_loss: stirling_approx = z * np.log(z) - z + 0.5 * np.log(2. * np.pi * z) lpl += np.ma.masked_array(stirling_approx, mask=(z <= 1)).filled(0.) return lpl @test_util.run_in_graph_and_eager_modes def testLogPoissonLoss(self): x_shape = [5, 10] x_np = np.random.randn(*x_shape).astype(np.float32) z_np = np.random.randint(0, 5, size=x_shape).astype(np.float32) y_np = self._log_poisson_loss(x_np, z_np, compute_full_loss=False) y_np_stirling = self._log_poisson_loss(x_np, z_np, compute_full_loss=True) y_tf = nn_impl.log_poisson_loss(z_np, x_np, compute_full_loss=False) y_tf_stirling = nn_impl.log_poisson_loss(z_np, x_np, compute_full_loss=True) y_tf_np = self.evaluate(y_tf) y_tf_np_stirling = self.evaluate(y_tf_stirling) eps = 1e-3 self.assertAllClose(y_tf_np, y_np, eps) self.assertAllClose(y_tf_np_stirling, y_np_stirling, eps) @test_util.run_deprecated_v1 def testGradient(self): x_shape = [5, 10] x_np = np.random.randn(*x_shape).astype(np.float64) z_np = np.random.randint(0, 5, size=x_shape).astype(np.float64) with self.cached_session(): x_tf = constant_op.constant(x_np) y_tf = nn_impl.log_poisson_loss(z_np, x_tf, compute_full_loss=False) y_tf_stirling = nn_impl.log_poisson_loss( z_np, x_tf, compute_full_loss=True) err = gradient_checker.compute_gradient_error(x_tf, x_shape, y_tf, x_shape) err_stirling = gradient_checker.compute_gradient_error( x_tf, x_shape, y_tf_stirling, x_shape) eps = 1e-6 self.assertLess(err, eps) self.assertLess(err_stirling, eps) class LogSoftmaxTest(test_lib.TestCase, parameterized.TestCase): def _log_softmax(self, x): assert len(x.shape) == 2 m = x.max(1)[:, np.newaxis] u = x - m return u - np.log(np.sum(np.exp(u), 1, keepdims=True)) @test_util.run_in_graph_and_eager_modes def testLogSoftmax(self): x_shape = [5, 10] x_np = np.random.randn(*x_shape).astype(np.float32) y_np = self._log_softmax(x_np) x_tf = constant_op.constant(x_np) y_tf = nn_ops.log_softmax_v2(x_tf) y_tf_np = self.evaluate(y_tf) eps = 1e-3 self.assertAllClose(y_tf_np, y_np, eps) def testLogSoftmaxAxes(self): arr = np.linspace(0., 1, 12).reshape(3, 4) x_neg_axis = nn_ops.log_softmax_v2(arr, axis=-2) y_pos_axis = nn_ops.log_softmax_v2(arr, axis=0) z_gt_axis = nn_ops.log_softmax_v2(arr, axis=0) x_neg_axis_tf = self.evaluate(x_neg_axis) y_pos_axis_tf = 
self.evaluate(y_pos_axis) z_gt_axis_tf = self.evaluate(z_gt_axis) eps = 1e-3 self.assertAllClose(x_neg_axis_tf, y_pos_axis_tf, eps) self.assertAllClose(y_pos_axis_tf, z_gt_axis_tf, eps) @parameterized.parameters(((5, 10),), ((2, 3, 4),)) @test_util.run_deprecated_v1 def testGradient(self, x_shape): x_np = np.random.randn(*x_shape).astype(np.float64) with self.cached_session(): x_tf = constant_op.constant(x_np) y_tf = nn_ops.log_softmax_v2(x_tf) err = gradient_checker.compute_gradient_error(x_tf, x_shape, y_tf, x_shape) eps = 1e-7 self.assertLess(err, eps) class L2LossTest(test_lib.TestCase): @test_util.run_in_graph_and_eager_modes def testL2Loss(self): for dtype in [dtypes.float32, dtypes.float64]: x = constant_op.constant( [1.0, 0.0, 3.0, 2.0], shape=[2, 2], name="x", dtype=dtype) l2loss = nn_ops.l2_loss(x) value = self.evaluate(l2loss) self.assertAllClose(7.0, value) @test_util.run_deprecated_v1 def testGradient(self): x_shape = [20, 7, 3] np.random.seed(1) # Make it reproducible. x_val = np.random.random_sample(x_shape).astype(np.float64) with self.cached_session(): x = constant_op.constant(x_val, name="x") output = nn_ops.l2_loss(x) err = gradient_checker.compute_gradient_error(x, x_shape, output, [1]) print("L2Loss gradient err = %g " % err) err_tolerance = 1e-10 self.assertLess(err, err_tolerance) class L2NormalizeTest(test_lib.TestCase): def _l2Normalize(self, x, dim): if isinstance(dim, list): norm = np.linalg.norm(x, axis=tuple(dim)) for d in dim: norm = np.expand_dims(norm, d) return x / norm else: norm = np.apply_along_axis(np.linalg.norm, dim, x) return x / np.expand_dims(norm, dim) @test_util.run_in_graph_and_eager_modes def testL2Normalize(self): x_shape = [20, 7, 3] np.random.seed(1) x_np = np.random.random_sample(x_shape).astype(np.float32) for dim in range(len(x_shape)): y_np = self._l2Normalize(x_np, dim) x_tf = constant_op.constant(x_np, name="x") y_tf = nn_impl.l2_normalize_v2(x_tf, dim) self.assertAllClose(y_np, self.evaluate(y_tf)) @test_util.run_in_graph_and_eager_modes def testL2NormalizeDimArray(self): x_shape = [20, 7, 3] np.random.seed(1) x_np = np.random.random_sample(x_shape).astype(np.float32) dim = [1, 2] y_np = self._l2Normalize(x_np, dim) x_tf = constant_op.constant(x_np, name="x") y_tf = nn_impl.l2_normalize_v2(x_tf, dim) self.assertAllClose(y_np, self.evaluate(y_tf)) @test_util.run_deprecated_v1 def testL2NormalizeGradient(self): x_shape = [20, 7, 3] np.random.seed(1) x_np = np.random.random_sample(x_shape).astype(np.float64) for dim in range(len(x_shape)): with self.cached_session(): x_tf = constant_op.constant(x_np, name="x") y_tf = nn_impl.l2_normalize_v2(x_tf, dim) err = gradient_checker.compute_gradient_error(x_tf, x_shape, y_tf, x_shape) print("L2Normalize gradient err = %g " % err) self.assertLess(err, 1e-4) class DropoutTest(test_lib.TestCase): def testDropout(self): # Runs dropout with 0-1 tensor 10 times, sum the number of ones and validate # that it is producing approximately the right number of ones over a large # number of samples, based on the keep probability. x_dim = 40 y_dim = 30 num_iter = 10 for keep_prob in [0.1, 0.5, 0.8]: t = constant_op.constant(1.0, shape=[x_dim, y_dim], dtype=dtypes.float32) dropout = nn_ops.dropout(t, keep_prob) final_count = 0 self.assertEqual([x_dim, y_dim], dropout.get_shape()) for _ in xrange(0, num_iter): value = self.evaluate(dropout) final_count += np.count_nonzero(value) # Verifies that there are only two values: 0 and 1/keep_prob. 
sorted_value = np.unique(np.sort(value)) self.assertEqual(0, sorted_value[0]) self.assertAllClose(1 / keep_prob, sorted_value[1]) # Check that we are in the 15% error range expected_count = x_dim * y_dim * keep_prob * num_iter rel_error = math.fabs(final_count - expected_count) / expected_count print(rel_error) self.assertTrue(rel_error < 0.15) def testShapedDropout(self): # Runs dropout with 0-1 tensor 10 times, sum the number of ones and validate # that it is producing approximately the right number of ones over a large # number of samples, based on the keep probability. This time with shaped # noise. x_dim = 40 * 30 y_dim = 3 num_iter = 10 for keep_prob in [0.1, 0.5, 0.8]: t = constant_op.constant(1.0, shape=[x_dim, y_dim], dtype=dtypes.float32) dropout = nn_ops.dropout(t, keep_prob, noise_shape=[x_dim, 1]) self.assertEqual([x_dim, y_dim], dropout.get_shape()) final_count = 0 for _ in xrange(0, num_iter): value = self.evaluate(dropout) final_count += np.count_nonzero(value) # Verifies that there are only two values: 0 and 1/keep_prob. sorted_value = np.unique(np.sort(value)) self.assertEqual(0, sorted_value[0]) self.assertAllClose(1 / keep_prob, sorted_value[1]) # Check that we are in the 15% error range expected_count = x_dim * y_dim * keep_prob * num_iter rel_error = math.fabs(final_count - expected_count) / expected_count print(rel_error) self.assertTrue(rel_error < 0.15) def testShapedDropoutCorrelation(self): # Runs a shaped dropout and tests that the correlations are correct. x_dim = 40 y_dim = 30 num_iter = 10 for keep_prob in [0.1, 0.5, 0.8]: t = constant_op.constant(1.0, shape=[x_dim, y_dim], dtype=dtypes.float32) dropout = nn_ops.dropout(t, keep_prob, noise_shape=[x_dim, 1]) self.assertEqual([x_dim, y_dim], dropout.get_shape()) for _ in xrange(0, num_iter): value = self.evaluate(dropout) # Verifies that each y column as only one type of activation. for i in xrange(x_dim): sorted_value = np.unique(np.sort(value[i, :])) self.assertEqual(sorted_value.size, 1) @test_util.run_deprecated_v1 def testDropoutPlaceholderKeepProb(self): # Runs dropout with 0-1 tensor 10 times, sum the number of ones and validate # that it is producing approximately the right number of ones over a large # number of samples, based on the keep probability. x_dim = 40 y_dim = 30 num_iter = 10 for keep_prob in [0.1, 0.5, 0.8]: with self.cached_session(): t = constant_op.constant( 1.0, shape=[x_dim, y_dim], dtype=dtypes.float32) keep_prob_placeholder = array_ops.placeholder(dtypes.float32) dropout = nn_ops.dropout(t, keep_prob_placeholder) final_count = 0 self.assertEqual([x_dim, y_dim], dropout.get_shape()) for _ in xrange(0, num_iter): value = dropout.eval(feed_dict={keep_prob_placeholder: keep_prob}) final_count += np.count_nonzero(value) # Verifies that there are only two values: 0 and 1/keep_prob. 
sorted_value = np.unique(np.sort(value)) self.assertEqual(0, sorted_value[0]) self.assertAllClose(1 / keep_prob, sorted_value[1]) # Check that we are in the 15% error range expected_count = x_dim * y_dim * keep_prob * num_iter rel_error = math.fabs(final_count - expected_count) / expected_count print(rel_error) self.assertTrue(rel_error < 0.15) @test_util.run_deprecated_v1 def testShapedDropoutUnknownShape(self): x_dim = 40 y_dim = 30 keep_prob = 0.5 x = constant_op.constant(1.0, shape=[x_dim, y_dim], dtype=dtypes.float32) dropout_x = nn_ops.dropout( x, keep_prob, noise_shape=array_ops.placeholder(dtypes.int32)) self.assertEqual(x.get_shape(), dropout_x.get_shape()) def testPartialShapedDropout(self): x_dim = 40 * 30 y_dim = 3 num_iter = 10 for keep_prob in [0.1, 0.5, 0.8]: t = constant_op.constant(1.0, shape=[x_dim, y_dim], dtype=dtypes.float32) # Set noise_shape=[None, 1] which means [x_dim, 1]. dropout = nn_ops.dropout(t, keep_prob, noise_shape=[None, 1]) self.assertEqual([x_dim, y_dim], dropout.get_shape()) final_count = 0 for _ in xrange(0, num_iter): value = self.evaluate(dropout) final_count += np.count_nonzero(value) # Verifies that there are only two values: 0 and 1/keep_prob. sorted_value = np.unique(np.sort(value)) self.assertEqual(0, sorted_value[0]) self.assertAllClose(1 / keep_prob, sorted_value[1]) # Check that we are in the 15% error range expected_count = x_dim * y_dim * keep_prob * num_iter rel_error = math.fabs(final_count - expected_count) / expected_count print(rel_error) self.assertTrue(rel_error < 0.15) @test_util.run_deprecated_v1 def testInvalidKeepProb(self): x_dim = 40 y_dim = 30 t = constant_op.constant(1.0, shape=[x_dim, y_dim], dtype=dtypes.float32) with self.assertRaises(ValueError): nn_ops.dropout(t, -1.0) with self.assertRaises(ValueError): nn_ops.dropout(t, 1.1) with self.assertRaises(ValueError): nn_ops.dropout(t, [0.0, 1.0]) with self.assertRaises(ValueError): nn_ops.dropout(t, array_ops.placeholder(dtypes.float64)) with self.assertRaises(ValueError): nn_ops.dropout(t, array_ops.placeholder(dtypes.float32, shape=[2])) @test_util.run_deprecated_v1 def testInvalidRate(self): x_dim = 40 y_dim = 30 t = constant_op.constant(1.0, shape=[x_dim, y_dim], dtype=dtypes.float32) with self.assertRaises(ValueError): nn_ops.dropout_v2(t, -1.0) with self.assertRaises(ValueError): nn_ops.dropout_v2(t, 1.1) with self.assertRaises(ValueError): nn_ops.dropout_v2(t, [0.0, 1.0]) def testLargeRate(self): x_dim = 40 y_dim = 30 t = constant_op.constant(1.0, shape=[x_dim, y_dim], dtype=dtypes.float32) _ = nn_ops.dropout_v2(t, 0.9) @test_util.run_deprecated_v1 def testShapedDropoutShapeError(self): # Runs shaped dropout and verifies an error is thrown on misshapen noise. 
x_dim = 40 y_dim = 30 keep_prob = 0.5 t = constant_op.constant(1.0, shape=[x_dim, y_dim], dtype=dtypes.float32) with self.assertRaises(ValueError): _ = nn_ops.dropout(t, keep_prob, noise_shape=[x_dim, y_dim + 10]) with self.assertRaises(ValueError): _ = nn_ops.dropout(t, keep_prob, noise_shape=[x_dim, y_dim, 5]) with self.assertRaises(ValueError): _ = nn_ops.dropout(t, keep_prob, noise_shape=[x_dim + 3]) with self.assertRaises(ValueError): _ = nn_ops.dropout(t, keep_prob, noise_shape=[x_dim]) # test that broadcasting proceeds _ = nn_ops.dropout(t, keep_prob, noise_shape=[y_dim]) _ = nn_ops.dropout(t, keep_prob, noise_shape=[1, y_dim]) _ = nn_ops.dropout(t, keep_prob, noise_shape=[x_dim, 1]) _ = nn_ops.dropout(t, keep_prob, noise_shape=[1, 1]) def testNoDropoutFast(self): x = array_ops.zeros((5,)) y = nn_ops.dropout(x, keep_prob=1) self.assertTrue(x is y) y = nn_ops.dropout_v2(x, rate=0) self.assertTrue(x is y) def testDropoutWithIntegerInputs(self): x = constant_op.constant([1, 1, 1, 1, 1]) with self.assertRaises(ValueError): _ = nn_ops.dropout(x, 0.5) class ComputeSampledLogitsTest(test_lib.TestCase): def setUp(self): self._eps = 1e-3 def _GenerateTestData(self, num_classes, dim, batch_size, num_true, labels, sampled, subtract_log_q): """Randomly generates input/output data for a single test case. This function returns numpy constants for use in a test case. Args: num_classes: An int. The number of embedding classes in the test case. dim: An int. The dimension of the embedding. batch_size: An int. The batch size. num_true: An int. The number of target classes per training example. labels: A list of batch_size * num_true ints. The target classes. sampled: A list of indices in [0, num_classes). subtract_log_q: A bool corresponding to the parameter in _compute_sampled_logits(). Returns: weights: Embedding weights to use as test input. It is a numpy array of shape [num_classes, dim] biases: Embedding biases to use as test input. It is a numpy array of shape [num_classes]. hidden_acts: Forward activations of the network to use as test input. It is a numpy array of shape [batch_size, dim]. sampled_vals: A tuple based on `sampled` to use as test input in the format returned by a *_candidate_sampler function. exp_logits: The output logits expected from _compute_sampled_logits(). It is a numpy array of shape [batch_size, num_true + len(sampled)]. exp_labels: The output labels expected from _compute_sampled_logits(). It is a numpy array of shape [batch_size, num_true + len(sampled)]. 
""" weights = np.random.randn(num_classes, dim).astype(np.float32) biases = np.random.randn(num_classes).astype(np.float32) hidden_acts = np.random.randn(batch_size, dim).astype(np.float32) true_exp = np.full([batch_size, 1], fill_value=0.5, dtype=np.float32) sampled_exp = np.full([len(sampled)], fill_value=0.5, dtype=np.float32) sampled_vals = (sampled, true_exp, sampled_exp) sampled_w, sampled_b = weights[sampled], biases[sampled] true_w, true_b = weights[labels], biases[labels] true_logits = np.sum( hidden_acts.reshape((batch_size, 1, dim)) * true_w.reshape( (batch_size, num_true, dim)), axis=2) true_b = true_b.reshape((batch_size, num_true)) true_logits += true_b sampled_logits = np.dot(hidden_acts, sampled_w.T) + sampled_b if subtract_log_q: true_logits -= np.log(true_exp) sampled_logits -= np.log(sampled_exp[np.newaxis, :]) exp_logits = np.concatenate([true_logits, sampled_logits], axis=1) exp_labels = np.hstack((np.ones_like(true_logits) / num_true, np.zeros_like(sampled_logits))) return weights, biases, hidden_acts, sampled_vals, exp_logits, exp_labels def _ShardTestEmbeddings(self, weights, biases, num_shards): """Shards the weights and biases returned by _GenerateTestData. Args: weights: The weights returned by _GenerateTestData. biases: The biases returned by _GenerateTestData. num_shards: The number of shards to create. Returns: sharded_weights: A list of size `num_shards` containing all the weights. sharded_biases: A list of size `num_shards` containing all the biases. """ with ops.Graph().as_default() as g: sharded_weights = variable_scope.get_variable( "w", partitioner=partitioned_variables.fixed_size_partitioner(num_shards), initializer=constant_op.constant(weights)) sharded_biases = variable_scope.get_variable( "b", partitioner=partitioned_variables.fixed_size_partitioner(num_shards), initializer=constant_op.constant(biases)) with self.session(graph=g) as sess: variables.global_variables_initializer().run() return self.evaluate([list(sharded_weights), list(sharded_biases)]) def testShapes(self): np.random.seed(0) num_classes = 5 batch_size = 3 for num_true in range(1, 5): labels = np.random.randint( low=0, high=num_classes, size=batch_size * num_true) (weights, biases, hidden_acts, sampled_vals, exp_logits, exp_labels) = self._GenerateTestData( num_classes=num_classes, dim=10, batch_size=batch_size, num_true=num_true, labels=labels, sampled=[1, 0, 2, 3], subtract_log_q=False) logits_tensor, labels_tensor = _compute_sampled_logits( weights=constant_op.constant(weights), biases=constant_op.constant(biases), labels=constant_op.constant( labels, dtype=dtypes.int64, shape=(batch_size, num_true)), inputs=constant_op.constant(hidden_acts), num_sampled=4, num_classes=num_classes, num_true=num_true, sampled_values=sampled_vals, subtract_log_q=False, remove_accidental_hits=False, partition_strategy="div", name="sampled_logits_basic_num_true_%d" % num_true) got_logits, got_labels = self.evaluate([logits_tensor, labels_tensor]) self.assertEqual(exp_logits.shape, got_logits.shape, self._eps) self.assertEqual(exp_labels.shape, got_labels.shape, self._eps) def testBasic(self): """Without accidental hit removal or subtract_log_q.""" np.random.seed(0) num_classes = 5 batch_size = 3 for num_true in range(1, 5): labels = np.random.randint( low=0, high=num_classes, size=batch_size * num_true) (weights, biases, hidden_acts, sampled_vals, exp_logits, exp_labels) = self._GenerateTestData( num_classes=num_classes, dim=10, batch_size=batch_size, num_true=num_true, labels=labels, sampled=[1, 0, 2, 
3], subtract_log_q=False) logits_tensor, labels_tensor = _compute_sampled_logits( weights=constant_op.constant(weights), biases=constant_op.constant(biases), labels=constant_op.constant( labels, dtype=dtypes.int64, shape=(batch_size, num_true)), inputs=constant_op.constant(hidden_acts), num_sampled=4, num_classes=num_classes, num_true=num_true, sampled_values=sampled_vals, subtract_log_q=False, remove_accidental_hits=False, partition_strategy="div", name="sampled_logits_basic_num_true_%d" % num_true) got_logits, got_labels = self.evaluate([logits_tensor, labels_tensor]) self.assertAllClose(exp_logits, got_logits, self._eps) self.assertAllClose(exp_labels, got_labels, self._eps) def testAccidentalHitRemoval(self): """With accidental hit removal, no subtract_log_q.""" np.random.seed(0) num_classes = 5 batch_size = 3 sampled = [1, 0, 2, 3] for num_true in range(1, 5): labels = np.random.randint( low=0, high=num_classes, size=batch_size * num_true) (weights, biases, hidden_acts, sampled_vals, _, _) = self._GenerateTestData( num_classes=num_classes, dim=10, batch_size=batch_size, num_true=num_true, labels=labels, sampled=sampled, subtract_log_q=False) logits_tensor, _ = _compute_sampled_logits( weights=constant_op.constant(weights), biases=constant_op.constant(biases), labels=constant_op.constant( labels, dtype=dtypes.int64, shape=(batch_size, num_true)), inputs=constant_op.constant(hidden_acts), num_sampled=len(sampled), num_classes=num_classes, num_true=num_true, sampled_values=sampled_vals, subtract_log_q=False, remove_accidental_hits=True, partition_strategy="div", name="sampled_logits_accidental_hit_removal_num_true_%d" % num_true) # Test that the exponentiated logits of accidental hits are near 0. # First we need to find the hits in this random test run: labels_reshape = labels.reshape((batch_size, num_true)) got_logits = self.evaluate(logits_tensor) for row in xrange(batch_size): row_labels = labels_reshape[row, :] for col in xrange(len(sampled)): if sampled[col] in row_labels: # We need to add the num_true_test offset into logits_* self.assertNear( np.exp(got_logits[row, col + num_true]), 0., self._eps) def testSubtractLogQ(self): """With subtract_log_q, no accidental hit removal.""" np.random.seed(0) num_classes = 5 batch_size = 3 for num_true in range(1, 5): labels = np.random.randint( low=0, high=num_classes, size=batch_size * num_true) (weights, biases, hidden_acts, sampled_vals, exp_logits, exp_labels) = self._GenerateTestData( num_classes=num_classes, dim=10, batch_size=batch_size, num_true=num_true, labels=labels, sampled=[1, 0, 2, 3], subtract_log_q=True) logits_tensor, labels_tensor = _compute_sampled_logits( weights=constant_op.constant(weights), biases=constant_op.constant(biases), labels=constant_op.constant( labels, dtype=dtypes.int64, shape=(batch_size, num_true)), inputs=constant_op.constant(hidden_acts), num_sampled=4, num_classes=num_classes, num_true=num_true, sampled_values=sampled_vals, subtract_log_q=True, remove_accidental_hits=False, partition_strategy="div", name="sampled_logits_subtract_log_q_num_true_%d" % num_true) got_logits, got_labels = self.evaluate([logits_tensor, labels_tensor]) self.assertAllClose(exp_logits, got_logits, self._eps) self.assertAllClose(exp_labels, got_labels, self._eps) def testSharded(self): """With sharded weights and sharded biases.""" np.random.seed(0) num_classes = 5 batch_size = 3 for num_true in range(1, 5): labels = np.random.randint( low=0, high=num_classes, size=batch_size * num_true) (weights, biases, hidden_acts, 
sampled_vals, exp_logits, exp_labels) = self._GenerateTestData( num_classes=num_classes, dim=10, batch_size=batch_size, num_true=num_true, labels=labels, sampled=[1, 0, 2, 3], subtract_log_q=False) weight_shards, bias_shards = self._ShardTestEmbeddings( weights, biases, num_shards=3) logits_tensor, labels_tensor = _compute_sampled_logits( weights=[constant_op.constant(shard) for shard in weight_shards], biases=[constant_op.constant(shard) for shard in bias_shards], labels=constant_op.constant( labels, dtype=dtypes.int64, shape=(batch_size, num_true)), inputs=constant_op.constant(hidden_acts), num_sampled=4, num_classes=num_classes, num_true=num_true, sampled_values=sampled_vals, subtract_log_q=False, remove_accidental_hits=False, partition_strategy="div", name="sampled_logits_sharded_num_true_%d" % num_true) got_logits, got_labels = self.evaluate([logits_tensor, labels_tensor]) self.assertAllClose(exp_logits, got_logits, self._eps) self.assertAllClose(exp_labels, got_labels, self._eps) def testNCELoss(self): # A simple test to verify the numerics. def _SigmoidCrossEntropyWithLogits(logits, targets): # logits, targets: float arrays of the same shape. assert logits.shape == targets.shape pred = 1. / (1. + np.exp(-logits)) eps = 0.0001 pred = np.minimum(np.maximum(pred, eps), 1 - eps) return -targets * np.log(pred) - (1. - targets) * np.log(1. - pred) np.random.seed(0) num_classes = 5 batch_size = 3 labels = [0, 1, 2] (weights, biases, hidden_acts, sampled_vals, exp_logits, exp_labels) = self._GenerateTestData( num_classes=num_classes, dim=10, batch_size=batch_size, num_true=1, labels=labels, sampled=[1, 0, 2, 3], subtract_log_q=True) exp_nce_loss = np.sum( _SigmoidCrossEntropyWithLogits(exp_logits, exp_labels), 1) got_nce_loss = nn_impl.nce_loss_v2( weights=constant_op.constant(weights), biases=constant_op.constant(biases), labels=constant_op.constant(labels, shape=(batch_size, 1)), inputs=constant_op.constant(hidden_acts), num_sampled=4, num_classes=num_classes, num_true=1, sampled_values=sampled_vals) self.assertAllClose(exp_nce_loss, self.evaluate(got_nce_loss), 1e-4) # Test with sharded weights and sharded biases. weight_shards, bias_shards = self._ShardTestEmbeddings( weights, biases, num_shards=3) got_nce_loss = nn_impl.nce_loss_v2( weights=[constant_op.constant(shard) for shard in weight_shards], biases=[constant_op.constant(shard) for shard in bias_shards], labels=constant_op.constant(labels, shape=(batch_size, 1)), inputs=constant_op.constant(hidden_acts), num_sampled=4, num_classes=num_classes, num_true=1, sampled_values=sampled_vals) self.assertAllClose(exp_nce_loss, self.evaluate(got_nce_loss), 1e-4) def testSampledSoftmaxLoss(self): # A simple test to verify the numerics. def _SoftmaxCrossEntropyWithLogits(logits, targets): # logits, targets: float arrays of the same shape. 
assert logits.shape == targets.shape stable_exp_logits = np.exp( logits - np.amax(logits, axis=1, keepdims=True)) pred = stable_exp_logits / np.sum(stable_exp_logits, 1, keepdims=True) return -np.sum(targets * np.log(pred + 1.0e-20), axis=1) np.random.seed(0) num_classes = 5 batch_size = 3 labels = [0, 1, 2] (weights, biases, hidden_acts, sampled_vals, exp_logits, exp_labels) = self._GenerateTestData( num_classes=num_classes, dim=10, batch_size=batch_size, num_true=1, labels=labels, sampled=[1, 0, 2, 3], subtract_log_q=True) exp_sampled_softmax_loss = _SoftmaxCrossEntropyWithLogits( exp_logits, exp_labels) got_sampled_softmax_loss = nn_impl.sampled_softmax_loss_v2( weights=constant_op.constant(weights), biases=constant_op.constant(biases), labels=constant_op.constant(labels, shape=(batch_size, 1)), inputs=constant_op.constant(hidden_acts), num_sampled=4, num_classes=num_classes, num_true=1, sampled_values=sampled_vals, remove_accidental_hits=False) self.assertAllClose(exp_sampled_softmax_loss, self.evaluate(got_sampled_softmax_loss), 1e-4) # Test with sharded weights and sharded biases. weight_shards, bias_shards = self._ShardTestEmbeddings( weights, biases, num_shards=3) got_sampled_softmax_loss = nn_impl.sampled_softmax_loss_v2( weights=[constant_op.constant(shard) for shard in weight_shards], biases=[constant_op.constant(shard) for shard in bias_shards], labels=constant_op.constant(labels, shape=(batch_size, 1)), inputs=constant_op.constant(hidden_acts), num_sampled=4, num_classes=num_classes, num_true=1, sampled_values=sampled_vals, remove_accidental_hits=False) self.assertAllClose(exp_sampled_softmax_loss, self.evaluate(got_sampled_softmax_loss), 1e-4) def testSampledSoftmaxLossBf16(self): # A simple test to verify the numerics for bfloat16. def _SoftmaxCrossEntropyWithLogits(logits, targets): # logits, targets: float arrays of the same shape. assert logits.shape == targets.shape stable_exp_logits = np.exp( logits - np.amax(logits, axis=1, keepdims=True)) pred = stable_exp_logits / np.sum(stable_exp_logits, 1, keepdims=True) return -np.sum(targets * np.log(pred + 1.0e-20), axis=1) np.random.seed(0) num_classes = 5 batch_size = 3 labels = [0, 1, 2] sampled = [1, 0, 2, 3] (weights, biases, hidden_acts, _, exp_logits, exp_labels) = self._GenerateTestData( num_classes=num_classes, dim=10, batch_size=batch_size, num_true=1, labels=labels, sampled=sampled, subtract_log_q=True) exp_sampled_softmax_loss = _SoftmaxCrossEntropyWithLogits( exp_logits, exp_labels) true_exp_bf16 = np.full([batch_size, 1], fill_value=0.5, dtype=dtypes.bfloat16.as_numpy_dtype) sampled_exp_bf16 = np.full([len(sampled)], fill_value=0.5, dtype=dtypes.bfloat16.as_numpy_dtype) sampled_vals_bf16 = (sampled, true_exp_bf16, sampled_exp_bf16) got_sampled_softmax_loss = math_ops.cast( nn_impl.sampled_softmax_loss_v2( weights=constant_op.constant(weights, dtype=dtypes.bfloat16), biases=constant_op.constant(biases, dtype=dtypes.bfloat16), labels=constant_op.constant( labels, shape=(batch_size, 1), dtype=dtypes.bfloat16), inputs=constant_op.constant(hidden_acts, dtype=dtypes.bfloat16), num_sampled=4, num_classes=num_classes, num_true=1, sampled_values=sampled_vals_bf16, remove_accidental_hits=False), dtypes.float32) self.assertAllClose(exp_sampled_softmax_loss, self.evaluate(got_sampled_softmax_loss), 1e-1) class CReluTest(test_lib.TestCase): def test(self): np.random.seed(1) # Make it reproducible. 
x = np.random.randn(3, 4).astype(np.float32) y = np.concatenate([x * (x > 0), -x * (x < 0)], axis=1) z = self.evaluate(nn_ops.crelu(constant_op.constant(x))) self.assertAllClose(y, z, 1e-4) class ReluTest(test_lib.TestCase): def test(self): np.random.seed(1) # Make it reproducible. x = np.random.randn(3, 4).astype(np.float32) y = np.maximum(x, 0.0) z = self.evaluate(nn_ops.relu(constant_op.constant(x))) self.assertAllEqual(y, z) @test_util.run_deprecated_v1 def testNaNs(self): # Test that relu(nan) = nan for various sizes. for i in range(18): x = np.zeros(i) + np.nan with self.cached_session(): z = nn_ops.relu(constant_op.constant(x)).eval() self.assertTrue(np.isnan(z).all()) class LeakyReluTest(test_lib.TestCase): def testRange(self): batch_size = 3 height, width = 4, 4 np.random.seed(1) # Make it reproducible. inputs = np.random.uniform(size=(batch_size, height, width, 3)).astype( np.float32) inputs = constant_op.constant(inputs) outputs = nn_ops.leaky_relu(inputs) self.assertEquals(inputs.shape, outputs.shape) inputs, outputs = self.evaluate([inputs, outputs]) self.assertGreaterEqual(outputs.min(), 0.0) self.assertLessEqual(outputs.max(), 1.0) self.assertAllClose(inputs, outputs) @test_util.run_deprecated_v1 def testValues(self): for dtype in [np.int32, np.int64, np.float16, np.float32, np.float64]: np_values = np.array([-2, -1, 0, 1, 2], dtype=dtype) outputs = nn_ops.leaky_relu(constant_op.constant(np_values)) outputs = self.evaluate(outputs) tol = 2e-3 if dtype == np.float16 else 1e-6 self.assertAllClose( outputs, [-0.4, -0.2, 0.0, 1.0, 2.0], rtol=tol, atol=tol) @test_util.run_deprecated_v1 def testName(self): np_values = np.array([-2, -1, 0, 1, 2], dtype=np.float64) outputs_with_name_set = nn_ops.leaky_relu( constant_op.constant(np_values), name='test_relu_op') self.assertEqual(outputs_with_name_set.name, 'test_relu_op:0') outputs_without_name_set = nn_ops.leaky_relu( constant_op.constant(np_values)) self.assertEqual(outputs_without_name_set.name, 'LeakyRelu:0') class SwishTest(test_lib.TestCase): @test_util.run_deprecated_v1 def testValues(self): np_values = np.array( [np.linspace(-7.0, 0.0, 100), np.linspace(0.0, 7.0, 100)], dtype=np.float32) tf_values = constant_op.constant(np_values) actual_tf_outputs = nn_impl.swish(tf_values) expected_tf_outputs = tf_values * math_ops.sigmoid(tf_values) actual_outputs, expected_outputs = self.evaluate( [actual_tf_outputs, expected_tf_outputs]) self.assertAllClose(actual_outputs, expected_outputs) @test_util.run_deprecated_v1 def testGradients(self): shape = [5, 3, 4] sigma = 5 input_values = np.random.randn(*shape) * sigma x_tf = constant_op.constant(input_values) y_tf = nn_impl.swish(x_tf) with self.cached_session(): err = gradient_checker.compute_gradient_error(x_tf, shape, y_tf, shape) self.assertLess(err, 1e-4) class MomentsTest(test_lib.TestCase): def doOutputTest(self, input_shape, moments_axes, tol=1e-4, check_gradients=False): for mu in [0.0, 1.0, 1e3]: for sigma in [1.0, 0.1]: for keep_dims in [True, False]: input_values = np.random.rand(*input_shape) * sigma + mu expected_mean = np.mean( input_values, axis=moments_axes, keepdims=keep_dims) expected_var = np.var( input_values, axis=moments_axes, keepdims=keep_dims) with ops.Graph().as_default() as g: with self.session(graph=g) as sess: inputs = constant_op.constant( input_values, shape=input_shape, dtype=dtypes.float32) mean, variance = nn_impl.moments_v2( inputs, moments_axes, keepdims=keep_dims) if check_gradients: err = gradient_checker.compute_gradient_error( inputs, input_shape, 
mean, mean.shape.as_list()) self.assertLess(err, 1e-3) err = gradient_checker.compute_gradient_error( inputs, input_shape, variance, variance.shape.as_list()) self.assertLess(err, 1e-3) # Evaluate. [mean, variance] = self.evaluate([mean, variance]) # Make sure that there are no NaNs self.assertFalse(np.isnan(mean).any()) self.assertFalse(np.isnan(variance).any()) self.assertAllClose(mean, expected_mean, rtol=tol, atol=tol) self.assertAllClose(variance, expected_var, rtol=tol, atol=tol) def testOutputAndGradient2DInput0(self): self.doOutputTest((10, 10), (0,), check_gradients=True) def testOutputAndGradient2DInput01(self): self.doOutputTest((10, 10), (0, 1), check_gradients=True) def testOutput2DInput0(self): self.doOutputTest((10, 300), (0,)) def testOutput2DInput1(self): self.doOutputTest((10, 300), (1,)) def testOutput2DInput01(self): self.doOutputTest((10, 300), (0, 1)) def testOutput4DInput0(self): self.doOutputTest((10, 10, 10, 30), (0,)) def testOutput4DInput1(self): self.doOutputTest((10, 10, 10, 30), (1,)) def testOutput4DInput3(self): self.doOutputTest((10, 10, 10, 30), (3,)) def testOutput4DInput012(self): self.doOutputTest((10, 10, 10, 30), (0, 1, 2)) def testOutput4DInput123(self): self.doOutputTest((10, 10, 10, 30), (1, 2, 3)) class DataFormatDimMapTest(test_lib.TestCase): def _test(self, x_val, y_val_expected): x = constant_op.constant(x_val) y = nn_ops.data_format_dim_map(x) y_val = self.evaluate(y) self.assertAllEqual(y_val, y_val_expected) def test(self): self._test(0, 0) self._test(1, 2) self._test(2, 3) self._test(3, 1) self._test(-1, 1) self._test(-2, 3) self._test(-3, 2) self._test(-4, 0) self._test([1, 3], [2, 1]) self._test([1, 3, -2], [2, 1, 3]) self._test([1, -3, -2], [2, 2, 3]) self._test([[1, -3], [1, -1]], [[2, 2], [2, 1]]) def testNHWCtoNCHW(self): x_val = [1, -3, -2] y_val_expected = [2, 2, 3] x = constant_op.constant(x_val) y = nn_ops.data_format_dim_map(x, src_format="NHWC", dst_format="NCHW") with test_util.use_gpu(): y_val = self.evaluate(y) self.assertAllEqual(y_val, y_val_expected) def testNHWCtoHWNC(self): x_val = [-4, -3, -2, -1, 0, 1, 2, 3] y_val_expected = [2, 0, 1, 3, 2, 0, 1, 3] x = constant_op.constant(x_val) y = nn_ops.data_format_dim_map(x, src_format="NHWC", dst_format="HWNC") with test_util.use_gpu(): y_val = self.evaluate(y) self.assertAllEqual(y_val, y_val_expected) def testNHWCtoWHCN(self): x_val = [-4, -3, -2, -1, 0, 1, 2, 3] y_val_expected = [3, 1, 0, 2, 3, 1, 0, 2] x = constant_op.constant(x_val) y = nn_ops.data_format_dim_map(x, src_format="NHWC", dst_format="WHCN") with test_util.use_gpu(): y_val = self.evaluate(y) self.assertAllEqual(y_val, y_val_expected) def testArbitraryASCII(self): x_val = [-4, -3, -2, -1, 0, 1, 2, 3] y_val_expected = [3, 2, 1, 0, 3, 2, 1, 0] x = constant_op.constant(x_val) y = nn_ops.data_format_dim_map(x, src_format="qwer", dst_format="rewq") with test_util.use_gpu(): y_val = self.evaluate(y) self.assertAllEqual(y_val, y_val_expected) class DataFormatVectorPermuteTest(test_lib.TestCase): def testNHWCToNCHW(self): x_val = [7, 4, 9, 3] x = constant_op.constant(x_val) y = nn_ops.data_format_vec_permute(x) with test_util.use_gpu(): y_val = self.evaluate(y) self.assertAllEqual(y_val, [7, 3, 4, 9]) def testNCHWToNHWC(self): x_val = [7, 4, 9, 3] x = constant_op.constant(x_val) y = nn_ops.data_format_vec_permute(x, src_format="NCHW", dst_format="NHWC") with test_util.use_gpu(): y_val = self.evaluate(y) self.assertAllEqual(y_val, [7, 9, 3, 4]) def testNHWCToHWNC(self): x_val = [7, 4, 9, 3] x = constant_op.constant(x_val) 
y = nn_ops.data_format_vec_permute(x, src_format="NHWC", dst_format="HWNC") with test_util.use_gpu(): y_val = self.evaluate(y) self.assertAllEqual(y_val, [4, 9, 7, 3]) def testHWNCToNHWC(self): x_val = [7, 4, 9, 3] x = constant_op.constant(x_val) y = nn_ops.data_format_vec_permute(x, src_format="HWNC", dst_format="NHWC") with test_util.use_gpu(): y_val = self.evaluate(y) self.assertAllEqual(y_val, [9, 7, 4, 3]) def testNHWCToNCHW2D(self): x_val = [[7, 4], [9, 3], [4, 5], [5, 1]] x = constant_op.constant(x_val) y = nn_ops.data_format_vec_permute(x) with test_util.use_gpu(): y_val = self.evaluate(y) self.assertAllEqual(y_val, [[7, 4], [5, 1], [9, 3], [4, 5]]) def testNHWCToHWNC2D(self): x_val = [[7, 4], [9, 3], [4, 5], [5, 1]] x = constant_op.constant(x_val) y = nn_ops.data_format_vec_permute(x, src_format="NHWC", dst_format="HWNC") with test_util.use_gpu(): y_val = self.evaluate(y) self.assertAllEqual(y_val, [[9, 3], [4, 5], [7, 4], [5, 1]]) def testHWNCToNHWC2D(self): x_val = [[7, 4], [9, 3], [4, 5], [5, 1]] x = constant_op.constant(x_val) y = nn_ops.data_format_vec_permute(x, src_format="HWNC", dst_format="NHWC") with test_util.use_gpu(): y_val = self.evaluate(y) self.assertAllEqual(y_val, [[4, 5], [7, 4], [9, 3], [5, 1]]) def testNCHWToNHWC2D(self): x_val = [[7, 4], [9, 3], [4, 5], [5, 1]] x = constant_op.constant(x_val) y = nn_ops.data_format_vec_permute(x, src_format="NCHW", dst_format="NHWC") with test_util.use_gpu(): y_val = self.evaluate(y) self.assertAllEqual(y_val, [[7, 4], [4, 5], [5, 1], [9, 3]]) @test_util.run_all_in_graph_and_eager_modes class AvgPoolTest(test_lib.TestCase): def test1DTensor(self): x = array_ops.ones([3, 6, 5]) ksize = 2 strides = 2 y1 = nn_ops.avg_pool_v2(x, ksize, strides, "SAME") y2 = nn_ops.avg_pool1d(x, ksize, strides, "SAME") self.assertAllEqual(self.evaluate(y1), self.evaluate(y2)) def test1DNumpy(self): x = np.ones([3, 6, 5]) ksize = 2 strides = 2 y1 = nn_ops.avg_pool_v2(x, ksize, strides, "SAME") y2 = nn_ops.avg_pool1d(x, ksize, strides, "SAME") self.assertAllEqual(self.evaluate(y1), self.evaluate(y2)) def test2DTensor(self): x = array_ops.ones([3, 6, 6, 5]) ksize = 2 strides = 2 y1 = nn_ops.avg_pool_v2(x, ksize, strides, "SAME") y2 = nn_ops.avg_pool(x, ksize, strides, "SAME") self.assertAllEqual(self.evaluate(y1), self.evaluate(y2)) def test2DNumpy(self): x = np.ones([3, 6, 6, 5]) ksize = 2 strides = 2 y1 = nn_ops.avg_pool_v2(x, ksize, strides, "SAME") y2 = nn_ops.avg_pool(x, ksize, strides, "SAME") self.assertAllEqual(self.evaluate(y1), self.evaluate(y2)) def test3DTensor(self): x = array_ops.ones([3, 7, 6, 6, 5]) ksize = 2 strides = 2 y1 = nn_ops.avg_pool_v2(x, ksize, strides, "SAME") y2 = nn_ops.avg_pool3d(x, ksize, strides, "SAME") self.assertAllEqual(self.evaluate(y1), self.evaluate(y2)) def test3DNumpy(self): x = np.ones([3, 7, 6, 6, 5], dtype=np.float32) ksize = 2 strides = 2 y1 = nn_ops.avg_pool_v2(x, ksize, strides, "SAME") y2 = nn_ops.avg_pool3d(x, ksize, strides, "SAME") self.assertAllEqual(self.evaluate(y1), self.evaluate(y2)) @test_util.run_all_in_graph_and_eager_modes class MaxPoolTest(test_lib.TestCase): def test1DTensor(self): x = array_ops.ones([3, 6, 5]) ksize = 2 strides = 2 y1 = nn_ops.max_pool_v2(x, ksize, strides, "SAME") y2 = nn_ops.max_pool1d(x, ksize, strides, "SAME") self.assertAllEqual(self.evaluate(y1), self.evaluate(y2)) def test1DNumpy(self): x = np.ones([3, 6, 5]) ksize = 2 strides = 2 y1 = nn_ops.max_pool_v2(x, ksize, strides, "SAME") y2 = nn_ops.max_pool1d(x, ksize, strides, "SAME") 
self.assertAllEqual(self.evaluate(y1), self.evaluate(y2)) def test2DTensor(self): x = array_ops.ones([3, 6, 6, 5]) ksize = 2 strides = 2 y1 = nn_ops.max_pool_v2(x, ksize, strides, "SAME") y2 = nn_ops.max_pool(x, ksize, strides, "SAME") self.assertAllEqual(self.evaluate(y1), self.evaluate(y2)) def test2DNumpy(self): x = np.ones([3, 6, 6, 5]) ksize = 2 strides = 2 y1 = nn_ops.max_pool_v2(x, ksize, strides, "SAME") y2 = nn_ops.max_pool(x, ksize, strides, "SAME") self.assertAllEqual(self.evaluate(y1), self.evaluate(y2)) def test3DTensor(self): x = array_ops.ones([3, 7, 6, 6, 5]) ksize = 2 strides = 2 y1 = nn_ops.max_pool_v2(x, ksize, strides, "SAME") y2 = nn_ops.max_pool3d(x, ksize, strides, "SAME") self.assertAllEqual(self.evaluate(y1), self.evaluate(y2)) def test3DNumpy(self): x = np.ones([3, 7, 6, 6, 5], dtype=np.float32) ksize = 2 strides = 2 y1 = nn_ops.max_pool_v2(x, ksize, strides, "SAME") y2 = nn_ops.max_pool3d(x, ksize, strides, "SAME") self.assertAllEqual(self.evaluate(y1), self.evaluate(y2)) def testIncorrectSizeInputSmall(self): x = array_ops.ones([3, 4]) with self.assertRaisesRegex( ValueError, "Input tensor must be of rank 3, 4 or 5 but was 2."): nn_ops.max_pool_v2(x, 2, 2, "SAME") def testIncorrectSizeInput(self): x = array_ops.ones([3, 4, 1, 2, 1, 2]) with self.assertRaisesRegex( ValueError, "Input tensor must be of rank 3, 4 or 5 but was 6."): nn_ops.max_pool_v2(x, 2, 2, "SAME") @test_util.run_all_in_graph_and_eager_modes class ConvolutionTest(test_lib.TestCase): def testUnknownSize(self): x = tensor_spec.TensorSpec(None, dtypes.float32, name="x") k = np.ones([3, 6, 6, 5]) @def_function.function def F(value): return nn_ops.convolution(value, k, "SAME") F.get_concrete_function(x) class ConvTransposeTest(test_lib.TestCase): def test1D(self): t = array_ops.ones([2, 4, 3]) v = array_ops.ones([2, 5, 3]) strides = 2 y1 = nn_ops.conv1d_transpose(t, v, [2, 8, 5], strides) y2 = nn_ops.conv_transpose(t, v, [2, 8, 5], strides) self.assertAllEqual(self.evaluate(y1), self.evaluate(y2)) def test1DTensor(self): t = array_ops.ones([2, 4, 3]) v = array_ops.ones([2, 5, 3]) strides = 2 y1 = nn_ops.conv1d_transpose(t, v, [2, 8, 5], strides) y2 = nn_ops.conv_transpose(t, v, constant_op.constant([2, 8, 5]), strides) self.assertAllEqual(self.evaluate(y1), self.evaluate(y2)) def test2D(self): t = array_ops.ones([2, 4, 4, 3]) v = array_ops.ones([2, 2, 5, 3]) strides = 2 y1 = nn_ops.conv2d_transpose_v2(t, v, [2, 8, 8, 5], strides) y2 = nn_ops.conv_transpose(t, v, [2, 8, 8, 5], strides) self.assertAllEqual(self.evaluate(y1), self.evaluate(y2)) def test2DTensor(self): t = array_ops.ones([2, 4, 4, 3]) v = array_ops.ones([2, 2, 5, 3]) strides = 2 y1 = nn_ops.conv2d_transpose_v2(t, v, [2, 8, 8, 5], strides) y2 = nn_ops.conv_transpose(t, v, constant_op.constant([2, 8, 8, 5]), strides) self.assertAllEqual(self.evaluate(y1), self.evaluate(y2)) def test3D(self): t = array_ops.ones([2, 4, 4, 4, 3]) v = array_ops.ones([2, 2, 2, 5, 3]) strides = 2 y1 = nn_ops.conv3d_transpose_v2(t, v, [2, 8, 8, 8, 5], strides) y2 = nn_ops.conv_transpose(t, v, [2, 8, 8, 8, 5], strides) self.assertAllEqual(self.evaluate(y1), self.evaluate(y2)) def test3DTensor(self): t = array_ops.ones([2, 4, 4, 4, 3]) v = array_ops.ones([2, 2, 2, 5, 3]) strides = 2 y1 = nn_ops.conv3d_transpose_v2(t, v, [2, 8, 8, 8, 5], strides) y2 = nn_ops.conv_transpose(t, v, constant_op.constant([2, 8, 8, 8, 5]), strides) self.assertAllEqual(self.evaluate(y1), self.evaluate(y2)) def testIncorrectSizeInputSmall(self): with self.assertRaisesRegex( ValueError, 
"output_shape must be of length 3, 4 or 5 but was 2."): nn_ops.conv_transpose(None, 2, [2, 3], "SAME") def testIncorrectSizeInput(self): with self.assertRaisesRegex( ValueError, "output_shape must be of length 3, 4 or 5 but was 6."): nn_ops.conv_transpose(None, 2, [2, 3, 4, 2, 5, 1], "SAME") def testTensorsNoShape(self): with self.assertRaisesRegex( ValueError, "output_shape must be a tensor or sized collection."): nn_ops.conv_transpose(None, None, None, None) if __name__ == "__main__": test_lib.main()
tensorflow-master
tensorflow/python/ops/nn_test.py
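DataFormatDimMapTest above specifies nn_ops.data_format_dim_map purely through expected values; the small pure-Python model below (helper name invented) states the rule those values follow: a dimension index into src_format maps to the position of the same axis character in dst_format, with negative indices wrapping modulo the rank.

def dim_map_reference(indices, src_format="NHWC", dst_format="NCHW"):
  # Pure-Python model of the mapping exercised by DataFormatDimMapTest.
  def map_one(i):
    axis = src_format[i % len(src_format)]  # negative indices wrap around
    return dst_format.index(axis)
  if isinstance(indices, int):
    return map_one(indices)
  return [map_one(i) for i in indices]

print(dim_map_reference([1, 3, -2]))  # [2, 1, 3], as in test()
print(dim_map_reference([-4, -3, -2, -1, 0, 1, 2, 3],
                        dst_format="WHCN"))  # [3, 1, 0, 2, 3, 1, 0, 2]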
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Module contains the implementation of RNN cell wrappers.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import hashlib import numbers import sys import types as python_types import warnings from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import tensor_util from tensorflow.python.keras.utils import generic_utils from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn_ops from tensorflow.python.ops import random_ops from tensorflow.python.ops import tensor_array_ops from tensorflow.python.util import nest class DropoutWrapperBase(object): """Operator adding dropout to inputs and outputs of the given cell.""" def __init__(self, cell, input_keep_prob=1.0, output_keep_prob=1.0, state_keep_prob=1.0, variational_recurrent=False, input_size=None, dtype=None, seed=None, dropout_state_filter_visitor=None, **kwargs): """Create a cell with added input, state, and/or output dropout. If `variational_recurrent` is set to `True` (**NOT** the default behavior), then the same dropout mask is applied at every step, as described in: [A Theoretically Grounded Application of Dropout in Recurrent Neural Networks. Y. Gal, Z. Ghahramani](https://arxiv.org/abs/1512.05287). Otherwise a different dropout mask is applied at every time step. Note, by default (unless a custom `dropout_state_filter` is provided), the memory state (`c` component of any `LSTMStateTuple`) passing through a `DropoutWrapper` is never modified. This behavior is described in the above article. Args: cell: an RNNCell, a projection to output_size is added to it. input_keep_prob: unit Tensor or float between 0 and 1, input keep probability; if it is constant and 1, no input dropout will be added. output_keep_prob: unit Tensor or float between 0 and 1, output keep probability; if it is constant and 1, no output dropout will be added. state_keep_prob: unit Tensor or float between 0 and 1, output keep probability; if it is constant and 1, no output dropout will be added. State dropout is performed on the outgoing states of the cell. **Note** the state components to which dropout is applied when `state_keep_prob` is in `(0, 1)` are also determined by the argument `dropout_state_filter_visitor` (e.g. by default dropout is never applied to the `c` component of an `LSTMStateTuple`). variational_recurrent: Python bool. If `True`, then the same dropout pattern is applied across all time steps per run call. If this parameter is set, `input_size` **must** be provided. input_size: (optional) (possibly nested tuple of) `TensorShape` objects containing the depth(s) of the input tensors expected to be passed in to the `DropoutWrapper`. 
Required and used **iff** `variational_recurrent = True` and `input_keep_prob < 1`. dtype: (optional) The `dtype` of the input, state, and output tensors. Required and used **iff** `variational_recurrent = True`. seed: (optional) integer, the randomness seed. dropout_state_filter_visitor: (optional), default: (see below). Function that takes any hierarchical level of the state and returns a scalar or depth=1 structure of Python booleans describing which terms in the state should be dropped out. In addition, if the function returns `True`, dropout is applied across this sublevel. If the function returns `False`, dropout is not applied across this entire sublevel. Default behavior: perform dropout on all terms except the memory (`c`) state of `LSTMCellState` objects, and don't try to apply dropout to `TensorArray` objects: ``` def dropout_state_filter_visitor(s): if isinstance(s, LSTMCellState): # Never perform dropout on the c state. return LSTMCellState(c=False, h=True) elif isinstance(s, TensorArray): return False return True ``` **kwargs: dict of keyword arguments for base layer. Raises: TypeError: if `cell` is not an `RNNCell`, or `keep_state_fn` is provided but not `callable`. ValueError: if any of the keep_probs are not between 0 and 1. """ super(DropoutWrapperBase, self).__init__(cell, dtype=dtype, **kwargs) if (dropout_state_filter_visitor is not None and not callable(dropout_state_filter_visitor)): raise TypeError("dropout_state_filter_visitor must be callable") self._dropout_state_filter = ( dropout_state_filter_visitor or _default_dropout_state_filter_visitor) with ops.name_scope("DropoutWrapperInit"): def tensor_and_const_value(v): tensor_value = ops.convert_to_tensor(v) const_value = tensor_util.constant_value(tensor_value) return (tensor_value, const_value) for prob, attr in [(input_keep_prob, "input_keep_prob"), (state_keep_prob, "state_keep_prob"), (output_keep_prob, "output_keep_prob")]: tensor_prob, const_prob = tensor_and_const_value(prob) if const_prob is not None: if const_prob < 0 or const_prob > 1: raise ValueError("Parameter %s must be between 0 and 1: %d" % (attr, const_prob)) setattr(self, "_%s" % attr, float(const_prob)) else: setattr(self, "_%s" % attr, tensor_prob) # Set variational_recurrent, seed before running the code below self._variational_recurrent = variational_recurrent self._input_size = input_size self._seed = seed self._recurrent_input_noise = None self._recurrent_state_noise = None self._recurrent_output_noise = None if variational_recurrent: if dtype is None: raise ValueError( "When variational_recurrent=True, dtype must be provided") def convert_to_batch_shape(s): # Prepend a 1 for the batch dimension; for recurrent # variational dropout we use the same dropout mask for all # batch elements. 
return array_ops.concat(([1], tensor_shape.TensorShape(s).as_list()), 0) def batch_noise(s, inner_seed): shape = convert_to_batch_shape(s) return random_ops.random_uniform(shape, seed=inner_seed, dtype=dtype) if (not isinstance(self._input_keep_prob, numbers.Real) or self._input_keep_prob < 1.0): if input_size is None: raise ValueError( "When variational_recurrent=True and input_keep_prob < 1.0 or " "is unknown, input_size must be provided") self._recurrent_input_noise = _enumerated_map_structure_up_to( input_size, lambda i, s: batch_noise(s, inner_seed=self._gen_seed("input", i)), input_size) self._recurrent_state_noise = _enumerated_map_structure_up_to( cell.state_size, lambda i, s: batch_noise(s, inner_seed=self._gen_seed("state", i)), cell.state_size) self._recurrent_output_noise = _enumerated_map_structure_up_to( cell.output_size, lambda i, s: batch_noise(s, inner_seed=self._gen_seed("output", i)), cell.output_size) def _gen_seed(self, salt_prefix, index): if self._seed is None: return None salt = "%s_%d" % (salt_prefix, index) string = (str(self._seed) + salt).encode("utf-8") return int(hashlib.md5(string).hexdigest()[:8], 16) & 0x7FFFFFFF @property def wrapped_cell(self): return self.cell @property def state_size(self): return self.cell.state_size @property def output_size(self): return self.cell.output_size def zero_state(self, batch_size, dtype): with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]): return self.cell.zero_state(batch_size, dtype) def _variational_recurrent_dropout_value( self, unused_index, value, noise, keep_prob): """Performs dropout given the pre-calculated noise tensor.""" # uniform [keep_prob, 1.0 + keep_prob) random_tensor = keep_prob + noise # 0. if [keep_prob, 1.0) and 1. if [1.0, 1.0 + keep_prob) binary_tensor = math_ops.floor(random_tensor) ret = math_ops.div(value, keep_prob) * binary_tensor ret.set_shape(value.get_shape()) return ret def _dropout(self, values, salt_prefix, recurrent_noise, keep_prob, shallow_filtered_substructure=None): """Decides whether to perform standard dropout or recurrent dropout.""" if shallow_filtered_substructure is None: # Put something so we traverse the entire structure; inside the # dropout function we check to see if leafs of this are bool or not. shallow_filtered_substructure = values if not self._variational_recurrent: def dropout(i, do_dropout, v): if not isinstance(do_dropout, bool) or do_dropout: return nn_ops.dropout_v2( v, rate=1. - keep_prob, seed=self._gen_seed(salt_prefix, i)) else: return v return _enumerated_map_structure_up_to( shallow_filtered_substructure, dropout, *[shallow_filtered_substructure, values]) else: def dropout(i, do_dropout, v, n): if not isinstance(do_dropout, bool) or do_dropout: return self._variational_recurrent_dropout_value(i, v, n, keep_prob) else: return v return _enumerated_map_structure_up_to( shallow_filtered_substructure, dropout, *[shallow_filtered_substructure, values, recurrent_noise]) def _call_wrapped_cell(self, inputs, state, cell_call_fn, **kwargs): """Runs the wrapped cell and applies dropout. Args: inputs: A tensor with wrapped cell's input. state: A tensor or tuple of tensors with wrapped cell's state. cell_call_fn: Wrapped cell's method to use for step computation (cell's `__call__` or 'call' method). **kwargs: Additional arguments. Returns: A pair containing: - Output: A tensor with cell's output. - New state: A tensor or tuple of tensors with new wrapped cell's state. 
""" def _should_dropout(p): return (not isinstance(p, float)) or p < 1 if _should_dropout(self._input_keep_prob): inputs = self._dropout(inputs, "input", self._recurrent_input_noise, self._input_keep_prob) output, new_state = cell_call_fn(inputs, state, **kwargs) if _should_dropout(self._state_keep_prob): # Identify which subsets of the state to perform dropout on and # which ones to keep. shallow_filtered_substructure = nest.get_traverse_shallow_structure( self._dropout_state_filter, new_state) new_state = self._dropout(new_state, "state", self._recurrent_state_noise, self._state_keep_prob, shallow_filtered_substructure) if _should_dropout(self._output_keep_prob): output = self._dropout(output, "output", self._recurrent_output_noise, self._output_keep_prob) return output, new_state def get_config(self): """Returns the config of the dropout wrapper.""" config = { "input_keep_prob": self._input_keep_prob, "output_keep_prob": self._output_keep_prob, "state_keep_prob": self._state_keep_prob, "variational_recurrent": self._variational_recurrent, "input_size": self._input_size, "seed": self._seed, } if self._dropout_state_filter != _default_dropout_state_filter_visitor: function, function_type, function_module = _serialize_function_to_config( self._dropout_state_filter) config.update({"dropout_fn": function, "dropout_fn_type": function_type, "dropout_fn_module": function_module}) base_config = super(DropoutWrapperBase, self).get_config() return dict(list(base_config.items()) + list(config.items())) @classmethod def from_config(cls, config, custom_objects=None): if "dropout_fn" in config: config = config.copy() dropout_state_filter = _parse_config_to_function( config, custom_objects, "dropout_fn", "dropout_fn_type", "dropout_fn_module") config.pop("dropout_fn") config["dropout_state_filter_visitor"] = dropout_state_filter return super(DropoutWrapperBase, cls).from_config( config, custom_objects=custom_objects) class ResidualWrapperBase(object): """RNNCell wrapper that ensures cell inputs are added to the outputs.""" def __init__(self, cell, residual_fn=None, **kwargs): """Constructs a `ResidualWrapper` for `cell`. Args: cell: An instance of `RNNCell`. residual_fn: (Optional) The function to map raw cell inputs and raw cell outputs to the actual cell outputs of the residual network. Defaults to calling nest.map_structure on (lambda i, o: i + o), inputs and outputs. **kwargs: dict of keyword arguments for base layer. """ super(ResidualWrapperBase, self).__init__(cell, **kwargs) self._residual_fn = residual_fn @property def state_size(self): return self.cell.state_size @property def output_size(self): return self.cell.output_size def zero_state(self, batch_size, dtype): with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]): return self.cell.zero_state(batch_size, dtype) def _call_wrapped_cell(self, inputs, state, cell_call_fn, **kwargs): """Run the cell and then apply the residual_fn on its inputs to its outputs. Args: inputs: cell inputs. state: cell state. cell_call_fn: Wrapped cell's method to use for step computation (cell's `__call__` or 'call' method). **kwargs: Additional arguments passed to the wrapped cell's `call`. Returns: Tuple of cell outputs and new state. Raises: TypeError: If cell inputs and outputs have different structure (type). ValueError: If cell inputs and outputs have different structure (value). 
""" outputs, new_state = cell_call_fn(inputs, state, **kwargs) # Ensure shapes match def assert_shape_match(inp, out): inp.get_shape().assert_is_compatible_with(out.get_shape()) def default_residual_fn(inputs, outputs): nest.assert_same_structure(inputs, outputs) nest.map_structure(assert_shape_match, inputs, outputs) return nest.map_structure(lambda inp, out: inp + out, inputs, outputs) res_outputs = (self._residual_fn or default_residual_fn)(inputs, outputs) return (res_outputs, new_state) def get_config(self): """Returns the config of the residual wrapper.""" if self._residual_fn is not None: function, function_type, function_module = _serialize_function_to_config( self._residual_fn) config = {"residual_fn": function, "residual_fn_type": function_type, "residule_fn_module": function_module} else: config = {} base_config = super(ResidualWrapperBase, self).get_config() return dict(list(base_config.items()) + list(config.items())) @classmethod def from_config(cls, config, custom_objects=None): if "residual_fn" in config: config = config.copy() residual_function = _parse_config_to_function( config, custom_objects, "residual_fn", "residual_fn_type", "residule_fn_module") config["residual_fn"] = residual_function return super(ResidualWrapperBase, cls).from_config( config, custom_objects=custom_objects) class DeviceWrapperBase(object): """Operator that ensures an RNNCell runs on a particular device.""" def __init__(self, cell, device, **kwargs): """Construct a `DeviceWrapper` for `cell` with device `device`. Ensures the wrapped `cell` is called with `tf.device(device)`. Args: cell: An instance of `RNNCell`. device: A device string or function, for passing to `tf.device`. **kwargs: dict of keyword arguments for base layer. """ super(DeviceWrapperBase, self).__init__(cell, **kwargs) self._device = device @property def state_size(self): return self.cell.state_size @property def output_size(self): return self.cell.output_size def zero_state(self, batch_size, dtype): with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]): with ops.device(self._device): return self.cell.zero_state(batch_size, dtype) def _call_wrapped_cell(self, inputs, state, cell_call_fn, **kwargs): """Run the cell on specified device.""" with ops.device(self._device): return cell_call_fn(inputs, state, **kwargs) def get_config(self): config = {"device": self._device} base_config = super(DeviceWrapperBase, self).get_config() return dict(list(base_config.items()) + list(config.items())) def _serialize_function_to_config(function): """Serialize the function for get_config().""" if isinstance(function, python_types.LambdaType): output = generic_utils.func_dump(function) output_type = "lambda" module = function.__module__ elif callable(function): output = function.__name__ output_type = "function" module = function.__module__ else: raise ValueError("Unrecognized function type for input: {}".format( type(function))) return output, output_type, module def _parse_config_to_function(config, custom_objects, func_attr_name, func_type_attr_name, module_attr_name): """Reconstruct the function from the config.""" globs = globals() module = config.pop(module_attr_name, None) if module in sys.modules: globs.update(sys.modules[module].__dict__) elif module is not None: # Note: we don't know the name of the function if it's a lambda. warnings.warn("{} is not loaded, but a layer uses it. 
" "It may cause errors.".format(module), UserWarning) if custom_objects: globs.update(custom_objects) function_type = config.pop(func_type_attr_name) if function_type == "function": # Simple lookup in custom objects function = generic_utils.deserialize_keras_object( config[func_attr_name], custom_objects=custom_objects, printable_module_name="function in wrapper") elif function_type == "lambda": # Unsafe deserialization from bytecode function = generic_utils.func_load( config[func_attr_name], globs=globs) else: raise TypeError("Unknown function type:", function_type) return function def _default_dropout_state_filter_visitor(substate): from tensorflow.python.ops.rnn_cell_impl import LSTMStateTuple # pylint: disable=g-import-not-at-top if isinstance(substate, LSTMStateTuple): # Do not perform dropout on the memory state. return LSTMStateTuple(c=False, h=True) elif isinstance(substate, tensor_array_ops.TensorArray): return False return True def _enumerated_map_structure_up_to(shallow_structure, map_fn, *args, **kwargs): ix = [0] def enumerated_fn(*inner_args, **inner_kwargs): r = map_fn(ix[0], *inner_args, **inner_kwargs) ix[0] += 1 return r return nest.map_structure_up_to(shallow_structure, enumerated_fn, *args, **kwargs)
tensorflow-master
tensorflow/python/ops/rnn_cell_wrapper_impl.py
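# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of rnn_cell_wrapper_impl.py above).
# A minimal, hedged example of how the wrapper bases defined in that file are
# typically consumed through the concrete classes exposed as
# `tf.compat.v1.nn.rnn_cell.DropoutWrapper` / `ResidualWrapper` /
# `DeviceWrapper`. The cell size, keep probabilities, device string and
# batch/time dimensions below are arbitrary assumptions chosen for the
# example; v1-style graph construction is assumed.
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

# Base recurrent cell. Input depth matches num_units so the default residual
# connection (inputs + outputs) is shape-compatible.
base_cell = tf.nn.rnn_cell.LSTMCell(num_units=64)

# Input/output/state dropout. With variational_recurrent=True the same
# dropout mask is reused at every time step, so input_size and dtype must be
# supplied (see the DropoutWrapperBase docstring above). By default the `c`
# component of the LSTM state is never dropped.
dropout_cell = tf.nn.rnn_cell.DropoutWrapper(
    base_cell,
    input_keep_prob=0.9,
    output_keep_prob=0.9,
    state_keep_prob=0.9,
    variational_recurrent=True,
    input_size=tf.TensorShape([64]),  # depth of the per-step inputs
    dtype=tf.float32)

# Residual connection (inputs added to outputs) and explicit device pinning.
residual_cell = tf.nn.rnn_cell.ResidualWrapper(dropout_cell)
device_cell = tf.nn.rnn_cell.DeviceWrapper(residual_cell, "/device:CPU:0")

inputs = tf.placeholder(tf.float32, [None, 10, 64])  # [batch, time, depth]
outputs, final_state = tf.nn.dynamic_rnn(device_cell, inputs, dtype=tf.float32)
# ---------------------------------------------------------------------------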
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """TensorArray: a dynamically sized array of Tensors.""" # Mixture of pep8 and non-pep8 names, so disable pylint bad-name # pylint: disable=g-bad-name from __future__ import absolute_import from __future__ import division from __future__ import print_function import contextlib import traceback import weakref from tensorflow.python.eager import context from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors_impl from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import tensor_spec from tensorflow.python.framework import tensor_util from tensorflow.python.framework import type_spec from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_util from tensorflow.python.ops import gen_control_flow_ops from tensorflow.python.ops import gen_data_flow_ops from tensorflow.python.ops import list_ops from tensorflow.python.ops import math_ops from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util import tf_should_use from tensorflow.python.util.tf_export import tf_export # _GraphTensorArray accesses many of the hidden generated ops, but is in # fact built to wrap these methods. # pylint: disable=protected-access class _GraphTensorArray(object): """Graph-mode implementation of TensorArray. """ def __init__(self, dtype, size=None, dynamic_size=None, clear_after_read=None, tensor_array_name=None, handle=None, flow=None, infer_shape=True, element_shape=None, colocate_with_first_write_call=True, name=None): """Constructs a graph mode TensorArray. Args: dtype: (required) data type of the TensorArray. size: (optional) int32 scalar `Tensor`: the size of the TensorArray. Required if handle is not provided. dynamic_size: (optional) Python bool: If true, writes to the TensorArray can grow the TensorArray past its initial size. Default: False. clear_after_read: Boolean (optional, default: True). If True, clear TensorArray values after reading them. This disables read-many semantics, but allows early release of memory. tensor_array_name: (optional) Python string: the name of the TensorArray. This is used when creating the TensorArray handle. If this value is set, handle should be None. handle: (optional) A `Tensor` handle to an existing TensorArray. If this is set, tensor_array_name should be None. Only supported in graph mode. flow: (optional) A float `Tensor` scalar coming from an existing `TensorArray.flow`. Only supported in graph mode. infer_shape: (optional, default: True) If True, shape inference is enabled. In this case, all elements must have the same shape. element_shape: (optional, default: None) A `TensorShape` object specifying the shape constraints of each of the elements of the TensorArray. 
Need not be fully defined. colocate_with_first_write_call: If `True`, the TensorArray will be colocated on the same device as the Tensor used on its first write (write operations include `write`, `unstack`, and `split`). If `False`, the TensorArray will be placed on the device determined by the device context available during its initialization. name: A name for the operation (optional). Raises: ValueError: if both handle and tensor_array_name are provided. TypeError: if handle is provided but is not a Tensor. """ if handle is not None and tensor_array_name: raise ValueError( "Cannot construct with both handle and tensor_array_name") if handle is not None and not isinstance(handle, ops.Tensor): raise TypeError("Handle must be a Tensor") if handle is None and size is None: raise ValueError("Size must be provided if handle is not provided") if handle is not None and size is not None: raise ValueError("Cannot provide both a handle and size " "at the same time") if handle is not None and element_shape is not None: raise ValueError("Cannot provide both a handle and element_shape " "at the same time") if handle is not None and dynamic_size is not None: raise ValueError("Cannot provide both a handle and dynamic_size " "at the same time") if handle is not None and clear_after_read is not None: raise ValueError("Cannot provide both a handle and clear_after_read " "at the same time") if clear_after_read is None: clear_after_read = True self._dynamic_size = dynamic_size or False self._dtype = dtypes.as_dtype(dtype).base_dtype # Used to keep track of what tensors the TensorArray should be # colocated with. We choose to colocate the TensorArray with the # first tensor written to it. self._colocate_with_first_write_call = colocate_with_first_write_call if colocate_with_first_write_call: self._colocate_with = [] else: self._colocate_with = None # Record the current static shape for the array elements. The element # shape is defined either by `element_shape` or the shape of the tensor # of the first write. If `infer_shape` is true, all writes checks for # shape equality. if element_shape is None: self._infer_shape = infer_shape self._element_shape = [] else: self._infer_shape = True self._element_shape = [tensor_shape.as_shape(element_shape)] with ops.name_scope(name, "TensorArray", [handle, size, flow]) as scope: if handle is not None: self._handle = handle if flow is None: raise ValueError("flow must not be None if handle is not None.") self._flow = flow else: # Construct the TensorArray with an empty device. The first # write into the TensorArray from a Tensor with a set device # will retroactively set the device value of this op. def create(): """Create the TensorArray op.""" return gen_data_flow_ops.tensor_array_v3( dtype=dtype, size=size, element_shape=element_shape, identical_element_shapes=infer_shape, dynamic_size=self._dynamic_size, clear_after_read=clear_after_read, tensor_array_name=tensor_array_name, name=scope) if colocate_with_first_write_call: with ops.device(None), ops.colocate_with(None, ignore_existing=True): self._handle, self._flow = create() else: self._handle, self._flow = create() @property def flow(self): return self._flow @property def dtype(self): return self._dtype @property def handle(self): return self._handle @property def element_shape(self): if self._element_shape: return self._element_shape[0] else: return tensor_shape.unknown_shape(None) def _merge_element_shape(self, shape): """Changes the element shape of the array given a shape to merge with. 
Args: shape: A `TensorShape` object to merge with. Raises: ValueError: if the provided shape is incompatible with the current element shape of the `TensorArray`. """ if self._element_shape: if not shape.is_compatible_with(self._element_shape[0]): raise ValueError( "Inconsistent shapes: saw %s but expected %s " "(and infer_shape=True)" % (shape, self._element_shape[0])) self._element_shape[0] = self._element_shape[0].merge_with(shape) else: self._element_shape.append(shape) @contextlib.contextmanager def _maybe_colocate_with(self, value): """Colocate operations with an internal colocation group or `value`. Args: value: `Tensor`, the tensor to try to colocate with. Yields: Does not yield anything, but the new context is a colocation context. If no internal colocation group is set, colocate with `value` and set the internal colocation group to be value. """ if not self._colocate_with_first_write_call: yield else: if not self._colocate_with: self._colocate_with.append(value) with ops.colocate_with(self._colocate_with[0]): yield def identity(self): """See TensorArray.""" flow = array_ops.identity(self._flow) ta = TensorArray( dtype=self._dtype, handle=self._handle, flow=flow, infer_shape=self._infer_shape, colocate_with_first_write_call=self._colocate_with_first_write_call) ta._element_shape = self._element_shape ta._colocate_with = self._colocate_with ta._dynamic_size = self._dynamic_size return ta def grad(self, source, flow=None, name=None): """See TensorArray.""" # tensor_array_grad requires a flow input when forward # TensorArrays are dynamically sized. This forces the creation # of the grad TensorArray only once the final forward array's size # is fixed. if flow is None: flow = self.flow with ops.name_scope(name, "TensorArrayGrad", [self._handle]): with ops.colocate_with(self._handle): g_handle, unused_flow = gen_data_flow_ops.tensor_array_grad_v3( handle=self._handle, source=source, flow_in=flow, name=name) with ops.control_dependencies([g_handle]): flow = array_ops.identity(flow, name="gradient_flow") g = TensorArray( dtype=self._dtype, handle=g_handle, flow=flow, infer_shape=self._infer_shape, colocate_with_first_write_call=False) g._element_shape = self._element_shape return g def read(self, index, name=None): """See TensorArray.""" value = gen_data_flow_ops.tensor_array_read_v3( handle=self._handle, index=index, flow_in=self._flow, dtype=self._dtype, name=name) if self._element_shape: value.set_shape(self._element_shape[0].dims) return value @tf_should_use.should_use_result def write(self, index, value, name=None): """See TensorArray.""" with ops.name_scope(name, "TensorArrayWrite", [self._handle, index, value]): # TODO(b/129870929): Fix after all callers provide proper init dtype. 
value = ops.convert_to_tensor( value, preferred_dtype=self._dtype, name="value") _check_dtypes(value, self._dtype) if self._infer_shape: self._merge_element_shape(value.shape) with self._maybe_colocate_with(value): flow_out = gen_data_flow_ops.tensor_array_write_v3( handle=self._handle, index=index, value=value, flow_in=self._flow, name=name) ta = TensorArray( dtype=self._dtype, handle=self._handle, flow=flow_out, colocate_with_first_write_call=self._colocate_with_first_write_call) ta._infer_shape = self._infer_shape ta._element_shape = self._element_shape ta._colocate_with = self._colocate_with ta._dynamic_size = self._dynamic_size return ta def stack(self, name=None): """See TensorArray.""" with ops.colocate_with(self._handle): with ops.name_scope(name, "TensorArrayStack", [self._handle]): return self.gather(math_ops.range(0, self.size()), name=name) def gather(self, indices, name=None): """See TensorArray.""" if self._element_shape: element_shape = self._element_shape[0] else: element_shape = tensor_shape.unknown_shape(None) value = gen_data_flow_ops.tensor_array_gather_v3( handle=self._handle, indices=indices, flow_in=self._flow, dtype=self._dtype, name=name, element_shape=element_shape) if self._element_shape and self._element_shape[0].dims is not None: value.set_shape([None] + self._element_shape[0].dims) return value def concat(self, name=None): """See TensorArray.""" if self._element_shape and self._element_shape[0].dims is not None: element_shape_except0 = ( tensor_shape.TensorShape(self._element_shape[0].dims[1:])) else: element_shape_except0 = tensor_shape.TensorShape(None) value, _ = gen_data_flow_ops.tensor_array_concat_v3( handle=self._handle, flow_in=self._flow, dtype=self._dtype, name=name, element_shape_except0=element_shape_except0) if self._element_shape and self._element_shape[0].dims is not None: value.set_shape([None] + self._element_shape[0].dims[1:]) return value @tf_should_use.should_use_result def unstack(self, value, name=None): """See TensorArray.""" with ops.name_scope(name, "TensorArrayUnstack", [self._handle, value]): num_elements = array_ops.shape(value)[0] return self.scatter( indices=math_ops.range(0, num_elements), value=value, name=name) @tf_should_use.should_use_result def scatter(self, indices, value, name=None): """See TensorArray.""" with ops.name_scope(name, "TensorArrayScatter", [self._handle, value, indices]): # TODO(b/129870929): Fix after all callers provide proper init dtype. 
value = ops.convert_to_tensor( value, preferred_dtype=self._dtype, name="value") _check_dtypes(value, self._dtype) if self._infer_shape and not context.executing_eagerly(): self._merge_element_shape(value.shape[1:]) with self._maybe_colocate_with(value): flow_out = gen_data_flow_ops.tensor_array_scatter_v3( handle=self._handle, indices=indices, value=value, flow_in=self._flow, name=name) ta = TensorArray( dtype=self._dtype, handle=self._handle, flow=flow_out, colocate_with_first_write_call=self._colocate_with_first_write_call) ta._infer_shape = self._infer_shape ta._element_shape = self._element_shape ta._colocate_with = self._colocate_with ta._dynamic_size = self._dynamic_size return ta @tf_should_use.should_use_result def split(self, value, lengths, name=None): """See TensorArray.""" with ops.name_scope(name, "TensorArraySplit", [self._handle, value, lengths]): value = ops.convert_to_tensor(value, dtype=self._dtype, name="value") with self._maybe_colocate_with(value): lengths_64 = math_ops.cast(lengths, dtypes.int64) if self._infer_shape and not context.executing_eagerly(): clengths = tensor_util.constant_value(lengths_64) if value.shape.dims is not None: if clengths is not None and clengths.max() == clengths.min(): self._merge_element_shape( tensor_shape.TensorShape([clengths[0]]).concatenate( value.shape[1:])) flow_out = gen_data_flow_ops.tensor_array_split_v3( handle=self._handle, value=value, lengths=lengths_64, flow_in=self._flow, name=name) ta = TensorArray( dtype=self._dtype, handle=self._handle, flow=flow_out, colocate_with_first_write_call=self._colocate_with_first_write_call) ta._infer_shape = self._infer_shape ta._element_shape = self._element_shape ta._colocate_with = self._colocate_with ta._dynamic_size = self._dynamic_size return ta def size(self, name=None): """See TensorArray.""" return gen_data_flow_ops.tensor_array_size_v3( handle=self._handle, flow_in=self.flow, name=name) @tf_should_use.should_use_result def close(self, name=None): """See TensorArray.""" return gen_data_flow_ops.tensor_array_close_v3( handle=self._handle, name=name) class _GraphTensorArrayV2(object): """Graph-mode implementation of TensorArray backed by TensorLists. The backing tensor of this TensorArray is a TensorList variant tensor which is stored in the `flow`. The `handle` is always none here. The reason we use the `flow` field and not the `handle` field is to ensure backwards compatibility with legacy control flow. """ def __init__(self, dtype, size=None, dynamic_size=None, clear_after_read=None, tensor_array_name=None, handle=None, flow=None, infer_shape=True, element_shape=None, colocate_with_first_write_call=True, name=None): """Constructs a graph mode TensorArray. Args: dtype: (required) data type of the TensorArray. size: (optional) int32 scalar `Tensor`: the size of the TensorArray. Required if flow is not provided. dynamic_size: (optional) Python bool: If true, writes to the TensorArray can grow the TensorArray past its initial size. Default: False. clear_after_read: (optional) unused. Not supported in TensorLists. tensor_array_name: (optional) unused. handle: (optional) Must always be None. flow: (optional) A variant `Tensor` scalar for a TensorList. infer_shape: (optional, default: True) If True, shape inference is enabled. In this case, all elements must have the same shape. element_shape: (optional, default: None) A `TensorShape` object specifying the shape constraints of each of the elements of the TensorArray. Need not be fully defined. colocate_with_first_write_call: (optional). 
unused. name: (optional) A name for the operation. Raises: ValueError: if both handle and tensor_array_name are provided. TypeError: if handle is provided but is not a Tensor. """ assert handle is None del handle del clear_after_read del tensor_array_name del colocate_with_first_write_call self._dynamic_size = dynamic_size if (flow is not None and (not isinstance(flow, ops.Tensor) or flow.dtype != dtypes.variant)): raise TypeError("flow must be a variant tensor") if flow is None and size is None: raise ValueError("Size must be provided if flow is not provided") if flow is not None and size is not None: raise ValueError("Cannot provide both a flow and size " "at the same time") if flow is not None and element_shape is not None: raise ValueError("Cannot provide both a flow and element_shape " "at the same time") self._dtype = dtypes.as_dtype(dtype).base_dtype # Record the current static shape for the array elements. The element # shape is defined either by `element_shape` or the shape of the tensor # of the first write. If `infer_shape` is true, all writes checks for # shape equality. if element_shape is None: self._infer_shape = infer_shape self._element_shape = [] else: self._infer_shape = True self._element_shape = [tensor_shape.as_shape(element_shape)] with ops.name_scope(name, "TensorArrayV2", [size, flow]) as scope: if flow is None: self._flow = list_ops.tensor_list_reserve( element_shape=element_shape, num_elements=size, element_dtype=dtype, name=scope) else: self._flow = flow # For backwards compatibility. self._colocate_with_first_write_call = None self._colocate_with = None @property def flow(self): return self._flow @property def dtype(self): return self._dtype @property def element_shape(self): if self._element_shape: return self._element_shape[0] else: return tensor_shape.unknown_shape(None) @property def handle(self): # We intentionally do not raise an error so that legacy while_loop does not # complain. return None def _merge_element_shape(self, shape): """Changes the element shape of the array given a shape to merge with. Args: shape: A `TensorShape` object to merge with. Raises: ValueError: if the provided shape is incompatible with the current element shape of the `TensorArray`. """ if self._element_shape: if not shape.is_compatible_with(self._element_shape[0]): raise ValueError( "Inconsistent shapes: saw %s but expected %s " "(and infer_shape=True)" % (shape, self._element_shape[0])) self._element_shape[0] = self._element_shape[0].merge_with(shape) else: self._element_shape.append(shape) def identity(self): """See TensorArray.""" flow = array_ops.identity(self._flow) return build_ta_with_new_flow(self, flow) def grad(self, source, flow=None, name=None): """Not supported.""" raise NotImplementedError() def read(self, index, name=None): """See TensorArray.""" with ops.name_scope(name, "TensorArrayV2Read", [self._flow, index]): if self._element_shape: element_shape = self._element_shape[0] else: element_shape = tensor_shape.unknown_shape(None) value = list_ops.tensor_list_get_item( input_handle=self._flow, index=index, element_dtype=self._dtype, element_shape=element_shape, name=name) return value @tf_should_use.should_use_result def write(self, index, value, name=None): """See TensorArray.""" with ops.name_scope(name, "TensorArrayV2Write", [self._flow, index, value]): # TODO(b/129870929): Fix after all callers provide proper init dtype. 
value = ops.convert_to_tensor( value, preferred_dtype=self._dtype, name="value") _check_dtypes(value, self._dtype) if self._infer_shape: self._merge_element_shape(value.shape) flow_out = list_ops.tensor_list_set_item( input_handle=self._flow, index=index, item=value, resize_if_index_out_of_bounds=self._dynamic_size, name=name) return build_ta_with_new_flow(self, flow_out) def stack(self, name=None): """See TensorArray.""" with ops.name_scope(name, "TensorArrayV2Stack", [self._flow]): if self._element_shape: element_shape = self._element_shape[0] else: element_shape = tensor_shape.unknown_shape(None) value = list_ops.tensor_list_stack( input_handle=self._flow, element_dtype=self._dtype, element_shape=element_shape) return value def gather(self, indices, name=None): """See TensorArray.""" if self._element_shape: element_shape = self._element_shape[0] else: element_shape = tensor_shape.unknown_shape(None) value = list_ops.tensor_list_gather( input_handle=self._flow, indices=indices, element_dtype=self._dtype, element_shape=element_shape, name=name) return value def concat(self, name=None): """See TensorArray.""" if self._element_shape and self._element_shape[0].dims is not None: element_shape = [None] + self._element_shape[0].dims[1:] else: element_shape = None value = list_ops.tensor_list_concat( input_handle=self._flow, element_dtype=self._dtype, element_shape=element_shape, name=name) return value @tf_should_use.should_use_result def unstack(self, value, name=None): """See TensorArray.""" with ops.name_scope(name, "TensorArrayUnstack", [self._flow, value]): # TODO(b/129870929): Fix after all callers provide proper init dtype. value = ops.convert_to_tensor( value, preferred_dtype=self._dtype, name="value") _check_dtypes(value, self._dtype) if self._infer_shape and not context.executing_eagerly(): self._merge_element_shape(value.shape[1:]) flow_out = list_ops.tensor_list_from_tensor( tensor=value, element_shape=value.shape[1:]) return build_ta_with_new_flow(self, flow_out) @tf_should_use.should_use_result def scatter(self, indices, value, name=None): """See TensorArray.""" with ops.name_scope(name, "TensorArrayScatter", [self._flow, value, indices]): # TODO(b/129870929): Fix after all callers provide proper init dtype. value = ops.convert_to_tensor( value, preferred_dtype=self._dtype, name="value") _check_dtypes(value, self._dtype) if self._infer_shape and not context.executing_eagerly(): self._merge_element_shape(value.shape[1:]) element_shape = self._element_shape[0] if self._element_shape else None flow_out = list_ops.tensor_list_scatter( tensor=value, indices=indices, input_handle=self._flow) return build_ta_with_new_flow(self, flow_out) @tf_should_use.should_use_result def split(self, value, lengths, name=None): """See TensorArray.""" with ops.name_scope(name, "TensorArraySplit", [self._flow, value, lengths]): # TODO(b/129870929): Fix after all callers provide proper init dtype. 
value = ops.convert_to_tensor( value, preferred_dtype=self._dtype, name="value") _check_dtypes(value, self._dtype) lengths_64 = math_ops.cast(lengths, dtypes.int64) if self._infer_shape and not context.executing_eagerly(): clengths = tensor_util.constant_value(lengths_64) if value.shape.dims is not None: if clengths is not None and clengths.max() == clengths.min(): self._merge_element_shape( tensor_shape.TensorShape([clengths[0]]).concatenate( value.shape[1:])) flow_out = list_ops.tensor_list_split( tensor=value, lengths=lengths_64, element_shape=self._element_shape[0] if self._element_shape else None, name=name) return build_ta_with_new_flow(self, flow_out) def size(self, name=None): """See TensorArray.""" return list_ops.tensor_list_length(input_handle=self._flow, name=name) @tf_should_use.should_use_result def close(self, name=None): """See TensorArray.""" return gen_control_flow_ops.no_op(name=name) # pylint: enable=protected-access class _EagerTensorArray(object): """Eager-compatible implementation of TensorArray. """ def __init__(self, dtype, size=None, dynamic_size=None, clear_after_read=None, tensor_array_name=None, handle=None, flow=None, infer_shape=True, element_shape=None, colocate_with_first_write_call=True, name=None): """Constructs a TensorArray compatible with eager execution. Args: dtype: (required) data type of the TensorArray. size: (optional) int32 scalar `Tensor`: the size of the TensorArray. Required if handle is not provided. dynamic_size: (optional) Python bool: If true, writes to the TensorArray can grow the TensorArray past its initial size. Default: False. clear_after_read: Boolean (optional, default: True). If True, clear TensorArray values after reading them. This disables read-many semantics, but allows early release of memory. tensor_array_name: unused. handle: unsupported. flow: unsupported. infer_shape: used for error checking, same semantics as TensorArray. element_shape: used for error checking, same semantics as TensorArray. colocate_with_first_write_call: unsupported. name: unsupported. Raises: ValueError: handle or flow are supplied, or if size is not supplied. """ del (flow, tensor_array_name, name) # Unused. if handle is not None: raise ValueError("TensorArray handles are not supported when eager " "execution is enabled.") if size is None: raise ValueError("Size must be declared for TensorArrays when eager " "execution is enabled.") # These attributes are not meaningful when eager is enabled, but some # library functions (e.g., those in control_flow_ops.py) access them to # create new tensor arrays; as such, we define them for the sake of # compatibility. 
self._handle = None # we assign a dummy value to _flow in case other code assumes it to be # a Tensor self._flow = constant_op.constant(0, dtype=dtypes.int32) self._infer_shape = infer_shape self._element_shape = tensor_shape.as_shape(element_shape) self._colocate_with_first_write_call = colocate_with_first_write_call self._dtype = dtypes.as_dtype(dtype).base_dtype self._dynamic_size = dynamic_size or False self._clear_after_read = ( True if clear_after_read is None else clear_after_read) self._previously_read_indices = [] if isinstance(size, ops.EagerTensor): size = size.numpy() self._tensor_array = [None for _ in range(size)] @property def flow(self): """For compatibility; flows are not meaningful when eager is enabled.""" return self._flow @property def dtype(self): return self._dtype @property def handle(self): """For compatibility; handles are not meaningful when eager is enabled.""" return self._handle @property def element_shape(self): if not self._element_shape: return tensor_shape.unknown_shape(None) else: return def identity(self): """See TensorArray.""" return self.parent() def grad(self, source, flow=None, name=None): raise NotImplementedError( "TensorArray.grad is not supported when executing eagerly; eager's " "gradient implementation does not use/need this function to compute " "gradients of operations that use TensorArrays.") def read(self, index, name=None): """See TensorArray.""" del name # not meaningful when executing eagerly. if isinstance(index, ops.EagerTensor): index = index.numpy() if index < 0: raise errors_impl.OutOfRangeError( None, None, "Reading from negative indices (index %d) is not allowed." % index) if index >= len(self._tensor_array): raise errors_impl.OutOfRangeError( None, None, "Tried to read from index %d but array size is: %d" % (index, len(self._tensor_array))) tensor = self._tensor_array[index] if tensor is None: if index in self._previously_read_indices: raise errors_impl.InvalidArgumentError( None, None, "Could not read index %d twice because it was cleared after " "a previous read (perhaps try setting clear_after_read = false?)" % index) else: tensor = self._maybe_zero(index) if self._clear_after_read: self._tensor_array[index] = None self._previously_read_indices.append(index) return tensor def _write(self, index, value): """Writes `value` into index named by `index`. Args: index: 0-D. int32 scalar with the index to write to. value: N-D. Tensor of type `dtype`. The `Tensor` to write to `index`. Raises: errors_impl.InvalidArgumentError: `value` dtype does not match dtype. errors_impl.OutOfRangeError: `index` is out of bounds. ValueError: shape of `value` is not consistent with inferred shape. """ if isinstance(index, ops.EagerTensor): index = index.numpy() if index < 0: raise errors_impl.OutOfRangeError( None, None, "Writing to negative indices (index %d) is not allowed." % index) size = len(self._tensor_array) if index >= size: if not self._dynamic_size: raise errors_impl.OutOfRangeError( None, None, "Tried to write to index %d but array is not resizeable and size " "is: %d" % (index, size)) self._tensor_array.extend([None for _ in range(index - size + 1)]) if not isinstance(value, ops.EagerTensor): # TODO(b/129870929): Fix after all callers provide proper init dtype. 
value = ops.convert_to_tensor( value, preferred_dtype=self._dtype, name="value") if self._dtype != value.dtype: raise errors_impl.InvalidArgumentError( None, None, "TensorArray dtype is %s but Op is trying to write dtype %s" % (self._dtype.name, value.dtype.name)) if self._infer_shape: if not self._element_shape.is_compatible_with(value.shape): raise ValueError("Incompatible shape for value (%s), expected (%s)" % (value.shape, self._element_shape)) else: self._element_shape = self._element_shape.merge_with(value.shape) self._tensor_array[index] = value def write(self, index, value, name=None): """See TensorArray.""" del name # not meaningful when executing eagerly. self._write(index, value) return self.parent() def _maybe_zero(self, ix): val = self._tensor_array[ix] if val is None: val = self._tensor_array[ix] = array_ops.zeros( shape=self._element_shape, dtype=self._dtype) return val def stack(self, name=None): """See TensorArray.""" if self._tensor_array: for ix in range(len(self._tensor_array)): self._maybe_zero(ix) return ops.convert_to_tensor( self._tensor_array, name=name, dtype=self._dtype) def gather(self, indices, name=None): """See TensorArray.""" del name # not meaningful when executing eagerly. if isinstance(indices, ops.EagerTensor): indices = indices.numpy() return array_ops.stack([self._maybe_zero(i) for i in indices]) def concat(self, name=None): """See TensorArray.""" try: return array_ops.concat( [self._maybe_zero(ix) for ix in range(len(self._tensor_array))], 0, name=name) except errors_impl.OpError: # Reproduce a subset of the error-handling for graph-mode TensorArrays. shapes = [t.shape for t in self._tensor_array] ndims = [s.ndims for s in shapes] if 0 in ndims: idx = ndims.index(0) raise errors_impl.InvalidArgumentError( None, None, "Concat saw a scalar shape at index %d but requires " "at least vectors." % idx) else: raise def unstack(self, value, name=None): """See TensorArray.""" tensors = array_ops.unstack(value, name=name) if len(tensors) > len(self._tensor_array) and not self._dynamic_size: raise ValueError( "Cannot unstack %d tensors into a TensorArray of static size %d" % (len(tensors), len(self._tensor_array))) self._tensor_array = tensors return self.parent() def scatter(self, indices, value, name=None): """See TensorArray.""" del name # not meaningful when executing eagerly. if isinstance(indices, ops.EagerTensor): indices = indices.numpy() for index, val in zip(indices, array_ops.unstack(value)): self._write(index, val) # pylint: disable=protected-access return self.parent() def split(self, value, lengths, name=None): """See TensorArray.""" # TODO(b/129870929): Fix after all callers provide proper init dtype. 
value = ops.convert_to_tensor( value, preferred_dtype=self._dtype, name="value") _check_dtypes(value, self._dtype) lengths = ops.convert_to_tensor(lengths) sum_lengths = math_ops.reduce_sum(lengths) if lengths.shape.ndims != 1: raise errors_impl.InvalidArgumentError( None, None, "Expected lengths to be a vector, received shape: %s" % lengths.shape.as_list()) elif value.shape.ndims == 0: raise errors_impl.InvalidArgumentError( None, None, "Expected value to be at least a vector, " "but received shape: %s" % value.shape.as_list()) elif sum_lengths.numpy() != value.shape.as_list()[0]: raise errors_impl.InvalidArgumentError( None, None, "Expected sum of lengths to be equal to " "values.shape[0], but sum of lengths is %d and " "value's shape is: %s " % (sum_lengths.numpy(), value.shape.as_list())) elif not self._dynamic_size and lengths.shape[0] != len(self._tensor_array): raise errors_impl.InvalidArgumentError( None, None, "TensorArray's size is not equal to the size of " "lengths (%d vs. %d), and the TensorArray is not marked as " "dynamically resizeable" % (len(self._tensor_array), lengths.shape[0])) else: self._tensor_array = array_ops.split(value, lengths, name=name) return self.parent() def size(self, name=None): """See TensorArray.""" del name # not meaningful when executing eagerly. return constant_op.constant(len(self._tensor_array)) def close(self, name=None): del name # not meaningful when executing eagerly. del self._tensor_array[:] # TensorArray is designed to hide an underlying implementation object # and as such accesses many of that object's hidden fields. # pylint: disable=protected-access @tf_export("TensorArray") class TensorArray(object): """Class wrapping dynamic-sized, per-time-step, write-once Tensor arrays. This class is meant to be used with dynamic iteration primitives such as `while_loop` and `map_fn`. It supports gradient back-propagation via special "flow" control flow dependencies. """ def __init__(self, dtype, size=None, dynamic_size=None, clear_after_read=None, tensor_array_name=None, handle=None, flow=None, infer_shape=True, element_shape=None, colocate_with_first_write_call=True, name=None): """Construct a new TensorArray or wrap an existing TensorArray handle. A note about the parameter `name`: The name of the `TensorArray` (even if passed in) is uniquified: each time a new `TensorArray` is created at runtime it is assigned its own name for the duration of the run. This avoids name collisions if a `TensorArray` is created within a `while_loop`. Args: dtype: (required) data type of the TensorArray. size: (optional) int32 scalar `Tensor`: the size of the TensorArray. Required if handle is not provided. dynamic_size: (optional) Python bool: If true, writes to the TensorArray can grow the TensorArray past its initial size. Default: False. clear_after_read: Boolean (optional, default: True). If True, clear TensorArray values after reading them. This disables read-many semantics, but allows early release of memory. tensor_array_name: (optional) Python string: the name of the TensorArray. This is used when creating the TensorArray handle. If this value is set, handle should be None. handle: (optional) A `Tensor` handle to an existing TensorArray. If this is set, tensor_array_name should be None. Only supported in graph mode. flow: (optional) A float `Tensor` scalar coming from an existing `TensorArray.flow`. Only supported in graph mode. infer_shape: (optional, default: True) If True, shape inference is enabled. In this case, all elements must have the same shape. 
element_shape: (optional, default: None) A `TensorShape` object specifying the shape constraints of each of the elements of the TensorArray. Need not be fully defined. colocate_with_first_write_call: If `True`, the TensorArray will be colocated on the same device as the Tensor used on its first write (write operations include `write`, `unstack`, and `split`). If `False`, the TensorArray will be placed on the device determined by the device context available during its initialization. name: A name for the operation (optional). Raises: ValueError: if both handle and tensor_array_name are provided. TypeError: if handle is provided but is not a Tensor. """ if (context.executing_eagerly() and (flow is None or flow.dtype != dtypes.variant)): # It is possible to create a Variant-style TensorArray even in eager mode, # and this is fine but can have performance implications in eager. # An example of when this happens is if a tf.function returns a # TensorArray in its output; its flow variant object is returned to Eager. # This can be wrapped back up in a Variant-style TensorArray. implementation = _EagerTensorArray elif (flow is not None and flow.dtype == dtypes.variant or control_flow_util.EnableControlFlowV2(ops.get_default_graph())): implementation = _GraphTensorArrayV2 else: implementation = _GraphTensorArray self._implementation = implementation( dtype, size=size, dynamic_size=dynamic_size, clear_after_read=clear_after_read, tensor_array_name=tensor_array_name, handle=handle, flow=flow, infer_shape=infer_shape, element_shape=element_shape, colocate_with_first_write_call=colocate_with_first_write_call, name=name) self._implementation.parent = weakref.ref(self) @property def flow(self): """The flow `Tensor` forcing ops leading to this TensorArray state.""" return self._implementation._flow @property def dtype(self): """The data type of this TensorArray.""" return self._implementation._dtype @property def handle(self): """The reference to the TensorArray.""" return self._implementation.handle @property def element_shape(self): """The `tf.TensorShape` of elements in this TensorArray.""" return self._implementation.element_shape @property def dynamic_size(self): """Python bool; if `True` the TensorArray can grow dynamically.""" return self._implementation._dynamic_size @property def _dynamic_size(self): return self._implementation._dynamic_size @_dynamic_size.setter def _dynamic_size(self, dynamic_size): self._implementation._dynamic_size = dynamic_size @property def _infer_shape(self): return self._implementation._infer_shape @_infer_shape.setter def _infer_shape(self, infer_shape): self._implementation._infer_shape = infer_shape @property def _element_shape(self): return self._implementation._element_shape @_element_shape.setter def _element_shape(self, element_shape): self._implementation._element_shape = element_shape @property def _colocate_with_first_write_call(self): return self._implementation._colocate_with_first_write_call @property def _colocate_with(self): return self._implementation._colocate_with @_colocate_with.setter def _colocate_with(self, colocate_with): self._implementation._colocate_with = colocate_with def identity(self): """Returns a TensorArray with the same content and properties. Returns: A new TensorArray object with flow that ensures the control dependencies from the contexts will become control dependencies for writes, reads, etc. Use this object all for subsequent operations. 
""" return self._implementation.identity() def grad(self, source, flow=None, name=None): return self._implementation.grad(source, flow=flow, name=name) def read(self, index, name=None): """Read the value at location `index` in the TensorArray. Args: index: 0-D. int32 tensor with the index to read from. name: A name for the operation (optional). Returns: The tensor at index `index`. """ return self._implementation.read(index, name=name) def write(self, index, value, name=None): """Write `value` into index `index` of the TensorArray. Args: index: 0-D. int32 scalar with the index to write to. value: N-D. Tensor of type `dtype`. The Tensor to write to this index. name: A name for the operation (optional). Returns: A new TensorArray object with flow that ensures the write occurs. Use this object all for subsequent operations. Raises: ValueError: if there are more writers than specified. """ return self._implementation.write(index, value, name=name) def stack(self, name=None): """Return the values in the TensorArray as a stacked `Tensor`. All of the values must have been written and their shapes must all match. If input shapes have rank-`R`, then output shape will have rank-`(R+1)`. Args: name: A name for the operation (optional). Returns: All the tensors in the TensorArray stacked into one tensor. """ return self._implementation.stack(name=name) def gather(self, indices, name=None): """Return selected values in the TensorArray as a packed `Tensor`. All of selected values must have been written and their shapes must all match. Args: indices: A `1-D` `Tensor` taking values in `[0, max_value)`. If the `TensorArray` is not dynamic, `max_value=size()`. name: A name for the operation (optional). Returns: The tensors in the `TensorArray` selected by `indices`, packed into one tensor. """ return self._implementation.gather(indices, name=name) def concat(self, name=None): """Return the values in the TensorArray as a concatenated `Tensor`. All of the values must have been written, their ranks must match, and and their shapes must all match for all dimensions except the first. Args: name: A name for the operation (optional). Returns: All the tensors in the TensorArray concatenated into one tensor. """ return self._implementation.concat(name=name) @tf_should_use.should_use_result def unstack(self, value, name=None): """Unstack the values of a `Tensor` in the TensorArray. If input value shapes have rank-`R`, then the output TensorArray will contain elements whose shapes are rank-`(R-1)`. Args: value: (N+1)-D. Tensor of type `dtype`. The Tensor to unstack. name: A name for the operation (optional). Returns: A new TensorArray object with flow that ensures the unstack occurs. Use this object all for subsequent operations. Raises: ValueError: if the shape inference fails. """ return self._implementation.unstack(value, name=name) @tf_should_use.should_use_result def scatter(self, indices, value, name=None): """Scatter the values of a `Tensor` in specific indices of a `TensorArray`. Args: indices: A `1-D` `Tensor` taking values in `[0, max_value)`. If the `TensorArray` is not dynamic, `max_value=size()`. value: (N+1)-D. Tensor of type `dtype`. The Tensor to unpack. name: A name for the operation (optional). Returns: A new TensorArray object with flow that ensures the scatter occurs. Use this object all for subsequent operations. Raises: ValueError: if the shape inference fails. 
""" return self._implementation.scatter(indices, value, name=name) @tf_should_use.should_use_result def split(self, value, lengths, name=None): """Split the values of a `Tensor` into the TensorArray. Args: value: (N+1)-D. Tensor of type `dtype`. The Tensor to split. lengths: 1-D. int32 vector with the lengths to use when splitting `value` along its first dimension. name: A name for the operation (optional). Returns: A new TensorArray object with flow that ensures the split occurs. Use this object all for subsequent operations. Raises: ValueError: if the shape inference fails. """ return self._implementation.split(value, lengths, name=name) def size(self, name=None): """Return the size of the TensorArray.""" return self._implementation.size(name=name) @tf_should_use.should_use_result def close(self, name=None): """Close the current TensorArray.""" return self._implementation.close(name=name) def build_ta_with_new_flow(old_ta, flow): """Builds a TensorArray with a new `flow` tensor.""" if not context.executing_eagerly(): # Sometimes we get old_ta as the implementation, sometimes it's the # TensorArray wrapper object. impl = (old_ta._implementation if isinstance(old_ta, TensorArray) else old_ta) if (not isinstance(impl, _GraphTensorArrayV2) and control_flow_util.EnableControlFlowV2(ops.get_default_graph())): raise NotImplementedError("Attempting to build a graph-mode TF2-style " "TensorArray from either an eager-mode " "TensorArray or a TF1-style TensorArray. " "This is not currently supported. You may be " "attempting to capture a TensorArray " "inside a tf.function or tf.data map function. " "Instead, construct a new TensorArray inside " "the function.") ta = TensorArray( dtype=old_ta.dtype, dynamic_size=old_ta._dynamic_size, handle=old_ta.handle, flow=flow, infer_shape=old_ta._infer_shape, colocate_with_first_write_call=old_ta._colocate_with_first_write_call) ta._colocate_with = old_ta._colocate_with ta._element_shape = old_ta._element_shape return ta # pylint: enable=protected-access def _check_dtypes(value, dtype): if value.dtype != dtype: logging.error( "Error: Input value {} has dtype {}, but expected dtype {}. " "This leads to undefined behavior and will be an error " "in future versions of TensorFlow. Traceback:\n{}".format( value, str(value.dtype), str(dtype), "".join(traceback.format_stack()))) @tf_export("TensorArraySpec") class TensorArraySpec(type_spec.TypeSpec): """Type specification for a `tf.TensorArray`.""" __slots__ = ["_element_shape", "_dtype", "_dynamic_size", "_infer_shape"] value_type = property(lambda self: TensorArray) def __init__(self, element_shape=None, dtype=dtypes.float32, dynamic_size=False, infer_shape=True): """Constructs a type specification for a `tf.TensorArray`. Args: element_shape: The shape of each element in the `TensorArray`. dtype: Data type of the `TensorArray`. dynamic_size: Whether the `TensorArray` can grow past its initial size. infer_shape: Whether shape inference is enabled. """ self._element_shape = tensor_shape.as_shape(element_shape) self._dtype = dtypes.as_dtype(dtype) self._dynamic_size = dynamic_size self._infer_shape = infer_shape def is_compatible_with(self, other): # pylint: disable=protected-access if not isinstance(other, type_spec.TypeSpec): other = type_spec.type_spec_from_value(other) # Note: we intentionally exclude infer_shape in this check. 
return (isinstance(other, TensorArraySpec) and self._dtype.is_compatible_with(other._dtype) and self._element_shape.is_compatible_with(other._element_shape) and self._dynamic_size == other._dynamic_size) def most_specific_compatible_type(self, other): # pylint: disable=protected-access if not self.is_compatible_with(other): raise ValueError("Types are not compatible") infer_shape = self._infer_shape and other._infer_shape return TensorArraySpec( self._element_shape.most_specific_compatible_shape( other._element_shape), self._dtype, self._dynamic_size, infer_shape) def _serialize(self): return (self._element_shape, self._dtype, self._dynamic_size, self._infer_shape) @property def _component_specs(self): return [tensor_spec.TensorSpec([], dtypes.variant)] def _to_components(self, value): if not isinstance(value, TensorArray): raise TypeError("value must be a TensorArray, but saw: {}" .format(type(value))) if value.flow is not None and value.flow.dtype == dtypes.variant: return [value.flow] else: # Convert to a TF2-style TensorArray. # TODO(ebrevdo): Add an "_as_variant" method to TensorArray class, or # "implementation / as_variant" arg to TensorArray constructor. with ops.name_scope("convert_tensor_array"): flow = list_ops.tensor_list_from_tensor( tensor=value.stack(), element_shape=value.element_shape) return [flow] def _from_components(self, tensor_list): # This will return a TF2 Graph-style TensorArray because tensor_list[0] is # a variant object. size == -1 implies unknown size. ret = TensorArray( dtype=self._dtype, flow=tensor_list[0], dynamic_size=self._dynamic_size, infer_shape=self._infer_shape) ret._element_shape = [self._element_shape] # pylint: disable=protected-access return ret @staticmethod def from_value(value): if not isinstance(value, TensorArray): raise TypeError("Expected value to be a TensorArray, but saw: {}". format(type(value))) return TensorArraySpec( dtype=value.dtype, element_shape=value.element_shape, dynamic_size=value.dynamic_size, infer_shape=value._infer_shape) # pylint: disable=protected-access def _to_legacy_output_types(self): return self._dtype def _to_legacy_output_shapes(self): # Sneak the dynamic_size and infer_shape values into the legacy shape. return (tensor_shape.matrix(self._dynamic_size, self._infer_shape) .concatenate(self._element_shape)) def _to_legacy_output_classes(self): return TensorArray # Register the TypeSpec for TensorArray. If TensorArray is updated to be a # CompositeTensor, then this registration can be deleted. type_spec.register_type_spec_from_value_converter( TensorArray, TensorArraySpec.from_value, allow_subclass=True)
tensorflow-master
tensorflow/python/ops/tensor_array_ops.py
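# Illustrative sketch (not part of the file above): how the split()/stack()
# methods and the TensorArraySpec documented in tensor_array_ops.py are
# typically used from the public API. Assumes TF 2.x eager execution; the
# concrete values are examples only.
import tensorflow as tf

ta = tf.TensorArray(dtype=tf.float32, size=2)
# Split a length-4 vector into two entries of length 2 each.
ta = ta.split(tf.constant([1., 2., 3., 4.]), lengths=[2, 2])
first = ta.read(0)      # [1., 2.]
stacked = ta.stack()    # [[1., 2.], [3., 4.]]
num_entries = ta.size() # 2

# A TensorArraySpec describing float32 arrays whose elements are vectors.
spec = tf.TensorArraySpec(element_shape=[None], dtype=tf.float32)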
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for nccl ops. See also the cc test for nccl_communicator.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from functools import partial import numpy as np from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import gradients from tensorflow.python.ops import nccl_ops from tensorflow.python.platform import test def _DeviceTensors(tensors, devices): res = [] for t, d in zip(tensors, devices): with ops.device(d): res.append(array_ops.identity(t)) return res def _NcclAllReduce(nccl_fun, tensors, devices): return nccl_fun(_DeviceTensors(tensors, devices)) def _NcclReduce(nccl_fun, tensors, devices): receiver = np.random.randint(0, len(devices)) with ops.device(devices[receiver]): return [nccl_fun(_DeviceTensors(tensors, devices))] def _NcclBroadcast(tensors, devices): sender = np.random.randint(0, len(devices)) with ops.device(devices[sender]): tensor = array_ops.identity(tensors[0]) broadcast = nccl_ops.broadcast(tensor) return _DeviceTensors([broadcast] * len(devices), devices) class NcclTestCase(test.TestCase): def _Test(self, nccl_reduce, numpy_fn, device_sets=(['/device:GPU:1', '/device:GPU:2', '/device:GPU:0'], ['/device:GPU:1', '/device:GPU:0'])): """Tests that nccl_reduce does the same as reduction with numpy_fn. Args: nccl_reduce: A function taking a list of tensors and a list of devices, and returns a list of reduced tensors and a list of ops to perform the reduction. numpy_fn: A function taking two tensors and returning the reduction of the two. device_sets: Tuple of virtual devices to run test on. """ for dtype in [np.float16, np.float32, np.int32, np.int64, np.float64]: # Create session inside outer loop to test use of # same communicator across multiple sessions. with self.test_session(use_gpu=True) as sess: for devices in device_sets: shape = (3, 4) random = (np.random.random_sample(shape) - .5) * 1024 tensors = [] for _ in devices: tensors.append(random.astype(dtype)) np_ans = tensors[0] for t in tensors[1:]: np_ans = numpy_fn(np_ans, t) reduce_tensors = nccl_reduce(tensors, devices) self.assertNotEmpty(reduce_tensors) # Test shape inference. for r in reduce_tensors: self.assertEqual(shape, r.get_shape()) result_tensors = [array_ops.identity(t) for t in reduce_tensors] # Check GPU availability *after* creating session, see b/68975239. if not test.is_gpu_available(): # If no GPU is available, only test graph construction. continue # Test execution and results. for t in self.evaluate(result_tensors): self.assertAllClose(t, np_ans) def _TestGradient(self, nccl_reduce, numpy_fn): """Tests the gradient of nccl_reduce. 
Args: nccl_reduce: A function taking a list of tensors and a list of devices, and returns a list of reduced tensors and a list of ops to perform the reduction. numpy_fn: A function taking two tensors and returning the gradient of the reduction of the two. """ def _Gradient(tensors, devices): inputs = [array_ops.placeholder(t.dtype, t.shape) for t in tensors] reduce_tensors = nccl_reduce(inputs, devices) losses = _DeviceTensors(tensors, [t.device for t in reduce_tensors]) grads = gradients.gradients( reduce_tensors, inputs, losses, colocate_gradients_with_ops=True) return [g for g in grads if g is not None] self._Test(_Gradient, numpy_fn) class AllReduceTest(NcclTestCase): def testAllReduce(self): self._Test(partial(_NcclAllReduce, nccl_ops.all_sum), lambda x, y: x + y) self._Test(partial(_NcclAllReduce, nccl_ops.all_prod), lambda x, y: x * y) self._Test(partial(_NcclAllReduce, nccl_ops.all_min), np.minimum) self._Test(partial(_NcclAllReduce, nccl_ops.all_max), np.maximum) def testAllSumGrad(self): self._TestGradient( partial(_NcclAllReduce, nccl_ops.all_sum), lambda x, y: x + y) def testErrors(self): with self.assertRaisesRegexp(ValueError, 'Device assignment required'): nccl_ops.all_sum([array_ops.identity(np.random.random_sample((3, 4)))]) with self.assertRaisesRegexp(ValueError, 'Must pass >0 tensors'): nccl_ops.all_sum([]) class SingleReduceTest(NcclTestCase): def testSum(self): self._Test(partial(_NcclReduce, nccl_ops.reduce_sum), lambda x, y: x + y) def testSumGrad(self): self._TestGradient(partial(_NcclReduce, nccl_ops.reduce_sum), lambda x, y: x) class BroadcastTest(NcclTestCase): def testBroadcast(self): self._Test(_NcclBroadcast, lambda x, y: x) def testBroadcastSingleDevice(self): # Broadcasts on a single device are removed completely during rewrite. self._Test(_NcclBroadcast, lambda x, y: x, (['/device:GPU:0', '/device:GPU:0'],)) def testBroadcastToCpuError(self): try: # Broadcasts to CPU is not supported. self._Test(_NcclBroadcast, lambda x, y: x, (['/device:GPU:0', '/device:CPU:0'],)) except errors.NotFoundError as e: self.assertRegexpMatches( str(e), "No registered '_NcclBroadcastRecv' OpKernel for CPU devices") else: # Session isn't executed when no GPU is available. if test.is_gpu_available(): self.fail("Didn't raise NotFoundError trying to broadcast to CPU") class CombinedTest(NcclTestCase): """Test all-reduce vs. single-reduce plus broadcast in one session.run.""" def _Combined(self, tensors, devices): all_reduce_tensors = _NcclAllReduce(nccl_ops.all_sum, tensors, devices) single_reduce_tensors = _NcclReduce(nccl_ops.reduce_sum, tensors, devices) broadcast_tensors = _NcclBroadcast(single_reduce_tensors, devices) return all_reduce_tensors + broadcast_tensors def testCombined(self): self._Test(self._Combined, lambda x, y: x + y) if __name__ == '__main__': test.main()
tensorflow-master
tensorflow/python/ops/nccl_ops_test.py
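# Illustrative sketch (not part of the test above): an all-reduce with
# nccl_ops.all_sum, mirroring the _DeviceTensors/_NcclAllReduce helpers in
# nccl_ops_test.py. Assumes a machine with at least two visible GPUs; the
# device names are examples only.
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nccl_ops

devices = ['/device:GPU:0', '/device:GPU:1']
tensors = []
for d in devices:
  with ops.device(d):
    tensors.append(array_ops.ones([3, 4]))
# One summed tensor is returned per participating device.
summed = nccl_ops.all_sum(tensors)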
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Support for sorting tensors. @@argsort @@sort """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.framework import constant_op from tensorflow.python.framework import ops as framework_ops from tensorflow.python.framework import tensor_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn_ops from tensorflow.python.util.tf_export import tf_export @tf_export('sort') def sort(values, axis=-1, direction='ASCENDING', name=None): """Sorts a tensor. Usage: ```python import tensorflow as tf a = [1, 10, 26.9, 2.8, 166.32, 62.3] b = tf.sort(a,axis=-1,direction='ASCENDING',name=None) c = tf.keras.backend.eval(b) # Here, c = [ 1. 2.8 10. 26.9 62.3 166.32] ``` Args: values: 1-D or higher numeric `Tensor`. axis: The axis along which to sort. The default is -1, which sorts the last axis. direction: The direction in which to sort the values (`'ASCENDING'` or `'DESCENDING'`). name: Optional name for the operation. Returns: A `Tensor` with the same dtype and shape as `values`, with the elements sorted along the given `axis`. Raises: ValueError: If axis is not a constant scalar, or the direction is invalid. """ with framework_ops.name_scope(name, 'sort'): return _sort_or_argsort(values, axis, direction, return_argsort=False) @tf_export('argsort') def argsort(values, axis=-1, direction='ASCENDING', stable=False, name=None): """Returns the indices of a tensor that give its sorted order along an axis. For a 1D tensor, `tf.gather(values, tf.argsort(values))` is equivalent to `tf.sort(values)`. For higher dimensions, the output has the same shape as `values`, but along the given axis, values represent the index of the sorted element in that slice of the tensor at the given position. Usage: ```python import tensorflow as tf a = [1, 10, 26.9, 2.8, 166.32, 62.3] b = tf.argsort(a,axis=-1,direction='ASCENDING',stable=False,name=None) c = tf.keras.backend.eval(b) # Here, c = [0 3 1 2 5 4] ``` Args: values: 1-D or higher numeric `Tensor`. axis: The axis along which to sort. The default is -1, which sorts the last axis. direction: The direction in which to sort the values (`'ASCENDING'` or `'DESCENDING'`). stable: If True, equal elements in the original tensor will not be re-ordered in the returned order. Unstable sort is not yet implemented, but will eventually be the default for performance reasons. If you require a stable order, pass `stable=True` for forwards compatibility. name: Optional name for the operation. Returns: An int32 `Tensor` with the same shape as `values`. The indices that would sort each slice of the given `values` along the given `axis`. Raises: ValueError: If axis is not a constant scalar, or the direction is invalid. """ del stable # Unused. 
with framework_ops.name_scope(name, 'argsort'): return _sort_or_argsort(values, axis, direction, return_argsort=True) def _sort_or_argsort(values, axis, direction, return_argsort): """Internal sort/argsort implementation. Args: values: The input values. axis: The axis along which to sort. direction: 'ASCENDING' or 'DESCENDING'. return_argsort: Whether to return the argsort result. Returns: Either the sorted values, or the indices of the sorted values in the original tensor. See the `sort` and `argsort` docstrings. Raises: ValueError: If axis is not a constant scalar, or the direction is invalid. """ if direction not in _SORT_IMPL: raise ValueError('%s should be one of %s' % (direction, ', '.join( sorted(_SORT_IMPL.keys())))) # Axis must be an integer, not a Tensor. axis = framework_ops.convert_to_tensor(axis, name='axis') axis_static = tensor_util.constant_value(axis) if axis.shape.ndims != 0 or axis_static is None: raise ValueError('axis must be a constant scalar') axis_static = int(axis_static) # Avoids NumPy casting error values = framework_ops.convert_to_tensor(values, name='values') return _SORT_IMPL[direction](values, axis_static, return_argsort) def _descending_sort(values, axis, return_argsort=False): """Sorts values in reverse using `top_k`. Args: values: Tensor of numeric values. axis: Index of the axis which values should be sorted along. return_argsort: If False, return the sorted values. If True, return the indices that would sort the values. Returns: The sorted values. """ k = array_ops.shape(values)[axis] rank = array_ops.rank(values) static_rank = values.shape.ndims # Fast path: sorting the last axis. if axis == -1 or axis + 1 == values.get_shape().ndims: top_k_input = values transposition = None else: # Otherwise, transpose the array. Swap axes `axis` and `rank - 1`. if axis < 0: # Calculate the actual axis index if counting from the end. Use the static # rank if available, or else make the axis back into a tensor. axis += static_rank or rank if static_rank is not None: # Prefer to calculate the transposition array in NumPy and make it a # constant. transposition = constant_op.constant( np.r_[ # Axes up to axis are unchanged. np.arange(axis), # Swap axis and rank - 1. [static_rank - 1], # Axes in [axis + 1, rank - 1) are unchanged. np.arange(axis + 1, static_rank - 1), # Swap axis and rank - 1. [axis]], name='transposition') else: # Generate the transposition array from the tensors. transposition = array_ops.concat( [ # Axes up to axis are unchanged. math_ops.range(axis), # Swap axis and rank - 1. [rank - 1], # Axes in [axis + 1, rank - 1) are unchanged. math_ops.range(axis + 1, rank - 1), # Swap axis and rank - 1. [axis] ], axis=0) top_k_input = array_ops.transpose(values, transposition) values, indices = nn_ops.top_k(top_k_input, k) return_value = indices if return_argsort else values if transposition is not None: # transposition contains a single cycle of length 2 (swapping 2 elements), # so it is an involution (it is its own inverse). return_value = array_ops.transpose(return_value, transposition) return return_value def _ascending_sort(values, axis, return_argsort=False): # Negate the values to get the ascending order from descending sort. values_or_indices = _descending_sort(-values, axis, return_argsort) # If not argsort, negate the values again. return values_or_indices if return_argsort else -values_or_indices _SORT_IMPL = { 'ASCENDING': _ascending_sort, 'DESCENDING': _descending_sort, }
tensorflow-master
tensorflow/python/ops/sort_ops.py
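# Illustrative sketch (not part of the file above): sorting along a
# non-trailing axis, which exercises the transpose path described in
# _descending_sort. Assumes TF 2.x; the inputs are examples only.
import tensorflow as tf

x = tf.constant([[3., 1., 2.],
                 [6., 5., 4.]])
tf.sort(x, axis=0, direction='DESCENDING')   # sorts each column
idx = tf.argsort(x, axis=-1)                 # [[1, 2, 0], [2, 1, 0]]

# For 1-D inputs, gather(values, argsort(values)) equals sort(values).
v = tf.constant([1., 10., 26.9, 2.8])
tf.reduce_all(tf.equal(tf.gather(v, tf.argsort(v)), tf.sort(v)))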
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= """Methods for rewriting while_v2 grad functions with IndexedSlices output.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import func_graph from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.ops import array_ops from tensorflow.python.ops import gen_resource_variable_ops from tensorflow.python.util import nest def rewrite_grad_indexed_slices(grads, body_grad_graph, loop_vars, forward_inputs): """Handles special case of IndexedSlices returned from while gradient. Some gradient functions return IndexedSlices instead of a Tensor (e.g. the gradient of Gather ops). When this happens in the gradient of a while body, the resulting gradient body function will have mismatched inputs and outputs, since the input is a single Tensor, but the IndexedSlices gets unnested into three output Tensors. This function fixes this by rewriting the gradient body to have three inputs to match the three outputs, i.e., it effectively converts the input Tensor into an input IndexedSlices. It also returns new `loop_vars` to reflect the new inputs. Args: grads: the input gradient Tensors to the while gradient computation. body_grad_graph: _WhileBodyGradFuncGraph. loop_vars: list of Tensors. The inputs to body_grad_graph. forward_inputs: list of Tensors. The (flat) inputs to the forward-pass While op. Returns: The new loop_vars to pass to body_grad_graph. """ # Match up body_grad_graph.structured_outputs with the corresponding # forward_inputs. # # Note that we don't expect a gradient computation to have structured output # (e.g. no nested lists), so no need to flatten # body_grad_graph.structured_outputs. However, structured_outputs may still # contain composite tensors such as IndexedSlices, unlike # body_grad_graph.outputs, which contains flattened composite tensors. inputs_with_grads = [t for g, t in zip(grads, forward_inputs) if g is not None] # Skip loop counter, maximum_iterations and total number of loop iterations. structured_outputs = body_grad_graph.structured_outputs[3:] for forward_input, output in zip(inputs_with_grads, structured_outputs): if not isinstance(output, ops.IndexedSlices): continue if forward_input.dtype == dtypes.resource: # TODO(skyewm): In theory we should use this for all captured inputs, not # just resource handles (which can only be captured). We can do this by # checking that forward_input is passed straight through to its output. 
loop_vars = _rewrite_input_as_indexed_slices(body_grad_graph, output, forward_input, loop_vars) else: _rewrite_output_as_tensor(body_grad_graph, output) return loop_vars def _rewrite_output_as_tensor(body_grad_graph, grad_output_slices): """Rewrites grad_output_slices to be a Tensor output. Args: body_grad_graph: _WhileBodyGradFuncGraph. grad_output_slices: IndexedSlices output of body_grad_graph. """ with body_grad_graph.as_default(): new_output = ops.convert_to_tensor_v2(grad_output_slices) idx = body_grad_graph.structured_outputs.index(grad_output_slices) body_grad_graph.structured_outputs[idx] = new_output body_grad_graph.outputs = func_graph.flatten( body_grad_graph.structured_outputs) def _rewrite_input_as_indexed_slices(body_grad_graph, grad_output_slices, forward_input, loop_vars): """Rewrites grad_output_slices's corresponding input to be an IndexedSlices. This rewrite requires that forward_input was captured in the forward loop, i.e. is not a user-specified loop variable. This is important because the rewrite assumes that forward_input is passed through to its corresponding output unchanged. This assumption is used in _rewrite_input_as_indexed_slices, which depends on the exact gradient structure produced by the input's fanout. This can yield a more efficient computation than using _rewrite_output_as_tensor, since it preserves the IndexedSlices structure instead of converting the IndexedSlices to a dense Tensor. Args: body_grad_graph: _WhileBodyGradFuncGraph. grad_output_slices: IndexedSlices output of body_grad_graph. forward_input: the corresonding Tensor input to the forward loop. loop_vars: list of Tensors. The inputs to body_grad_graph. Returns: The new loop_vars to pass to body_grad_graph. """ # Create initial IndexedSlices that will be the input to the grad While # op. This will start as zeros, and accumulate the IndexedSlices grad output. # Note that because forward_input is captured and not a loop var, its incoming # gradient should always be zero. init_slices = _create_grad_indexed_slices_init(grad_output_slices, forward_input) # Create a new version of grad_output_slices's gradient computation that uses # the new IndexedSlices input instead of the original Tensor input. We'll # return the new computation and leave the old computation as dead code. # TODO(skyewm): considering pruning body_grad_graph to remove the old # computation. with body_grad_graph.as_default(): input_slices = ops.IndexedSlices( values=body_grad_graph.capture(init_slices.values, whitelisted=True), indices=body_grad_graph.capture(init_slices.indices, whitelisted=True), dense_shape=body_grad_graph.capture(init_slices.dense_shape, whitelisted=True)) # Remove the captured tensors from the function inputs. We'll add them back # at the correct index in _update_indexed_slices_param. for t in _flatten(init_slices): captured_t = body_grad_graph.captures.pop(t) body_grad_graph.inputs.remove(captured_t) new_output_slices = _rewrite_grad_indexed_slices_output(grad_output_slices, input_slices) # Update body_grad_graph's inputs and outputs to reflect the new # IndexedSlices computation. return _update_indexed_slices_param( body_grad_graph, loop_vars, init_slices, input_slices, new_output_slices, grad_output_slices) def _create_grad_indexed_slices_init(grad_output_slices, forward_input): """Creates an IndexedSlices to pass as input to the while grad function. Args: grad_output_slices: IndexedSlices. The corresponding while grad function output. forward_input: Tensor. The corresonding input to the forward while op. 
Returns: Zeros IndexedSlices, created in current Graph. """ assert isinstance(grad_output_slices, ops.IndexedSlices) assert isinstance(forward_input, ops.Tensor) values_out = grad_output_slices.values indices_out = grad_output_slices.indices # Create the initial values tensor. if values_out.shape.is_fully_defined(): values_shape = tensor_shape.TensorShape([0] + values_out.shape.as_list()[1:]) values = array_ops.zeros(values_shape, dtype=values_out.dtype, name="values_init") else: if forward_input.dtype == dtypes.resource: forward_shape = gen_resource_variable_ops.variable_shape(forward_input) else: forward_shape = array_ops.shape(forward_input) values_shape = array_ops.concat([[0], forward_shape[1:]], 0) values = array_ops.zeros(values_shape, dtype=values_out.dtype, name="values_init") # Create the initial indices tensor. indices = constant_op.constant([], indices_out.dtype, name="indices_init") # Create the initial dense_shape tensor. We assume is the same shape as # forward_input, since captured tensors don't change shape across loop # iterations. if forward_input.dtype == dtypes.resource: shape = gen_resource_variable_ops.variable_shape(forward_input, name="shape_init") else: shape = array_ops.shape(forward_input, name="shape_init") return ops.IndexedSlices(values=values, indices=indices, dense_shape=shape) def _rewrite_grad_indexed_slices_output(old_output_slices, new_input_slices): """Creates a new verson of old_output_slices with new_input_slices as input. This method assumes that old_output_slices.{values,indices} are produced by concatenating the incoming gradient Tensor input with the IndexedSlices produced by the gradient computation of the while body. See backprop.aggregate_indexed_slices_gradients for where these concats are constructed. We build new concats that use new_input_slices instead of the original Tensor input. Args: old_output_slices: original IndexedSlices output of while gradient. new_input_slices: new IndexedSlices to use as input to while gradient. Returns: A new IndexedSlices to replace old_output_slices. """ def rewrite(old_output, new_input): assert old_output.type == "Identity" concat_op = old_output.inputs[0].op assert concat_op.type == "ConcatV2" # Don't include axis arg old_concat_args = concat_op.inputs[:-1] # We assume that the original gradient input was the first argument to the # concat op. # TODO(skyewm): do this in a more robust way. return array_ops.concat([new_input] + old_concat_args[1:], 0) values = rewrite(old_output_slices.values.op, new_input_slices.values) indices = rewrite(old_output_slices.indices.op, new_input_slices.indices) return ops.IndexedSlices(values=values, indices=indices, dense_shape=new_input_slices.dense_shape) def _update_indexed_slices_param(graph, loop_vars, init_slices, input_slices, output_slices, old_output_slices): """Updates graph with new IndexedSlices input/output. Updates graph's metadata to output the gradient computation defined by init_slices, input_slices, and output_slices, instead of outputting old_output_slices. Also returns a new version of loop_vars with init_slices replacing the old input. Args: graph: _WhileBodyGradFuncGraph. loop_vars: the inputs to graph. init_slices: the new IndexedSlices to use as input to graph. input_slices: the new IndexedSlices in graph that should be fed by init_slices. output_slices: the new IndexedSlices in graph that should be the corresonding output to input_slices. old_output_slices: the IndexedSlices in graph that are currently being output. 
Returns: New loop_vars to pass to graph. """ structured_idx = graph.structured_outputs.index(old_output_slices) # We assume that the component tensors of old_output_slices appear # sequentially in graph.outputs. We use the first of these tensors # as the reference index. flat_idx = graph.outputs.index(func_graph.flatten(old_output_slices)[0]) graph.structured_outputs[structured_idx] = output_slices graph.outputs = func_graph.flatten( graph.structured_outputs) graph.inputs = (graph.inputs[:flat_idx] + _flatten(input_slices) + graph.inputs[flat_idx + 1:]) return loop_vars[:flat_idx] + _flatten(init_slices) + loop_vars[flat_idx + 1:] def _flatten(arg): return nest.flatten(arg, expand_composites=True)
tensorflow-master
tensorflow/python/ops/while_v2_indexed_slices_rewriter.py
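# Illustrative sketch (not part of the file above) of the situation this
# rewriter exists for: the gradient of a Gather inside a while loop is an
# IndexedSlices rather than a dense Tensor. Assumes TF 2.x with control
# flow v2 (the default in 2.x); the shapes and loop bound are examples.
import tensorflow as tf

params = tf.ones([10, 3])
with tf.GradientTape() as tape:
  tape.watch(params)
  _, acc = tf.while_loop(
      lambda i, acc: i < 5,
      lambda i, acc: (i + 1, acc + tf.gather(params, i)),
      [tf.constant(0), tf.zeros([3])])
grad = tape.gradient(acc, params)  # typically an IndexedSlices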
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for sparse ops.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized import numpy as np from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import test_util # Need array_grad to register gradient for Identity. from tensorflow.python.ops import array_grad # pylint: disable=unused-import from tensorflow.python.ops import gradient_checker_v2 as gradient_checker from tensorflow.python.ops import math_ops # Need sparse_grad to register gradient for SparseToDense. from tensorflow.python.ops import sparse_grad # pylint: disable=unused-import from tensorflow.python.ops import sparse_ops from tensorflow.python.platform import googletest @test_util.run_all_in_graph_and_eager_modes class SparseOpsTest(test_util.TensorFlowTestCase, parameterized.TestCase): def testSparseEye(self): def test_one(n, m, as_tensors): expected = np.eye(n, m) if as_tensors: m = constant_op.constant(m) n = constant_op.constant(n) s = sparse_ops.sparse_eye(n, m) d = sparse_ops.sparse_to_dense(s.indices, s.dense_shape, s.values) self.assertAllEqual(self.evaluate(d), expected) for n in range(2, 10, 2): for m in range(2, 10, 2): # Test with n and m as both constants and tensors. test_one(n, m, True) test_one(n, m, False) def testSparseExpandDims(self): for rank in range(1, 4): # Create a dummy input. When rank=3, shape=[2, 4, 6]. shape = np.arange(1, rank + 1) * 2 before = np.arange(np.prod(shape)).reshape(shape) # Make entries sparse. before *= np.random.binomial(1, .2, before.shape) dense_shape = before.shape indices = np.array(np.where(before)).T values = before[before != 0] # Try every possible valid value of axis. 
for axis in range(-rank - 1, rank): expected_after = np.expand_dims(before, axis) for axis_as_tensor in [False, True]: dense_shape_t = constant_op.constant(dense_shape, dtype=dtypes.int64) indices_t = constant_op.constant(indices) values_t = constant_op.constant(values) before_t = sparse_tensor.SparseTensor( indices=indices_t, values=values_t, dense_shape=dense_shape_t) if axis_as_tensor: axis = constant_op.constant(axis) s = sparse_ops.sparse_expand_dims(before_t, axis) d = sparse_ops.sparse_to_dense(s.indices, s.dense_shape, s.values) self.assertAllEqual(self.evaluate(d), expected_after) @parameterized.parameters([ (math_ops.abs, [1.0, -1.0, 3.0, -4.0], [1.0, 1.0, 3.0, 4.0]), (math_ops.negative, [1.0, -1.0, 3.0, -4.0], [-1.0, 1.0, -3.0, 4.0]), (math_ops.sign, [3.0, -2.0, 0.0, -4.0], [1.0, -1.0, 0.0, -1.0]), (math_ops.square, [1.0, -1.0, 3.0, -4.0], [1.0, 1.0, 9.0, 16.0]), ]) def testUnarySparseDispatch(self, op, values, expected): st = sparse_tensor.SparseTensor( indices=[[0, 0], [0, 1], [2, 0], [2, 4]], values=values, dense_shape=[3, 6]) result = op(st) result_value = self.evaluate(result) self.assertAllEqual(result_value.indices, st.indices) self.assertAllEqual(result_value.values, expected) self.assertAllEqual(result_value.dense_shape, st.dense_shape) def testSparseToDenseGradient(self): def f(sparse_values, default_value): st = sparse_tensor.SparseTensor( indices=[[0, 3, 6], [1, 4, 7], [2, 5, 8]], values=sparse_values, dense_shape=[3, 6, 9]) return sparse_ops.sparse_tensor_to_dense(st, default_value) grads = gradient_checker.compute_gradient( f, [constant_op.constant([1.0, 2.0, 3.0]), constant_op.constant(0.0)]) epsilon = 1e-4 self.assertLess(gradient_checker.max_error(*grads), epsilon) if __name__ == '__main__': googletest.main()
tensorflow-master
tensorflow/python/ops/sparse_ops_test.py
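# Illustrative sketch (not part of the test above): the public tf.sparse API
# for the ops exercised by sparse_ops_test.py. Assumes TF 2.x; the indices
# and values are examples only.
import tensorflow as tf

st = tf.sparse.SparseTensor(indices=[[0, 0], [1, 2]],
                            values=[1.0, 2.0],
                            dense_shape=[3, 4])
expanded = tf.sparse.expand_dims(st, axis=0)      # dense_shape becomes [1, 3, 4]
dense = tf.sparse.to_dense(st, default_value=0.0)
sp_eye = tf.sparse.eye(3, 4)                      # sparse 3x4 "identity"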
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Raw ops tests.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.eager import context from tensorflow.python.framework import constant_op from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.ops import gen_math_ops from tensorflow.python.platform import test @test_util.run_all_in_graph_and_eager_modes class RawOpsTest(test.TestCase): def testSimple(self): x = constant_op.constant(1) self.assertEqual([2], self.evaluate(gen_math_ops.Add(x=x, y=x))) def testRequiresKwargs(self): with self.assertRaisesRegexp(TypeError, "only takes keyword args"): gen_math_ops.Add(1., 1.) def testRequiresKwargs_providesSuggestion(self): msg = "possible keys: \\['x', 'y', 'name'\\]" with self.assertRaisesRegexp(TypeError, msg): gen_math_ops.Add(1., y=2.) def testName(self): x = constant_op.constant(1) op = gen_math_ops.Add(x=x, y=x, name="double") if not context.executing_eagerly(): # `Tensor.name` is not available in eager. self.assertEqual(op.name, "double:0") def testDoc(self): self.assertEqual(gen_math_ops.add.__doc__, gen_math_ops.Add.__doc__) def testDefaults(self): x = constant_op.constant([[True]]) self.assertAllClose( gen_math_ops.Any(input=x, axis=0), gen_math_ops.Any(input=x, axis=0, keep_dims=False)) if __name__ == "__main__": ops.enable_eager_execution() test.main()
tensorflow-master
tensorflow/python/ops/raw_ops_test.py
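# Illustrative sketch (not part of the test above): raw generated ops are
# keyword-only, matching the behaviour checked in RawOpsTest. Assumes TF 2.x
# eager execution.
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import gen_math_ops

x = constant_op.constant(1)
y = gen_math_ops.Add(x=x, y=x, name="double")  # ok: keyword arguments
# gen_math_ops.Add(x, x)                       # TypeError: only takes keyword args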
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy of # the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. # ============================================================================== """Tests for clustering_ops.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.framework import test_util from tensorflow.python.ops import clustering_ops from tensorflow.python.platform import test @test_util.run_all_in_graph_and_eager_modes class KmeansPlusPlusInitializationTest(test.TestCase): # All but one input point are close to (101, 1). With uniform random sampling, # it is highly improbable for (-1, -1) to be selected. def setUp(self): self._points = np.array([[100., 0.], [101., 2.], [102., 0.], [100., 1.], [100., 2.], [101., 0.], [101., 0.], [101., 1.], [102., 0.], [-1., -1.]]).astype(np.float32) def runTestWithSeed(self, seed): with self.cached_session(): sampled_points = clustering_ops.kmeans_plus_plus_initialization( self._points, 3, seed, (seed % 5) - 1) self.assertAllClose( sorted(self.evaluate(sampled_points).tolist()), [[-1., -1.], [101., 1.], [101., 1.]], atol=1.0) def testBasic(self): for seed in range(100): self.runTestWithSeed(seed) @test_util.run_all_in_graph_and_eager_modes class KMC2InitializationTest(test.TestCase): def runTestWithSeed(self, seed): with self.cached_session(): distances = np.zeros(1000).astype(np.float32) distances[6] = 10e7 distances[4] = 10e3 sampled_point = clustering_ops.kmc2_chain_initialization(distances, seed) self.assertAllEqual(sampled_point, 6) distances[6] = 0.0 sampled_point = clustering_ops.kmc2_chain_initialization(distances, seed) self.assertAllEqual(sampled_point, 4) def testBasic(self): for seed in range(100): self.runTestWithSeed(seed) @test_util.run_all_in_graph_and_eager_modes class KMC2InitializationLargeTest(test.TestCase): def setUp(self): self._distances = np.zeros(1001) self._distances[500] = 100.0 self._distances[1000] = 50.0 def testBasic(self): with self.cached_session(): counts = {} seed = 0 for i in range(50): sample = self.evaluate( clustering_ops.kmc2_chain_initialization(self._distances, seed + i)) counts[sample] = counts.get(sample, 0) + 1 self.assertEquals(len(counts), 2) self.assertTrue(500 in counts) self.assertTrue(1000 in counts) self.assertGreaterEqual(counts[500], 5) self.assertGreaterEqual(counts[1000], 5) @test_util.run_all_in_graph_and_eager_modes class KMC2InitializationCornercaseTest(test.TestCase): def setUp(self): self._distances = np.zeros(10) def runTestWithSeed(self, seed): with self.cached_session(): sampled_point = clustering_ops.kmc2_chain_initialization( self._distances, seed) self.assertAllEqual(sampled_point, 0) def testBasic(self): for seed in range(100): self.runTestWithSeed(seed) @test_util.run_all_in_graph_and_eager_modes # A simple test that can be verified by hand. 
class NearestCentersTest(test.TestCase): def setUp(self): self._points = np.array([[100., 0.], [101., 2.], [99., 2.], [1., 1.]]).astype(np.float32) self._centers = np.array([[100., 0.], [99., 1.], [50., 50.], [0., 0.], [1., 1.]]).astype(np.float32) def testNearest1(self): with self.cached_session(): [indices, distances] = clustering_ops.nearest_neighbors(self._points, self._centers, 1) self.assertAllClose(indices, [[0], [0], [1], [4]]) self.assertAllClose(distances, [[0.], [5.], [1.], [0.]]) def testNearest2(self): with self.cached_session(): [indices, distances] = clustering_ops.nearest_neighbors(self._points, self._centers, 2) self.assertAllClose(indices, [[0, 1], [0, 1], [1, 0], [4, 3]]) self.assertAllClose(distances, [[0., 2.], [5., 5.], [1., 5.], [0., 2.]]) @test_util.run_all_in_graph_and_eager_modes # A test with large inputs. class NearestCentersLargeTest(test.TestCase): def setUp(self): num_points = 1000 num_centers = 2000 num_dim = 100 max_k = 5 # Construct a small number of random points and later tile them. points_per_tile = 10 assert num_points % points_per_tile == 0 points = np.random.standard_normal( [points_per_tile, num_dim]).astype(np.float32) # Construct random centers. self._centers = np.random.standard_normal( [num_centers, num_dim]).astype(np.float32) # Exhaustively compute expected nearest neighbors. def squared_distance(x, y): return np.linalg.norm(x - y, ord=2)**2 nearest_neighbors = [ sorted([(squared_distance(point, self._centers[j]), j) for j in range(num_centers)])[:max_k] for point in points ] expected_nearest_neighbor_indices = np.array( [[i for _, i in nn] for nn in nearest_neighbors]) expected_nearest_neighbor_squared_distances = np.array( [[dist for dist, _ in nn] for nn in nearest_neighbors]) # Tile points and expected results to reach requested size (num_points) (self._points, self._expected_nearest_neighbor_indices, self._expected_nearest_neighbor_squared_distances) = ( np.tile(x, (int(num_points / points_per_tile), 1)) for x in (points, expected_nearest_neighbor_indices, expected_nearest_neighbor_squared_distances)) def testNearest1(self): with self.cached_session(): [indices, distances] = clustering_ops.nearest_neighbors(self._points, self._centers, 1) self.assertAllClose( indices, self._expected_nearest_neighbor_indices[:, [0]]) self.assertAllClose( distances, self._expected_nearest_neighbor_squared_distances[:, [0]]) def testNearest5(self): with self.cached_session(): [indices, distances] = clustering_ops.nearest_neighbors(self._points, self._centers, 5) self.assertAllClose( indices, self._expected_nearest_neighbor_indices[:, 0:5]) self.assertAllClose( distances, self._expected_nearest_neighbor_squared_distances[:, 0:5]) if __name__ == "__main__": np.random.seed(0) test.main()
tensorflow-master
tensorflow/python/ops/clustering_ops_test.py
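# Illustrative sketch (not part of the test above): seeding k-means++ and
# querying nearest centers, mirroring the positional calls made in the tests.
# The point cloud, seed, and retry count are examples only.
import numpy as np
from tensorflow.python.ops import clustering_ops

points = np.random.standard_normal([20, 2]).astype(np.float32)
# Sample 3 starting centers; the last argument is num_retries_per_sample.
centers = clustering_ops.kmeans_plus_plus_initialization(points, 3, 0, -1)
indices, distances = clustering_ops.nearest_neighbors(points, centers, 2)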
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Operations for linear algebra.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.util import compat # Names below are lower_case. # pylint: disable=invalid-name def eye(num_rows, num_columns=None, batch_shape=None, dtype=dtypes.float32, name=None): """Construct an identity matrix, or a batch of matrices. See `linalg_ops.eye`. """ with ops.name_scope( name, default_name='eye', values=[num_rows, num_columns, batch_shape]): is_square = num_columns is None batch_shape = [] if batch_shape is None else batch_shape num_columns = num_rows if num_columns is None else num_columns # We cannot statically infer what the diagonal size should be: if (isinstance(num_rows, ops.Tensor) or isinstance(num_columns, ops.Tensor)): diag_size = math_ops.minimum(num_rows, num_columns) else: # We can statically infer the diagonal size, and whether it is square. if not isinstance(num_rows, compat.integral_types) or not isinstance( num_columns, compat.integral_types): raise TypeError( 'num_rows and num_columns must be positive integer values.') is_square = num_rows == num_columns diag_size = np.minimum(num_rows, num_columns) # We can not statically infer the shape of the tensor. if isinstance(batch_shape, ops.Tensor) or isinstance(diag_size, ops.Tensor): batch_shape = ops.convert_to_tensor( batch_shape, name='shape', dtype=dtypes.int32) diag_shape = array_ops.concat((batch_shape, [diag_size]), axis=0) if not is_square: shape = array_ops.concat((batch_shape, [num_rows, num_columns]), axis=0) # We can statically infer everything. else: batch_shape = list(batch_shape) diag_shape = batch_shape + [diag_size] if not is_square: shape = batch_shape + [num_rows, num_columns] diag_ones = array_ops.ones(diag_shape, dtype=dtype) if is_square: return array_ops.matrix_diag(diag_ones) else: zero_matrix = array_ops.zeros(shape, dtype=dtype) return array_ops.matrix_set_diag(zero_matrix, diag_ones) # pylint: enable=invalid-name,redefined-builtin
tensorflow-master
tensorflow/python/ops/linalg_ops_impl.py
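# Illustrative sketch (not part of the file above): eye() handles both the
# statically known and the tensor-valued shape paths described in the code.
# Assumes TF 2.x, where this helper backs tf.eye; the sizes are examples.
import tensorflow as tf

tf.eye(2, batch_shape=[3])        # static path: shape [3, 2, 2]
n = tf.constant(4)
tf.eye(n, num_columns=n + 1)      # dynamic path: shape [4, 5]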
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilty functions for control flow. This file is necessary to avoid cyclic dependencies between ops.py and control_flow_ops.py. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import traceback from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util.tf_export import tf_export ENABLE_CONTROL_FLOW_V2 = (os.getenv("TF_ENABLE_CONTROL_FLOW_V2", "0") != "0" or os.getenv("TF_ENABLE_COND_V2", "0") != "0" or os.getenv("TF_ENABLE_WHILE_V2", "0") != "0" or os.getenv("TF_ENABLE_TENSOR_ARRAY_V2", "0") != "0") @tf_export(v1=["enable_control_flow_v2"]) def enable_control_flow_v2(): # pylint: disable=invalid-name """Use control flow v2. control flow v2 (cfv2) is an improved version of control flow in TensorFlow with support for higher order derivatives. Enabling cfv2 will change the graph/function representation of control flow, e.g., `tf.while_loop` and `tf.cond` will generate functional `While` and `If` ops instead of low-level `Switch`, `Merge` etc. ops. Note: Importing and running graphs exported with old control flow will still be supported. Calling tf.enable_control_flow_v2() lets you opt-in to this TensorFlow 2.0 feature. """ global ENABLE_CONTROL_FLOW_V2 ENABLE_CONTROL_FLOW_V2 = True @tf_export(v1=["disable_control_flow_v2"]) def disable_control_flow_v2(): # pylint: disable=invalid-name """Opts out of control flow v2. If your code needs tf.disable_control_flow_v2() to be called to work properly please file a bug. """ global ENABLE_CONTROL_FLOW_V2 ENABLE_CONTROL_FLOW_V2 = False def EnableControlFlowV2(graph): """Returns whether control flow v2 should be used in `graph`.""" # Enable new control flow in FuncGraphs (but not legacy _FuncGraphs). # TODO(skyewm): do something better than hasattr without messing up imports. 
return ENABLE_CONTROL_FLOW_V2 or ( graph.building_function and not hasattr(graph, "_captured")) def IsInXLAContext(op): try: xla_compile = op.get_attr("_XlaCompile") if xla_compile: return True except ValueError: pass ctxt = op._get_control_flow_context() # pylint: disable=protected-access return GetContainingXLAContext(ctxt) is not None def InXlaContext(graph): ctxt = graph._get_control_flow_context() # pylint: disable=protected-access return GetContainingXLAContext(ctxt) is not None def GraphOrParentsInXlaContext(graph): while True: if InXlaContext(graph): return True try: graph = graph.outer_graph except AttributeError: return False def IsInWhileLoop(op): ctxt = op._get_control_flow_context() # pylint: disable=protected-access return GetContainingWhileContext(ctxt) is not None def IsInCond(op): ctxt = op._get_control_flow_context() # pylint: disable=protected-access return GetContainingCondContext(ctxt) is not None def IsSwitch(op): """Return true if `op` is a Switch.""" return op.type == "Switch" or op.type == "RefSwitch" def IsMerge(op): """Return true if `op` is a Merge.""" return op.type == "Merge" or op.type == "RefMerge" def IsLoopEnter(op): """Returns true if `op` is an Enter.""" return op.type == "Enter" or op.type == "RefEnter" def IsLoopExit(op): """Return true if `op` is an Exit.""" return op.type == "Exit" or op.type == "RefExit" def IsCondSwitch(op): """Return true if `op` is the Switch for a conditional.""" if not IsSwitch(op): return False if not op.outputs: return False # Switch nodes are not part of the cond control flow context that they # represent, so consider the consumers of its outputs to determine if it is # cond switch or not. A switch is a cond switch iff all its consumers are in # cond contexts. is_cond_switch = True for o in op.outputs: for c in o.consumers(): ctxt = c._get_control_flow_context() # pylint: disable=protected-access if IsLoopEnter(c): ctxt = ctxt.outer_context is_cond_switch = is_cond_switch and (ctxt is not None and ctxt.IsCondContext()) return is_cond_switch def IsCondMerge(op): """Return true if `op` is the Merge for a conditional.""" if not IsMerge(op): return False if not op.inputs: return False # Merge nodes are not part of the cond control flow context that they # represent, so consider the inputs to the merge of to determine if it is # cond merge or not: A merge is a cond merge iff all its inputs are in # cond contexts. 
is_cond_merge = True for i in op.inputs: ctxt = GetOutputContext(i.op) is_cond_merge = is_cond_merge and ctxt is not None and ctxt.IsCondContext() return is_cond_merge def IsLoopSwitch(op): """Return true if `op` is the Switch for a while loop.""" if IsSwitch(op): ctxt = op._get_control_flow_context() # pylint: disable=protected-access return ctxt is not None and ctxt.IsWhileContext() and not IsCondSwitch(op) return False def IsLoopMerge(op): """Return true if `op` is the Merge for a while loop.""" if IsMerge(op): ctxt = op._get_control_flow_context() # pylint: disable=protected-access return ctxt is not None and ctxt.IsWhileContext() and not IsCondMerge(op) return False def IsLoopConstantEnter(op): """Return true iff op is a loop invariant.""" return IsLoopEnter(op) and op.get_attr("is_constant") def GetLoopConstantEnter(value): """Return the enter op if we can infer `value` to be a loop invariant.""" id_ops = {"Switch", "RefSwitch", "Identity", "RefIdentity"} op = value.op while op.type in id_ops: op = op.inputs[0].op return op if IsLoopConstantEnter(op) else None def GetOutputContext(op): """Return the control flow context for the output of an op.""" ctxt = op._get_control_flow_context() # pylint: disable=protected-access # Exit nodes usually have a control flow context, except in the case where the # exit node was imported via import_graph_def (in which case no nodes have # control flow contexts). if ctxt is not None and IsLoopExit(op): ctxt = ctxt.outer_context return ctxt def GetContainingWhileContext(ctxt, stop_ctxt=None): """Returns the first ancestor WhileContext of `ctxt`. Returns `ctxt` if `ctxt` is a WhileContext, or None if `ctxt` is not in a while loop. Args: ctxt: ControlFlowContext stop_ctxt: ControlFlowContext, optional. If provided, the search will end if it sees stop_ctxt. Returns: `ctxt` if `ctxt` is a WhileContext, the most nested WhileContext containing `ctxt`, or None if `ctxt` is not in a while loop. If `stop_ctxt` is not `None`, this returns `ctxt` if it matches `stop_ctxt` in its traversal. """ while ctxt: if ctxt.IsWhileContext() or ctxt == stop_ctxt: return ctxt ctxt = ctxt.outer_context return None def GetContainingXLAContext(ctxt): """Returns the first ancestor XLAContext of `ctxt`. Returns `ctxt` if `ctxt` is a XLAContext, or None if `ctxt` is not in a while loop. Args: ctxt: ControlFlowContext Returns: `ctxt` if `ctxt` is a XLAContext, the most nested XLAContext containing `ctxt`, or None if `ctxt` is not in a while loop. """ while ctxt: if ctxt.IsXLAContext(): return ctxt ctxt = ctxt.outer_context return None def GetContainingCondContext(ctxt): """Returns the first ancestor CondContext of `ctxt`. Returns `ctxt` if `ctxt` is a CondContext, or None if `ctxt` is not in a cond. Args: ctxt: ControlFlowContext Returns: `ctxt` if `ctxt` is a CondContext, the most nested CondContext containing `ctxt`, or None if `ctxt` is not in a cond. """ while ctxt: if ctxt.IsCondContext(): return ctxt ctxt = ctxt.outer_context return None def IsContainingContext(ctxt, maybe_containing_ctxt): """Returns true if `maybe_containing_ctxt` is or contains `ctxt`.""" while ctxt is not maybe_containing_ctxt: if ctxt is None: return False ctxt = ctxt.outer_context return True def OpInContext(op, ctxt): return IsContainingContext(op._get_control_flow_context(), ctxt) # pylint: disable=protected-access def TensorInContext(tensor, ctxt): return OpInContext(tensor.op, ctxt) def CheckInputFromValidContext(op, input_op): """Returns whether `input_op` can be used from `op`s context. 
Conceptually, only inputs from op's while context or any ancestor while context (including outside of any context) are valid. In practice, there are many other edge cases as well. Args: op: Operation input_op: Operation Raises: ValueError: if input_op is from an invalid context. """ op_ctxt = op._get_control_flow_context() # pylint: disable=protected-access input_ctxt = GetOutputContext(input_op) valid = False if not input_ctxt: # input_op isn't in a control flow context. valid = True elif op_ctxt is input_ctxt: # input_op is in the same context as op. valid = True else: while_ctxt = GetContainingWhileContext(op_ctxt) input_while_ctxt = GetContainingWhileContext(input_ctxt) if while_ctxt is None: if input_while_ctxt is None: # Neither op nor input_op is in a while loop, but one or both are in # conds. We allow this, although execution will fail if the branch # corresponding to input_op's cond context isn't taken. valid = True # Invalid if op isn't in a while loop and input_op is. Unless... if IsLoopEnter(op): # WhileContext._BuildLoop clears context for Enter nodes. valid = True if IsSwitch(op): # CondContext.AddValue clears context for Switch nodes. valid = True elif IsContainingContext(while_ctxt, input_while_ctxt): # input_op is in a while loop which contains op's while loop (or not in a # while loop at all). valid = True elif (while_ctxt.grad_state and IsContainingContext(while_ctxt.grad_state.forward_context, input_while_ctxt)): # op is in a gradient context and input_op is in the associated forward # pass context or an ancestor thereof. This case is need to build while # loop gradients. # NOTE(skyewm): we theoretically also need this case for custom gradient # functions that close over tensors from ancestor contexts, but I haven't # verified this. valid = True elif (while_ctxt.grad_state and while_ctxt.grad_state.forward_context is input_while_ctxt._outer_context): # pylint: disable=protected-access # op is in a gradient context and input_op is in a child of the associated # forward pass context. This case is needed for the gradients of while # loops with conds. valid = True elif (input_while_ctxt.grad_state and input_while_ctxt.grad_state.forward_context is while_ctxt): # input_op is in the gradient context of op's context. This case is needed # when the gradient of a while loop gradient is requested (this will # eventually fail unless there is a stop_gradient() or similar). valid = True elif (input_while_ctxt.grad_state and input_ctxt.grad_state.forward_context.grad_state and input_ctxt.grad_state.forward_context.grad_state.forward_context is while_ctxt): # input_op is in the grad grad context of op's context. This case is # needed when the gradient of a while loop gradient is requested (this # will eventually fail unless there is a stop_gradient() or similar). valid = True if not valid: if while_ctxt: error_msg = ( "Cannot use '%s' as input to '%s' because they are in different while" " loops." % (input_op.name, op.name)) else: error_msg = ( "Cannot use '%s' as input to '%s' because '%s' is in a while loop." % (input_op.name, op.name, input_op.name)) # Log the error message plus the relevant stack traces. The stacks may be # useful for debugging this error, but we don't want to raise an # unreadable exception. 
log_msg = error_msg log_msg += "\n\n%s while context: %s" % (op.name, while_ctxt) log_msg += "\n%s while context: %s" % (input_op.name, input_while_ctxt) log_msg += "\n\nTraceback for %s:\n%s\nTraceback for %s:\n%s\n" % ( op.name, "".join(traceback.format_list(op.traceback)), input_op.name, "".join(traceback.format_list(input_op.traceback))) logging.info(log_msg) raise ValueError(error_msg + " See info log for more details.") def GetWhileContext(op): """Get the WhileContext to which this op belongs.""" ctxt = op._get_control_flow_context() # pylint: disable=protected-access if ctxt: ctxt = ctxt.GetWhileContext() return ctxt
tensorflow-master
tensorflow/python/ops/control_flow_util.py
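# Illustrative sketch (not part of the file above): toggling control flow v2
# from TF1-style code, which flips the ENABLE_CONTROL_FLOW_V2 flag defined in
# this module. Assumes the tf.compat.v1 endpoints exported above.
import tensorflow.compat.v1 as tf1

tf1.enable_control_flow_v2()    # tf.cond / tf.while_loop now build If / While ops
# ... build graphs ...
tf1.disable_control_flow_v2()   # opt back out if needed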
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Operations for linear algebra.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import gen_linalg_ops from tensorflow.python.ops import linalg_ops_impl from tensorflow.python.ops import map_fn from tensorflow.python.ops import math_ops # pylint: disable=wildcard-import from tensorflow.python.ops.gen_linalg_ops import * # pylint: enable=wildcard-import from tensorflow.python.util import deprecation from tensorflow.python.util.tf_export import tf_export # Names below are lower_case. # pylint: disable=invalid-name def _RegularizedGramianCholesky(matrix, l2_regularizer, first_kind): r"""Computes Cholesky factorization of regularized gramian matrix. Below we will use the following notation for each pair of matrix and right-hand sides in the batch: `matrix`=\\(A \in \Re^{m \times n}\\), `output`=\\(C \in \Re^{\min(m, n) \times \min(m,n)}\\), `l2_regularizer`=\\(\lambda\\). If `first_kind` is True, returns the Cholesky factorization \\(L\\) such that \\(L L^H = A^H A + \lambda I\\). If `first_kind` is False, returns the Cholesky factorization \\(L\\) such that \\(L L^H = A A^H + \lambda I\\). Args: matrix: `Tensor` of shape `[..., M, N]`. l2_regularizer: 0-D `double` `Tensor`. Ignored if `fast=False`. first_kind: bool. Controls what gramian matrix to factor. Returns: output: `Tensor` of shape `[..., min(M,N), min(M,N)]` whose inner-most 2 dimensions contain the Cholesky factors \\(L\\) described above. """ gramian = math_ops.matmul( matrix, matrix, adjoint_a=first_kind, adjoint_b=not first_kind) if isinstance(l2_regularizer, ops.Tensor) or l2_regularizer != 0: matrix_shape = array_ops.shape(matrix) batch_shape = matrix_shape[:-2] if first_kind: small_dim = matrix_shape[-1] else: small_dim = matrix_shape[-2] identity = eye(small_dim, batch_shape=batch_shape, dtype=matrix.dtype) small_dim_static = matrix.shape[-1 if first_kind else -2] identity.set_shape( matrix.shape[:-2].concatenate([small_dim_static, small_dim_static])) gramian += l2_regularizer * identity return gen_linalg_ops.cholesky(gramian) @tf_export( 'linalg.cholesky_solve', v1=['linalg.cholesky_solve', 'cholesky_solve']) @deprecation.deprecated_endpoints('cholesky_solve') def cholesky_solve(chol, rhs, name=None): """Solves systems of linear eqns `A X = RHS`, given Cholesky factorizations. ```python # Solve 10 separate 2x2 linear systems: A = ... # shape 10 x 2 x 2 RHS = ... 
# shape 10 x 2 x 1 chol = tf.linalg.cholesky(A) # shape 10 x 2 x 2 X = tf.linalg.cholesky_solve(chol, RHS) # shape 10 x 2 x 1 # tf.matmul(A, X) ~ RHS X[3, :, 0] # Solution to the linear system A[3, :, :] x = RHS[3, :, 0] # Solve five linear systems (K = 5) for every member of the length 10 batch. A = ... # shape 10 x 2 x 2 RHS = ... # shape 10 x 2 x 5 ... X[3, :, 2] # Solution to the linear system A[3, :, :] x = RHS[3, :, 2] ``` Args: chol: A `Tensor`. Must be `float32` or `float64`, shape is `[..., M, M]`. Cholesky factorization of `A`, e.g. `chol = tf.linalg.cholesky(A)`. For that reason, only the lower triangular parts (including the diagonal) of the last two dimensions of `chol` are used. The strictly upper part is assumed to be zero and not accessed. rhs: A `Tensor`, same type as `chol`, shape is `[..., M, K]`. name: A name to give this `Op`. Defaults to `cholesky_solve`. Returns: Solution to `A x = rhs`, shape `[..., M, K]`. """ # To solve C C^* x = rhs, we # 1. Solve C y = rhs for y, thus y = C^* x # 2. Solve C^* x = y for x with ops.name_scope(name, 'cholesky_solve', [chol, rhs]): y = gen_linalg_ops.matrix_triangular_solve( chol, rhs, adjoint=False, lower=True) x = gen_linalg_ops.matrix_triangular_solve( chol, y, adjoint=True, lower=True) return x @tf_export('eye', 'linalg.eye') def eye(num_rows, num_columns=None, batch_shape=None, dtype=dtypes.float32, name=None): """Construct an identity matrix, or a batch of matrices. ```python # Construct one identity matrix. tf.eye(2) ==> [[1., 0.], [0., 1.]] # Construct a batch of 3 identity matricies, each 2 x 2. # batch_identity[i, :, :] is a 2 x 2 identity matrix, i = 0, 1, 2. batch_identity = tf.eye(2, batch_shape=[3]) # Construct one 2 x 3 "identity" matrix tf.eye(2, num_columns=3) ==> [[ 1., 0., 0.], [ 0., 1., 0.]] ``` Args: num_rows: Non-negative `int32` scalar `Tensor` giving the number of rows in each batch matrix. num_columns: Optional non-negative `int32` scalar `Tensor` giving the number of columns in each batch matrix. Defaults to `num_rows`. batch_shape: A list or tuple of Python integers or a 1-D `int32` `Tensor`. If provided, the returned `Tensor` will have leading batch dimensions of this shape. dtype: The type of an element in the resulting `Tensor` name: A name for this `Op`. Defaults to "eye". Returns: A `Tensor` of shape `batch_shape + [num_rows, num_columns]` """ return linalg_ops_impl.eye(num_rows, num_columns=num_columns, batch_shape=batch_shape, dtype=dtype, name=name) @tf_export('linalg.lstsq', v1=['linalg.lstsq', 'matrix_solve_ls']) @deprecation.deprecated_endpoints('matrix_solve_ls') def matrix_solve_ls(matrix, rhs, l2_regularizer=0.0, fast=True, name=None): r"""Solves one or more linear least-squares problems. `matrix` is a tensor of shape `[..., M, N]` whose inner-most 2 dimensions form `M`-by-`N` matrices. Rhs is a tensor of shape `[..., M, K]` whose inner-most 2 dimensions form `M`-by-`K` matrices. The computed output is a `Tensor` of shape `[..., N, K]` whose inner-most 2 dimensions form `M`-by-`K` matrices that solve the equations `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]` in the least squares sense. Below we will use the following notation for each pair of matrix and right-hand sides in the batch: `matrix`=\\(A \in \Re^{m \times n}\\), `rhs`=\\(B \in \Re^{m \times k}\\), `output`=\\(X \in \Re^{n \times k}\\), `l2_regularizer`=\\(\lambda\\). If `fast` is `True`, then the solution is computed by solving the normal equations using Cholesky decomposition. 
Specifically, if \\(m \ge n\\) then \\(X = (A^T A + \lambda I)^{-1} A^T B\\), which solves the least-squares problem \\(X = \mathrm{argmin}_{Z \in \Re^{n \times k}} ||A Z - B||_F^2 + \lambda ||Z||_F^2\\). If \\(m \lt n\\) then `output` is computed as \\(X = A^T (A A^T + \lambda I)^{-1} B\\), which (for \\(\lambda = 0\\)) is the minimum-norm solution to the under-determined linear system, i.e. \\(X = \mathrm{argmin}_{Z \in \Re^{n \times k}} ||Z||_F^2 \\), subject to \\(A Z = B\\). Notice that the fast path is only numerically stable when \\(A\\) is numerically full rank and has a condition number \\(\mathrm{cond}(A) \lt \frac{1}{\sqrt{\epsilon_{mach}}}\\) or\\(\lambda\\) is sufficiently large. If `fast` is `False` an algorithm based on the numerically robust complete orthogonal decomposition is used. This computes the minimum-norm least-squares solution, even when \\(A\\) is rank deficient. This path is typically 6-7 times slower than the fast path. If `fast` is `False` then `l2_regularizer` is ignored. Args: matrix: `Tensor` of shape `[..., M, N]`. rhs: `Tensor` of shape `[..., M, K]`. l2_regularizer: 0-D `double` `Tensor`. Ignored if `fast=False`. fast: bool. Defaults to `True`. name: string, optional name of the operation. Returns: output: `Tensor` of shape `[..., N, K]` whose inner-most 2 dimensions form `M`-by-`K` matrices that solve the equations `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]` in the least squares sense. Raises: NotImplementedError: linalg.lstsq is currently disabled for complex128 and l2_regularizer != 0 due to poor accuracy. """ # pylint: disable=long-lambda def _use_composite_impl(fast, tensor_shape): """Determines whether to use the composite or specialized CPU kernel. When the total size of the tensor is larger than the cache size and the batch size is large compared to the smallest matrix dimension, then the composite implementation is inefficient since it has to read the entire tensor from memory multiple times. In this case we fall back to the original CPU kernel, which does all the computational steps on each matrix separately. Only fast mode is supported by the composite impl, so `False` is returned if `fast` is `False`. Args: fast: bool indicating if fast mode in the solver was requested. tensor_shape: The shape of the tensor. Returns: True if the composite impl should be used. False otherwise. 
""" if fast is False: return False batch_shape = tensor_shape[:-2] matrix_shape = tensor_shape[-2:] if not tensor_shape.is_fully_defined(): return True tensor_size = tensor_shape.num_elements() * matrix.dtype.size is_io_bound = batch_shape.num_elements() > np.min(matrix_shape) L2_CACHE_SIZE_GUESSTIMATE = 256000 if tensor_size > L2_CACHE_SIZE_GUESSTIMATE and is_io_bound: return False else: return True def _overdetermined(matrix, rhs, l2_regularizer): """Computes (A^H*A + l2_regularizer)^{-1} * A^H * rhs.""" chol = _RegularizedGramianCholesky( matrix, l2_regularizer=l2_regularizer, first_kind=True) return cholesky_solve(chol, math_ops.matmul(matrix, rhs, adjoint_a=True)) def _underdetermined(matrix, rhs, l2_regularizer): """Computes A^H * (A*A^H + l2_regularizer)^{-1} * rhs.""" chol = _RegularizedGramianCholesky( matrix, l2_regularizer=l2_regularizer, first_kind=False) return math_ops.matmul(matrix, cholesky_solve(chol, rhs), adjoint_a=True) def _composite_impl(matrix, rhs, l2_regularizer): """Composite implementation of matrix_solve_ls that supports GPU.""" with ops.name_scope(name, 'matrix_solve_ls', [matrix, rhs, l2_regularizer]): matrix_shape = matrix.get_shape()[-2:] if matrix_shape.is_fully_defined(): if matrix_shape[-2] >= matrix_shape[-1]: return _overdetermined(matrix, rhs, l2_regularizer) else: return _underdetermined(matrix, rhs, l2_regularizer) else: # We have to defer determining the shape to runtime and use # conditional execution of the appropriate graph. matrix_shape = array_ops.shape(matrix)[-2:] return control_flow_ops.cond( matrix_shape[-2] >= matrix_shape[-1], lambda: _overdetermined(matrix, rhs, l2_regularizer), lambda: _underdetermined(matrix, rhs, l2_regularizer)) matrix = ops.convert_to_tensor(matrix, name='matrix') if matrix.dtype == dtypes.complex128 and l2_regularizer != 0: # TODO(rmlarsen): Investigate and fix accuracy bug. raise NotImplementedError('matrix_solve_ls is currently disabled for ' 'complex128 and l2_regularizer != 0 due to ' 'poor accuracy.') tensor_shape = matrix.get_shape() if _use_composite_impl(fast, tensor_shape): return _composite_impl(matrix, rhs, l2_regularizer) else: return gen_linalg_ops.matrix_solve_ls( matrix, rhs, l2_regularizer, fast=fast, name=name) @tf_export('linalg.eigh', v1=['linalg.eigh', 'self_adjoint_eig']) @deprecation.deprecated_endpoints('self_adjoint_eig') def self_adjoint_eig(tensor, name=None): """Computes the eigen decomposition of a batch of self-adjoint matrices. Computes the eigenvalues and eigenvectors of the innermost N-by-N matrices in `tensor` such that `tensor[...,:,:] * v[..., :,i] = e[..., i] * v[...,:,i]`, for i=0...N-1. Args: tensor: `Tensor` of shape `[..., N, N]`. Only the lower triangular part of each inner inner matrix is referenced. name: string, optional name of the operation. Returns: e: Eigenvalues. Shape is `[..., N]`. Sorted in non-decreasing order. v: Eigenvectors. Shape is `[..., N, N]`. The columns of the inner most matrices contain eigenvectors of the corresponding matrices in `tensor` """ e, v = gen_linalg_ops.self_adjoint_eig_v2(tensor, compute_v=True, name=name) return e, v @tf_export('linalg.eigvalsh', v1=['linalg.eigvalsh', 'self_adjoint_eigvals']) @deprecation.deprecated_endpoints('self_adjoint_eigvals') def self_adjoint_eigvals(tensor, name=None): """Computes the eigenvalues of one or more self-adjoint matrices. 
Note: If your program backpropagates through this function, you should replace it with a call to tf.linalg.eigh (possibly ignoring the second output) to avoid computing the eigen decomposition twice. This is because the eigenvectors are used to compute the gradient w.r.t. the eigenvalues. See _SelfAdjointEigV2Grad in linalg_grad.py. Args: tensor: `Tensor` of shape `[..., N, N]`. name: string, optional name of the operation. Returns: e: Eigenvalues. Shape is `[..., N]`. The vector `e[..., :]` contains the `N` eigenvalues of `tensor[..., :, :]`. """ e, _ = gen_linalg_ops.self_adjoint_eig_v2(tensor, compute_v=False, name=name) return e @tf_export('linalg.svd', v1=['linalg.svd', 'svd']) @deprecation.deprecated_endpoints('svd') def svd(tensor, full_matrices=False, compute_uv=True, name=None): r"""Computes the singular value decompositions of one or more matrices. Computes the SVD of each inner matrix in `tensor` such that `tensor[..., :, :] = u[..., :, :] * diag(s[..., :, :]) * transpose(conj(v[..., :, :]))` ```python # a is a tensor. # s is a tensor of singular values. # u is a tensor of left singular vectors. # v is a tensor of right singular vectors. s, u, v = svd(a) s = svd(a, compute_uv=False) ``` Args: tensor: `Tensor` of shape `[..., M, N]`. Let `P` be the minimum of `M` and `N`. full_matrices: If true, compute full-sized `u` and `v`. If false (the default), compute only the leading `P` singular vectors. Ignored if `compute_uv` is `False`. compute_uv: If `True` then left and right singular vectors will be computed and returned in `u` and `v`, respectively. Otherwise, only the singular values will be computed, which can be significantly faster. name: string, optional name of the operation. Returns: s: Singular values. Shape is `[..., P]`. The values are sorted in reverse order of magnitude, so s[..., 0] is the largest value, s[..., 1] is the second largest, etc. u: Left singular vectors. If `full_matrices` is `False` (default) then shape is `[..., M, P]`; if `full_matrices` is `True` then shape is `[..., M, M]`. Not returned if `compute_uv` is `False`. v: Right singular vectors. If `full_matrices` is `False` (default) then shape is `[..., N, P]`. If `full_matrices` is `True` then shape is `[..., N, N]`. Not returned if `compute_uv` is `False`. @compatibility(numpy) Mostly equivalent to numpy.linalg.svd, except that * The order of output arguments here is `s`, `u`, `v` when `compute_uv` is `True`, as opposed to `u`, `s`, `v` for numpy.linalg.svd. * full_matrices is `False` by default as opposed to `True` for numpy.linalg.svd. * tf.linalg.svd uses the standard definition of the SVD \\(A = U \Sigma V^H\\), such that the left singular vectors of `a` are the columns of `u`, while the right singular vectors of `a` are the columns of `v`. On the other hand, numpy.linalg.svd returns the adjoint \\(V^H\\) as the third output argument. ```python import tensorflow as tf import numpy as np s, u, v = tf.linalg.svd(a) tf_a_approx = tf.matmul(u, tf.matmul(tf.linalg.diag(s), v, adjoint_b=True)) u, s, v_adj = np.linalg.svd(a, full_matrices=False) np_a_approx = np.dot(u, np.dot(np.diag(s), v_adj)) # tf_a_approx and np_a_approx should be numerically close. 
``` @end_compatibility """ s, u, v = gen_linalg_ops.svd( tensor, compute_uv=compute_uv, full_matrices=full_matrices, name=name) if compute_uv: return math_ops.real(s), u, v else: return math_ops.real(s) # pylint: disable=redefined-builtin @tf_export('norm', 'linalg.norm', v1=[]) def norm_v2(tensor, ord='euclidean', axis=None, keepdims=None, name=None): r"""Computes the norm of vectors, matrices, and tensors. This function can compute several different vector norms (the 1-norm, the Euclidean or 2-norm, the inf-norm, and in general the p-norm for p > 0) and matrix norms (Frobenius, 1-norm, 2-norm and inf-norm). Args: tensor: `Tensor` of types `float32`, `float64`, `complex64`, `complex128` ord: Order of the norm. Supported values are `'fro'`, `'euclidean'`, `1`, `2`, `np.inf` and any positive real number yielding the corresponding p-norm. Default is `'euclidean'` which is equivalent to Frobenius norm if `tensor` is a matrix and equivalent to 2-norm for vectors. Some restrictions apply: a) The Frobenius norm `'fro'` is not defined for vectors, b) If axis is a 2-tuple (matrix norm), only `'euclidean'`, '`fro'`, `1`, `2`, `np.inf` are supported. See the description of `axis` on how to compute norms for a batch of vectors or matrices stored in a tensor. axis: If `axis` is `None` (the default), the input is considered a vector and a single vector norm is computed over the entire set of values in the tensor, i.e. `norm(tensor, ord=ord)` is equivalent to `norm(reshape(tensor, [-1]), ord=ord)`. If `axis` is a Python integer, the input is considered a batch of vectors, and `axis` determines the axis in `tensor` over which to compute vector norms. If `axis` is a 2-tuple of Python integers it is considered a batch of matrices and `axis` determines the axes in `tensor` over which to compute a matrix norm. Negative indices are supported. Example: If you are passing a tensor that can be either a matrix or a batch of matrices at runtime, pass `axis=[-2,-1]` instead of `axis=None` to make sure that matrix norms are computed. keepdims: If True, the axis indicated in `axis` are kept with size 1. Otherwise, the dimensions in `axis` are removed from the output shape. name: The name of the op. Returns: output: A `Tensor` of the same type as tensor, containing the vector or matrix norms. If `keepdims` is True then the rank of output is equal to the rank of `tensor`. Otherwise, if `axis` is none the output is a scalar, if `axis` is an integer, the rank of `output` is one less than the rank of `tensor`, if `axis` is a 2-tuple the rank of `output` is two less than the rank of `tensor`. Raises: ValueError: If `ord` or `axis` is invalid. @compatibility(numpy) Mostly equivalent to numpy.linalg.norm. Not supported: ord <= 0, 2-norm for matrices, nuclear norm. Other differences: a) If axis is `None`, treats the flattened `tensor` as a vector regardless of rank. b) Explicitly supports 'euclidean' norm as the default, including for higher order tensors. @end_compatibility """ return norm(tensor=tensor, ord=ord, axis=axis, keepdims=keepdims, name=name) # pylint: disable=redefined-builtin @tf_export(v1=['norm', 'linalg.norm']) @deprecation.deprecated_args( None, 'keep_dims is deprecated, use keepdims instead', 'keep_dims') def norm(tensor, ord='euclidean', axis=None, keepdims=None, name=None, keep_dims=None): r"""Computes the norm of vectors, matrices, and tensors. 
This function can compute several different vector norms (the 1-norm, the Euclidean or 2-norm, the inf-norm, and in general the p-norm for p > 0) and matrix norms (Frobenius, 1-norm, 2-norm and inf-norm). Args: tensor: `Tensor` of types `float32`, `float64`, `complex64`, `complex128` ord: Order of the norm. Supported values are 'fro', 'euclidean', `1`, `2`, `np.inf` and any positive real number yielding the corresponding p-norm. Default is 'euclidean' which is equivalent to Frobenius norm if `tensor` is a matrix and equivalent to 2-norm for vectors. Some restrictions apply: a) The Frobenius norm `fro` is not defined for vectors, b) If axis is a 2-tuple (matrix norm), only 'euclidean', 'fro', `1`, `2`, `np.inf` are supported. See the description of `axis` on how to compute norms for a batch of vectors or matrices stored in a tensor. axis: If `axis` is `None` (the default), the input is considered a vector and a single vector norm is computed over the entire set of values in the tensor, i.e. `norm(tensor, ord=ord)` is equivalent to `norm(reshape(tensor, [-1]), ord=ord)`. If `axis` is a Python integer, the input is considered a batch of vectors, and `axis` determines the axis in `tensor` over which to compute vector norms. If `axis` is a 2-tuple of Python integers it is considered a batch of matrices and `axis` determines the axes in `tensor` over which to compute a matrix norm. Negative indices are supported. Example: If you are passing a tensor that can be either a matrix or a batch of matrices at runtime, pass `axis=[-2,-1]` instead of `axis=None` to make sure that matrix norms are computed. keepdims: If True, the axis indicated in `axis` are kept with size 1. Otherwise, the dimensions in `axis` are removed from the output shape. name: The name of the op. keep_dims: Deprecated alias for `keepdims`. Returns: output: A `Tensor` of the same type as tensor, containing the vector or matrix norms. If `keepdims` is True then the rank of output is equal to the rank of `tensor`. Otherwise, if `axis` is none the output is a scalar, if `axis` is an integer, the rank of `output` is one less than the rank of `tensor`, if `axis` is a 2-tuple the rank of `output` is two less than the rank of `tensor`. Raises: ValueError: If `ord` or `axis` is invalid. @compatibility(numpy) Mostly equivalent to numpy.linalg.norm. Not supported: ord <= 0, 2-norm for matrices, nuclear norm. Other differences: a) If axis is `None`, treats the flattened `tensor` as a vector regardless of rank. b) Explicitly supports 'euclidean' norm as the default, including for higher order tensors. 
@end_compatibility """ keepdims = deprecation.deprecated_argument_lookup('keepdims', keepdims, 'keep_dims', keep_dims) if keepdims is None: keepdims = False is_matrix_norm = ((isinstance(axis, tuple) or isinstance(axis, list)) and len(axis) == 2) if is_matrix_norm: axis = tuple(axis) if (not isinstance(axis[0], int) or not isinstance(axis[1], int) or axis[0] == axis[1]): raise ValueError( "'axis' must be None, an integer, or a tuple of 2 unique integers") supported_matrix_norms = ['euclidean', 'fro', 1, 2, np.inf] if ord not in supported_matrix_norms: raise ValueError("'ord' must be a supported matrix norm in %s, got %s" % (supported_matrix_norms, ord)) else: if not (isinstance(axis, int) or axis is None): raise ValueError( "'axis' must be None, an integer, or a tuple of 2 unique integers") supported_vector_norms = ['euclidean', 1, 2, np.inf] if (not np.isreal(ord) or ord <= 0) and ord not in supported_vector_norms: raise ValueError("'ord' must be a supported vector norm, got %s" % ord) if axis is not None: axis = (axis,) with ops.name_scope(name, 'norm', [tensor]): tensor = ops.convert_to_tensor(tensor) if ord in ['fro', 'euclidean', 2, 2.0]: if is_matrix_norm and ord in [2, 2.0]: rank = array_ops.rank(tensor) positive_axis = map_fn.map_fn( lambda i: control_flow_ops.cond(i >= 0, lambda: i, lambda: i + rank), ops.convert_to_tensor(axis)) axes = math_ops.range(rank) perm_before = array_ops.concat( [array_ops.setdiff1d(axes, positive_axis)[0], positive_axis], axis=0) perm_after = map_fn.map_fn( lambda i: math_ops.cast( array_ops.squeeze( array_ops.where(math_ops.equal(perm_before, i))), dtype=dtypes.int32), axes) permed = array_ops.transpose(tensor, perm=perm_before) matrix_2_norm = array_ops.expand_dims( math_ops.reduce_max( math_ops.abs(gen_linalg_ops.svd(permed, compute_uv=False)[0]), axis=-1, keepdims=True), axis=-1) result = array_ops.transpose(matrix_2_norm, perm=perm_after) else: result = math_ops.sqrt( math_ops.reduce_sum( tensor * math_ops.conj(tensor), axis, keepdims=True)) # TODO(rmlarsen): Replace with the following, once gradients are defined # result = math_ops.reduce_euclidean_norm(tensor, axis, keepdims=True) else: result = math_ops.abs(tensor) if ord == 1: sum_axis = None if axis is None else axis[0] result = math_ops.reduce_sum(result, sum_axis, keepdims=True) if is_matrix_norm: result = math_ops.reduce_max(result, axis[-1], keepdims=True) elif ord == np.inf: if is_matrix_norm: result = math_ops.reduce_sum(result, axis[1], keepdims=True) max_axis = None if axis is None else axis[0] result = math_ops.reduce_max(result, max_axis, keepdims=True) else: # General p-norms (positive p only) result = math_ops.pow( math_ops.reduce_sum(math_ops.pow(result, ord), axis, keepdims=True), 1.0 / ord) if not keepdims: result = array_ops.squeeze(result, axis) return result # pylint: enable=invalid-name,redefined-builtin
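# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): the snippet
# below exercises cholesky_solve and matrix_solve_ls as documented above on a
# small batch of symmetric positive-definite systems. The batch sizes and the
# regularization value are invented for the example; it assumes eager
# execution (or a tf.Session to evaluate the resulting tensors).
import numpy as np
import tensorflow as tf

# Build a batch of 4 random symmetric positive-definite 3x3 matrices.
m = np.random.randn(4, 3, 3).astype(np.float32)
a = tf.matmul(m, m, adjoint_b=True) + 1e-3 * tf.eye(3, batch_shape=[4])
rhs = tf.constant(np.random.randn(4, 3, 2).astype(np.float32))

# Solve A X = RHS via an explicit Cholesky factorization...
chol = tf.linalg.cholesky(a)
x_chol = tf.linalg.cholesky_solve(chol, rhs)

# ...or let lstsq solve the same (square, well-conditioned) system in the
# least-squares sense; the two solutions should agree closely here.
x_ls = tf.linalg.lstsq(a, rhs, l2_regularizer=0.0, fast=True)
# ---------------------------------------------------------------------------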
tensorflow-master
tensorflow/python/ops/linalg_ops.py
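# ---------------------------------------------------------------------------
# Usage sketch for the norm() axis semantics documented in linalg_ops.py
# above (illustrative only; the tensor values are made up for the example).
import tensorflow as tf

t = tf.reshape(tf.range(24, dtype=tf.float32), [2, 3, 4])

flat_norm = tf.norm(t)                             # one norm over all 24 values
row_norms = tf.norm(t, axis=-1)                    # vector norms along the last axis -> shape [2, 3]
fro_norms = tf.norm(t, ord='fro', axis=[-2, -1])   # Frobenius norm per 3x4 matrix -> shape [2]
# ---------------------------------------------------------------------------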
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Ops for tensor_forest.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python import ops from tensorflow.python.ops import gen_tensor_forest_ops from tensorflow.python.ops import resources from tensorflow.python.training import saver class TreeVariableSaveable(saver.BaseSaverBuilder.SaveableObject): """Resource that holds a tree.""" def __init__(self, type_name, name, container, config, resource_handle_func, create_op_func, is_initialized_op_func, serialize_op_func, deserialize_op_func): with ops.name_scope(name, type_name) as name: self._resource_handle = resource_handle_func( container, shared_name=name, name=name) self._is_initialized_op = is_initialized_op_func(self._resource_handle) tensor = serialize_op_func(self._resource_handle) self._create_op = create_op_func(self._resource_handle, config) # slice_spec is useful for saving a slice from a variable. # It's not meaningful the tree variable. So we just pass an empty # value. slice_spec = '' specs = [saver.BaseSaverBuilder.SaveSpec(tensor, slice_spec, name)] super(TreeVariableSaveable, self).__init__(self._resource_handle, specs, name) ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, self) resources.register_resource(self._resource_handle, self._create_op, self._is_initialized_op) self._deserialize_op_func = deserialize_op_func def restore(self, restored_tensors, unused_restored_shapes): """Restores the associated tree from 'restored_tensors'. Args: restored_tensors: the tensors that were loaded from a checkpoint. unused_restored_shapes: the shapes this object should conform to after restore. Not meaningful for trees. Returns: The operation that restores the state of the tree variable. """ with ops.control_dependencies([self._create_op]): return self._deserialize_op_func( self._resource_handle, restored_tensors[0], ) @property def resource(self): return self._resource_handle def tree_variable(tree_config, name, container=None): return TreeVariableSaveable( 'TreeVariable', name, container, tree_config, gen_tensor_forest_ops.tensor_forest_tree_resource_handle_op, gen_tensor_forest_ops.tensor_forest_create_tree_variable, gen_tensor_forest_ops.tensor_forest_tree_is_initialized_op, gen_tensor_forest_ops.tensor_forest_tree_serialize, gen_tensor_forest_ops.tensor_forest_tree_deserialize).resource class ForestVariables(object): """Resource that holds all trees from a forest.""" def __init__(self, params, tree_configs=None): self._variables = [] for i in range(params.n_trees): tree_config = '' if tree_configs is not None: tree_config = tree_configs[i] self._variables.append(tree_variable( tree_config, 'tree-%s' % i, )) def __getitem__(self, t): return self._variables[t]
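# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): one way the
# ForestVariables container defined above might be instantiated. The
# `_FakeParams` namedtuple and the n_trees value are assumptions for the
# example; any object exposing an `n_trees` attribute would do.
import collections

_FakeParams = collections.namedtuple('_FakeParams', ['n_trees'])

def _build_forest():
  params = _FakeParams(n_trees=3)
  forest = ForestVariables(params)   # creates tree-0 .. tree-2 resources
  first_tree_resource = forest[0]    # resource handle for the first tree
  return first_tree_resource
# ---------------------------------------------------------------------------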
tensorflow-master
tensorflow/python/ops/tensor_forest_ops.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Benchmark for control flow ops.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import time from tensorflow.python.client import session from tensorflow.python.eager import context from tensorflow.python.eager import function from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import control_flow_util from tensorflow.python.ops import math_ops from tensorflow.python.ops import random_ops from tensorflow.python.platform import test class CondWithManyIntermediatesBenchmark(test.Benchmark): """Checks the runtime performance of outputting all intermediates.""" NUM_INTERMEDIATES = 1000 NUM_ITERS = 500 NUM_WARM_UP_ITERS = 50 def _create_cond(self, x): def branch_fn(): # Use a random value so the adds can't be constant folded. return x + sum(random_ops.random_normal([]) for _ in range(self.NUM_INTERMEDIATES)) # Use a dynamic predicate to make sure the cond isn't constant folded. 
return control_flow_ops.cond(math_ops.not_equal(x, -1), branch_fn, lambda: 0.0) def _benchmark_defun(self): """Benchmarks cond in a defun.""" @function.defun def cond_fn(x): return self._create_cond(x) # Warm up for _ in range(self.NUM_WARM_UP_ITERS): cond_fn(0.0) start_time = time.time() for _ in range(self.NUM_ITERS): cond_fn(0.0) self.report_benchmark( wall_time=time.time() - start_time, iters=self.NUM_ITERS) def _benchmark_graph(self): """Benchmarks cond in legacy graph mode.""" with context.graph_mode(): with ops.Graph().as_default(): x = array_ops.placeholder(dtypes.float32) cond_val = self._create_cond(x) with session.Session() as sess: cond_fn = sess.make_callable(cond_val, [x]) # Warm up for _ in range(self.NUM_WARM_UP_ITERS): cond_fn(0.0) start_time = time.time() for _ in range(self.NUM_ITERS): cond_fn(0.0) self.report_benchmark( wall_time=time.time() - start_time, iters=self.NUM_ITERS) def benchmark_cond_v1_defun(self): old_val = control_flow_util.ENABLE_CONTROL_FLOW_V2 control_flow_util.ENABLE_CONTROL_FLOW_V2 = False self._benchmark_defun() control_flow_util.ENABLE_CONTROL_FLOW_V2 = old_val def benchmark_cond_v2_defun(self): old_val = control_flow_util.ENABLE_CONTROL_FLOW_V2 control_flow_util.ENABLE_CONTROL_FLOW_V2 = True self._benchmark_defun() control_flow_util.ENABLE_CONTROL_FLOW_V2 = old_val def benchmark_cond_v1_graph(self): old_val = control_flow_util.ENABLE_CONTROL_FLOW_V2 control_flow_util.ENABLE_CONTROL_FLOW_V2 = False self._benchmark_graph() control_flow_util.ENABLE_CONTROL_FLOW_V2 = old_val def benchmark_cond_v2_graph(self): old_val = control_flow_util.ENABLE_CONTROL_FLOW_V2 control_flow_util.ENABLE_CONTROL_FLOW_V2 = True self._benchmark_graph() control_flow_util.ENABLE_CONTROL_FLOW_V2 = old_val if __name__ == "__main__": ops.enable_eager_execution() test.main()
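# ---------------------------------------------------------------------------
# The benchmarks above all follow the same warm-up-then-measure pattern; a
# minimal standalone version of that pattern is sketched below (the callable
# and iteration counts are placeholders, not values taken from the original
# file).
import time

def _time_callable(fn, num_warm_up_iters=50, num_iters=500):
  """Returns average wall time per call of `fn` after a warm-up phase."""
  for _ in range(num_warm_up_iters):
    fn()
  start_time = time.time()
  for _ in range(num_iters):
    fn()
  return (time.time() - start_time) / num_iters
# ---------------------------------------------------------------------------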
tensorflow-master
tensorflow/python/ops/control_flow_ops_benchmark.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Operations often used for initializing tensors. All variable initializers returned by functions in this file should have the following signature: def _initializer(shape, dtype=dtypes.float32, partition_info=None): Args: shape: List of `int` representing the shape of the output `Tensor`. Some initializers may also be able to accept a `Tensor`. dtype: (Optional) Type of the output `Tensor`. partition_info: (Optional) variable_scope._PartitionInfo object holding additional information about how the variable is partitioned. May be `None` if the variable is not partitioned. Returns: A `Tensor` of type `dtype` and `shape`. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import math import numpy as np from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import tensor_shape from tensorflow.python.ops import array_ops from tensorflow.python.ops import gen_linalg_ops from tensorflow.python.ops import linalg_ops_impl from tensorflow.python.ops import math_ops from tensorflow.python.ops import random_ops from tensorflow.python.util import deprecation from tensorflow.python.util.deprecation import deprecated from tensorflow.python.util.deprecation import deprecated_arg_values from tensorflow.python.util.deprecation import deprecated_args from tensorflow.python.util.tf_export import tf_export class Initializer(object): """Initializer base class: all initializers inherit from this class.""" def __call__(self, shape, dtype=None, partition_info=None): """Returns a tensor object initialized as specified by the initializer. Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. If not provided use the initializer dtype. partition_info: Optional information about the possible partitioning of a tensor. """ raise NotImplementedError def get_config(self): """Returns the configuration of the initializer as a JSON-serializable dict. Returns: A JSON-serializable Python dict. """ return {} @classmethod def from_config(cls, config): """Instantiates an initializer from a configuration dictionary. Example: ```python initializer = RandomUniform(-1, 1) config = initializer.get_config() initializer = RandomUniform.from_config(config) ``` Args: config: A Python dictionary. It will typically be the output of `get_config`. Returns: An Initializer instance. 
""" return cls(**config) @tf_export(v1=["initializers.zeros", "zeros_initializer"]) @deprecation.deprecated_endpoints("initializers.zeros") class Zeros(Initializer): """Initializer that generates tensors initialized to 0.""" @deprecated_args(None, "Call initializer instance with the dtype argument instead " "of passing it to the constructor", "dtype") def __init__(self, dtype=dtypes.float32): self.dtype = dtypes.as_dtype(dtype) def __call__(self, shape, dtype=None, partition_info=None): if dtype is None: dtype = self.dtype return array_ops.zeros(shape, dtype) def get_config(self): return {"dtype": self.dtype.name} @tf_export(v1=["initializers.ones", "ones_initializer"]) @deprecation.deprecated_endpoints("initializers.ones", "ones_initializer") class Ones(Initializer): """Initializer that generates tensors initialized to 1.""" @deprecated_args(None, "Call initializer instance with the dtype argument instead " "of passing it to the constructor", "dtype") def __init__(self, dtype=dtypes.float32): self.dtype = dtypes.as_dtype(dtype) def __call__(self, shape, dtype=None, partition_info=None): if dtype is None: dtype = self.dtype return array_ops.ones(shape, dtype) def get_config(self): return {"dtype": self.dtype.name} @tf_export(v1=["initializers.constant", "constant_initializer"]) @deprecation.deprecated_endpoints("constant_initializer") class Constant(Initializer): """Initializer that generates tensors with constant values. The resulting tensor is populated with values of type `dtype`, as specified by arguments `value` following the desired `shape` of the new tensor (see examples below). The argument `value` can be a constant value, or a list of values of type `dtype`. If `value` is a list, then the length of the list must be less than or equal to the number of elements implied by the desired shape of the tensor. In the case where the total number of elements in `value` is less than the number of elements required by the tensor shape, the last element in `value` will be used to fill the remaining entries. If the total number of elements in `value` is greater than the number of elements required by the tensor shape, the initializer will raise a `ValueError`. Args: value: A Python scalar, list or tuple of values, or a N-dimensional numpy array. All elements of the initialized variable will be set to the corresponding value in the `value` argument. dtype: Default data type, used if no `dtype` argument is provided when calling the initializer. verify_shape: Boolean that enables verification of the shape of `value`. If `True`, the initializer will throw an error if the shape of `value` is not compatible with the shape of the initialized tensor. Raises: TypeError: If the input `value` is not one of the expected types. Examples: The following example can be rewritten using a numpy.ndarray instead of the `value` list, even reshaped, as shown in the two commented lines below the `value` list initialization. ```python >>> import numpy as np >>> import tensorflow as tf >>> value = [0, 1, 2, 3, 4, 5, 6, 7] >>> # value = np.array(value) >>> # value = value.reshape([2, 4]) >>> init = tf.compat.v1.constant_initializer(value) >>> print('fitting shape:') >>> with tf.compat.v1.Session(): >>> x = tf.compat.v1.get_variable('x', shape=[2, 4], initializer=init) >>> x.initializer.run() >>> print(x.eval()) fitting shape: [[ 0. 1. 2. 3.] [ 4. 5. 6. 
7.]] >>> print('larger shape:') >>> with tf.compat.v1.Session(): >>> x = tf.compat.v1.get_variable('x', shape=[3, 4], initializer=init) >>> x.initializer.run() >>> print(x.eval()) larger shape: [[ 0. 1. 2. 3.] [ 4. 5. 6. 7.] [ 7. 7. 7. 7.]] >>> print('smaller shape:') >>> with tf.compat.v1.Session(): >>> x = tf.compat.v1.get_variable('x', shape=[2, 3], initializer=init) ValueError: Too many elements provided. Needed at most 6, but received 8 >>> print('shape verification:') >>> init_verify = tf.compat.v1.constant_initializer(value, verify_shape=True) >>> with tf.compat.v1.Session(): >>> x = tf.compat.v1.get_variable('x', shape=[3, 4], initializer=init_verify) TypeError: Expected Tensor's shape: (3, 4), got (8,). ``` """ @deprecated_args(None, "Call initializer instance with the dtype argument instead " "of passing it to the constructor", "dtype") @deprecated_args(None, "Objects must now be the required shape or no shape " "can be specified", "verify_shape") def __init__(self, value=0, dtype=dtypes.float32, verify_shape=False): if not (np.isscalar(value) or isinstance(value, (list, tuple, np.ndarray))): raise TypeError( "Invalid type for initial value: %s (expected Python scalar, list or " "tuple of values, or numpy.ndarray)." % type(value)) self.value = value self.dtype = dtypes.as_dtype(dtype) self._verify_shape = verify_shape def __call__(self, shape, dtype=None, partition_info=None, verify_shape=None): if dtype is None: dtype = self.dtype if verify_shape is None: verify_shape = self._verify_shape return constant_op.constant_v1( self.value, dtype=dtype, shape=shape, verify_shape=verify_shape) def get_config(self): # We don't include `verify_shape` for compatibility with Keras. # `verify_shape` should be passed as an argument to `__call__` rather # than as a constructor argument: conceptually it isn't a property # of the initializer. return {"value": self.value, "dtype": self.dtype.name} @tf_export(v1=["initializers.random_uniform", "random_uniform_initializer"]) @deprecation.deprecated_endpoints("initializers.random_uniform") class RandomUniform(Initializer): """Initializer that generates tensors with a uniform distribution. Args: minval: A python scalar or a scalar tensor. Lower bound of the range of random values to generate. maxval: A python scalar or a scalar tensor. Upper bound of the range of random values to generate. Defaults to 1 for float types. seed: A Python integer. Used to create random seeds. See `tf.compat.v1.set_random_seed` for behavior. dtype: Default data type, used if no `dtype` argument is provided when calling the initializer. """ @deprecated_args(None, "Call initializer instance with the dtype argument instead " "of passing it to the constructor", "dtype") def __init__(self, minval=0, maxval=None, seed=None, dtype=dtypes.float32): self.minval = minval self.maxval = maxval self.seed = seed self.dtype = dtypes.as_dtype(dtype) def __call__(self, shape, dtype=None, partition_info=None): if dtype is None: dtype = self.dtype return random_ops.random_uniform( shape, self.minval, self.maxval, dtype, seed=self.seed) def get_config(self): return { "minval": self.minval, "maxval": self.maxval, "seed": self.seed, "dtype": self.dtype.name } @tf_export(v1=["initializers.random_normal", "random_normal_initializer"]) @deprecation.deprecated_endpoints("initializers.random_normal") class RandomNormal(Initializer): """Initializer that generates tensors with a normal distribution. Args: mean: a python scalar or a scalar tensor. Mean of the random values to generate. 
stddev: a python scalar or a scalar tensor. Standard deviation of the random values to generate. seed: A Python integer. Used to create random seeds. See `tf.compat.v1.set_random_seed` for behavior. dtype: Default data type, used if no `dtype` argument is provided when calling the initializer. Only floating point types are supported. """ @deprecated_args(None, "Call initializer instance with the dtype argument instead " "of passing it to the constructor", "dtype") def __init__(self, mean=0.0, stddev=1.0, seed=None, dtype=dtypes.float32): self.mean = mean self.stddev = stddev self.seed = seed self.dtype = _assert_float_dtype(dtypes.as_dtype(dtype)) def __call__(self, shape, dtype=None, partition_info=None): if dtype is None: dtype = self.dtype return random_ops.random_normal( shape, self.mean, self.stddev, dtype, seed=self.seed) def get_config(self): return { "mean": self.mean, "stddev": self.stddev, "seed": self.seed, "dtype": self.dtype.name } @tf_export(v1=["initializers.truncated_normal", "truncated_normal_initializer"]) @deprecation.deprecated_endpoints("initializers.truncated_normal", "truncated_normal_initializer") class TruncatedNormal(Initializer): """Initializer that generates a truncated normal distribution. These values are similar to values from a `random_normal_initializer` except that values more than two standard deviations from the mean are discarded and re-drawn. This is the recommended initializer for neural network weights and filters. Args: mean: a python scalar or a scalar tensor. Mean of the random values to generate. stddev: a python scalar or a scalar tensor. Standard deviation of the random values to generate. seed: A Python integer. Used to create random seeds. See `tf.compat.v1.set_random_seed` for behavior. dtype: Default data type, used if no `dtype` argument is provided when calling the initializer. Only floating point types are supported. """ @deprecated_args(None, "Call initializer instance with the dtype argument instead " "of passing it to the constructor", "dtype") def __init__(self, mean=0.0, stddev=1.0, seed=None, dtype=dtypes.float32): self.mean = mean self.stddev = stddev self.seed = seed self.dtype = _assert_float_dtype(dtypes.as_dtype(dtype)) def __call__(self, shape, dtype=None, partition_info=None): if dtype is None: dtype = self.dtype return random_ops.truncated_normal( shape, self.mean, self.stddev, dtype, seed=self.seed) def get_config(self): return { "mean": self.mean, "stddev": self.stddev, "seed": self.seed, "dtype": self.dtype.name } @tf_export(v1=[ "initializers.uniform_unit_scaling", "uniform_unit_scaling_initializer" ]) @deprecation.deprecated_endpoints("uniform_unit_scaling_initializer", "initializers.uniform_unit_scaling") class UniformUnitScaling(Initializer): """Initializer that generates tensors without scaling variance. When initializing a deep network, it is in principle advantageous to keep the scale of the input variance constant, so it does not explode or diminish by reaching the final layer. If the input is `x` and the operation `x * W`, and we want to initialize `W` uniformly at random, we need to pick `W` from [-sqrt(3) / sqrt(dim), sqrt(3) / sqrt(dim)] to keep the scale intact, where `dim = W.shape[0]` (the size of the input). A similar calculation for convolutional networks gives an analogous result with `dim` equal to the product of the first 3 dimensions. When nonlinearities are present, we need to multiply this by a constant `factor`. 
See (Sussillo et al., 2014) for deeper motivation, experiments and the calculation of constants. In section 2.3 there, the constants were numerically computed: for a linear layer it's 1.0, relu: ~1.43, tanh: ~1.15. Args: factor: Float. A multiplicative factor by which the values will be scaled. seed: A Python integer. Used to create random seeds. See `tf.compat.v1.set_random_seed` for behavior. dtype: Default data type, used if no `dtype` argument is provided when calling the initializer. Only floating point types are supported. References: [Sussillo et al., 2014](https://arxiv.org/abs/1412.6558) ([pdf](http://arxiv.org/pdf/1412.6558.pdf)) """ @deprecated_args(None, "Call initializer instance with the dtype argument instead " "of passing it to the constructor", "dtype") @deprecated(None, "Use tf.initializers.variance_scaling instead with distribution=" "uniform to get equivalent behavior.") def __init__(self, factor=1.0, seed=None, dtype=dtypes.float32): self.factor = factor self.seed = seed self.dtype = _assert_float_dtype(dtypes.as_dtype(dtype)) def __call__(self, shape, dtype=None, partition_info=None): if dtype is None: dtype = self.dtype scale_shape = shape if partition_info is not None: scale_shape = partition_info.full_shape input_size = 1.0 # Estimating input size is not possible to do perfectly, but we try. # The estimate, obtained by multiplying all dimensions but the last one, # is the right thing for matrix multiply and convolutions (see above). for dim in scale_shape[:-1]: input_size *= float(dim) # Avoid errors when initializing zero-size tensors. input_size = max(input_size, 1.0) max_val = math.sqrt(3 / input_size) * self.factor return random_ops.random_uniform( shape, -max_val, max_val, dtype, seed=self.seed) def get_config(self): return {"factor": self.factor, "seed": self.seed, "dtype": self.dtype.name} @tf_export(v1=["initializers.variance_scaling", "variance_scaling_initializer"]) @deprecation.deprecated_endpoints("initializers.variance_scaling", "variance_scaling_initializer") class VarianceScaling(Initializer): """Initializer capable of adapting its scale to the shape of weights tensors. With `distribution="truncated_normal" or "untruncated_normal"`, samples are drawn from a truncated/untruncated normal distribution with a mean of zero and a standard deviation (after truncation, if used) `stddev = sqrt(scale / n)` where n is: - number of input units in the weight tensor, if mode = "fan_in" - number of output units, if mode = "fan_out" - average of the numbers of input and output units, if mode = "fan_avg" With `distribution="uniform"`, samples are drawn from a uniform distribution within [-limit, limit], with `limit = sqrt(3 * scale / n)`. Args: scale: Scaling factor (positive float). mode: One of "fan_in", "fan_out", "fan_avg". distribution: Random distribution to use. One of "normal", "uniform". seed: A Python integer. Used to create random seeds. See `tf.compat.v1.set_random_seed` for behavior. dtype: Default data type, used if no `dtype` argument is provided when calling the initializer. Only floating point types are supported. Raises: ValueError: In case of an invalid value for the "scale", mode" or "distribution" arguments. 
""" @deprecated_args(None, "Call initializer instance with the dtype argument instead " "of passing it to the constructor", "dtype") @deprecated_arg_values( None, "`normal` is a deprecated alias for `truncated_normal`", distribution="normal") def __init__(self, scale=1.0, mode="fan_in", distribution="truncated_normal", seed=None, dtype=dtypes.float32): if scale <= 0.: raise ValueError("`scale` must be positive float.") if mode not in {"fan_in", "fan_out", "fan_avg"}: raise ValueError("Invalid `mode` argument:", mode) distribution = distribution.lower() if distribution not in { "normal", "uniform", "truncated_normal", "untruncated_normal" }: raise ValueError("Invalid `distribution` argument:", distribution) self.scale = scale self.mode = mode self.distribution = distribution self.seed = seed self.dtype = _assert_float_dtype(dtypes.as_dtype(dtype)) def __call__(self, shape, dtype=None, partition_info=None): if dtype is None: dtype = self.dtype scale = self.scale scale_shape = shape if partition_info is not None: scale_shape = partition_info.full_shape fan_in, fan_out = _compute_fans(scale_shape) if self.mode == "fan_in": scale /= max(1., fan_in) elif self.mode == "fan_out": scale /= max(1., fan_out) else: scale /= max(1., (fan_in + fan_out) / 2.) if self.distribution == "normal" or self.distribution == "truncated_normal": # constant taken from scipy.stats.truncnorm.std(a=-2, b=2, loc=0., scale=1.) stddev = math.sqrt(scale) / .87962566103423978 return random_ops.truncated_normal( shape, 0.0, stddev, dtype, seed=self.seed) elif self.distribution == "untruncated_normal": stddev = math.sqrt(scale) return random_ops.random_normal(shape, 0.0, stddev, dtype, seed=self.seed) else: limit = math.sqrt(3.0 * scale) return random_ops.random_uniform( shape, -limit, limit, dtype, seed=self.seed) def get_config(self): return { "scale": self.scale, "mode": self.mode, "distribution": self.distribution, "seed": self.seed, "dtype": self.dtype.name } @tf_export(v1=["initializers.orthogonal", "orthogonal_initializer"]) @deprecation.deprecated_endpoints("initializers.orthogonal", "orthogonal_initializer") class Orthogonal(Initializer): """Initializer that generates an orthogonal matrix. If the shape of the tensor to initialize is two-dimensional, it is initialized with an orthogonal matrix obtained from the QR decomposition of a matrix of random numbers drawn from a normal distribution. If the matrix has fewer rows than columns then the output will have orthogonal rows. Otherwise, the output will have orthogonal columns. If the shape of the tensor to initialize is more than two-dimensional, a matrix of shape `(shape[0] * ... * shape[n - 2], shape[n - 1])` is initialized, where `n` is the length of the shape vector. The matrix is subsequently reshaped to give a tensor of the desired shape. Args: gain: multiplicative factor to apply to the orthogonal matrix seed: A Python integer. Used to create random seeds. See `tf.compat.v1.set_random_seed` for behavior. dtype: Default data type, used if no `dtype` argument is provided when calling the initializer. Only floating point types are supported. 
References: [Saxe et al., 2014](https://openreview.net/forum?id=_wzZwKpTDF_9C) ([pdf](https://arxiv.org/pdf/1312.6120.pdf)) """ @deprecated_args(None, "Call initializer instance with the dtype argument instead " "of passing it to the constructor", "dtype") def __init__(self, gain=1.0, seed=None, dtype=dtypes.float32): self.gain = gain self.dtype = _assert_float_dtype(dtypes.as_dtype(dtype)) self.seed = seed def __call__(self, shape, dtype=None, partition_info=None): if dtype is None: dtype = self.dtype # Check the shape if len(shape) < 2: raise ValueError("The tensor to initialize must be " "at least two-dimensional") # Flatten the input shape with the last dimension remaining # its original shape so it works for conv2d num_rows = 1 for dim in shape[:-1]: num_rows *= dim num_rows = int(num_rows) num_cols = int(shape[-1]) if num_rows < num_cols: flat_shape = (num_cols, num_rows) else: flat_shape = (num_rows, num_cols) # Generate a random matrix a = random_ops.random_normal(flat_shape, dtype=dtype, seed=self.seed) # Compute the qr factorization q, r = gen_linalg_ops.qr(a, full_matrices=False) # Make Q uniform d = array_ops.diag_part(r) q *= math_ops.sign(d) if num_rows < num_cols: q = array_ops.matrix_transpose(q) return self.gain * array_ops.reshape(q, shape) def get_config(self): return {"gain": self.gain, "seed": self.seed, "dtype": self.dtype.name} # Note these haven't been ported to TF2.0. They are not currently visible and # the tests are non trivial to port class ConvolutionDeltaOrthogonal(Initializer): """Initializer that generates a delta orthogonal kernel for ConvNets. The shape of the tensor must have length 3, 4 or 5. The number of input filters must not exceed the number of output filters. The center pixels of the tensor form an orthogonal matrix. Other pixels are set to be zero. See algorithm 2 in (Xiao et al., 2018). Args: gain: Multiplicative factor to apply to the orthogonal matrix. Default is 1. The 2-norm of an input is multiplied by a factor of `gain` after applying this convolution. seed: A Python integer. Used to create random seeds. See `tf.compat.v1.set_random_seed` for behavior. dtype: Default data type, used if no `dtype` argument is provided when calling the initializer. Only floating point types are supported. 
References: [Xiao et al., 2018](http://proceedings.mlr.press/v80/xiao18a.html) ([pdf](http://proceedings.mlr.press/v80/xiao18a/xiao18a.pdf)) """ def __init__(self, gain=1.0, seed=None, dtype=dtypes.float32): self.gain = gain self.dtype = _assert_float_dtype(dtypes.as_dtype(dtype)) self.seed = seed def __call__(self, shape, dtype=None, partition_info=None): if dtype is None: dtype = self.dtype # Check the shape if len(shape) < 3 or len(shape) > 5: raise ValueError("The tensor to initialize must be at least " "three-dimensional and at most five-dimensional") if shape[-2] > shape[-1]: raise ValueError("In_filters cannot be greater than out_filters.") # Generate a random matrix a = random_ops.random_normal([shape[-1], shape[-1]], dtype=dtype, seed=self.seed) # Compute the qr factorization q, r = gen_linalg_ops.qr(a, full_matrices=False) # Make Q uniform d = array_ops.diag_part(r) q *= math_ops.sign(d) q = q[:shape[-2], :] q *= math_ops.cast(self.gain, dtype=dtype) if len(shape) == 3: weight = array_ops.scatter_nd([[(shape[0] - 1) // 2]], array_ops.expand_dims(q, 0), shape) elif len(shape) == 4: weight = array_ops.scatter_nd([[(shape[0] - 1) // 2, (shape[1] - 1) // 2]], array_ops.expand_dims(q, 0), shape) else: weight = array_ops.scatter_nd([[(shape[0] - 1) // 2, (shape[1] - 1) // 2, (shape[2] - 1) // 2]], array_ops.expand_dims(q, 0), shape) return weight def get_config(self): return {"gain": self.gain, "seed": self.seed, "dtype": self.dtype.name} class ConvolutionOrthogonal(Initializer): """Initializer that generates orthogonal kernel for ConvNets. Base class used to construct 1D, 2D and 3D orthogonal kernels for convolution. Args: gain: multiplicative factor to apply to the orthogonal matrix. Default is 1. The 2-norm of an input is multiplied by a factor of `gain` after applying this convolution. seed: A Python integer. Used to create random seeds. See `tf.compat.v1.set_random_seed` for behavior. dtype: Default data type, used if no `dtype` argument is provided when calling the initializer. Only floating point types are supported. References: [Xiao et al., 2018](http://proceedings.mlr.press/v80/xiao18a.html) ([pdf](http://proceedings.mlr.press/v80/xiao18a/xiao18a.pdf)) """ def __init__(self, gain=1.0, seed=None, dtype=dtypes.float32): self.gain = gain self.dtype = _assert_float_dtype(dtypes.as_dtype(dtype)) self.seed = seed def __call__(self, shape, dtype=None, partition_info=None): raise NotImplementedError def get_config(self): return {"gain": self.gain, "seed": self.seed, "dtype": self.dtype.name} # Helper functions. def _orthogonal_matrix(self, n): """Construct an n x n orthogonal matrix. Args: n: Dimension. Returns: A n x n orthogonal matrix. """ a = random_ops.random_normal([n, n], dtype=self.dtype, seed=self.seed) if self.seed: self.seed += 1 q, r = gen_linalg_ops.qr(a) d = array_ops.diag_part(r) # make q uniform q *= math_ops.sign(d) return q def _symmetric_projection(self, n): """Compute a n x n symmetric projection matrix. Args: n: Dimension. Returns: A n x n symmetric projection matrix, i.e. a matrix P s.t. P=P*P, P=P^T. """ q = self._orthogonal_matrix(n) # randomly zeroing out some columns mask = math_ops.cast( random_ops.random_normal([n], seed=self.seed) > 0, self.dtype) if self.seed: self.seed += 1 c = math_ops.multiply(q, mask) return math_ops.matmul(c, array_ops.matrix_transpose(c)) class ConvolutionOrthogonal2D(ConvolutionOrthogonal): """Initializer that generates a 2D orthogonal kernel for ConvNets. The shape of the tensor must have length 4. 
The number of input filters must not exceed the number of output filters. The orthogonality(==isometry) is exact when the inputs are circular padded. There are finite-width effects with non-circular padding (e.g. zero padding). See algorithm 1 in (Xiao et al., 2018). Args: gain: Multiplicative factor to apply to the orthogonal matrix. Default is 1. This has the effect of scaling the output 2-norm by a factor of `gain`. seed: A Python integer. Used to create random seeds. See `tf.compat.v1.set_random_seed` for behavior. dtype: Default data type, used if no `dtype` argument is provided when calling the initializer. Only floating point types are supported. References: [Xiao et al., 2018](http://proceedings.mlr.press/v80/xiao18a.html) ([pdf](http://proceedings.mlr.press/v80/xiao18a/xiao18a.pdf)) """ def __call__(self, shape, dtype=None, partition_info=None): if dtype is None: dtype = self.dtype if len(shape) != 4: raise ValueError("The tensor to initialize must be four-dimensional") if shape[-2] > shape[-1]: raise ValueError("In_filters cannot be greater than out_filters.") if shape[0] != shape[1]: raise ValueError("Kernel sizes must be equal.") kernel = self._orthogonal_kernel(shape[0], shape[2], shape[3]) kernel *= math_ops.cast(self.gain, dtype=dtype) return kernel def _dict_to_tensor(self, x, k1, k2): """Convert a dictionary to a tensor. Args: x: A k1 * k2 dictionary. k1: First dimension of x. k2: Second dimension of x. Returns: A k1 * k2 tensor. """ return array_ops.stack([array_ops.stack([x[i, j] for j in range(k2)]) for i in range(k1)]) def _block_orth(self, p1, p2): """Construct a 2 x 2 kernel. Used to construct orthgonal kernel. Args: p1: A symmetric projection matrix. p2: A symmetric projection matrix. Returns: A 2 x 2 kernel [[p1p2, p1(1-p2)], [(1-p1)p2, (1-p1)(1-p2)]]. Raises: ValueError: If the dimensions of p1 and p2 are different. """ if p1.shape.as_list() != p2.shape.as_list(): raise ValueError("The dimension of the matrices must be the same.") n = p1.shape.as_list()[0] kernel2x2 = {} eye = linalg_ops_impl.eye(n, dtype=self.dtype) kernel2x2[0, 0] = math_ops.matmul(p1, p2) kernel2x2[0, 1] = math_ops.matmul(p1, (eye - p2)) kernel2x2[1, 0] = math_ops.matmul((eye - p1), p2) kernel2x2[1, 1] = math_ops.matmul((eye - p1), (eye - p2)) return kernel2x2 def _matrix_conv(self, m1, m2): """Matrix convolution. Args: m1: A k x k dictionary, each element is a n x n matrix. m2: A l x l dictionary, each element is a n x n matrix. Returns: (k + l - 1) * (k + l - 1) dictionary each element is a n x n matrix. Raises: ValueError: if the entries of m1 and m2 are of different dimensions. """ n = (m1[0, 0]).shape.as_list()[0] if n != (m2[0, 0]).shape.as_list()[0]: raise ValueError("The entries in matrices m1 and m2 " "must have the same dimensions!") k = int(np.sqrt(len(m1))) l = int(np.sqrt(len(m2))) result = {} size = k + l - 1 # Compute matrix convolution between m1 and m2. for i in range(size): for j in range(size): result[i, j] = array_ops.zeros([n, n], self.dtype) for index1 in range(min(k, i + 1)): for index2 in range(min(k, j + 1)): if (i - index1) < l and (j - index2) < l: result[i, j] += math_ops.matmul(m1[index1, index2], m2[i - index1, j - index2]) return result def _orthogonal_kernel(self, ksize, cin, cout): """Construct orthogonal kernel for convolution. Args: ksize: Kernel size. cin: Number of input channels. cout: Number of output channels. Returns: An [ksize, ksize, cin, cout] orthogonal kernel. Raises: ValueError: If cin > cout. 
""" if cin > cout: raise ValueError("The number of input channels cannot exceed " "the number of output channels.") orth = self._orthogonal_matrix(cout)[0:cin, :] if ksize == 1: return array_ops.expand_dims(array_ops.expand_dims(orth, 0), 0) p = self._block_orth( self._symmetric_projection(cout), self._symmetric_projection(cout)) for _ in range(ksize - 2): temp = self._block_orth( self._symmetric_projection(cout), self._symmetric_projection(cout)) p = self._matrix_conv(p, temp) for i in range(ksize): for j in range(ksize): p[i, j] = math_ops.matmul(orth, p[i, j]) return self._dict_to_tensor(p, ksize, ksize) class ConvolutionOrthogonal1D(ConvolutionOrthogonal): """Initializer that generates a 1D orthogonal kernel for ConvNets. The shape of the tensor must have length 3. The number of input filters must not exceed the number of output filters. The orthogonality(==isometry) is exact when the inputs are circular padded. There are finite-width effects with non-circular padding (e.g. zero padding). See algorithm 1 in (Xiao et al., 2018). Args: gain: Multiplicative factor to apply to the orthogonal matrix. Default is 1. The 2-norm of an input is multiplied by a factor of `gain` after applying this convolution. seed: A Python integer. Used to create random seeds. See `tf.compat.v1.set_random_seed` for behavior. dtype: Default data type, used if no `dtype` argument is provided when calling the initializer. Only floating point types are supported. References: [Xiao et al., 2018](http://proceedings.mlr.press/v80/xiao18a.html) ([pdf](http://proceedings.mlr.press/v80/xiao18a/xiao18a.pdf)) """ def __call__(self, shape, dtype=None, partition_info=None): if dtype is None: dtype = self.dtype if len(shape) != 3: raise ValueError("The tensor to initialize must be three-dimensional") if shape[-2] > shape[-1]: raise ValueError("In_filters cannot be greater than out_filters.") kernel = self._orthogonal_kernel(shape[0], shape[-2], shape[-1]) kernel *= math_ops.cast(self.gain, dtype=dtype) return kernel def _dict_to_tensor(self, x, k): """Convert a dictionary to a tensor. Args: x: A dictionary of length k. k: Dimension of x. Returns: A tensor with the same dimension. """ return array_ops.stack([x[i] for i in range(k)]) def _block_orth(self, projection_matrix): """Construct a kernel. Used to construct orthgonal kernel. Args: projection_matrix: A symmetric projection matrix of size n x n. Returns: [projection_matrix, (1 - projection_matrix)]. """ n = projection_matrix.shape.as_list()[0] kernel = {} eye = linalg_ops_impl.eye(n, dtype=self.dtype) kernel[0] = projection_matrix kernel[1] = eye - projection_matrix return kernel def _matrix_conv(self, m1, m2): """Matrix convolution. Args: m1: A dictionary of length k, each element is a n x n matrix. m2: A dictionary of length l, each element is a n x n matrix. Returns: (k + l - 1) dictionary each element is a n x n matrix. Raises: ValueError: Ff the entries of m1 and m2 are of different dimensions. """ n = (m1[0]).shape.as_list()[0] if n != (m2[0]).shape.as_list()[0]: raise ValueError("The entries in matrices m1 and m2 " "must have the same dimensions!") k = len(m1) l = len(m2) result = {} size = k + l - 1 # Compute matrix convolution between m1 and m2. for i in range(size): result[i] = array_ops.zeros([n, n], self.dtype) for index in range(min(k, i + 1)): if (i - index) < l: result[i] += math_ops.matmul(m1[index], m2[i - index]) return result def _orthogonal_kernel(self, ksize, cin, cout): """Construct orthogonal kernel for convolution. Args: ksize: Kernel size. 
cin: Number of input channels. cout: Number of output channels. Returns: An [ksize, ksize, cin, cout] orthogonal kernel. Raises: ValueError: If cin > cout. """ if cin > cout: raise ValueError("The number of input channels cannot exceed " "the number of output channels.") orth = self._orthogonal_matrix(cout)[0:cin, :] if ksize == 1: return array_ops.expand_dims(orth, 0) p = self._block_orth(self._symmetric_projection(cout)) for _ in range(ksize - 2): temp = self._block_orth(self._symmetric_projection(cout)) p = self._matrix_conv(p, temp) for i in range(ksize): p[i] = math_ops.matmul(orth, p[i]) return self._dict_to_tensor(p, ksize) class ConvolutionOrthogonal3D(ConvolutionOrthogonal): """Initializer that generates a 3D orthogonal kernel for ConvNets. The shape of the tensor must have length 5. The number of input filters must not exceed the number of output filters. The orthogonality(==isometry) is exact when the inputs are circular padded. There are finite-width effects with non-circular padding (e.g. zero padding). See algorithm 1 (Xiao et al., 2018). Args: gain: Multiplicative factor to apply to the orthogonal matrix. Default is 1. The 2-norm of an input is multiplied by a factor of `gain` after applying this convolution. seed: A Python integer. Used to create random seeds. See `tf.compat.v1.set_random_seed` for behavior. dtype: Default data type, used if no `dtype` argument is provided when calling the initializer. Only floating point types are supported. References: [Xiao et al., 2018](http://proceedings.mlr.press/v80/xiao18a.html) ([pdf](http://proceedings.mlr.press/v80/xiao18a/xiao18a.pdf)) """ def __call__(self, shape, dtype=None, partition_info=None): if dtype is None: dtype = self.dtype if len(shape) != 5: raise ValueError("The tensor to initialize must be five-dimensional") if shape[-2] > shape[-1]: raise ValueError("In_filters cannot be greater than out_filters.") if shape[0] != shape[1] or shape[0] != shape[2]: raise ValueError("Kernel sizes must be equal.") kernel = self._orthogonal_kernel(shape[0], shape[-2], shape[-1]) kernel *= math_ops.cast(self.gain, dtype=dtype) return kernel def _dict_to_tensor(self, x, k1, k2, k3): """Convert a dictionary to a tensor. Args: x: A k1 * k2 dictionary. k1: First dimension of x. k2: Second dimension of x. k3: Third dimension of x. Returns: A k1 * k2 * k3 tensor. """ return array_ops.stack([array_ops.stack( [array_ops.stack([x[i, j, k] for k in range(k3)]) for j in range(k2)]) for i in range(k1)]) def _block_orth(self, p1, p2, p3): """Construct a 3 x 3 kernel. Used to construct orthgonal kernel. Args: p1: A symmetric projection matrix. p2: A symmetric projection matrix. p3: A symmetric projection matrix. Returns: A 2 x 2 x 2 kernel. Raises: ValueError: If the dimensions of p1, p2 and p3 are different. """ p1_shape = p1.shape.as_list() if p1_shape != p2.shape.as_list() or p1_shape != p3.shape.as_list(): raise ValueError("The dimension of the matrices must be the same.") n = p1_shape[0] eye = linalg_ops_impl.eye(n, dtype=self.dtype) kernel2x2x2 = {} def matmul(p1, p2, p3): return math_ops.matmul(math_ops.matmul(p1, p2), p3) def cast(i, p): """Return p or (1-p).""" return i * p + (1 - i) * (eye - p) for i in [0, 1]: for j in [0, 1]: for k in [0, 1]: kernel2x2x2[i, j, k] = matmul(cast(i, p1), cast(j, p2), cast(k, p3)) return kernel2x2x2 def _matrix_conv(self, m1, m2): """Matrix convolution. Args: m1: is a k x k x k dictionary, each element is a n x n matrix. m2: is a l x l x l dictionary, each element is a n x n matrix. 
Returns: (k + l - 1) x (k + l - 1) x (k + l - 1) dictionary each element is a n x n matrix. Raises: ValueError: if the entries of m1 and m2 are of different dimensions. """ n = (m1[0, 0, 0]).shape.as_list()[0] if n != (m2[0, 0, 0]).shape.as_list()[0]: raise ValueError("The entries in matrices m1 and m2 " "must have the same dimensions!") k = int(np.cbrt(len(m1))) l = int(np.cbrt(len(m2))) result = {} size = k + l - 1 # Compute matrix convolution between m1 and m2. for i in range(size): for j in range(size): for r in range(size): result[i, j, r] = array_ops.zeros([n, n], self.dtype) for index1 in range(min(k, i + 1)): for index2 in range(min(k, j + 1)): for index3 in range(min(k, r + 1)): if (i - index1) < l and (j - index2) < l and (r - index3) < l: result[i, j, r] += math_ops.matmul( m1[index1, index2, index3], m2[i - index1, j - index2, r - index3]) return result def _orthogonal_kernel(self, ksize, cin, cout): """Construct orthogonal kernel for convolution. Args: ksize: Kernel size. cin: Number of input channels. cout: Number of output channels. Returns: An [ksize, ksize, ksize, cin, cout] orthogonal kernel. Raises: ValueError: If cin > cout. """ if cin > cout: raise ValueError("The number of input channels cannot exceed " "the number of output channels.") orth = self._orthogonal_matrix(cout)[0:cin, :] if ksize == 1: return array_ops.expand_dims( array_ops.expand_dims(array_ops.expand_dims(orth, 0), 0), 0) p = self._block_orth( self._symmetric_projection(cout), self._symmetric_projection(cout), self._symmetric_projection(cout)) for _ in range(ksize - 2): temp = self._block_orth( self._symmetric_projection(cout), self._symmetric_projection(cout), self._symmetric_projection(cout)) p = self._matrix_conv(p, temp) for i in range(ksize): for j in range(ksize): for k in range(ksize): p[i, j, k] = math_ops.matmul(orth, p[i, j, k]) return self._dict_to_tensor(p, ksize, ksize, ksize) @tf_export(v1=["initializers.identity"]) @deprecation.deprecated_endpoints("initializers.identity") class Identity(Initializer): """Initializer that generates the identity matrix. Only use for 2D matrices. Args: gain: Multiplicative factor to apply to the identity matrix. dtype: Default data type, used if no `dtype` argument is provided when calling the initializer. Only floating point types are supported. """ @deprecated_args(None, "Call initializer instance with the dtype argument instead " "of passing it to the constructor", "dtype") def __init__(self, gain=1.0, dtype=dtypes.float32): self.gain = gain self.dtype = _assert_float_dtype(dtypes.as_dtype(dtype)) def __call__(self, shape, dtype=None, partition_info=None): full_shape = shape if partition_info is None else partition_info.full_shape if len(full_shape) != 2: raise ValueError( "Identity matrix initializer can only be used for 2D matrices.") if dtype is None: dtype = self.dtype if isinstance(full_shape, tensor_shape.TensorShape): full_shape = full_shape.as_list() initializer = linalg_ops_impl.eye(*full_shape, dtype=dtype) if partition_info is not None: initializer = array_ops.slice(initializer, partition_info.var_offset, shape) return self.gain * initializer def get_config(self): return {"gain": self.gain, "dtype": self.dtype.name} @tf_export(v1=["glorot_uniform_initializer", "initializers.glorot_uniform"]) @deprecation.deprecated_endpoints("glorot_uniform_initializer", "initializers.glorot_uniform") class GlorotUniform(VarianceScaling): """The Glorot uniform initializer, also called Xavier uniform initializer. 
It draws samples from a uniform distribution within [-limit, limit] where `limit` is `sqrt(6 / (fan_in + fan_out))` where `fan_in` is the number of input units in the weight tensor and `fan_out` is the number of output units in the weight tensor. Args: seed: A Python integer. Used to create random seeds. See `tf.compat.v1.set_random_seed` for behavior. dtype: Default data type, used if no `dtype` argument is provided when calling the initializer. Only floating point types are supported. References: [Glorot et al., 2010](http://proceedings.mlr.press/v9/glorot10a.html) ([pdf](http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf)) """ @deprecated_args(None, "Call initializer instance with the dtype argument instead " "of passing it to the constructor", "dtype") def __init__(self, seed=None, dtype=dtypes.float32): super(GlorotUniform, self).__init__( scale=1.0, mode="fan_avg", distribution="uniform", seed=seed) def get_config(self): return {"seed": self.seed, "dtype": self.dtype.name} @tf_export(v1=["glorot_normal_initializer", "initializers.glorot_normal"]) @deprecation.deprecated_endpoints("glorot_normal_initializer", "initializers.glorot_normal") class GlorotNormal(VarianceScaling): """The Glorot normal initializer, also called Xavier normal initializer. It draws samples from a truncated normal distribution centered on 0 with standard deviation (after truncation) given by `stddev = sqrt(2 / (fan_in + fan_out))` where `fan_in` is the number of input units in the weight tensor and `fan_out` is the number of output units in the weight tensor. Args: seed: A Python integer. Used to create random seeds. See `tf.compat.v1.set_random_seed` for behavior. dtype: Default data type, used if no `dtype` argument is provided when calling the initializer. Only floating point types are supported. References: [Glorot et al., 2010](http://proceedings.mlr.press/v9/glorot10a.html) ([pdf](http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf)) """ @deprecated_args(None, "Call initializer instance with the dtype argument instead " "of passing it to the constructor", "dtype") def __init__(self, seed=None, dtype=dtypes.float32): super(GlorotNormal, self).__init__( scale=1.0, mode="fan_avg", distribution="truncated_normal", seed=seed) def get_config(self): return {"seed": self.seed, "dtype": self.dtype.name} # Aliases. # pylint: disable=invalid-name zeros_initializer = Zeros ones_initializer = Ones constant_initializer = Constant random_uniform_initializer = RandomUniform random_normal_initializer = RandomNormal truncated_normal_initializer = TruncatedNormal uniform_unit_scaling_initializer = UniformUnitScaling variance_scaling_initializer = VarianceScaling glorot_uniform_initializer = GlorotUniform glorot_normal_initializer = GlorotNormal orthogonal_initializer = Orthogonal identity_initializer = Identity convolutional_delta_orthogonal = ConvolutionDeltaOrthogonal convolutional_orthogonal_1d = ConvolutionOrthogonal1D convolutional_orthogonal_2d = ConvolutionOrthogonal2D convolutional_orthogonal_3d = ConvolutionOrthogonal3D # pylint: enable=invalid-name @tf_export(v1=["initializers.lecun_normal"]) def lecun_normal(seed=None): """LeCun normal initializer. It draws samples from a truncated normal distribution centered on 0 with standard deviation (after truncation) given by `stddev = sqrt(1 / fan_in)` where `fan_in` is the number of input units in the weight tensor. Arguments: seed: A Python integer. Used to seed the random generator. Returns: An initializer. 
References: - Self-Normalizing Neural Networks, [Klambauer et al., 2017](https://papers.nips.cc/paper/6698-self-normalizing-neural-networks) # pylint: disable=line-too-long ([pdf](https://papers.nips.cc/paper/6698-self-normalizing-neural-networks.pdf)) - Efficient Backprop, [Lecun et al., 1998](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf) """ return VarianceScaling( scale=1., mode="fan_in", distribution="truncated_normal", seed=seed) @tf_export(v1=["initializers.lecun_uniform"]) def lecun_uniform(seed=None): """LeCun uniform initializer. It draws samples from a uniform distribution within [-limit, limit] where `limit` is `sqrt(3 / fan_in)` where `fan_in` is the number of input units in the weight tensor. Arguments: seed: A Python integer. Used to seed the random generator. Returns: An initializer. References: - Self-Normalizing Neural Networks, [Klambauer et al., 2017](https://papers.nips.cc/paper/6698-self-normalizing-neural-networks) # pylint: disable=line-too-long ([pdf](https://papers.nips.cc/paper/6698-self-normalizing-neural-networks.pdf)) - Efficient Backprop, [Lecun et al., 1998](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf) """ return VarianceScaling( scale=1., mode="fan_in", distribution="uniform", seed=seed) @tf_export(v1=["initializers.he_normal"]) def he_normal(seed=None): """He normal initializer. It draws samples from a truncated normal distribution centered on 0 with standard deviation (after truncation) given by `stddev = sqrt(2 / fan_in)` where `fan_in` is the number of input units in the weight tensor. Arguments: seed: A Python integer. Used to seed the random generator. Returns: An initializer. References: [He et al., 2015] (https://www.cv-foundation.org/openaccess/content_iccv_2015/html/He_Delving_Deep_into_ICCV_2015_paper.html) # pylint: disable=line-too-long ([pdf](https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/He_Delving_Deep_into_ICCV_2015_paper.pdf)) """ return VarianceScaling( scale=2., mode="fan_in", distribution="truncated_normal", seed=seed) @tf_export(v1=["initializers.he_uniform"]) def he_uniform(seed=None): """He uniform variance scaling initializer. It draws samples from a uniform distribution within [-limit, limit] where `limit` is `sqrt(6 / fan_in)` where `fan_in` is the number of input units in the weight tensor. Arguments: seed: A Python integer. Used to seed the random generator. Returns: An initializer. References: [He et al., 2015] (https://www.cv-foundation.org/openaccess/content_iccv_2015/html/He_Delving_Deep_into_ICCV_2015_paper.html) # pylint: disable=line-too-long ([pdf](https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/He_Delving_Deep_into_ICCV_2015_paper.pdf)) """ return VarianceScaling( scale=2., mode="fan_in", distribution="uniform", seed=seed) # Utility functions. def _compute_fans(shape): """Computes the number of input and output units for a weight shape. Args: shape: Integer shape tuple or TF tensor shape. Returns: A tuple of integer scalars (fan_in, fan_out). """ if len(shape) < 1: # Just to avoid errors for constants. fan_in = fan_out = 1 elif len(shape) == 1: fan_in = fan_out = shape[0] elif len(shape) == 2: fan_in = shape[0] fan_out = shape[1] else: # Assuming convolution kernels (2D, 3D, or more). 
# kernel shape: (..., input_depth, depth) receptive_field_size = 1 for dim in shape[:-2]: receptive_field_size *= dim fan_in = shape[-2] * receptive_field_size fan_out = shape[-1] * receptive_field_size return int(fan_in), int(fan_out) def _assert_float_dtype(dtype): """Validate and return floating point type based on `dtype`. `dtype` must be a floating point type. Args: dtype: The data type to validate. Returns: Validated type. Raises: ValueError: if `dtype` is not a floating point type. """ if not dtype.is_floating: raise ValueError("Expected floating point type, got %s." % dtype) return dtype
tensorflow-master
tensorflow/python/ops/init_ops.py
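A minimal usage sketch for the initializers defined in init_ops.py above. It is illustrative only: the shapes are arbitrary, and it assumes the constructor arguments (gain, seed, dtype) documented in the docstrings above and the internal import path used throughout this repository.
# Illustrative sketch; not part of init_ops.py.
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import init_ops

# Glorot/Xavier uniform: samples from U(-limit, limit) with
# limit = sqrt(6 / (fan_in + fan_out)).
glorot = init_ops.GlorotUniform(seed=0)
dense_kernel = glorot(shape=[128, 64], dtype=dtypes.float32)

# 2-D convolutional orthogonal kernel (Xiao et al., 2018): the shape is
# [ksize, ksize, cin, cout], the two kernel sizes must match, and cin must
# not exceed cout. Constructor arguments follow the docstrings above
# (an assumption of this sketch).
conv_orth = init_ops.ConvolutionOrthogonal2D(gain=1.0, seed=0)
conv_kernel = conv_orth(shape=[3, 3, 16, 32])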
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for loss scaling utilities in tensorflow.ops.nn.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized from tensorflow.python.distribute import combinations from tensorflow.python.distribute import strategy_combinations from tensorflow.python.eager import context from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.ops import array_ops from tensorflow.python.ops import nn_impl from tensorflow.python.platform import test as test_lib class LossUtilitiesTest(test_lib.TestCase, parameterized.TestCase): def setUp(self): strategy_combinations.set_virtual_cpus_to_at_least(3) super(LossUtilitiesTest, self).setUp() def testComputeAverageLossGlobalBatchSize(self): per_example_loss = [1, 2, 3, 4, 5] loss = nn_impl.compute_average_loss(per_example_loss, global_batch_size=10) self.assertEqual(self.evaluate(loss), 1.5) @combinations.generate( combinations.combine( distribution=[ strategy_combinations.mirrored_strategy_with_cpu_1_and_2 ], mode=["graph", "eager"])) def testComputeAverageLossDefaultGlobalBatchSize(self, distribution): # Without strategy - num replicas = 1 per_example_loss = constant_op.constant([2.5, 6.2, 5.]) loss = nn_impl.compute_average_loss(per_example_loss) self.assertAllClose(self.evaluate(loss), (2.5 + 6.2 + 5.) / 3) # With strategy - num replicas = 2 with distribution.scope(): per_replica_losses = distribution.experimental_run_v2( nn_impl.compute_average_loss, args=(per_example_loss,)) loss = distribution.reduce("SUM", per_replica_losses, axis=None) self.assertAllClose(self.evaluate(loss), (2.5 + 6.2 + 5.) / 3) @combinations.generate( combinations.combine( distribution=[ strategy_combinations.mirrored_strategy_with_cpu_1_and_2 ], mode=["graph", "eager"])) def testComputeAverageLossSampleWeights(self, distribution): with distribution.scope(): # Scalar sample weight per_replica_losses = distribution.experimental_run_v2( nn_impl.compute_average_loss, args=([2., 4., 6.],), kwargs={"sample_weight": 2}) loss = distribution.reduce("SUM", per_replica_losses, axis=None) self.assertAllClose(self.evaluate(loss), (2. + 4. + 6.) * 2. / 3) # Per example sample weight per_replica_losses = distribution.experimental_run_v2( nn_impl.compute_average_loss, args=([2., 4., 6.],), kwargs={"sample_weight": [0.3, 0.5, 0.2]}) loss = distribution.reduce("SUM", per_replica_losses, axis=None) self.assertAllClose( self.evaluate(loss), (2. * 0.3 + 4. * 0.5 + 6. 
* 0.2) / 3) # Time-step sample weight per_replica_losses = distribution.experimental_run_v2( nn_impl.compute_average_loss, args=([[2., 0.5], [4., 1.]],), kwargs={"sample_weight": [[0.3, 0.7], [0.2, 0.8]]}) loss = distribution.reduce("SUM", per_replica_losses, axis=None) self.assertAllClose( self.evaluate(loss), (2. * 0.3 + 0.5 * 0.7 + 4. * 0.2 + 1. * 0.8) / 2) def testComputeAverageLossInvalidSampleWeights(self): with self.assertRaisesRegex(ValueError, "weights can not be broadcast to values"): nn_impl.compute_average_loss([2.5, 6.2, 5.], sample_weight=[0.2, 0.8], global_batch_size=10) @combinations.generate( combinations.combine( distribution=[ strategy_combinations.mirrored_strategy_with_cpu_1_and_2 ], mode=["graph", "eager"])) def testComputeAverageLossDtype(self, distribution): with distribution.scope(): per_example_loss = constant_op.constant([2., 4., 6.], dtype=dtypes.float64) per_replica_losses = distribution.experimental_run_v2( nn_impl.compute_average_loss, args=(per_example_loss,), kwargs={"sample_weight": 2}) loss = distribution.reduce("SUM", per_replica_losses, axis=None) self.assertEqual(loss.dtype, dtypes.float64) def testComputeAverageLossInvalidRank(self): per_example_loss = constant_op.constant(2) # Static rank with self.assertRaisesRegex( ValueError, "Invalid value passed for `per_example_loss`. " "Expected a tensor with at least rank 1,"): nn_impl.compute_average_loss(per_example_loss) with context.graph_mode(): # Dynamic rank per_example_loss = array_ops.placeholder(dtype=dtypes.float32) loss = nn_impl.compute_average_loss(per_example_loss) with self.cached_session() as sess: with self.assertRaisesRegex( errors.InvalidArgumentError, "Invalid value passed for `per_example_loss`. " "Expected a tensor with at least rank 1."): sess.run(loss, {per_example_loss: 2}) @combinations.generate( combinations.combine( distribution=[ strategy_combinations.mirrored_strategy_with_cpu_1_and_2 ], mode=["graph", "eager"])) def testComputeAverageLossInCrossReplicaContext(self, distribution): with distribution.scope(): with self.assertRaisesRegex( RuntimeError, "You are calling `compute_average_loss` in cross replica context"): nn_impl.compute_average_loss([2, 3]) @combinations.generate( combinations.combine( distribution=[ strategy_combinations.mirrored_strategy_with_cpu_1_and_2 ], mode=["graph", "eager"])) def testScaleRegularizationLoss(self, distribution): # Without strategy - num replicas = 1 reg_losses = constant_op.constant([2.5, 6.2, 5.]) loss = nn_impl.scale_regularization_loss(reg_losses) self.assertAllClose(self.evaluate(loss), (2.5 + 6.2 + 5.)) # With strategy - num replicas = 2 with distribution.scope(): per_replica_losses = distribution.experimental_run_v2( nn_impl.scale_regularization_loss, args=(reg_losses,)) loss = distribution.reduce("SUM", per_replica_losses, axis=None) self.assertAllClose(self.evaluate(loss), (2.5 + 6.2 + 5.)) @combinations.generate( combinations.combine( distribution=[ strategy_combinations.mirrored_strategy_with_cpu_1_and_2 ], mode=["graph", "eager"])) def testScaleRegularizationLossInCrossReplicaContext(self, distribution): with distribution.scope(): with self.assertRaisesRegex( RuntimeError, "You are calling `scale_regularization_loss` in " "cross replica context"): nn_impl.scale_regularization_loss([2, 3])
tensorflow-master
tensorflow/python/ops/nn_loss_scaling_utilities_test.py
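A small illustrative sketch of the arithmetic exercised by the loss-scaling tests above, written against the public tf.nn API rather than the internal nn_impl module; the input values are arbitrary.
# Illustrative only; mirrors the behaviour checked in the tests above.
import tensorflow as tf

per_example_loss = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0])

# With an explicit global batch size the per-example losses are summed and
# divided by that size: (1 + 2 + 3 + 4 + 5) / 10 = 1.5.
loss = tf.nn.compute_average_loss(per_example_loss, global_batch_size=10)

# With per-example sample weights, the weighted losses are averaged over the
# (global) number of examples: (2*0.3 + 4*0.5 + 6*0.2) / 3.
weighted = tf.nn.compute_average_loss(
    tf.constant([2.0, 4.0, 6.0]), sample_weight=[0.3, 0.5, 0.2])

# Regularization losses are instead divided by the number of replicas, so
# outside a distribution strategy this is simply the sum.
reg = tf.nn.scale_regularization_loss(tf.constant([2.5, 6.2, 5.0]))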
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for tensorflow.ops.gradients.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import sys import warnings from absl.testing import parameterized import numpy as np from tensorflow.python.client import session from tensorflow.python.eager import backprop from tensorflow.python.eager import context from tensorflow.python.eager import function from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import function as framework_function from tensorflow.python.framework import ops from tensorflow.python.framework import test_ops from tensorflow.python.framework import test_util from tensorflow.python.framework.constant_op import constant from tensorflow.python.keras.engine import training from tensorflow.python.layers import core as core_layers from tensorflow.python.ops import array_grad # pylint: disable=unused-import from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_grad # pylint: disable=unused-import from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import custom_gradient from tensorflow.python.ops import data_flow_grad # pylint: disable=unused-import from tensorflow.python.ops import data_flow_ops # pylint: disable=unused-import from tensorflow.python.ops import functional_ops # pylint: disable=unused-import from tensorflow.python.ops import gradients from tensorflow.python.ops import gradients_impl from tensorflow.python.ops import list_ops from tensorflow.python.ops import math_grad # pylint: disable=unused-import from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn_grad # pylint: disable=unused-import from tensorflow.python.ops import resource_variable_ops from tensorflow.python.ops import state_grad # pylint: disable=unused-import from tensorflow.python.ops import tensor_array_grad # pylint: disable=unused-import from tensorflow.python.ops import tensor_array_ops from tensorflow.python.ops import unconnected_gradients from tensorflow.python.ops import variable_scope from tensorflow.python.ops import variables from tensorflow.python.ops.nn_ops import bias_add from tensorflow.python.platform import googletest class GradientsTest(test_util.TensorFlowTestCase, parameterized.TestCase): def testGradients(self): with ops.Graph().as_default(): inp = constant(1.0, shape=[32, 100], name="in") w = constant(1.0, shape=[100, 10], name="w") b = constant(1.0, shape=[10], name="b") xw = math_ops.matmul(inp, w, name="xw") h = bias_add(xw, b, name="h") w_grad = gradients.gradients(h, w)[0] self.assertEquals("MatMul", w_grad.op.type) self.assertEquals(w_grad.op._original_op, xw.op) self.assertTrue(w_grad.op.get_attr("transpose_a")) self.assertFalse(w_grad.op.get_attr("transpose_b")) def 
testUnusedOutput(self): with ops.Graph().as_default(): w = constant(1.0, shape=[2, 2]) x = constant(1.0, shape=[2, 2]) wx = math_ops.matmul(w, x) split_wx = array_ops.split(value=wx, num_or_size_splits=2, axis=0) c = math_ops.reduce_sum(split_wx[1]) gw = gradients.gradients(c, [w])[0] self.assertEquals("MatMul", gw.op.type) def testColocateGradients(self): with ops.Graph().as_default() as g: w = constant(1.0, shape=[1, 1]) x = constant(1.0, shape=[1, 2]) with g.device("/device:GPU:0"): wx = math_ops.matmul(w, x) gw = gradients.gradients(wx, [w], colocate_gradients_with_ops=True)[0] self.assertEqual(gw.op.colocation_groups(), wx.op.colocation_groups()) def testColocateGradientsWithAggregation(self): with ops.Graph().as_default() as g: with g.device("/device:GPU:1"): w = constant(1.0, shape=[1, 1]) x = constant(1.0, shape=[1, 2]) y = constant(1.0, shape=[1, 2]) wx = math_ops.matmul(w, x) wy = math_ops.matmul(w, y) with g.device("/device:GPU:0"): z = wx + wy gw1 = gradients.gradients(z, [w], colocate_gradients_with_ops=True)[0] self.assertEqual(gw1.op.colocation_groups(), wx.op.colocation_groups()) gw2 = gradients.gradients(z, [w], colocate_gradients_with_ops=False)[0] self.assertTrue(wx.op.colocation_groups() != gw2.op.colocation_groups()) def testColocateGradientsWithAggregationInMultipleDevices(self): with ops.Graph().as_default() as g: with g.device("/device:GPU:1"): w = constant(1.0, shape=[1, 1]) x = constant(1.0, shape=[1, 2]) y = constant(1.0, shape=[1, 2]) with g.device("/task:1"): wx = math_ops.matmul(w, x) with g.device("/task:2"): wy = math_ops.matmul(w, y) with g.device("/device:GPU:0"): z = wx + wy gw1 = gradients.gradients(z, [w], colocate_gradients_with_ops=True)[0] self.assertEqual(gw1.op.colocation_groups(), w.op.colocation_groups()) gw2 = gradients.gradients(z, [w], colocate_gradients_with_ops=False)[0] self.assertTrue(w.op.colocation_groups() != gw2.op.colocation_groups()) def testColocateGradientsWithGateGradients(self): if not test_util.is_gpu_available(): self.skipTest("No GPU available") with ops.Graph().as_default() as g: with g.device("/device:CPU:0"): x = constant(1.0, shape=[1, 1]) y = constant(1.0, shape=[1, 1]) s = x + y with g.device("/device:GPU:0"): z = math_ops.reduce_sum(s) gz_x = gradients.gradients(z, [x], colocate_gradients_with_ops=True, gate_gradients=True)[0] with session.Session(): # Make sure the placer doesn't complain. self.evaluate(gz_x) def testBoundaryStop(self): # Test that we don't differentiate 'x'. The gradient function for 'x' is # set explicitly to None so we will get an exception if the gradient code # tries to differentiate 'x'. with ops.Graph().as_default(): c = constant(1.0) x = array_ops.identity(c) y = x + 1.0 z = y + 1 grads = gradients.gradients(z, [x]) self.assertTrue(all(x is not None for x in grads)) @test_util.run_v1_only("b/120545219") def testBoundaryContinue(self): # Test that we differentiate both 'x' and 'y' correctly when x is a # predecessor of y. with self.cached_session(): x = constant(1.0) y = x * 2.0 z = y * 3.0 grads = gradients.gradients(z, [x, y]) self.assertTrue(all(x is not None for x in grads)) self.assertEqual(6.0, grads[0].eval()) @test_util.run_v1_only("b/120545219") def testAggregationMethodAccumulateN(self): with self.cached_session(): x = constant(1.0) y = x * 2.0 z = y + y + y + y + y + y + y + y + y + y grads = gradients.gradients( z, [x, y], aggregation_method=gradients.AggregationMethod. 
EXPERIMENTAL_ACCUMULATE_N) self.assertTrue(all(x is not None for x in grads)) self.assertEqual(20.0, grads[0].eval()) self.assertEqual(10.0, grads[1].eval()) @test_util.run_v1_only("b/120545219") def testAggregationMethodAddN(self): with self.cached_session(): x = constant(1.0) y = x * 2.0 z = y + y + y + y + y + y + y + y + y + y grads = gradients.gradients( z, [x, y], aggregation_method=gradients.AggregationMethod.ADD_N) self.assertTrue(all(x is not None for x in grads)) self.assertEqual(20.0, grads[0].eval()) self.assertEqual(10.0, grads[1].eval()) @test_util.run_v1_only("b/120545219") def testAggregationMethodTree(self): with self.cached_session(): x = constant(1.0) y = x * 2.0 z = y + y + y + y + y + y + y + y + y + y grads = gradients.gradients( z, [x, y], aggregation_method=gradients.AggregationMethod.EXPERIMENTAL_TREE) self.assertTrue(all(x is not None for x in grads)) self.assertEqual(20.0, grads[0].eval()) self.assertEqual(10.0, grads[1].eval()) def testNoGradientForStringOutputs(self): with ops.Graph().as_default(): def _TestOpGrad(_, float_grad, string_grad): """Gradient function for TestStringOutput.""" self.assertEquals(float_grad.dtype, dtypes.float32) self.assertFalse(string_grad) return float_grad ops.RegisterGradient("TestStringOutput")(_TestOpGrad) c = constant(1.0) x, _ = test_ops.test_string_output(c) z = x * 2.0 w = z * 3.0 grads = gradients.gradients(z, [c]) self.assertTrue(isinstance(grads[0], ops.Tensor)) grads = gradients.gradients(w, [c]) self.assertTrue(isinstance(grads[0], ops.Tensor)) def testSingletonIndexedSlices(self): with ops.Graph().as_default(): x = array_ops.placeholder(dtypes.float32) y = array_ops.identity(x) dy = ops.IndexedSlices( array_ops.placeholder(dtypes.float32), array_ops.placeholder(dtypes.int32)) dx, = gradients.gradients(y, x, grad_ys=dy) # The IndexedSlices gradient of tf.identity is the identity map. 
with self.cached_session() as sess: vdx, vdy = sess.run( [dx, dy], feed_dict={x: [1.0], dy.indices: [0], dy.values: [2.0]}) self.assertEqual(vdx, vdy) @test_util.run_v1_only("b/120545219") def testNonDifferentiableSwitchInWhileLoop(self): with ops.Graph().as_default(): v = array_ops.placeholder(dtypes.float32, []) def _Step(i, a, ta): a += math_ops.cast(v, dtypes.int32) return (i + 1, a, ta.write(i, a)) n = 4 i, _, ta = control_flow_ops.while_loop( lambda i, *_: i < n, _Step, [0, 0, tensor_array_ops.TensorArray( dtypes.int32, size=n)]) target = ta.read(i - 1) grad, = gradients.gradients(target, v) self.assertIsNone(grad) def testVariableReadValueGradient(self): with ops.Graph().as_default(): init = constant_op.constant(100.0) var = variables.Variable(init) gradient = gradients.gradients(var.read_value(), var) self.assertIsNotNone(gradient) @parameterized.parameters(dtypes.float32, dtypes.float64) def testVariableDefaultGrad(self, dtype): with ops.Graph().as_default(): init = constant_op.constant(100.0, dtype=dtype) var = variables.Variable(init) dummy_const = constant_op.constant(0.0) gradient = gradients.gradients( dummy_const, var, unconnected_gradients=unconnected_gradients.UnconnectedGradients.ZERO )[0] self.assertEqual(gradient.dtype, dtype) self.assertIsNotNone(gradient) def testVariableAsGraphElementGradient(self): with ops.Graph().as_default() as graph: init = constant_op.constant(100.0) var = variables.Variable(init) gradient = gradients.gradients(graph.as_graph_element(var), var) self.assertIsNotNone(gradient) @test_util.run_v1_only("b/120545219") def testVariableRefGradient(self): with ops.Graph().as_default(): init = constant_op.constant(100.0) var = variables.VariableV1(init) gradient = gradients.gradients(var._ref(), var) self.assertIsNotNone(gradient) @test_util.run_v1_only("b/120545219") def testDependentYs(self): with self.cached_session(): x = constant_op.constant(3.0) y = math_ops.square(x) y1 = math_ops.square(y) y2 = math_ops.square(y1) g = gradients.gradients([y, y2], x) self.assertAllClose(17502.0, g[0].eval()) g = gradients.gradients(y + y2, x) self.assertAllClose(17502.0, g[0].eval()) z = array_ops.identity(y) z2 = array_ops.identity(y2) g = gradients.gradients([z, z2], x) self.assertAllClose(17502.0, g[0].eval()) @test_util.run_v1_only("b/120545219") def testPartialDerivatives(self): with self.cached_session(): x = constant_op.constant(1.) y = 2 * x z = x + y totalg = gradients.gradients(z, [x, y]) self.assertEqual([3.0, 1.0], [g.eval() for g in totalg]) partialg = gradients.gradients(z, [x, y], stop_gradients=[x, y]) self.assertEqual([1.0, 1.0], [g.eval() for g in partialg]) @test_util.run_v1_only("b/120545219") def testStopGradients(self): def _MakeGraph(rng, stop_gradients=()): def _FunctionOf(xs, k=3): return ops.convert_to_tensor( sum(math_ops.matmul(rng.rand(k, k), x) for x in xs) + rng.rand(k, k)) a = _FunctionOf([]) if "a" in stop_gradients: a = array_ops.stop_gradient(a) b = _FunctionOf([a]) if "b" in stop_gradients: b = array_ops.stop_gradient(b) c = _FunctionOf([a, b]) if "c" in stop_gradients: c = array_ops.stop_gradient(c) d = _FunctionOf([b, c]) if "d" in stop_gradients: d = array_ops.stop_gradient(d) return dict(a=a, b=b, c=c, d=d) def _Gradients(ys, xs, **kwargs): dydxs = gradients.gradients(ys, xs, **kwargs) dydxs = [0. 
* x if dydx is None else dydx for x, dydx in zip(xs, dydxs)] return dydxs seed = np.random.randint(1000) cases = [] subsets = [""] + "a b c d ab ac ad bc bd cd abc abd acd bcd abcd".split() graph = _MakeGraph(np.random.RandomState(seed)) for constants in subsets: graph_with_stops = _MakeGraph(np.random.RandomState(seed), constants) for variables_ in subsets: # compute the gradient when stopped using tf.stop_gradients grad1 = _Gradients([graph_with_stops["d"]], [graph_with_stops[v] for v in variables_]) # compute the gradient when stopped using the stop_gradients kwarg grad2 = _Gradients([graph["d"]], [graph[v] for v in variables_], stop_gradients=[graph[v] for v in constants]) cases.append(dict(grad1=grad1, grad2=grad2, constants=constants, variables=variables_)) # evaluate all tensors in one call to session.run for speed with self.cached_session() as sess: results = sess.run([(case["grad1"], case["grad2"]) for case in cases]) for (npgrad1, npgrad2), case in zip(results, cases): for a, b in zip(npgrad1, npgrad2): np.testing.assert_allclose(a, b) def testUnconnectedGradientsNoneUnconnectedGradients(self): with ops.Graph().as_default(): x = constant(1.0, shape=[2, 2]) y = constant(3.0, shape=[3, 1]) grad = gradients.gradients( [y], [x], unconnected_gradients="none") self.assertIsNone(grad[0]) def testUnconnectedGradientsZerosUnconnectedGradients(self): with ops.Graph().as_default(): x = constant(1.0, shape=[2, 2]) y = constant(3.0, shape=[3, 1]) grads = gradients.gradients( [y], [x], unconnected_gradients="zero") with self.cached_session() as sess: self.assertAllEqual([[0.0, 0.0], [0.0, 0.0]], self.evaluate(grads)[0]) def testUnconnectedGradientsZeroConnectedGradients(self): with ops.Graph().as_default(): x = constant(1.0) y = x * 3.0 grad = gradients.gradients( [y], [x], unconnected_gradients="zero") with self.cached_session() as sess: self.assertEquals(3.0, self.evaluate(grad)[0]) def testUnknownUnconnectedGradientsValueGiven(self): with ops.Graph().as_default(): x = constant(1.0) y = constant(1.0) with self.assertRaisesRegexp( ValueError, "Unknown value for unconnected_gradients: 'nonsense'"): gradients.gradients([y], [x], unconnected_gradients="nonsense") class FunctionGradientsTest(test_util.TensorFlowTestCase): @classmethod def XSquarePlusB(cls, x, b): return x * x + b @classmethod def XSquarePlusBGradient(cls, x, b, g): # Perturb gradients (multiply by 2), so we can test that this was called. g *= 2.0 return g * 2.0 * x, g @classmethod def _PythonGradient(cls, op, grad): # Perturb gradients (multiply by 3), so we can test that this was called. grad *= 3.0 return grad * op.inputs[0] * 2.0, grad @classmethod def _GetFunc(cls, **kwargs): return framework_function.Defun(dtypes.float32, dtypes.float32, ** kwargs)(cls.XSquarePlusB) def _GetFuncGradients(self, f, x_value, b_value): x = constant_op.constant(x_value, name="x") b = constant_op.constant(b_value, name="b") y = f(x, b) grads = gradients.gradients(y, [x, b]) with self.cached_session() as sess: return sess.run(grads) def testFunctionGradientsBasic(self): g = ops.Graph() with g.as_default(): f = self._GetFunc() # Get gradients (should add SymbolicGradient node for function). 
grads = self._GetFuncGradients(f, [2.0], [1.0]) self.assertAllEqual([4.0], grads[0]) self.assertAllEqual([1.0], grads[1]) def testFunctionGradientsComposition(self): with ops.Graph().as_default(): f = self._GetFunc() x = constant_op.constant([2.0], name="x") b1 = constant_op.constant([1.0], name="b1") b2 = constant_op.constant([1.0], name="b2") y = f(f(x, b1), b2) # Build gradient graph (should add SymbolicGradient node for function). grads = gradients.gradients(y, [x, b1]) with self.cached_session() as sess: self.assertAllEqual([40.0], self.evaluate(grads)[0]) self.assertAllEqual([10.0], self.evaluate(grads)[1]) def testFunctionGradientsWithGradFunc(self): g = ops.Graph() with g.as_default(): grad_func = framework_function.Defun(dtypes.float32, dtypes.float32, dtypes.float32)( self.XSquarePlusBGradient) f = self._GetFunc(grad_func=grad_func) # Get gradients (should add SymbolicGradient node for function, which # uses the grad_func above, which multiplies all gradients by 2). grads = self._GetFuncGradients(f, [2.0], [1.0]) self.assertAllEqual([4.0 * 2], grads[0]) self.assertAllEqual([1.0 * 2], grads[1]) def testFunctionGradientWithRegistration(self): g = ops.Graph() with g.as_default(): f = self._GetFunc(python_grad_func=self._PythonGradient) # Get gradients, using the python gradient function. It multiplies the # gradients by 3. grads = self._GetFuncGradients(f, [2.0], [1.0]) self.assertAllEqual([4.0 * 3], grads[0]) self.assertAllEqual([1.0 * 3], grads[1]) def testFunctionGradientWithGradFuncAndRegistration(self): g = ops.Graph() with g.as_default(): grad_func = framework_function.Defun(dtypes.float32, dtypes.float32, dtypes.float32)( self.XSquarePlusBGradient) with self.assertRaisesRegexp(ValueError, "Gradient defined twice"): f = self._GetFunc( grad_func=grad_func, python_grad_func=self._PythonGradient) f.add_to_graph(ops.Graph()) def testGradientWrtCaptured(self): with ops.Graph().as_default(): x = constant_op.constant(1.0, name="x") @function.defun() def Foo(): y = math_ops.multiply(x, 2.0, name="y") g = gradients_impl.gradients(y, x) return g[0] f = Foo() with self.cached_session() as sess: self.assertEqual(self.evaluate(f), 2.0) def testGradientOfCaptured(self): with ops.Graph().as_default(): x = constant_op.constant(1.0, name="x") y = math_ops.multiply(x, 2.0, name="y") @framework_function.Defun() def Foo(): g = gradients_impl.gradients(y, x) return g[0] f = Foo() with self.cached_session() as sess: self.assertEqual(self.evaluate(f), 2.0) def testCapturedResourceVariable(self): with ops.Graph().as_default(): var = resource_variable_ops.ResourceVariable(1.0, name="var") @function.defun() def Foo(): y = math_ops.multiply(var, 2.0, name="y") g = gradients_impl.gradients(y, var) return g[0] f = Foo() with self.cached_session() as sess: self.evaluate(variables.global_variables_initializer()) self.assertEqual(self.evaluate(f), 2.0) def testCapturedNested(self): with ops.Graph().as_default(): x1 = constant_op.constant(1.0, name="x1") x2 = constant_op.constant(2.0, name="x2") x3 = math_ops.multiply(x1, x2, name="x3") @function.defun() def Outer(): outer1 = array_ops.identity(x1, name="outer1") @function.defun() def Inner(): inner1 = array_ops.identity(outer1, name="inner1") inner2 = array_ops.identity(x2, name="inner2") inner3 = array_ops.identity(x3, name="inner3") return gradients_impl.gradients([inner1, inner2, inner3, x1], [x1, x2]) return Inner() x1_grad, x2_grad = Outer() with self.cached_session() as sess: # 1.0 + None + 2.0 + 1.0 = 4.0 self.assertEqual(self.evaluate(x1_grad), 4.0) 
# None + 1.0 + 1.0 + None = 2.0 self.assertEqual(self.evaluate(x2_grad), 2.0) def testCapturedFromFunction(self): with ops.Graph().as_default(): x = constant_op.constant(1.0, name="x") @function.defun() def Outer(): y = math_ops.multiply(x, 2.0, name="y") @function.defun() def Inner(): z = math_ops.multiply(y, 3.0, name="z") g = gradients_impl.gradients(z, y) return g[0] return Inner() z_grad = Outer() with self.cached_session() as sess: self.assertEqual(self.evaluate(z_grad), 3.0) def testCapturedEagerTensors(self): # Test that we can handle captured eager tensors unrelated to the gradient # computation (i.e. we need to ignore them). # TODO(skyewm): make it an error if you try to take the gradient wrt a # captured EagerTensor with context.eager_mode(): c = constant_op.constant(2.0, name="c") @function.defun def Foo(): x = constant_op.constant(10.0, name="x") y = math_ops.multiply(x, c, name="y") # Regression test for b/122564611. z = math_ops.multiply(c, y, name="z") g = gradients_impl.gradients(z, x) return g[0] self.assertEqual(Foo().numpy(), 4.0) class StopGradientTest(test_util.TensorFlowTestCase): def testStopGradient(self): with ops.Graph().as_default(): inp = constant(1.0, shape=[100, 32], name="in") out = array_ops.stop_gradient(inp) igrad = gradients.gradients(out, inp)[0] assert igrad is None class PreventGradientTest(test_util.TensorFlowTestCase): def testPreventGradient(self): with ops.Graph().as_default(): inp = constant(1.0, shape=[100, 32], name="in") out = array_ops.prevent_gradient(inp) with self.assertRaisesRegexp(LookupError, "explicitly disabled"): _ = gradients.gradients(out, inp) class HessianVectorProductTest(test_util.TensorFlowTestCase): @test_util.run_v1_only("b/120545219") def testHessianVectorProduct(self): # Manually compute the Hessian explicitly for a low-dimensional problem # and check that HessianVectorProduct matches multiplication by the # explicit Hessian. # Specifically, the Hessian of f(x) = x^T A x is # H = A + A^T. # We expect HessianVectorProduct(f(x), x, v) to be H v. m = 4 rng = np.random.RandomState([1, 2, 3]) mat_value = rng.randn(m, m).astype("float32") v_value = rng.randn(m, 1).astype("float32") x_value = rng.randn(m, 1).astype("float32") hess_value = mat_value + mat_value.T hess_v_value = np.dot(hess_value, v_value) for use_gpu in [False, True]: with self.cached_session(use_gpu=use_gpu): mat = constant_op.constant(mat_value) v = constant_op.constant(v_value) x = constant_op.constant(x_value) mat_x = math_ops.matmul(mat, x, name="Ax") x_mat_x = math_ops.matmul(array_ops.transpose(x), mat_x, name="xAx") hess_v = gradients_impl._hessian_vector_product(x_mat_x, [x], [v])[0] hess_v_actual = self.evaluate(hess_v) self.assertAllClose(hess_v_value, hess_v_actual) class HessianTest(test_util.TensorFlowTestCase): @test_util.run_v1_only("b/120545219") def testHessian1D(self): # Manually compute the Hessian explicitly for a low-dimensional problem # and check that `hessian` matches. Specifically, the Hessian of # f(x) = x^T A x is H = A + A^T. 
m = 4 rng = np.random.RandomState([1, 2, 3]) mat_value = rng.randn(m, m).astype("float32") x_value = rng.randn(m).astype("float32") hess_value = mat_value + mat_value.T with self.session(use_gpu=True): mat = constant_op.constant(mat_value) x = constant_op.constant(x_value) x_mat_x = math_ops.reduce_sum(x[:, None] * mat * x[None, :]) hess = gradients.hessians(x_mat_x, x)[0] hess_actual = self.evaluate(hess) self.assertAllClose(hess_value, hess_actual) @test_util.run_v1_only("b/120545219") def testHessian1D_multi(self): # Test the computation of the hessian with respect to multiple tensors m = 4 n = 3 rng = np.random.RandomState([1, 2, 3]) mat_values = [rng.randn(m, m).astype("float32") for _ in range(n)] x_values = [rng.randn(m).astype("float32") for _ in range(n)] hess_values = [mat_value + mat_value.T for mat_value in mat_values] with self.session(use_gpu=True): mats = [constant_op.constant(mat_value) for mat_value in mat_values] xs = [constant_op.constant(x_value) for x_value in x_values] xs_mats_xs = [ math_ops.reduce_sum(x[:, None] * mat * x[None, :]) for x, mat in zip(xs, mats) ] hessians = gradients.hessians(xs_mats_xs, xs) hessians_actual = [hess.eval() for hess in hessians] for hess_value, hess_actual in zip(hess_values, hessians_actual): self.assertAllClose(hess_value, hess_actual) @test_util.run_v1_only("b/120545219") def testHessianInvalidDimension(self): for shape in [(10, 10), None]: with self.cached_session(use_gpu=True): x = array_ops.placeholder(dtypes.float32, shape) # Expect a ValueError because the dimensions are wrong with self.assertRaises(ValueError): gradients.hessians(x, x) @test_util.run_v1_only("b/120545219") def testHessian2D_square_matrix(self): # Manually compute the Hessian explicitly for a low-dimensional problem # and check that `hessian` matches. 
Specifically, the Hessian of # f(x) = 1/2 * x^T * x is H = constant (block identity matrix) m = 3 rng = np.random.RandomState([1, 2, 3]) x_value = rng.randn(m, m).astype("float32") with self.session(use_gpu=True): x = constant_op.constant(x_value) x_square = math_ops.reduce_sum( math_ops.matmul(array_ops.transpose(x), x) * 0.5 ) hess = gradients.hessians(x_square, x)[0] hess_actual = self.evaluate(hess) hess_value = np.bmat([ [elem*np.ones((m, m)) for elem in vec] for vec in np.eye(m) ]).astype("float32") self.assertAllEqual((m, m, m, m), hess_actual.shape) self.assertAllClose(hess_value, hess_actual.reshape((m * m, m * m))) @test_util.run_v1_only("b/120545219") def testHessian2D_non_square_matrix(self): m = 3 n = 4 rng = np.random.RandomState([1, 2, 3]) x_value = rng.randn(m, n).astype("float32") with self.session(use_gpu=True): x = constant_op.constant(x_value) x_square = math_ops.reduce_sum( math_ops.matmul(array_ops.transpose(x), x) * 0.5 ) hess = gradients.hessians(x_square, x)[0] hess_actual = self.evaluate(hess) hess_value = np.bmat([ [elem*np.ones((n, n)) for elem in vec] for vec in np.eye(m) ]).astype("float32") self.assertAllEqual((m, n, m, n), hess_actual.shape) self.assertAllClose(hess_value, hess_actual.reshape((m * n, m * n))) class IndexedSlicesToTensorTest(test_util.TensorFlowTestCase): @test_util.run_v1_only("b/120545219") def testIndexedSlicesToTensor(self): with self.cached_session(): np_val = np.random.rand(4, 4, 4, 4).astype(np.float32) c = constant_op.constant(np_val) c_sparse = math_ops._as_indexed_slices(c) self.assertAllEqual(np_val.shape, c_sparse.dense_shape.eval()) c_dense = math_ops.multiply(c_sparse, 1.0) self.assertAllClose(np_val, self.evaluate(c_dense)) @test_util.run_v1_only("b/120545219") def testIndexedSlicesToTensorList(self): with self.cached_session(): numpy_list = [] dense_list = [] sparse_list = [] for _ in range(3): np_val = np.random.rand(4, 4, 4, 4).astype(np.float32) c = constant_op.constant(np_val) c_sparse = math_ops._as_indexed_slices(c) numpy_list.append(np_val) dense_list.append(c) sparse_list.append(c_sparse) packed_dense = array_ops.stack(dense_list) packed_sparse = array_ops.stack(sparse_list) self.assertAllClose(packed_dense.eval(), self.evaluate(packed_sparse)) @test_util.run_v1_only("b/120545219") def testInt64Indices(self): with self.cached_session(): np_val = np.random.rand(4, 4, 4, 4).astype(np.float32) c = constant_op.constant(np_val) c_sparse = math_ops._as_indexed_slices(c) c_sparse = ops.IndexedSlices( c_sparse.values, math_ops.cast(c_sparse.indices, dtypes.int64), c_sparse.dense_shape) self.assertAllEqual(np_val.shape, c_sparse.dense_shape.eval()) c_dense = math_ops.multiply(c_sparse, 1.0) self.assertAllClose(np_val, self.evaluate(c_dense)) @test_util.run_v1_only("b/120545219") def testWarnings(self): # TODO(gunan) Reenable after this issue is fixed: # https://github.com/google/protobuf/issues/2812 if sys.version_info >= (3, 5): self.skipTest("Skipped test for Python 3.5+") # Smaller than the threshold: no warning. c_sparse = ops.IndexedSlices( array_ops.placeholder(dtypes.float32), array_ops.placeholder(dtypes.int32), constant([4, 4, 4, 4])) with warnings.catch_warnings(record=True) as w: math_ops.multiply(c_sparse, 1.0) self.assertEqual(0, len(w)) # Greater than or equal to the threshold: warning. 
c_sparse = ops.IndexedSlices( array_ops.placeholder(dtypes.float32), array_ops.placeholder(dtypes.int32), constant([100, 100, 100, 100])) # "always" filter prevents the warning from being suppressed if it was # already triggered in a different test. warnings.simplefilter("always") with warnings.catch_warnings(record=True) as w: math_ops.multiply(c_sparse, 1.0) self.assertEqual(1, len(w)) self.assertTrue( "with 100000000 elements. This may consume a large amount of memory." in str(w[0].message)) # Unknown dense shape: warning. c_sparse = ops.IndexedSlices( array_ops.placeholder(dtypes.float32), array_ops.placeholder(dtypes.int32), array_ops.placeholder(dtypes.int32)) with warnings.catch_warnings(record=True) as w: math_ops.multiply(c_sparse, 1.0) self.assertEqual(1, len(w)) self.assertTrue( "of unknown shape. This may consume a large amount of memory." in str(w[0].message)) class OnlyRealGradientsTest(test_util.TensorFlowTestCase): @test_util.run_v1_only("b/120545219") def testRealOnly(self): x = constant_op.constant(7+3j, dtype=dtypes.complex64) y = math_ops.square(x) with self.assertRaisesRegexp( TypeError, r"Gradients of complex tensors must set grad_ys " r"\(y\.dtype = tf\.complex64\)"): gradients.gradients(y, x) class ResourceCondTest(test_util.TensorFlowTestCase): @test_util.run_v1_only("b/120545219") def testBasic(self): gamma = resource_variable_ops.ResourceVariable( np.random.random((3,)), dtype="float32", name="gamma") inputs = array_ops.ones(shape=(3,), dtype="float32") def TestFn(): output = inputs + gamma return output training = array_ops.placeholder_with_default(True, shape=()) output = control_flow_ops.cond( training, TestFn, lambda: inputs) loss = output grads = gradients.gradients( loss, [gamma]) self.assertTrue(None not in grads) class CustomGradientTest(test_util.TensorFlowTestCase): def testCustomGradientTrivial(self): @custom_gradient.custom_gradient def MyIdentity(x): def Grad(dy): return [3 * dy] return x, Grad with ops.Graph().as_default(): x = constant(3.) y = MyIdentity(MyIdentity(x)) dy = gradients.gradients(y, x)[0] with session.Session(): self.assertEqual(9., self.evaluate(dy)) def testCustomGradient(self): @custom_gradient.custom_gradient def MyMultiply(x1, x2): result = x1 * x2 def Grad(dy): # Switched the ordering here. return [dy * x1, dy * x2] return result, Grad with ops.Graph().as_default(): x1 = constant(3.) x2 = constant(5.) 
y = MyMultiply(x1, x2) dy = gradients.gradients(y, [x1, x2]) with session.Session() as sess: self.assertAllEqual([3., 5.], self.evaluate(dy)) def testCustomGradientErrors(self): @custom_gradient.custom_gradient def F(x): def Grad(_): raise RuntimeError("x") return x, Grad with ops.Graph().as_default(): x = constant(1.0) y = F(x) with self.assertRaises(RuntimeError): gradients.gradients(y, x) def testCustomGradientWithVariables(self): @custom_gradient.custom_gradient def F(x): out = core_layers.dense(x, 3, use_bias=False) def Grad(out_grad, variables=None): # pylint: disable=redefined-outer-name self.assertEqual(1, len(variables)) grads = gradients.gradients(out, [x, variables[0]], grad_ys=out_grad) return grads[0], [array_ops.ones((4, 3))] return out, Grad with ops.Graph().as_default(): x = array_ops.ones((2, 4)) with variable_scope.variable_scope("f", use_resource=True) as vs: y = F(x) all_vars = vs.global_variables() assert len(all_vars) == 1 grads = gradients.gradients(y, [x, all_vars[0]]) for g in grads: self.assertTrue(g is not None) with session.Session() as sess: self.evaluate(variables.global_variables_initializer()) dw = sess.run(math_ops.reduce_sum(grads[1])) self.assertEqual(12., dw) def testCustomGradientWithVariablesEager(self): with context.eager_mode(): layer = core_layers.Dense(4, use_bias=False) @custom_gradient.custom_gradient def F(x): out = layer(x) def Grad(out_grad, variables=None): # pylint: disable=redefined-outer-name del out_grad self.assertEqual(1, len(variables)) return (array_ops.ones((3, 2)), [array_ops.ones((2, 4))]) return out, Grad x = array_ops.ones((3, 2)) + 2. with backprop.GradientTape() as tape: tape.watch(x) y = F(x) w, = layer.variables dx, dw = tape.gradient(y, [x, w]) self.assertEqual(6., math_ops.reduce_sum(dx).numpy()) self.assertEqual(8., math_ops.reduce_sum(dw).numpy()) @test_util.run_v1_only("b/120545219") def testCustomGradientErrorsWithNonResourceVariables(self): def F(x, use_resource=False): with variable_scope.variable_scope("f", use_resource=use_resource): out = core_layers.dense(x, 4, use_bias=False) def Grad(out_grad, variables=None): # pylint: disable=redefined-outer-name del out_grad self.assertEqual(1, len(variables)) return (array_ops.ones((3, 2)), [array_ops.ones((2, 4))]) return out, Grad @custom_gradient.custom_gradient def FResource(x): return F(x, use_resource=True) @custom_gradient.custom_gradient def FNonResource(x): return F(x, use_resource=False) x = array_ops.ones((3, 2)) + 2. # Wrapping scope has use_resource=True but inner scope sets to False. Fails. with variable_scope.variable_scope("vs1", use_resource=True): with self.assertRaisesWithPredicateMatch(TypeError, "must be `ResourceVariable`s"): FNonResource(x) # Wrapping scope has use_resource=False but inner scope sets to True. # Passes. 
with variable_scope.variable_scope("vs2", use_resource=False): FResource(x) def testWithNumpyInputs(self): with context.eager_mode(): @custom_gradient.custom_gradient def F(x): out = x def Grad(_): return (None, None) return out, Grad x = np.ones((3, 2), dtype=np.float32) # Smoke test to ensure numpy inputs are accepted F(x) @test_util.run_v1_only("b/120545219") def testRVGradientsDynamicCond(self): with self.cached_session(): alpha = resource_variable_ops.ResourceVariable( np.random.random((1,)), dtype="float32") conditional = array_ops.placeholder_with_default(True, shape=()) output = control_flow_ops.cond( conditional, lambda: alpha * 2, lambda: alpha * 3) g, = gradients_impl.gradients(output, alpha) self.evaluate(variables.global_variables_initializer()) self.assertAllEqual(g.eval(), [2.0]) self.assertAllEqual(g.eval(feed_dict={conditional: False}), [3.0]) def testRecursiveCustomGradient(self): @custom_gradient.custom_gradient def F(x): out = core_layers.dense(x, 3, use_bias=False) def Grad(out_grad, variables=None): # pylint: disable=redefined-outer-name self.assertEqual(1, len(variables)) grads = gradients.gradients(out, [x, variables[0]], grad_ys=out_grad) return grads[0], [array_ops.ones((4, 3))] return out, Grad @custom_gradient.custom_gradient def DoubleF(x): out = F(x) def Grad(out_grad, variables=None): # pylint: disable=redefined-outer-name self.assertEqual(1, len(variables)) grads = gradients.gradients(out, [x, variables[0]], grad_ys=out_grad) return grads[0], [array_ops.ones((4, 3))] return out, Grad with ops.Graph().as_default(): x = array_ops.ones((2, 4)) with variable_scope.variable_scope("f", use_resource=True) as vs: y = DoubleF(x) all_vars = vs.global_variables() assert len(all_vars) == 1 grads = gradients.gradients(y, [x, all_vars[0]]) for g in grads: self.assertIsNotNone(g) with session.Session() as sess: self.evaluate(variables.global_variables_initializer()) dw = sess.run(math_ops.reduce_sum(grads[1])) self.assertEqual(12., dw) class TensorListGradientsTest(test_util.TensorFlowTestCase): def testDefaultGradYs(self): with ops.Graph().as_default(): tl = list_ops.empty_tensor_list( element_dtype=dtypes.float32, element_shape=ops.convert_to_tensor([], dtype=dtypes.int32)) a = constant(1.0) tl = list_ops.tensor_list_push_back(tl, a) grad_tl = list_ops.empty_tensor_list( element_dtype=dtypes.float32, element_shape=ops.convert_to_tensor([], dtype=dtypes.int32)) grad_tl = list_ops.tensor_list_push_back(tl, constant(5.0)) grad = gradients.gradients(tl, a, grad_ys=grad_tl)[0] with self.cached_session() as sess: self.assertEquals(self.evaluate(grad), 5.) 
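# --- Illustrative sketch (not part of the original test suite) ---------------
# A minimal, hypothetical example of the custom_gradient pattern exercised by
# CustomGradientTest above: the forward computation returns x * x and the
# registered Grad function supplies d(x^2)/dx = 2x. The helper names below
# (_ExampleSquare, _example_custom_gradient_demo) are illustrative additions,
# not part of gradients_test.py; they use only modules already imported above.
@custom_gradient.custom_gradient
def _ExampleSquare(x):

  def Grad(dy):
    return dy * 2.0 * x  # chain rule: upstream gradient times 2x

  return x * x, Grad


def _example_custom_gradient_demo():
  """Builds the symbolic gradient dy/dx for y = _ExampleSquare(x), x = 3."""
  with ops.Graph().as_default():
    x = constant(3.0)
    y = _ExampleSquare(x)
    return gradients.gradients(y, x)[0]  # evaluates to 6.0 in a Session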
class TestKerasModelClass(training.Model): """A simple tensorflow keras Model class definition.""" def __init__(self, width): super(TestKerasModelClass, self).__init__() self.weight = variable_scope.get_variable( name="test_keras_var", shape=width, dtype=dtypes.float32, trainable=True, use_resource=True, ) def call(self, inputs): return self.weight * inputs class VariablesGradientTest(test_util.TensorFlowTestCase): def _TestVariablesGradient(self, inputs, test_model, vars_to_grad): """Returns gradients of `test_model` with respect to `vars_to_grad`.""" test_model_re = custom_gradient.recompute_grad(test_model) with backprop.GradientTape(persistent=True) as tape: tape.watch(vars_to_grad) out_re = test_model_re(inputs) out = test_model(inputs) grads_re = tape.gradient(out_re, vars_to_grad) grads = tape.gradient(out, vars_to_grad) return grads_re, grads def _TestFnVariablesGradient(self, inputs, test_fn, vars_to_grad): """Returns gradients of `test_model` with respect to `vars_to_grad`.""" test_fn_re = custom_gradient.recompute_grad(test_fn) with backprop.GradientTape(persistent=True) as tape: tape.watch(vars_to_grad) out_re = test_fn_re(inputs, vars_to_grad) out = test_fn(inputs, vars_to_grad) grads_re = tape.gradient(out_re, vars_to_grad) grads = tape.gradient(out, vars_to_grad) return grads_re, grads @test_util.run_in_graph_and_eager_modes def testKerasRecompute(self): """Checks that recompute_grad works for a simple Keras Model.""" test_model = TestKerasModelClass(10) test_input = constant(np.zeros((10, 10), dtype=np.float32)) self.evaluate(variables.global_variables_initializer()) test_model(test_input) # Ensures keras model is initialized. grads_re, grads = self._TestVariablesGradient(test_input, test_model, test_input) grads_re = self.evaluate(grads_re) grads = self.evaluate(grads) for g, g_re in zip(grads, grads_re): self.assertAllClose(g, g_re) grads_re, grads = self._TestVariablesGradient(test_input, test_model, test_model.variables) grads_re = self.evaluate(grads_re) grads = self.evaluate(grads) for g, g_re in zip(grads, grads_re): self.assertAllClose(g, g_re) @test_util.run_in_graph_and_eager_modes def testFnRecompute(self): """Checks that recompute_grad works grads of function args.""" def TestFn(inputs, input_vars): return inputs * input_vars with variable_scope.variable_scope("test", use_resource=True): test_var = variable_scope.get_variable( name="test_var", shape=10, trainable=True, ) test_input = constant(np.zeros((10, 10), dtype=np.float32)) grads_re, grads = self._TestFnVariablesGradient(test_input, TestFn, test_input) grads_re = self.evaluate(grads_re) grads = self.evaluate(grads) for g, g_re in zip(grads, grads_re): self.assertAllClose(g, g_re) grads_re, grads = self._TestFnVariablesGradient(test_input, TestFn, test_var) grads_re = self.evaluate(grads_re) grads = self.evaluate(grads) for g, g_re in zip(grads, grads_re): self.assertAllClose(g, g_re) if __name__ == "__main__": googletest.main()
tensorflow-master
tensorflow/python/ops/gradients_test.py
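# ----------------------------------------------------------------------------
# Hedged sketch (illustrative, not part of the dataset record above): the
# VariablesGradientTest cases in gradients_test.py check that wrapping a
# function with recompute_grad leaves its gradients unchanged.  The standalone
# snippet below mirrors that comparison using the public tf.recompute_grad and
# tf.GradientTape APIs; the names _recompute_grad_sketch and sketch_kernel are
# local to this example.
import numpy as np
import tensorflow as tf


def _recompute_grad_sketch():
  """Returns (grads, grads_re): variable gradients with and without recompute."""
  w = tf.Variable(np.ones((2, 3), dtype=np.float32), name="sketch_kernel")

  def f(inputs):
    return tf.reduce_sum(tf.matmul(inputs, w))

  # recompute_grad discards f's intermediate activations after the forward
  # pass and recomputes them during backprop, trading compute for memory.
  f_re = tf.recompute_grad(f)

  x = tf.constant(np.ones((4, 2), dtype=np.float32))
  with tf.GradientTape(persistent=True) as tape:
    out = f(x)
    out_re = f_re(x)

  grads = tape.gradient(out, [w])
  grads_re = tape.gradient(out_re, [w])
  # Both should be numerically identical (a 2x3 matrix of 4s here).
  return grads, grads_re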
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """A class to store named variables and a scope operator to manage sharing.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections as collections_lib import copy import enum # pylint: disable=g-bad-import-order import functools import sys import threading import traceback import six from six import iteritems from six.moves import xrange # pylint: disable=redefined-builtin from tensorflow.python import tf2 from tensorflow.python.eager import context from tensorflow.python.eager import monitoring from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.ops import array_ops from tensorflow.python.ops import init_ops from tensorflow.python.ops import resource_variable_ops from tensorflow.python.ops import variables from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util import deprecation from tensorflow.python.util import function_utils from tensorflow.python.util import tf_contextlib from tensorflow.python.util import tf_inspect from tensorflow.python.util.tf_export import tf_export __all__ = [ "AUTO_REUSE", "VariableScope", "get_variable_scope", "get_variable", "get_local_variable", "variable_scope", "variable_op_scope", "no_regularizer", "VariableSynchronization", "VariableAggregation" ] _api_usage_gauge = monitoring.BoolGauge( "/tensorflow/api/resource_variables", "Whether variable_scope.enable_resource_variables() is called.") class _PartitionInfo(object): """Holds partition info used by initializer functions.""" def __init__(self, full_shape, var_offset): """Constructor. Args: full_shape: Tuple or list of `int` indicating the full combined shape of the partitioned variables. var_offset: Tuple or list of `int` specifying offset of this partition with respect to the full variable for each dimension. Raises: TypeError: If `full_shape` or `var_offset` is not a sequence. ValueError: If `full_shape` or `var_offset` differ in length. If `var_offset` exceeds `full_shape` in any dimension. 
""" if not isinstance(full_shape, collections_lib.Sequence) or isinstance( full_shape, six.string_types): raise TypeError( "`full_shape` must be a sequence (like tuple or list) instead of " + type(full_shape).__name__) if not isinstance(var_offset, collections_lib.Sequence) or isinstance( var_offset, six.string_types): raise TypeError( "`var_offset` must be a sequence (like tuple or list) instead of " + type(var_offset).__name__) if len(var_offset) != len(full_shape): raise ValueError( "Expected equal length, but `var_offset` is of length {} while " "full_shape is of length {}.".format( len(var_offset), len(full_shape))) for i in xrange(len(full_shape)): offset = var_offset[i] shape = full_shape[i] if offset < 0 or offset >= shape: raise ValueError( "Expected 0 <= offset < shape but found offset={}, shape={} for " "var_offset={}, full_shape={}".format(offset, shape, var_offset, full_shape)) self._full_shape = full_shape self._var_offset = var_offset @property def full_shape(self): return self._full_shape @property def var_offset(self): return self._var_offset def single_offset(self, shape): """Returns the offset when the variable is partitioned in at most one dim. Args: shape: Tuple or list of `int` indicating the shape of one specific variable partition. Returns: `int` representing the offset in the dimension along which the variable is partitioned. Returns 0 if the variable is not being partitioned. Raises: ValueError: Depending on self.single_slice_dim(). """ single_slice_dim = self.single_slice_dim(shape) # If this variable is not being partitioned at all, single_slice_dim() could # return None. if single_slice_dim is None: return 0 return self.var_offset[single_slice_dim] def single_slice_dim(self, shape): """Returns the slice dim when the variable is partitioned only in one dim. Args: shape: Tuple or list of `int` indicating the shape of one specific variable partition. Returns: `int` representing the dimension that the variable is partitioned in, or `None` if the variable doesn't seem to be partitioned at all. Raises: TypeError: If `shape` is not a sequence. ValueError: If `shape` is not the same length as `self.full_shape`. If the variable is partitioned in more than one dimension. """ if not isinstance(shape, collections_lib.Sequence) or isinstance( shape, six.string_types): raise TypeError( "`shape` must be a sequence (like tuple or list) instead of " + type(shape).__name__) if len(shape) != len(self.full_shape): raise ValueError( "Expected equal length, but received shape={} of length {} while " "self.full_shape={} is of length {}.".format(shape, len(shape), self.full_shape, len(self.full_shape))) for i in xrange(len(shape)): if self.var_offset[i] + shape[i] > self.full_shape[i]: raise ValueError( "With self.var_offset={}, a partition of shape={} would exceed " "self.full_shape={} in dimension {}.".format( self.var_offset, shape, self.full_shape, i)) slice_dim = None for i in xrange(len(shape)): if shape[i] == self.full_shape[i]: continue if slice_dim is not None: raise ValueError( "Cannot use single_slice_dim() with shape={} and " "self.full_shape={} since slice dim could be either dimension {} " "or {}.".format(shape, self.full_shape, i, slice_dim)) slice_dim = i return slice_dim class _ReuseMode(enum.Enum): """Mode for variable access within a variable scope.""" # Indicates that variables are to be fetched if they already exist or # otherwise created. AUTO_REUSE = 1 # TODO(alive): For TensorFlow 2.0, Deprecate True/False/None API in favor of # enum values. 
# REUSE_FALSE = 2 # REUSE_TRUE = 3 # TODO(apassos) remove these forwarding symbols. VariableSynchronization = variables.VariableSynchronization # pylint: disable=invalid-name VariableAggregation = variables.VariableAggregation # pylint: disable=invalid-name AUTO_REUSE = _ReuseMode.AUTO_REUSE tf_export(v1=["AUTO_REUSE"]).export_constant(__name__, "AUTO_REUSE") AUTO_REUSE.__doc__ = """ When passed in as the value for the `reuse` flag, AUTO_REUSE indicates that get_variable() should create the requested variable if it doesn't exist or, if it does exist, simply return it. """ _DEFAULT_USE_RESOURCE = tf2.enabled() @tf_export(v1=["enable_resource_variables"]) def enable_resource_variables(): """Creates resource variables by default. Resource variables are improved versions of TensorFlow variables with a well-defined memory model. Accessing a resource variable reads its value, and all ops which access a specific read value of the variable are guaranteed to see the same value for that tensor. Writes which happen after a read (by having a control or data dependency on the read) are guaranteed not to affect the value of the read tensor, and similarly writes which happen before a read are guaranteed to affect the value. No guarantees are made about unordered read/write pairs. Calling tf.enable_resource_variables() lets you opt-in to this TensorFlow 2.0 feature. """ global _DEFAULT_USE_RESOURCE _DEFAULT_USE_RESOURCE = True _api_usage_gauge.get_cell().set(True) @tf_export(v1=["resource_variables_enabled"]) def resource_variables_enabled(): """Returns `True` if resource variables are enabled. Resource variables are improved versions of TensorFlow variables with a well-defined memory model. Accessing a resource variable reads its value, and all ops which access a specific read value of the variable are guaranteed to see the same value for that tensor. Writes which happen after a read (by having a control or data dependency on the read) are guaranteed not to affect the value of the read tensor, and similarly writes which happen before a read are guaranteed to affect the value. No guarantees are made about unordered read/write pairs. Calling tf.enable_resource_variables() lets you opt-in to this TensorFlow 2.0 feature. """ global _DEFAULT_USE_RESOURCE return _DEFAULT_USE_RESOURCE @deprecation.deprecated( None, "non-resource variables are not supported in the long term") @tf_export(v1=["disable_resource_variables"]) def disable_resource_variables(): """Opts out of resource variables. If your code needs tf.disable_resource_variables() to be called to work properly please file a bug. """ global _DEFAULT_USE_RESOURCE _DEFAULT_USE_RESOURCE = False _api_usage_gauge.get_cell().set(False) class _VariableStore(object): """Variable store that carries a number of named Variables. New variable names and new variables can be created; all stored variables are initialized with the initializer passed to __init__. Attributes: vars: a dictionary with string names (same as passed in GetVar) as keys and the corresponding TensorFlow Variables as values. """ def __init__(self): """Create a variable store.""" self._vars = {} # A dictionary of the stored TensorFlow variables. self._partitioned_vars = {} # A dict of the stored PartitionedVariables. 
self._store_eager_variables = False def get_variable(self, name, shape=None, dtype=dtypes.float32, initializer=None, regularizer=None, reuse=None, trainable=None, collections=None, caching_device=None, partitioner=None, validate_shape=True, use_resource=None, custom_getter=None, constraint=None, synchronization=VariableSynchronization.AUTO, aggregation=VariableAggregation.NONE): """Gets an existing variable with these parameters or create a new one. If a variable with the given name is already stored, we return the stored variable. Otherwise, we create a new one. Set `reuse` to `True` when you only want to reuse existing Variables. Set `reuse` to `False` when you only want to create new Variables. Set `reuse` to None (the default) or tf.compat.v1.AUTO_REUSE when you want variables to be created if they don't exist or returned if they do. If initializer is `None` (the default), the default initializer passed in the constructor is used. If that one is `None` too, we use a new `glorot_uniform_initializer`. If initializer is a Tensor, we use it as a value and derive the shape from the initializer. If a partitioner is provided, a `PartitionedVariable` is returned. Accessing this object as a `Tensor` returns the shards concatenated along the partition axis. Some useful partitioners are available. See, e.g., `variable_axis_size_partitioner` and `min_max_variable_partitioner`. Args: name: The name of the new or existing variable. shape: Shape of the new or existing variable. dtype: Type of the new or existing variable (defaults to `DT_FLOAT`). initializer: Initializer for the variable. regularizer: A (Tensor -> Tensor or None) function; the result of applying it on a newly created variable will be added to the collection GraphKeys.REGULARIZATION_LOSSES and can be used for regularization. reuse: a Boolean, None, or tf.AUTO_REUSE. Controls reuse or creation of variables. When eager execution is enabled this argument is always forced to be False. trainable: If `True` also add the variable to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). `trainable` defaults to `True`, unless `synchronization` is set to `ON_READ`, in which case it defaults to `False`. collections: List of graph collections keys to add the `Variable` to. Defaults to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`). caching_device: Optional device string or function describing where the Variable should be cached for reading. Defaults to the Variable's device. If not `None`, caches on another device. Typical use is to cache on the device where the Ops using the `Variable` reside, to deduplicate copying through `Switch` and other conditional statements. partitioner: Optional callable that accepts a fully defined `TensorShape` and dtype of the `Variable` to be created, and returns a list of partitions for each axis (currently only one axis can be partitioned). validate_shape: If False, allows the variable to be initialized with a value of unknown shape. If True, the default, the shape of initial_value must be known. use_resource: If False, creates a regular Variable. If True, creates instead an experimental ResourceVariable which has well-defined semantics. Defaults to False (will later change to True). When eager execution is enabled this argument is always forced to be true. custom_getter: Callable that takes as a first argument the true getter, and allows overwriting the internal get_variable method. 
The signature of `custom_getter` should match that of this method, but the most future-proof version will allow for changes: `def custom_getter(getter, *args, **kwargs)`. Direct access to all `get_variable` parameters is also allowed: `def custom_getter(getter, name, *args, **kwargs)`. A simple identity custom getter that simply creates variables with modified names is: ```python def custom_getter(getter, name, *args, **kwargs): return getter(name + '_suffix', *args, **kwargs) ``` constraint: An optional projection function to be applied to the variable after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints for layer weights). The function must take as input the unprojected Tensor representing the value of the variable and return the Tensor for the projected value (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. synchronization: Indicates when a distributed a variable will be aggregated. Accepted values are constants defined in the class `tf.VariableSynchronization`. By default the synchronization is set to `AUTO` and the current `DistributionStrategy` chooses when to synchronize. aggregation: Indicates how a distributed variable will be aggregated. Accepted values are constants defined in the class `tf.VariableAggregation`. Returns: The created or existing `Variable` (or `PartitionedVariable`, if a partitioner was used). Raises: ValueError: when creating a new variable and shape is not declared, when reusing a variable and specifying a conflicting shape, or when violating reuse during variable creation. RuntimeError: when eager execution is enabled and not called from an EagerVariableStore. """ if custom_getter is not None and not callable(custom_getter): raise ValueError("Passed a custom_getter which is not callable: %s" % custom_getter) with ops.init_scope(): if context.executing_eagerly(): # Variable creation and initialization takes place in `init_scope`s; # as such, if an `init_scope` lifts us into the eager context, then we # need to use `ResourceVariable`s. use_resource = True # Note that it's fine to reuse eager variables whose initialization was # lifted from a function-building graph into the eager context (that's why # the following clause is not wrapped in an `init_scope`); lifted variables # are tracked by the graph's `VariableStore`. if context.executing_eagerly(): if not self._store_eager_variables and reuse: raise RuntimeError( "When eager execution is enabled variable reuse is only supported" " when an EagerVariableStore is active. See the documentation on" " EagerVariableStore for example usage.") if self._store_eager_variables: reuse = AUTO_REUSE # If a *_ref type is passed in an error would be triggered further down the # stack. We prevent this using base_dtype to get a non-ref version of the # type, before doing anything else. When _ref types are removed in favor of # resources, this line can be removed. try: dtype = dtype.base_dtype except AttributeError: # .base_dtype not existing means that we will try and use the raw dtype # which was passed in - this might be a NumPy type which is valid. pass # This is the main logic of get_variable. However, custom_getter # may override this logic. So we save it as a callable and pass # it to custom_getter. # Note: the parameters of _true_getter, and their documentation, match # *exactly* item-for-item with the docstring of this method. 
def _true_getter( # pylint: disable=missing-docstring name, shape=None, dtype=dtypes.float32, initializer=None, regularizer=None, reuse=None, trainable=None, collections=None, caching_device=None, partitioner=None, validate_shape=True, use_resource=None, constraint=None, synchronization=VariableSynchronization.AUTO, aggregation=VariableAggregation.NONE): is_scalar = ( shape is not None and isinstance(shape, collections_lib.Sequence) and not shape) # Partitioned variable case if partitioner is not None and not is_scalar: if not callable(partitioner): raise ValueError("Partitioner must be callable, but received: %s" % partitioner) with ops.name_scope(None): return self._get_partitioned_variable( name=name, shape=shape, dtype=dtype, initializer=initializer, regularizer=regularizer, reuse=reuse, trainable=trainable, collections=collections, caching_device=caching_device, partitioner=partitioner, validate_shape=validate_shape, use_resource=use_resource, constraint=constraint, synchronization=synchronization, aggregation=aggregation) # Special case for partitioned variable to allow reuse without having to # specify partitioner. if (reuse is True and partitioner is None and name in self._partitioned_vars): return self._get_partitioned_variable( name=name, shape=shape, dtype=dtype, initializer=initializer, regularizer=regularizer, reuse=reuse, trainable=trainable, collections=collections, caching_device=caching_device, partitioner=None, validate_shape=validate_shape, use_resource=use_resource, constraint=constraint, synchronization=synchronization, aggregation=aggregation) # Single variable case if "%s/part_0" % name in self._vars: raise ValueError( "No partitioner was provided, but a partitioned version of the " "variable was found: %s/part_0. Perhaps a variable of the same " "name was already created with partitioning?" % name) return self._get_single_variable( name=name, shape=shape, dtype=dtype, initializer=initializer, regularizer=regularizer, reuse=reuse, trainable=trainable, collections=collections, caching_device=caching_device, validate_shape=validate_shape, use_resource=use_resource, constraint=constraint, synchronization=synchronization, aggregation=aggregation) synchronization, aggregation, trainable = ( variables.validate_synchronization_aggregation_trainable( synchronization, aggregation, trainable, name)) if custom_getter is not None: # Handle backwards compatibility with getter arguments that were added # to the API after users started writing custom getters. custom_getter_kwargs = { "getter": _true_getter, "name": name, "shape": shape, "dtype": dtype, "initializer": initializer, "regularizer": regularizer, "reuse": reuse, "trainable": trainable, "collections": collections, "caching_device": caching_device, "partitioner": partitioner, "validate_shape": validate_shape, "use_resource": use_resource, "synchronization": synchronization, "aggregation": aggregation, } # `fn_args` and `has_kwargs` can handle functions, `functools.partial`, # `lambda`. 
if ("constraint" in function_utils.fn_args(custom_getter) or function_utils.has_kwargs(custom_getter)): custom_getter_kwargs["constraint"] = constraint return custom_getter(**custom_getter_kwargs) else: return _true_getter( name, shape=shape, dtype=dtype, initializer=initializer, regularizer=regularizer, reuse=reuse, trainable=trainable, collections=collections, caching_device=caching_device, partitioner=partitioner, validate_shape=validate_shape, use_resource=use_resource, constraint=constraint, synchronization=synchronization, aggregation=aggregation) def _get_partitioned_variable(self, name, partitioner, shape=None, dtype=dtypes.float32, initializer=None, regularizer=None, reuse=None, trainable=None, collections=None, caching_device=None, validate_shape=True, use_resource=None, constraint=None, synchronization=VariableSynchronization.AUTO, aggregation=VariableAggregation.NONE): """Gets or creates a sharded variable list with these parameters. The `partitioner` must be a callable that accepts a fully defined `TensorShape` and returns a sequence of integers (the `partitions`). These integers describe how to partition the given sharded `Variable` along the given dimension. That is, `partitions[1] = 3` means split the `Variable` into 3 shards along dimension 1. Currently, sharding along only one axis is supported. If the list of variables with the given name (prefix) is already stored, we return the stored variables. Otherwise, we create a new one. Set `reuse` to `True` when you only want to reuse existing Variables. Set `reuse` to `False` when you only want to create new Variables. Set `reuse` to None (the default) or tf.compat.v1.AUTO_REUSE when you want variables to be created if they don't exist or returned if they do. If initializer is `None` (the default), the default initializer passed in the constructor is used. If that one is `None` too, we use a new `glorot_uniform_initializer`. If initializer is a Tensor, we use it as a value and derive the shape from the initializer. If the initializer is a callable, then it will be called for each shard. Otherwise the initializer should match the shape of the entire sharded Variable, and it will be sliced accordingly for each shard. Some useful partitioners are available. See, e.g., `variable_axis_size_partitioner` and `min_max_variable_partitioner`. Args: name: the name of the new or existing sharded variable. partitioner: Optional callable that accepts a fully defined `TensorShape` and `dtype` of the Variable to be created, and returns a list of partitions for each axis (currently only one axis can be partitioned). shape: shape of the new or existing sharded variable. dtype: type of the new or existing sharded variable (defaults to `DT_FLOAT`). initializer: initializer for the sharded variable. regularizer: a (Tensor -> Tensor or None) function; the result of applying it on a newly created variable will be added to the collection GraphKeys.REGULARIZATION_LOSSES and can be used for regularization. reuse: a Boolean, None, or tf.AUTO_REUSE. Controls reuse or creation of variables. trainable: If `True` also add the variable to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). collections: List of graph collections keys to add the Variable to. Defaults to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`). caching_device: Optional device string or function describing where the Variable should be cached for reading. Defaults to the Variable's device. If not `None`, caches on another device. 
Typical use is to cache on the device where the Ops using the Variable reside, to deduplicate copying through `Switch` and other conditional statements. validate_shape: If False, allows the variable to be initialized with a value of unknown shape. If True, the default, the shape of initial_value must be known. use_resource: If False, creates a regular Variable. If True, creates an experimental ResourceVariable which has well-defined semantics. Defaults to False (will later change to True). constraint: An optional projection function to be applied to the variable after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints for layer weights). The function must take as input the unprojected Tensor representing the value of the variable and return the Tensor for the projected value (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. synchronization: Indicates when a distributed a variable will be aggregated. Accepted values are constants defined in the class `tf.VariableSynchronization`. By default the synchronization is set to `AUTO` and the current `DistributionStrategy` chooses when to synchronize. aggregation: Indicates how a distributed variable will be aggregated. Accepted values are constants defined in the class `tf.VariableAggregation`. Returns: A `PartitionedVariable` object. Raises: ValueError: when creating a new variable and shape is not declared, when reusing a variable and specifying a conflicting shape, when violating reuse during variable creation, or if an existing sharded variable exists for the given name but with different sharding. """ initializing_from_value = initializer is not None and isinstance( initializer, ops.Tensor) if name in self._vars: raise ValueError( "A partitioner was provided, but an unpartitioned version of the " "variable was found: %s. Perhaps a variable of the same name was " "already created without partitioning?" % name) shape = tensor_shape.as_shape(shape) if initializing_from_value: shape = shape.merge_with(initializer.get_shape()) partitions = None if not reuse or partitioner: partitions = _call_partitioner(partitioner, shape, dtype) if name in self._partitioned_vars: if reuse is False: raise ValueError( "Partitioned variable with name %s already exists. Did you mean to " "set reuse=True or reuse=tf.AUTO_REUSE in VarScope?" % name) existing_var = self._partitioned_vars[name] if not shape.is_compatible_with(existing_var.get_shape()): raise ValueError( "Trying to reuse partitioned variable %s, but specified shape %s " "and found shape %s." % (name, shape, existing_var.get_shape())) if not dtype.is_compatible_with(existing_var.dtype): raise ValueError( "Trying to reuse partitioned variable %s, but specified dtype %s " "and found dtype %s." % (name, dtype.name, existing_var.dtype.name)) # pylint: disable=protected-access if (partitions is not None and existing_var._get_partitions() != partitions): raise ValueError( "Trying to reuse partitioned variable %s, but specified partitions " "%s and found partitions %s." % (name, partitions, existing_var._get_partitions())) # pylint: enable=protected-access return existing_var if reuse is True: raise ValueError("PartitionedVariable %s does not exist, or was not " "created with tf.get_variable(). Did you mean to set " "reuse=False or reuse=tf.AUTO_REUSE in VarScope?" 
% name) slice_dim, num_slices = _get_slice_dim_and_num_slices(partitions) if "%s/part_0" % name in self._vars: if "%s/part_%d" % (name, num_slices - 1) not in self._vars: raise ValueError( "Partitioner returned a different partitioning than what was " "already found. Partitioner returned %d shards, and shard " "%s/part_0 was found, but %s/part_%d was not." % (num_slices, name, name, num_slices - 1)) if "%s/part_%d" % (name, num_slices) in self._vars: raise ValueError( "Partitioner returned a different partitioning than what was " "already found. Partitioner returned %d shards, and shard " "%s/part_0 was found, but so was the extra shard %s/part_%d." % (num_slices, name, name, num_slices)) vs = [] for i, (var_offset, var_shape) in enumerate( _iter_slices(shape.as_list(), num_slices, slice_dim)): partition_info = _PartitionInfo( full_shape=shape.as_list(), var_offset=var_offset) var_full_name = "%s/part_%d" % (name, i) with ops.name_scope(var_full_name + "/PartitionedInitializer"): # Create the tensor to initialize the variable with default value. if initializer is None: init, initializing_from_value = self._get_default_initializer( name=name, shape=shape, dtype=dtype) if initializing_from_value: init_shape = None else: init_shape = var_shape elif callable(initializer): init = initializer init_shape = var_shape elif isinstance(initializer, ops.Tensor): init = array_ops.slice(initializer, var_offset, var_shape) # Use the dtype of the given tensor. dtype = init.dtype.base_dtype init_shape = None else: init = ops.convert_to_tensor(initializer, dtype=dtype) init = array_ops.slice(init, var_offset, var_shape) init_shape = None with ops.name_scope(None): var = self._get_single_variable( name=var_full_name, shape=init_shape, dtype=dtype, initializer=init, partition_info=partition_info, regularizer=regularizer, reuse=reuse, trainable=trainable, collections=collections, caching_device=caching_device, validate_shape=validate_shape, use_resource=use_resource, constraint=constraint, synchronization=synchronization, aggregation=aggregation) # pylint: disable=protected-access var._set_save_slice_info( variables.Variable.SaveSliceInfo(name, shape.as_list(), var_offset, var_shape)) vs.append(var) # pylint: enable=protected-access partitioned_var = variables.PartitionedVariable( name=name, shape=shape, dtype=dtype, variable_list=vs, partitions=partitions) if not context.executing_eagerly() or self._store_eager_variables: self._partitioned_vars[name] = partitioned_var return partitioned_var def _get_single_variable(self, name, shape=None, dtype=dtypes.float32, initializer=None, regularizer=None, partition_info=None, reuse=None, trainable=None, collections=None, caching_device=None, validate_shape=True, use_resource=None, constraint=None, synchronization=VariableSynchronization.AUTO, aggregation=VariableAggregation.NONE): """Get or create a single Variable (e.g. a shard or entire variable). See the documentation of get_variable above (ignore partitioning components) for details. Args: name: see get_variable. shape: see get_variable. dtype: see get_variable. initializer: see get_variable. regularizer: see get_variable. partition_info: _PartitionInfo object. reuse: see get_variable. trainable: see get_variable. collections: see get_variable. caching_device: see get_variable. validate_shape: see get_variable. use_resource: see get_variable. constraint: see get_variable. synchronization: see get_variable. aggregation: see get_variable. Returns: A Variable. See documentation of get_variable above. 
Raises: ValueError: See documentation of get_variable above. """ # Set to true if initializer is a constant. initializing_from_value = False if initializer is not None and not callable(initializer): initializing_from_value = True if shape is not None and initializing_from_value: raise ValueError("If initializer is a constant, do not specify shape.") dtype = dtypes.as_dtype(dtype) shape = tensor_shape.as_shape(shape) if name in self._vars: # Here we handle the case when returning an existing variable. if reuse is False: var = self._vars[name] err_msg = ("Variable %s already exists, disallowed." " Did you mean to set reuse=True or " "reuse=tf.AUTO_REUSE in VarScope?" % name) # ResourceVariables don't have an op associated with so no traceback if isinstance(var, resource_variable_ops.ResourceVariable): raise ValueError(err_msg) tb = var.op.traceback[::-1] # Throw away internal tf entries and only take a few lines. In some # cases the traceback can be longer (e.g. if someone uses factory # functions to create variables) so we take more than needed in the # default case. tb = [x for x in tb if "tensorflow/python" not in x[0]][:5] raise ValueError("%s Originally defined at:\n\n%s" % (err_msg, "".join(traceback.format_list(tb)))) found_var = self._vars[name] if not shape.is_compatible_with(found_var.get_shape()): raise ValueError("Trying to share variable %s, but specified shape %s" " and found shape %s." % (name, shape, found_var.get_shape())) if not dtype.is_compatible_with(found_var.dtype): dtype_str = dtype.name found_type_str = found_var.dtype.name raise ValueError("Trying to share variable %s, but specified dtype %s" " and found dtype %s." % (name, dtype_str, found_type_str)) return found_var # The code below handles only the case of creating a new variable. if reuse is True: raise ValueError("Variable %s does not exist, or was not created with " "tf.get_variable(). Did you mean to set " "reuse=tf.AUTO_REUSE in VarScope?" % name) # Create the tensor to initialize the variable with default value. if initializer is None: initializer, initializing_from_value = self._get_default_initializer( name=name, shape=shape, dtype=dtype) # Enter an init scope when creating the initializer. with ops.init_scope(): if initializing_from_value: init_val = initializer variable_dtype = None else: # Instantiate initializer if provided initializer is a type object. if tf_inspect.isclass(initializer): initializer = initializer() if shape is not None and shape.is_fully_defined(): init_val = lambda: initializer( # pylint: disable=g-long-lambda shape.as_list(), dtype=dtype, partition_info=partition_info) variable_dtype = dtype.base_dtype elif len(tf_inspect.getargspec(initializer).args) == len( tf_inspect.getargspec(initializer).defaults or []): init_val = initializer variable_dtype = None else: raise ValueError("The initializer passed is not valid. It should " "be a callable with no arguments and the " "shape should not be provided or an instance of " "`tf.keras.initializers.*' and `shape` should be " "fully defined.") # Create the variable. if use_resource is None: # Set the default value if unspecified. 
use_resource = _DEFAULT_USE_RESOURCE v = variables.VariableV1( initial_value=init_val, name=name, trainable=trainable, collections=collections, caching_device=caching_device, dtype=variable_dtype, validate_shape=validate_shape, constraint=constraint, use_resource=use_resource, synchronization=synchronization, aggregation=aggregation) if context.executing_eagerly() and self._store_eager_variables: if collections: ops.add_to_collections(collections, v) else: ops.add_to_collection(ops.GraphKeys.GLOBAL_VARIABLES, v) if trainable: ops.add_to_collection(ops.GraphKeys.TRAINABLE_VARIABLES, v) if not context.executing_eagerly() or self._store_eager_variables: # In eager mode we do not want to keep default references to Variable # objects as this will prevent their memory from being released. self._vars[name] = v logging.vlog(1, "Created variable %s with shape %s and init %s", v.name, format(shape), initializer) # Run the regularizer if requested and save the resulting loss. if regularizer: with ops.colocate_with(v): with ops.name_scope(name + "/Regularizer/"): with ops.init_scope(): loss = regularizer(v) if loss is not None: if context.executing_eagerly(): v_name = "v_%s" % type(v) loss_name = "loss_%s" % type(loss) else: v_name = v.name loss_name = loss.name logging.vlog( 1, "Applied regularizer to %s and added the result %s " "to REGULARIZATION_LOSSES.", v_name, loss_name) ops.add_to_collection(ops.GraphKeys.REGULARIZATION_LOSSES, loss) return v # Initialize variable when no initializer provided def _get_default_initializer(self, name, shape=None, dtype=dtypes.float32): """Provide a default initializer and a corresponding value. Args: name: see get_variable. shape: see get_variable. dtype: see get_variable. Returns: initializer and initializing_from_value. See get_variable above. Raises: ValueError: When giving unsupported dtype. """ del shape # If dtype is DT_FLOAT, provide a uniform unit scaling initializer if dtype.is_floating: initializer = init_ops.glorot_uniform_initializer() initializing_from_value = False # If dtype is DT_INT/DT_UINT, provide a default value `zero` # If dtype is DT_BOOL, provide a default value `FALSE` elif (dtype.is_integer or dtype.is_unsigned or dtype.is_bool or dtype == dtypes.string): initializer = init_ops.zeros_initializer() initializing_from_value = False # NOTES:Do we need to support for handling DT_STRING and DT_COMPLEX here? else: raise ValueError("An initializer for variable %s of %s is required" % (name, dtype.base_dtype)) return initializer, initializing_from_value # To stop regularization, use this regularizer @tf_export(v1=["no_regularizer"]) def no_regularizer(_): """Use this function to prevent regularization of variables.""" return None # TODO(alive): support caching devices and partitioned variables in Eager mode. @tf_export(v1=["VariableScope"]) class VariableScope(object): """Variable scope object to carry defaults to provide to `get_variable`. Many of the arguments we need for `get_variable` in a variable store are most easily handled with a context. This object is used for the defaults. Attributes: name: name of the current scope, used as prefix in get_variable. initializer: default initializer passed to get_variable. regularizer: default regularizer passed to get_variable. reuse: Boolean, None, or tf.compat.v1.AUTO_REUSE, setting the reuse in get_variable. When eager execution is enabled this argument is always forced to be False. caching_device: string, callable, or None: the caching device passed to get_variable. 
partitioner: callable or `None`: the partitioner passed to `get_variable`. custom_getter: default custom getter passed to get_variable. name_scope: The name passed to `tf.name_scope`. dtype: default type passed to get_variable (defaults to DT_FLOAT). use_resource: if False, create a normal Variable; if True create an experimental ResourceVariable with well-defined semantics. Defaults to False (will later change to True). When eager execution is enabled this argument is always forced to be True. constraint: An optional projection function to be applied to the variable after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints for layer weights). The function must take as input the unprojected Tensor representing the value of the variable and return the Tensor for the projected value (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. """ def __init__(self, reuse, name="", initializer=None, regularizer=None, caching_device=None, partitioner=None, custom_getter=None, name_scope="", dtype=dtypes.float32, use_resource=None, constraint=None): """Creates a new VariableScope with the given properties.""" self._name = name self._initializer = initializer self._regularizer = regularizer self._reuse = reuse self._caching_device = caching_device self._partitioner = partitioner self._custom_getter = custom_getter self._name_scope = name_scope self._dtype = dtype self._use_resource = use_resource self._constraint = constraint if context.executing_eagerly(): if self._caching_device is not None: raise NotImplementedError("Caching devices is not yet supported " "when eager execution is enabled.") self._reuse = AUTO_REUSE self._use_resource = True @property def name(self): return self._name @property def original_name_scope(self): return self._name_scope @property def reuse(self): return self._reuse @property def initializer(self): return self._initializer @property def dtype(self): return self._dtype @property def use_resource(self): return self._use_resource @property def regularizer(self): return self._regularizer @property def caching_device(self): return self._caching_device @property def partitioner(self): return self._partitioner @property def custom_getter(self): return self._custom_getter @property def constraint(self): return self._constraint def reuse_variables(self): """Reuse variables in this scope.""" self._reuse = True def set_initializer(self, initializer): """Set initializer for this scope.""" self._initializer = initializer def set_dtype(self, dtype): """Set data type for this scope.""" self._dtype = dtype def set_use_resource(self, use_resource): """Sets whether to use ResourceVariables for this scope.""" if context.executing_eagerly() and not use_resource: raise ValueError("When eager execution is enabled, " "use_resource cannot be set to false.") self._use_resource = use_resource def set_regularizer(self, regularizer): """Set regularizer for this scope.""" self._regularizer = regularizer def set_caching_device(self, caching_device): """Set caching_device for this scope.""" if context.executing_eagerly(): raise NotImplementedError("Caching devices are not yet supported " "when eager execution is enabled.") self._caching_device = caching_device def set_partitioner(self, partitioner): """Set partitioner for this scope.""" self._partitioner = partitioner def set_custom_getter(self, custom_getter): """Set custom getter for this scope.""" self._custom_getter = custom_getter def 
get_collection(self, name): """Get this scope's variables.""" scope = self._name + "/" if self._name else "" return ops.get_collection(name, scope) def trainable_variables(self): """Get this scope's trainable variables.""" return self.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES) def global_variables(self): """Get this scope's global variables.""" return self.get_collection(ops.GraphKeys.GLOBAL_VARIABLES) def local_variables(self): """Get this scope's local variables.""" return self.get_collection(ops.GraphKeys.LOCAL_VARIABLES) def get_variable(self, var_store, name, shape=None, dtype=None, initializer=None, regularizer=None, reuse=None, trainable=None, collections=None, caching_device=None, partitioner=None, validate_shape=True, use_resource=None, custom_getter=None, constraint=None, synchronization=VariableSynchronization.AUTO, aggregation=VariableAggregation.NONE): """Gets an existing variable with this name or create a new one.""" if regularizer is None: regularizer = self._regularizer if caching_device is None: caching_device = self._caching_device if partitioner is None: partitioner = self._partitioner if custom_getter is None: custom_getter = self._custom_getter if context.executing_eagerly(): reuse = False use_resource = True else: if reuse is None: reuse = self._reuse if use_resource is None: use_resource = self._use_resource full_name = self.name + "/" + name if self.name else name # Variable names only depend on variable_scope (full_name here), # not name_scope, so we reset it below for the time of variable creation. with ops.name_scope(None): # Check that `initializer` dtype and `dtype` are consistent before # replacing them with defaults. if (dtype is not None and initializer is not None and not callable(initializer)): init_dtype = ops.convert_to_tensor(initializer).dtype.base_dtype if init_dtype != dtype: raise ValueError("Initializer type '%s' and explicit dtype '%s' " "don't match." % (init_dtype, dtype)) if initializer is None: initializer = self._initializer if constraint is None: constraint = self._constraint if dtype is None: dtype = self._dtype return var_store.get_variable( full_name, shape=shape, dtype=dtype, initializer=initializer, regularizer=regularizer, reuse=reuse, trainable=trainable, collections=collections, caching_device=caching_device, partitioner=partitioner, validate_shape=validate_shape, use_resource=use_resource, custom_getter=custom_getter, constraint=constraint, synchronization=synchronization, aggregation=aggregation) def _get_partitioned_variable(self, var_store, name, shape=None, dtype=None, initializer=None, regularizer=None, trainable=None, collections=None, caching_device=None, partitioner=None, validate_shape=True, use_resource=None, constraint=None, synchronization=VariableSynchronization.AUTO, aggregation=VariableAggregation.NONE): """Gets an existing variable with this name or create a new one.""" if initializer is None: initializer = self._initializer if regularizer is None: regularizer = self._regularizer if constraint is None: constraint = self._constraint if caching_device is None: caching_device = self._caching_device if partitioner is None: partitioner = self._partitioner if dtype is None: dtype = self._dtype if use_resource is None: use_resource = self._use_resource if self._custom_getter is not None: raise ValueError( "Private access to _get_partitioned_variable is not allowed when " "a custom getter is set. Current custom getter: %s. " "It is likely that you're using create_partitioned_variables. 
" "If so, consider instead using get_variable with a non-empty " "partitioner parameter instead." % self._custom_getter) if partitioner is None: raise ValueError("No partitioner was specified") # This allows the variable scope name to be used as the variable name if # this function is invoked with an empty name arg, for backward # compatibility with create_partitioned_variables(). full_name_list = [] if self.name: full_name_list.append(self.name) if name: full_name_list.append(name) full_name = "/".join(full_name_list) # Variable names only depend on variable_scope (full_name here), # not name_scope, so we reset it below for the time of variable creation. with ops.name_scope(None): # pylint: disable=protected-access return var_store._get_partitioned_variable( full_name, shape=shape, dtype=dtype, initializer=initializer, regularizer=regularizer, reuse=self.reuse, trainable=trainable, collections=collections, caching_device=caching_device, partitioner=partitioner, validate_shape=validate_shape, use_resource=use_resource, constraint=constraint, synchronization=synchronization, aggregation=aggregation) # pylint: enable=protected-access _VARSTORE_KEY = ("__variable_store",) _VARSCOPESTORE_KEY = ("__varscope",) class _VariableScopeStore(threading.local): """A thread local store for the current variable scope and scope counts.""" def __init__(self): super(_VariableScopeStore, self).__init__() self.current_scope = VariableScope(False) self.variable_scopes_count = {} def open_variable_scope(self, scope_name): if scope_name in self.variable_scopes_count: self.variable_scopes_count[scope_name] += 1 else: self.variable_scopes_count[scope_name] = 1 def close_variable_subscopes(self, scope_name): for k in list(self.variable_scopes_count.keys()): if scope_name is None or k.startswith(scope_name + "/"): self.variable_scopes_count[k] = 0 def variable_scope_count(self, scope_name): return self.variable_scopes_count.get(scope_name, 0) def get_variable_scope_store(): """Returns the variable scope store for current thread.""" scope_store = ops.get_collection(_VARSCOPESTORE_KEY) if not scope_store: scope_store = _VariableScopeStore() ops.add_to_collection(_VARSCOPESTORE_KEY, scope_store) else: scope_store = scope_store[0] return scope_store @tf_export(v1=["get_variable_scope"]) def get_variable_scope(): """Returns the current variable scope.""" return get_variable_scope_store().current_scope def _get_default_variable_store(): store = ops.get_collection(_VARSTORE_KEY) if store: return store[0] store = _VariableStore() ops.add_to_collection(_VARSTORE_KEY, store) return store @tf_contextlib.contextmanager def with_variable_store(store): store_collection = ops.get_collection_ref(_VARSTORE_KEY) old = list(store_collection) store_collection[:] = [store] try: yield finally: store_collection[:] = old class EagerVariableStore(object): """Wrapper allowing functional layers to be used with eager execution. When eager execution is enabled Variables get deleted when they go out of scope, and are not stored in global collections by default. A lot of code (mostly the functional layers in tf.layers) assumes that variables are kept in a global list. EagerVariableStore can be used in conjunction with this code to make it eager-friendly. For example, to create a dense layer, use: ``` container = tfe.EagerVariableStore() for input in dataset_iterator: with container.as_default(): x = tf.compat.v1.layers.dense(input, name="l1") print(container.variables) # Should print the variables used in the layer. 
``` """ def __init__(self, store=None): if store is not None: if not store._store_eager_variables: # pylint: disable=protected-access raise ValueError("Cannot construct EagerVariableStore from a " "VariableStore object that does not hold eager " "variables.") self._store = store else: self._store = _VariableStore() self._store._store_eager_variables = True # pylint: disable=protected-access def as_default(self): return with_variable_store(self._store) def variables(self): return sorted(self._store._vars.values(), key=lambda x: x.name) # pylint: disable=protected-access def trainable_variables(self): # pylint: disable=protected-access return sorted([x for x in self._store._vars.values() if x.trainable], key=lambda x: x.name) # pylint: enable=protected-access def non_trainable_variables(self): # pylint: disable=protected-access return sorted([x for x in self._store._vars.values() if not x.trainable], key=lambda x: x.name) # pylint: enable=protected-access def copy(self): """Copy this variable store and all of its contents. Variables contained in this store will be copied over to the new variable store, meaning that they can be modified without affecting the variables in this store. Returns: A new EagerVariableStore instance containing copied variables. """ # pylint: disable=protected-access new_store = EagerVariableStore() for key, var in iteritems(self._store._vars): # Strip device out of variable name. try: index = var.name.index(":") except ValueError: stripped_var_name = var.name else: stripped_var_name = var.name[:index] # Create new variable with same value, name, and "trainable" flag. new_var = resource_variable_ops.ResourceVariable( var.read_value(), name=stripped_var_name, trainable=var.trainable) new_store._store._vars[key] = new_var return new_store # pylint: enable=protected-access # The argument list for get_variable must match arguments to get_local_variable. # So, if you are updating the arguments, also update arguments to # get_local_variable below. @tf_export(v1=["get_variable"]) def get_variable(name, shape=None, dtype=None, initializer=None, regularizer=None, trainable=None, collections=None, caching_device=None, partitioner=None, validate_shape=True, use_resource=None, custom_getter=None, constraint=None, synchronization=VariableSynchronization.AUTO, aggregation=VariableAggregation.NONE): return get_variable_scope().get_variable( _get_default_variable_store(), name, shape=shape, dtype=dtype, initializer=initializer, regularizer=regularizer, trainable=trainable, collections=collections, caching_device=caching_device, partitioner=partitioner, validate_shape=validate_shape, use_resource=use_resource, custom_getter=custom_getter, constraint=constraint, synchronization=synchronization, aggregation=aggregation) get_variable_or_local_docstring = ("""%s %sThis function prefixes the name with the current variable scope and performs reuse checks. See the [Variable Scope How To](https://tensorflow.org/guide/variables) for an extensive description of how reusing works. Here is a basic example: ```python def foo(): with tf.variable_scope("foo", reuse=tf.AUTO_REUSE): v = tf.get_variable("v", [1]) return v v1 = foo() # Creates v. v2 = foo() # Gets the same, existing v. assert v1 == v2 ``` If initializer is `None` (the default), the default initializer passed in the variable scope will be used. If that one is `None` too, a `glorot_uniform_initializer` will be used. The initializer can also be a Tensor, in which case the variable is initialized to this value and shape. 
Similarly, if the regularizer is `None` (the default), the default regularizer passed in the variable scope will be used (if that is `None` too, then by default no regularization is performed). If a partitioner is provided, a `PartitionedVariable` is returned. Accessing this object as a `Tensor` returns the shards concatenated along the partition axis. Some useful partitioners are available. See, e.g., `variable_axis_size_partitioner` and `min_max_variable_partitioner`. Args: name: The name of the new or existing variable. shape: Shape of the new or existing variable. dtype: Type of the new or existing variable (defaults to `DT_FLOAT`). initializer: Initializer for the variable if one is created. Can either be an initializer object or a Tensor. If it's a Tensor, its shape must be known unless validate_shape is False. regularizer: A (Tensor -> Tensor or None) function; the result of applying it on a newly created variable will be added to the collection `tf.GraphKeys.REGULARIZATION_LOSSES` and can be used for regularization. %scollections: List of graph collections keys to add the Variable to. Defaults to `[%s]` (see `tf.Variable`). caching_device: Optional device string or function describing where the Variable should be cached for reading. Defaults to the Variable's device. If not `None`, caches on another device. Typical use is to cache on the device where the Ops using the Variable reside, to deduplicate copying through `Switch` and other conditional statements. partitioner: Optional callable that accepts a fully defined `TensorShape` and `dtype` of the Variable to be created, and returns a list of partitions for each axis (currently only one axis can be partitioned). validate_shape: If False, allows the variable to be initialized with a value of unknown shape. If True, the default, the shape of initial_value must be known. For this to be used the initializer must be a Tensor and not an initializer object. use_resource: If False, creates a regular Variable. If true, creates an experimental ResourceVariable instead with well-defined semantics. Defaults to False (will later change to True). When eager execution is enabled this argument is always forced to be True. custom_getter: Callable that takes as a first argument the true getter, and allows overwriting the internal get_variable method. The signature of `custom_getter` should match that of this method, but the most future-proof version will allow for changes: `def custom_getter(getter, *args, **kwargs)`. Direct access to all `get_variable` parameters is also allowed: `def custom_getter(getter, name, *args, **kwargs)`. A simple identity custom getter that simply creates variables with modified names is: ```python def custom_getter(getter, name, *args, **kwargs): return getter(name + '_suffix', *args, **kwargs) ``` constraint: An optional projection function to be applied to the variable after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints for layer weights). The function must take as input the unprojected Tensor representing the value of the variable and return the Tensor for the projected value (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. synchronization: Indicates when a distributed a variable will be aggregated. Accepted values are constants defined in the class `tf.VariableSynchronization`. By default the synchronization is set to `AUTO` and the current `DistributionStrategy` chooses when to synchronize. 
aggregation: Indicates how a distributed variable will be aggregated. Accepted values are constants defined in the class `tf.VariableAggregation`. Returns: The created or existing `Variable` (or `PartitionedVariable`, if a partitioner was used). Raises: ValueError: when creating a new variable and shape is not declared, when violating reuse during variable creation, or when `initializer` dtype and `dtype` don't match. Reuse is set inside `variable_scope`. """) get_variable.__doc__ = get_variable_or_local_docstring % ( "Gets an existing variable with these parameters or create a new one.", "", "trainable: If `True` also add the variable to the graph collection\n" " `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).\n ", "GraphKeys.GLOBAL_VARIABLES") # The argument list for get_local_variable must match arguments to get_variable. # So, if you are updating the arguments, also update arguments to get_variable. @tf_export(v1=["get_local_variable"]) def get_local_variable( # pylint: disable=missing-docstring name, shape=None, dtype=None, initializer=None, regularizer=None, trainable=False, # pylint: disable=unused-argument collections=None, caching_device=None, partitioner=None, validate_shape=True, use_resource=None, custom_getter=None, constraint=None, synchronization=VariableSynchronization.AUTO, aggregation=VariableAggregation.NONE): if collections: collections += [ops.GraphKeys.LOCAL_VARIABLES] else: collections = [ops.GraphKeys.LOCAL_VARIABLES] return get_variable( name, shape=shape, dtype=dtype, initializer=initializer, regularizer=regularizer, trainable=False, collections=collections, caching_device=caching_device, partitioner=partitioner, validate_shape=validate_shape, use_resource=use_resource, synchronization=synchronization, aggregation=aggregation, custom_getter=custom_getter, constraint=constraint) get_local_variable.__doc__ = get_variable_or_local_docstring % ( "Gets an existing *local* variable or creates a new one.", "Behavior is the same as in `get_variable`, except that variables are\n" "added to the `LOCAL_VARIABLES` collection and `trainable` is set to\n" "`False`.\n", "", "GraphKeys.LOCAL_VARIABLES") def _get_partitioned_variable(name, shape=None, dtype=None, initializer=None, regularizer=None, trainable=True, collections=None, caching_device=None, partitioner=None, validate_shape=True, use_resource=None, constraint=None, synchronization=VariableSynchronization.AUTO, aggregation=VariableAggregation.NONE): """Gets or creates a sharded variable list with these parameters. The `partitioner` must be a callable that accepts a fully defined `TensorShape` and returns a sequence of integers (the `partitions`). These integers describe how to partition the given sharded `Variable` along the given dimension. That is, `partitions[1] = 3` means split the `Variable` into 3 shards along dimension 1. Currently, sharding along only one axis is supported. If the list of variables with the given name (prefix) is already stored, we return the stored variables. Otherwise, we create a new one. If initializer is `None` (the default), the default initializer passed in the constructor is used. If that one is `None` too, we use a new `glorot_uniform_initializer`. If initializer is a Tensor, we use it as a value and derive the shape from the initializer. If the initializer is a callable, then it will be called for each shard. Otherwise the initializer should match the shape of the entire sharded Variable, and it will be sliced accordingly for each shard. Some useful partitioners are available. 
See, e.g., `variable_axis_size_partitioner` and `min_max_variable_partitioner`. Args: name: The name of the new or existing variable. shape: Shape of the new or existing variable. dtype: Type of the new or existing variable (defaults to `DT_FLOAT`). initializer: Initializer for the variable if one is created. regularizer: A (Tensor -> Tensor or None) function; the result of applying it on a newly created variable will be added to the collection GraphKeys.REGULARIZATION_LOSSES and can be used for regularization. trainable: If `True` also add the variable to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). collections: List of graph collections keys to add the Variable to. Defaults to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`). caching_device: Optional device string or function describing where the Variable should be cached for reading. Defaults to the Variable's device. If not `None`, caches on another device. Typical use is to cache on the device where the Ops using the Variable reside, to deduplicate copying through `Switch` and other conditional statements. partitioner: Optional callable that accepts a fully defined `TensorShape` and `dtype` of the Variable to be created, and returns a list of partitions for each axis (currently only one axis can be partitioned). validate_shape: If False, allows the variable to be initialized with a value of unknown shape. If True, the default, the shape of initial_value must be known. use_resource: If False, creates a regular Variable. If True, creates an experimental ResourceVariable instead which has well-defined semantics. Defaults to False (will later change to True). constraint: An optional projection function to be applied to the variable after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints for layer weights). The function must take as input the unprojected Tensor representing the value of the variable and return the Tensor for the projected value (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. synchronization: Indicates when a distributed a variable will be aggregated. Accepted values are constants defined in the class `tf.VariableSynchronization`. By default the synchronization is set to `AUTO` and the current `DistributionStrategy` chooses when to synchronize. aggregation: Indicates how a distributed variable will be aggregated. Accepted values are constants defined in the class `tf.VariableAggregation`. Returns: A tuple `(shards, partitions)` where `shards` is the list of `Variable` shards and `partitions` is the output of the partitioner on the input shape. Raises: ValueError: when creating a new variable and shape is not declared, or when violating reuse during variable creation. Reuse is set inside `variable_scope`. """ # pylint: disable=protected-access scope = get_variable_scope() if scope.custom_getter is not None: raise ValueError( "Private access to _get_partitioned_variable is not allowed when " "a custom getter is set. Current custom getter: %s. " "It is likely that you're using create_partitioned_variables. " "If so, consider instead using get_variable with a non-empty " "partitioner parameter instead." 
% scope.custom_getter) return scope._get_partitioned_variable( _get_default_variable_store(), name, shape=shape, dtype=dtype, initializer=initializer, regularizer=regularizer, trainable=trainable, collections=collections, caching_device=caching_device, partitioner=partitioner, validate_shape=validate_shape, use_resource=use_resource, constraint=constraint, synchronization=synchronization, aggregation=aggregation) # pylint: enable=protected-access # Named like a function for compatibility with the previous # @tf_contextlib.contextmanager definition. class _pure_variable_scope(object): # pylint: disable=invalid-name """A context for the variable_scope, see `variable_scope` for docs.""" def __init__(self, name_or_scope, reuse=None, initializer=None, regularizer=None, caching_device=None, partitioner=None, custom_getter=None, old_name_scope=None, dtype=dtypes.float32, use_resource=None, constraint=None): """Creates a context for the variable_scope, see `variable_scope` for docs. Note: this does not create a name scope. Args: name_or_scope: `string` or `VariableScope`: the scope to open. reuse: `True` or None, or tf.compat.v1.AUTO_REUSE; if `None`, we inherit the parent scope's reuse flag. initializer: default initializer for variables within this scope. regularizer: default regularizer for variables within this scope. caching_device: default caching device for variables within this scope. partitioner: default partitioner for variables within this scope. custom_getter: default custom getter for variables within this scope. old_name_scope: the original name scope when re-entering a variable scope. dtype: type of the variables within this scope (defaults to `DT_FLOAT`). use_resource: If False, variables in this scope will be regular Variables. If True, experimental ResourceVariables will be creates instead, with well-defined semantics. Defaults to False (will later change to True). constraint: An optional projection function to be applied to the variable after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints for layer weights). The function must take as input the unprojected Tensor representing the value of the variable and return the Tensor for the projected value (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. """ self._name_or_scope = name_or_scope self._reuse = reuse self._initializer = initializer self._regularizer = regularizer self._caching_device = caching_device self._partitioner = partitioner self._custom_getter = custom_getter self._old_name_scope = old_name_scope self._dtype = dtype self._use_resource = use_resource self._constraint = constraint self._var_store = _get_default_variable_store() self._var_scope_store = get_variable_scope_store() self._last_variable_scope_object = None if isinstance(self._name_or_scope, VariableScope): self._new_name = self._name_or_scope.name name_scope = self._name_or_scope._name_scope # pylint: disable=protected-access # Handler for the case when we jump to a shared scope. We create a new # VariableScope (self._var_scope_object) that contains a copy of the # provided shared scope, possibly with changed reuse and initializer, if # the user requested this. 
variable_scope_object = VariableScope( self._name_or_scope.reuse if not self._reuse else self._reuse, name=self._new_name, initializer=self._name_or_scope.initializer, regularizer=self._name_or_scope.regularizer, caching_device=self._name_or_scope.caching_device, partitioner=self._name_or_scope.partitioner, dtype=self._name_or_scope.dtype, custom_getter=self._name_or_scope.custom_getter, name_scope=name_scope, use_resource=self._name_or_scope.use_resource, constraint=self._constraint) if self._initializer is not None: variable_scope_object.set_initializer(self._initializer) if self._regularizer is not None: variable_scope_object.set_regularizer(self._regularizer) if self._caching_device is not None: variable_scope_object.set_caching_device(self._caching_device) if self._partitioner is not None: variable_scope_object.set_partitioner(self._partitioner) if self._custom_getter is not None: variable_scope_object.set_custom_getter( _maybe_wrap_custom_getter(self._custom_getter, self._name_or_scope.custom_getter)) if self._dtype is not None: variable_scope_object.set_dtype(self._dtype) if self._use_resource is not None: variable_scope_object.set_use_resource(self._use_resource) self._cached_variable_scope_object = variable_scope_object def __enter__(self): """Begins the scope block. Returns: A VariableScope. Raises: ValueError: when trying to reuse within a create scope, or create within a reuse scope, or if reuse is not `None` or `True`. TypeError: when the types of some arguments are not appropriate. """ self._old = self._var_scope_store.current_scope if isinstance(self._name_or_scope, VariableScope): self._var_scope_store.open_variable_scope(self._new_name) self._old_subscopes = copy.copy( self._var_scope_store.variable_scopes_count) variable_scope_object = self._cached_variable_scope_object else: # Handler for the case when we just prolong current variable scope. # VariableScope with name extended by the provided one, and inherited # reuse and initializer (except if the user provided values to set). self._new_name = ( self._old.name + "/" + self._name_or_scope if self._old.name else self._name_or_scope) self._reuse = (self._reuse or self._old.reuse) # Re-using is inherited by sub-scopes. 
if self._old_name_scope is None: name_scope = self._name_or_scope else: name_scope = self._old_name_scope variable_scope_object = VariableScope( self._reuse, name=self._new_name, initializer=self._old.initializer, regularizer=self._old.regularizer, caching_device=self._old.caching_device, partitioner=self._old.partitioner, dtype=self._old.dtype, use_resource=self._old.use_resource, custom_getter=self._old.custom_getter, name_scope=name_scope, constraint=self._constraint) if self._initializer is not None: variable_scope_object.set_initializer(self._initializer) if self._regularizer is not None: variable_scope_object.set_regularizer(self._regularizer) if self._caching_device is not None: variable_scope_object.set_caching_device(self._caching_device) if self._partitioner is not None: variable_scope_object.set_partitioner(self._partitioner) if self._custom_getter is not None: variable_scope_object.set_custom_getter( _maybe_wrap_custom_getter(self._custom_getter, self._old.custom_getter)) if self._dtype is not None: variable_scope_object.set_dtype(self._dtype) if self._use_resource is not None: variable_scope_object.set_use_resource(self._use_resource) self._var_scope_store.open_variable_scope(self._new_name) self._var_scope_store.current_scope = variable_scope_object self._last_variable_scope_object = variable_scope_object return variable_scope_object def __exit__(self, type_arg, value_arg, traceback_arg): if (self._var_scope_store.current_scope is not self._last_variable_scope_object): raise RuntimeError("Improper nesting of variable_scope.") # If jumping out from a non-prolonged scope, restore counts. if isinstance(self._name_or_scope, VariableScope): self._var_scope_store.variable_scopes_count = self._old_subscopes else: self._var_scope_store.close_variable_subscopes(self._new_name) self._var_scope_store.current_scope = self._old def _maybe_wrap_custom_getter(custom_getter, old_getter): """Wrap a call to a custom_getter to use the old_getter internally.""" if old_getter is None: return custom_getter # The new custom_getter should call the old one def wrapped_custom_getter(getter, *args, **kwargs): # Call: # custom_getter( # lambda: old_getter(true_getter, ...), *args, **kwargs) # which means custom_getter will call old_getter, which # will call the true_getter, perform any intermediate # processing, and return the results to the current # getter, which will also perform additional processing. return custom_getter(functools.partial(old_getter, getter), *args, **kwargs) return wrapped_custom_getter def _get_unique_variable_scope(prefix): """Get a name with the given prefix unique in the current variable scope.""" var_scope_store = get_variable_scope_store() current_scope = get_variable_scope() name = current_scope.name + "/" + prefix if current_scope.name else prefix if var_scope_store.variable_scope_count(name) == 0: return prefix idx = 1 while var_scope_store.variable_scope_count(name + ("_%d" % idx)) > 0: idx += 1 return prefix + ("_%d" % idx) # Named like a function for backwards compatibility with the # @tf_contextlib.contextmanager version, which was switched to a class to avoid # some object creation overhead. @tf_export(v1=["variable_scope"]) # pylint: disable=invalid-name class variable_scope(object): """A context manager for defining ops that creates variables (layers). This context manager validates that the (optional) `values` are from the same graph, ensures that graph is the default graph, and pushes a name scope and a variable scope. 
If `name_or_scope` is not None, it is used as is. If `name_or_scope` is None, then `default_name` is used. In that case, if the same name has been previously used in the same scope, it will be made unique by appending `_N` to it. Variable scope allows you to create new variables and to share already created ones while providing checks to not create or share by accident. For details, see the [Variable Scope How To](https://tensorflow.org/guide/variables), here we present only a few basic examples. Simple example of how to create a new variable: ```python with tf.compat.v1.variable_scope("foo"): with tf.compat.v1.variable_scope("bar"): v = tf.compat.v1.get_variable("v", [1]) assert v.name == "foo/bar/v:0" ``` Simple example of how to reenter a premade variable scope safely: ```python with tf.compat.v1.variable_scope("foo") as vs: pass # Re-enter the variable scope. with tf.compat.v1.variable_scope(vs, auxiliary_name_scope=False) as vs1: # Restore the original name_scope. with tf.name_scope(vs1.original_name_scope): v = tf.compat.v1.get_variable("v", [1]) assert v.name == "foo/v:0" c = tf.constant([1], name="c") assert c.name == "foo/c:0" ``` Basic example of sharing a variable AUTO_REUSE: ```python def foo(): with tf.compat.v1.variable_scope("foo", reuse=tf.compat.v1.AUTO_REUSE): v = tf.compat.v1.get_variable("v", [1]) return v v1 = foo() # Creates v. v2 = foo() # Gets the same, existing v. assert v1 == v2 ``` Basic example of sharing a variable with reuse=True: ```python with tf.compat.v1.variable_scope("foo"): v = tf.compat.v1.get_variable("v", [1]) with tf.compat.v1.variable_scope("foo", reuse=True): v1 = tf.compat.v1.get_variable("v", [1]) assert v1 == v ``` Sharing a variable by capturing a scope and setting reuse: ```python with tf.compat.v1.variable_scope("foo") as scope: v = tf.compat.v1.get_variable("v", [1]) scope.reuse_variables() v1 = tf.compat.v1.get_variable("v", [1]) assert v1 == v ``` To prevent accidental sharing of variables, we raise an exception when getting an existing variable in a non-reusing scope. ```python with tf.compat.v1.variable_scope("foo"): v = tf.compat.v1.get_variable("v", [1]) v1 = tf.compat.v1.get_variable("v", [1]) # Raises ValueError("... v already exists ..."). ``` Similarly, we raise an exception when trying to get a variable that does not exist in reuse mode. ```python with tf.compat.v1.variable_scope("foo", reuse=True): v = tf.compat.v1.get_variable("v", [1]) # Raises ValueError("... v does not exists ..."). ``` Note that the `reuse` flag is inherited: if we open a reusing scope, then all its sub-scopes become reusing as well. A note about name scoping: Setting `reuse` does not impact the naming of other ops such as mult. See related discussion on [github#6189](https://github.com/tensorflow/tensorflow/issues/6189) Note that up to and including version 1.0, it was allowed (though explicitly discouraged) to pass False to the reuse argument, yielding undocumented behaviour slightly different from None. Starting at 1.1.0 passing None and False as reuse has exactly the same effect. A note about using variable scopes in multi-threaded environment: Variable scopes are thread local, so one thread will not see another thread's current scope. Also, when using `default_name`, unique scopes names are also generated only on a per thread basis. If the same name was used within a different thread, that doesn't prevent a new thread from creating the same scope. However, the underlying variable store is shared across threads (within the same graph). 
As such, if another thread tries to create a new variable with the same name as a variable created by a previous thread, it will fail unless reuse is True. Further, each thread starts with an empty variable scope. So if you wish to preserve name prefixes from a scope from the main thread, you should capture the main thread's scope and re-enter it in each thread. For e.g. ``` main_thread_scope = variable_scope.get_variable_scope() # Thread's target function: def thread_target_fn(captured_scope): with variable_scope.variable_scope(captured_scope): # .... regular code for this thread thread = threading.Thread(target=thread_target_fn, args=(main_thread_scope,)) ``` """ def __init__(self, name_or_scope, default_name=None, values=None, initializer=None, regularizer=None, caching_device=None, partitioner=None, custom_getter=None, reuse=None, dtype=None, use_resource=None, constraint=None, auxiliary_name_scope=True): """Initialize the context manager. Args: name_or_scope: `string` or `VariableScope`: the scope to open. default_name: The default name to use if the `name_or_scope` argument is `None`, this name will be uniquified. If name_or_scope is provided it won't be used and therefore it is not required and can be None. values: The list of `Tensor` arguments that are passed to the op function. initializer: default initializer for variables within this scope. regularizer: default regularizer for variables within this scope. caching_device: default caching device for variables within this scope. partitioner: default partitioner for variables within this scope. custom_getter: default custom getter for variables within this scope. reuse: `True`, None, or tf.compat.v1.AUTO_REUSE; if `True`, we go into reuse mode for this scope as well as all sub-scopes; if tf.compat.v1.AUTO_REUSE, we create variables if they do not exist, and return them otherwise; if None, we inherit the parent scope's reuse flag. When eager execution is enabled, new variables are always created unless an EagerVariableStore or template is currently active. dtype: type of variables created in this scope (defaults to the type in the passed scope, or inherited from parent scope). use_resource: If False, all variables will be regular Variables. If True, experimental ResourceVariables with well-defined semantics will be used instead. Defaults to False (will later change to True). When eager execution is enabled this argument is always forced to be True. constraint: An optional projection function to be applied to the variable after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints for layer weights). The function must take as input the unprojected Tensor representing the value of the variable and return the Tensor for the projected value (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. auxiliary_name_scope: If `True`, we create an auxiliary name scope with the scope. If `False`, we don't create it. Note that the argument is not inherited, and it only takes effect for once when creating. You should only use it for re-entering a premade variable scope. Returns: A scope that can be captured and reused. Raises: ValueError: when trying to reuse within a create scope, or create within a reuse scope. TypeError: when the types of some arguments are not appropriate. 
""" self._name_or_scope = name_or_scope self._default_name = default_name self._values = values self._initializer = initializer self._regularizer = regularizer self._caching_device = caching_device self._partitioner = partitioner self._custom_getter = custom_getter self._reuse = reuse self._dtype = dtype self._use_resource = use_resource self._constraint = constraint if self._default_name is None and self._name_or_scope is None: raise TypeError("If default_name is None then name_or_scope is required") if self._reuse is False: # We don't allow non-inheriting scopes, False = None here. self._reuse = None if not (self._reuse is True or self._reuse is None or self._reuse is AUTO_REUSE): raise ValueError("The reuse parameter must be True or False or None.") if self._values is None: self._values = [] self._in_graph_mode = not context.executing_eagerly() if self._in_graph_mode: self._graph = ops._get_graph_from_inputs(self._values) # pylint: disable=protected-access self._cached_pure_variable_scope = None self._current_name_scope = None if not isinstance(auxiliary_name_scope, bool): raise TypeError("The auxiliary_name_scope must be `True` or `False`, " "while get {}".format(auxiliary_name_scope)) self._auxiliary_name_scope = auxiliary_name_scope def __enter__(self): # If the default graph is building a function, then we should not replace it # with the cached graph. if ops.get_default_graph().building_function: self._building_function = True else: self._building_function = False if self._in_graph_mode and not self._building_function: self._graph_context_manager = self._graph.as_default() self._graph_context_manager.__enter__() if self._cached_pure_variable_scope is not None: # Fast path for re-entering variable_scopes. We've held on to the pure # variable scope from a previous successful __enter__, so we avoid some # overhead by re-using that object. if self._current_name_scope is not None: self._current_name_scope.__enter__() return self._cached_pure_variable_scope.__enter__() try: return self._enter_scope_uncached() finally: if (self._in_graph_mode and not self._building_function and self._graph_context_manager is not None): self._graph_context_manager.__exit__(*sys.exc_info()) def _enter_scope_uncached(self): """Enters the context manager when there is no cached scope yet. Returns: The entered variable scope. Raises: TypeError: A wrong type is passed as `scope` at __init__(). ValueError: `reuse` is incorrectly set at __init__(). """ if self._auxiliary_name_scope: # Create a new name scope later current_name_scope = None else: # Reenter the current name scope name_scope = ops.get_name_scope() if name_scope: # Hack to reenter name_scope += "/" current_name_scope = ops.name_scope(name_scope) else: # Root scope current_name_scope = ops.name_scope(name_scope) # IMPORTANT: Only assign to self._cached_pure_variable_scope and # self._current_name_scope after successful __enter__() calls. 
if self._name_or_scope is not None: if not isinstance(self._name_or_scope, (VariableScope,) + six.string_types): raise TypeError("VariableScope: name_or_scope must be a string or " "VariableScope.") if isinstance(self._name_or_scope, six.string_types): name_scope = self._name_or_scope else: name_scope = self._name_or_scope.name.split("/")[-1] if name_scope or current_name_scope: current_name_scope = current_name_scope or ops.name_scope(name_scope) try: current_name_scope_name = current_name_scope.__enter__() except: current_name_scope.__exit__(*sys.exc_info()) raise self._current_name_scope = current_name_scope if isinstance(self._name_or_scope, six.string_types): old_name_scope = current_name_scope_name else: old_name_scope = self._name_or_scope.original_name_scope pure_variable_scope = _pure_variable_scope( self._name_or_scope, reuse=self._reuse, initializer=self._initializer, regularizer=self._regularizer, caching_device=self._caching_device, partitioner=self._partitioner, custom_getter=self._custom_getter, old_name_scope=old_name_scope, dtype=self._dtype, use_resource=self._use_resource, constraint=self._constraint) try: entered_pure_variable_scope = pure_variable_scope.__enter__() except: pure_variable_scope.__exit__(*sys.exc_info()) raise self._cached_pure_variable_scope = pure_variable_scope return entered_pure_variable_scope else: self._current_name_scope = None # This can only happen if someone is entering the root variable scope. pure_variable_scope = _pure_variable_scope( self._name_or_scope, reuse=self._reuse, initializer=self._initializer, regularizer=self._regularizer, caching_device=self._caching_device, partitioner=self._partitioner, custom_getter=self._custom_getter, dtype=self._dtype, use_resource=self._use_resource, constraint=self._constraint) try: entered_pure_variable_scope = pure_variable_scope.__enter__() except: pure_variable_scope.__exit__(*sys.exc_info()) raise self._cached_pure_variable_scope = pure_variable_scope return entered_pure_variable_scope else: # Here name_or_scope is None. Using default name, but made unique. 
if self._reuse: raise ValueError("reuse=True cannot be used without a name_or_scope") current_name_scope = current_name_scope or ops.name_scope( self._default_name) try: current_name_scope_name = current_name_scope.__enter__() except: current_name_scope.__exit__(*sys.exc_info()) raise self._current_name_scope = current_name_scope unique_default_name = _get_unique_variable_scope(self._default_name) pure_variable_scope = _pure_variable_scope( unique_default_name, initializer=self._initializer, regularizer=self._regularizer, caching_device=self._caching_device, partitioner=self._partitioner, custom_getter=self._custom_getter, old_name_scope=current_name_scope_name, dtype=self._dtype, use_resource=self._use_resource, constraint=self._constraint) try: entered_pure_variable_scope = pure_variable_scope.__enter__() except: pure_variable_scope.__exit__(*sys.exc_info()) raise self._cached_pure_variable_scope = pure_variable_scope return entered_pure_variable_scope def __exit__(self, type_arg, value_arg, traceback_arg): self._cached_pure_variable_scope.__exit__(type_arg, value_arg, traceback_arg) if self._current_name_scope: self._current_name_scope.__exit__(type_arg, value_arg, traceback_arg) if self._in_graph_mode and not self._building_function: self._graph_context_manager.__exit__(type_arg, value_arg, traceback_arg) # pylint: disable=g-doc-return-or-yield @tf_export(v1=["variable_op_scope"]) @tf_contextlib.contextmanager def variable_op_scope(values, name_or_scope, default_name=None, initializer=None, regularizer=None, caching_device=None, partitioner=None, custom_getter=None, reuse=None, dtype=None, use_resource=None, constraint=None): """Deprecated: context manager for defining an op that creates variables.""" logging.warn("tf.variable_op_scope(values, name, default_name) is deprecated," " use tf.variable_scope(name, default_name, values)") with variable_scope( name_or_scope, default_name=default_name, values=values, initializer=initializer, regularizer=regularizer, caching_device=caching_device, partitioner=partitioner, custom_getter=custom_getter, reuse=reuse, dtype=dtype, use_resource=use_resource, constraint=constraint) as scope: yield scope def _call_partitioner(partitioner, shape, dtype): """Call partitioner validating its inputs/output. Args: partitioner: a function mapping `Tensor` shape and dtype to a list of partitions. shape: shape of the `Tensor` to partition, must have at least two dimensions. dtype: dtype of the elements in the `Tensor`. Returns: A list with elements >=1 and exactly one >1. The index of that element corresponds to the partitioning axis. """ if not shape.is_fully_defined(): raise ValueError("Shape of a new partitioned variable must be " "fully defined, but instead was %s." % (shape,)) if shape.ndims < 1: raise ValueError("A partitioned Variable must have rank at least 1, " "shape: %s" % shape) slicing = partitioner(shape=shape, dtype=dtype) if not isinstance(slicing, collections_lib.Sequence): raise ValueError("Partitioner must return a sequence, but saw: %s" % slicing) if len(slicing) != shape.ndims: raise ValueError( "Partitioner returned a partition list that does not match the " "Variable's rank: %s vs. 
%s" % (slicing, shape)) if any(p < 1 for p in slicing): raise ValueError("Partitioner returned zero partitions for some axes: %s" % slicing) if sum(p > 1 for p in slicing) > 1: raise ValueError("Can only slice a variable along one dimension: " "shape: %s, partitioning: %s" % (shape, slicing)) return slicing # TODO(slebedev): could be inlined, but # `_VariableStore._get_partitioned_variable` is too complex even # without this logic. def _get_slice_dim_and_num_slices(slicing): """Get slicing dimension and number of slices from the partitioner output.""" for slice_dim, num_slices in enumerate(slicing): if num_slices > 1: break else: # Degenerate case: no partitioning applied. slice_dim = 0 num_slices = 1 return slice_dim, num_slices def _iter_slices(full_shape, num_slices, slice_dim): """Slices a given a shape along the specified dimension.""" num_slices_with_excess = full_shape[slice_dim] % num_slices offset = [0] * len(full_shape) min_slice_len = full_shape[slice_dim] // num_slices for i in xrange(num_slices): shape = full_shape[:] shape[slice_dim] = min_slice_len + bool(i < num_slices_with_excess) yield offset[:], shape offset[slice_dim] += shape[slice_dim] def default_variable_creator(next_creator=None, **kwargs): """Default variable creator.""" assert next_creator is None initial_value = kwargs.get("initial_value", None) trainable = kwargs.get("trainable", None) collections = kwargs.get("collections", None) validate_shape = kwargs.get("validate_shape", True) caching_device = kwargs.get("caching_device", None) name = kwargs.get("name", None) variable_def = kwargs.get("variable_def", None) dtype = kwargs.get("dtype", None) expected_shape = kwargs.get("expected_shape", None) import_scope = kwargs.get("import_scope", None) constraint = kwargs.get("constraint", None) use_resource = kwargs.get("use_resource", None) synchronization = kwargs.get("synchronization", None) aggregation = kwargs.get("aggregation", None) shape = kwargs.get("shape", None) if use_resource is None: use_resource = get_variable_scope().use_resource if use_resource is None: use_resource = _DEFAULT_USE_RESOURCE use_resource = use_resource or context.executing_eagerly() if use_resource: distribute_strategy = kwargs.get("distribute_strategy", None) return resource_variable_ops.ResourceVariable( initial_value=initial_value, trainable=trainable, collections=collections, validate_shape=validate_shape, caching_device=caching_device, name=name, dtype=dtype, constraint=constraint, variable_def=variable_def, import_scope=import_scope, distribute_strategy=distribute_strategy, synchronization=synchronization, aggregation=aggregation, shape=shape) else: return variables.RefVariable( initial_value=initial_value, trainable=trainable, collections=collections, validate_shape=validate_shape, caching_device=caching_device, name=name, dtype=dtype, constraint=constraint, variable_def=variable_def, expected_shape=expected_shape, import_scope=import_scope, synchronization=synchronization, aggregation=aggregation, shape=shape) def default_variable_creator_v2(next_creator=None, **kwargs): """Default variable creator.""" assert next_creator is None initial_value = kwargs.get("initial_value", None) trainable = kwargs.get("trainable", None) validate_shape = kwargs.get("validate_shape", True) caching_device = kwargs.get("caching_device", None) name = kwargs.get("name", None) variable_def = kwargs.get("variable_def", None) dtype = kwargs.get("dtype", None) import_scope = kwargs.get("import_scope", None) constraint = kwargs.get("constraint", None) 
distribute_strategy = kwargs.get("distribute_strategy", None) synchronization = kwargs.get("synchronization", None) aggregation = kwargs.get("aggregation", None) shape = kwargs.get("shape", None) return resource_variable_ops.ResourceVariable( initial_value=initial_value, trainable=trainable, validate_shape=validate_shape, caching_device=caching_device, name=name, dtype=dtype, constraint=constraint, variable_def=variable_def, import_scope=import_scope, distribute_strategy=distribute_strategy, synchronization=synchronization, aggregation=aggregation, shape=shape) variables.default_variable_creator = default_variable_creator variables.default_variable_creator_v2 = default_variable_creator_v2 def _make_getter(captured_getter, captured_previous): """Gets around capturing loop variables in python being broken.""" return lambda **kwargs: captured_getter(captured_previous, **kwargs) # TODO(apassos) remove forwarding symbol variable = variables.VariableV1 @tf_export(v1=["variable_creator_scope"]) @tf_contextlib.contextmanager def variable_creator_scope_v1(variable_creator): """Scope which defines a variable creation function to be used by variable(). variable_creator is expected to be a function with the following signature: ``` def variable_creator(next_creator, **kwargs) ``` The creator is supposed to eventually call the next_creator to create a variable if it does want to create a variable and not call Variable or ResourceVariable directly. This helps make creators composable. A creator may choose to create multiple variables, return already existing variables, or simply register that a variable was created and defer to the next creators in line. Creators can also modify the keyword arguments seen by the next creators. Custom getters in the variable scope will eventually resolve down to these custom creators when they do create variables. The valid keyword arguments in kwds are: initial_value: A `Tensor`, or Python object convertible to a `Tensor`, which is the initial value for the Variable. The initial value must have a shape specified unless `validate_shape` is set to False. Can also be a callable with no argument that returns the initial value when called. In that case, `dtype` must be specified. (Note that initializer functions from init_ops.py must first be bound to a shape before being used here.) trainable: If `True`, the default, also adds the variable to the graph collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as the default list of variables to use by the `Optimizer` classes. `trainable` defaults to `True`, unless `synchronization` is set to `ON_READ`, in which case it defaults to `False`. collections: List of graph collections keys. The new variable is added to these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`. validate_shape: If `False`, allows the variable to be initialized with a value of unknown shape. If `True`, the default, the shape of `initial_value` must be known. caching_device: Optional device string describing where the Variable should be cached for reading. Defaults to the Variable's device. If not `None`, caches on another device. Typical use is to cache on the device where the Ops using the Variable reside, to deduplicate copying through `Switch` and other conditional statements. name: Optional name for the variable. Defaults to `'Variable'` and gets uniquified automatically. dtype: If set, initial_value will be converted to the given type. 
If `None`, either the datatype will be kept (if `initial_value` is a Tensor), or `convert_to_tensor` will decide. constraint: A constraint function to be applied to the variable after updates by some algorithms. use_resource: if True, a ResourceVariable is always created. synchronization: Indicates when a distributed a variable will be aggregated. Accepted values are constants defined in the class `tf.VariableSynchronization`. By default the synchronization is set to `AUTO` and the current `DistributionStrategy` chooses when to synchronize. aggregation: Indicates how a distributed variable will be aggregated. Accepted values are constants defined in the class `tf.VariableAggregation`. This set may grow over time, so it's important the signature of creators is as mentioned above. Args: variable_creator: the passed creator Yields: A scope in which the creator is active """ with ops.get_default_graph()._variable_creator_scope(variable_creator): # pylint: disable=protected-access yield # Note: only the docstrings differ between this and v1. @tf_export("variable_creator_scope", v1=[]) @tf_contextlib.contextmanager def variable_creator_scope(variable_creator): """Scope which defines a variable creation function to be used by variable(). variable_creator is expected to be a function with the following signature: ``` def variable_creator(next_creator, **kwargs) ``` The creator is supposed to eventually call the next_creator to create a variable if it does want to create a variable and not call Variable or ResourceVariable directly. This helps make creators composable. A creator may choose to create multiple variables, return already existing variables, or simply register that a variable was created and defer to the next creators in line. Creators can also modify the keyword arguments seen by the next creators. Custom getters in the variable scope will eventually resolve down to these custom creators when they do create variables. The valid keyword arguments in kwds are: initial_value: A `Tensor`, or Python object convertible to a `Tensor`, which is the initial value for the Variable. The initial value must have a shape specified unless `validate_shape` is set to False. Can also be a callable with no argument that returns the initial value when called. In that case, `dtype` must be specified. (Note that initializer functions from init_ops.py must first be bound to a shape before being used here.) trainable: If `True`, the default, GradientTapes automatically watch uses of this Variable. validate_shape: If `False`, allows the variable to be initialized with a value of unknown shape. If `True`, the default, the shape of `initial_value` must be known. caching_device: Optional device string describing where the Variable should be cached for reading. Defaults to the Variable's device. If not `None`, caches on another device. Typical use is to cache on the device where the Ops using the Variable reside, to deduplicate copying through `Switch` and other conditional statements. name: Optional name for the variable. Defaults to `'Variable'` and gets uniquified automatically. dtype: If set, initial_value will be converted to the given type. If `None`, either the datatype will be kept (if `initial_value` is a Tensor), or `convert_to_tensor` will decide. constraint: A constraint function to be applied to the variable after updates by some algorithms. synchronization: Indicates when a distributed a variable will be aggregated. Accepted values are constants defined in the class `tf.VariableSynchronization`. 
By default the synchronization is set to `AUTO` and the current `DistributionStrategy` chooses when to synchronize. aggregation: Indicates how a distributed variable will be aggregated. Accepted values are constants defined in the class `tf.VariableAggregation`. This set may grow over time, so it's important that the signature of creators stays as described above. Args: variable_creator: the creator function to make active within the scope. Yields: A scope in which the creator is active. """ with ops.get_default_graph()._variable_creator_scope(variable_creator): # pylint: disable=protected-access yield
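To make the creator contract documented above concrete, here is a minimal sketch of using the v2 `variable_creator_scope` defined in this file. The `renaming_creator` function and its prefixing behaviour are illustrative assumptions, not part of the TensorFlow API.

```python
import tensorflow as tf


def renaming_creator(next_creator, **kwargs):
  # Hypothetical creator: prefix the requested name, then defer to the next
  # creator in the chain so the variable is actually constructed.
  kwargs["name"] = "wrapped_" + (kwargs.get("name") or "Variable")
  return next_creator(**kwargs)


with tf.variable_creator_scope(renaming_creator):
  v = tf.Variable(1.0, name="v")

# The creator intercepted the construction call, so the handle name is
# expected to carry the prefix, e.g. v.name == "wrapped_v:0".
```

Creators compose by delegating to `next_creator`; a creator that never delegates takes on the responsibility of building (or reusing) the variable itself.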
tensorflow-master
tensorflow/python/ops/variable_scope.py
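The `partitioner` machinery described in the `get_variable` and `_get_partitioned_variable` docstrings above can be exercised end to end. Below is a minimal graph-mode sketch; the scope name, the `[10, 20]` shape, and the tiny `min_slice_size` are illustrative assumptions chosen so that partitioning actually kicks in.

```python
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

# A [10, 20] float32 variable is 800 bytes; with min_slice_size=64 bytes and
# at most 4 partitions, the partitioner should slice along axis 0.
partitioner = tf.min_max_variable_partitioner(
    max_partitions=4, axis=0, min_slice_size=64)

with tf.variable_scope("embeddings", partitioner=partitioner):
  table = tf.get_variable("table", shape=[10, 20], dtype=tf.float32)

# `table` is expected to be a PartitionedVariable; accessing it as a Tensor
# concatenates the shards along the partitioned axis.
full = tf.convert_to_tensor(table)  # shape [10, 20]
```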
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Ops to manipulate lists of tensors.""" # pylint: disable=g-bad-name from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.ops import array_ops from tensorflow.python.ops import gen_list_ops # go/tf-wildcard-import # pylint: disable=wildcard-import from tensorflow.python.ops.gen_list_ops import * # pylint: enable=wildcard-import from tensorflow.python.util.lazy_loader import LazyLoader # list_ops -> control_flow_ops -> tensor_array_ops -> list_ops control_flow_ops = LazyLoader( "control_flow_ops", globals(), "tensorflow.python.ops.control_flow_ops") ops.NotDifferentiable("TensorListConcatLists") ops.NotDifferentiable("TensorListElementShape") ops.NotDifferentiable("TensorListLength") ops.NotDifferentiable("TensorListPushBackBatch") def empty_tensor_list(element_shape, element_dtype, max_num_elements=None, name=None): if max_num_elements is None: max_num_elements = -1 return gen_list_ops.empty_tensor_list( element_shape=_build_element_shape(element_shape), element_dtype=element_dtype, max_num_elements=max_num_elements, name=name) def tensor_list_reserve(element_shape, num_elements, element_dtype, name=None): return gen_list_ops.tensor_list_reserve( element_shape=_build_element_shape(element_shape), num_elements=num_elements, element_dtype=element_dtype, name=name) def tensor_list_from_tensor(tensor, element_shape, name=None): return gen_list_ops.tensor_list_from_tensor( tensor=tensor, element_shape=_build_element_shape(element_shape), name=name) def tensor_list_get_item(input_handle, index, element_dtype, element_shape=None, name=None): return gen_list_ops.tensor_list_get_item( input_handle=input_handle, index=index, element_shape=_build_element_shape(element_shape), element_dtype=element_dtype, name=name) def tensor_list_pop_back(input_handle, element_dtype, name=None): return gen_list_ops.tensor_list_pop_back( input_handle=input_handle, element_shape=-1, element_dtype=element_dtype, name=name) def tensor_list_gather(input_handle, indices, element_dtype, element_shape=None, name=None): return gen_list_ops.tensor_list_gather( input_handle=input_handle, indices=indices, element_shape=_build_element_shape(element_shape), element_dtype=element_dtype, name=name) def tensor_list_scatter(tensor, indices, element_shape=None, input_handle=None, name=None): if input_handle is not None: return gen_list_ops.tensor_list_scatter_into_existing_list( input_handle=input_handle, tensor=tensor, indices=indices, name=name) else: return gen_list_ops.tensor_list_scatter_v2( tensor=tensor, indices=indices, element_shape=_build_element_shape(element_shape), num_elements=-1, name=name) def tensor_list_stack(input_handle, 
element_dtype, num_elements=-1, element_shape=None, name=None): return gen_list_ops.tensor_list_stack( input_handle=input_handle, element_shape=_build_element_shape(element_shape), element_dtype=element_dtype, num_elements=num_elements, name=name) def tensor_list_concat(input_handle, element_dtype, element_shape=None, name=None): # Ignore the lengths output of TensorListConcat. It is only used during # gradient computation. return gen_list_ops.tensor_list_concat_v2( input_handle=input_handle, element_dtype=element_dtype, element_shape=_build_element_shape(element_shape), leading_dims=ops.convert_to_tensor([], dtype=dtypes.int64), name=name)[0] def tensor_list_split(tensor, element_shape, lengths, name=None): return gen_list_ops.tensor_list_split( tensor=tensor, element_shape=_build_element_shape(element_shape), lengths=lengths, name=name) def tensor_list_set_item(input_handle, index, item, resize_if_index_out_of_bounds=False, name=None): """Sets `item` at `index` in input list.""" if resize_if_index_out_of_bounds: input_list_size = gen_list_ops.tensor_list_length(input_handle) # TODO(srbs): This could cause some slowdown. Consider fusing resize # functionality in the SetItem op. input_handle = control_flow_ops.cond( index >= input_list_size, lambda: gen_list_ops.tensor_list_resize( # pylint: disable=g-long-lambda input_handle, index + 1), lambda: input_handle) return gen_list_ops.tensor_list_set_item( input_handle=input_handle, index=index, item=item, name=name) @ops.RegisterGradient("TensorListPushBack") def _PushBackGrad(op, dresult): return gen_list_ops.tensor_list_pop_back( dresult, element_shape=array_ops.shape(op.inputs[1]), element_dtype=op.get_attr("element_dtype")) @ops.RegisterGradient("TensorListPopBack") def _PopBackGrad(op, dlist, delement): if dlist is None: dlist = empty_tensor_list( element_dtype=delement.dtype, element_shape=gen_list_ops.tensor_list_element_shape( op.outputs[0], shape_type=dtypes.int32)) return gen_list_ops.tensor_list_push_back(dlist, delement), None @ops.RegisterGradient("TensorListStack") def _TensorListStackGrad(unused_op, dtensor): return tensor_list_from_tensor(dtensor, element_shape=dtensor.shape[1:]), None @ops.RegisterGradient("TensorListConcat") @ops.RegisterGradient("TensorListConcatV2") def _TensorListConcatGrad(op, dtensor, unused_dlengths): """Gradient function for TensorListConcat.""" dlist = tensor_list_split( dtensor, element_shape=gen_list_ops.tensor_list_element_shape( op.inputs[0], shape_type=dtypes.int32), lengths=op.outputs[1]) if op.type == "TensorListConcatV2": return dlist, None, None else: return dlist @ops.RegisterGradient("TensorListSplit") def _TensorListSplitGrad(op, dlist): tensor, _, lengths = op.inputs element_shape = array_ops.slice(array_ops.shape(tensor), [1], [-1]) element_shape = array_ops.concat([[-1], element_shape], axis=0) return gen_list_ops.tensor_list_concat_v2( dlist, element_shape=element_shape, leading_dims=lengths, element_dtype=op.inputs[0].dtype)[0], None, None @ops.RegisterGradient("TensorListFromTensor") def _TensorListFromTensorGrad(op, dlist): """Gradient for TensorListFromTensor.""" t = op.inputs[0] if t.shape.dims and t.shape.dims[0].value is not None: num_elements = t.shape.dims[0].value else: num_elements = None if dlist is None: dlist = empty_tensor_list( element_dtype=t.dtype, element_shape=gen_list_ops.tensor_list_element_shape( op.outputs[0], shape_type=dtypes.int32)) tensor_grad = gen_list_ops.tensor_list_stack( dlist, element_shape=array_ops.slice(array_ops.shape(t), [1], [-1]), 
element_dtype=t.dtype, num_elements=num_elements) shape_grad = None return tensor_grad, shape_grad @ops.RegisterGradient("TensorListGetItem") def _TensorListGetItemGrad(op, ditem): """Gradient for TensorListGetItem.""" list_size = gen_list_ops.tensor_list_length(op.inputs[0]) list_grad = gen_list_ops.tensor_list_set_item( gen_list_ops.tensor_list_reserve( gen_list_ops.tensor_list_element_shape(op.inputs[0], shape_type=dtypes.int32), list_size, element_dtype=ditem.dtype), index=op.inputs[1], item=ditem) index_grad = None element_shape_grad = None return list_grad, index_grad, element_shape_grad @ops.RegisterGradient("TensorListSetItem") def _TensorListSetItemGrad(op, dlist): """Gradient function for TensorListSetItem.""" _, index, item = op.inputs list_grad = gen_list_ops.tensor_list_set_item( dlist, index=index, item=array_ops.zeros_like(item)) index_grad = None element_grad = tensor_list_get_item( dlist, index, element_shape=array_ops.shape(item), element_dtype=item.dtype) return list_grad, index_grad, element_grad @ops.RegisterGradient("TensorListResize") def _TensorListResizeGrad(op, dlist): input_list, _ = op.inputs input_list_size = gen_list_ops.tensor_list_length(input_list) return gen_list_ops.tensor_list_resize(dlist, input_list_size), None @ops.RegisterGradient("TensorListGather") def _TensorListGatherGrad(op, dtensor): """Gradient function for TensorListGather.""" input_list, indices, _ = op.inputs element_shape = gen_list_ops.tensor_list_element_shape( input_list, shape_type=dtypes.int32) num_elements = gen_list_ops.tensor_list_length(input_list) dlist = tensor_list_reserve(element_shape, num_elements, dtensor.dtype) dlist = tensor_list_scatter( tensor=dtensor, indices=indices, input_handle=dlist) return dlist, None, None @ops.RegisterGradient("TensorListScatter") @ops.RegisterGradient("TensorListScatterV2") def _TensorListScatterGrad(op, dlist): """Gradient function for TensorListScatter.""" tensor = op.inputs[0] indices = op.inputs[1] dtensor = gen_list_ops.tensor_list_gather( dlist, indices, element_shape=array_ops.slice(array_ops.shape(tensor), [1], [-1]), element_dtype=tensor.dtype) if op.type == "TensorListScatterV2": return dtensor, None, None, None else: return dtensor, None, None @ops.RegisterGradient("TensorListScatterIntoExistingList") def _TensorListScatterIntoExistingListGrad(op, dlist): """Gradient function for TensorListScatterIntoExistingList.""" _, tensor, indices = op.inputs dtensor = gen_list_ops.tensor_list_gather( dlist, indices, element_shape=array_ops.slice(array_ops.shape(tensor), [1], [-1]), element_dtype=tensor.dtype) zeros = array_ops.zeros_like(tensor) dlist = tensor_list_scatter(zeros, indices, indices, input_handle=dlist) return dlist, dtensor, None def _build_element_shape(shape): """Converts shape to a format understood by list_ops for element_shape. If `shape` is already a `Tensor` it is returned as-is. We do not perform a type check here. If shape is None or a TensorShape with unknown rank, -1 is returned. If shape is a scalar, an int32 tensor with empty list is returned. Note we do directly return an empty list since ops.convert_to_tensor would conver it to a float32 which is not a valid type for element_shape. If shape is a sequence of dims, None's in the list are replaced with -1. We do not check the dtype of the other dims. Args: shape: Could be None, Tensor, TensorShape or a list of dims (each dim could be a None, scalar or Tensor). Returns: A None-free shape that can be converted to a tensor. 
""" if isinstance(shape, ops.Tensor): return shape if isinstance(shape, tensor_shape.TensorShape): # `TensorShape.as_list` requires rank to be known. shape = shape.as_list() if shape else None # Shape is unknown. if shape is None: return -1 # Shape is a scalar. if not shape: return ops.convert_to_tensor(shape, dtype=dtypes.int32) # Shape is a sequence of dimensions. Convert None dims to -1. def convert(val): if val is None: return -1 if isinstance(val, ops.Tensor): return val if isinstance(val, tensor_shape.Dimension): return val.value if val.value is not None else -1 return val return [convert(d) for d in shape]
tensorflow-master
tensorflow/python/ops/list_ops.py
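Before moving on to the next file, a short sketch of how the TensorList wrappers and the `RegisterGradient` hooks above fit together under eager execution; the values are illustrative.

```python
import tensorflow as tf

from tensorflow.python.ops import list_ops

x = tf.constant([[1.0, 2.0], [3.0, 4.0]])
with tf.GradientTape() as tape:
  tape.watch(x)
  # Turn the rows of `x` into a TensorList, then stack them back into a tensor.
  handle = list_ops.tensor_list_from_tensor(x, element_shape=[2])
  y = tf.reduce_sum(
      list_ops.tensor_list_stack(handle, element_dtype=tf.float32))

# _TensorListFromTensorGrad and _TensorListStackGrad route the gradient back
# to `x`; for a plain sum it should be all ones: [[1., 1.], [1., 1.]].
grad = tape.gradient(y, x)
```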
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Ops to use variables as resources.""" # pylint: disable=g-bad-name from __future__ import absolute_import from __future__ import division from __future__ import print_function import contextlib import functools from tensorflow.core.framework import attr_value_pb2 from tensorflow.core.framework import variable_pb2 from tensorflow.python import pywrap_tensorflow from tensorflow.python.eager import context from tensorflow.python.eager import tape from tensorflow.python.framework import constant_op from tensorflow.python.framework import cpp_shape_inference_pb2 from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.ops import array_ops from tensorflow.python.ops import gen_array_ops from tensorflow.python.ops import gen_logging_ops from tensorflow.python.ops import gen_resource_variable_ops from tensorflow.python.ops import gen_state_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import state_ops from tensorflow.python.ops import variables # go/tf-wildcard-import # pylint: disable=wildcard-import from tensorflow.python.ops.gen_resource_variable_ops import * # pylint: enable=wildcard-import from tensorflow.python.training.tracking import base as trackable from tensorflow.python.util import compat from tensorflow.python.util.deprecation import deprecated from tensorflow.python.util.deprecation import deprecated_args def get_resource_handle_data(graph_op): assert type(graph_op) == ops.Tensor # pylint: disable=unidiomatic-typecheck handle_data = pywrap_tensorflow.GetHandleShapeAndType( graph_op.graph._c_graph, graph_op._as_tf_output()) # pylint: disable=protected-access return cpp_shape_inference_pb2.CppShapeInferenceResult.HandleData.FromString( compat.as_bytes(handle_data)) def get_eager_safe_handle_data(handle): """Get the data handle from the Tensor `handle`.""" assert isinstance(handle, ops.Tensor) if isinstance(handle, ops.EagerTensor): return handle._handle_data # pylint: disable=protected-access else: return get_resource_handle_data(handle) def _set_handle_shapes_and_types(tensor, handle_data, graph_mode): """Sets the shape inference result HandleData on tensor. Args: tensor: A `Tensor` or `EagerTensor`. handle_data: A `CppShapeInferenceResult.HandleData`. graph_mode: A python bool. """ tensor._handle_data = handle_data # pylint: disable=protected-access if not graph_mode: return # Not an EagerTensor, so a graph tensor. 
shapes, types = zip(*[(pair.shape, pair.dtype) for pair in handle_data.shape_and_type]) ranks = [len(s.dim) if not s.unknown_rank else -1 for s in shapes] shapes = [[d.size for d in s.dim] # pylint: disable=g-complex-comprehension if not s.unknown_rank else None for s in shapes] pywrap_tensorflow.TF_GraphSetOutputHandleShapesAndTypes_wrapper( tensor._op._graph._c_graph, # pylint: disable=protected-access tensor._as_tf_output(), # pylint: disable=protected-access shapes, ranks, types) def _combine_handle_data(handle, initial_value): """Concats HandleData from tensors `handle` and `initial_value`. Args: handle: A `Tensor` of dtype `resource`. initial_value: A `Tensor`. Returns: A `CppShapeInferenceResult.HandleData`. If `initial_value` has dtype `variant`, the `HandleData` contains the concatenation of the shape_and_type from both `handle` and `initial_value`. Raises: RuntimeError: If handle, which was returned by VarHandleOp, either has no handle data, or its len(handle_data.shape_and_type) != 1. """ assert handle.dtype == dtypes.resource variable_handle_data = get_eager_safe_handle_data(handle) if initial_value.dtype != dtypes.variant: return variable_handle_data extra_handle_data = get_eager_safe_handle_data(initial_value) if extra_handle_data is not None and extra_handle_data.is_set: if (variable_handle_data is None or not variable_handle_data.is_set or len(variable_handle_data.shape_and_type) != 1): raise RuntimeError( "Expected VarHandleOp to return a length==1 shape_and_type, " "but saw: '%s'" % (variable_handle_data,)) variable_handle_data.shape_and_type.extend( extra_handle_data.shape_and_type) return variable_handle_data def variable_handle_from_shape_and_dtype( shape, dtype, shared_name, name, graph_mode, extra_handle_data=None): """Create a new variable handle, optionally copying in `extra_handle_data`.""" container = ops.get_default_graph()._container # pylint: disable=protected-access if container is None: container = "" handle = gen_resource_variable_ops.var_handle_op(shape=shape, dtype=dtype, shared_name=shared_name, name=name, container=container) if extra_handle_data is None: extra_handle_data = handle if graph_mode: full_handle_data = _combine_handle_data(handle, extra_handle_data) _set_handle_shapes_and_types(handle, full_handle_data, graph_mode) return handle else: # We do not want two distinct ResourceVariable objects for the same # underlying resource in the runtime. # When in eager mode, explicitly ensure so here. When in graph mode, it's # ensured by always generating different variable names. exists = gen_resource_variable_ops.var_is_initialized_op(handle) # We create an assert Op instead of checking right away in order to be # compatible with ASYNC execution mode. Further, since not all devices # support string tensors, we encode the assertion string in the Op name gen_logging_ops._assert( # pylint: disable=protected-access math_ops.logical_not(exists), [exists], name="EagerVariableNameReuse") with context.graph_mode(), ops.Graph().as_default() as graph: h = gen_resource_variable_ops.var_handle_op(shape=shape, dtype=dtype, shared_name=shared_name, name=name, container=container) # Tensor._handle_data contains information for the shape-inference code to # know the shape and dtype of the variable pointed to by a handle. Since # shape inference doesn't run in eager mode we copy this data here for # when the handle is captured by an eager mode function. 
# pylint: disable=protected-access full_handle_data = _combine_handle_data(h, extra_handle_data) _set_handle_shapes_and_types(handle, full_handle_data, graph_mode) # pylint: enable=protected-access # Clean up op->graph->op reference cycles. ops.dismantle_graph(graph) return handle def eager_safe_variable_handle(initial_value, shape, shared_name, name, graph_mode): """Creates a variable handle with information to do shape inference. The dtype is read from `initial_value` and stored in the returned resource tensor's handle data. If `initial_value.dtype == tf.variant`, we additionally extract the handle data (if any) from `initial_value` and append it to the `handle_data`. In this case, the returned tensor's handle data is in the form ``` is_set: true shape_and_type { shape { // initial_value.shape } dtype: DT_VARIANT } shape_and_type { // handle_data(initial_value).shape_and_type[0] } shape_and_type { // handle_data(initial_value).shape_and_type[1] } ... ``` Ops that read from this tensor, such as `ReadVariableOp` and `AssignVariableOp`, know that `handle_data(handle).shape_and_type[1:]` correspond to the handle data of the variant(s) stored in the Variable. Args: initial_value: A `Tensor`. shape: The shape of the handle data. Can be `TensorShape(None)` (i.e. unknown shape). shared_name: A string. name: A string. graph_mode: A python bool. Returns: The handle, a `Tensor` of type `resource`. """ dtype = initial_value.dtype.base_dtype return variable_handle_from_shape_and_dtype( shape, dtype, shared_name, name, graph_mode, initial_value) @contextlib.contextmanager def _handle_graph(handle): # Note: might have an eager tensor but not be executing eagerly when building # functions. if (context.executing_eagerly() or isinstance(handle, ops.EagerTensor) or ops.has_default_graph()): yield else: with handle.graph.as_default(): yield class EagerResourceDeleter(object): """An object which cleans up a resource handle. An alternative to defining a __del__ method on an object. The intended use is that ResourceVariables or other objects with resource handles will maintain a single reference to this object. When the parent object is collected, this object will be too. Even if the parent object is part of a reference cycle, the cycle will be collectable. """ def __init__(self, handle, handle_device): if not isinstance(handle, ops.Tensor): raise ValueError( ("Passed handle=%s to EagerResourceDeleter. Was expecting a handle " "Tensor." % (handle,))) self._handle = handle self._handle_device = handle_device # This is held since the __del__ function runs an op, and if the context() # is collected before this object, there will be a segfault when running the # op. self._context = context.context() def __del__(self): # Resources follow object-identity when executing eagerly, so it is safe to # delete the resource we have a handle to. try: # This resource was created in eager mode. However, this destructor may be # running in graph mode (especially during unit tests). To clean up # successfully, we switch back into eager mode temporarily. with context.eager_mode(): with ops.device(self._handle_device): gen_resource_variable_ops.destroy_resource_op( self._handle, ignore_lookup_error=True) except TypeError: # Suppress some exceptions, mainly for the case when we're running on # module deletion. Things that can go wrong include the context module # already being unloaded, self._handle._handle_data no longer being # valid, and so on. 
Printing warnings in these cases is silly # (exceptions raised from __del__ are printed as warnings to stderr). pass # 'NoneType' object is not callable when the handle has been # partially unloaded. except AttributeError: pass # 'NoneType' object has no attribute 'eager_mode' when context has # been unloaded. Will catch other module unloads as well. def shape_safe_assign_variable_handle(handle, shape, value, name=None): """Helper that checks shape compatibility and assigns variable.""" with _handle_graph(handle): value_tensor = ops.convert_to_tensor(value) shape.assert_is_compatible_with(value_tensor.shape) return gen_resource_variable_ops.assign_variable_op(handle, value_tensor, name=name) def _maybe_set_handle_data(dtype, handle, tensor): if dtype == dtypes.variant: # For DT_VARIANT types, the handle's shape_and_type[1:] stores the # variant's handle data. Extract it. handle_data = get_eager_safe_handle_data(handle) if handle_data.is_set and len(handle_data.shape_and_type) > 1: tensor._handle_data = ( # pylint: disable=protected-access cpp_shape_inference_pb2.CppShapeInferenceResult.HandleData( is_set=True, shape_and_type=handle_data.shape_and_type[1:])) def variable_accessed(variable): """Records that `variable` was accessed for the tape and FuncGraph.""" if hasattr(ops.get_default_graph(), "watch_variable"): ops.get_default_graph().watch_variable(variable) if variable.trainable: tape.variable_accessed(variable) class BaseResourceVariable(variables.VariableV1): """A python variable from an existing handle.""" @deprecated_args( None, "If using Keras pass *_constraint arguments to layers.", "constraint") def __init__( # pylint: disable=super-init-not-called self, trainable=None, shape=None, dtype=None, handle=None, constraint=None, synchronization=None, aggregation=None, distribute_strategy=None, name=None, unique_id=None, handle_name=None, graph_element=None, initial_value=None, initializer_op=None, is_initialized_op=None, cached_value=None, save_slice_info=None, handle_deleter=None, **unused_kwargs): """Creates a variable from a handle. Args: trainable: If `True`, GradientTapes automatically watch uses of this Variable. shape: The variable's shape. dtype: The variable's dtype. handle: The variable's handle constraint: An optional projection function to be applied to the variable after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints for layer weights). The function must take as input the unprojected Tensor representing the value of the variable and return the Tensor for the projected value (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. synchronization: Indicates when a distributed a variable will be aggregated. Accepted values are constants defined in the class `tf.VariableSynchronization`. By default the synchronization is set to `AUTO` and the current `DistributionStrategy` chooses when to synchronize. aggregation: Indicates how a distributed variable will be aggregated. Accepted values are constants defined in the class `tf.VariableAggregation`. distribute_strategy: The distribution strategy this variable was created under. name: The name for this variable. unique_id: Internal. Unique ID for this variable's handle. handle_name: The name for the variable's handle. graph_element: Optional, required only in session.run-mode. Pre-created tensor which reads this variable's value. initial_value: Optional. Variable's initial value. 
initializer_op: Operation which assigns the variable's initial value. is_initialized_op: Pre-created operation to check whether this variable is initialized. cached_value: Pre-created operation to read this variable in a specific device. save_slice_info: Metadata for variable partitioning. handle_deleter: EagerResourceDeleter responsible for cleaning up the handle. """ with ops.init_scope(): self._in_graph_mode = not context.executing_eagerly() synchronization, aggregation, trainable = ( variables.validate_synchronization_aggregation_trainable( synchronization, aggregation, trainable, name)) self._trainable = trainable self._synchronization = synchronization self._aggregation = aggregation self._save_slice_info = save_slice_info self._initial_value = initial_value self._initializer_op = initializer_op self._is_initialized_op = is_initialized_op self._graph_element = graph_element self._cached_value = cached_value self._distribute_strategy = distribute_strategy # Store the graph key so optimizers know how to only retrieve variables from # this graph. Guaranteed to be the same as the eager graph_key. self._graph_key = ops.get_default_graph()._graph_key # pylint: disable=protected-access self._shape = tensor_shape.as_shape(shape) self._dtype = dtypes.as_dtype(dtype) self._handle = handle self._graph_element = graph_element self._unique_id = unique_id self._handle_name = handle_name + ":0" self._constraint = constraint # After the handle has been created, set up a way to clean it up when # executing eagerly. We'll hold the only reference to the deleter, so that # when this object is garbage collected the deleter will be too. This # means ResourceVariables can be part of reference cycles without those # cycles being uncollectable. if not self._in_graph_mode: if handle_deleter is None: handle_deleter = EagerResourceDeleter( handle=self._handle, handle_device=self._handle.device) self._handle_deleter = handle_deleter self._cached_shape_as_list = None def __repr__(self): if context.executing_eagerly() and not self._in_graph_mode: return "<tf.Variable '%s' shape=%s dtype=%s, numpy=%s>" % ( self.name, self.get_shape(), self.dtype.name, ops.numpy_text(self.read_value(), is_repr=True)) else: return "<tf.Variable '%s' shape=%s dtype=%s>" % ( self.name, self.get_shape(), self.dtype.name) @contextlib.contextmanager def _assign_dependencies(self): """Makes assignments depend on the cached value, if any. This prevents undefined behavior with reads not ordered wrt writes. Yields: None. 
""" if self._cached_value is not None: with ops.control_dependencies([self._cached_value]): yield else: yield def __nonzero__(self): return self.__bool__() def __bool__(self): return bool(self.read_value()) def __copy__(self): return self def __deepcopy__(self, memo): if not context.executing_eagerly(): raise NotImplementedError( "__deepcopy__() is only available when eager execution is enabled.") copied_variable = ResourceVariable( initial_value=self.read_value(), trainable=self._trainable, constraint=self._constraint, dtype=self._dtype, name=self._shared_name + "_copy", distribute_strategy=self._distribute_strategy) memo[self._unique_id] = copied_variable return copied_variable @property def dtype(self): """The dtype of this variable.""" return self._dtype @property def device(self): """The device this variable is on.""" return self._handle.device @property def graph(self): """The `Graph` of this variable.""" return self._handle.graph @property def name(self): """The name of the handle for this variable.""" return self._handle_name @property def shape(self): """The shape of this variable.""" return self._shape def _shape_as_list(self): if self.shape.ndims is None: return None return [dim.value for dim in self.shape.dims] def _shape_tuple(self): shape = self._shape_as_list() if shape is None: return None return tuple(shape) @property def create(self): """The op responsible for initializing this variable.""" if not self._in_graph_mode: raise RuntimeError("Calling create is not supported when eager execution" " is enabled.") return self._initializer_op @property def handle(self): """The handle by which this variable can be accessed.""" return self._handle def value(self): """A cached operation which reads the value of this variable.""" if self._cached_value is not None: return self._cached_value with ops.colocate_with(None, ignore_existing=True): with ops.device(self._handle.device): return self._read_variable_op() def _as_graph_element(self): """Conversion function for Graph.as_graph_element().""" return self._graph_element @property def initializer(self): """The op responsible for initializing this variable.""" return self._initializer_op @property def initial_value(self): """Returns the Tensor used as the initial value for the variable.""" if context.executing_eagerly(): raise RuntimeError("initial_value not supported in EAGER mode.") return self._initial_value @property @deprecated( None, "Apply a constraint manually following the optimizer update step.") def constraint(self): """Returns the constraint function associated with this variable. Returns: The constraint function that was passed to the variable constructor. Can be `None` if no constraint was passed. """ return self._constraint @property def op(self): """The op for this variable.""" return self._handle.op @property def trainable(self): return self._trainable @property def synchronization(self): return self._synchronization @property def aggregation(self): return self._aggregation def eval(self, session=None): """Evaluates and returns the value of this variable.""" if context.executing_eagerly(): raise RuntimeError("Trying to eval in EAGER mode") return self._graph_element.eval(session=session) def numpy(self): if context.executing_eagerly(): return self.read_value().numpy() raise NotImplementedError( "numpy() is only available when eager execution is enabled.") @deprecated(None, "Prefer Dataset.range instead.") def count_up_to(self, limit): """Increments this variable until it reaches `limit`. 
When that Op is run it tries to increment the variable by `1`. If incrementing the variable would bring it above `limit` then the Op raises the exception `OutOfRangeError`. If no error is raised, the Op outputs the value of the variable before the increment. This is essentially a shortcut for `count_up_to(self, limit)`. Args: limit: value at which incrementing the variable raises an error. Returns: A `Tensor` that will hold the variable value before the increment. If no other Op modifies this variable, the values produced will all be distinct. """ return gen_state_ops.resource_count_up_to(self.handle, limit=limit, T=self.dtype) def _read_variable_op(self): variable_accessed(self) result = gen_resource_variable_ops.read_variable_op(self._handle, self._dtype) _maybe_set_handle_data(self._dtype, self._handle, result) if not context.executing_eagerly(): # Note that if a control flow context is active the input of the read op # might not actually be the handle. This line bypasses it. tape.record_operation( "ReadVariableOp", [result], [self._handle], lambda x: [x]) return result def read_value(self): """Constructs an op which reads the value of this variable. Should be used when there are multiple reads, or when it is desirable to read the value only after some condition is true. Returns: the read operation. """ with ops.name_scope("Read"): # Ensure we read the variable in the same device as the handle. with ops.device(self._handle.device): value = self._read_variable_op() # Return an identity so it can get placed on whatever device the context # specifies instead of the device where the variable is. return array_ops.identity(value) def sparse_read(self, indices, name=None): """Reads the value of this variable sparsely, using `gather`.""" with ops.name_scope("Gather" if name is None else name) as name: variable_accessed(self) value = gen_resource_variable_ops.resource_gather( self._handle, indices, dtype=self._dtype, name=name) if self._dtype == dtypes.variant: # For DT_VARIANT types, the handle's shape_and_type[1:] stores the # variant's handle data. Extract it. handle_data = get_eager_safe_handle_data(self._handle) if handle_data.is_set and len(handle_data.shape_and_type) > 1: value._handle_data = ( # pylint: disable=protected-access cpp_shape_inference_pb2.CppShapeInferenceResult.HandleData( is_set=True, shape_and_type=handle_data.shape_and_type[1:])) return array_ops.identity(value) def gather_nd(self, indices, name=None): """Reads the value of this variable sparsely, using `gather_nd`.""" with ops.name_scope("GatherNd" if name is None else name) as name: if self.trainable: variable_accessed(self) value = gen_resource_variable_ops.resource_gather_nd( self._handle, indices, dtype=self._dtype, name=name) return array_ops.identity(value) def to_proto(self, export_scope=None): """Converts a `ResourceVariable` to a `VariableDef` protocol buffer. Args: export_scope: Optional `string`. Name scope to remove. Raises: RuntimeError: If run in EAGER mode. Returns: A `VariableDef` protocol buffer, or `None` if the `Variable` is not in the specified name scope. 
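    Example (an illustrative sketch, not part of this method's contract; the
    variable name and graph-mode setup below are assumptions):

    ```python
    v = tf.compat.v1.get_variable("v", shape=[2], use_resource=True)
    var_def = v.to_proto()
    print(var_def.variable_name)  # e.g. "v:0", the name of the resource handle
    ```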
""" if context.executing_eagerly(): raise RuntimeError("to_proto not supported in EAGER mode.") if export_scope is None or self.handle.name.startswith(export_scope): var_def = variable_pb2.VariableDef() var_def.variable_name = ops.strip_name_scope(self.handle.name, export_scope) if self._initial_value is not None: # This is inside an if-statement for backwards compatibility, since # self._initial_value might be None for variables constructed from old # protos. var_def.initial_value_name = ops.strip_name_scope( self._initial_value.name, export_scope) var_def.initializer_name = ops.strip_name_scope(self.initializer.name, export_scope) if self._cached_value is not None: var_def.snapshot_name = ops.strip_name_scope(self._cached_value.name, export_scope) else: # Store the graph_element here var_def.snapshot_name = ops.strip_name_scope(self._graph_element.name, export_scope) var_def.is_resource = True var_def.trainable = self.trainable var_def.synchronization = self.synchronization.value var_def.aggregation = self.aggregation.value if self._save_slice_info: var_def.save_slice_info_def.MergeFrom( self._save_slice_info.to_proto(export_scope=export_scope)) return var_def else: return None @staticmethod def from_proto(variable_def, import_scope=None): if context.executing_eagerly(): raise RuntimeError("from_proto not supported in EAGER mode.") return ResourceVariable( variable_def=variable_def, import_scope=import_scope) def set_shape(self, shape): """Unsupported.""" raise NotImplementedError("ResourceVariable does not implement set_shape()") __array_priority__ = 100 def is_initialized(self, name=None): """Checks whether a resource variable has been initialized. Outputs boolean scalar indicating whether the tensor has been initialized. Args: name: A name for the operation (optional). Returns: A `Tensor` of type `bool`. """ return gen_resource_variable_ops.var_is_initialized_op(self.handle, name) def assign_sub(self, delta, use_locking=None, name=None, read_value=True): """Subtracts a value from this variable. Args: delta: A `Tensor`. The value to subtract from this variable. use_locking: If `True`, use locking during the operation. name: The name to use for the operation. read_value: A `bool`. Whether to read and return the new value of the variable or not. Returns: If `read_value` is `True`, this method will return the new value of the variable after the assignment has completed. Otherwise, when in graph mode it will return the `Operation` that does the assignment, and when in eager mode it will return `None`. """ # TODO(apassos): this here and below is not atomic. Consider making it # atomic if there's a way to do so without a performance cost for those who # don't need it. with _handle_graph(self.handle), self._assign_dependencies(): assign_sub_op = gen_resource_variable_ops.assign_sub_variable_op( self.handle, ops.convert_to_tensor(delta, dtype=self.dtype), name=name) if read_value: return self._lazy_read(assign_sub_op) return assign_sub_op def assign_add(self, delta, use_locking=None, name=None, read_value=True): """Adds a value to this variable. Args: delta: A `Tensor`. The value to add to this variable. use_locking: If `True`, use locking during the operation. name: The name to use for the operation. read_value: A `bool`. Whether to read and return the new value of the variable or not. Returns: If `read_value` is `True`, this method will return the new value of the variable after the assignment has completed. 
Otherwise, when in graph mode it will return the `Operation` that does the assignment, and when in eager mode it will return `None`. """ with _handle_graph(self.handle), self._assign_dependencies(): assign_add_op = gen_resource_variable_ops.assign_add_variable_op( self.handle, ops.convert_to_tensor(delta, dtype=self.dtype), name=name) if read_value: return self._lazy_read(assign_add_op) return assign_add_op def _lazy_read(self, op): variable_accessed(self) return _UnreadVariable( handle=self._handle, dtype=self.dtype, shape=self._shape, in_graph_mode=self._in_graph_mode, deleter=self._handle_deleter if not self._in_graph_mode else None, parent_op=op, unique_id=self._unique_id) def assign(self, value, use_locking=None, name=None, read_value=True): """Assigns a new value to this variable. Args: value: A `Tensor`. The new value for this variable. use_locking: If `True`, use locking during the assignment. name: The name to use for the assignment. read_value: A `bool`. Whether to read and return the new value of the variable or not. Returns: If `read_value` is `True`, this method will return the new value of the variable after the assignment has completed. Otherwise, when in graph mode it will return the `Operation` that does the assignment, and when in eager mode it will return `None`. """ # Note: not depending on the cached value here since this can used to # initialize the variable. with _handle_graph(self.handle): value_tensor = ops.convert_to_tensor(value, dtype=self.dtype) self._shape.assert_is_compatible_with(value_tensor.shape) assign_op = gen_resource_variable_ops.assign_variable_op( self.handle, value_tensor, name=name) if read_value: return self._lazy_read(assign_op) return assign_op def __reduce__(self): # The implementation mirrors that of __deepcopy__. return functools.partial( ResourceVariable, initial_value=self.numpy(), trainable=self.trainable, name=self._shared_name, dtype=self.dtype, constraint=self.constraint, distribute_strategy=self._distribute_strategy), () def scatter_sub(self, sparse_delta, use_locking=False, name=None): """Subtracts `tf.IndexedSlices` from this variable. Args: sparse_delta: `tf.IndexedSlices` to be subtracted from this variable. use_locking: If `True`, use locking during the operation. name: the name of the operation. Returns: A `Tensor` that will hold the new value of this variable after the scattered subtraction has completed. Raises: TypeError: if `sparse_delta` is not an `IndexedSlices`. """ if not isinstance(sparse_delta, ops.IndexedSlices): raise TypeError("sparse_delta is not IndexedSlices: %s" % sparse_delta) return self._lazy_read(gen_resource_variable_ops.resource_scatter_sub( self.handle, sparse_delta.indices, ops.convert_to_tensor(sparse_delta.values, self.dtype), name=name)) def scatter_add(self, sparse_delta, use_locking=False, name=None): """Adds `tf.IndexedSlices` to this variable. Args: sparse_delta: `tf.IndexedSlices` to be added to this variable. use_locking: If `True`, use locking during the operation. name: the name of the operation. Returns: A `Tensor` that will hold the new value of this variable after the scattered addition has completed. Raises: TypeError: if `sparse_delta` is not an `IndexedSlices`. 
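    Example (an illustrative sketch; the values are hypothetical and the result
    is observed once the update has actually run, e.g. under eager execution):

    ```python
    v = tf.Variable([1.0, 2.0, 3.0], use_resource=True)
    delta = tf.IndexedSlices(values=tf.constant([4.0, 5.0]),
                             indices=tf.constant([0, 2]))
    v.scatter_add(delta)  # v becomes [5.0, 2.0, 8.0]
    ```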
""" if not isinstance(sparse_delta, ops.IndexedSlices): raise TypeError("sparse_delta is not IndexedSlices: %s" % sparse_delta) return self._lazy_read(gen_resource_variable_ops.resource_scatter_add( self.handle, sparse_delta.indices, ops.convert_to_tensor(sparse_delta.values, self.dtype), name=name)) def scatter_max(self, sparse_delta, use_locking=False, name=None): """Updates this variable with the max of `tf.IndexedSlices` and itself. Args: sparse_delta: `tf.IndexedSlices` to use as an argument of max with this variable. use_locking: If `True`, use locking during the operation. name: the name of the operation. Returns: A `Tensor` that will hold the new value of this variable after the scattered maximization has completed. Raises: TypeError: if `sparse_delta` is not an `IndexedSlices`. """ if not isinstance(sparse_delta, ops.IndexedSlices): raise TypeError("sparse_delta is not IndexedSlices: %s" % sparse_delta) return self._lazy_read(gen_resource_variable_ops.resource_scatter_max( self.handle, sparse_delta.indices, ops.convert_to_tensor(sparse_delta.values, self.dtype), name=name)) def scatter_min(self, sparse_delta, use_locking=False, name=None): """Updates this variable with the min of `tf.IndexedSlices` and itself. Args: sparse_delta: `tf.IndexedSlices` to use as an argument of min with this variable. use_locking: If `True`, use locking during the operation. name: the name of the operation. Returns: A `Tensor` that will hold the new value of this variable after the scattered minimization has completed. Raises: TypeError: if `sparse_delta` is not an `IndexedSlices`. """ if not isinstance(sparse_delta, ops.IndexedSlices): raise TypeError("sparse_delta is not IndexedSlices: %s" % sparse_delta) return self._lazy_read(gen_resource_variable_ops.resource_scatter_min( self.handle, sparse_delta.indices, ops.convert_to_tensor(sparse_delta.values, self.dtype), name=name)) def scatter_mul(self, sparse_delta, use_locking=False, name=None): """Multiply this variable by `tf.IndexedSlices`. Args: sparse_delta: `tf.IndexedSlices` to multiply this variable by. use_locking: If `True`, use locking during the operation. name: the name of the operation. Returns: A `Tensor` that will hold the new value of this variable after the scattered multiplication has completed. Raises: TypeError: if `sparse_delta` is not an `IndexedSlices`. """ if not isinstance(sparse_delta, ops.IndexedSlices): raise TypeError("sparse_delta is not IndexedSlices: %s" % sparse_delta) return self._lazy_read(gen_resource_variable_ops.resource_scatter_mul( self.handle, sparse_delta.indices, ops.convert_to_tensor(sparse_delta.values, self.dtype), name=name)) def scatter_div(self, sparse_delta, use_locking=False, name=None): """Divide this variable by `tf.IndexedSlices`. Args: sparse_delta: `tf.IndexedSlices` to divide this variable by. use_locking: If `True`, use locking during the operation. name: the name of the operation. Returns: A `Tensor` that will hold the new value of this variable after the scattered division has completed. Raises: TypeError: if `sparse_delta` is not an `IndexedSlices`. """ if not isinstance(sparse_delta, ops.IndexedSlices): raise TypeError("sparse_delta is not IndexedSlices: %s" % sparse_delta) return self._lazy_read(gen_resource_variable_ops.resource_scatter_div( self.handle, sparse_delta.indices, ops.convert_to_tensor(sparse_delta.values, self.dtype), name=name)) def scatter_update(self, sparse_delta, use_locking=False, name=None): """Assigns `tf.IndexedSlices` to this variable. 
Args: sparse_delta: `tf.IndexedSlices` to be assigned to this variable. use_locking: If `True`, use locking during the operation. name: the name of the operation. Returns: A `Tensor` that will hold the new value of this variable after the scattered subtraction has completed. Raises: TypeError: if `sparse_delta` is not an `IndexedSlices`. """ if not isinstance(sparse_delta, ops.IndexedSlices): raise TypeError("sparse_delta is not IndexedSlices: %s" % sparse_delta) return self._lazy_read(gen_resource_variable_ops.resource_scatter_update( self.handle, sparse_delta.indices, ops.convert_to_tensor(sparse_delta.values, self.dtype), name=name)) def batch_scatter_update(self, sparse_delta, use_locking=False, name=None): """Assigns `tf.IndexedSlices` to this variable batch-wise. Analogous to `batch_gather`. This assumes that this variable and the sparse_delta IndexedSlices have a series of leading dimensions that are the same for all of them, and the updates are performed on the last dimension of indices. In other words, the dimensions should be the following: `num_prefix_dims = sparse_delta.indices.ndims - 1` `batch_dim = num_prefix_dims + 1` `sparse_delta.updates.shape = sparse_delta.indices.shape + var.shape[ batch_dim:]` where `sparse_delta.updates.shape[:num_prefix_dims]` `== sparse_delta.indices.shape[:num_prefix_dims]` `== var.shape[:num_prefix_dims]` And the operation performed can be expressed as: `var[i_1, ..., i_n, sparse_delta.indices[i_1, ..., i_n, j]] = sparse_delta.updates[ i_1, ..., i_n, j]` When sparse_delta.indices is a 1D tensor, this operation is equivalent to `scatter_update`. To avoid this operation one can looping over the first `ndims` of the variable and using `scatter_update` on the subtensors that result of slicing the first dimension. This is a valid option for `ndims = 1`, but less efficient than this implementation. Args: sparse_delta: `tf.IndexedSlices` to be assigned to this variable. use_locking: If `True`, use locking during the operation. name: the name of the operation. Returns: A `Tensor` that will hold the new value of this variable after the scattered subtraction has completed. Raises: TypeError: if `sparse_delta` is not an `IndexedSlices`. """ if not isinstance(sparse_delta, ops.IndexedSlices): raise TypeError("sparse_delta is not IndexedSlices: %s" % sparse_delta) return self._lazy_read(state_ops.batch_scatter_update( self, sparse_delta.indices, sparse_delta.values, use_locking=use_locking, name=name)) def scatter_nd_sub(self, indices, updates, name=None): """Applies sparse subtraction to individual values or slices in a Variable. `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. `indices` must be integer tensor, containing indices into `ref`. It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. The innermost dimension of `indices` (with length `K`) corresponds to indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th dimension of `ref`. `updates` is `Tensor` of rank `Q-1+P-K` with shape: ``` [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]. ``` For example, say we want to add 4 scattered elements to a rank-1 tensor to 8 elements. 
In Python, that update would look like this: ```python ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8]) indices = tf.constant([[4], [3], [1] ,[7]]) updates = tf.constant([9, 10, 11, 12]) op = ref.scatter_nd_sub(indices, updates) with tf.compat.v1.Session() as sess: print sess.run(op) ``` The resulting update to ref would look like this: [1, -9, 3, -6, -6, 6, 7, -4] See `tf.scatter_nd` for more details about how to make updates to slices. Args: indices: The indices to be used in the operation. updates: The values to be used in the operation. name: the name of the operation. Returns: A `Tensor` that will hold the new value of this variable after the scattered subtraction has completed. """ return self._lazy_read(gen_state_ops.resource_scatter_nd_sub( self.handle, indices, ops.convert_to_tensor(updates, self.dtype), name=name)) def scatter_nd_add(self, indices, updates, name=None): """Applies sparse addition to individual values or slices in a Variable. `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. `indices` must be integer tensor, containing indices into `ref`. It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. The innermost dimension of `indices` (with length `K`) corresponds to indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th dimension of `ref`. `updates` is `Tensor` of rank `Q-1+P-K` with shape: ``` [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]. ``` For example, say we want to add 4 scattered elements to a rank-1 tensor to 8 elements. In Python, that update would look like this: ```python ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8]) indices = tf.constant([[4], [3], [1] ,[7]]) updates = tf.constant([9, 10, 11, 12]) add = ref.scatter_nd_add(indices, updates) with tf.compat.v1.Session() as sess: print sess.run(add) ``` The resulting update to ref would look like this: [1, 13, 3, 14, 14, 6, 7, 20] See `tf.scatter_nd` for more details about how to make updates to slices. Args: indices: The indices to be used in the operation. updates: The values to be used in the operation. name: the name of the operation. Returns: A `Tensor` that will hold the new value of this variable after the scattered subtraction has completed. """ return self._lazy_read(gen_state_ops.resource_scatter_nd_add( self.handle, indices, ops.convert_to_tensor(updates, self.dtype), name=name)) def scatter_nd_update(self, indices, updates, name=None): """Applies sparse assignment to individual values or slices in a Variable. `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. `indices` must be integer tensor, containing indices into `ref`. It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. The innermost dimension of `indices` (with length `K`) corresponds to indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th dimension of `ref`. `updates` is `Tensor` of rank `Q-1+P-K` with shape: ``` [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]. ``` For example, say we want to add 4 scattered elements to a rank-1 tensor to 8 elements. In Python, that update would look like this: ```python ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8]) indices = tf.constant([[4], [3], [1] ,[7]]) updates = tf.constant([9, 10, 11, 12]) op = ref.scatter_nd_update(indices, updates) with tf.compat.v1.Session() as sess: print sess.run(op) ``` The resulting update to ref would look like this: [1, 11, 3, 10, 9, 6, 7, 12] See `tf.scatter_nd` for more details about how to make updates to slices. 
Args: indices: The indices to be used in the operation. updates: The values to be used in the operation. name: the name of the operation. Returns: A `Tensor` that will hold the new value of this variable after the scattered subtraction has completed. """ return self._lazy_read(gen_state_ops.resource_scatter_nd_update( self.handle, indices, ops.convert_to_tensor(updates, self.dtype), name=name)) def _strided_slice_assign(self, begin, end, strides, value, name, begin_mask, end_mask, ellipsis_mask, new_axis_mask, shrink_axis_mask): with _handle_graph(self.handle), self._assign_dependencies(): return self._lazy_read( gen_array_ops.resource_strided_slice_assign( ref=self.handle, begin=begin, end=end, strides=strides, value=ops.convert_to_tensor(value, dtype=self.dtype), name=name, begin_mask=begin_mask, end_mask=end_mask, ellipsis_mask=ellipsis_mask, new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask)) def __int__(self): if self.dtype != dtypes.int32 and self.dtype != dtypes.int64: raise TypeError("Non-integer variable can't be converted to integer.") return int(self.value().numpy()) def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False): del name if dtype is not None and not dtype.is_compatible_with(self.dtype): raise ValueError( "Incompatible type conversion requested to type {!r} for variable " "of type {!r}".format(dtype.name, self.dtype.name)) if as_ref: return self.read_value().op.inputs[0] else: return self.value() def __iadd__(self, unused_other): raise RuntimeError("Variable += value not supported. Use " "variable.assign_add(value) to modify the variable " "value and variable = variable + value to get a new " "Tensor object.") def __isub__(self, unused_other): raise RuntimeError("Variable -= value not supported. Use " "variable.assign_sub(value) to modify the variable " "value and variable = variable - value to get a new " "Tensor object.") def __imul__(self, unused_other): raise RuntimeError("Variable *= value not supported. Use " "`var.assign(var * value)` to modify the variable or " "`var = var * value` to get a new Tensor object.") def __idiv__(self, unused_other): raise RuntimeError("Variable /= value not supported. Use " "`var.assign(var / value)` to modify the variable or " "`var = var / value` to get a new Tensor object.") def __itruediv__(self, unused_other): raise RuntimeError("Variable /= value not supported. Use " "`var.assign(var / value)` to modify the variable or " "`var = var / value` to get a new Tensor object.") def __irealdiv__(self, unused_other): raise RuntimeError("Variable /= value not supported. Use " "`var.assign(var / value)` to modify the variable or " "`var = var / value` to get a new Tensor object.") def __ipow__(self, unused_other): raise RuntimeError("Variable **= value not supported. Use " "`var.assign(var ** value)` to modify the variable or " "`var = var ** value` to get a new Tensor object.") class ResourceVariable(BaseResourceVariable): """Variable based on resource handles. See the [Variables How To](https://tensorflow.org/guide/variables) for a high level overview. A `ResourceVariable` allows you to maintain state across subsequent calls to session.run. The `ResourceVariable` constructor requires an initial value for the variable, which can be a `Tensor` of any type and shape. The initial value defines the type and shape of the variable. After construction, the type and shape of the variable are fixed. The value can be changed using one of the assign methods. 
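  For example (a minimal sketch; the values are illustrative only):

  ```python
  v = tf.Variable(1.0, use_resource=True)
  v.assign(2.0)
  v.assign_add(0.5)  # v holds 2.5 once both assignments have run
  ```
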
Just like any `Tensor`, variables created with `tf.Variable(use_resource=True)` can be used as inputs for other Ops in the graph. Additionally, all the operators overloaded for the `Tensor` class are carried over to variables, so you can also add nodes to the graph by just doing arithmetic on variables. Unlike ref-based variable, a ResourceVariable has well-defined semantics. Each usage of a ResourceVariable in a TensorFlow graph adds a read_value operation to the graph. The Tensors returned by a read_value operation are guaranteed to see all modifications to the value of the variable which happen in any operation on which the read_value depends on (either directly, indirectly, or via a control dependency) and guaranteed to not see any modification to the value of the variable from operations that depend on the read_value operation. Updates from operations that have no dependency relationship to the read_value operation might or might not be visible to read_value. For example, if there is more than one assignment to a ResourceVariable in a single session.run call there is a well-defined value for each operation which uses the variable's value if the assignments and the read are connected by edges in the graph. Consider the following example, in which two writes can cause tf.Variable and tf.ResourceVariable to behave differently: ```python a = tf.Variable(1.0, use_resource=True) a.initializer.run() assign = a.assign(2.0) with tf.control_dependencies([assign]): b = a.read_value() with tf.control_dependencies([b]): other_assign = a.assign(3.0) with tf.control_dependencies([other_assign]): # Will print 2.0 because the value was read before other_assign ran. If # `a` was a tf.Variable instead, 2.0 or 3.0 could be printed. tf.compat.v1.Print(b, [b]).eval() ``` """ def __init__(self, # pylint: disable=super-init-not-called initial_value=None, trainable=None, collections=None, validate_shape=True, # pylint: disable=unused-argument caching_device=None, name=None, dtype=None, variable_def=None, import_scope=None, constraint=None, distribute_strategy=None, synchronization=None, aggregation=None, shape=None): """Creates a variable. Args: initial_value: A `Tensor`, or Python object convertible to a `Tensor`, which is the initial value for the Variable. Can also be a callable with no argument that returns the initial value when called. (Note that initializer functions from init_ops.py must first be bound to a shape before being used here.) trainable: If `True`, the default, also adds the variable to the graph collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as the default list of variables to use by the `Optimizer` classes. Defaults to `True`, unless `synchronization` is set to `ON_READ`, in which case it defaults to `False`. collections: List of graph collections keys. The new variable is added to these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`. validate_shape: Ignored. Provided for compatibility with tf.Variable. caching_device: Optional device string or function describing where the Variable should be cached for reading. Defaults to the Variable's device. If not `None`, caches on another device. Typical use is to cache on the device where the Ops using the Variable reside, to deduplicate copying through `Switch` and other conditional statements. name: Optional name for the variable. Defaults to `'Variable'` and gets uniquified automatically. dtype: If set, initial_value will be converted to the given type. 
If None, either the datatype will be kept (if initial_value is a Tensor) or float32 will be used (if it is a Python object convertible to a Tensor). variable_def: `VariableDef` protocol buffer. If not None, recreates the `ResourceVariable` object with its contents. `variable_def` and other arguments (except for import_scope) are mutually exclusive. import_scope: Optional `string`. Name scope to add to the ResourceVariable. Only used when `variable_def` is provided. constraint: An optional projection function to be applied to the variable after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints for layer weights). The function must take as input the unprojected Tensor representing the value of the variable and return the Tensor for the projected value (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. distribute_strategy: The tf.distribute.Strategy this variable is being created inside of. synchronization: Indicates when a distributed a variable will be aggregated. Accepted values are constants defined in the class `tf.VariableSynchronization`. By default the synchronization is set to `AUTO` and the current `DistributionStrategy` chooses when to synchronize. aggregation: Indicates how a distributed variable will be aggregated. Accepted values are constants defined in the class `tf.VariableAggregation`. shape: (optional) The shape of this variable. If None, the shape of `initial_value` will be used. When setting this argument to `tf.TensorShape(None)` (representing an unspecified shape), the variable can be assigned with values of different shapes. Raises: ValueError: If the initial value is not specified, or does not have a shape and `validate_shape` is `True`. @compatibility(eager) When Eager Execution is enabled, the default for the `collections` argument is `None`, which signifies that this `Variable` will not be added to any collections. @end_compatibility """ if variable_def: if initial_value is not None: raise ValueError("variable_def and initial_value are mutually " "exclusive.") if context.executing_eagerly(): raise ValueError("Creating ResourceVariable from variable_def is " "not supported when eager execution is enabled.") self._init_from_proto(variable_def, import_scope=import_scope) else: self._init_from_args( initial_value=initial_value, trainable=trainable, collections=collections, caching_device=caching_device, name=name, dtype=dtype, constraint=constraint, synchronization=synchronization, aggregation=aggregation, shape=shape, distribute_strategy=distribute_strategy) def _init_from_args(self, initial_value=None, trainable=None, collections=None, caching_device=None, name=None, dtype=None, constraint=None, synchronization=None, aggregation=None, distribute_strategy=None, shape=None): """Creates a variable. Args: initial_value: A `Tensor`, or Python object convertible to a `Tensor`, which is the initial value for the Variable. The initial value must have a shape specified unless `validate_shape` is set to False. Can also be a callable with no argument that returns the initial value when called. (Note that initializer functions from init_ops.py must first be bound to a shape before being used here.) trainable: If `True`, the default, also adds the variable to the graph collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as the default list of variables to use by the `Optimizer` classes. 
Defaults to `True`, unless `synchronization` is set to `ON_READ`, in which case it defaults to `False`. collections: List of graph collections keys. The new variable is added to these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`. caching_device: Optional device string or function describing where the Variable should be cached for reading. Defaults to the Variable's device. If not `None`, caches on another device. Typical use is to cache on the device where the Ops using the Variable reside, to deduplicate copying through `Switch` and other conditional statements. name: Optional name for the variable. Defaults to `'Variable'` and gets uniquified automatically. dtype: If set, initial_value will be converted to the given type. If None, either the datatype will be kept (if initial_value is a Tensor) or float32 will be used (if it is a Python object convertible to a Tensor). constraint: An optional projection function to be applied to the variable after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints for layer weights). The function must take as input the unprojected Tensor representing the value of the variable and return the Tensor for the projected value (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. synchronization: Indicates when a distributed a variable will be aggregated. Accepted values are constants defined in the class `tf.VariableSynchronization`. By default the synchronization is set to `AUTO` and the current `DistributionStrategy` chooses when to synchronize. aggregation: Indicates how a distributed variable will be aggregated. Accepted values are constants defined in the class `tf.VariableAggregation`. distribute_strategy: DistributionStrategy under which this variable was created. shape: (optional) The shape of this variable. If None, the shape of `initial_value` will be used. When setting this argument to `tf.TensorShape(None)` (representing an unspecified shape), the variable can be assigned with values of different shapes. Raises: ValueError: If the initial value is not specified, or does not have a shape and `validate_shape` is `True`. @compatibility(eager) When Eager Execution is enabled, variables are never added to collections. It is not implicitly added to the `GLOBAL_VARIABLES` or `TRAINABLE_VARIABLES` collections, and the `collections` argument is ignored. @end_compatibility """ synchronization, aggregation, trainable = ( variables.validate_synchronization_aggregation_trainable( synchronization, aggregation, trainable, name)) if initial_value is None: raise ValueError("initial_value must be specified.") init_from_fn = callable(initial_value) if isinstance(initial_value, ops.Tensor) and hasattr( initial_value, "graph") and initial_value.graph.building_function: raise ValueError("Tensor-typed variable initializers must either be " "wrapped in an init_scope or callable " "(e.g., `tf.Variable(lambda : " "tf.truncated_normal([10, 40]))`) when building " "functions. Please file a feature request if this " "restriction inconveniences you.") if collections is None: collections = [ops.GraphKeys.GLOBAL_VARIABLES] if not isinstance(collections, (list, tuple, set)): raise ValueError( "collections argument to Variable constructor must be a list, tuple, " "or set. 
Got %s of type %s" % (collections, type(collections))) if constraint is not None and not callable(constraint): raise ValueError("The `constraint` argument must be a callable.") if isinstance(initial_value, trackable.CheckpointInitialValue): self._maybe_initialize_trackable() self._update_uid = initial_value.checkpoint_position.restore_uid initial_value = initial_value.wrapped_value if trainable and ops.GraphKeys.TRAINABLE_VARIABLES not in collections: collections = list(collections) + [ops.GraphKeys.TRAINABLE_VARIABLES] with ops.init_scope(): self._in_graph_mode = not context.executing_eagerly() with ops.name_scope(name, "Variable", [] if init_from_fn else [initial_value]) as name: # pylint: disable=protected-access handle_name = ops.name_from_scope_name(name) if self._in_graph_mode: shared_name = handle_name unique_id = shared_name else: # When in eager mode use a uid for the shared_name, to prevent # accidental sharing. unique_id = "%s_%d" % (handle_name, ops.uid()) shared_name = context.shared_name() # Use attr_scope and device(None) to simulate the behavior of # colocate_with when the variable we want to colocate with doesn't # yet exist. device_context_manager = ( ops.device if self._in_graph_mode else ops.NullContextmanager) attr = attr_value_pb2.AttrValue( list=attr_value_pb2.AttrValue.ListValue( s=[compat.as_bytes("loc:@%s" % handle_name)])) with ops.get_default_graph()._attr_scope({"_class": attr}): with ops.name_scope("Initializer"), device_context_manager(None): initial_value = ops.convert_to_tensor( initial_value() if init_from_fn else initial_value, name="initial_value", dtype=dtype) # Don't use `shape or initial_value.shape` since TensorShape has # overridden `__bool__`. shape = shape if shape is not None else initial_value.shape handle = eager_safe_variable_handle( initial_value=initial_value, shape=shape, shared_name=shared_name, name=name, graph_mode=self._in_graph_mode) # pylint: disable=protected-access if (self._in_graph_mode and initial_value is not None and initial_value.op._get_control_flow_context() is not None): raise ValueError( "Initializer for variable %s is from inside a control-flow " "construct, such as a loop or conditional. When creating a " "variable inside a loop or conditional, use a lambda as the " "initializer." % name) # pylint: enable=protected-access dtype = initial_value.dtype.base_dtype if self._in_graph_mode: with ops.name_scope("IsInitialized"): is_initialized_op = ( gen_resource_variable_ops.var_is_initialized_op(handle)) if initial_value is not None: with ops.name_scope("Assign") as n, ops.colocate_with(handle): # pylint: disable=protected-access initializer_op = ( gen_resource_variable_ops.assign_variable_op( handle, variables._try_guard_against_uninitialized_dependencies( name, initial_value), name=n)) # pylint: enable=protected-access with ops.name_scope("Read"), ops.colocate_with(handle): # Manually assign reads to the handle's device to avoid log # messages. with ops.device(handle.device): value = gen_resource_variable_ops.read_variable_op(handle, dtype) _maybe_set_handle_data(dtype, handle, value) graph_element = value if caching_device is not None: # Variables may be created in a tf.device() or ops.colocate_with() # context. At the same time, users would expect caching device to # be independent of this context, and/or would not expect the # current device context to be merged with the caching device # spec. Therefore we reset the colocation stack before creating # the cached value. 
Note that resetting the colocation stack will # also reset the device stack. with ops.colocate_with(None, ignore_existing=True): with ops.device(caching_device): cached_value = array_ops.identity(value) else: cached_value = None else: gen_resource_variable_ops.assign_variable_op(handle, initial_value) is_initialized_op = None initializer_op = None graph_element = None if caching_device: with ops.device(caching_device): cached_value = gen_resource_variable_ops.read_variable_op( handle, dtype) _maybe_set_handle_data(dtype, handle, cached_value) else: cached_value = None if not context.executing_eagerly(): # Eager variables are only added to collections if they are part of an # eager variable store (otherwise in an interactive session they would # hog memory and cause OOM). This is done in ops/variable_scope.py. ops.add_to_collections(collections, self) elif ops.GraphKeys.GLOBAL_STEP in collections: ops.add_to_collections(ops.GraphKeys.GLOBAL_STEP, self) initial_value = initial_value if self._in_graph_mode else None super(ResourceVariable, self).__init__( trainable=trainable, shape=shape, dtype=dtype, handle=handle, synchronization=synchronization, constraint=constraint, aggregation=aggregation, distribute_strategy=distribute_strategy, name=name, unique_id=unique_id, handle_name=handle_name, graph_element=graph_element, initial_value=initial_value, initializer_op=initializer_op, is_initialized_op=is_initialized_op, cached_value=cached_value) def _init_from_proto(self, variable_def, import_scope=None): """Initializes from `VariableDef` proto.""" # Note that init_from_proto is currently not supported in Eager mode. assert not context.executing_eagerly() self._in_graph_mode = True assert isinstance(variable_def, variable_pb2.VariableDef) if not variable_def.is_resource: raise ValueError("Trying to restore Variable as ResourceVariable.") # Create from variable_def. g = ops.get_default_graph() self._handle = g.as_graph_element( ops.prepend_name_scope( variable_def.variable_name, import_scope=import_scope)) self._shape = tensor_shape.TensorShape( self._handle.op.get_attr("shape")) self._handle_name = self._handle.name self._unique_id = self._handle_name self._initializer_op = g.as_graph_element( ops.prepend_name_scope( variable_def.initializer_name, import_scope=import_scope)) # Check whether initial_value_name exists for backwards compatibility. if (hasattr(variable_def, "initial_value_name") and variable_def.initial_value_name): self._initial_value = g.as_graph_element( ops.prepend_name_scope(variable_def.initial_value_name, import_scope=import_scope)) else: self._initial_value = None synchronization, aggregation, trainable = ( variables.validate_synchronization_aggregation_trainable( variable_def.synchronization, variable_def.aggregation, variable_def.trainable, variable_def.variable_name)) self._synchronization = synchronization self._aggregation = aggregation self._trainable = trainable if variable_def.snapshot_name: snapshot = g.as_graph_element( ops.prepend_name_scope( variable_def.snapshot_name, import_scope=import_scope)) if snapshot.op.type != "ReadVariableOp": self._cached_value = snapshot else: self._cached_value = None while snapshot.op.type != "ReadVariableOp": snapshot = snapshot.op.inputs[0] self._graph_element = snapshot else: self._cached_value = None # Legacy case for protos without the snapshot name; assume it's the # following. 
self._graph_element = g.get_tensor_by_name( self._handle.op.name + "/Read/ReadVariableOp:0") if variable_def.HasField("save_slice_info_def"): self._save_slice_info = variables.Variable.SaveSliceInfo( save_slice_info_def=variable_def.save_slice_info_def, import_scope=import_scope) else: self._save_slice_info = None self._caching_device = None self._dtype = dtypes.as_dtype(self._handle.op.get_attr("dtype")) self._constraint = None class UninitializedVariable(BaseResourceVariable): """A variable with no initializer.""" def __init__( # pylint: disable=super-init-not-called self, trainable=None, caching_device=None, name=None, shape=None, dtype=None, constraint=None, synchronization=None, aggregation=None, extra_handle_data=None, distribute_strategy=None, **unused_kwargs): """Creates the variable handle. Args: trainable: If `True`, GradientTapes automatically watch uses of this Variable. caching_device: Optional device string or function describing where the Variable should be cached for reading. Defaults to the Variable's device. If not `None`, caches on another device. Typical use is to cache on the device where the Ops using the Variable reside, to deduplicate copying through `Switch` and other conditional statements. name: Optional name for the variable. Defaults to `'Variable'` and gets uniquified automatically. shape: The variable's shape. dtype: The variable's dtype. constraint: An optional projection function to be applied to the variable after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints for layer weights). The function must take as input the unprojected Tensor representing the value of the variable and return the Tensor for the projected value (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. synchronization: Indicates when a distributed a variable will be aggregated. Accepted values are constants defined in the class `tf.VariableSynchronization`. By default the synchronization is set to `AUTO` and the current `DistributionStrategy` chooses when to synchronize. aggregation: Indicates how a distributed variable will be aggregated. Accepted values are constants defined in the class `tf.VariableAggregation`. extra_handle_data: Optional, another resource handle or Tensor with handle data to merge with `shape` and `dtype`. distribute_strategy: The tf.distribute.Strategy this variable is being created inside of. """ with ops.init_scope(): self._in_graph_mode = not context.executing_eagerly() with ops.init_scope(): with ops.name_scope(name, "Variable") as name: handle_name = ops.name_from_scope_name(name) if self._in_graph_mode: shared_name = handle_name unique_id = shared_name else: unique_id = "%s_%d" % (handle_name, ops.uid()) shared_name = context.shared_name(unique_id) handle = variable_handle_from_shape_and_dtype( shape=shape, dtype=dtype, shared_name=shared_name, name=name, graph_mode=self._in_graph_mode, extra_handle_data=extra_handle_data) if not context.executing_eagerly(): with ops.name_scope("Read"), ops.colocate_with(handle): # Manually assign reads to the handle's device to avoid log # messages. 
with ops.device(handle.device): value = gen_resource_variable_ops.read_variable_op(handle, dtype) _maybe_set_handle_data(dtype, handle, value) graph_element = value ops.add_to_collection(ops.GraphKeys.GLOBAL_VARIABLES, self) # Do *not* add to TRAINABLE_VARIABLES here, even if self._trainable, # because retraining or frozen use of imported SavedModels is # controlled at higher levels of model building. else: graph_element = None super(UninitializedVariable, self).__init__( distribute_strategy=distribute_strategy, shape=shape, dtype=dtype, unique_id=unique_id, handle_name=handle_name, constraint=constraint, handle=handle, graph_element=graph_element, trainable=trainable, synchronization=synchronization, aggregation=aggregation) pywrap_tensorflow.TFE_Py_RegisterResourceVariableType(ResourceVariable) math_ops._resource_variable_type = ResourceVariable # pylint: disable=protected-access def _dense_var_to_tensor(var, dtype=None, name=None, as_ref=False): return var._dense_var_to_tensor(dtype=dtype, name=name, as_ref=as_ref) # pylint: disable=protected-access # Register a conversion function which reads the value of the variable, # allowing instances of the class to be used as tensors. ops.register_tensor_conversion_function(BaseResourceVariable, _dense_var_to_tensor) ops.register_dense_tensor_like_type(BaseResourceVariable) class _UnreadVariable(BaseResourceVariable): """Represents a future for a read of a variable. Pretends to be the tensor if anyone looks. """ def __init__(self, handle, dtype, shape, in_graph_mode, deleter, parent_op, unique_id): if isinstance(handle, ops.EagerTensor): handle_name = "" else: handle_name = handle.name # Only create a graph_element if we're in session.run-land as only # session.run requires a preexisting tensor to evaluate. Otherwise we can # avoid accidentally reading the variable. 
if (context.executing_eagerly() or ops.get_default_graph()._building_function): # pylint: disable=protected-access graph_element = None else: with ops.control_dependencies([parent_op]): graph_element = gen_resource_variable_ops.read_variable_op( handle, dtype) _maybe_set_handle_data(dtype, handle, graph_element) super(_UnreadVariable, self).__init__( handle=handle, shape=shape, handle_name=handle_name, unique_id=unique_id, dtype=dtype, handle_deleter=deleter, graph_element=graph_element) self._parent_op = parent_op @property def name(self): if self._in_graph_mode: return self._parent_op.name else: return "UnreadVariable" def value(self): return self._read_variable_op() def read_value(self): return self._read_variable_op() def _read_variable_op(self): with ops.control_dependencies([self._parent_op]): result = gen_resource_variable_ops.read_variable_op(self._handle, self._dtype) _maybe_set_handle_data(self._dtype, self._handle, result) return result @property def op(self): """The op for this variable.""" return self._parent_op ops.register_dense_tensor_like_type(_UnreadVariable) @ops.RegisterGradient("ReadVariableOp") def _ReadGrad(_, grad): """Gradient for read op.""" return grad def variable_shape(handle, out_type=dtypes.int32): if getattr( handle, "_handle_data", None) is None or not handle._handle_data.is_set: # pylint: disable=protected-access return gen_resource_variable_ops.variable_shape(handle, out_type=out_type) shape_proto = handle._handle_data.shape_and_type[0].shape # pylint: disable=protected-access if shape_proto.unknown_rank or any(x.size == -1 for x in shape_proto.dim): return gen_resource_variable_ops.variable_shape(handle, out_type=out_type) return constant_op.constant([x.size for x in shape_proto.dim], dtype=out_type) @ops.RegisterGradient("ResourceGather") def _GatherGrad(op, grad): """Gradient for gather op.""" # Build appropriately shaped IndexedSlices handle = op.inputs[0] indices = op.inputs[1] params_shape = variable_shape(handle) size = array_ops.expand_dims(array_ops.size(indices), 0) values_shape = array_ops.concat([size, params_shape[1:]], 0) values = array_ops.reshape(grad, values_shape) indices = array_ops.reshape(indices, size) return (ops.IndexedSlices(values, indices, params_shape), None) def _to_proto_fn(v, export_scope=None): """Converts Variable and ResourceVariable to VariableDef for collections.""" return v.to_proto(export_scope=export_scope) def _from_proto_fn(v, import_scope=None): """Creates Variable or ResourceVariable from VariableDef as needed.""" if v.is_resource: return ResourceVariable.from_proto(v, import_scope=import_scope) return variables.Variable.from_proto(v, import_scope=import_scope) ops.register_proto_function( ops.GraphKeys.GLOBAL_VARIABLES, proto_type=variable_pb2.VariableDef, to_proto=_to_proto_fn, from_proto=_from_proto_fn) ops.register_proto_function( ops.GraphKeys.TRAINABLE_VARIABLES, proto_type=variable_pb2.VariableDef, to_proto=_to_proto_fn, from_proto=_from_proto_fn) ops.register_proto_function( ops.GraphKeys.MOVING_AVERAGE_VARIABLES, proto_type=variable_pb2.VariableDef, to_proto=_to_proto_fn, from_proto=_from_proto_fn) ops.register_proto_function( ops.GraphKeys.LOCAL_VARIABLES, proto_type=variable_pb2.VariableDef, to_proto=_to_proto_fn, from_proto=_from_proto_fn) ops.register_proto_function( ops.GraphKeys.MODEL_VARIABLES, proto_type=variable_pb2.VariableDef, to_proto=_to_proto_fn, from_proto=_from_proto_fn) ops.register_proto_function( ops.GraphKeys.GLOBAL_STEP, proto_type=variable_pb2.VariableDef, to_proto=_to_proto_fn, 
from_proto=_from_proto_fn) ops.register_proto_function( ops.GraphKeys.METRIC_VARIABLES, proto_type=variable_pb2.VariableDef, to_proto=_to_proto_fn, from_proto=_from_proto_fn) def is_resource_variable(var): """Returns True if `var` is to be considered a ResourceVariable.""" return isinstance(var, BaseResourceVariable) or hasattr( var, "_should_act_as_resource_variable") def copy_to_graph_uninitialized(var): """Copies an existing variable to a new graph, with no initializer.""" # Like ResourceVariable.__deepcopy__, but does not set an initializer on the # new variable. # pylint: disable=protected-access new_variable = UninitializedVariable( trainable=var.trainable, constraint=var._constraint, shape=var.shape, dtype=var.dtype, name=var._shared_name, synchronization=var.synchronization, aggregation=var.aggregation, extra_handle_data=var.handle) new_variable._maybe_initialize_trackable() # pylint: enable=protected-access return new_variable ops.NotDifferentiable("Assert") ops.NotDifferentiable("VarIsInitializedOp") ops.NotDifferentiable("VariableShape")
tensorflow-master
tensorflow/python/ops/resource_variable_ops.py
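For reference, a minimal usage sketch (not part of the file above; it assumes a TF 2.x install with eager execution) of what this module provides: `tf.Variable` creates a resource variable, the registered tensor-conversion function lets it be used directly in ops, and `is_resource_variable()` identifies such variables.

```python
import tensorflow as tf
from tensorflow.python.ops import resource_variable_ops

# In TF 2.x, tf.Variable builds a ResourceVariable under the hood.
v = tf.Variable([1.0, 2.0, 3.0])

# The registered conversion function reads the variable's current value,
# so the variable can participate in ordinary ops like a tensor.
doubled = v * 2.0

print(resource_variable_ops.is_resource_variable(v))  # expected: True
print(doubled.numpy())                                # expected: [2. 4. 6.]
```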
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Python ops defined in nn_grad.py.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import gen_nn_ops from tensorflow.python.ops import gradient_checker from tensorflow.python.ops import gradients_impl from tensorflow.python.ops import nn_grad # pylint: disable=unused-import from tensorflow.python.ops import nn_impl from tensorflow.python.ops import nn_ops from tensorflow.python.platform import test class Relu6OpTest(test.TestCase): @test_util.run_deprecated_v1 def testRelu6GradGrad(self): inputs = constant_op.constant( [[-2, -1, 1, 3], [5, 7, 8, 9]], dtype=dtypes.float32) x_init_value = np.array([[-3.5, -1.5, 2, 4], [4.5, 7.5, 8.5, 11]]) r = nn_ops.relu6(inputs) r_g = gradients_impl.gradients(r, inputs)[0] with self.cached_session(): error = gradient_checker.compute_gradient_error( inputs, inputs.get_shape().as_list(), r_g, r_g.get_shape().as_list(), x_init_value=x_init_value) self.assertLess(error, 1e-4) class Conv2dOpTest(test.TestCase): def run_test(self, x, y): with self.test_session(): error = gradient_checker.compute_gradient_error(x, x.get_shape().as_list(), y, y.get_shape().as_list()) self.assertLess(error, 1e-3) @test_util.run_deprecated_v1 def testConv2dGradWRTInput(self): x = array_ops.placeholder( dtype=dtypes.float32, shape=[1, 4, 4, 3], name='input') f = constant_op.constant([0.5], dtype=dtypes.float32, shape=[2, 2, 3, 2], name='filter') y = nn_ops.conv2d(x, f, [1, 1, 1, 1], 'SAME') self.run_test(x, y) @test_util.run_deprecated_v1 def testConv2dGradWRTFilter(self): x = constant_op.constant([0.5], dtype=dtypes.float32, shape=[1, 4, 4, 3], name='input') f = array_ops.placeholder( dtype=dtypes.float32, shape=[2, 2, 3, 2], name='filter') y = nn_ops.conv2d(x, f, [1, 1, 1, 1], 'SAME') self.run_test(f, y) @test_util.run_deprecated_v1 def testConv2dBackpropFilterGrad(self): x = array_ops.placeholder( dtype=dtypes.float32, shape=[1, 4, 4, 3], name='input') f = constant_op.constant([0.5], dtype=dtypes.float32, shape=[2, 2, 3, 2], name='filter') strides = [1, 1, 1, 1] padding = 'SAME' out = nn_impl.depthwise_conv2d(x, f, strides, padding) grad_wrt_input = gradients_impl.gradients(out, x)[0] self.run_test(f, grad_wrt_input) grad_wrt_filter = gradients_impl.gradients(out, f)[0] self.run_test(x, grad_wrt_filter) class DepthwiseConv2dTest(test.TestCase): def run_test(self, x, y): with self.test_session(): error = gradient_checker.compute_gradient_error(x, x.get_shape().as_list(), y, y.get_shape().as_list()) self.assertLess(error, 1e-3) @test_util.run_deprecated_v1 def testDepthwiseConv2dGradWRTInput(self): x = 
array_ops.placeholder( dtype=dtypes.float32, shape=[1, 4, 4, 3], name='input') f = constant_op.constant([0.5], dtype=dtypes.float32, shape=[2, 2, 3, 2], name='filter') strides = [1, 1, 1, 1] padding = 'SAME' y = nn_impl.depthwise_conv2d(x, f, strides, padding) self.run_test(x, y) @test_util.run_deprecated_v1 def testDepthwiseConv2dGradWRTFilter(self): x = constant_op.constant([0.5], dtype=dtypes.float32, shape=[1, 4, 4, 3], name='input') f = array_ops.placeholder( dtype=dtypes.float32, shape=[2, 2, 3, 2], name='filter') strides = [1, 1, 1, 1] padding = 'SAME' y = nn_impl.depthwise_conv2d(x, f, strides, padding) self.run_test(f, y) @test_util.run_deprecated_v1 def testDepthwiseConv2dBackpropFilterGrad(self): x = array_ops.placeholder( dtype=dtypes.float32, shape=[1, 4, 4, 3], name='input') f = constant_op.constant([0.5], dtype=dtypes.float32, shape=[2, 2, 3, 2], name='filter') strides = [1, 1, 1, 1] padding = 'SAME' out = nn_impl.depthwise_conv2d(x, f, strides, padding) grad_wrt_input = gradients_impl.gradients(out, x)[0] self.run_test(f, grad_wrt_input) grad_wrt_filter = gradients_impl.gradients(out, f)[0] self.run_test(x, grad_wrt_filter) class EluGradOpTest(test.TestCase): @test_util.run_deprecated_v1 def testEluGradGradWRTgrad_ys(self): inputs = constant_op.constant( [[-2, -1, 1, 3], [5, 7, 8, 9]], dtype=dtypes.float32) dummy = constant_op.constant( [[3, 1, -1, -2], [9, 8, 7, 6]], dtype=dtypes.float32) elu = gen_nn_ops.elu(inputs) elu_grad = gradients_impl.gradients(elu, inputs, grad_ys=dummy)[0] with self.cached_session(): error = gradient_checker.compute_gradient_error( dummy, dummy.shape, elu_grad, elu_grad.shape) self.assertLess(error, 1e-4) @test_util.run_deprecated_v1 def testEluGradGradWRTinputs(self): inputs = constant_op.constant( [[-2, -1, 1, 3], [5, 7, 8, 9]], dtype=dtypes.float32) dummy = constant_op.constant( [[3, 1, -1, -2], [9, 8, 7, 6]], dtype=dtypes.float32) elu = gen_nn_ops.elu(inputs) elu_grad = gradients_impl.gradients(elu, inputs, grad_ys=dummy)[0] with self.cached_session(): error = gradient_checker.compute_gradient_error( inputs, inputs.shape, elu_grad, elu_grad.shape) self.assertLess(error, 1e-4) class SeluGradOpTest(test.TestCase): @test_util.run_deprecated_v1 def testSeluGradGradWRTgrad_ys(self): inputs = constant_op.constant( [[-2, -1, 1, 3], [5, 7, 8, 9]], dtype=dtypes.float32) dummy = constant_op.constant( [[3, 1, -1, -2], [9, 8, 7, 6]], dtype=dtypes.float32) selu = gen_nn_ops.selu(inputs) selu_grad = gradients_impl.gradients(selu, inputs, grad_ys=dummy)[0] with self.cached_session(): error = gradient_checker.compute_gradient_error( dummy, dummy.shape, selu_grad, selu_grad.shape) self.assertLess(error, 1e-4) @test_util.run_deprecated_v1 def testSeluGradGradWRTinputs(self): inputs = constant_op.constant( [[-2, -1, 1, 3], [5, 7, 8, 9]], dtype=dtypes.float32) dummy = constant_op.constant( [[3, 1, -1, -2], [9, 8, 7, 6]], dtype=dtypes.float32) selu = gen_nn_ops.selu(inputs) selu_grad = gradients_impl.gradients(selu, inputs, grad_ys=dummy)[0] with self.cached_session(): error = gradient_checker.compute_gradient_error( inputs, inputs.shape, selu_grad, selu_grad.shape) self.assertLess(error, 1e-4) if __name__ == "__main__": test.main()
tensorflow-master
tensorflow/python/ops/nn_grad_test.py
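A small standalone sketch of the numeric-vs-analytic gradient-check pattern these tests rely on, applied here to `tf.nn.relu` purely as an illustration; it assumes TF 1.x-style graph mode via `tf.compat.v1` and uses the public `tf.test.compute_gradient_error` wrapper rather than the internal `gradient_checker` module.

```python
import numpy as np
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

x = tf.placeholder(tf.float32, shape=[1, 4], name="x")
y = tf.nn.relu(x)

# Evaluate at points away from the kink at 0 so the numeric Jacobian
# is well defined for the default finite-difference delta.
x_init = np.array([[-2.5, -1.5, 1.5, 3.5]], dtype=np.float32)

with tf.Session():
    error = tf.test.compute_gradient_error(
        x, [1, 4], y, [1, 4], x_init_value=x_init)
    # For a piecewise-linear op like relu the error should be close to 0.
    print("max |analytic - numeric| gradient error:", error)
```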
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Operations for generating random numbers.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import sys import numpy as np from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import gen_stateful_random_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import variables from tensorflow.python.training.tracking import tracking from tensorflow.python.util.tf_export import tf_export # A seed for random ops (stateful and stateless) will always be 1024 # bits, all of which will be sent to the C++ code. The actual C++ # implementation of some algorithms may only use a lower part of the bits. MAX_INT64 = 2**63 - 1 MIN_INT64 = -(2**63) UINT64_SPAN = 2**64 # 'Variable' doesn't support uint32 or uint64 yet (due to reasons explained in # b/111604096 and cl/171681867), so I use signed int here. I choose int64 # instead of int32 here because `VarHandleOp` doesn't support int32 on GPU. SEED_TYPE = "int64" SEED_MIN = MIN_INT64 SEED_MAX = MAX_INT64 SEED_UINT_SPAN = UINT64_SPAN SEED_TYPE_BITS = 64 SEED_BIT_MASK = 0xFFFFFFFFFFFFFFFF SEED_SIZE = 16 # in units of SEED_TYPE STATE_TYPE = SEED_TYPE ALGORITHM_TYPE = STATE_TYPE RNG_ALG_PHILOX = 1 RNG_ALG_THREEFRY = 2 DEFAULT_ALGORITHM = RNG_ALG_PHILOX PHILOX_STATE_SIZE = 3 THREEFRY_STATE_SIZE = 2 def non_deterministic_ints(shape, dtype=dtypes.int64): """Non-deterministically generates some integers. This op may use some OS-provided source of non-determinism (e.g. an RNG), so each execution will give different results. Args: shape: the shape of the result. dtype: (optional) the dtype of the result. Returns: a tensor whose element values are non-deterministically chosen. """ return gen_stateful_random_ops.non_deterministic_ints( shape=shape, dtype=dtype) def _uint_to_int(n): if n > SEED_MAX: n = n - SEED_UINT_SPAN return n def _make_1d_state(state_size, seed): """Makes a 1-D RNG state. Args: state_size: an integer. seed: an integer or 1-D tensor. Returns: a 1-D tensor of shape [state_size] and dtype STATE_TYPE. """ int_types = (int,) if sys.version_info >= (3, 0) else (int, long) if isinstance(seed, int_types): # chop the Python integer (infinite precision) into chunks of SEED_TYPE ls = [] for _ in range(state_size): ls.append(seed & SEED_BIT_MASK) seed >>= SEED_TYPE_BITS seed = ls # to avoid overflow error from np.asarray seed = list(map(_uint_to_int, seed)) seed = np.asarray(seed, dtype=STATE_TYPE) if len(seed.shape) != 1: raise ValueError( "seed should only have one dimension; got shape: %s" % seed.shape) seed = seed[0:state_size] # Padding with zeros on the *left* if too short. 
Padding on the right would # cause a small seed to be used as the "counter" while the "key" is always # zero (for counter-based RNG algorithms), because in the current memory # layout counter is stored before key. In such a situation two RNGs with # two different small seeds may generate overlapping outputs. seed_size = seed.shape[0] if seed_size < state_size: seed = np.pad( seed, [(state_size - seed_size, 0)], mode="constant", constant_values=0) assert seed.shape == (state_size,), "Wrong seed.shape: %s" % seed.shape return seed def _get_state_size(alg): if alg == RNG_ALG_PHILOX: return PHILOX_STATE_SIZE elif alg == RNG_ALG_THREEFRY: return THREEFRY_STATE_SIZE else: raise ValueError("Unsupported algorithm id: %s" % alg) def _make_state_from_seed(seed, alg): return _make_1d_state(_get_state_size(alg), seed) @tf_export("random.experimental.create_rng_state") def create_rng_state(seed, algorithm): """Creates a RNG state. Args: seed: an integer or 1-D tensor. algorithm: an integer representing the RNG algorithm. Returns: a 1-D tensor whose size depends on the algorithm. """ return _make_state_from_seed(seed, algorithm) def _shape_tensor(shape): """Convert to an int32 or int64 tensor, defaulting to int64 if empty.""" if isinstance(shape, (tuple, list)) and not shape: dtype = dtypes.int64 else: dtype = None return ops.convert_to_tensor(shape, dtype=dtype, name="shape") def _convert_to_state_tensor(t): if isinstance(t, list): # to avoid out-of-range error from ops.convert_to_tensor t = list(map(_uint_to_int, t)) return ops.convert_to_tensor(t, dtype=STATE_TYPE) @tf_export("random.experimental.Generator") class Generator(tracking.AutoTrackable): """Random-number generator. It uses Variable to manage its internal state, and allows choosing an Random-Number-Generation (RNG) algorithm. CPU, GPU and TPU with the same algorithm and seed will generate the same integer random numbers. Float-point results (such as the output of `normal`) may have small numerical discrepancies between CPU and GPU. """ def __init__(self, copy_from=None, state=None, alg=None): """Creates a generator. The new generator will be initialized by one of the following ways, with decreasing precedence: (1) If `copy_from` is not None, the new generator is initialized by copying information from another generator. (3) If `state` and `alg` are not None (they must be set together), the new generator is initialized by a state. Args: copy_from: a generator to be copied from. state: a vector of dtype STATE_TYPE representing the initial state of the RNG, whose length and semantics are algorithm-specific. alg: the RNG algorithm. Possible values are RNG_ALG_PHILOX for the Philox algorithm and RNG_ALG_THREEFRY for the ThreeFry algorithm (see paper 'Parallel Random Numbers: As Easy as 1, 2, 3' [https://www.thesalmons.org/john/random123/papers/random123sc11.pdf]). """ if copy_from is not None: # All other arguments should be None assert (alg or state) is None self._state_var = variables.Variable(copy_from.state, dtype=STATE_TYPE, trainable=False) self._alg_var = copy_from.algorithm else: assert alg is not None and state is not None state = _convert_to_state_tensor(state) state.shape.assert_is_compatible_with([_get_state_size(alg)]) self._state_var = variables.Variable(state, dtype=STATE_TYPE, trainable=False) self._alg_var = alg @classmethod def from_state(cls, state, alg): """Creates a generator from a state. See `__init__` for description of `state` and `alg`. Args: state: the new state. alg: the RNG algorithm. Returns: The new generator. 
""" return cls(alg=alg, state=state) @classmethod def from_seed(cls, seed, alg=None): """Creates a generator from a seed. A seed is a 1024-bit unsigned integer represented either as a Python integer or a vector of integers. Seeds shorter than 1024-bit will be padded. The padding, the internal structure of a seed and the way a seed is converted to a state are all opaque (unspecified). The only semantics specification of seeds is that two different seeds are likely to produce two independent generators (but no guarantee). Args: seed: the seed for the RNG. alg: (optional) the RNG algorithm. If None, it will be auto-selected. See `__init__` for its possible values. Returns: The new generator. """ if alg is None: # TODO(wangpeng): more sophisticated algorithm selection alg = DEFAULT_ALGORITHM state = create_rng_state(seed, alg) return cls(state=state, alg=alg) @classmethod def from_non_deterministic_state(cls, alg=None): """Creates a generator by non-deterministically initializing its state. The source of the non-determinism will be platform- and time-dependent. Args: alg: (optional) the RNG algorithm. If None, it will be auto-selected. See `__init__` for its possible values. Returns: The new generator. """ if alg is None: # TODO(wangpeng): more sophisticated algorithm selection alg = DEFAULT_ALGORITHM state = non_deterministic_ints(shape=[_get_state_size(alg)], dtype=SEED_TYPE) return cls(state=state, alg=alg) @classmethod def from_key_counter(cls, key, counter, alg): """Creates a generator from a key and a counter. This constructor only applies if the algorithm is a counter-based algorithm. See method `key` for the meaning of "key" and "counter". Args: key: the key for the RNG, a scalar of type STATE_TYPE. counter: a vector of dtype STATE_TYPE representing the initial counter for the RNG, whose length is algorithm-specific., alg: the RNG algorithm. If None, it will be auto-selected. See `__init__` for its possible values. Returns: The new generator. """ counter = _convert_to_state_tensor(counter) key = _convert_to_state_tensor(key) counter.shape.assert_is_compatible_with([_get_state_size(alg) - 1]) key.shape.assert_is_compatible_with([]) key = array_ops.reshape(key, [1]) state = array_ops.concat([counter, key], 0) return cls(state=state, alg=alg) def reset(self, state): """Resets the generator by a new state. See `__init__` for the meaning of "state". Args: state: the new state. """ state = _convert_to_state_tensor(state) state.shape.assert_is_compatible_with([_get_state_size(self.algorithm)]) self._state_var.assign(state) def reset_from_seed(self, seed): """Resets the generator by a new seed. See `from_seed` for the meaning of "seed". Args: seed: the new seed. """ state = create_rng_state(seed, self.algorithm) self._state_var.assign(state) def reset_from_key_counter(self, key, counter): """Resets the generator by a new key-counter pair. See `from_key_counter` for the meaning of "key" and "counter". Args: key: the new key. counter: the new counter. 
""" counter = _convert_to_state_tensor(counter) key = _convert_to_state_tensor(key) counter.shape.assert_is_compatible_with( [_get_state_size(self.algorithm) - 1]) key.shape.assert_is_compatible_with([]) key = array_ops.reshape(key, [1]) state = array_ops.concat([counter, key], 0) self._state_var.assign(state) @property def state(self): """The internal state of the RNG.""" return self._state_var @property def algorithm(self): """The RNG algorithm.""" return self._alg_var def _standard_normal(self, shape, dtype): return gen_stateful_random_ops.stateful_standard_normal_v2( self.state.handle, self.algorithm, shape, dtype=dtype) @property def key(self): """The 'key' part of the state of a counter-based RNG. For a counter-base RNG algorithm such as Philox and ThreeFry (as described in paper 'Parallel Random Numbers: As Easy as 1, 2, 3' [https://www.thesalmons.org/john/random123/papers/random123sc11.pdf]), the RNG state consists of two parts: counter and key. The output is generated via the formula: output=hash(key, counter), i.e. a hashing of the counter parametrized by the key. Two RNGs with two different keys can be thought as generating two independent random-number streams (a stream is formed by increasing the counter). Returns: A scalar which is the 'key' part of the state, if the RNG algorithm is counter-based; otherwise it raises a ValueError. """ alg = self.algorithm if alg == RNG_ALG_PHILOX or alg == RNG_ALG_THREEFRY: return self._state_var[-1] else: raise ValueError("Unsupported algorithm id: %s" % alg) def skip(self, delta): """Advance the counter of a counter-based RNG. Args: delta: the amount of advancement. The state of the RNG after `skip(n)` will be the same as that after `normal([n])` (or any other distribution). The actual increment added to the counter is an unspecified implementation detail. """ gen_stateful_random_ops.rng_skip(self.state.handle, self.algorithm, delta) # The following functions return a tensor and as a side effect update # self._state_var. def normal(self, shape, mean=0.0, stddev=1.0, dtype=dtypes.float32, name=None): """Outputs random values from a normal distribution. Args: shape: A 1-D integer Tensor or Python array. The shape of the output tensor. mean: A 0-D Tensor or Python value of type `dtype`. The mean of the normal distribution. stddev: A 0-D Tensor or Python value of type `dtype`. The standard deviation of the normal distribution. dtype: The type of the output. name: A name for the operation (optional). Returns: A tensor of the specified shape filled with random normal values. """ with ops.name_scope(name, "stateful_normal", [shape, mean, stddev]) as name: shape = _shape_tensor(shape) mean = ops.convert_to_tensor(mean, dtype=dtype, name="mean") stddev = ops.convert_to_tensor(stddev, dtype=dtype, name="stddev") rnd = self._standard_normal(shape, dtype=dtype) return math_ops.add(rnd * stddev, mean, name=name) def _truncated_normal(self, shape, dtype): return gen_stateful_random_ops.stateful_truncated_normal( self.state.handle, self.algorithm, shape, dtype=dtype) def truncated_normal(self, shape, mean=0.0, stddev=1.0, dtype=dtypes.float32, name=None): """Outputs random values from a truncated normal distribution. The generated values follow a normal distribution with specified mean and standard deviation, except that values whose magnitude is more than 2 standard deviations from the mean are dropped and re-picked. Args: shape: A 1-D integer Tensor or Python array. The shape of the output tensor. mean: A 0-D Tensor or Python value of type `dtype`. 
The mean of the truncated normal distribution. stddev: A 0-D Tensor or Python value of type `dtype`. The standard deviation of the normal distribution, before truncation. dtype: The type of the output. name: A name for the operation (optional). Returns: A tensor of the specified shape filled with random truncated normal values. """ with ops.name_scope( name, "truncated_normal", [shape, mean, stddev]) as name: shape_tensor = _shape_tensor(shape) mean_tensor = ops.convert_to_tensor(mean, dtype=dtype, name="mean") stddev_tensor = ops.convert_to_tensor(stddev, dtype=dtype, name="stddev") rnd = self._truncated_normal(shape_tensor, dtype=dtype) mul = rnd * stddev_tensor return math_ops.add(mul, mean_tensor, name=name) def _uniform(self, shape, dtype): return gen_stateful_random_ops.stateful_uniform( self.state.handle, self.algorithm, shape=shape, dtype=dtype) def uniform(self, shape, minval=0, maxval=None, dtype=dtypes.float32, name=None): """Outputs random values from a uniform distribution. The generated values follow a uniform distribution in the range `[minval, maxval)`. The lower bound `minval` is included in the range, while the upper bound `maxval` is excluded. (For float numbers especially low-precision types like bfloat16, because of rounding, the result may sometimes include `maxval`.) For floats, the default range is `[0, 1)`. For ints, at least `maxval` must be specified explicitly. In the integer case, the random integers are slightly biased unless `maxval - minval` is an exact power of two. The bias is small for values of `maxval - minval` significantly smaller than the range of the output (either `2**32` or `2**64`). Args: shape: A 1-D integer Tensor or Python array. The shape of the output tensor. minval: A 0-D Tensor or Python value of type `dtype`. The lower bound on the range of random values to generate. Defaults to 0. maxval: A 0-D Tensor or Python value of type `dtype`. The upper bound on the range of random values to generate. Defaults to 1 if `dtype` is floating point. dtype: The type of the output. name: A name for the operation (optional). Returns: A tensor of the specified shape filled with random uniform values. Raises: ValueError: If `dtype` is integral and `maxval` is not specified. """ dtype = dtypes.as_dtype(dtype) if maxval is None: if dtype.is_integer: raise ValueError("Must specify maxval for integer dtype %r" % dtype) maxval = 1 with ops.name_scope(name, "stateful_uniform", [shape, minval, maxval]) as name: shape = _shape_tensor(shape) minval = ops.convert_to_tensor(minval, dtype=dtype, name="min") maxval = ops.convert_to_tensor(maxval, dtype=dtype, name="max") if dtype.is_integer: return gen_stateful_random_ops.stateful_uniform_int( self.state.handle, self.algorithm, shape=shape, minval=minval, maxval=maxval, name=name) else: rnd = self._uniform(shape=shape, dtype=dtype) return math_ops.add(rnd * (maxval - minval), minval, name=name) def uniform_full_int(self, shape, dtype=dtypes.uint64, name=None): """Uniform distribution on an integer type's entire range. The other method `uniform` only covers the range [minval, maxval), which cannot be `dtype`'s full range because `maxval` is of type `dtype`. Args: shape: the shape of the output. dtype: (optional) the integer type, default to uint64. name: (optional) the name of the node. Returns: A tensor of random numbers of the required shape. 
""" dtype = dtypes.as_dtype(dtype) with ops.name_scope(name, "stateful_uniform_full_int", [shape]) as name: shape = _shape_tensor(shape) return gen_stateful_random_ops.stateful_uniform_full_int( self.state.handle, self.algorithm, shape=shape, dtype=dtype, name=name) def binomial(self, shape, counts, probs, dtype=dtypes.int32, name=None): """Outputs random values from a binomial distribution. The generated values follow a binomial distribution with specified count and probability of success parameters. Example: ```python counts = [10., 20.] # Probability of success. probs = [0.8, 0.9] rng = tf.random.experimental.Generator(seed=234) binomial_samples = rng.binomial(shape=[2], counts=counts, probs=probs) ``` Args: shape: A 1-D integer Tensor or Python array. The shape of the output tensor. counts: A 0/1-D Tensor or Python value`. The counts of the binomial distribution. probs: A 0/1-D Tensor or Python value`. The probability of success for the binomial distribution. dtype: The type of the output. Default: tf.int32 name: A name for the operation (optional). Returns: A tensor of the specified shape filled with random binomial values. """ dtype = dtypes.as_dtype(dtype) with ops.name_scope(name, "binomial", [shape, counts, probs]) as name: counts = ops.convert_to_tensor(counts, name="counts") probs = ops.convert_to_tensor(probs, name="probs") shape_tensor = _shape_tensor(shape) return gen_stateful_random_ops.stateful_random_binomial( self.state.handle, self.algorithm, shape=shape_tensor, counts=counts, probs=probs, dtype=dtype, name=name) # TODO(wangpeng): implement other distributions def _make_int64_keys(self, shape=()): # New independent keys are generated via # `new_key[i] = hash(old_key, counter+i)`, which is exactly what # `uniform_full_int(dtype=int64)` does for PhiloxRandom_64_128_128 and # ThreeFry_64_64_64. return self.uniform_full_int(shape=shape, dtype=dtypes.int64) def make_seeds(self, count=1): """Generates seeds for stateless random ops. For example: ```python seeds = get_global_generator().make_seeds(count=10) for i in range(10): seed = seeds[:, i] numbers = stateless_random_normal(shape=[2, 3], seed=seed) ... ``` Args: count: the number of seed pairs (note that stateless random ops need a pair of seeds to invoke). Returns: A tensor of shape [2, count] and dtype int64. """ alg = self.algorithm if alg == RNG_ALG_PHILOX or alg == RNG_ALG_THREEFRY: keys = self._make_int64_keys(shape=[count]) # The two seeds for stateless random ops don't have individual semantics # and are scrambled together, so setting one to zero is fine. zeros = array_ops.zeros_like(keys) return array_ops.stack([keys, zeros]) else: raise ValueError("Unsupported algorithm id: %s" % alg) def split(self, count=1): """Returns a list of independent `Generator` objects. Two generators are independent of each other in the sense that the random-number streams they generate don't have statistically detectable correlations. The new generators are also independent of the old one. The old generator's state will be changed (like other random-number generating methods), so two calls of `split` will return different new generators. For example: ```python gens = get_global_generator().split(count=10) for gen in gens: numbers = gen.normal(shape=[2, 3]) # ... 
gens2 = get_global_generator().split(count=10) # gens2 will be different from gens ``` The new generators will be put on the current device (possible different from the old generator's), for example: ```python with tf.device("/device:CPU:0"): gen = Generator(seed=1234) # gen is on CPU with tf.device("/device:GPU:0"): gens = gen.split(count=10) # gens are on GPU ``` Args: count: the number of generators to return. Returns: A list (length `count`) of `Generator` objects independent of each other. The new generators have the same RNG algorithm as the old one. """ def _key_to_state(alg, key): # Padding with zeros on the left. The zeros will be the counter. return [0] * (_get_state_size(alg) - 1) + [key] alg = self.algorithm if alg == RNG_ALG_PHILOX or alg == RNG_ALG_THREEFRY: keys = self._make_int64_keys(shape=[count]) return [Generator(state=_key_to_state(alg, key), alg=alg) for key in keys.numpy()] else: raise ValueError("Unsupported algorithm id: %s" % alg) # It's not safe to create TF ops before `init_google` is called, so this is # initialized to None and get a value the first time `get_global_generator` is # called. global_generator = None @tf_export("random.experimental.get_global_generator") def get_global_generator(): global global_generator if global_generator is None: global_generator = Generator.from_non_deterministic_state() return global_generator @tf_export("random.experimental.set_global_generator") def set_global_generator(generator): """Replaces the global generator with another `Generator` object. This function creates a new Generator object (and the Variable object within), which does not work well with tf.function because (1) tf.function puts restrictions on Variable creation thus reset_global_generator can't be freely used inside tf.function; (2) redirecting a global variable to a new object is problematic with tf.function because the old object may be captured by a 'tf.function'ed function and still be used by it. A 'tf.function'ed function only keeps weak references to variables, so deleting a variable and then calling that function again may raise an error, as demonstrated by random_test.py/RandomTest.testResetGlobalGeneratorBadWithDefun . Args: generator: the new `Generator` object. """ global global_generator global_generator = generator
tensorflow-master
tensorflow/python/ops/stateful_random_ops.py
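An illustrative sketch (assuming a TF release that exposes `tf.random.experimental.Generator` and runs eagerly, as in TF 2.x) of the stateful RNG API defined above: seeding a generator, drawing samples, producing seeds for stateless ops, and splitting into independent child generators.

```python
import tensorflow as tf

# Deterministically seeded generator; each sampling call below advances
# its internal state variable.
gen = tf.random.experimental.Generator.from_seed(1234)

normals = gen.normal(shape=[2, 3])
uniform_ints = gen.uniform(shape=[4], maxval=10, dtype=tf.int32)

# Seeds for tf.random.stateless_* ops; result has shape [2, count].
seeds = gen.make_seeds(count=3)

# Independent child generators (this also advances the parent's state).
children = gen.split(count=2)

print(normals.shape, uniform_ints.numpy(), seeds.shape, len(children))
```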
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Implements the graph generation for computation of gradients.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import contextlib from six.moves import xrange # pylint: disable=redefined-builtin from tensorflow.core.framework import attr_value_pb2 from tensorflow.python.eager import backprop from tensorflow.python.eager import context from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import function as framework_function from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.framework.func_graph import FuncGraph from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import control_flow_state from tensorflow.python.ops import control_flow_util from tensorflow.python.ops import default_gradient from tensorflow.python.ops import functional_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import resource_variable_ops from tensorflow.python.ops.unconnected_gradients import UnconnectedGradients from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util import compat from tensorflow.python.util.tf_export import tf_export def _MarkReachedOps(from_ops, reached_ops, func_graphs): """Mark all ops reached from "from_ops". Args: from_ops: list of Operations. reached_ops: set of Operations. func_graphs: list of FuncGraphs. This method will traverse through these functions if they capture from_ops or any reachable ops. """ queue = collections.deque() queue.extend(from_ops) while queue: op = queue.popleft() if op not in reached_ops: reached_ops.add(op) for output in op.outputs: if _IsBackpropagatable(output): queue.extend(_Consumers(output, func_graphs)) def _PendingCount(to_ops, from_ops, colocate_gradients_with_ops, func_graphs, xs): """Initialize the pending count for ops between two lists of Operations. 'pending_count[op]' indicates the number of backprop inputs to this operation. Args: to_ops: list of Operations. from_ops: list of Operations. colocate_gradients_with_ops: Python bool. See docstring of gradients(). func_graphs: list of FuncGraphs. This method will traverse through these functions if they capture from_ops or any reachable ops. This is useful if to_ops occur in a function and from_ops are in an outer function or graph. xs: list of Tensors. Returns: A tuple containing: (1) the subset of to_ops reachable from from_ops by a path of zero or more backpropagatable tensors, (2) a mapping from operation to the number of backprop inputs to that op, and (3) a ControlFlowState object which is not None if the ops between from_ops and to_ops contain control flow loops. 
""" # Mark reachable ops from from_ops. reached_ops = set() _MarkReachedOps(from_ops, reached_ops, func_graphs) # X in reached_ops iff X is reachable from from_ops by a path of zero or more # backpropagatable tensors. reachable_to_ops = set(op for op in to_ops if op in reached_ops) # Mark between ops. between_ops = set() between_op_list = [] queue = collections.deque() queue.extend(to_ops) while queue: op = queue.popleft() # We are interested in this op. if op in reached_ops: between_ops.add(op) between_op_list.append(op) # Clear the boolean so we won't add the inputs again. reached_ops.remove(op) for inp in _NonEagerInputs(op, xs): queue.append(inp.op) # X in between_ops iff X is on a path of zero or more backpropagatable tensors # between from_ops and to_ops # 'loop_state' is None if there are no while loops. loop_state = control_flow_state.MaybeCreateControlFlowState( between_op_list, between_ops, colocate_gradients_with_ops) # Initialize pending count for between ops. pending_count = collections.defaultdict(int) for op in between_op_list: for x in _NonEagerInputs(op, xs): if x.op in between_ops: pending_count[x.op] += 1 return reachable_to_ops, pending_count, loop_state def _AsList(x): return x if isinstance(x, (list, tuple)) else [x] def _DefaultGradYs(grad_ys, ys, colocate_gradients_with_ops, gradient_uid="__unsupported__"): """Fill in default values for grad_ys. Args: grad_ys: List of gradients, can contain None. ys: List of tensors. colocate_gradients_with_ops: If True, try colocating gradients with the corresponding op. gradient_uid: A unique identifier within the graph indicating which invocation of gradients is being executed. Used to cluster ops for compilation. Returns: A list of gradients to use, without None. Raises: ValueError: If sizes of gradients and inputs don't match TypeError: If type of any gradient is not valid for its input. """ if len(grad_ys) != len(ys): raise ValueError("Passed %d grad_ys for %d ys" % (len(grad_ys), len(ys))) grad_ys = ops.convert_n_to_tensor_or_indexed_slices(grad_ys, name="grad_y") new_grad_ys = [] for i in xrange(len(grad_ys)): grad_y = grad_ys[i] y = ys[i] with _maybe_colocate_with(y.op, gradient_uid, colocate_gradients_with_ops): if grad_y is None: if y.dtype.is_complex: raise TypeError( "Gradients of complex tensors must set grad_ys (y.dtype = %r)" % y.dtype) new_grad_ys.append( array_ops.fill( array_ops.shape(y), constant_op.constant(1, dtype=y.dtype, name="grad_ys_%d" % i))) continue if y.dtype.is_floating or y.dtype.is_integer: if not grad_y.dtype.is_floating and not grad_y.dtype.is_integer: raise TypeError( "Gradient type %s generated for real or " "integer-valued tensor %s with type %s must be " "real or integer" % (dtypes.as_dtype(grad_y.dtype).name, y, dtypes.as_dtype(y.dtype).name)) elif y.dtype.is_complex: if not grad_y.dtype.is_complex: raise TypeError( "Gradient type %s generated for complex-valued " "tensor %s with type %s must be real" % (dtypes.as_dtype( grad_y.dtype).name, y, dtypes.as_dtype(y.dtype).name)) elif y.dtype == dtypes.variant: if grad_y.dtype != dtypes.variant: raise TypeError( "Gradient type %s generated for variant " "tensor %s with type %s must be variant" % (dtypes.as_dtype( grad_y.dtype).name, y, dtypes.as_dtype(y.dtype).name)) elif y.dtype == dtypes.resource: # We assume y is the handle of a ResourceVariable. The gradient of a # ResourceVariable should be a numeric value, not another resource. 
if grad_y.dtype == dtypes.resource: raise TypeError("Input gradient %s for resource tensor %s should not " "be a resource" % (grad_y, y)) else: raise TypeError( "Tensor %s with type %s must be numeric " "to obtain a default gradient" % (y, dtypes.as_dtype(y.dtype).name)) # Create a grad_y tensor in the name scope of the gradient. # Required for TensorArrays to identify which gradient call a # grad_y value is coming from. if isinstance(grad_y, ops.IndexedSlices): new_grad_ys.append( ops.IndexedSlices( indices=(array_ops.identity( grad_y.indices, name="grad_ys_%d_indices" % i) if isinstance(grad_y.indices, ops.Tensor) else grad_y.indices), values=(array_ops.identity( grad_y.values, name="grad_ys_%d_values" % i) if isinstance( grad_y.values, ops.Tensor) else grad_y.values), dense_shape=(array_ops.identity( grad_y.dense_shape, name="grad_ys_%d_shape" % i) if isinstance(grad_y.dense_shape, ops.Tensor) else grad_y.dense_shape))) else: new_grad_ys.append(array_ops.identity(grad_y, name="grad_ys_%d" % i)) return new_grad_ys def IsTrainable(tensor_or_dtype): if isinstance(tensor_or_dtype, ops.Tensor): dtype = tensor_or_dtype.dtype else: dtype = tensor_or_dtype dtype = dtypes.as_dtype(dtype) return dtype.base_dtype in (dtypes.float16, dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128, dtypes.resource, dtypes.variant) def _IsBackpropagatable(tensor): if IsTrainable(tensor): return True dtype = dtypes.as_dtype(tensor.dtype) return dtype.base_dtype == dtypes.bfloat16 def _VerifyGeneratedGradients(grads, op): """Verify that gradients are valid in number and type. Args: grads: List of generated gradients. op: Operation for which the gradients where generated. Raises: ValueError: if sizes of gradients and inputs don't match. TypeError: if type of any gradient is not valid for its input. """ # While ops have inputs added to them during the gradient computation, so we # skip the below check. See while_v2 for details. if op.type == "While": return if len(grads) != len(op.inputs): raise ValueError("Num gradients %d generated for op %s do not match num " "inputs %d" % (len(grads), op.node_def, len(op.inputs))) def _StopOps(from_ops, stop_gradient_ops, pending_count, xs): """The set of ops that terminate the gradient computation. This computes the frontier of the forward graph *before* which backprop should stop. Operations in the returned set will not be differentiated. This set is defined as the subset of `from_ops` containing ops that have no predecessor in `from_ops`. `pending_count` is the result of `_PendingCount(xs, from_ops)`. An 'op' has predecessors in `from_ops` iff pending_count[op] > 0. In addition, none of `stop_gradient_ops` will be differentiated. Args: from_ops: list of Operations. stop_gradient_ops: list of Operations never to backprop through. pending_count: mapping from operation to number of backprop inputs. xs: list of Tensors. Returns: The set of operations. 
""" stop_ops = set() for op in from_ops: is_stop_op = True for inp in _NonEagerInputs(op, xs): if pending_count[inp.op] > 0: is_stop_op = False break if is_stop_op: stop_ops.add(op) stop_ops.update(op for op in stop_gradient_ops) return stop_ops @contextlib.contextmanager def _maybe_colocate_with(op, gradient_uid, colocate_gradients_with_ops): # pylint: disable=invalid-name """Context to colocate with `op` if `colocate_gradients_with_ops`.""" if colocate_gradients_with_ops: with ops._colocate_with_for_gradient(op, gradient_uid): # pylint: disable=protected-access yield else: yield def _IsPartitionedCall(op): return op.type == "PartitionedCall" or op.type == "StatefulPartitionedCall" def _SymGrad(op, out_grads): """Backprop through a function call node op given its outputs' gradients.""" f_in = [x for x in op.inputs] + out_grads f_types = [x.dtype for x in op.inputs] f = attr_value_pb2.NameAttrList() if _IsPartitionedCall(op): f.name = op.get_attr("f").name else: f.name = op.type for k in op.node_def.attr: f.attr[k].CopyFrom(op.node_def.attr[k]) # TODO(apassos) use a better dtype here in_grads = functional_ops.symbolic_gradient( input=f_in, Tout=[x if x != dtypes.resource else dtypes.float32 for x in f_types], f=f) return in_grads def _MaybeCompile(scope, op, func, grad_fn): """Compile the calculation in grad_fn if op was marked as compiled.""" scope = scope.rstrip("/").replace("/", "_") if func is not None: xla_compile = func.definition.attr["_XlaCompile"].b xla_separate_compiled_gradients = func.definition.attr[ "_XlaSeparateCompiledGradients"].b xla_scope = func.definition.attr["_XlaScope"].s.decode() else: try: xla_compile = op.get_attr("_XlaCompile") xla_separate_compiled_gradients = op.get_attr( "_XlaSeparateCompiledGradients") xla_scope = op.get_attr("_XlaScope").decode() except ValueError: return grad_fn() # Exit early if not xla_compile: return grad_fn() # Exit early # If the gradients are supposed to be compiled separately, we give them a # _XlaScope name that is based on the name_scope of the gradients. Otherwise # they just inherit the existing _XlaScope name, which lets them be merged # together with the non-gradient computation. if xla_separate_compiled_gradients: xla_grad_scope = "%s_grad_%s" % (xla_scope, scope) else: xla_grad_scope = xla_scope attrs = { "_XlaCompile": attr_value_pb2.AttrValue(b=xla_compile), "_XlaScope": attr_value_pb2.AttrValue(s=xla_grad_scope.encode()) } with ops.get_default_graph()._attr_scope(attrs): # pylint: disable=protected-access return grad_fn() def _RaiseNoGradWrtInitialLoopValError(op, from_ops, xs): """Raises an error if we backprop through a loop var.""" # Find the nearest 'to_op' reachable from 'op' to provide a more helpful error # message. target_op = None queue = collections.deque([op]) visited = set() while queue: curr_op = queue.popleft() if curr_op in visited: continue visited.add(curr_op) if curr_op in from_ops: target_op = curr_op break queue.extend(t.op for t in _NonEagerInputs(curr_op, xs)) assert target_op raise ValueError( "Cannot compute gradient inside while loop with respect to op '%s'. " "We do not support taking the gradient wrt or through the initial value " "of a loop variable. Gradients can be computed through loop invariants " "or wrt the input parameters to the loop body." 
% target_op.name) def _IsFunction(graph): return (isinstance(graph, FuncGraph) or isinstance(graph, framework_function._FuncGraph)) # pylint: disable=protected-access def _Captures(func_graph): if isinstance(func_graph, FuncGraph): return func_graph.captures else: assert isinstance(func_graph, framework_function._FuncGraph) # pylint: disable=protected-access return func_graph._captured # pylint: disable=protected-access def _MaybeCaptured(t): """If t is a captured value placeholder, returns the original captured value. Args: t: Tensor Returns: A tensor, potentially from a different Graph/FuncGraph. """ # pylint: disable=protected-access if (not isinstance(t, ops.EagerTensor) and _IsFunction(t.op.graph) and t.op.type == "Placeholder"): for input_t, placeholder_t in _Captures(t.op.graph).items(): if t == placeholder_t: return _MaybeCaptured(input_t) # pylint: enable=protected-access return t def _NonEagerInputs(op, xs): """Returns the inputs of op, crossing closure boundaries where necessary. Does not return any captured EagerTensors, i.e., the number of tensors returned may be less than than the actual number of inputs. Args: op: Operation xs: list of Tensors we are differentiating w.r.t. Returns: A list of tensors. The tensors may be from multiple Graph/FuncGraphs if op is in a FuncGraph and has captured inputs. """ return [t for t in _Inputs(op, xs) if not isinstance(t, ops.EagerTensor)] # TODO(skyewm): plumbing xs through everywhere is ugly, consider making # _GradientsHelper a class with xs as a member variable. def _Inputs(op, xs): """Returns the inputs of op, crossing closure boundaries where necessary. Args: op: Operation xs: list of Tensors we are differentiating w.r.t. Returns: A list of tensors. The tensors may be from multiple Graph/FuncGraphs if op is in a FuncGraph and has captured inputs. """ if _IsFunction(op.graph): # pylint: disable=protected-access inputs = [] for t in op.inputs: # If we're differentiating w.r.t. `t`, do not attempt to traverse through # it to a captured value. The algorithm needs to "see" `t` in this case, # even if it's a function input for a captured value, whereas usually we'd # like to traverse through these closures as if the captured value was the # direct input to op. if t not in xs: t = _MaybeCaptured(t) inputs.append(t) return inputs else: return op.inputs def _Consumers(t, func_graphs): """Returns the consumers of t, crossing closure boundaries where necessary. Args: t: Tensor func_graphs: a list of FuncGraphs that may have captured t. Returns: A list of tensors. The tensors will be from the current graph and/or func_graphs. """ consumers = t.consumers() for func in func_graphs: for input_t, placeholder in _Captures(func).items(): if input_t == t: consumers.extend(_Consumers(placeholder, func_graphs)) return consumers def _GradientsHelper(ys, xs, grad_ys=None, name="gradients", colocate_gradients_with_ops=False, gate_gradients=False, aggregation_method=None, stop_gradients=None, unconnected_gradients=UnconnectedGradients.NONE, src_graph=None): """Implementation of gradients().""" if context.executing_eagerly(): raise RuntimeError("tf.gradients is not supported when eager execution " "is enabled. Use tf.GradientTape instead.") if src_graph is None: src_graph = ops.get_default_graph() try: unconnected_gradients = UnconnectedGradients(unconnected_gradients) except ValueError: raise ValueError( "Unknown value for unconnected_gradients: %r" % unconnected_gradients) # If src_graph is a _FuncGraph (i.e. 
a function body), gather it and all # ancestor graphs. This is necessary for correctly handling captured values. func_graphs = [] curr_graph = src_graph while _IsFunction(curr_graph): func_graphs.append(curr_graph) if isinstance(curr_graph, FuncGraph): curr_graph = curr_graph.outer_graph else: assert isinstance(curr_graph, framework_function._FuncGraph) # pylint: disable=protected-access curr_graph = curr_graph._outer_graph # pylint: disable=protected-access ys = _AsList(ys) xs = _AsList(xs) stop_gradients = [] if stop_gradients is None else _AsList(stop_gradients) if grad_ys is None: grad_ys = [None] * len(ys) else: grad_ys = _AsList(grad_ys) with ops.name_scope( name, "gradients", list(ys) + list(xs) + list(stop_gradients) + list(grad_ys)) as grad_scope: # Get a uid for this call to gradients that can be used to help # cluster ops for compilation. gradient_uid = ops.get_default_graph().unique_name("uid") ys = ops.convert_n_to_tensor_or_indexed_slices(ys, name="y") xs = [ x.handle if resource_variable_ops.is_resource_variable(x) else x for x in xs ] xs = ops.internal_convert_n_to_tensor_or_indexed_slices( xs, name="x", as_ref=True) grad_ys = _DefaultGradYs(grad_ys, ys, colocate_gradients_with_ops, gradient_uid) # The approach we take here is as follows: Create a list of all ops in the # subgraph between the ys and xs. Visit these ops in reverse order of ids # to ensure that when we visit an op the gradients w.r.t its outputs have # been collected. Then aggregate these gradients if needed, call the op's # gradient function, and add the generated gradients to the gradients for # its input. # Initialize the pending count for ops in the connected subgraph from ys # to the xs. to_ops = [t.op for t in ys] from_ops = [t.op for t in xs] stop_gradient_ops = [t.op for t in stop_gradients] reachable_to_ops, pending_count, loop_state = _PendingCount( to_ops, from_ops, colocate_gradients_with_ops, func_graphs, xs) # Iterate over the collected ops. # # grads: op => list of gradients received on each output endpoint of the # op. The gradients for each endpoint are initially collected as a list. # When it is time to call the op's gradient function, for each endpoint we # aggregate the list of received gradients into a Add() Operation if there # is more than one. grads = {} # Add the initial gradients for the ys. for y, grad_y in zip(ys, grad_ys): _SetGrad(grads, y, grad_y) # Initialize queue with to_ops. queue = collections.deque() # Add the ops in 'to_ops' into the queue. to_ops_set = set() for op in to_ops: # 'ready' handles the case where one output gradient relies on # another output's gradient. ready = (pending_count[op] == 0) if ready and op not in to_ops_set and op in reachable_to_ops: to_ops_set.add(op) queue.append(op) if loop_state: loop_exits = loop_state.ProcessUnusedLoopExits(pending_count, to_ops_set) for y in loop_exits: if IsTrainable(y): _SetGrad(grads, y, loop_state.ZerosLikeForExit(y)) queue.append(y.op) stop_ops = _StopOps(from_ops, stop_gradient_ops, pending_count, xs) while queue: # generate gradient subgraph for op. 
op = queue.popleft() with _maybe_colocate_with(op, gradient_uid, colocate_gradients_with_ops): if loop_state: loop_state.EnterGradWhileContext(op, before=True) out_grads = _AggregatedGrads(grads, op, gradient_uid, loop_state, aggregation_method) if loop_state: loop_state.ExitGradWhileContext(op, before=True) grad_fn = None func_call = None is_partitioned_call = _IsPartitionedCall(op) # pylint: disable=protected-access is_func_call = ( src_graph._is_function(op.type) or is_partitioned_call) # pylint: enable=protected-access has_out_grads = any(isinstance(g, ops.Tensor) or g for g in out_grads) if has_out_grads and (op not in stop_ops): try: grad_fn = ops.get_gradient_function(op) except LookupError: if is_func_call: if is_partitioned_call: func_call = src_graph._get_function( # pylint: disable=protected-access compat.as_bytes(op.get_attr("f").name)) else: func_call = src_graph._get_function(op.type) # pylint: disable=protected-access # Note that __defun is not set if the graph is # imported. If it's set, we prefer to access the original # defun. func_call = getattr(op, "__defun", func_call) grad_fn = func_call.python_grad_func else: raise LookupError( "No gradient defined for operation '%s' (op type: %s)" % (op.name, op.type)) if loop_state: loop_state.EnterGradWhileContext(op, before=False) # NOTE(skyewm): We don't support computing gradients wrt a loop variable # unless it's within the context of a single iteration (i.e. the # gradient is wrt to the loop parameter in the body function, not wrt or # through the initial value). This means if we're in a while loop # context, we should never see a switch node from this context. # pylint: disable=protected-access if (control_flow_util.IsSwitch(op) and op._control_flow_context is not None and op._control_flow_context.IsWhileContext() and op._control_flow_context == ops.get_default_graph()._get_control_flow_context()): _RaiseNoGradWrtInitialLoopValError(op, from_ops, xs) # pylint: enable=protected-access if (grad_fn or is_func_call) and has_out_grads: # NOTE: If _AggregatedGrads didn't compute a value for the i'th # output, it means that the cost does not depend on output[i], # therefore dC/doutput[i] is 0. for i, out_grad in enumerate(out_grads): if (not isinstance(out_grad, ops.Tensor) and not out_grad) and ( (not grad_fn and is_func_call) or IsTrainable(op.outputs[i])): # Only trainable outputs or outputs for a function call that # will use SymbolicGradient get a zero gradient. Gradient # functions should ignore the gradient for other outputs. # TODO(apassos) gradients of resource handles might be an # issue here because of zeros. if loop_state: out_grads[i] = loop_state.ZerosLike(op, i) else: out_grads[i] = control_flow_state.ZerosLikeOutsideLoop(op, i) with ops.name_scope(op.name + "_grad"): # pylint: disable=protected-access with src_graph._original_op(op): # pylint: enable=protected-access if grad_fn: # If grad_fn was found, do not use SymbolicGradient even for # functions. in_grads = _MaybeCompile(grad_scope, op, func_call, lambda: grad_fn(op, *out_grads)) else: # For function call ops, we add a 'SymbolicGradient' # node to the graph to compute gradients. 
in_grads = _MaybeCompile(grad_scope, op, func_call, lambda: _SymGrad(op, out_grads)) in_grads = _AsList(in_grads) _VerifyGeneratedGradients(in_grads, op) if gate_gradients and len([x for x in in_grads if x is not None]) > 1: with ops.device(None): with ops._colocate_with_for_gradient( # pylint: disable=protected-access None, gradient_uid, ignore_existing=True): in_grads = control_flow_ops.tuple(in_grads) _LogOpGradients(op, out_grads, in_grads) else: # If no grad_fn is defined or none of out_grads is available, # just propagate a list of None backwards. in_grads = [None] * len(_Inputs(op, xs)) # Note: we don't filter out eager inputs here because the inputs need to # line up with in_grads. for i, (t_in, in_grad) in enumerate(zip(_Inputs(op, xs), in_grads)): if in_grad is not None: if (isinstance(in_grad, ops.Tensor) and t_in.dtype != dtypes.resource): try: in_grad.set_shape(t_in.get_shape()) except ValueError: raise ValueError( "Incompatible shapes between op input and calculated " "input gradient. Forward operation: %s. Input index: %d. " "Original input shape: %s. " "Calculated input gradient shape: %s" % (op.name, i, t_in.shape, in_grad.shape)) if not isinstance(t_in, ops.EagerTensor): _SetGrad(grads, t_in, in_grad) if loop_state: loop_state.ExitGradWhileContext(op, before=False) # Update pending count for the inputs of op and enqueue ready ops. _UpdatePendingAndEnqueueReady(grads, op, queue, pending_count, loop_state, xs) if loop_state: loop_state.PostProcessing() return [_GetGrad(grads, x, unconnected_gradients) for x in xs] def _HasAnyNotNoneGrads(grads, op): """Return true iff op has real gradient.""" out_grads = _GetGrads(grads, op) for out_grad in out_grads: if isinstance(out_grad, (ops.Tensor, ops.IndexedSlices)): return True if out_grad and isinstance(out_grad, collections.Sequence): if any(g is not None for g in out_grad): return True return False def _UpdatePendingAndEnqueueReady(grads, op, queue, pending_count, loop_state, xs): """Update pending count for the inputs of op and enqueue ready ops.""" for x in _NonEagerInputs(op, xs): pending_count[x.op] -= 1 ready = (pending_count[x.op] == 0) if loop_state and not ready: ready = pending_count[x.op] > 0 and control_flow_util.IsLoopSwitch(x.op) if ready: if control_flow_util.IsLoopExit(x.op): # if x is an exit without real gradient, defer processing them. grad_state = loop_state.GetGradState(x.op, before=False) grad_state.deferred_exits.append(x) grad_state.pending_exits_count -= 1 if grad_state.pending_exits_count == 0: # We now have all the exits so process them. has_not_none_grad = False for y in grad_state.deferred_exits: if _HasAnyNotNoneGrads(grads, y.op): has_not_none_grad = True queue.append(y.op) else: grad_state.unused_exits.append(y) if has_not_none_grad: # For an unused exit, if it has trainable outputs, backprop # a zero gradient. Otherwise, just ignore it. for y in grad_state.unused_exits: if IsTrainable(y): _SetGrad(grads, y, loop_state.ZerosLikeForExit(y)) queue.append(y.op) else: # All exits are "unused" so use None as gradient. 
for y in grad_state.unused_exits: queue.append(y.op) else: queue.append(x.op) def _SetGrad(grads, t, grad): """Sets gradient "grad" in "grads" for tensor "t".""" op = t.op op_grads = grads.get(op) if not op_grads: op_grads = [[] for _ in xrange(len(op.outputs))] grads[op] = op_grads t_grads = op_grads[t.value_index] if isinstance(t_grads, list): t_grads.append(grad) else: assert control_flow_util.IsLoopSwitch(op) op_grads[t.value_index] = grad def _GetGrad(grads, t, unconnected_gradients): """Gets gradient for tensor "t".""" op = t.op op_grads = grads.get(op) if not op_grads: if unconnected_gradients == UnconnectedGradients.ZERO: t_dtype = default_gradient.get_zeros_dtype(t) return array_ops.zeros_like(t, dtype=t_dtype) elif unconnected_gradients == UnconnectedGradients.NONE: return None else: raise ValueError( "Unknown value for unconnected_gradients: %r" % unconnected_gradients) t_grad = op_grads[t.value_index] assert not isinstance( t_grad, list), ("gradients list should have been aggregated by now.") return t_grad def _GetGrads(grads, op): """Gets all gradients for op.""" if op in grads: return grads[op] else: return [[] for _ in xrange(len(op.outputs))] def _AccumulatorShape(inputs): shape = tensor_shape.unknown_shape() for i in inputs: if isinstance(i, ops.Tensor): shape = shape.merge_with(i.get_shape()) return shape def _LogOpGradients(op, out_grads, in_grads): """Log the in and out grads of an op.""" logging.vlog(1, "Gradient for '" + op.name + "'") def _FilterGrad(x): if x is None: return False if isinstance(x, (list, tuple)): return bool(x) else: return True logging.vlog(1, " in --> %s", ", ".join([x.name for x in out_grads if _FilterGrad(x)])) logging.vlog(1, " out --> %s", ", ".join([x.name for x in in_grads if _FilterGrad(x)])) def _MultiDeviceAddN(tensor_list, gradient_uid): """Adds tensors from potentially multiple devices.""" # Basic function structure comes from control_flow_ops.group(). # Sort tensors according to their devices. tensors_on_device = collections.defaultdict(lambda: []) for tensor in tensor_list: tensors_on_device[tensor.device].append(tensor) # For each device, add the tensors on that device first. # Then gather the partial sums from multiple devices. # TODO(sjhwang): Create hierarchical aggregation tree as pbar's suggestion. # E.g., aggregate per GPU, then per task, and so on. summands = [] def DeviceKey(dev): return "" if dev is None else dev for dev in sorted(tensors_on_device, key=DeviceKey): tensors = tensors_on_device[dev] with ops._colocate_with_for_gradient( # pylint: disable=protected-access tensors[0].op, gradient_uid, ignore_existing=True): summands.append(math_ops.add_n(tensors)) return math_ops.add_n(summands) @tf_export("AggregationMethod") class AggregationMethod(object): """A class listing aggregation methods used to combine gradients. Computing partial derivatives can require aggregating gradient contributions. This class lists the various methods that can be used to combine gradients in the graph. The following aggregation methods are part of the stable API for aggregating gradients: * `ADD_N`: All of the gradient terms are summed as part of one operation using the "AddN" op (see `tf.add_n`). This method has the property that all gradients must be ready and buffered separately in memory before any aggregation is performed. * `DEFAULT`: The system-chosen default aggregation method. 
The following aggregation methods are experimental and may not be supported in future releases: * `EXPERIMENTAL_TREE`: Gradient terms are summed in pairs using using the "AddN" op. This method of summing gradients may reduce performance, but it can improve memory utilization because the gradients can be released earlier. * `EXPERIMENTAL_ACCUMULATE_N`: Gradient terms are summed using the "AccumulateN" op (see `tf.accumulate_n`), which accumulates the overall sum in a single buffer that is shared across threads. This method of summing gradients can result in a lower memory footprint and lower latency at the expense of higher CPU/GPU utilization. For gradients of types that "AccumulateN" does not support, this summation method falls back on the behavior of `EXPERIMENTAL_TREE` """ ADD_N = 0 DEFAULT = ADD_N # The following are experimental and may not be supported in future releases. EXPERIMENTAL_TREE = 1 EXPERIMENTAL_ACCUMULATE_N = 2 def _AggregatedGrads(grads, op, gradient_uid, loop_state, aggregation_method=None): """Get the aggregated gradients for op. Args: grads: The map of memoized gradients. op: The op to get gradients for. gradient_uid: A unique identifier within the graph indicating which invocation of gradients is being executed. Used to cluster ops for compilation. loop_state: An object for maintaining the state of the while loops in the graph. It is of type ControlFlowState. None if the graph contains no while loops. aggregation_method: Specifies the method used to combine gradient terms. Accepted values are constants defined in the class `AggregationMethod`. Returns: A list of gradients, one per each output of `op`. If the gradients for a particular output is a list, this function aggregates it before returning. Raises: TypeError: if the incoming grads are not Tensors or IndexedSlices. ValueError: if the arguments are invalid. """ if aggregation_method is None: aggregation_method = AggregationMethod.DEFAULT if aggregation_method not in [ AggregationMethod.ADD_N, AggregationMethod.EXPERIMENTAL_TREE, AggregationMethod.EXPERIMENTAL_ACCUMULATE_N ]: raise ValueError( "Invalid aggregation_method specified %s." % aggregation_method) out_grads = _GetGrads(grads, op) for i, out_grad in enumerate(out_grads): if loop_state: if isinstance(out_grad, (ops.Tensor, ops.IndexedSlices)): assert control_flow_util.IsLoopSwitch(op) continue # Grads have to be Tensors or IndexedSlices if (isinstance(out_grad, collections.Sequence) and not all( isinstance(g, (ops.Tensor, ops.IndexedSlices)) for g in out_grad if g is not None )): raise TypeError("gradients have to be either all Tensors " "or all IndexedSlices") # Aggregate multiple gradients, and convert [] to None. if out_grad: if len(out_grad) < 2: used = "nop" out_grads[i] = out_grad[0] elif all(isinstance(g, ops.Tensor) for g in out_grad if g is not None): tensor_shape = _AccumulatorShape(out_grad) if (aggregation_method == AggregationMethod.EXPERIMENTAL_ACCUMULATE_N and len(out_grad) > 2 and tensor_shape.is_fully_defined()): # The benefit of using AccumulateN is that its inputs can be combined # in any order and this can allow the expression to be evaluated with # a smaller memory footprint. When used with gpu_allocator_retry, # it is possible to compute a sum of terms which are much larger than # total GPU memory. # AccumulateN can currently only be used if we know the shape for # an accumulator variable. If this is not known, or if we only have # 2 grads then we fall through to the "tree" case below. 
used = "accumulate_n" out_grads[i] = math_ops.accumulate_n(out_grad) elif aggregation_method in [ AggregationMethod.EXPERIMENTAL_TREE, AggregationMethod.EXPERIMENTAL_ACCUMULATE_N ]: # Aggregate all gradients by doing pairwise sums: this may # reduce performance, but it can improve memory because the # gradients can be released earlier. # # TODO(vrv): Consider replacing this with a version of # tf.AddN() that eagerly frees its inputs as soon as they are # ready, so the order of this tree does not become a problem. used = "tree" with ops.name_scope(op.name + "_gradient_sum"): running_sum = out_grad[0] for grad in out_grad[1:]: running_sum = math_ops.add_n([running_sum, grad]) out_grads[i] = running_sum else: used = "add_n" out_grads[i] = _MultiDeviceAddN(out_grad, gradient_uid) logging.vlog(2, " _AggregatedGrads %d x %s using %s", len(out_grad), tensor_shape, used) else: out_grads[i] = backprop.aggregate_indexed_slices_gradients(out_grad) # pylint: disable=protected-access else: # not out_grad # out_grads[i] is [], thus its aggregation is simply None. out_grads[i] = None return out_grads
tensorflow-master
tensorflow/python/ops/gradients_util.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Variables. See the [Variables](https://www.tensorflow.org/guide/variables) guide. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.ops import array_ops from tensorflow.python.ops import gen_math_ops from tensorflow.python.ops import gen_resource_variable_ops from tensorflow.python.ops import gen_state_ops # go/tf-wildcard-import # pylint: disable=wildcard-import from tensorflow.python.ops.gen_state_ops import * # pylint: enable=wildcard-import from tensorflow.python.util import deprecation from tensorflow.python.util.deprecation import deprecated from tensorflow.python.util.tf_export import tf_export # pylint: disable=protected-access,g-doc-return-or-yield,g-doc-args def variable_op(shape, dtype, name="Variable", set_shape=True, container="", shared_name=""): """Deprecated. Used variable_op_v2 instead.""" if not set_shape: shape = tensor_shape.unknown_shape() ret = gen_state_ops.variable(shape=shape, dtype=dtype, name=name, container=container, shared_name=shared_name) # TODO(mrry): Move this to where it is used, so we can get rid of this op # wrapper? if set_shape: ret.set_shape(shape) return ret def variable_op_v2(shape, dtype, name="Variable", container="", shared_name=""): """Create a variable Operation. See also variables.Variable. Args: shape: The shape of the tensor managed by this variable dtype: The underlying type of the tensor values. name: optional name to use for the variable op. container: An optional string. Defaults to "". If non-empty, this variable is placed in the given container. Otherwise, a default container is used. shared_name: An optional string. Defaults to "". If non-empty, this variable is named in the given bucket with this shared_name. Otherwise, the node name is used instead. Returns: A variable tensor. """ return gen_state_ops.variable_v2( shape=shape, dtype=dtype, name=name, container=container, shared_name=shared_name) def init_variable(v, init, name="init"): """Initializes variable with "init". This op does the following: if init is a Tensor, v = init if callable(init): v = init(VariableShape(v), v.dtype) Args: v: Variable to initialize init: Tensor to assign to v, Or an object convertible to Tensor e.g. nparray, Or an Initializer that generates a tensor given the shape and type of v. An "Initializer" is a callable that returns a tensor that "v" should be set to. It will be called as init(shape, dtype). name: Optional name for the op. Returns: The operation that initializes v. """ with ops.name_scope(None, v.op.name + "/", [v, init]): with ops.name_scope(name) as scope: with ops.colocate_with(v): if callable(init): assert v.get_shape().is_fully_defined(), "Variable shape unknown." 
# TODO(mrry): Convert to v.shape when the property and # accessor are reconciled (and all initializers support # tf.TensorShape objects). value = init(v.get_shape().as_list(), v.dtype.base_dtype) value = ops.convert_to_tensor(value, name="value") return gen_state_ops.assign(v, value, name=scope) else: init = ops.convert_to_tensor(init, name="init") return gen_state_ops.assign(v, init, name=scope) def is_variable_initialized(ref, name=None): """Checks whether a tensor has been initialized. Outputs boolean scalar indicating whether the tensor has been initialized. Args: ref: A mutable `Tensor`. Should be from a `Variable` node. May be uninitialized. name: A name for the operation (optional). Returns: A `Tensor` of type `bool`. """ if ref.dtype._is_ref_dtype: return gen_state_ops.is_variable_initialized(ref=ref, name=name) # Handle resource variables. return ref.is_initialized(name=name) @tf_export(v1=["assign_sub"]) def assign_sub(ref, value, use_locking=None, name=None): """Update `ref` by subtracting `value` from it. This operation outputs `ref` after the update is done. This makes it easier to chain operations that need to use the reset value. Unlike `tf.math.subtract`, this op does not broadcast. `ref` and `value` must have the same shape. Args: ref: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`. Should be from a `Variable` node. value: A `Tensor`. Must have the same shape and dtype as `ref`. The value to be subtracted to the variable. use_locking: An optional `bool`. Defaults to `False`. If True, the subtraction will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention. name: A name for the operation (optional). Returns: Same as "ref". Returned as a convenience for operations that want to use the new value after the variable has been updated. """ if ref.dtype._is_ref_dtype: return gen_state_ops.assign_sub( ref, value, use_locking=use_locking, name=name) return ref.assign_sub(value) @tf_export(v1=["assign_add"]) def assign_add(ref, value, use_locking=None, name=None): """Update `ref` by adding `value` to it. This operation outputs "ref" after the update is done. This makes it easier to chain operations that need to use the reset value. Unlike `tf.math.add`, this op does not broadcast. `ref` and `value` must have the same shape. Args: ref: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`. Should be from a `Variable` node. value: A `Tensor`. Must have the same shape and dtype as `ref`. The value to be added to the variable. use_locking: An optional `bool`. Defaults to `False`. If True, the addition will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention. name: A name for the operation (optional). Returns: Same as "ref". Returned as a convenience for operations that want to use the new value after the variable has been updated. """ if ref.dtype._is_ref_dtype: return gen_state_ops.assign_add( ref, value, use_locking=use_locking, name=name) return ref.assign_add(value) @tf_export(v1=["assign"]) def assign(ref, value, validate_shape=None, use_locking=None, name=None): """Update `ref` by assigning `value` to it. This operation outputs a Tensor that holds the new value of `ref` after the value has been assigned. 
This makes it easier to chain operations that need to use the reset value. Args: ref: A mutable `Tensor`. Should be from a `Variable` node. May be uninitialized. value: A `Tensor`. Must have the same shape and dtype as `ref`. The value to be assigned to the variable. validate_shape: An optional `bool`. Defaults to `True`. If true, the operation will validate that the shape of 'value' matches the shape of the Tensor being assigned to. If false, 'ref' will take on the shape of 'value'. use_locking: An optional `bool`. Defaults to `True`. If True, the assignment will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention. name: A name for the operation (optional). Returns: A `Tensor` that will hold the new value of `ref` after the assignment has completed. """ if ref.dtype._is_ref_dtype: return gen_state_ops.assign( ref, value, use_locking=use_locking, name=name, validate_shape=validate_shape) return ref.assign(value, name=name) @tf_export(v1=["count_up_to"]) @deprecated(None, "Prefer Dataset.range instead.") def count_up_to(ref, limit, name=None): r"""Increments 'ref' until it reaches 'limit'. Args: ref: A Variable. Must be one of the following types: `int32`, `int64`. Should be from a scalar `Variable` node. limit: An `int`. If incrementing ref would bring it above limit, instead generates an 'OutOfRange' error. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `ref`. A copy of the input before increment. If nothing else modifies the input, the values produced will all be distinct. """ if ref.dtype._is_ref_dtype: return gen_state_ops.count_up_to(ref, limit=limit, name=name) return gen_state_ops.resource_count_up_to( ref.handle, limit, T=ref.dtype, name=name) @tf_export(v1=["scatter_update"]) def scatter_update(ref, indices, updates, use_locking=True, name=None): # pylint: disable=line-too-long r"""Applies sparse updates to a variable reference. This operation computes ```python # Scalar indices ref[indices, ...] = updates[...] # Vector indices (for each i) ref[indices[i], ...] = updates[i, ...] # High rank indices (for each i, ..., j) ref[indices[i, ..., j], ...] = updates[i, ..., j, ...] ``` This operation outputs `ref` after the update is done. This makes it easier to chain operations that need to use the reset value. If values in `ref` is to be updated more than once, because there are duplicate entries in `indices`, the order at which the updates happen for each value is undefined. Requires `updates.shape = indices.shape + ref.shape[1:]`. <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"> <img style="width:100%" src="https://www.tensorflow.org/images/ScatterUpdate.png" alt> </div> Args: ref: A `Variable`. indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. A tensor of indices into the first dimension of `ref`. updates: A `Tensor`. Must have the same type as `ref`. A tensor of updated values to store in `ref`. use_locking: An optional `bool`. Defaults to `True`. If True, the assignment will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention. name: A name for the operation (optional). Returns: Same as `ref`. Returned as a convenience for operations that want to use the updated values after the update is done. 
""" if ref.dtype._is_ref_dtype: return gen_state_ops.scatter_update(ref, indices, updates, use_locking=use_locking, name=name) return ref._lazy_read(gen_resource_variable_ops.resource_scatter_update( # pylint: disable=protected-access ref.handle, indices, ops.convert_to_tensor(updates, ref.dtype), name=name)) @tf_export(v1=["scatter_nd_update"]) def scatter_nd_update(ref, indices, updates, use_locking=True, name=None): r"""Applies sparse `updates` to individual values or slices in a Variable. `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. `indices` must be integer tensor, containing indices into `ref`. It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. The innermost dimension of `indices` (with length `K`) corresponds to indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th dimension of `ref`. `updates` is `Tensor` of rank `Q-1+P-K` with shape: ``` [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]. ``` For example, say we want to update 4 scattered elements to a rank-1 tensor to 8 elements. In Python, that update would look like this: ```python ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8]) indices = tf.constant([[4], [3], [1] ,[7]]) updates = tf.constant([9, 10, 11, 12]) update = tf.compat.v1.scatter_nd_update(ref, indices, updates) with tf.compat.v1.Session() as sess: print sess.run(update) ``` The resulting update to ref would look like this: [1, 11, 3, 10, 9, 6, 7, 12] See `tf.scatter_nd` for more details about how to make updates to slices. Args: ref: A Variable. indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. A tensor of indices into ref. updates: A `Tensor`. Must have the same type as `ref`. A Tensor. Must have the same type as ref. A tensor of updated values to add to ref. use_locking: An optional `bool`. Defaults to `True`. An optional bool. Defaults to True. If True, the assignment will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention. name: A name for the operation (optional). Returns: The value of the variable after the update. """ if ref.dtype._is_ref_dtype: return gen_state_ops.scatter_nd_update( ref, indices, updates, use_locking, name) return ref._lazy_read(gen_state_ops.resource_scatter_nd_update( # pylint: disable=protected-access ref.handle, indices, ops.convert_to_tensor(updates, ref.dtype), name=name)) @tf_export(v1=["scatter_add"]) def scatter_add(ref, indices, updates, use_locking=False, name=None): # pylint: disable=line-too-long r"""Adds sparse updates to the variable referenced by `resource`. This operation computes ```python # Scalar indices ref[indices, ...] += updates[...] # Vector indices (for each i) ref[indices[i], ...] += updates[i, ...] # High rank indices (for each i, ..., j) ref[indices[i, ..., j], ...] += updates[i, ..., j, ...] ``` This operation outputs `ref` after the update is done. This makes it easier to chain operations that need to use the updated value. Duplicate entries are handled correctly: if multiple `indices` reference the same location, their contributions add. Requires `updates.shape = indices.shape + ref.shape[1:]`. <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"> <img style="width:100%" src='https://www.tensorflow.org/images/ScatterAdd.png' alt> </div> Args: ref: A `Variable`. indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. A tensor of indices into the first dimension of `ref`. updates: A `Tensor`. Must have the same type as `ref`. 
A tensor of updated values to store in `ref`. use_locking: An optional `bool`. Defaults to `False`. If True, the assignment will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention. name: A name for the operation (optional). Returns: Same as `ref`. Returned as a convenience for operations that want to use the updated values after the update is done. """ if ref.dtype._is_ref_dtype: return gen_state_ops.scatter_add(ref, indices, updates, use_locking=use_locking, name=name) return ref._lazy_read(gen_resource_variable_ops.resource_scatter_add( # pylint: disable=protected-access ref.handle, indices, ops.convert_to_tensor(updates, ref.dtype), name=name)) @tf_export(v1=["scatter_nd_add"]) def scatter_nd_add(ref, indices, updates, use_locking=False, name=None): r"""Applies sparse addition to individual values or slices in a Variable. `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. `indices` must be integer tensor, containing indices into `ref`. It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. The innermost dimension of `indices` (with length `K`) corresponds to indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th dimension of `ref`. `updates` is `Tensor` of rank `Q-1+P-K` with shape: ``` [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]] ``` For example, say we want to add 4 scattered elements to a rank-1 tensor to 8 elements. In Python, that addition would look like this: ```python ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8]) indices = tf.constant([[4], [3], [1], [7]]) updates = tf.constant([9, 10, 11, 12]) add = tf.compat.v1.scatter_nd_add(ref, indices, updates) with tf.compat.v1.Session() as sess: print sess.run(add) ``` The resulting update to ref would look like this: [1, 13, 3, 14, 14, 6, 7, 20] See `tf.scatter_nd` for more details about how to make updates to slices. Args: ref: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. A mutable Tensor. Should be from a Variable node. indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. A tensor of indices into ref. updates: A `Tensor`. Must have the same type as `ref`. A tensor of updated values to add to ref. use_locking: An optional `bool`. Defaults to `False`. If True, the assignment will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention. name: A name for the operation (optional). Returns: A mutable `Tensor`. Has the same type as `ref`. """ if ref.dtype._is_ref_dtype: return gen_state_ops.scatter_nd_add( ref, indices, updates, use_locking, name) return ref._lazy_read(gen_state_ops.resource_scatter_nd_add( # pylint: disable=protected-access ref.handle, indices, ops.convert_to_tensor(updates, ref.dtype), name=name)) @tf_export(v1=["scatter_sub"]) def scatter_sub(ref, indices, updates, use_locking=False, name=None): r"""Subtracts sparse updates to a variable reference. ```python # Scalar indices ref[indices, ...] -= updates[...] # Vector indices (for each i) ref[indices[i], ...] -= updates[i, ...] # High rank indices (for each i, ..., j) ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...] ``` This operation outputs `ref` after the update is done. This makes it easier to chain operations that need to use the reset value. 
Duplicate entries are handled correctly: if multiple `indices` reference the same location, their (negated) contributions add. Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"> <img style="width:100%" src="https://www.tensorflow.org/images/ScatterSub.png" alt> </div> Args: ref: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. Should be from a `Variable` node. indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. A tensor of indices into the first dimension of `ref`. updates: A `Tensor`. Must have the same type as `ref`. A tensor of updated values to subtract from `ref`. use_locking: An optional `bool`. Defaults to `False`. If True, the subtraction will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention. name: A name for the operation (optional). Returns: A mutable `Tensor`. Has the same type as `ref`. """ if ref.dtype._is_ref_dtype: return gen_state_ops.scatter_sub(ref, indices, updates, use_locking=use_locking, name=name) return ref._lazy_read(gen_resource_variable_ops.resource_scatter_sub( # pylint: disable=protected-access ref.handle, indices, ops.convert_to_tensor(updates, ref.dtype), name=name)) @tf_export(v1=["scatter_nd_sub"]) def scatter_nd_sub(ref, indices, updates, use_locking=False, name=None): r"""Applies sparse subtraction to individual values or slices in a Variable. `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. `indices` must be integer tensor, containing indices into `ref`. It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. The innermost dimension of `indices` (with length `K`) corresponds to indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th dimension of `ref`. `updates` is `Tensor` of rank `Q-1+P-K` with shape: ``` [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]] ``` For example, say we want to subtract 4 scattered elements from a rank-1 tensor with 8 elements. In Python, that update would look like this: ```python ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8]) indices = tf.constant([[4], [3], [1] ,[7]]) updates = tf.constant([9, 10, 11, 12]) op = tf.compat.v1.scatter_nd_sub(ref, indices, updates) with tf.compat.v1.Session() as sess: print sess.run(op) ``` The resulting update to ref would look like this: [1, -9, 3, -6, -6, 6, 7, -4] See `tf.scatter_nd` for more details about how to make updates to slices. Args: ref: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. A mutable Tensor. Should be from a Variable node. indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. A tensor of indices into ref. updates: A `Tensor`. Must have the same type as `ref`. A tensor of updated values to add to ref. use_locking: An optional `bool`. Defaults to `False`. An optional bool. Defaults to True. If True, the assignment will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention. name: A name for the operation (optional). Returns: A mutable `Tensor`. Has the same type as `ref`. 
""" if ref.dtype._is_ref_dtype: return gen_state_ops.scatter_nd_sub( ref, indices, updates, use_locking, name) return ref._lazy_read(gen_state_ops.resource_scatter_nd_sub( # pylint: disable=protected-access ref.handle, indices, ops.convert_to_tensor(updates, ref.dtype), name=name)) @tf_export(v1=["scatter_mul"]) def scatter_mul(ref, indices, updates, use_locking=False, name=None): # pylint: disable=line-too-long r"""Multiplies sparse updates into a variable reference. This operation computes ```python # Scalar indices ref[indices, ...] *= updates[...] # Vector indices (for each i) ref[indices[i], ...] *= updates[i, ...] # High rank indices (for each i, ..., j) ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...] ``` This operation outputs `ref` after the update is done. This makes it easier to chain operations that need to use the reset value. Duplicate entries are handled correctly: if multiple `indices` reference the same location, their contributions multiply. Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. Args: ref: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. Should be from a `Variable` node. indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. A tensor of indices into the first dimension of `ref`. updates: A `Tensor`. Must have the same type as `ref`. A tensor of updated values to multiply to `ref`. use_locking: An optional `bool`. Defaults to `False`. If True, the operation will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention. name: A name for the operation (optional). Returns: A mutable `Tensor`. Has the same type as `ref`. """ if ref.dtype._is_ref_dtype: return gen_state_ops.scatter_mul(ref, indices, updates, use_locking=use_locking, name=name) return ref._lazy_read(gen_resource_variable_ops.resource_scatter_mul( # pylint: disable=protected-access ref.handle, indices, ops.convert_to_tensor(updates, ref.dtype), name=name)) @tf_export(v1=["scatter_div"]) def scatter_div(ref, indices, updates, use_locking=False, name=None): # pylint: disable=line-too-long r"""Divides a variable reference by sparse updates. This operation computes ```python # Scalar indices ref[indices, ...] /= updates[...] # Vector indices (for each i) ref[indices[i], ...] /= updates[i, ...] # High rank indices (for each i, ..., j) ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...] ``` This operation outputs `ref` after the update is done. This makes it easier to chain operations that need to use the reset value. Duplicate entries are handled correctly: if multiple `indices` reference the same location, their contributions divide. Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. Args: ref: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. Should be from a `Variable` node. indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. A tensor of indices into the first dimension of `ref`. updates: A `Tensor`. Must have the same type as `ref`. A tensor of values that `ref` is divided by. use_locking: An optional `bool`. Defaults to `False`. 
If True, the operation will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention. name: A name for the operation (optional). Returns: A mutable `Tensor`. Has the same type as `ref`. """ if ref.dtype._is_ref_dtype: return gen_state_ops.scatter_div(ref, indices, updates, use_locking=use_locking, name=name) return ref._lazy_read(gen_resource_variable_ops.resource_scatter_div( # pylint: disable=protected-access ref.handle, indices, ops.convert_to_tensor(updates, ref.dtype), name=name)) @tf_export(v1=["scatter_max"]) def scatter_max(ref, indices, updates, use_locking=False, name=None): # pylint: disable=line-too-long r"""Reduces sparse updates into a variable reference using the `max` operation. This operation computes # Scalar indices ref[indices, ...] = max(ref[indices, ...], updates[...]) # Vector indices (for each i) ref[indices[i], ...] = max(ref[indices[i], ...], updates[i, ...]) # High rank indices (for each i, ..., j) ref[indices[i, ..., j], ...] = max(ref[indices[i, ..., j], ...], updates[i, ..., j, ...]) This operation outputs `ref` after the update is done. This makes it easier to chain operations that need to use the reset value. Duplicate entries are handled correctly: if multiple `indices` reference the same location, their contributions combine. Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"> <img style="width:100%" src="https://www.tensorflow.org/images/ScatterAdd.png" alt> </div> Args: ref: A mutable `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`, `int32`, `int64`. Should be from a `Variable` node. indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. A tensor of indices into the first dimension of `ref`. updates: A `Tensor`. Must have the same type as `ref`. A tensor of updated values to reduce into `ref`. use_locking: An optional `bool`. Defaults to `False`. If True, the update will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention. name: A name for the operation (optional). Returns: A mutable `Tensor`. Has the same type as `ref`. """ if ref.dtype._is_ref_dtype: return gen_state_ops.scatter_max(ref, indices, updates, use_locking=use_locking, name=name) return ref._lazy_read(gen_resource_variable_ops.resource_scatter_max( # pylint: disable=protected-access ref.handle, indices, ops.convert_to_tensor(updates, ref.dtype), name=name)) @tf_export(v1=["scatter_min"]) def scatter_min(ref, indices, updates, use_locking=False, name=None): # pylint: disable=line-too-long r"""Reduces sparse updates into a variable reference using the `min` operation. This operation computes # Scalar indices ref[indices, ...] = min(ref[indices, ...], updates[...]) # Vector indices (for each i) ref[indices[i], ...] = min(ref[indices[i], ...], updates[i, ...]) # High rank indices (for each i, ..., j) ref[indices[i, ..., j], ...] = min(ref[indices[i, ..., j], ...], updates[i, ..., j, ...]) This operation outputs `ref` after the update is done. This makes it easier to chain operations that need to use the reset value. Duplicate entries are handled correctly: if multiple `indices` reference the same location, their contributions combine. Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. 
<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"> <img style="width:100%" src="https://www.tensorflow.org/images/ScatterAdd.png" alt> </div> Args: ref: A mutable `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`, `int32`, `int64`. Should be from a `Variable` node. indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. A tensor of indices into the first dimension of `ref`. updates: A `Tensor`. Must have the same type as `ref`. A tensor of updated values to reduce into `ref`. use_locking: An optional `bool`. Defaults to `False`. If True, the update will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention. name: A name for the operation (optional). Returns: A mutable `Tensor`. Has the same type as `ref`. """ if ref.dtype._is_ref_dtype: return gen_state_ops.scatter_min(ref, indices, updates, use_locking=use_locking, name=name) return ref._lazy_read(gen_resource_variable_ops.resource_scatter_min( # pylint: disable=protected-access ref.handle, indices, ops.convert_to_tensor(updates, ref.dtype), name=name)) @tf_export(v1=["batch_scatter_update"]) @deprecation.deprecated( "2018-11-29", "Use the batch_scatter_update method of Variable instead.") def batch_scatter_update(ref, indices, updates, use_locking=True, name=None): """Generalization of `tf.compat.v1.scatter_update` to axis different than 0. Analogous to `batch_gather`. This assumes that `ref`, `indices` and `updates` have a series of leading dimensions that are the same for all of them, and the updates are performed on the last dimension of indices. In other words, the dimensions should be the following: `num_prefix_dims = indices.ndims - 1` `batch_dim = num_prefix_dims + 1` `updates.shape = indices.shape + var.shape[batch_dim:]` where `updates.shape[:num_prefix_dims]` `== indices.shape[:num_prefix_dims]` `== var.shape[:num_prefix_dims]` And the operation performed can be expressed as: `var[i_1, ..., i_n, indices[i_1, ..., i_n, j]] = updates[i_1, ..., i_n, j]` When indices is a 1D tensor, this operation is equivalent to `tf.compat.v1.scatter_update`. To avoid this operation there would be 2 alternatives: 1) Reshaping the variable by merging the first `ndims` dimensions. However, this is not possible because `tf.reshape` returns a Tensor, which we cannot use `tf.compat.v1.scatter_update` on. 2) Looping over the first `ndims` of the variable and using `tf.compat.v1.scatter_update` on the subtensors that result of slicing the first dimension. This is a valid option for `ndims = 1`, but less efficient than this implementation. See also `tf.compat.v1.scatter_update` and `tf.compat.v1.scatter_nd_update`. Args: ref: `Variable` to scatter onto. indices: Tensor containing indices as described above. updates: Tensor of updates to apply to `ref`. use_locking: Boolean indicating whether to lock the writing operation. name: Optional scope name string. Returns: Ref to `variable` after it has been modified. Raises: ValueError: If the initial `ndims` of `ref`, `indices`, and `updates` are not the same. 
""" with ops.name_scope(name): indices = ops.convert_to_tensor(indices, name="indices") indices_shape = array_ops.shape(indices) indices_dimensions = indices.get_shape().ndims if indices_dimensions is None: raise ValueError("batch_gather does not allow indices with unknown " "shape.") nd_indices = array_ops.expand_dims(indices, axis=-1) nd_indices_list = [] # Scatter ND requires indices to have an additional dimension, in which the # coordinates of the updated things are specified. For this to be adapted to # the scatter_update with several leading dimensions, we simply make use of # a tf.range for all the leading dimensions followed by concat of all the # coordinates we created with the original indices. # For example if indices.shape = [2, 3, 4], we should generate the following # indices for tf.compat.v1.scatter_nd_update: # nd_indices[:, :, 0] = [[0, 0, 0], [1, 1, 1]] # nd_indices[:, :, 1] = [[0, 1, 2], [0, 1, 2]] # nd_indices[:, :, 2] = indices for dimension in range(indices_dimensions - 1): # In this loop we generate the following for the example (one for each # iteration). # nd_indices[:, :, 0] = [[0, 0, 0], [1, 1, 1]] # nd_indices[:, :, 1] = [[0, 1, 2], [0, 1, 2]] # This is done at every iteration with a tf.range over the size of the # i-th dimension and using broadcasting over the desired shape. dimension_size = indices_shape[dimension] shape_to_broadcast = [1] * (indices_dimensions + 1) shape_to_broadcast[dimension] = dimension_size dimension_range = array_ops.reshape( gen_math_ops._range(0, dimension_size, 1), shape_to_broadcast) if dimension_range.dtype.base_dtype != nd_indices.dtype: dimension_range = gen_math_ops.cast(dimension_range, nd_indices.dtype) nd_indices_list.append( dimension_range * array_ops.ones_like(nd_indices)) # Add the original indices at the end, as described above, and concat. nd_indices_list.append(nd_indices) final_indices = array_ops.concat(nd_indices_list, axis=-1) return scatter_nd_update( ref, final_indices, updates, use_locking=use_locking)
tensorflow-master
tensorflow/python/ops/state_ops.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilities for writing test involving spectral_ops.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import ops from tensorflow.python.platform import test def _use_eigen_kernels(): use_eigen_kernels = False # Eigen kernels are default if test.is_gpu_available(cuda_only=True): use_eigen_kernels = False return use_eigen_kernels def fft_kernel_label_map(): """Returns a generator overriding kernel selection. This is used to force testing of the eigen kernels, even when they are not the default registered kernels. Returns: A generator in which to wrap every test. """ if _use_eigen_kernels(): d = dict([(op, "eigen") for op in [ "FFT", "FFT2D", "FFT3D", "IFFT", "IFFT2D", "IFFT3D", "IRFFT", "IRFFT2D", "IRFFT3D", "RFFT", "RFFT2D", "RFFT3D" ]]) return ops.get_default_graph()._kernel_label_map(d) # pylint: disable=protected-access else: return ops.get_default_graph()._kernel_label_map({}) # pylint: disable=protected-access
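# --- Illustrative usage sketch (not part of the original module) -------------
# Hedged example of how a test might wrap FFT ops in `fft_kernel_label_map` so
# that, when eigen kernels are requested, the ops created inside the context
# are bound to them. The helper below is hypothetical; it assumes TF1 graph
# mode and that `tf.signal.fft` is available in this build.
def _example_usage():  # hypothetical, illustration only
  import numpy as np
  import tensorflow.compat.v1 as tf  # local import keeps the sketch self-contained
  with fft_kernel_label_map():
    # Ops must be created inside the context for the kernel labels to apply.
    x = tf.constant(np.exp(2j * np.pi * np.arange(8) / 8).astype(np.complex64))
    y = tf.signal.fft(x)
    with tf.Session() as sess:
      return sess.run(y)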
tensorflow-master
tensorflow/python/ops/spectral_ops_test_util.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Gradients for operators defined in random_ops.py.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import gen_random_ops from tensorflow.python.ops import math_ops def add_leading_unit_dimensions(x, num_dimensions): new_shape = array_ops.concat( [array_ops.ones([num_dimensions], dtype=dtypes.int32), array_ops.shape(x)], axis=0) return array_ops.reshape(x, new_shape) @ops.RegisterGradient("RandomGamma") def _RandomGammaGrad(op, grad): # pylint: disable=invalid-name """Returns the gradient of a Gamma sample w.r.t. alpha. The gradient is computed using implicit differentiation, see "Implicit Reparameterization Gradients" (https://arxiv.org/abs/1805.08498). Args: op: A `RandomGamma` operation. We assume that the inputs to the operation are `shape` and `alpha` tensors, and the output is the `sample` tensor. grad: The incoming gradient `dloss / dsample` of the same shape as `op.outputs[0]`. Returns: A `Tensor` with derivatives `dloss / dalpha` """ shape = op.inputs[0] alpha = op.inputs[1] sample = op.outputs[0] with ops.control_dependencies([grad]): # Make the parameters alpha broadcastable with samples by appending # unit dimensions. num_sample_dimensions = array_ops.shape(shape)[0] alpha_broadcastable = add_leading_unit_dimensions( alpha, num_sample_dimensions) partial_a = gen_random_ops.random_gamma_grad(alpha_broadcastable, sample) # The first input is shape; the second input is alpha. return (None, math_ops.reduce_sum( grad * partial_a, axis=math_ops.range(num_sample_dimensions)))
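# --- Illustrative usage sketch (not part of the original module) -------------
# Hedged example of the gradient registered above: differentiating a Gamma
# sample with respect to its `alpha` parameter via implicit reparameterization.
# The helper is hypothetical and assumes TF1 graph mode.
def _example_gamma_alpha_gradient():  # hypothetical, illustration only
  import tensorflow.compat.v1 as tf  # local import keeps the sketch self-contained
  alpha = tf.constant([2.0, 3.0])
  # `tf.random_gamma` emits the RandomGamma op handled by _RandomGammaGrad.
  sample = tf.random_gamma(shape=[5], alpha=alpha)   # sample shape: [5, 2]
  loss = tf.reduce_sum(sample)
  # The gradient w.r.t. the `shape` input is None; w.r.t. `alpha` it is the
  # reduction of grad * d(sample)/d(alpha) over the sample dimensions.
  (grad_alpha,) = tf.gradients(loss, [alpha])
  return grad_alpha  # shape [2]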
tensorflow-master
tensorflow/python/ops/random_grad.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for the sort wrapper.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_util from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import random_ops from tensorflow.python.ops import sort_ops from tensorflow.python.platform import test class SortTest(test.TestCase): @test_util.run_deprecated_v1 def testRandom_lowDimensionality(self): self._testRandom_lowDimensionality(negative_axis=False) @test_util.run_deprecated_v1 def testRandom_lowDimensionality_negative(self): self._testRandom_lowDimensionality(negative_axis=True) def _testRandom_lowDimensionality(self, negative_axis): np.random.seed(42) for _ in range(20): rank = np.random.randint(1, 3) shape = [np.random.randint(0, 20) for _ in range(rank)] arr = np.random.random(shape) sort_axis = np.random.choice(rank) if negative_axis: sort_axis = -1 - sort_axis with self.cached_session(): self.assertAllEqual( np.sort(arr, axis=sort_axis), sort_ops.sort(constant_op.constant(arr), axis=sort_axis).eval()) @test_util.run_deprecated_v1 def testRandom_highDimensionality(self): np.random.seed(100) for _ in range(20): rank = np.random.randint(5, 15) shape = [np.random.randint(1, 4) for _ in range(rank)] arr = np.random.random(shape) sort_axis = np.random.choice(rank) with self.cached_session(): self.assertAllEqual( np.sort(arr, axis=sort_axis), sort_ops.sort(constant_op.constant(arr), axis=sort_axis).eval()) @test_util.run_deprecated_v1 def testScalar(self): # Create an empty scalar where the static shape is unknown. zeros_length_1 = array_ops.zeros( random_ops.random_uniform([1], minval=0, maxval=1, dtype=dtypes.int32), dtype=dtypes.int32) scalar = array_ops.zeros(zeros_length_1) sort = sort_ops.sort(scalar) with self.cached_session(): with self.assertRaises(errors.InvalidArgumentError): sort.eval() @test_util.run_deprecated_v1 def testNegativeOutOfBounds_staticShape(self): arr = constant_op.constant([3, 4, 5]) with self.assertRaises(ValueError): sort_ops.sort(arr, axis=-4) @test_util.run_deprecated_v1 def testDescending(self): arr = np.random.random((10, 5, 5)) with self.cached_session(): self.assertAllEqual( np.sort(arr, axis=0)[::-1], sort_ops.sort( constant_op.constant(arr), axis=0, direction='DESCENDING').eval()) @test_util.run_deprecated_v1 def testSort_staticallyKnownRank_constantTransposition(self): # The transposition array should be a constant if the rank of "values" is # statically known. 
tensor = random_ops.random_uniform( # Rank is statically known to be 5, but the dimension lengths are not # known. random_ops.random_uniform( shape=(5,), minval=0, maxval=10, dtype=dtypes.int32)) sort_ops.sort(tensor, axis=1) transposition = ( ops.get_default_graph().get_tensor_by_name('sort/transposition:0')) self.assertFalse(tensor_util.constant_value(transposition) is None) self.assertAllEqual( # Swaps "1" and "4" to put "1" at the end. tensor_util.constant_value(transposition), [0, 4, 2, 3, 1]) @test_util.run_deprecated_v1 def testArgsort_1d(self): arr = np.random.random(42) with self.cached_session(): self.assertAllEqual( np.sort(arr), array_ops.gather(arr, sort_ops.argsort(arr)).eval()) @test_util.run_deprecated_v1 def testArgsort(self): arr = np.random.random((5, 6, 7, 8)) for axis in range(4): with self.cached_session(): self.assertAllEqual( np.argsort(arr, axis=axis), sort_ops.argsort(arr, axis=axis).eval()) if __name__ == '__main__': test.main()
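# --- Illustrative usage sketch (not part of the original test file) ----------
# Hedged standalone example of the API exercised by the tests above:
# `sort_ops.sort` and `sort_ops.argsort` along a chosen axis. The helper is
# hypothetical and assumes a TF1 graph/session context.
def _example_sort_and_argsort():  # hypothetical, illustration only
  values = constant_op.constant([[3., 1., 2.],
                                 [6., 5., 4.]])
  ascending = sort_ops.sort(values, axis=-1)                         # [[1,2,3],[4,5,6]]
  descending = sort_ops.sort(values, axis=-1, direction='DESCENDING')
  order = sort_ops.argsort(values, axis=-1)                          # [[1,2,0],[2,1,0]]
  return ascending, descending, order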
tensorflow-master
tensorflow/python/ops/sort_ops_test.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for cross entropy related functionality in tensorflow.ops.nn.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import math import numpy as np from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import test_util from tensorflow.python.ops import gradient_checker from tensorflow.python.ops import gradients_impl from tensorflow.python.ops import nn_impl import tensorflow.python.ops.nn_grad # pylint: disable=unused-import from tensorflow.python.platform import test exp = math.exp log = math.log class SigmoidCrossEntropyWithLogitsTest(test.TestCase): def _SigmoidCrossEntropyWithLogits(self, logits, targets): assert len(logits) == len(targets) pred = [1 / (1 + exp(-x)) for x in logits] eps = 0.0001 pred = [min(max(p, eps), 1 - eps) for p in pred] return [-z * log(y) - (1 - z) * log(1 - y) for y, z in zip(pred, targets)] def _Inputs(self, x=None, y=None, dtype=dtypes.float64, sizes=None): x = [-100, -2, -2, 0, 2, 2, 2, 100] if x is None else x y = [0, 0, 1, 0, 0, 1, 0.5, 1] if y is None else y assert len(x) == len(y) sizes = sizes if sizes else [len(x)] logits = constant_op.constant(x, shape=sizes, dtype=dtype, name="logits") targets = constant_op.constant(y, shape=sizes, dtype=dtype, name="targets") losses = np.array(self._SigmoidCrossEntropyWithLogits(x, y)).reshape(*sizes) return logits, targets, losses @test_util.run_deprecated_v1 def testConstructionNamed(self): with self.cached_session(): logits, targets, _ = self._Inputs() loss = nn_impl.sigmoid_cross_entropy_with_logits( labels=targets, logits=logits, name="mylogistic") self.assertEqual("mylogistic", loss.op.name) def testLogisticOutput(self): for use_gpu in [True, False]: for dtype in [dtypes.float32, dtypes.float16]: with self.cached_session(use_gpu=use_gpu): logits, targets, losses = self._Inputs(dtype=dtype) loss = nn_impl.sigmoid_cross_entropy_with_logits( labels=targets, logits=logits) np_loss = np.array(losses).astype(np.float32) tf_loss = self.evaluate(loss) self.assertAllClose(np_loss, tf_loss, atol=0.001) def testLogisticOutputMultiDim(self): for use_gpu in [True, False]: for dtype in [dtypes.float32, dtypes.float16]: with self.cached_session(use_gpu=use_gpu): logits, targets, losses = self._Inputs(dtype=dtype, sizes=[2, 2, 2]) loss = nn_impl.sigmoid_cross_entropy_with_logits( labels=targets, logits=logits) np_loss = np.array(losses).astype(np.float32) tf_loss = self.evaluate(loss) self.assertAllClose(np_loss, tf_loss, atol=0.001) @test_util.run_deprecated_v1 def testGradient(self): sizes = [4, 2] with self.cached_session(): logits, targets, _ = self._Inputs(sizes=sizes) loss = nn_impl.sigmoid_cross_entropy_with_logits( labels=targets, logits=logits) err = gradient_checker.compute_gradient_error(logits, 
sizes, loss, sizes) print("logistic loss gradient err = ", err) self.assertLess(err, 1e-7) @test_util.run_deprecated_v1 def testGradientAtZero(self): with self.cached_session(): logits = constant_op.constant([0.0, 0.0], dtype=dtypes.float64) targets = constant_op.constant([0.0, 1.0], dtype=dtypes.float64) loss = nn_impl.sigmoid_cross_entropy_with_logits( labels=targets, logits=logits) grads = gradients_impl.gradients(loss, logits)[0].eval() self.assertAllClose(grads, [0.5, -0.5]) def testShapeError(self): with self.assertRaisesRegexp(ValueError, "must have the same shape"): nn_impl.sigmoid_cross_entropy_with_logits(labels=[1, 2, 3], logits=[[2, 1]]) class WeightedCrossEntropyTest(test.TestCase): def _WeightedCrossEntropy(self, logits, targets, pos_coeff): assert len(logits) == len(targets) pred = [1 / (1 + exp(-x)) for x in logits] eps = 0.0001 pred = [min(max(p, eps), 1 - eps) for p in pred] return [ -z * pos_coeff * log(y) - (1 - z) * log(1 - y) for y, z in zip(pred, targets) ] def _Inputs(self, x=None, y=None, q=3.0, dtype=dtypes.float64, sizes=None): x = [-100, -2, -2, 0, 2, 2, 2, 100] if x is None else x y = [0, 0, 1, 0, 0, 1, 0.5, 1] if y is None else y assert len(x) == len(y) sizes = sizes if sizes else [len(x)] logits = constant_op.constant(x, shape=sizes, dtype=dtype, name="logits") targets = constant_op.constant(y, shape=sizes, dtype=dtype, name="targets") losses = np.array(self._WeightedCrossEntropy(x, y, q)).reshape(*sizes) return logits, targets, q, losses @test_util.run_deprecated_v1 def testConstructionNamed(self): with self.cached_session(): logits, targets, pos_weight, _ = self._Inputs() loss = nn_impl.weighted_cross_entropy_with_logits( targets=targets, logits=logits, pos_weight=pos_weight, name="mybce") self.assertEqual("mybce", loss.op.name) def testOutput(self): for use_gpu in [True, False]: with self.cached_session(use_gpu=use_gpu): logits, targets, pos_weight, losses = self._Inputs(dtype=dtypes.float32) loss = nn_impl.weighted_cross_entropy_with_logits( targets=targets, logits=logits, pos_weight=pos_weight) np_loss = np.array(losses).astype(np.float32) tf_loss = self.evaluate(loss) self.assertAllClose(np_loss, tf_loss, atol=0.001) def testOutputMultiDim(self): for use_gpu in [True, False]: with self.cached_session(use_gpu=use_gpu): logits, targets, pos_weight, losses = self._Inputs( dtype=dtypes.float32, sizes=[2, 2, 2]) loss = nn_impl.weighted_cross_entropy_with_logits( targets=targets, logits=logits, pos_weight=pos_weight) np_loss = np.array(losses).astype(np.float32) tf_loss = self.evaluate(loss) self.assertAllClose(np_loss, tf_loss, atol=0.001) @test_util.run_deprecated_v1 def testGradient(self): sizes = [4, 2] with self.cached_session(): logits, targets, pos_weight, _ = self._Inputs(sizes=sizes) loss = nn_impl.weighted_cross_entropy_with_logits( targets=targets, logits=logits, pos_weight=pos_weight) err = gradient_checker.compute_gradient_error(logits, sizes, loss, sizes) print("logistic loss gradient err = ", err) self.assertLess(err, 1e-7) def testShapeError(self): with self.assertRaisesRegexp(ValueError, "must have the same shape"): nn_impl.weighted_cross_entropy_with_logits( targets=[1, 2, 3], logits=[[2, 1]], pos_weight=2.0) if __name__ == "__main__": test.main()
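# --- Illustrative usage sketch (not part of the original test file) ----------
# Hedged standalone example of the formula the tests above verify:
#   loss = -z * log(sigmoid(x)) - (1 - z) * log(1 - sigmoid(x)),
# and the weighted variant, which scales the -z * log(sigmoid(x)) term by
# `pos_weight`. The helper is hypothetical; names are illustrative only.
def _example_sigmoid_cross_entropy():  # hypothetical, illustration only
  logits = constant_op.constant([0.0, 2.0], dtype=dtypes.float64)
  labels = constant_op.constant([1.0, 0.0], dtype=dtypes.float64)
  # Element 0: -log(sigmoid(0)) = log(2) ~ 0.693.
  # Element 1: -log(1 - sigmoid(2)) ~ 2.127.
  plain = nn_impl.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits)
  # Same logits/labels, but positive (label 1) terms are weighted 3x.
  weighted = nn_impl.weighted_cross_entropy_with_logits(
      targets=labels, logits=logits, pos_weight=3.0)
  return plain, weighted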
tensorflow-master
tensorflow/python/ops/nn_xent_test.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Provides templates which allow variable sharing.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools import traceback from tensorflow.python.eager import context from tensorflow.python.eager import function from tensorflow.python.framework import ops from tensorflow.python.ops import variable_scope from tensorflow.python.platform import tf_logging as logging from tensorflow.python.training.tracking import base as trackable from tensorflow.python.training.tracking import util as trackable_util from tensorflow.python.util import tf_contextlib from tensorflow.python.util import tf_decorator from tensorflow.python.util.deprecation import deprecated from tensorflow.python.util.tf_export import tf_export __all__ = ["make_template"] @tf_export(v1=["make_template"]) def make_template(name_, func_, create_scope_now_=False, unique_name_=None, custom_getter_=None, **kwargs): """Given an arbitrary function, wrap it so that it does variable sharing. This wraps `func_` in a Template and partially evaluates it. Templates are functions that create variables the first time they are called and reuse them thereafter. In order for `func_` to be compatible with a `Template` it must have the following properties: * The function should create all trainable variables and any variables that should be reused by calling `tf.compat.v1.get_variable`. If a trainable variable is created using `tf.Variable`, then a ValueError will be thrown. Variables that are intended to be locals can be created by specifying `tf.Variable(..., trainable=false)`. * The function may use variable scopes and other templates internally to create and reuse variables, but it shouldn't use `tf.compat.v1.global_variables` to capture variables that are defined outside of the scope of the function. * Internal scopes and variable names should not depend on any arguments that are not supplied to `make_template`. In general you will get a ValueError telling you that you are trying to reuse a variable that doesn't exist if you make a mistake. In the following example, both `z` and `w` will be scaled by the same `y`. It is important to note that if we didn't assign `scalar_name` and used a different name for z and w that a `ValueError` would be thrown because it couldn't reuse the variable. ```python def my_op(x, scalar_name): var1 = tf.compat.v1.get_variable(scalar_name, shape=[], initializer=tf.compat.v1.constant_initializer(1)) return x * var1 scale_by_y = tf.compat.v1.make_template('scale_by_y', my_op, scalar_name='y') z = scale_by_y(input1) w = scale_by_y(input2) ``` As a safe-guard, the returned function will raise a `ValueError` after the first call if trainable variables are created by calling `tf.Variable`. If all of these are true, then 2 properties are enforced by the template: 1. 
Calling the same template multiple times will share all non-local variables. 2. Two different templates are guaranteed to be unique, unless you reenter the same variable scope as the initial definition of a template and redefine it. An examples of this exception: ```python def my_op(x, scalar_name): var1 = tf.compat.v1.get_variable(scalar_name, shape=[], initializer=tf.compat.v1.constant_initializer(1)) return x * var1 with tf.compat.v1.variable_scope('scope') as vs: scale_by_y = tf.compat.v1.make_template('scale_by_y', my_op, scalar_name='y') z = scale_by_y(input1) w = scale_by_y(input2) # Creates a template that reuses the variables above. with tf.compat.v1.variable_scope(vs, reuse=True): scale_by_y2 = tf.compat.v1.make_template('scale_by_y', my_op, scalar_name='y') z2 = scale_by_y2(input1) w2 = scale_by_y2(input2) ``` Depending on the value of `create_scope_now_`, the full variable scope may be captured either at the time of first call or at the time of construction. If this option is set to True, then all Tensors created by repeated calls to the template will have an extra trailing _N+1 to their name, as the first time the scope is entered in the Template constructor no Tensors are created. Note: `name_`, `func_` and `create_scope_now_` have a trailing underscore to reduce the likelihood of collisions with kwargs. Args: name_: A name for the scope created by this template. If necessary, the name will be made unique by appending `_N` to the name. func_: The function to wrap. create_scope_now_: Boolean controlling whether the scope should be created when the template is constructed or when the template is called. Default is False, meaning the scope is created when the template is called. unique_name_: When used, it overrides name_ and is not made unique. If a template of the same scope/unique_name already exists and reuse is false, an error is raised. Defaults to None. custom_getter_: Optional custom getter for variables used in `func_`. See the `tf.compat.v1.get_variable` `custom_getter` documentation for more information. **kwargs: Keyword arguments to apply to `func_`. Returns: A function to encapsulate a set of variables which should be created once and reused. An enclosing scope will be created either when `make_template` is called or when the result is called, depending on the value of `create_scope_now_`. Regardless of the value, the first time the template is called it will enter the scope with no reuse, and call `func_` to create variables, which are guaranteed to be unique. All subsequent calls will re-enter the scope and reuse those variables. Raises: ValueError: if `name_` is None. """ return make_template_internal( name_, func_, create_scope_now_, unique_name_, custom_getter_, create_graph_function_=False, **kwargs) def make_template_internal(name_, func_, create_scope_now_=False, unique_name_=None, custom_getter_=None, create_graph_function_=False, **kwargs): """Make a template, optionally compiling func_ into a graph function. See `make_template` for full documentation. Args: name_: A name for the scope created by this template. If necessary, the name will be made unique by appending `_N` to the name. func_: The function to wrap. create_scope_now_: Boolean controlling whether the scope should be created when the template is constructed or when the template is called. Default is False, meaning the scope is created when the template is called. unique_name_: When used, it overrides name_ and is not made unique. 
If a template of the same scope/unique_name already exists and reuse is false, an error is raised. Defaults to None. If executing eagerly, must be None. custom_getter_: Optional custom getter for variables used in `func_`. See the `tf.compat.v1.get_variable` `custom_getter` documentation for more information. create_graph_function_: When True, `func_` will be executed as a graph function. This implies that `func_` must satisfy the properties that `function.defun` requires of functions; see the documentation of `function.defun` for details. When executing eagerly, setting this flag to True can improve performance. Regardless of whether eager execution is enabled, enabling this flag gives the caller access to graph-function semantics, i.e., accesses to variables are totally ordered and side-effecting ops are not pruned. **kwargs: Keyword arguments to apply to `func_`. Returns: A function to encapsulate a set of variables which should be created once and reused. An enclosing scope will be created either when `make_template` is called or when the result is called, depending on the value of `create_scope_now_`. Regardless of the value, the first time the template is called it will enter the scope with no reuse, and call `func_` to create variables, which are guaranteed to be unique. All subsequent calls will re-enter the scope and reuse those variables. Raises: ValueError: if `name_` is None. ValueError: if `unique_name_` is not None and eager execution is enabled. """ if kwargs: func_ = tf_decorator.make_decorator(func_, functools.partial(func_, **kwargs)) if context.executing_eagerly(): if unique_name_ is not None: raise ValueError( "unique_name_ cannot be used when eager execution is enabled.") return EagerTemplate( name_, func_, create_scope_now=create_scope_now_, custom_getter=custom_getter_, create_graph_function=create_graph_function_) return Template( name_, func_, create_scope_now=create_scope_now_, unique_name=unique_name_, custom_getter=custom_getter_, create_graph_function=create_graph_function_) def _skip_common_stack_elements(stacktrace, base_case): """Skips items that the target stacktrace shares with the base stacktrace.""" for i, (trace, base) in enumerate(zip(stacktrace, base_case)): if trace != base: return stacktrace[i:] return stacktrace[-1:] class Template(trackable.Trackable): """Wrap a function to aid in variable sharing. Templates are functions that create variables the first time they are called and reuse them thereafter. See `make_template` for full documentation. Note: By default, the full variable scope is captured at the time of first call. If `create_scope_now_` is passed as True to the constructor, the full scope will be captured there, but no variables will be created until the first call. """ def __init__(self, name, func, create_scope_now=False, unique_name=None, custom_getter=None, create_graph_function=False): """Creates a template for the given function. Args: name: A name for the scope created by this template. The name will be made unique by appending `_N` to it (see how `tf.compat.v1.variable_scope` treats the `default_name` for details). func: The function to apply each time. create_scope_now: Whether to create the scope at Template construction time, rather than first call. Defaults to false. Creating the scope at construction time may be more convenient if the template is to be passed through much lower level code, and you want to be sure of the scope name without knowing exactly where it will be first called.
If set to True, the scope will be created in the constructor, and all subsequent times in `__call__`, leading to a trailing numeral being added to the names of all created Tensors. If set to False, the scope will be created at the first call location. unique_name: When used, it overrides `name` and is not made unique. If a template of the same scope/unique_name already exists and reuse is false, an error is raised. Defaults to None. custom_getter: optional custom getter to pass to `variable_scope()` create_graph_function: When True, `func` will be executed as a graph function. Enabling this flag gives the caller access to graph-function semantics, i.e., accesses to variables are totally ordered and side-effecting ops are not pruned. Raises: ValueError: if `name` is None. """ if create_graph_function: self._func = function.defun(func) else: self._func = func self._stacktrace = traceback.format_stack()[:-2] self._name = name self._unique_name = unique_name self._custom_getter = custom_getter if name is None: raise ValueError("name cannot be None.") if create_scope_now: with variable_scope._pure_variable_scope( # pylint:disable=protected-access (self._unique_name or variable_scope._get_unique_variable_scope(self._name)), # pylint:disable=protected-access custom_getter=self._custom_getter) as vs: self._variable_scope = vs else: self._variable_scope = None # This variable keeps track of whether the template has been called to # completion, which is not the same as whether the scope has been created. self._variables_created = False # `MirroredStrategy` builds the graph with multiple threads. If a # `merge_call` happens within a template, multiple calls may be in progress # simultaneously. This variable keeps track of whether any call of the # template has started. self._first_call = True def _call_func(self, args, kwargs): try: if self._variables_created: vars_at_start = len( ops.get_collection_ref(ops.GraphKeys.GLOBAL_VARIABLES)) trainable_at_start = len( ops.get_collection_ref(ops.GraphKeys.TRAINABLE_VARIABLES)) result = self._func(*args, **kwargs) # Variables were previously created, implying this is not the first # time the template has been called. Check to make sure that no new # trainable variables were created this time around. trainable_variables = ops.get_collection_ref( ops.GraphKeys.TRAINABLE_VARIABLES) # If a variable that we intend to train is created as a side effect # of creating a template, then that is almost certainly an error. if trainable_at_start != len(trainable_variables): raise ValueError("Trainable variable created when calling a template " "after the first time, perhaps you used tf.Variable " "when you meant tf.get_variable: %s" % (trainable_variables[trainable_at_start:],)) # Non-trainable tracking variables are a legitimate reason why a new # variable would be created, but it is a relatively advanced use-case, # so log it. variables = ops.get_collection_ref(ops.GraphKeys.GLOBAL_VARIABLES) if vars_at_start != len(variables): logging.info( "New variables created when calling a template after " "the first time, perhaps you used tf.Variable when you " "meant tf.get_variable: %s", variables[vars_at_start:]) elif self._first_call: self._first_call = False try: # The first time we run, restore variables if necessary (via # Trackable). with trackable_util.capture_dependencies(template=self): result = self._func(*args, **kwargs) except: self._first_call = True raise self._variables_created = True else: # We are calling the template in parallel from another thread. 
result = self._func(*args, **kwargs) return result except Exception as exc: # Reraise the exception, but append the original definition to the # trace. args = exc.args if not args: arg0 = "" else: arg0 = args[0] trace = "".join( _skip_common_stack_elements(self._stacktrace, traceback.format_stack())) arg0 = "%s\n\noriginally defined at:\n%s" % (arg0, trace) new_args = [arg0] new_args.extend(args[1:]) exc.args = tuple(new_args) raise def __call__(self, *args, **kwargs): if self._variable_scope: # Only reuse variables if not on first call. with variable_scope.variable_scope( self._variable_scope, reuse=not self._first_call): return self._call_func(args, kwargs) else: # The scope was not created at construction time, so create it here. # Subsequent calls should reuse variables. with variable_scope.variable_scope( self._unique_name, self._name, custom_getter=self._custom_getter) as vs: self._variable_scope = vs return self._call_func(args, kwargs) @property def name(self): """Returns the name given to this Template.""" return self._name @property def func(self): """Returns the func given to this Template.""" return self._func @property def variable_scope(self): """Returns the variable scope object created by this Template.""" return self._variable_scope @property def variable_scope_name(self): """Returns the variable scope name created by this Template.""" if self._variable_scope: name = self._variable_scope.name if not name or name[-1] == "/": return name else: # To prevent partial matches on the scope_name, we add '/' at the end. return name + "/" @property def variables(self): """Returns the list of global and local variables created by the Template.""" return self.global_variables + self.local_variables @property def trainable_variables(self): """Returns the list of trainable variables created by the Template.""" if self._variables_created: return ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES, self.variable_scope_name) else: return [] @property def non_trainable_variables(self): """Returns the list of non-trainable variables created by the Template.""" # TODO(apassos) Make sure it matches Eager when using local variables. global_variables = self.global_variables trainable_variables = set(self.trainable_variables) return [x for x in global_variables if x not in trainable_variables] @property def global_variables(self): """Returns the list of global variables created by the Template.""" if self._variables_created: return ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES, self.variable_scope_name) else: return [] @property def local_variables(self): """Returns the list of local variables created by the Template.""" if self._variables_created: return ops.get_collection(ops.GraphKeys.LOCAL_VARIABLES, self.variable_scope_name) else: return [] @property def weights(self): """List of weights/variables created by the Template.""" return self.variables @property def trainable_weights(self): """List of trainable weights/variables created by the Template.""" return self.trainable_variables @property def non_trainable_weights(self): """List of non-trainable weights/variables created by the Template.""" return self.non_trainable_variables @property @deprecated("2017-02-21", "The .var_scope property is deprecated.
Please change your " "code to use the .variable_scope property") def var_scope(self): """Returns the variable scope object created by this Template.""" return self._variable_scope class _EagerTemplateVariableStore(object): """Wrapper around EagerVariableStore to support nesting EagerTemplates.""" def __init__(self, variable_scope_name): self._variable_scope_name = variable_scope_name default = variable_scope._get_default_variable_store() # pylint: disable=protected-access if default._store_eager_variables: # pylint: disable=protected-access self._eager_variable_store = variable_scope.EagerVariableStore(default) else: self._eager_variable_store = variable_scope.EagerVariableStore() def set_variable_scope_name(self, variable_scope_name): self._variable_scope_name = variable_scope_name @tf_contextlib.contextmanager def as_default(self): try: with self._eager_variable_store.as_default(): yield finally: # Each _EagerTemplateVariableStore object lives underneath a variable # scope (see EagerTemplate.__call__). This variable scope's subscopes are # closed when the EagerTemplate object returns from __call__. For # top-level _EagerTemplateVariableStore objects, the variable store to # which the variable scope is attached is different from the # EagerVariableStore; as such it is necessary to close its subscopes # here as well. if self._variable_scope_name is None: raise RuntimeError("A variable scope must be set before an " "_EagerTemplateVariableStore object exits.") variable_scope.get_variable_scope_store().close_variable_subscopes( self._variable_scope_name) def _variables_in_scope(self, variable_list): if self._variable_scope_name is None: raise RuntimeError( "A variable scope must be set before variables can be accessed.") return [ v for v in variable_list if v.name.startswith(self._variable_scope_name + "/") ] def variables(self): return self._variables_in_scope(self._eager_variable_store.variables()) def trainable_variables(self): return self._variables_in_scope( self._eager_variable_store.trainable_variables()) def non_trainable_variables(self): return self._variables_in_scope( self._eager_variable_store.non_trainable_variables()) class EagerTemplate(Template): """Wrap a function to aid in variable sharing in Eager mode. Templates are functions that create variables the first time they are called and reuse them thereafter. See `make_template` for full documentation. Note: By default, the full variable scope is captured at the time of first call. If `create_scope_now` is passed as True to the constructor, the full scope will be captured there, but no variables will be created until the first call. """ def __init__(self, name, func, create_scope_now=False, custom_getter=None, create_graph_function=False): """Creates a template for the given function. Args: name: A name for the scope created by this template. The name will be made unique by appending `_N` to the it (see how `tf.compat.v1.variable_scope` treats the `default_name` for details). func: The function to apply each time. create_scope_now: Whether to create the scope at Template construction time, rather than first call. Defaults to false. Creating the scope at construction time may be more convenient if the template is passed through much lower level code, and you want to be sure of the scope name without knowing exactly where it will be first called. If set to True, the scope will be created in the constructor, and all subsequent times in `__call__`, leading to a trailing numeral being added to the names of all created Tensors. 
If set to False, the scope will be created at the first call location. custom_getter: optional custom getter to pass to `variable_scope()` create_graph_function: When True, `func` will be executed as a graph function. Enabling this flag allows the caller to reap the performance benefits associated with executing graphs, at the cost of sacrificing debuggability; however, not all Python functions can be compiled into graph functions. See the documentation for `function.defun` for details. Raises: RuntimeError: if eager execution is not enabled. """ if not context.executing_eagerly(): raise RuntimeError( "{} objects can only be used when eager execution is enabled, use " "tf.Template for graph construction".format(type(self))) super(EagerTemplate, self).__init__(name, func, create_scope_now, None, custom_getter, create_graph_function) if self._variable_scope is not None: variable_scope_name = self._variable_scope.name else: # Defer setting the variable scope name until the variable scope # is created in __call__. variable_scope_name = None self._template_store = _EagerTemplateVariableStore(variable_scope_name) self._variable_scope_context_manager = None def _call_func(self, args, kwargs): try: vars_at_start = self._template_store.variables() trainable_at_start = self._template_store.trainable_variables() if self._variables_created: result = self._func(*args, **kwargs) else: # The first time we run, restore variables if necessary (via # Trackable). with trackable_util.capture_dependencies(template=self): result = self._func(*args, **kwargs) if self._variables_created: # Variables were previously created, implying this is not the first # time the template has been called. Check to make sure that no new # trainable variables were created this time around. trainable_variables = self._template_store.trainable_variables() # If a variable that we intend to train is created as a side effect # of creating a template, then that is almost certainly an error. if len(trainable_at_start) != len(trainable_variables): raise ValueError( "Trainable variable created when calling a template " "after the first time, perhaps you used tf.Variable " "when you meant tf.get_variable: %s" % list(set(trainable_variables) - set(trainable_at_start))) # Non-trainable tracking variables are a legitimate reason why a new # variable would be created, but it is a relatively advanced use-case, # so log it. variables = self._template_store.variables() if len(vars_at_start) != len(variables): logging.info( "New variables created when calling a template after " "the first time, perhaps you used tf.Variable when you " "meant tf.get_variable: %s", list(set(variables) - set(vars_at_start))) else: self._variables_created = True return result except Exception as exc: # Reraise the exception, but append the original definition to the # trace. args = exc.args if not args: arg0 = "" else: arg0 = args[0] trace = "".join( _skip_common_stack_elements(self._stacktrace, traceback.format_stack())) arg0 = "%s\n\noriginally defined at:\n%s" % (arg0, trace) new_args = [arg0] new_args.extend(args[1:]) exc.args = tuple(new_args) raise def __call__(self, *args, **kwargs): # In both branches below, the template store is installed as default after # the variable scope is opened in order to ensure that templates nested at # the same level correctly uniquify lower variable scope names. if self._variable_scope: # Create a cache for the variable scope context manager the first time # around so that we don't have to keep recreating it. 
if not self._variable_scope_context_manager: self._variable_scope_context_manager = variable_scope.variable_scope( self._variable_scope, reuse=variable_scope.AUTO_REUSE) with self._variable_scope_context_manager: with self._template_store.as_default(): return self._call_func(args, kwargs) else: # The scope was not created at construction time, so create it here. # Subsequent calls should reuse variables. with variable_scope.variable_scope( self._unique_name, self._name, custom_getter=self._custom_getter) as vs: self._variable_scope = vs # Because the scope was not created at construction time, the template # store's variable scope name is unset; set it here. self._template_store.set_variable_scope_name(vs.name) with self._template_store.as_default(): return self._call_func(args, kwargs) @property def variables(self): """Returns the list of variables created by the Template.""" # Currently there is no local variable in Eager mode. if not self._variables_created: return [] return self._template_store.variables() @property def trainable_variables(self): """Returns the list of trainable variables created by the Template.""" # Currently there is no local variable in Eager mode. if not self._variables_created: return [] return self._template_store.trainable_variables() @property def non_trainable_variables(self): """Returns the list of non-trainable variables created by the Template.""" # Currently there is no local variable in Eager mode. if not self._variables_created: return [] return self._template_store.non_trainable_variables() @property def global_variables(self): """Returns the list of global variables created by the Template.""" # Currently there is no local variable in Eager mode. if not self._variables_created: return [] return self.variables @property def local_variables(self): """Returns the list of local variables created by the Template.""" # Currently there is no local variable in Eager mode. return []
tensorflow-master
tensorflow/python/ops/template.py
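The template.py entry above revolves around `make_template` and scope-based variable reuse. Below is a minimal, hedged sketch of the intended call pattern; the function and scope names are illustrative only, it assumes a TF1-style graph via `tensorflow.compat.v1`, and it is not taken from the file itself:

```python
# Hedged sketch: variable sharing with make_template in a v1 graph.
# "scale" and "shared_scale" are illustrative names, not from the file above.
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

def scale(x):
  # Created on the first call to the template, reused on every later call.
  w = tf.get_variable("w", shape=[], initializer=tf.ones_initializer())
  return x * w

scale_fn = tf.make_template("shared_scale", scale)
y1 = scale_fn(tf.constant(2.0))  # Creates shared_scale/w.
y2 = scale_fn(tf.constant(3.0))  # Reuses shared_scale/w.
print(len(scale_fn.trainable_variables))  # Expected: 1 shared variable.
```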
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Ops for boosted_trees.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import gen_boosted_trees_ops from tensorflow.python.ops import resources # Re-exporting ops used by other modules. # pylint: disable=unused-import from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_aggregate_stats from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_bucketize from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_calculate_best_feature_split as calculate_best_feature_split from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_calculate_best_gains_per_feature as calculate_best_gains_per_feature from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_center_bias as center_bias from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_create_quantile_stream_resource as create_quantile_stream_resource from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_example_debug_outputs as example_debug_outputs from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_make_quantile_summaries as make_quantile_summaries from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_make_stats_summary as make_stats_summary from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_predict as predict from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_quantile_stream_resource_add_summaries as quantile_add_summaries from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_quantile_stream_resource_deserialize as quantile_resource_deserialize from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_quantile_stream_resource_flush as quantile_flush from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_quantile_stream_resource_get_bucket_boundaries as get_bucket_boundaries from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_quantile_stream_resource_handle_op as quantile_resource_handle_op from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_sparse_aggregate_stats from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_training_predict as training_predict from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_update_ensemble as update_ensemble from tensorflow.python.ops.gen_boosted_trees_ops import is_boosted_trees_quantile_stream_resource_initialized as is_quantile_resource_initialized # pylint: enable=unused-import from tensorflow.python.training import saver from tensorflow.python.training.tracking import tracking class PruningMode(object): """Class for working with Pruning modes.""" NO_PRUNING, PRE_PRUNING, 
POST_PRUNING = range(0, 3) _map = {'none': NO_PRUNING, 'pre': PRE_PRUNING, 'post': POST_PRUNING} @classmethod def from_str(cls, mode): if mode in cls._map: return cls._map[mode] else: raise ValueError('pruning_mode must be one of: {}'.format(', '.join( sorted(cls._map)))) class QuantileAccumulatorSaveable(saver.BaseSaverBuilder.SaveableObject): """SaveableObject implementation for QuantileAccumulator.""" def __init__(self, resource_handle, create_op, num_streams, name): self._resource_handle = resource_handle self._num_streams = num_streams self._create_op = create_op bucket_boundaries = get_bucket_boundaries(self._resource_handle, self._num_streams) slice_spec = '' specs = [] def make_save_spec(tensor, suffix): return saver.BaseSaverBuilder.SaveSpec(tensor, slice_spec, name + suffix) for i in range(self._num_streams): specs += [ make_save_spec(bucket_boundaries[i], '_bucket_boundaries_' + str(i)) ] super(QuantileAccumulatorSaveable, self).__init__(self._resource_handle, specs, name) def restore(self, restored_tensors, unused_tensor_shapes): bucket_boundaries = restored_tensors with ops.control_dependencies([self._create_op]): return quantile_resource_deserialize( self._resource_handle, bucket_boundaries=bucket_boundaries) class QuantileAccumulator(tracking.TrackableResource): """Trackable resource for a boosted trees quantile stream. The bucket boundaries are serialized to and deserialized from checkpoints. """ def __init__(self, epsilon, num_streams, num_quantiles, name=None, max_elements=None): self._eps = epsilon self._num_streams = num_streams self._num_quantiles = num_quantiles super(QuantileAccumulator, self).__init__() with ops.name_scope(name, 'QuantileAccumulator') as name: self._name = name self._resource_handle = self._create_resource() self._init_op = self._initialize() is_initialized_op = self.is_initialized() resources.register_resource(self.resource_handle, self._init_op, is_initialized_op) self._saveable = QuantileAccumulatorSaveable( self.resource_handle, self._init_op, self._num_streams, self.resource_handle.name) ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, self._saveable) def _create_resource(self): return quantile_resource_handle_op( container='', shared_name=self._name, name=self._name) def _initialize(self): return create_quantile_stream_resource(self.resource_handle, self._eps, self._num_streams) @property def initializer(self): if self._init_op is None: self._init_op = self._initialize() return self._init_op def is_initialized(self): return is_quantile_resource_initialized(self.resource_handle) @property def saveable(self): return self._saveable def _gather_saveables_for_checkpoint(self): return {'quantile_accumulator': self._saveable} def add_summaries(self, float_columns, example_weights): summaries = make_quantile_summaries(float_columns, example_weights, self._eps) summary_op = quantile_add_summaries(self.resource_handle, summaries) return summary_op def flush(self): return quantile_flush(self.resource_handle, self._num_quantiles) def get_bucket_boundaries(self): return get_bucket_boundaries(self.resource_handle, self._num_streams) class _TreeEnsembleSavable(saver.BaseSaverBuilder.SaveableObject): """SaveableObject implementation for TreeEnsemble.""" def __init__(self, resource_handle, create_op, name): """Creates a _TreeEnsembleSavable object. Args: resource_handle: handle to the decision tree ensemble variable. create_op: the op to initialize the variable. name: the name to save the tree ensemble variable under.
""" stamp_token, serialized = ( gen_boosted_trees_ops.boosted_trees_serialize_ensemble(resource_handle)) # slice_spec is useful for saving a slice from a variable. # It's not meaningful the tree ensemble variable. So we just pass an empty # value. slice_spec = '' specs = [ saver.BaseSaverBuilder.SaveSpec(stamp_token, slice_spec, name + '_stamp'), saver.BaseSaverBuilder.SaveSpec(serialized, slice_spec, name + '_serialized'), ] super(_TreeEnsembleSavable, self).__init__(resource_handle, specs, name) self._resource_handle = resource_handle self._create_op = create_op def restore(self, restored_tensors, unused_restored_shapes): """Restores the associated tree ensemble from 'restored_tensors'. Args: restored_tensors: the tensors that were loaded from a checkpoint. unused_restored_shapes: the shapes this object should conform to after restore. Not meaningful for trees. Returns: The operation that restores the state of the tree ensemble variable. """ with ops.control_dependencies([self._create_op]): return gen_boosted_trees_ops.boosted_trees_deserialize_ensemble( self._resource_handle, stamp_token=restored_tensors[0], tree_ensemble_serialized=restored_tensors[1]) class TreeEnsemble(tracking.TrackableResource): """Creates TreeEnsemble resource.""" def __init__(self, name, stamp_token=0, is_local=False, serialized_proto=''): self._stamp_token = stamp_token self._serialized_proto = serialized_proto self._is_local = is_local with ops.name_scope(name, 'TreeEnsemble') as name: self._name = name self._resource_handle = self._create_resource() self._init_op = self._initialize() is_initialized_op = self.is_initialized() # Adds the variable to the savable list. if not is_local: self._saveable = _TreeEnsembleSavable( self.resource_handle, self.initializer, self.resource_handle.name) ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, self._saveable) resources.register_resource( self.resource_handle, self.initializer, is_initialized_op, is_shared=not is_local) def _create_resource(self): return gen_boosted_trees_ops.boosted_trees_ensemble_resource_handle_op( container='', shared_name=self._name, name=self._name) def _initialize(self): return gen_boosted_trees_ops.boosted_trees_create_ensemble( self.resource_handle, self._stamp_token, tree_ensemble_serialized=self._serialized_proto) @property def initializer(self): if self._init_op is None: self._init_op = self._initialize() return self._init_op def is_initialized(self): return gen_boosted_trees_ops.is_boosted_trees_ensemble_initialized( self.resource_handle) def _gather_saveables_for_checkpoint(self): if not self._is_local: return {'tree_ensemble': self._saveable} def get_stamp_token(self): """Returns the current stamp token of the resource.""" stamp_token, _, _, _, _ = ( gen_boosted_trees_ops.boosted_trees_get_ensemble_states( self.resource_handle)) return stamp_token def get_states(self): """Returns states of the tree ensemble. Returns: stamp_token, num_trees, num_finalized_trees, num_attempted_layers and range of the nodes in the latest layer. """ (stamp_token, num_trees, num_finalized_trees, num_attempted_layers, nodes_range) = ( gen_boosted_trees_ops.boosted_trees_get_ensemble_states( self.resource_handle)) # Use identity to give names. 
return (array_ops.identity(stamp_token, name='stamp_token'), array_ops.identity(num_trees, name='num_trees'), array_ops.identity(num_finalized_trees, name='num_finalized_trees'), array_ops.identity( num_attempted_layers, name='num_attempted_layers'), array_ops.identity(nodes_range, name='last_layer_nodes_range')) def serialize(self): """Serializes the ensemble into proto and returns the serialized proto. Returns: stamp_token: int64 scalar Tensor to denote the stamp of the resource. serialized_proto: string scalar Tensor of the serialized proto. """ return gen_boosted_trees_ops.boosted_trees_serialize_ensemble( self.resource_handle) def deserialize(self, stamp_token, serialized_proto): """Deserializes the input proto and resets the ensemble from it. Args: stamp_token: int64 scalar Tensor to denote the stamp of the resource. serialized_proto: string scalar Tensor of the serialized proto. Returns: Operation (for dependencies). """ return gen_boosted_trees_ops.boosted_trees_deserialize_ensemble( self.resource_handle, stamp_token, serialized_proto)
tensorflow-master
tensorflow/python/ops/boosted_trees_ops.py
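The boosted_trees_ops.py entry above wraps the tree-ensemble and quantile-stream resources. A hedged sketch of driving the `TreeEnsemble` resource in a v1 graph follows; it assumes only the constructor, `initializer`, and `get_states` shown above, and is not taken from the file itself:

```python
# Hedged sketch: create an empty TreeEnsemble resource and read its states.
import tensorflow.compat.v1 as tf
from tensorflow.python.ops import boosted_trees_ops

tf.disable_eager_execution()
with tf.Graph().as_default():
  ensemble = boosted_trees_ops.TreeEnsemble(name="ensemble")
  stamp, num_trees, _, _, _ = ensemble.get_states()
  with tf.Session() as sess:
    sess.run(ensemble.initializer)       # Creates the (empty) ensemble resource.
    print(sess.run([stamp, num_trees]))  # A fresh ensemble should report 0 trees.
```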
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= """while_v2 and gradient. This is a version of while_loop that emits a single While op, as well as the gradient function for While ops produced by while_loop. This will eventually replace the current tf.while_loop implementation once it reaches feature and performance parity. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import func_graph as func_graph_module from tensorflow.python.framework import function_def_to_graph from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import tensor_spec from tensorflow.python.framework import type_spec from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import control_flow_util_v2 as util from tensorflow.python.ops import custom_gradient from tensorflow.python.ops import gen_functional_ops from tensorflow.python.ops import gen_resource_variable_ops from tensorflow.python.ops import gradients_util from tensorflow.python.ops import list_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import tensor_array_ops from tensorflow.python.ops import while_v2_indexed_slices_rewriter from tensorflow.python.util import nest # pylint: disable=protected-access # TODO(b/79881896): Handle external control dependencies. tf.while_loop allows # control dependencies on external nodes with at least 1 output. # Another idea is to create const nodes outside the loop and add control edges # to them and then pass those in as data inputs. This should probably be # handled in the CapturingGraph itself. def while_loop(cond, body, loop_vars, shape_invariants=None, parallel_iterations=10, maximum_iterations=None, name=None, return_same_structure=True): """Like tf.while_loop, except emits a single While op.""" # Keep the original loop_vars around to know which args were TensorArrays. orig_loop_vars = loop_vars # Cache its length since we use it at multiple places below. len_orig_loop_vars = len(orig_loop_vars) # Convert TensorArrays to their flow variables. These get converted back to # TensorArrays before calling `cond` and `body`. See `wrapped_cond` and # `wrapped_body` below. 
loop_vars = list(_tensor_array_to_flow(orig_loop_vars)) loop_vars = nest.map_structure( ops.internal_convert_to_tensor_or_indexed_slices, loop_vars, expand_composites=True) if shape_invariants is not None: nest.assert_same_structure(orig_loop_vars, shape_invariants, expand_composites=False) signature = nest.map_structure( control_flow_ops._shape_invariant_to_type_spec, loop_vars, list(shape_invariants), expand_composites=False) shape_invariants = nest.map_structure( control_flow_ops._get_shape_invariant, loop_vars, list(shape_invariants), expand_composites=False) else: signature = nest.map_structure( type_spec.type_spec_from_value, loop_vars, expand_composites=False) shape_invariants = nest.map_structure( control_flow_ops._get_shape_invariant, loop_vars, expand_composites=False) if not name: name = "while" with ops.name_scope(name) as scope: with ops.name_scope(None): cond_name = util.unique_fn_name(scope, "cond") body_name = util.unique_fn_name(scope, "body") maximum_iterations_loop_var = _build_maximum_iterations_loop_var( maximum_iterations) loop_counter = constant_op.constant( 0, dtype=maximum_iterations_loop_var.dtype if maximum_iterations is not None else None, name="loop_counter") # Add loop counter needed for computing gradients. loop_vars = [loop_counter, maximum_iterations_loop_var] + loop_vars shape_invariants = ( [tensor_shape.scalar(), tensor_shape.scalar()] + shape_invariants) signature = ( [tensor_spec.TensorSpec.from_tensor(loop_counter), tensor_spec.TensorSpec.from_tensor(maximum_iterations_loop_var)] + signature) # Automatic control dependencies are added in defuns, but not in v1 # graphs. Propagate that behavior here. add_control_dependencies = ops.get_default_graph()._add_control_dependencies # Build a `cond` wrapper that can handle the extra counter loop_var. def wrapped_cond(loop_counter, maximum_iterations_arg, *args): # Convert the flow variables in `args` to TensorArrays. `args` should # already have the same structure as `orig_loop_vars` but currently there # is no nest.zip so we call `_pack_sequence_as` which flattens both # `orig_loop_vars` and `args`, converts flows in `args` to TensorArrays # and packs it into the structure of `orig_loop_vars`. if maximum_iterations is None: return cond(*_pack_sequence_as(orig_loop_vars, args)) else: return math_ops.logical_and( loop_counter < maximum_iterations_arg, cond(*_pack_sequence_as(orig_loop_vars, args))) # NOTE(skyewm): we set collections to the outer graph's collections for # compatibility with TPUEstimator. cond_graph = func_graph_module.func_graph_from_py_func( cond_name, wrapped_cond, [], # We provide signature instead of args. {}, signature=signature, func_graph=util.WhileCondFuncGraph( cond_name, collections=ops.get_default_graph()._collections), # pylint: disable=protected-access add_control_dependencies=add_control_dependencies) def wrapped_body(loop_counter, maximum_iterations_arg, *args): """Loop body augmented with counter update. Args: loop_counter: Loop counter which needs to be incremented in the body. maximum_iterations_arg: Maximum iterations of the loop. *args: List of args Returns: A list of tensors the same length as args. """ # Capture the tensors already captured in cond_graph so that they appear # in the same order in body_graph.external_captures. for t in cond_graph.external_captures: ops.get_default_graph().capture(t) # Convert the flow variables in `args` to TensorArrays. 
`args` should # already have the same structure as `orig_loop_vars` but currently there # is no nest.zip so we call `_pack_sequence_as` which flattens both # `orig_loop_vars` and `args`, converts flows in `args` to TensorArrays # and packs it into the structure of `orig_loop_vars`. outputs = body(*_pack_sequence_as(orig_loop_vars, args)) if not nest.is_sequence_or_composite(outputs): outputs = [outputs] # Compare the structure of input and output of body converting the # top-level tuples to list to be compatible with legacy while_loop. nest.assert_same_structure(list(outputs), list(orig_loop_vars), expand_composites=True) outputs = _tensor_array_to_flow(outputs) # TODO(srbs): Update lowering code to create _Enter nodes with # is_constant=True for inputs that are directly passed to outputs. return [loop_counter + 1, maximum_iterations_arg] + list(outputs) body_graph = func_graph_module.func_graph_from_py_func( body_name, wrapped_body, [], # We provide signature instead of args. {}, signature=signature, func_graph=util.WhileBodyFuncGraph( body_name, collections=ops.get_default_graph()._collections), # pylint: disable=protected-access add_control_dependencies=add_control_dependencies) # Add external captures of body to the list of loop vars. # Note that external tensors will be treated as loop invariants, i.e., # the value of that tensor in each iteration is the same as it was at the # beginning of the loop execution. loop_vars = loop_vars + body_graph.external_captures # TODO(srbs): Update lowering code to create _Enter nodes with # is_constant=True for inputs that are directly passed to outputs. body_graph.outputs.extend(body_graph.internal_captures) # Capture the extra `external_captures` of `body_graph` in `cond_graph` so # that it expects to receive those as arguments. with cond_graph.as_default(): num_cond_captures = len(cond_graph.external_captures) assert (cond_graph.external_captures == body_graph.external_captures[:num_cond_captures]) for body_capture in body_graph.external_captures[num_cond_captures:]: assert body_capture not in cond_graph.captures cond_graph.capture(body_capture) # Make sure that the shapes of the loop outputs are compatible with the # shape invariants, or the shapes of the loop vars if the invariants are not # specified. num_flattened_outputs = len(nest.flatten(orig_loop_vars, expand_composites=True)) # First var is loop counter and second var is maximum_iterations. 
first_loop_var_index = 2 _check_shapes_compat( body_graph.outputs[first_loop_var_index:first_loop_var_index + num_flattened_outputs], nest.flatten( shape_invariants[first_loop_var_index:first_loop_var_index + len_orig_loop_vars], expand_composites=True), nest.flatten(loop_vars[first_loop_var_index:first_loop_var_index + len_orig_loop_vars], expand_composites=True)) flattened_loop_vars = nest.flatten(loop_vars, expand_composites=True) _check_num_inputs_outputs(cond_graph, body_graph, len(flattened_loop_vars)) _check_inputs_outputs_types_match(body_graph, flattened_loop_vars) with ops.control_dependencies( list(cond_graph.control_captures) + list(body_graph.control_captures)): output_shapes = [t.shape for t in body_graph.outputs] orig_loop_vars_range = slice(first_loop_var_index, first_loop_var_index + num_flattened_outputs) output_shapes[orig_loop_vars_range] = nest.flatten( shape_invariants, expand_composites=True)[orig_loop_vars_range] outputs = gen_functional_ops._while( flattened_loop_vars, util.create_new_tf_function(cond_graph), util.create_new_tf_function(body_graph), output_shapes=output_shapes, parallel_iterations=parallel_iterations, name=scope) _copy_handle_data(body_graph.outputs, outputs) util.maybe_set_lowering_attr(outputs[0].op) util.maybe_propagate_compile_time_consts_in_xla(outputs[0].op) # Return identities for each output of the While op, rather than the output # of the While op directly. This makes pruning work if the output of # while_loop() is fetched: the lowering pass converts the While outputs into # IdentityN outputs, which if fetched will cause all ops in the body to be # run (since it takes all exit ops as input). After lowering, each output # identity op will end up with only the appropriate exit op as input. outputs = tuple(array_ops.identity(t) for t in outputs) outputs = _pack_sequence_as( orig_loop_vars, outputs[first_loop_var_index:first_loop_var_index + num_flattened_outputs]) if return_same_structure: return outputs flattened_outputs = nest.flatten(outputs, expand_composites=True) if len(flattened_outputs) == 1: return flattened_outputs[0] else: return outputs @ops.RegisterGradient("While") def _WhileGrad(op, *grads): # pylint: disable=invalid-name """The gradient of a While op produced by while_loop.""" # Note that op is not always the same as while_op because the gradient tape, # for eager mode compatibility, forgets information about the proper op. Since # the loop cannot run in eager mode, however, we can safely introspect into # the graph here. while_op = op.outputs[0].op cond_graph = _get_graph(while_op, "cond") body_graph = _get_graph(while_op, "body") orig_num_params = len(body_graph.outputs) maximum_iterations = op.inputs[1] parallel_iterations = op.get_attr("parallel_iterations") grads = [_preprocess_grad(grad, body_out, while_out) for grad, body_out, while_out in zip(grads, body_graph.outputs, while_op.outputs)] # We compute the gradient for the sub-graph between trainable ys and xs # with non-None incoming gradients. We later pad the None's to the list of # outputs. ys, xs, non_none_grads = zip(*[(y, x, grad) for (y, x, grad) in zip( body_graph.outputs, body_graph.inputs, grads) if grad is not None]) body_grad_graph, args = _create_grad_func( ys, xs, non_none_grads, cond_graph, body_graph, util.unique_grad_fn_name(body_graph.name), op, maximum_iterations) if body_grad_graph.while_op_needs_rewrite: # Modify 'op' to output the intermediate accumulators needed by the grad # function. 
# NOTE(skyewm): if there are any active sessions, this modification to `op` # may make them unrunnable! cond_graph.name += "_rewritten" body_graph.name += "_rewritten" new_inputs = body_grad_graph.empty_tensor_lists new_outputs = body_graph.outputs[orig_num_params:] while_op._set_func_attr("cond", util.create_new_tf_function(cond_graph)) while_op._set_func_attr("body", util.create_new_tf_function(body_graph)) while_op._set_type_list_attr("T", body_graph.output_types) while_op._set_shape_list_attr("output_shapes", body_graph.output_shapes) while_op._add_while_inputs(new_inputs) while_op._add_outputs([t.dtype for t in new_outputs], [t.shape for t in new_outputs]) _copy_handle_data(new_outputs, op.outputs[orig_num_params:]) captured_inputs = _resolve_grad_captures(body_graph, body_grad_graph, while_op) loop_vars = args + captured_inputs # This modifies body_grad_graph. loop_vars = while_v2_indexed_slices_rewriter.rewrite_grad_indexed_slices( grads, body_grad_graph, loop_vars, while_op.inputs) def grad_cond(counter, unused_maximum_iterations_arg, forward_loop_iters, *unused_args): return counter < forward_loop_iters grad_cond_name = util.unique_grad_fn_name(op.get_attr("cond").name) cond_grad_graph = func_graph_module.func_graph_from_py_func( grad_cond_name, grad_cond, loop_vars, {}, func_graph=util.WhileCondFuncGraph(grad_cond_name)) _check_num_inputs_outputs(cond_grad_graph, body_grad_graph, len(loop_vars)) outputs = gen_functional_ops._while( loop_vars, util.create_new_tf_function(cond_grad_graph), util.create_new_tf_function(body_grad_graph), output_shapes=[t.shape for t in body_grad_graph.outputs], parallel_iterations=parallel_iterations, name="%s_grad" % while_op.name) grad_op = outputs[0].op _copy_handle_data(body_grad_graph.outputs, outputs) util.maybe_set_lowering_attr(grad_op) util.maybe_propagate_compile_time_consts_in_xla(grad_op) # See comment in while_loop. outputs = [array_ops.identity(t) for t in outputs] return _get_structured_grad_output(outputs, grads, body_grad_graph) def _preprocess_grad(grad, body_graph_output, while_op_output): """Returns the initial gradient to be used for a given output tensor. Args: grad: the original gradient Tensor passed to the gradient function. body_graph_output: the corresponding Tensor in the body graph. while_op_output: the corresponding Tensor output of the While op. Returns: A Tensor or None. """ # Set the incoming gradient of non-trainable inputs to None. It is possible # that we receive non-None gradients for non-trainable types in nested while # loops because we accumulate outputs of the inner while as variant tensors # which are trainable and hence receive zeros_like tensors in the gradient # pass. The non-trainable tensors then receive the popped zeros tensor from # this zeros variant. The gradient for the loop vars corresponding to these # tensors is None or zeros (this happens only if the loop var is accumulated # as well) in _grad_fn so we reset these. # TODO(b/118712257): Remove once we can handle None output grads in _grad_fn. if not _is_trainable(body_graph_output): return None # GradientTape initializes resource and variant grads as None instead of # zeros. Set to zeros so _GradientsHelper computes the gradients instead of # returning None. if (while_op_output.dtype in (dtypes.resource, dtypes.variant) and grad is None): return _zeros_like(while_op_output) return grad # TODO(skyewm): make this return constants if op_output's shape is fully # defined (this can be done by checking the "shape" attr of resource vars). 
def _zeros_like(op_output): """Like array_ops.zeros_like() but also accepts resource var handles.""" if op_output.dtype == dtypes.resource: return array_ops.zeros( gen_resource_variable_ops.variable_shape(op_output)) return array_ops.zeros_like(op_output) def _is_trainable(tensor): """Returns whether the given tensor is trainable.""" if not gradients_util.IsTrainable(tensor): return False # Special case: untrainable accumulator output. The gradients algorithm # doesn't know about tensor lists of untrainable elements. In theory the # tensor list gradient functions should return None as appropriate, but # because we can't return None from the gradient function we filter out # untrainable accumulator output here to avoid computing the gradient at all. if tensor.op.type == "TensorListPopBack" and tensor.value_index == 0: assert tensor.dtype == dtypes.variant element_type = tensor.op.get_attr("element_dtype") return gradients_util.IsTrainable(element_type) return True # TODO(srbs): Pull this into common utils for cond_v2 and while_v2. def _get_graph(while_op, func_attr_name): """Returns `FuncGraph` for the given function attribute. Args: while_op: The While Operation. func_attr_name: string Returns: `FuncGraph` """ # TODO(srbs): Handle TensorShapeProto in function_def_to_graph.input_shapes. input_shapes = [ tensor_shape.TensorShape(s) for s in while_op.get_attr("output_shapes") ] func_name = while_op.get_attr(func_attr_name).name fdef = while_op.graph._get_function(func_name).definition # `while_op.graph` may not be the same as `ops.get_default_graph()` e.g. # if the `while_op` is in the body of another if/while/defun. We build the # `func_graph` with `while_op.graph` as its `outer_graph`. This resembles how # the `FuncGraph` was built in the forward pass. We need this so that we can # appropriately capture references to outer tensors in the nested grad graphs. with while_op.graph.as_default(): func_graph = function_def_to_graph.function_def_to_graph(fdef, input_shapes) func_graph._while = while_op return func_graph def _create_grad_func(ys, xs, grads, cond_graph, body_graph, name, while_op, maximum_iterations): """Builds and returns the gradient FuncGraph of `func_graph` and its args. The returned grad_func_graph must be called with the returned args + grad_func_graph.captures. Args: ys: A `Tensor` or list of tensors to be differentiated. xs: A `Tensor` or list of tensors to be used for differentiation. grads: The incoming grads for `ys`. cond_graph: FuncGraph for the forward cond function. body_graph: FuncGraph for the forward body function. name: Name of the returned gradient function. while_op: The forward While op. maximum_iterations: Tensor. The maximum number of iterations. Returns: 2-tuple of (grad_func_graph, args). """ assert len(ys) == len(grads) total_iters = while_op.outputs[0] counter = constant_op.constant( 0, dtype=total_iters.dtype, name="grad_counter") args = [counter, maximum_iterations, total_iters] + list(grads) # Note: The returned function does not have `args` in the list of # `external_captures`. grad_func_graph = func_graph_module.func_graph_from_py_func( name, lambda *args: _grad_fn(ys, xs, args, body_graph), args, {}, func_graph=_WhileBodyGradFuncGraph(name, cond_graph, body_graph, maximum_iterations, while_op)) # Add the popped accumulators to the list of outputs. 
for internal_capture in grad_func_graph.internal_captures: if internal_capture in grad_func_graph.popped_tensor_lists: new_output = grad_func_graph.popped_tensor_lists[internal_capture] elif internal_capture.dtype == dtypes.resource: new_output = internal_capture else: raise ValueError("Tensor %s is in list of internal_captures but is" " neither a resource nor is in popped_tensor_lists." % str(internal_capture)) grad_func_graph.outputs.append(new_output) grad_func_graph.structured_outputs.append(new_output) return grad_func_graph, args def _grad_fn(ys, xs, args, func_graph): """Computes the gradient of `func_graph` in the current graph. This function builds the gradient graph of the corresponding forward-pass `func_graph` by differentiating `func_graph`'s outputs w.r.t. its inputs. Args: ys: A `Tensor` or list of tensors to be differentiated. xs: A `Tensor` or list of tensors to be used for differentiation. args: The input arguments. args[0] - Loop counter args[1] - maximum_iterations. args[2] - Total number of iterations. args[3:] - Incoming gradients for `ys`. func_graph: function.FuncGraph. The corresponding forward-pass function. Returns: The output gradient Tensors. """ grad_ys = args[3:] # Build the gradient graph. Note that this builds the gradient computation of # func_graph in the current graph, which requires capturing tensors from # func_graph. The captured func_graph tensors are resolved to external tensors # after the forward While op has been rewritten in _resolve_grad_captures. # TODO(srbs): Mark GradientsHelper as public? grad_outs = gradients_util._GradientsHelper( ys, xs, grad_ys=grad_ys, src_graph=func_graph, unconnected_gradients="zero") # TODO(b/118712257): Handle the case when grad_outs has None's e.g. when there # is a tf.StopGradient in the loop body. assert all(g is not None for g in grad_outs) counter = args[0] maximum_iterations = args[1] total_iters = args[2] return [counter + 1, maximum_iterations, total_iters] + grad_outs def _resolve_grad_captures(body_graph, body_grad_graph, while_op): """Returns the tensors to pass as captured inputs to `body_grad_graph`. `body_grad_graph` may have external references to: 1. Its outer graph containing the input gradients. These are left as-is. 2. Accumulators captured from the forward-pass graph. These should have been added as `while_op` outputs after the gradient graph was built. We replace these with the corresponding output of `while_op`, i.e. a tensor in `body_graph.outer_graph`. In the case of nested control flow or functions, the gradient logic handling `body_grad_graph.outer_graph` will make sure the tensor from `body_graph.outer_graph` is also correctly captured. Args: body_graph: FuncGraph. The forward-pass body function. body_grad_graph: FuncGraph. The body gradients function. while_op: The forward-pass While Operation calling `body_graph`. Returns: A list of input tensors to be passed as the captured inputs to `body_grad_graph`. """ new_capture_inputs = [] for t in body_grad_graph.external_captures: # All values captured by gradient computation should be from the forward # graph or a captured resource variable (note that input gradients are # regular non-captured inputs). if t.graph == body_graph: # Captured accumulator t = while_op.outputs[t.graph.outputs.index(t)] # Note: We rely on the capturing logic of the gradient While op graph to # correctly capture the tensors in `body_graph.outer_graph`. Both cond_v2 # and while_v2 handle this while building their gradient functions.
assert t.graph == body_graph.outer_graph else: # Captured resource variable assert t.dtype == dtypes.resource new_capture_inputs.append(t) return new_capture_inputs def _get_structured_grad_output(outputs, grads, body_grad_graph): """Returns the values that should be returned from the while grad function. Args: outputs: the raw Tensor outputs of the grad While op. grads: the input gradients to the gradient function. body_grad_graph: _WhileBodyGradFuncGraph. Returns: A list of gradient values. May include Nones. """ result = [] # outputs[0] is the loop counter. # outputs[1] is maximum_iterations. # outputs[2] is the total number of loop iterations. outputs_idx = 3 structured_outputs_idx = 3 for g in grads: # Set None as the output gradient for tensors with None input gradient. if g is None: result.append(None) continue output = body_grad_graph.structured_outputs[structured_outputs_idx] structured_outputs_idx += 1 if isinstance(output, ops.IndexedSlices): # TODO(skyewm): is there a more robust way to determine the order of # flattened IndexedSlices components? result.append(ops.IndexedSlices( values=outputs[outputs_idx], indices=outputs[outputs_idx + 1], dense_shape=outputs[outputs_idx + 2])) outputs_idx += 3 else: assert isinstance(output, ops.Tensor) result.append(outputs[outputs_idx]) outputs_idx += 1 return result def _get_accumulator(tensor): r"""Returns TensorList if any containing accumulated values of tensor. We try to find a pattern of the form: input_tl tensor \ / (TensorListPushBack) | output_tl which satisfies the following conditions: 1. input_tl must be in tensor.graph.inputs. 2. output_tl or Identity(output_tl) must be in tensor.graph.outputs. 3. tensor.graph.input_index(input_tl) == tensor.graph.output_index(output_t). output_tl or Identity(output_tl) (whichever is in tensor.graph.outputs) is returned if such a pattern is found else None is returned. Args: tensor: The Tensor to be accumulated. Returns: A variant tensor in the same graph as `tensor` or None if no accumulator is found. """ assert isinstance(tensor.graph, func_graph_module.FuncGraph) def get_func_graph_output(t): """Returns t or Identity(t) whichever exists in graph outputs else None.""" if t in tensor.graph.outputs: return t # tf.defun adds an Identity for each output, check whether that is the case. identity_op = t.consumers()[0] if (identity_op.type == "Identity" and identity_op.outputs[0] in tensor.graph.outputs): return identity_op.outputs[0] return None for consumer in tensor.consumers(): # Find the consumer that is a TensorListPushBack node whose TensorList input # is in the list of function inputs. if (consumer.type != "TensorListPushBack" or consumer.inputs[0] not in tensor.graph.inputs): continue output = get_func_graph_output(consumer.outputs[0]) if output is None: # The TensorList output of `consumer` is not in the list of function # outputs. continue accum_input_idx = tensor.graph.inputs.index(consumer.inputs[0]) accum_output_idx = tensor.graph.outputs.index(output) if accum_input_idx == accum_output_idx: return output return None class _WhileBodyGradFuncGraph(util.WhileBodyFuncGraph): """FuncGraph for the gradient function of the body of a While op. Contains the logic for capturing the tensors from the body of the forward While op which is as follows: 1. If the tensor is of resource type (these are not accumulated): a. Ensure that the tensor is a loop invariant, i.e., it exists in both loop inputs and outputs at the same index. b. 
Lookup the corresponding resource tensor in the forward outer graph and try to capture that. 2. If the tensor is not of resource type: a. Create an accumulator for that tensor and output it from the forward pass. Note this also requires adding it as an input to the forward pass. b. Capture the accumulator from the forward pass in this FuncGraph. This will later be resolved to the correct output of the forward While op. c. Pop a value from the captured placeholder and use it as the captured value for the forward pass tensor. This only allows capturing tensors in the forward graph. A ValueError is raised if an attempt is made to capture a tensor not in the forward graph. To manually capture a tensor that is not in the forward graph, call `capture` with `whitelisted=True`. Note: The `captures` dict does not contain the forward tensor since it is not directly captured. It contains the accumulator corresponding to this forward tensor. Attributes: while_op_needs_rewrite: True if any non-resource intermediates were captured, meaning the forward While op needs to be rewritten to output the corresponding accumulators. empty_tensor_lists: list of EmptyTensorList tensors to be used as initial input to the new accumulators in the forward graph. popped_tensor_lists: dict from the captured accumulator placeholder to the TensorList obtained after popping the intermediate tensor from it. The values of this dict need to be added to the list of outputs. """ def __init__(self, name, forward_cond_graph, forward_body_graph, maximum_iterations, forward_while_op): super(_WhileBodyGradFuncGraph, self).__init__(name) self.empty_tensor_lists = [] self.popped_tensor_lists = {} # FuncGraph for the body of the forward While op. self._forward_graph = forward_body_graph # FuncGraph for the cond of the forward While op. self._forward_cond_graph = forward_cond_graph self._maximum_iterations = maximum_iterations self._forward_while_op = forward_while_op # Dict from forward intermediate tensor to its indirectly captured tensor # in this graph. Indirect capturing happens in two ways: # 1. For non-resource tensors we capture their accumulators from the forward # outer graph and pop values from that accumulator inside this graph # using TensorListPopBack. # 2. For resource tensors we directly capture their corresponding tensor # in the forward outer graph. self._indirect_captures = {} @property def while_op_needs_rewrite(self): return self.empty_tensor_lists def capture(self, tensor, name=None, whitelisted=False): """Selectively captures external tensors. If `whitelisted` is False, only allows capturing tensors in the `_forward_graph`. Args: tensor: Tensor. May be from this FuncGraph or a different graph. name: Optional name if a placeholder is created. whitelisted: If False (default), only allows capturing tensors from the forward graph. Returns: The placeholder in this graph for the tensor. Raises: ValueError: If attempting to capture an external tensor not in the forward graph with `whitelisted` set to False. """ if (not whitelisted and tensor.graph is not self and tensor.graph != self._forward_graph): raise ValueError("Attempting to capture tensor %s which is not in the " "forward graph but in %s."
% (str(tensor), _graph_name(tensor.graph))) return super(_WhileBodyGradFuncGraph, self).capture(tensor, name) def _capture_helper(self, tensor, name): if tensor.graph is not self._forward_graph: return super(_WhileBodyGradFuncGraph, self)._capture_helper(tensor, name) while tensor.op.type == "Identity": # We do not accumulate the output of identity nodes so we try to capture # the input of the Identity node instead. tensor = tensor.op.inputs[0] captured_tensor = self._indirect_captures.get(tensor) if captured_tensor is not None: return captured_tensor # Resource tensors are not accumulated and handled specially. if tensor.dtype == dtypes.resource: return self._resource_capture_helper(tensor) # Create or find an existing accumulator output for `tensor` in the forward # graph, and fetch from this accumulator in the gradient graph to get the # raw intermediate value. accumulator = _get_accumulator(tensor) if accumulator is None: # Create the initial empty tensor list. # # Note: We clear the control dependencies to avoid a cycle in case a # control tensor has an input path to an output of the forward While. # # E.g.: # x = tf.while_loop(...) # y = f(x) # with tf.control_dependencies([y]): # tf.gradients(y, x) # # Since the EmptyTensorList is fed back into the forward While, not # removing the control edge would cause a cycle. with self._forward_graph.outer_graph.as_default(): with util.clear_control_inputs(): tensor_list = list_ops.empty_tensor_list( element_dtype=tensor.dtype, element_shape=tensor.shape, max_num_elements=self._maximum_iterations, name=_build_accumulator_name(tensor)) self.empty_tensor_lists.append(tensor_list) # Push the intermediate tensor to the tensor list. This captures # `tensor_list`. with self._forward_graph.as_default(): accumulator = list_ops.tensor_list_push_back(tensor_list, tensor) # Add the modified tensor list to the list of outputs. This output will be # all the accumulated values. self._forward_graph.outputs.append(accumulator) # Capture in the cond graph as well so the forward cond and body inputs # match. with self._forward_cond_graph.as_default(): self._forward_cond_graph.capture(tensor_list) # Capture the accumulator tensor list in the gradient graph directly from # the forward graph -- we'll later modify this to capture the final list # output by the forward While op instead. captured_accumulator = super(_WhileBodyGradFuncGraph, self)._capture_helper( accumulator, name) # Pop the intermediate value from the tensor list in the gradient graph. new_tensor_list, captured_tensor = list_ops.tensor_list_pop_back( captured_accumulator, element_dtype=tensor.dtype) self._indirect_captures[tensor] = captured_tensor self.popped_tensor_lists[captured_accumulator] = new_tensor_list return captured_tensor def _resource_capture_helper(self, tensor): """Returns the captured resource tensor. Resource-type tensors are not accumulated. If a resource tensor exists in the loop body it must either be a loop input or an output of a nested While op inside the loop body which had captured the external resource. Args: tensor: the external resource Tensor to be captured. Returns: Tensor in this graph. 
""" assert tensor.dtype == dtypes.resource index = util.resource_input_index( tensor.name, [t.name for t in self._forward_graph.inputs], {op.name: op.node_def for op in self._forward_graph.get_operations()}, self._forward_graph._functions) input_placeholder = self._forward_graph.inputs[index] tensor_in_outer_graph = self._forward_graph._while.inputs[index] assert input_placeholder.dtype == dtypes.resource assert tensor_in_outer_graph.dtype == dtypes.resource # This must be a loop invariant. assert input_placeholder == self._forward_graph.outputs[index], ( "Resource tensors must be loop invariants %s." % tensor_in_outer_graph) self._indirect_captures[tensor] = self.capture( tensor_in_outer_graph, whitelisted=True) return self._indirect_captures[tensor] def _check_shapes_compat(output_tensors, shape_invariants, input_tensors): for (t, shape, input_t) in zip(output_tensors, shape_invariants, input_tensors): if not control_flow_ops._ShapeLessThanOrEqual(t.shape, shape): raise ValueError( "Input tensor '%s' enters the loop with shape %s, but has " "shape %s after one iteration. To allow the shape to vary across " "iterations, use the `shape_invariants` argument of tf.while_loop to " "specify a less-specific shape." % (input_t.name, shape, t.shape)) def _check_num_inputs_outputs(cond_graph, body_graph, num_flattened_loop_vars): """Checks the number of inputs/outputs of `cond_graph` and `body_graph`.""" assert len(cond_graph.inputs) == num_flattened_loop_vars, ( "cond_graph takes %d inputs; Expected: %d" % (len(cond_graph.inputs), num_flattened_loop_vars)) assert len(cond_graph.outputs) == 1, ( "cond_graph has %d outputs; Expected: 1" % len(cond_graph.outputs)) assert len(body_graph.inputs) == num_flattened_loop_vars, ( "body_graph takes %d inputs; Expected: %d" % (len(body_graph.inputs), num_flattened_loop_vars)) assert len(body_graph.outputs) == num_flattened_loop_vars, ( "body_graph has %d outputs; Expected: %d" % (len(body_graph.outputs), num_flattened_loop_vars)) def _check_inputs_outputs_types_match(body_graph, flattened_loop_vars): for inp, out, loop_var in zip(body_graph.inputs, body_graph.outputs, flattened_loop_vars): if inp.dtype != out.dtype: raise TypeError("Loop var {} enters the loop with type {} " "but has type {} after 1 iteration.".format( loop_var.name, inp.dtype, out.dtype)) def _copy_handle_data(src_tensors, tgt_tensors): for src_t, tgt_t in zip(src_tensors, tgt_tensors): custom_gradient.copy_handle_data(src_t, tgt_t) def _graph_name(graph): if isinstance(graph, func_graph_module.FuncGraph): return graph.name return "Base" def _pack_sequence_as(structure_with_tas, loop_vars): """Like `nest.pack_sequence_as` but also replaces flows with TensorArrays.""" def flow_to_tensor_array(flow, ta): # pylint: disable=missing-docstring return (tensor_array_ops.build_ta_with_new_flow(ta, flow) if isinstance( # pylint: disable=g-long-ternary ta, tensor_array_ops.TensorArray) else flow) flattened_loop_vars = [ flow_to_tensor_array(*z) for z in zip(nest.flatten(loop_vars, expand_composites=True), nest.flatten(structure_with_tas, expand_composites=True)) ] return nest.pack_sequence_as(structure_with_tas, flattened_loop_vars, expand_composites=True) def _tensor_array_to_flow(loop_vars): def f(maybe_ta): if isinstance(maybe_ta, tensor_array_ops.TensorArray): return maybe_ta.flow return maybe_ta return nest.map_structure(f, loop_vars, expand_composites=True) def _build_maximum_iterations_loop_var(maximum_iterations): if maximum_iterations is None: # Default value for max_num_elements to 
EmptyTensorList meaning that the # list size is unbounded. maximum_iterations = -1 # EmptyTensorList expects `max_num_elements` to be of type int32. return ops.convert_to_tensor( maximum_iterations, dtype=dtypes.int32, name="maximum_iterations") def _build_accumulator_name(tensor): # Tensor name may be of the form "pow/y:0". Name scope does not allow ":". return "{}/accumulator".format(tensor.name).replace(":", "_") # pylint: enable=protected-access
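# Illustrative sketch (editor's addition, not part of the original module):
# end to end, the machinery above is what lets tf.gradients differentiate
# through a control-flow-v2 while loop. Intermediate body values needed by
# the gradient are accumulated in TensorLists by the rewritten forward While
# op and popped back out in the gradient body. Assumes TensorFlow 1.x graph
# mode with control flow v2 enabled; all names below are hypothetical
# example values, not part of this module's API.
def _example_while_v2_gradient():  # pragma: no cover
  import tensorflow as tf

  x = tf.constant(2.0)

  def body(i, acc):
    # `acc * x` is an intermediate the gradient needs, so the forward While
    # op is rewritten to accumulate it in a TensorList.
    return i + 1, acc * x

  _, y = tf.while_loop(lambda i, acc: i < 3, body,
                       (tf.constant(0), tf.constant(1.0)))
  # y == x ** 3, so d y / d x == 3 * x ** 2 == 12.0 at x == 2.0.
  return tf.gradients(y, x)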
tensorflow-master
tensorflow/python/ops/while_v2.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Critical Section object and execution logic.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections # TODO(ebrevdo): Re-enable once CriticalSection is in core. # from tensorflow.core.protobuf import critical_section_pb2 from tensorflow.python.eager import context from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import gen_resource_variable_ops from tensorflow.python.ops import tensor_array_ops from tensorflow.python.util import nest from tensorflow.python.util.tf_export import tf_export __all__ = ["CriticalSection"] # Graph Keys CRITICAL_SECTIONS = "critical_sections" CRITICAL_SECTION_EXECUTIONS = "critical_section_executions" class _ExecutionSignature( collections.namedtuple("_ExecutionSignature", ("op", "handle", "resources", "exclusive_resource_access"))): """A class storing an `ExecuteInCriticalResource` op and associated attrs.""" pass def _identity(x): """Identity op that recognizes `TensorArray`, `Operation`, and `Tensor`.""" if isinstance(x, tensor_array_ops.TensorArray): return x.identity() elif isinstance(x, ops.Operation): return control_flow_ops.group(x) elif context.executing_eagerly() and x is None: return None else: return array_ops.identity(x) def _get_colocation(op): """Get colocation symbol from op, if any.""" try: return op.get_attr("_class") except ValueError: return None @tf_export("CriticalSection") class CriticalSection(object): """Critical section. A `CriticalSection` object is a resource in the graph which executes subgraphs in **serial** order. A common example of a subgraph one may wish to run exclusively is the one given by the following function: ```python v = resource_variable_ops.ResourceVariable(0.0, name="v") def count(): value = v.read_value() with tf.control_dependencies([value]): with tf.control_dependencies([v.assign_add(1)]): return tf.identity(value) ``` Here, a snapshot of `v` is captured in `value`; and then `v` is updated. The snapshot value is returned. If multiple workers or threads all execute `count` in parallel, there is no guarantee that access to the variable `v` is atomic at any point within any thread's calculation of `count`. In fact, even implementing an atomic counter that guarantees that the user will see each value `0, 1, ...,` is currently impossible. The solution is to ensure any access to the underlying resource `v` is only processed through a critical section: ```python cs = CriticalSection() f1 = cs.execute(count) f2 = cs.execute(count) output = f1 + f2 session.run(output) ``` The functions `f1` and `f2` will be executed serially, and updates to `v` will be atomic. 
**NOTES** All resource objects, including the critical section and any captured variables of functions executed on that critical section, will be colocated to the same device (host and cpu/gpu). When using multiple critical sections on the same resources, there is no guarantee of exclusive access to those resources. This behavior is disallowed by default (but see the kwarg `exclusive_resource_access`). For example, running the same function in two separate critical sections will not ensure serial execution: ```python v = tf.compat.v1.get_variable("v", initializer=0.0, use_resource=True) def accumulate(up): x = v.read_value() with tf.control_dependencies([x]): with tf.control_dependencies([v.assign_add(up)]): return tf.identity(x) ex1 = CriticalSection().execute( accumulate, 1.0, exclusive_resource_access=False) ex2 = CriticalSection().execute( accumulate, 1.0, exclusive_resource_access=False) bad_sum = ex1 + ex2 sess.run(v.initializer) sess.run(bad_sum) # May return 0.0 ``` """ def __init__(self, name=None, shared_name=None, critical_section_def=None, import_scope=None): """Creates a critical section.""" context.ensure_initialized() if critical_section_def and name is not None: raise ValueError("critical_section_def and shared_name are " "mutually exclusive.") if critical_section_def: self._init_from_proto(critical_section_def, import_scope=import_scope) else: self._init_from_args(name, shared_name) def _init_from_proto(self, critical_section_def, import_scope): # pylint: disable=invalid-name raise NotImplementedError("Not yet implemented") # TODO(ebrevdo): Re-enable once CriticalSection is in core. # assert isinstance( # critical_section_def, critical_section_pb2.CriticalSectionDef) # # Create from critical_section_def. # g = ops.get_default_graph() # self._handle = g.as_graph_element( # ops.prepend_name_scope( # critical_section_def.critical_section_name, # import_scope=import_scope)) def _init_from_args(self, name, shared_name): # pylint: disable=invalid-name """Initialize the CriticalSection from constructor arguments.""" with ops.name_scope(name, "CriticalSection", []) as name: with ops.init_scope(): # pylint: disable=protected-access container = ops.get_default_graph()._container # pylint: enable=protected-access if shared_name is None: shared_name = name if container is None: container = "" self._handle = gen_resource_variable_ops.mutex_v2( shared_name=shared_name, container=container, name=name) if not context.executing_eagerly(): ops.add_to_collections(CRITICAL_SECTIONS, self) @property def name(self): return self._handle.op.name def execute(self, fn, exclusive_resource_access=True, name=None): """Execute function `fn()` inside the critical section. `fn` should not accept any arguments. To add extra arguments to when calling `fn` in the critical section, create a lambda: ```python critical_section.execute(lambda: fn(*my_args, **my_kwargs)) ``` Args: fn: The function to execute. Must return at least one tensor. exclusive_resource_access: Whether the resources required by `fn` should be exclusive to this `CriticalSection`. Default: `True`. You may want to set this to `False` if you will be accessing a resource in read-only mode in two different CriticalSections. name: The name to use when creating the execute operation. Returns: The tensors returned from `fn()`. Raises: ValueError: If `fn` attempts to lock this `CriticalSection` in any nested or lazy way that may cause a deadlock. 
ValueError: If `exclusive_resource_access == True` and another `CriticalSection` has an execution requesting the same resources as `fn``. Note, even if `exclusive_resource_access` is `True`, if another execution in another `CriticalSection` was created without `exclusive_resource_access=True`, a `ValueError` will be raised. """ with ops.name_scope(name, "critical_section_execute", []): # Ensure that mutex locking only happens *after* all args and # kwargs have been executed. This avoids certain types of deadlocks. lock = gen_resource_variable_ops.mutex_lock(self._handle) if not context.executing_eagerly(): # NOTE(ebrevdo): This is to ensure we don't pick up spurious # Operations created by other threads. with ops.get_default_graph()._lock: # pylint: disable=protected-access existing_ops = ops.get_default_graph().get_operations() with ops.control_dependencies([lock]): r = fn() # TODO(ebrevdo): If creating critical sections in a python loop, this # makes graph creation time quadratic. Revisit if this # becomes a problem. created_ops = (set(ops.get_default_graph().get_operations()) .difference(existing_ops)) else: with ops.control_dependencies([lock]): r = fn() if not context.executing_eagerly(): self._add_control_dependencies_to_lock(created_ops, lock.op) # captured_resources is a list of resources that are directly # accessed only by ops created during fn(), not by any # ancestors of those ops in the graph. captured_resources = set([ input_ for op in created_ops for input_ in op.inputs if input_.dtype == dtypes.resource ]) # NOTE(ebrevdo): The only time self._is_self_handle() is True # in this call is if one of the recently created ops, within # the execute(), themselves attempt to access the # CriticalSection. This will cause a deadlock. if any(self._is_self_handle(x) for x in captured_resources): raise ValueError("The function fn attempts to directly access the " "CriticalSection in which it would be running. " "This is illegal and would cause deadlocks.") self._check_multiple_access_to_resources( captured_resources, exclusive_resource_access) r_flat = [_identity(x) for x in nest.flatten(r)] with ops.control_dependencies(r_flat): # The identity must run on the same machine as self._handle with ops.colocate_with(self._handle): # Do not use array_ops.identity as there are special # optimizations within TensorFlow which seem to elide it # even when optimizations are disabled(!). ensure_lock_exists = gen_resource_variable_ops.consume_mutex_lock( lock) # Make sure that if any element of r is accessed, all of # them are executed together. r = nest.pack_sequence_as(r, control_flow_ops.tuple(nest.flatten(r))) with ops.control_dependencies([ensure_lock_exists]): outputs = nest.map_structure(_identity, r) if not context.executing_eagerly(): signature = _ExecutionSignature( op=lock.op, handle=self._handle, resources=list(captured_resources), exclusive_resource_access=exclusive_resource_access) ops.add_to_collections( CRITICAL_SECTION_EXECUTIONS, signature) return outputs def _add_control_dependencies_to_lock(self, created_ops, lock_op): """To avoid deadlocks, all args must be executed before lock_op.""" # Get all arguments (explicit and captured) of all ops created by fn(). all_args = set([input_.op for op in created_ops for input_ in op.inputs]) all_args.update( input_op for op in created_ops for input_op in op.control_inputs) # Unfortunately, we can't use sets throughout because TF seems to # create new Operation objects for the same op sometimes; and we # can't rely on id(op). 
# pylint: disable=protected-access all_args_dict = dict((op._id, op) for op in all_args) # Remove ops created within fn, or that lock_op already has a # control dependency on. Also remove a possible self-loop. for op in created_ops: all_args_dict.pop(op._id, None) for op in lock_op.control_inputs: all_args_dict.pop(op._id, None) for input_ in lock_op.inputs: all_args_dict.pop(input_.op._id, None) all_args_dict.pop(lock_op._id, None) all_args = all_args_dict.values() if not all_args: # No control dependencies to add; return early. return # This group is important: it ensures that any ops in all_args # outside the control context of the lock_op (and this fn, which # runs in the same context) are added to this context before # being added to the control dependencies of lock_op. all_args = control_flow_ops.group(*all_args) lock_op._add_control_input(all_args) # pylint: enable=protected-access def _is_self_handle(self, x): """Check if the tensor `x` is the same Mutex as `self._handle`.""" if isinstance(x, ops.EagerTensor): return x is self._handle return (x.op.type == "MutexV2" # blank shared_name means the op will create a unique one. and x.op.get_attr("shared_name") and (x.op.get_attr("shared_name") == self._handle.op.get_attr("shared_name")) and (x.op.device == self._handle.op.device or _get_colocation(x.op) == _get_colocation(self._handle.op))) def _check_multiple_access_to_resources( self, captured_resources, exclusive_resource_access): """Raise if captured_resources are accessed by another CriticalSection. Args: captured_resources: Set of tensors of type resource. exclusive_resource_access: Whether this execution requires exclusive resource access. Raises: ValueError: If any tensors in `captured_resources` are also accessed by another `CriticalSection`, and at least one of them requires exclusive resource access. """ # Collections and op introspection does not work in eager # mode. This is generally ok; since eager mode (as of # writing) executes sequentially anyway. for sg in ops.get_collection(CRITICAL_SECTION_EXECUTIONS): if self._is_self_handle(sg.handle): # Other executions in the same critical section are allowed. continue if not (exclusive_resource_access or sg.exclusive_resource_access): # Neither execution requested exclusive access. continue resource_intersection = captured_resources.intersection(sg.resources) if resource_intersection: raise ValueError( "This execution would access resources: %s. Either this " "lock (CriticalSection: %s) or lock '%s' " "(CriticalSection: %s) requested exclusive resource access " "of this resource. Did you mean to call execute with keyword " "argument exclusive_resource_access=False?" % (list(resource_intersection), self._handle, sg, sg.handle)) # TODO(ebrevdo): Re-enable once CriticalSection is in core. # def to_proto(self, export_scope=None): # """Converts a `CriticalSection` to a `CriticalSectoinDef` protocol buffer. # Args: # export_scope: Optional `string`. Name scope to remove. # Returns: # A `CriticalSectionDef` protocol buffer, or `None` if the # `CriticalSection` is not in the specified name scope. 
# """ # if export_scope is None or self.handle.name.startswith(export_scope): # cs_def = critical_section_pb2.CriticalSectionDef() # cs_def.critical_section_name = ops.strip_name_scope( # self._handle.name, export_scope) # return cs_def # else: # return None # @staticmethod # def from_proto(critical_section_def, import_scope=None): # return CriticalSection( # critical_section_def=critical_section_def, import_scope=import_scope) # TODO(ebrevdo): Re-enable once CriticalSection is in core. # def _execution_to_proto_fn(execution_signature, export_scope=None): # """Converts `_ExecutionSignature` to a `CriticalSectionExecutionDef`. # # TODO(ebrevdo): Update for _ExecutionSignature storing resource list. # Args: # execution_signature: Instance of `_ExecutionSignature`. # export_scope: The export scope, if any. # Returns: # An instance of `CriticalSectionExecutionDef`. # """ # if (export_scope is None # or execution_signature.op.name.startswith(export_scope)): # op_def = critical_section_pb2.CriticalSectionExecutionDef() # op_def.execute_in_critical_section_name = ops.strip_name_scope( # execution_signature.op.name, export_scope) # op_def.exclusive_resource_access = ( # execution_signature.exclusive_resource_access) # return op_def # else: # return None # def _execution_from_proto_fn(op_def, import_scope=None): # """Converts a `CriticalSectionExecutionDef` to a `_ExecutionSignature`.""" # # TODO(ebrevdo): Update for _ExecutionSignature storing resource list. # assert isinstance( # op_def, critical_section_pb2.CriticalSectionExecutionDef) # # Create from op_def. # g = ops.get_default_graph() # execution_op = g.as_graph_element( # ops.prepend_name_scope( # op_def.execute_in_critical_section_name, # import_scope=import_scope)) # return _ExecutionSignature( # op=execution_op, # exclusive_resource_access=op_def.exclusive_resource_access) # ops.register_proto_function( # CRITICAL_SECTIONS, # proto_type=critical_section_pb2.CriticalSectionDef, # to_proto=CriticalSection.to_proto, # from_proto=CriticalSection.from_proto) # ops.register_proto_function( # CRITICAL_SECTION_EXECUTIONS, # proto_type=critical_section_pb2.CriticalSectionExecutionDef, # to_proto=_execution_to_proto_fn, # from_proto=_execution_from_proto_fn)
tensorflow-master
tensorflow/python/ops/critical_section_ops.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= """Functional operations.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.core.framework import attr_value_pb2 from tensorflow.python.eager import context from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import function from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import gen_functional_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import tensor_array_ops from tensorflow.python.ops import variable_scope as vs # pylint: disable=unused-import from tensorflow.python.ops.gen_functional_ops import remote_call # pylint: enable=unused-import from tensorflow.python.ops.gen_functional_ops import symbolic_gradient from tensorflow.python.util import compat from tensorflow.python.util import function_utils from tensorflow.python.util import nest from tensorflow.python.util.tf_export import tf_export # TODO(yuanbyu, mrry): Handle stride to support sliding windows. @tf_export("foldl") def foldl(fn, elems, initializer=None, parallel_iterations=10, back_prop=True, swap_memory=False, name=None): """foldl on the list of tensors unpacked from `elems` on dimension 0. This foldl operator repeatedly applies the callable `fn` to a sequence of elements from first to last. The elements are made of the tensors unpacked from `elems` on dimension 0. The callable fn takes two tensors as arguments. The first argument is the accumulated value computed from the preceding invocation of fn, and the second is the value at the current position of `elems`. If `initializer` is None, `elems` must contain at least one element, and its first element is used as the initializer. Suppose that `elems` is unpacked into `values`, a list of tensors. The shape of the result tensor is fn(initializer, values[0]).shape`. This method also allows multi-arity `elems` and output of `fn`. If `elems` is a (possibly nested) list or tuple of tensors, then each of these tensors must have a matching first (unpack) dimension. The signature of `fn` may match the structure of `elems`. That is, if `elems` is `(t1, [t2, t3, [t4, t5]])`, then an appropriate signature for `fn` is: `fn = lambda (t1, [t2, t3, [t4, t5]]):`. Args: fn: The callable to be performed. elems: A tensor or (possibly nested) sequence of tensors, each of which will be unpacked along their first dimension. The nested sequence of the resulting slices will be the first argument to `fn`. initializer: (optional) A tensor or (possibly nested) sequence of tensors, as the initial value for the accumulator. 
parallel_iterations: (optional) The number of iterations allowed to run in parallel. back_prop: (optional) True enables support for back propagation. swap_memory: (optional) True enables GPU-CPU memory swapping. name: (optional) Name prefix for the returned tensors. Returns: A tensor or (possibly nested) sequence of tensors, resulting from applying `fn` consecutively to the list of tensors unpacked from `elems`, from first to last. Raises: TypeError: if `fn` is not callable. Example: ```python elems = tf.constant([1, 2, 3, 4, 5, 6]) sum = foldl(lambda a, x: a + x, elems) # sum == 21 ``` """ if not callable(fn): raise TypeError("fn must be callable.") def create_ta(elem): return tensor_array_ops.TensorArray( dtype=elem.dtype, size=n, dynamic_size=False, infer_shape=True).unstack(elem) in_graph_mode = not context.executing_eagerly() with ops.name_scope(name, "foldl", [elems]): # TODO(akshayka): Remove the in_graph_mode check once caching devices are # supported in Eager if in_graph_mode: # Any get_variable calls in fn will cache the first call locally # and not issue repeated network I/O requests for each iteration. varscope = vs.get_variable_scope() varscope_caching_device_was_none = False if varscope.caching_device is None: # TODO(ebrevdo): Change to using colocate_with here and in other # methods. varscope.set_caching_device(lambda op: op.device) varscope_caching_device_was_none = True # Convert elems to tensor array. n may be known statically. elems_flat = [ ops.convert_to_tensor(elem, name="elem") for elem in nest.flatten(elems) ] n = ( tensor_shape.dimension_value(elems_flat[0].shape[0]) or array_ops.shape(elems_flat[0])[0]) elems_ta = nest.map_structure(create_ta, elems) if initializer is None: a = nest.map_structure(lambda elem: elem.read(0), elems_ta) i = constant_op.constant(1) else: a = initializer i = constant_op.constant(0) def compute(i, a): elem_i = nest.map_structure(lambda elem: elem.read(i), elems_ta) a = fn(a, elem_i) return [i + 1, a] _, r_a = control_flow_ops.while_loop( lambda i, a: i < n, compute, [i, a], parallel_iterations=parallel_iterations, back_prop=back_prop, swap_memory=swap_memory, maximum_iterations=n) # TODO(akshayka): Remove the in_graph_mode check once caching devices are # supported in Eager if in_graph_mode and varscope_caching_device_was_none: varscope.set_caching_device(None) return r_a @tf_export("foldr") def foldr(fn, elems, initializer=None, parallel_iterations=10, back_prop=True, swap_memory=False, name=None): """foldr on the list of tensors unpacked from `elems` on dimension 0. This foldr operator repeatedly applies the callable `fn` to a sequence of elements from last to first. The elements are made of the tensors unpacked from `elems`. The callable fn takes two tensors as arguments. The first argument is the accumulated value computed from the preceding invocation of fn, and the second is the value at the current position of `elems`. If `initializer` is None, `elems` must contain at least one element, and its first element is used as the initializer. Suppose that `elems` is unpacked into `values`, a list of tensors. The shape of the result tensor is `fn(initializer, values[0]).shape`. This method also allows multi-arity `elems` and output of `fn`. If `elems` is a (possibly nested) list or tuple of tensors, then each of these tensors must have a matching first (unpack) dimension. The signature of `fn` may match the structure of `elems`. 
That is, if `elems` is `(t1, [t2, t3, [t4, t5]])`, then an appropriate signature for `fn` is: `fn = lambda (t1, [t2, t3, [t4, t5]]):`. Args: fn: The callable to be performed. elems: A tensor or (possibly nested) sequence of tensors, each of which will be unpacked along their first dimension. The nested sequence of the resulting slices will be the first argument to `fn`. initializer: (optional) A tensor or (possibly nested) sequence of tensors, as the initial value for the accumulator. parallel_iterations: (optional) The number of iterations allowed to run in parallel. back_prop: (optional) True enables support for back propagation. swap_memory: (optional) True enables GPU-CPU memory swapping. name: (optional) Name prefix for the returned tensors. Returns: A tensor or (possibly nested) sequence of tensors, resulting from applying `fn` consecutively to the list of tensors unpacked from `elems`, from last to first. Raises: TypeError: if `fn` is not callable. Example: ```python elems = [1, 2, 3, 4, 5, 6] sum = foldr(lambda a, x: a + x, elems) # sum == 21 ``` """ if not callable(fn): raise TypeError("fn must be callable.") def create_ta(elem): return tensor_array_ops.TensorArray( dtype=elem.dtype, size=n, dynamic_size=False, infer_shape=True).unstack(elem) in_graph_mode = not context.executing_eagerly() with ops.name_scope(name, "foldr", [elems]): # TODO(akshayka): Remove the in_graph_mode check once caching devices are # supported in Eager if in_graph_mode: # Any get_variable calls in fn will cache the first call locally and not # issue repeated network I/O requests for each iteration. varscope = vs.get_variable_scope() varscope_caching_device_was_none = False if varscope.caching_device is None: # TODO(ebrevdo): Change to using colocate_with here and in other # methods. varscope.set_caching_device(lambda op: op.device) varscope_caching_device_was_none = True # Convert elems to tensor array. n may be known statically. elems_flat = [ ops.convert_to_tensor(elem, name="elem") for elem in nest.flatten(elems) ] n = ( tensor_shape.dimension_value(elems_flat[0].shape[0]) or array_ops.shape(elems_flat[0])[0]) elems_ta = nest.map_structure(create_ta, elems) if initializer is None: i = n - 1 a = nest.map_structure(lambda elem: elem.read(i), elems_ta) else: i = n a = initializer def compute(i, a): i -= 1 elem = nest.map_structure(lambda elem: elem.read(i), elems_ta) a_out = fn(a, elem) return [i, a_out] _, r_a = control_flow_ops.while_loop( lambda i, a: i > 0, compute, [i, a], parallel_iterations=parallel_iterations, back_prop=back_prop, swap_memory=swap_memory, maximum_iterations=n) # TODO(akshayka): Remove the in_graph_mode check once caching devices are # supported in Eager if in_graph_mode and varscope_caching_device_was_none: varscope.set_caching_device(None) return r_a @tf_export("scan") def scan(fn, elems, initializer=None, parallel_iterations=10, back_prop=True, swap_memory=False, infer_shape=True, reverse=False, name=None): """scan on the list of tensors unpacked from `elems` on dimension 0. The simplest version of `scan` repeatedly applies the callable `fn` to a sequence of elements from first to last. The elements are made of the tensors unpacked from `elems` on dimension 0. The callable fn takes two tensors as arguments. The first argument is the accumulated value computed from the preceding invocation of fn, and the second is the value at the current position of `elems`. If `initializer` is None, `elems` must contain at least one element, and its first element is used as the initializer. 
Suppose that `elems` is unpacked into `values`, a list of tensors. The shape of the result tensor is `[len(values)] + fn(initializer, values[0]).shape`. If reverse=True, it's fn(initializer, values[-1]).shape. This method also allows multi-arity `elems` and accumulator. If `elems` is a (possibly nested) list or tuple of tensors, then each of these tensors must have a matching first (unpack) dimension. The second argument of `fn` must match the structure of `elems`. If no `initializer` is provided, the output structure and dtypes of `fn` are assumed to be the same as its input; and in this case, the first argument of `fn` must match the structure of `elems`. If an `initializer` is provided, then the output of `fn` must have the same structure as `initializer`; and the first argument of `fn` must match this structure. For example, if `elems` is `(t1, [t2, t3])` and `initializer` is `[i1, i2]` then an appropriate signature for `fn` in `python2` is: `fn = lambda (acc_p1, acc_p2), (t1, [t2, t3]):` and `fn` must return a list, `[acc_n1, acc_n2]`. An alternative correct signature for `fn`, and the one that works in `python3`, is: `fn = lambda a, t:`, where `a` and `t` correspond to the input tuples. Args: fn: The callable to be performed. It accepts two arguments. The first will have the same structure as `initializer` if one is provided, otherwise it will have the same structure as `elems`. The second will have the same (possibly nested) structure as `elems`. Its output must have the same structure as `initializer` if one is provided, otherwise it must have the same structure as `elems`. elems: A tensor or (possibly nested) sequence of tensors, each of which will be unpacked along their first dimension. The nested sequence of the resulting slices will be the first argument to `fn`. initializer: (optional) A tensor or (possibly nested) sequence of tensors, initial value for the accumulator, and the expected output type of `fn`. parallel_iterations: (optional) The number of iterations allowed to run in parallel. back_prop: (optional) True enables support for back propagation. swap_memory: (optional) True enables GPU-CPU memory swapping. infer_shape: (optional) False disables tests for consistent output shapes. reverse: (optional) True scans the tensor last to first (instead of first to last). name: (optional) Name prefix for the returned tensors. Returns: A tensor or (possibly nested) sequence of tensors. Each tensor packs the results of applying `fn` to tensors unpacked from `elems` along the first dimension, and the previous accumulator value(s), from first to last (or last to first, if `reverse=True`). Raises: TypeError: if `fn` is not callable or the structure of the output of `fn` and `initializer` do not match. ValueError: if the lengths of the output of `fn` and `initializer` do not match. 
Examples: ```python elems = np.array([1, 2, 3, 4, 5, 6]) sum = scan(lambda a, x: a + x, elems) # sum == [1, 3, 6, 10, 15, 21] sum = scan(lambda a, x: a + x, elems, reverse=True) # sum == [21, 20, 18, 15, 11, 6] ``` ```python elems = np.array([1, 2, 3, 4, 5, 6]) initializer = np.array(0) sum_one = scan( lambda a, x: x[0] - x[1] + a, (elems + 1, elems), initializer) # sum_one == [1, 2, 3, 4, 5, 6] ``` ```python elems = np.array([1, 0, 0, 0, 0, 0]) initializer = (np.array(0), np.array(1)) fibonaccis = scan(lambda a, _: (a[1], a[0] + a[1]), elems, initializer) # fibonaccis == ([1, 1, 2, 3, 5, 8], [1, 2, 3, 5, 8, 13]) ``` """ if not callable(fn): raise TypeError("fn must be callable.") input_is_sequence = nest.is_sequence(elems) input_flatten = lambda x: nest.flatten(x) if input_is_sequence else [x] def input_pack(x): return nest.pack_sequence_as(elems, x) if input_is_sequence else x[0] if initializer is None: output_is_sequence = input_is_sequence output_flatten = input_flatten output_pack = input_pack else: output_is_sequence = nest.is_sequence(initializer) output_flatten = lambda x: nest.flatten(x) if output_is_sequence else [x] def output_pack(x): return (nest.pack_sequence_as(initializer, x) if output_is_sequence else x[0]) elems_flat = input_flatten(elems) in_graph_mode = not context.executing_eagerly() with ops.name_scope(name, "scan", elems_flat): # TODO(akshayka): Remove the in_graph_mode check once caching devices are # supported in Eager if in_graph_mode: # Any get_variable calls in fn will cache the first call locally # and not issue repeated network I/O requests for each iteration. varscope = vs.get_variable_scope() varscope_caching_device_was_none = False if varscope.caching_device is None: # TODO(ebrevdo): Change to using colocate_with here and in other # methods. varscope.set_caching_device(lambda op: op.device) varscope_caching_device_was_none = True # Convert elems to tensor array. elems_flat = [ ops.convert_to_tensor(elem, name="elem") for elem in elems_flat ] # Convert elems to tensor array. n may be known statically. n = tensor_shape.dimension_value(elems_flat[0].shape[0]) if n is None: n = array_ops.shape(elems_flat[0])[0] # TensorArrays are always flat elems_ta = [ tensor_array_ops.TensorArray( dtype=elem.dtype, size=n, dynamic_size=False, element_shape=elem.shape[1:], infer_shape=True) for elem in elems_flat ] # Unpack elements elems_ta = [ elem_ta.unstack(elem) for elem_ta, elem in zip(elems_ta, elems_flat) ] if initializer is None: a_flat = [elem.read(n - 1 if reverse else 0) for elem in elems_ta] i = 1 else: initializer_flat = output_flatten(initializer) a_flat = [ops.convert_to_tensor(init) for init in initializer_flat] i = 0 # Create a tensor array to store the intermediate values. accs_ta = [ tensor_array_ops.TensorArray( dtype=init.dtype, size=n, element_shape=init.shape if infer_shape else None, dynamic_size=False, infer_shape=infer_shape) for init in a_flat ] if initializer is None: accs_ta = [ acc_ta.write(n - 1 if reverse else 0, a) for (acc_ta, a) in zip(accs_ta, a_flat) ] def compute(i, a_flat, tas): """The loop body of scan. Args: i: the loop counter. a_flat: the accumulator value(s), flattened. tas: the output accumulator TensorArray(s), flattened. 
Returns: [i + 1, a_flat, tas]: the updated counter + new accumulator values + updated TensorArrays Raises: TypeError: if initializer and fn() output structure do not match ValueType: if initializer and fn() output lengths do not match """ packed_elems = input_pack([elem_ta.read(i) for elem_ta in elems_ta]) packed_a = output_pack(a_flat) a_out = fn(packed_a, packed_elems) nest.assert_same_structure(elems if initializer is None else initializer, a_out) flat_a_out = output_flatten(a_out) tas = [ta.write(i, value) for (ta, value) in zip(tas, flat_a_out)] if reverse: next_i = i - 1 else: next_i = i + 1 return (next_i, flat_a_out, tas) if reverse: initial_i = n - 1 - i condition = lambda i, _1, _2: i >= 0 else: initial_i = i condition = lambda i, _1, _2: i < n _, _, r_a = control_flow_ops.while_loop( condition, compute, (initial_i, a_flat, accs_ta), parallel_iterations=parallel_iterations, back_prop=back_prop, swap_memory=swap_memory, maximum_iterations=n) results_flat = [r.stack() for r in r_a] n_static = tensor_shape.Dimension( tensor_shape.dimension_value( elems_flat[0].get_shape().with_rank_at_least(1)[0])) for elem in elems_flat[1:]: n_static.merge_with( tensor_shape.Dimension( tensor_shape.dimension_value( elem.get_shape().with_rank_at_least(1)[0]))) for r in results_flat: r.set_shape( tensor_shape.TensorShape(n_static).concatenate(r.get_shape()[1:])) # TODO(akshayka): Remove the in_graph_mode check once caching devices are # supported in Eager if in_graph_mode and varscope_caching_device_was_none: varscope.set_caching_device(None) return output_pack(results_flat) # pylint: disable=invalid-name def If(cond, inputs, then_branch, else_branch, name=None): r"""output = Cond(inputs) ? then_branch(inputs) : else_branch(inputs). Args: cond: A `Tensor`. A scalar. If the scalar is not a boolean, the scalar is converted to a boolean according to the following rule: if the scalar is a numerical value, non-zero means True and zero means False; if the scalar is a string, non-empty means True and empty means False. inputs: A list of input tensors. then_branch: A function takes 'inputs' and returns a list of tensors, whose types are the same as what else_branch returns. else_branch: A function takes 'inputs' and returns a list of tensors. whose types are the same as what then_branch returns. name: A name for the operation (optional). Returns: A list of tensors returned by either then_branch(inputs) or else_branch(inputs). """ # pylint: disable=protected-access return gen_functional_ops._if( cond, inputs, [_.type for _ in then_branch.definition.signature.output_arg], then_branch, else_branch, name=name) def Gradient(inputs, f, name=None): r"""Computes the gradient function for function f via backpropagation. Args: inputs: A list of tensors of size N + M. f: The function we want to compute the gradient for. The function 'f' must be a numerical function which takes N inputs and produces M outputs. Its gradient function 'g', which is a function taking N + M inputs and produces N outputs. I.e. if we have (y1, y2, ..., yM) = f(x1, x2, ..., xN), then, g is (dL/dx1, dL/dx2, ..., dL/dxN) = g(x1, x2, ..., xN, dL/dy1, dL/dy2, ..., dL/dyM), where L is a scalar-value function of (x1, x2, ..., xN) (e.g., the loss function). dL/dxi is the partial derivative of L with respect to xi. name: A name for the operation (optional). Returns: A list of tensors of size N. """ # TODO(zhifengc): Pretty-print the above spec in latex. # TODO(zhfiengc): Needs some math expert to say the comment above better. 
tlist = [_.type for _ in f.definition.signature.input_arg] return symbolic_gradient(input=inputs, Tout=tlist, f=f, name=name) def _LoopBodyCaptureWrapper(func): """Returns a wrapper for `func` that handles loop-carried captured inputs.""" @function.Defun( *func.declared_input_types, func_name="%s_Wrapper" % func.name) def Wrapper(*args): """A wrapper that handles loop-carried captured inputs.""" result = func(*args) extra_args = tuple(function.get_extra_args()) # Nullary functions return an Operation. Normal functions can't do this # because their return values are converted to Tensors. if isinstance(result, ops.Operation): return extra_args # Unary functions return a single Tensor value. elif not isinstance(result, tuple): return (result,) + extra_args # N-ary functions return a tuple of Tensors. else: return result + extra_args return Wrapper # pylint: disable=invalid-name,protected-access def While(input_, cond, body, name=None, hostmem=None): r"""output = input; While (Cond(output)) { output = Body(output) }. Args: input_: A list of `Tensor` objects. A list of input tensors whose types are T. cond: A function that takes 'input' and returns a tensor. If the tensor is a non-boolean scalar, the scalar is converted to a boolean according to the following rule: if the scalar is a numerical value, non-zero means True and zero means False; if the scalar is a string, non-empty means True and empty means False. If the tensor is not a scalar, non-emptiness means True and False otherwise. body: A function that takes a list of tensors and returns another list of tensors. Both lists have the same types as specified by T. name: A name for the operation (optional). hostmem: A list of integers. If i is in the list, input[i] is a host memory tensor. Raises: ValueError: if `cond` has implicitly captured inputs or if `cond` and `body` have different signatures. Returns: A list of `Tensor` objects. Has the same type as `input`. A list of output tensors whose types are T. """ if cond.captured_inputs: raise ValueError("While op 'cond' argument must be a function " "without implicitly captured inputs.") if cond.declared_input_types != body.declared_input_types: raise ValueError( "While op 'cond' and 'body' signatures do not match. %r vs %r" % (cond.declared_input_types, body.declared_input_types)) if body.captured_inputs: cond_dtypes = list( body.declared_input_types) + [t.dtype for t in body.captured_inputs] @function.Defun(*cond_dtypes, func_name="%s_Wrapper" % cond.name) def CondWrapper(*args): """A wrapper that handles loop-carried captured inputs.""" return cond(*args[:len(body.declared_input_types)]) ret = gen_functional_ops._while( input_ + body.captured_inputs, CondWrapper, _LoopBodyCaptureWrapper(body), name=name) # Slice off the loop-carried captured inputs. ret = ret[:-len(body.captured_inputs)] else: ret = gen_functional_ops._while(input_, cond, body, name=name) if hostmem: input_attr = attr_value_pb2.AttrValue() input_attr.list.i.extend(hostmem) ret[0].op._set_attr("_input_hostmem", input_attr) # pylint: disable=protected-access output_attr = attr_value_pb2.AttrValue() output_attr.list.i.extend(hostmem) ret[0].op._set_attr("_output_hostmem", output_attr) # pylint: disable=protected-access return ret # b/36459430 # # Ideally, we would not need to rewrite a For loop into a While loop. # However, today, if a While runs on GPU and the condition returns a # boolean, the While kernel crashes. Even if we fix the crash, the # bool needs to be copied between GPU and CPU.
So, a for loop is much # preferred when running on GPU. # # On the other hand, For op has no directly XLA kernel. So, when we run # a for loop, we need to rewrite it using a While op. # # It should be possible and probably better to write a XLA C++ kernel # implementing the logic in _ForUsingWhile. def _ForUsingWhile(start, limit, delta, inputs, forbody, name=None, hostmem=None): """Helper to implement a For loop using a While.""" # To support negative delta (e.g., range(100, 0, -3)), we iterate # over the range(n) and use iter * delta + start as the real # iteration index. (e.g., for i in range(34): iter = i * (-3) + # 100). d = math_ops.abs(delta) # XLA on TPUs doesn't support integer division n = math_ops.cast( math_ops.cast((math_ops.abs(limit - start) + d - 1), dtypes.float32) / math_ops.cast(d, dtypes.float32), dtypes.int32) # Carried loop variables ("extra_args") are implicitly added to the input list # of the WhileBody function. WhileCond does not call forbody, and so does not # depend on any of forbody's extra_args. Since WhileCond and WhileBody # must have identical inputs, we have to augment the cond signature to take # the same types as the carried loop variables. body_sig = [dtypes.int32] * 4 + list(forbody.declared_input_types)[1:] cond_name = "%s_Cond" % forbody.name @function.Defun(*body_sig, func_name=cond_name) def WhileCond(i, n, *args): del args return i < n body_name = "%s_Body" % forbody.name @function.Defun(*body_sig, func_name=body_name) def WhileBody(i, n, start, delta, *args): """A While wrapper for forbody that handles loop-carried captured inputs.""" for_result = forbody(start + i * delta, *args) # Nullary functions return an Operation. Normal functions can't do this # because their return values are converted to Tensors. if isinstance(for_result, ops.Operation): for_result = () # Unary functions return a single Tensor value. elif isinstance(for_result, ops.Tensor): for_result = (for_result,) return (i + 1, n, start, delta) + tuple(for_result) if hostmem is not None: hostmem = [0, 1, 2, 3] + [(4 + _) for _ in hostmem] else: hostmem = [0, 1, 2, 3] results = While( input_=[0, n, start, delta] + inputs, cond=WhileCond, body=WhileBody, name=name, hostmem=hostmem) # Slice off the loop-carried captured inputs. return list(results[4:len(results)]) def For(start, limit, delta, inputs, body, name=None, hostmem=None, rewrite_with_while=None): r"""out = input; for i in range(start, limit, delta) out = body(i, out). Args: start: A `Tensor` of type `int32`. limit: A `Tensor` of type `int32`. delta: A `Tensor` of type `int32`. inputs: A list of `Tensor` objects. A list of input tensors whose types are T. body: A function takes a list of tensors and returns another list of tensors. Both lists have the same types as (int32, T...). name: A name for the operation (optional). hostmem: A list of integer. If i is in the list, inputs[i] is a host memory tensor. In other words, (i+1)-th argument of the body function is expecting a host memory. rewrite_with_while: If True, using While op to implement the For. Returns: A list of `Tensor` objects. Has the same type as `input`. A list of output tensors whose types are T. """ if rewrite_with_while: return _ForUsingWhile(start, limit, delta, inputs, body, name, hostmem) if body.captured_inputs: ret = gen_functional_ops._for( start, limit, delta, inputs + body.captured_inputs, _LoopBodyCaptureWrapper(body), name=name) # Slice off the loop-carried captured inputs. 
ret = ret[:-len(body.captured_inputs)] else: ret = gen_functional_ops._for(start, limit, delta, inputs, body, name=name) if hostmem: num_for_params = 3 # start/limit/delta input_attr = attr_value_pb2.AttrValue() input_attr.list.i.extend([num_for_params + i for i in hostmem]) ret[0].op._set_attr("_input_hostmem", input_attr) # pylint: disable=protected-access output_attr = attr_value_pb2.AttrValue() output_attr.list.i.extend(hostmem) ret[0].op._set_attr("_output_hostmem", output_attr) # pylint: disable=protected-access return ret # pylint: enable=invalid-name,protected-access def partitioned_call(args, f, tout=None, executing_eagerly=None, config=None, executor_type=None): """Executes a function while respecting device annotations. Currently, only those functions that execute within the same address space can be executed. Args: args: The arguments of the function, including captured inputs. f: The function to execute; an instance of `_DefinedFunction` or `_EagerDefinedFunction`. tout: a list containing the output dtypes enums; if `None`, inferred from the signature of `f`. executing_eagerly: (Optional) A boolean indicating whether the context is executing eagerly. If `None`, fetched from the global context. config: (Optional) A `tensorflow::ConfigProto` proto, serialized. If `None`, all optimizations are disabled. Currently only handled for eager defined functions. executor_type: (Optional) A string for the name of the executor to be used in the function call. If not set, or set to an empty string, the default tensorflow executor will be used. Returns: The list of `Tensor`s returned by invoking `f(args)`. If the function does not return anything, then returns `None` if eager execution is enabled, or the `Operation` if not. """ if tout is None: tout = tuple(x.type for x in f.definition.signature.output_arg) if executing_eagerly is None: executing_eagerly = context.executing_eagerly() if config is None: config = function_utils.get_disabled_rewriter_config() if executor_type is None: executor_type = "" if executing_eagerly or len(tout): if f.stateful_ops: outputs = gen_functional_ops.stateful_partitioned_call( args=args, Tout=tout, f=f, config_proto=config, executor_type=executor_type) else: outputs = gen_functional_ops.partitioned_call( args=args, Tout=tout, f=f, config_proto=config, executor_type=executor_type) return outputs if outputs else None # The generated binding returns an empty list for functions that don't # return any Tensors, hence the need to use `create_op` directly. args = [ops.internal_convert_to_tensor(x) for x in args] tin_attr = attr_value_pb2.AttrValue( list=attr_value_pb2.AttrValue.ListValue( type=[x.dtype.as_datatype_enum for x in args])) tout_attr = attr_value_pb2.AttrValue( list=attr_value_pb2.AttrValue.ListValue(type=tout)) func_attr = attr_value_pb2.AttrValue( func=attr_value_pb2.NameAttrList(name=f.name)) executor_type_attr = attr_value_pb2.AttrValue( s=compat.as_bytes(executor_type)) # When running in graph mode, the graph and function graphs are optimized # (i.e. run through grappler) per the session options, so we can disable any # eager-specific rewriting. 
config_proto = attr_value_pb2.AttrValue( s=function_utils.get_disabled_rewriter_config()) graph = ops.get_default_graph() f.add_to_graph(graph) op_name = "StatefulPartitionedCall" if f.stateful_ops else "PartitionedCall" op = graph.create_op( op_name, args, tout, name="PartitionedFunctionCall", attrs={ "Tin": tin_attr, "Tout": tout_attr, "f": func_attr, "config_proto": config_proto, "executor_type": executor_type_attr, }) outputs = op.outputs return outputs if outputs else op
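# Illustrative sketch (editor's addition, not part of the original module):
# the public entry points defined above on a small constant input. Works in
# eager or graph mode; expected values are noted in the comments.
def _example_functional_ops():  # pragma: no cover
  import tensorflow as tf

  elems = tf.constant([1, 2, 3, 4, 5, 6])
  total = tf.foldl(lambda a, x: a + x, elems)       # 21
  total_r = tf.foldr(lambda a, x: a + x, elems)     # 21
  running = tf.scan(lambda a, x: a + x, elems)      # [1, 3, 6, 10, 15, 21]
  return total, total_r, running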
tensorflow-master
tensorflow/python/ops/functional_ops.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Benchmark for split and grad of split.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.core.protobuf import config_pb2 from tensorflow.python.client import session as session_lib from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import variables from tensorflow.python.platform import benchmark from tensorflow.python.platform import test from tensorflow.python.platform import tf_logging as logging def build_graph(device, input_shape, output_sizes, axis): """Build a graph containing a sequence of split operations. Args: device: string, the device to run on. input_shape: shape of the input tensor. output_sizes: size of each output along axis. axis: axis to be split along. Returns: An array of tensors to run() """ with ops.device("/%s:0" % device): inp = array_ops.zeros(input_shape) outputs = [] for _ in range(100): outputs.extend(array_ops.split(inp, output_sizes, axis)) return control_flow_ops.group(*outputs) class SplitBenchmark(test.Benchmark): """Benchmark split!""" def _run_graph(self, device, output_shape, variable, num_outputs, axis): """Run the graph and print its execution time. Args: device: string, the device to run on. output_shape: shape of each output tensors. variable: whether or not the output shape should be fixed num_outputs: the number of outputs to split the input into axis: axis to be split Returns: The duration of the run in seconds. 
""" graph = ops.Graph() with graph.as_default(): if not variable: if axis == 0: input_shape = [output_shape[0] * num_outputs, output_shape[1]] sizes = [output_shape[0] for _ in range(num_outputs)] else: input_shape = [output_shape[0], output_shape[1] * num_outputs] sizes = [output_shape[1] for _ in range(num_outputs)] else: sizes = np.random.randint( low=max(1, output_shape[axis] - 2), high=output_shape[axis] + 2, size=num_outputs) total_size = np.sum(sizes) if axis == 0: input_shape = [total_size, output_shape[1]] else: input_shape = [output_shape[0], total_size] outputs = build_graph(device, input_shape, sizes, axis) config = config_pb2.ConfigProto(graph_options=config_pb2.GraphOptions( optimizer_options=config_pb2.OptimizerOptions( opt_level=config_pb2.OptimizerOptions.L0))) with session_lib.Session(graph=graph, config=config) as session: logging.set_verbosity("info") variables.global_variables_initializer().run() bench = benchmark.TensorFlowBenchmark() bench.run_op_benchmark( session, outputs, mbs=input_shape[0] * input_shape[1] * 4 * 2 * 100 / 1e6, extras={ "input_shape": input_shape, "variable": variable, "axis": axis }) def benchmark_split(self): print("Forward vs backward concat") shapes = [[2000, 8], [8, 2000], [100, 18], [1000, 18], [10000, 18], [100, 97], [1000, 97], [10000, 1], [1, 10000]] axis_ = [1] # 0 is very fast because it doesn't actually do any copying num_outputs = 100 variable = [False, True] # fixed input size or not for shape in shapes: for axis in axis_: for v in variable: self._run_graph("gpu", shape, v, num_outputs, axis) if __name__ == "__main__": test.main()
tensorflow-master
tensorflow/python/ops/split_benchmark.py
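For readers unfamiliar with the harness used in `SplitBenchmark`, here is a stripped-down sketch of my own using the public `tf.test.Benchmark` API (the matmul workload and the `TinyBenchmark` name are illustrative, not from this repository):

```python
import tensorflow as tf


class TinyBenchmark(tf.test.Benchmark):

  def benchmark_matmul(self):
    graph = tf.Graph()
    with graph.as_default():
      a = tf.random.normal([256, 256])
      prod = tf.linalg.matmul(a, a)
    with tf.compat.v1.Session(graph=graph) as sess:
      # Warms up, then times repeated sess.run(prod) calls and records the
      # wall time under this benchmark method's name.
      self.run_op_benchmark(sess, prod, min_iters=10)


if __name__ == "__main__":
  tf.test.main()
```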
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Math Operations.

Note: Functions taking `Tensor` arguments can also take anything accepted by
`tf.convert_to_tensor`.

Note: Elementwise binary operations in TensorFlow follow [numpy-style
broadcasting](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).

TensorFlow provides a variety of math functions including:

* Basic arithmetic operators and trigonometric functions.
* Special math functions (like: `tf.math.igamma` and `tf.math.zeta`)
* Complex number functions (like: `tf.math.imag` and `tf.math.angle`)
* Reductions and scans (like: `tf.math.reduce_mean` and `tf.math.cumsum`)
* Segment functions (like: `tf.math.segment_sum`)

See: `tf.linalg` for matrix and tensor functions.

<a id=Segmentation></a>

## About Segmentation

TensorFlow provides several operations that you can use to perform common
math computations on tensor segments. Here a segmentation is a partitioning
of a tensor along the first dimension, i.e. it defines a mapping from the
first dimension onto `segment_ids`. The `segment_ids` tensor should be the
size of the first dimension, `d0`, with consecutive IDs in the range `0` to
`k`, where `k<d0`. In particular, a segmentation of a matrix tensor is a
mapping of rows to segments.

For example:

```python
c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
tf.math.segment_sum(c, tf.constant([0, 0, 1]))
#  ==>  [[0 0 0 0]
#        [5 6 7 8]]
```

The standard `segment_*` functions assert that the segment indices are sorted.
If you have unsorted indices use the equivalent `unsorted_segment_` function.
These functions take an additional argument `num_segments` so that the output
tensor can be efficiently allocated.
``` python c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]]) tf.math.unsorted_segment_sum(c, tf.constant([0, 1, 0]), num_segments=2) # ==> [[ 6, 8, 10, 12], # [-1, -2, -3, -4]] ``` """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from six.moves import xrange # pylint: disable=redefined-builtin from tensorflow.python.compat import compat as fwd_compat from tensorflow.python.eager import context from tensorflow.python.framework import common_shapes from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import graph_util from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import tensor_shape from tensorflow.python.ops import array_ops from tensorflow.python.ops import gen_data_flow_ops from tensorflow.python.ops import gen_math_ops from tensorflow.python.ops import gen_nn_ops from tensorflow.python.ops import gen_sparse_ops # go/tf-wildcard-import # pylint: disable=wildcard-import from tensorflow.python.ops.gen_math_ops import * # pylint: enable=wildcard-import from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util import compat from tensorflow.python.util import deprecation from tensorflow.python.util import dispatch from tensorflow.python.util import nest from tensorflow.python.util.tf_export import tf_export # Aliases for some automatically-generated names. linspace = gen_math_ops.lin_space nextafter = gen_math_ops.next_after arg_max = deprecation.deprecated(None, "Use `tf.math.argmax` instead")(arg_max) # pylint: disable=used-before-assignment arg_min = deprecation.deprecated(None, "Use `tf.math.argmin` instead")(arg_min) # pylint: disable=used-before-assignment tf_export(v1=["arg_max"])(arg_max) tf_export(v1=["arg_min"])(arg_min) # This is set by resource_variable_ops.py. It is included in this way since # there is a circular dependency between math_ops and resource_variable_ops _resource_variable_type = None def _set_doc(doc): def _decorator(func): func.__doc__ = doc return func return _decorator # pylint: disable=redefined-builtin @tf_export(v1=["math.argmax", "argmax"]) @deprecation.deprecated_args(None, "Use the `axis` argument instead", "dimension") @_set_doc( gen_math_ops.arg_max.__doc__.replace("dimensions", "axes").replace("dimension", "axis")) def argmax(input, axis=None, name=None, dimension=None, output_type=dtypes.int64): axis = deprecation.deprecated_argument_lookup("axis", axis, "dimension", dimension) return argmax_v2(input, axis, output_type, name) @tf_export("math.argmax", "argmax", v1=[]) def argmax_v2(input, axis=None, output_type=dtypes.int64, name=None): """Returns the index with the largest value across axes of a tensor. Note that in case of ties the identity of the return value is not guaranteed. Args: input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. axis: A `Tensor`. Must be one of the following types: `int32`, `int64`. int32 or int64, must be in the range `-rank(input), rank(input))`. Describes which axis of the input Tensor to reduce across. For vectors, use axis = 0. output_type: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int64`. name: A name for the operation (optional). 
Returns: A `Tensor` of type `output_type`. Usage: ```python import tensorflow as tf a = [1, 10, 26.9, 2.8, 166.32, 62.3] b = tf.math.argmax(input = a) c = tf.keras.backend.eval(b) # c = 4 # here a[4] = 166.32 which is the largest element of a across axis 0 ``` """ if axis is None: axis = 0 return gen_math_ops.arg_max(input, axis, name=name, output_type=output_type) @tf_export(v1=["math.argmin", "argmin"]) @deprecation.deprecated_args(None, "Use the `axis` argument instead", "dimension") @_set_doc( gen_math_ops.arg_min.__doc__.replace("dimensions", "axes").replace("dimension", "axis")) def argmin(input, axis=None, name=None, dimension=None, output_type=dtypes.int64): axis = deprecation.deprecated_argument_lookup("axis", axis, "dimension", dimension) return argmin_v2(input, axis, output_type, name) @tf_export("math.argmin", "argmin", v1=[]) def argmin_v2(input, axis=None, output_type=dtypes.int64, name=None): """Returns the index with the smallest value across axes of a tensor. Note that in case of ties the identity of the return value is not guaranteed. Args: input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. axis: A `Tensor`. Must be one of the following types: `int32`, `int64`. int32 or int64, must be in the range `-rank(input), rank(input))`. Describes which axis of the input Tensor to reduce across. For vectors, use axis = 0. output_type: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int64`. name: A name for the operation (optional). Returns: A `Tensor` of type `output_type`. Usage: ```python import tensorflow as tf a = [1, 10, 26.9, 2.8, 166.32, 62.3] b = tf.math.argmin(input = a) c = tf.keras.backend.eval(b) # c = 0 # here a[0] = 1 which is the smallest element of a across axis 0 ``` """ if axis is None: axis = 0 return gen_math_ops.arg_min(input, axis, name=name, output_type=output_type) # pylint: enable=redefined-builtin # pylint: disable=anomalous-backslash-in-string,protected-access # pylint: disable=g-docstring-has-escape @tf_export("math.abs", "abs") @dispatch.add_dispatch_support def abs(x, name=None): # pylint: disable=redefined-builtin r"""Computes the absolute value of a tensor. Given a tensor of integer or floating-point values, this operation returns a tensor of the same type, where each element contains the absolute value of the corresponding element in the input. Given a tensor `x` of complex numbers, this operation returns a tensor of type `float32` or `float64` that is the absolute value of each element in `x`. All elements in `x` must be complex numbers of the form \\(a + bj\\). The absolute value is computed as \\( \sqrt{a^2 + b^2}\\). For example: ```python x = tf.constant([[-2.25 + 4.75j], [-3.25 + 5.75j]]) tf.abs(x) # [5.25594902, 6.60492229] ``` Args: x: A `Tensor` or `SparseTensor` of type `float16`, `float32`, `float64`, `int32`, `int64`, `complex64` or `complex128`. name: A name for the operation (optional). Returns: A `Tensor` or `SparseTensor` the same size, type, and sparsity as `x` with absolute values. Note, for `complex64` or `complex128` input, the returned `Tensor` will be of type `float32` or `float64`, respectively. 
""" with ops.name_scope(name, "Abs", [x]) as name: x = ops.convert_to_tensor(x, name="x") if x.dtype.is_complex: return gen_math_ops.complex_abs(x, Tout=x.dtype.real_dtype, name=name) return gen_math_ops._abs(x, name=name) # pylint: enable=g-docstring-has-escape # pylint: disable=redefined-builtin def _bucketize(input, boundaries, name=None): return gen_math_ops.bucketize(input=input, boundaries=boundaries, name=name) # pylint: enable=redefined-builtin class DivideDelegateWithName(object): """Use Python2/Python3 division delegation to implement divide for tensors.""" def __init__(self, x, name): """Construct DivideDelegateWithName. Args: x: Tensor to use as left operand in operator overloads name: The name that is preferred for the op created. """ self.x = x self.name = name def __truediv__(self, y): return _truediv_python3(self.x, y, self.name) def __floordiv__(self, y): return floordiv(self.x, y, self.name) def __div__(self, y): return _div_python2(self.x, y, self.name) @tf_export("math.divide", "divide") @dispatch.add_dispatch_support def divide(x, y, name=None): """Computes Python style division of `x` by `y`.""" if name is not None: # Cannot use tensors operator overload, because it has no way to track # override names. Use a dummy class to track the runtime division behavior return DivideDelegateWithName(x, name) / y else: return x / y @tf_export("math.multiply", "multiply") @dispatch.add_dispatch_support def multiply(x, y, name=None): return gen_math_ops.mul(x, y, name) multiply.__doc__ = gen_math_ops.mul.__doc__.replace("Multiply", "`tf.multiply`") # TODO(aselle): put deprecation in after another round of global code changes @deprecation.deprecated( "2016-12-30", "`tf.mul(x, y)` is deprecated, please use `tf.multiply(x, y)` or `x * y`") def _mul(x, y, name=None): return gen_math_ops.mul(x, y, name) _mul.__doc__ = ( gen_math_ops.mul.__doc__ + ("" if _mul.__doc__ is None else _mul.__doc__)) @tf_export("math.subtract", "subtract") @dispatch.add_dispatch_support def subtract(x, y, name=None): return gen_math_ops.sub(x, y, name) subtract.__doc__ = gen_math_ops.sub.__doc__.replace("`Sub`", "`tf.subtract`") # TODO(aselle): put deprecation in after another round of global code changes @deprecation.deprecated( "2016-12-30", "`tf.sub(x, y)` is deprecated, please use `tf.subtract(x, y)` or `x - y`") def _sub(x, y, name=None): return gen_math_ops.sub(x, y, name) _sub.__doc__ = ( gen_math_ops.sub.__doc__ + ("" if _sub.__doc__ is None else _sub.__doc__)) negative = gen_math_ops.neg # pylint: disable=g-docstring-has-escape @deprecation.deprecated( "2016-12-30", "`tf.neg(x)` is deprecated, please use `tf.negative(x)` or `-x`") def _neg(x, name=None): """Computes numerical negative value element-wise. I.e., \\(y = -x\\). Args: x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`. name: A name for the operation (optional). Returns: A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`. """ return negative(x, name) # pylint: enable=g-docstring-has-escape @tf_export(v1=["math.scalar_mul", "scalar_mul"]) def scalar_mul(scalar, x, name=None): """Multiplies a scalar times a `Tensor` or `IndexedSlices` object. Intended for use in gradient code which might deal with `IndexedSlices` objects, which are easy to multiply by a scalar but more expensive to multiply with arbitrary tensors. Args: scalar: A 0-D scalar `Tensor`. Must have known shape. x: A `Tensor` or `IndexedSlices` to be scaled. 
name: A name for the operation (optional). Returns: `scalar * x` of the same type (`Tensor` or `IndexedSlices`) as `x`. Raises: ValueError: if scalar is not a 0-D `scalar`. """ scalar = ops.convert_to_tensor( scalar, dtype=x.dtype.base_dtype, name="scalar") shape = scalar.get_shape() if shape.ndims == 0: if isinstance(x, ops.IndexedSlices): return ops.IndexedSlices( gen_math_ops.mul(scalar, x.values, name), x.indices, x.dense_shape) else: return gen_math_ops.mul(scalar, x, name) else: raise ValueError("Only scalar multiply works, got shape %s" % shape) @tf_export("math.scalar_mul", "scalar_mul", v1=[]) @_set_doc(scalar_mul.__doc__) def scalar_mul_v2(scalar, x, name=None): with ops.name_scope(name, "scalar_mul", [x]) as name: return scalar_mul(scalar, x, name) @tf_export("math.pow", "pow") @dispatch.add_dispatch_support def pow(x, y, name=None): # pylint: disable=redefined-builtin r"""Computes the power of one value to another. Given a tensor `x` and a tensor `y`, this operation computes \\(x^y\\) for corresponding elements in `x` and `y`. For example: ```python x = tf.constant([[2, 2], [3, 3]]) y = tf.constant([[8, 16], [2, 3]]) tf.pow(x, y) # [[256, 65536], [9, 27]] ``` Args: x: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, `int64`, `complex64`, or `complex128`. y: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, `int64`, `complex64`, or `complex128`. name: A name for the operation (optional). Returns: A `Tensor`. """ with ops.name_scope(name, "Pow", [x]) as name: return gen_math_ops._pow(x, y, name=name) # pylint: disable=redefined-builtin,redefined-outer-name @tf_export("dtypes.complex", "complex") @dispatch.add_dispatch_support def complex(real, imag, name=None): r"""Converts two real numbers to a complex number. Given a tensor `real` representing the real part of a complex number, and a tensor `imag` representing the imaginary part of a complex number, this operation returns complex numbers elementwise of the form \\(a + bj\\), where *a* represents the `real` part and *b* represents the `imag` part. The input tensors `real` and `imag` must have the same shape. For example: ```python real = tf.constant([2.25, 3.25]) imag = tf.constant([4.75, 5.75]) tf.complex(real, imag) # [[2.25 + 4.75j], [3.25 + 5.75j]] ``` Args: real: A `Tensor`. Must be one of the following types: `float32`, `float64`. imag: A `Tensor`. Must have the same type as `real`. name: A name for the operation (optional). Returns: A `Tensor` of type `complex64` or `complex128`. Raises: TypeError: Real and imag must be correct types """ real = ops.convert_to_tensor(real, name="real") imag = ops.convert_to_tensor(imag, name="imag") with ops.name_scope(name, "Complex", [real, imag]) as name: input_types = (real.dtype, imag.dtype) if input_types == (dtypes.float64, dtypes.float64): Tout = dtypes.complex128 elif input_types == (dtypes.float32, dtypes.float32): Tout = dtypes.complex64 else: raise TypeError("real and imag have incorrect types: " "{} {}".format(real.dtype.name, imag.dtype.name)) return gen_math_ops._complex(real, imag, Tout=Tout, name=name) @tf_export("math.real", v1=["math.real", "real"]) @deprecation.deprecated_endpoints("real") @dispatch.add_dispatch_support def real(input, name=None): r"""Returns the real part of a complex (or real) tensor. Given a tensor `input`, this operation returns a tensor of type `float` that is the real part of each element in `input` considered as a complex number. 
For example: ```python x = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j]) tf.math.real(x) # [-2.25, 3.25] ``` If `input` is already real, it is returned unchanged. Args: input: A `Tensor`. Must have numeric type. name: A name for the operation (optional). Returns: A `Tensor` of type `float32` or `float64`. """ with ops.name_scope(name, "Real", [input]) as name: if input.dtype.is_complex: real_dtype = input.dtype.real_dtype return gen_math_ops.real(input, Tout=real_dtype, name=name) else: return input @tf_export("math.imag", v1=["math.imag", "imag"]) @deprecation.deprecated_endpoints("imag") @dispatch.add_dispatch_support def imag(input, name=None): r"""Returns the imaginary part of a complex (or real) tensor. Given a tensor `input`, this operation returns a tensor of type `float` that is the imaginary part of each element in `input` considered as a complex number. If `input` is real, a tensor of all zeros is returned. For example: ```python x = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j]) tf.math.imag(x) # [4.75, 5.75] ``` Args: input: A `Tensor`. Must be one of the following types: `float`, `double`, `complex64`, `complex128`. name: A name for the operation (optional). Returns: A `Tensor` of type `float32` or `float64`. """ with ops.name_scope(name, "Imag", [input]) as name: if input.dtype.is_complex: return gen_math_ops.imag(input, Tout=input.dtype.real_dtype, name=name) else: return array_ops.zeros_like(input) @tf_export("math.angle", v1=["math.angle", "angle"]) @deprecation.deprecated_endpoints("angle") @dispatch.add_dispatch_support def angle(input, name=None): r"""Returns the element-wise argument of a complex (or real) tensor. Given a tensor `input`, this operation returns a tensor of type `float` that is the argument of each element in `input` considered as a complex number. The elements in `input` are considered to be complex numbers of the form \\(a + bj\\), where *a* is the real part and *b* is the imaginary part. If `input` is real then *b* is zero by definition. The argument returned by this function is of the form \\(atan2(b, a)\\). If `input` is real, a tensor of all zeros is returned. For example: ``` input = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j], dtype=tf.complex64) tf.math.angle(input).numpy() # ==> array([2.0131705, 1.056345 ], dtype=float32) ``` Args: input: A `Tensor`. Must be one of the following types: `float`, `double`, `complex64`, `complex128`. name: A name for the operation (optional). Returns: A `Tensor` of type `float32` or `float64`. """ with ops.name_scope(name, "Angle", [input]) as name: if input.dtype.is_complex: return gen_math_ops.angle(input, Tout=input.dtype.real_dtype, name=name) else: return array_ops.zeros_like(input) # pylint: enable=redefined-outer-name,redefined-builtin @tf_export("math.round", "round") @dispatch.add_dispatch_support def round(x, name=None): # pylint: disable=redefined-builtin """Rounds the values of a tensor to the nearest integer, element-wise. Rounds half to even. Also known as bankers rounding. If you want to round according to the current system rounding mode use tf::cint. For example: ```python x = tf.constant([0.9, 2.5, 2.3, 1.5, -4.5]) tf.round(x) # [ 1.0, 2.0, 2.0, 2.0, -4.0 ] ``` Args: x: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, or `int64`. name: A name for the operation (optional). Returns: A `Tensor` of same shape and type as `x`. 
""" x = ops.convert_to_tensor(x, name="x") if x.dtype.is_integer: return x else: return gen_math_ops.round(x, name=name) @tf_export("dtypes.cast", "cast") @dispatch.add_dispatch_support def cast(x, dtype, name=None): """Casts a tensor to a new type. The operation casts `x` (in case of `Tensor`) or `x.values` (in case of `SparseTensor` or `IndexedSlices`) to `dtype`. For example: ```python x = tf.constant([1.8, 2.2], dtype=tf.float32) tf.dtypes.cast(x, tf.int32) # [1, 2], dtype=tf.int32 ``` The operation supports data types (for `x` and `dtype`) of `uint8`, `uint16`, `uint32`, `uint64`, `int8`, `int16`, `int32`, `int64`, `float16`, `float32`, `float64`, `complex64`, `complex128`, `bfloat16`. In case of casting from complex types (`complex64`, `complex128`) to real types, only the real part of `x` is returned. In case of casting from real types to complex types (`complex64`, `complex128`), the imaginary part of the returned value is set to `0`. The handling of complex types here matches the behavior of numpy. Args: x: A `Tensor` or `SparseTensor` or `IndexedSlices` of numeric type. It could be `uint8`, `uint16`, `uint32`, `uint64`, `int8`, `int16`, `int32`, `int64`, `float16`, `float32`, `float64`, `complex64`, `complex128`, `bfloat16`. dtype: The destination type. The list of supported dtypes is the same as `x`. name: A name for the operation (optional). Returns: A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` and same type as `dtype`. Raises: TypeError: If `x` cannot be cast to the `dtype`. """ base_type = dtypes.as_dtype(dtype).base_dtype if isinstance(x, (ops.Tensor, _resource_variable_type)) and base_type == x.dtype: return x with ops.name_scope(name, "Cast", [x]) as name: if isinstance(x, sparse_tensor.SparseTensor): values_cast = cast(x.values, base_type, name=name) x = sparse_tensor.SparseTensor(x.indices, values_cast, x.dense_shape) elif isinstance(x, ops.IndexedSlices): values_cast = cast(x.values, base_type, name=name) x = ops.IndexedSlices(values_cast, x.indices, x.dense_shape) else: # TODO(josh11b): If x is not already a Tensor, we could return # ops.convert_to_tensor(x, dtype=dtype, ...) here, but that # allows some conversions that cast() can't do, e.g. casting numbers to # strings. x = ops.convert_to_tensor(x, name="x") if x.dtype.base_dtype != base_type: x = gen_math_ops.cast(x, base_type, name=name) if x.dtype.is_complex and base_type.is_floating: logging.warn("Casting complex to real discards imaginary part.") return x @tf_export("dtypes.saturate_cast", "saturate_cast") @dispatch.add_dispatch_support def saturate_cast(value, dtype, name=None): """Performs a safe saturating cast of `value` to `dtype`. This function casts the input to `dtype` without applying any scaling. If there is a danger that values would over or underflow in the cast, this op applies the appropriate clamping before the cast. Args: value: A `Tensor`. dtype: The desired output `DType`. name: A name for the operation (optional). Returns: `value` safely cast to `dtype`. """ # When casting to a type with smaller representable range, clamp. # Note that this covers casting to unsigned types as well. 
with ops.name_scope(name, "saturate_cast", [value]) as name: value = ops.convert_to_tensor(value, name="value") dtype = dtypes.as_dtype(dtype).base_dtype if value.dtype.min < dtype.min: value = gen_math_ops.maximum( value, ops.convert_to_tensor(dtype.min, dtype=value.dtype, name="min")) if value.dtype.max > dtype.max: value = gen_math_ops.minimum( value, ops.convert_to_tensor(dtype.max, dtype=value.dtype, name="max")) return cast(value, dtype, name=name) @deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.") @tf_export(v1=["to_float"]) def to_float(x, name="ToFloat"): """Casts a tensor to type `float32`. Args: x: A `Tensor` or `SparseTensor` or `IndexedSlices`. name: A name for the operation (optional). Returns: A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with type `float32`. Raises: TypeError: If `x` cannot be cast to the `float32`. """ return cast(x, dtypes.float32, name=name) @deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.") @tf_export(v1=["to_double"]) def to_double(x, name="ToDouble"): """Casts a tensor to type `float64`. Args: x: A `Tensor` or `SparseTensor` or `IndexedSlices`. name: A name for the operation (optional). Returns: A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with type `float64`. Raises: TypeError: If `x` cannot be cast to the `float64`. """ return cast(x, dtypes.float64, name=name) @deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.") @tf_export(v1=["to_int32"]) def to_int32(x, name="ToInt32"): """Casts a tensor to type `int32`. Args: x: A `Tensor` or `SparseTensor` or `IndexedSlices`. name: A name for the operation (optional). Returns: A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with type `int32`. Raises: TypeError: If `x` cannot be cast to the `int32`. """ return cast(x, dtypes.int32, name=name) @deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.") @tf_export(v1=["to_int64"]) def to_int64(x, name="ToInt64"): """Casts a tensor to type `int64`. Args: x: A `Tensor` or `SparseTensor` or `IndexedSlices`. name: A name for the operation (optional). Returns: A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with type `int64`. Raises: TypeError: If `x` cannot be cast to the `int64`. """ return cast(x, dtypes.int64, name=name) @deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.") @tf_export(v1=["to_bfloat16"]) def to_bfloat16(x, name="ToBFloat16"): """Casts a tensor to type `bfloat16`. Args: x: A `Tensor` or `SparseTensor` or `IndexedSlices`. name: A name for the operation (optional). Returns: A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with type `bfloat16`. Raises: TypeError: If `x` cannot be cast to the `bfloat16`. """ return cast(x, dtypes.bfloat16, name=name) @deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.") @tf_export(v1=["to_complex64"]) def to_complex64(x, name="ToComplex64"): """Casts a tensor to type `complex64`. Args: x: A `Tensor` or `SparseTensor` or `IndexedSlices`. name: A name for the operation (optional). Returns: A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with type `complex64`. Raises: TypeError: If `x` cannot be cast to the `complex64`. """ return cast(x, dtypes.complex64, name=name) @deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.") @tf_export(v1=["to_complex128"]) def to_complex128(x, name="ToComplex128"): """Casts a tensor to type `complex128`. 
Args: x: A `Tensor` or `SparseTensor` or `IndexedSlices`. name: A name for the operation (optional). Returns: A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with type `complex128`. Raises: TypeError: If `x` cannot be cast to the `complex128`. """ return cast(x, dtypes.complex128, name=name) ops.Tensor._override_operator("__neg__", gen_math_ops.neg) ops.Tensor._override_operator("__abs__", abs) # __invert__ corresponds to the ~ operator. Here we follow the numpy convention # ~ marks an elementwise bit-wise inverse. This is only implemented for boolean # tensors and will throw a TypeError if used on nonboolean arrays ops.Tensor._override_operator("__invert__", gen_math_ops.logical_not) def _OverrideBinaryOperatorHelper(func, op_name, clazz_object=ops.Tensor): """Register operators with different tensor and scalar versions. If `clazz_object` is `SparseTensor`, assumes `func` takes `(sp_indices, sp_values, sp_shape, dense)` and outputs `(new_sp_values)`. Args: func: the operator op_name: name of the operator being overridden clazz_object: class to override for. Either `Tensor` or `SparseTensor`. """ def binary_op_wrapper(x, y): with ops.name_scope(None, op_name, [x, y]) as name: if isinstance(x, ops.Tensor) and isinstance(y, ops.Tensor): return func(x, y, name=name) elif not isinstance(y, sparse_tensor.SparseTensor): try: y = ops.convert_to_tensor_v2( y, dtype_hint=x.dtype.base_dtype, name="y") except TypeError: # If the RHS is not a tensor, it might be a tensor aware object # that can implement the operator with knowledge of itself # and the tensor. if hasattr(type(y), "__r%s__" % op_name): return NotImplemented else: raise return func(x, y, name=name) def binary_op_wrapper_sparse(sp_x, y): with ops.name_scope(None, op_name, [sp_x, y]) as name: y = ops.convert_to_tensor(y, dtype=sp_x.dtype.base_dtype, name="y") return sparse_tensor.SparseTensor( sp_x.indices, func(sp_x.indices, sp_x.values, sp_x.dense_shape, y, name=name), sp_x.dense_shape) def r_binary_op_wrapper(y, x): with ops.name_scope(None, op_name, [x, y]) as name: x = ops.convert_to_tensor(x, dtype=y.dtype.base_dtype, name="x") return func(x, y, name=name) # Propagate func.__doc__ to the wrappers try: doc = func.__doc__ except AttributeError: doc = None binary_op_wrapper.__doc__ = doc r_binary_op_wrapper.__doc__ = doc binary_op_wrapper_sparse.__doc__ = doc if clazz_object is ops.Tensor: clazz_object._override_operator("__%s__" % op_name, binary_op_wrapper) del binary_op_wrapper clazz_object._override_operator("__r%s__" % op_name, r_binary_op_wrapper) del r_binary_op_wrapper else: clazz_object._override_operator("__%s__" % op_name, binary_op_wrapper_sparse) del binary_op_wrapper_sparse # Conversion table for __truediv__. None entries mean no conversion required. _TRUEDIV_TABLE = { dtypes.uint8: dtypes.float32, dtypes.int8: dtypes.float32, dtypes.uint16: dtypes.float32, dtypes.int16: dtypes.float32, dtypes.int32: dtypes.float64, dtypes.int64: dtypes.float64, dtypes.bfloat16: None, dtypes.float16: None, dtypes.float32: None, dtypes.float64: None, dtypes.complex64: None, dtypes.complex128: None, } # NOTE: the support of "sparse (true)div dense" is currently not baked in into # "tf.(true_)div()". Until such an API decision is made, the supported usage is # to explicitly use the "/" operator to invoke either truediv or div. 
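To make `_TRUEDIV_TABLE` concrete, a small example of my own (eager execution assumed): integer operands are cast per the table before `RealDiv`, so `/` on `int32` tensors yields `float64`, while floating-point operands pass through with no conversion.

```python
import tensorflow as tf

a = tf.constant([7, 2], dtype=tf.int32)
b = tf.constant([2, 4], dtype=tf.int32)
q = a / b                    # __truediv__ -> _truediv_python3
print(q.dtype, q.numpy())    # <dtype: 'float64'> [3.5 0.5]

f32 = tf.constant([7.0, 2.0]) / tf.constant([2.0, 4.0])
print(f32.dtype)             # <dtype: 'float32'> (table entry is None)
```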
def _sparse_dense_truediv(sp_indices, sp_values, sp_shape, y, name=None): """Internal helper function for 'sp_t / dense_t'.""" with ops.name_scope(name, "truediv", [sp_indices, sp_values, sp_shape, y]) as name: sp_values = ops.convert_to_tensor(sp_values, name="sp_values") y = ops.convert_to_tensor(y, name="y") x_dtype = sp_values.dtype.base_dtype y_dtype = y.dtype.base_dtype if x_dtype != y_dtype: raise TypeError("x and y must have the same dtype, got %r != %r" % (x_dtype, y_dtype)) try: dtype = _TRUEDIV_TABLE[x_dtype] except KeyError: raise TypeError("Invalid dtype %r in __truediv__" % x_dtype) if dtype is not None: sp_values = cast(sp_values, dtype) y = cast(y, dtype) return gen_sparse_ops.sparse_dense_cwise_div( sp_indices, sp_values, sp_shape, y, name=name) def _truediv_python3(x, y, name=None): with ops.name_scope(name, "truediv", [x, y]) as name: x = ops.convert_to_tensor(x, name="x") y = ops.convert_to_tensor(y, name="y") x_dtype = x.dtype.base_dtype y_dtype = y.dtype.base_dtype if x_dtype != y_dtype: raise TypeError("x and y must have the same dtype, got %r != %r" % (x_dtype, y_dtype)) try: dtype = _TRUEDIV_TABLE[x_dtype] except KeyError: raise TypeError("Invalid dtype %r in __truediv__" % x_dtype) if dtype is not None: x = cast(x, dtype) y = cast(y, dtype) return gen_math_ops.real_div(x, y, name=name) def _div_python2(x, y, name=None): """Divide two values using Python 2 semantics. Used for Tensor.__div__. Args: x: `Tensor` numerator of real numeric type. y: `Tensor` denominator of real numeric type. name: A name for the operation (optional). Returns: `x / y` returns the quotient of x and y. """ with ops.name_scope(name, "div", [x, y]) as name: x = ops.convert_to_tensor(x, name="x") y = ops.convert_to_tensor(y, name="y", dtype=x.dtype.base_dtype) x_dtype = x.dtype.base_dtype y_dtype = y.dtype.base_dtype if x_dtype != y_dtype: raise TypeError("x and y must have the same dtype, got %r != %r" % (x_dtype, y_dtype)) if x_dtype.is_floating or x_dtype.is_complex: return gen_math_ops.real_div(x, y, name=name) else: return gen_math_ops.floor_div(x, y, name=name) @tf_export("math.truediv", "truediv") @dispatch.add_dispatch_support def truediv(x, y, name=None): """Divides x / y elementwise (using Python 3 division operator semantics). NOTE: Prefer using the Tensor operator or tf.divide which obey Python division operator semantics. This function forces Python 3 division operator semantics where all integer arguments are cast to floating types first. This op is generated by normal `x / y` division in Python 3 and in Python 2.7 with `from __future__ import division`. If you want integer division that rounds down, use `x // y` or `tf.math.floordiv`. `x` and `y` must have the same numeric type. If the inputs are floating point, the output will have the same type. If the inputs are integral, the inputs are cast to `float32` for `int8` and `int16` and `float64` for `int32` and `int64` (matching the behavior of Numpy). Args: x: `Tensor` numerator of numeric type. y: `Tensor` denominator of numeric type. name: A name for the operation (optional). Returns: `x / y` evaluated in floating point. Raises: TypeError: If `x` and `y` have different dtypes. """ return _truediv_python3(x, y, name) @deprecation.deprecated( date=None, instructions="Deprecated in favor of operator or tf.math.divide.") @tf_export(v1=["div"]) def div(x, y, name=None): """Divides x / y elementwise (using Python 2 division operator semantics). 
NOTE: Prefer using the Tensor division operator or tf.divide which obey Python 3 division operator semantics. This function divides `x` and `y`, forcing Python 2 semantics. That is, if `x` and `y` are both integers then the result will be an integer. This is in contrast to Python 3, where division with `/` is always a float while division with `//` is always an integer. Args: x: `Tensor` numerator of real numeric type. y: `Tensor` denominator of real numeric type. name: A name for the operation (optional). Returns: `x / y` returns the quotient of x and y. """ return _div_python2(x, y, name) @tf_export("math.divide_no_nan", v1=["math.divide_no_nan", "div_no_nan"]) @deprecation.deprecated_endpoints("div_no_nan") @dispatch.add_dispatch_support def div_no_nan(x, y, name=None): """Computes an unsafe divide which returns 0 if the y is zero. Args: x: A `Tensor`. Must be one of the following types: `float32`, `float64`. y: A `Tensor` whose dtype is compatible with `x`. name: A name for the operation (optional). Returns: The element-wise value of the x divided by y. """ with ops.name_scope(name, "div_no_nan", [x, y]) as name: x = ops.convert_to_tensor(x, name="x") y = ops.convert_to_tensor(y, name="y", dtype=x.dtype.base_dtype) x_dtype = x.dtype.base_dtype y_dtype = y.dtype.base_dtype if x_dtype != y_dtype: raise TypeError("x and y must have the same dtype, got %r != %r" % (x_dtype, y_dtype)) return gen_math_ops.div_no_nan(x, y, name=name) @tf_export("math.multiply_no_nan") @dispatch.add_dispatch_support def multiply_no_nan(x, y, name=None): """Computes the product of x and y and returns 0 if the y is zero, even if x is NaN or infinite. Args: x: A `Tensor`. Must be one of the following types: `float32`, `float64`. y: A `Tensor` whose dtype is compatible with `x`. name: A name for the operation (optional). Returns: The element-wise value of the x times y. """ with ops.name_scope(name, "multiply_no_nan", [x, y]) as name: x = ops.convert_to_tensor(x, name="x") y = ops.convert_to_tensor(y, name="y", dtype=x.dtype.base_dtype) x_dtype = x.dtype.base_dtype y_dtype = y.dtype.base_dtype if x_dtype != y_dtype: raise TypeError("x and y must have the same dtype, got %r != %r" % (x_dtype, y_dtype)) return gen_math_ops.mul_no_nan(x, y, name=name) # TODO(aselle): This should be removed mod = gen_math_ops.floor_mod # TODO(aselle): Deprecate this once all internal functionality uses # tf.truncatediv @tf_export("math.floordiv", v1=["math.floordiv", "floordiv"]) @dispatch.add_dispatch_support @deprecation.deprecated_endpoints("floordiv") def floordiv(x, y, name=None): """Divides `x / y` elementwise, rounding toward the most negative integer. The same as `tf.compat.v1.div(x,y)` for integers, but uses `tf.floor(tf.compat.v1.div(x,y))` for floating point arguments so that the result is always an integer (though possibly an integer represented as floating point). This op is generated by `x // y` floor division in Python 3 and in Python 2.7 with `from __future__ import division`. `x` and `y` must have the same type, and the result will have the same type as well. Args: x: `Tensor` numerator of real numeric type. y: `Tensor` denominator of real numeric type. name: A name for the operation (optional). Returns: `x / y` rounded down. Raises: TypeError: If the inputs are complex. """ with ops.name_scope(name, "floordiv", [x, y]) as name: return gen_math_ops.floor_div(x, y, name=name) realdiv = gen_math_ops.real_div truncatediv = gen_math_ops.truncate_div # TODO(aselle): Rename this to floordiv when we can. 
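A quick illustration (my own example, eager execution assumed) of the rounding behaviour `floordiv` documents above, contrasted with `truncatediv`: flooring rounds toward negative infinity, truncation toward zero.

```python
import tensorflow as tf

x = tf.constant([7, -7])
y = tf.constant([2, 2])

print(tf.math.floordiv(x, y))     # [ 3 -4]  floor(-3.5) == -4
print(tf.math.truncatediv(x, y))  # [ 3 -3]  C-style rounding toward zero
```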
floor_div = gen_math_ops.floor_div truncatemod = gen_math_ops.truncate_mod floormod = gen_math_ops.floor_mod def _add_dispatch(x, y, name=None): """Dispatches to add for strings and add_v2 for all other types.""" if fwd_compat.forward_compatible(2019, 6, 25): if x.dtype == dtypes.string: return gen_math_ops.add(x, y, name=name) else: return gen_math_ops.add_v2(x, y, name=name) else: return gen_math_ops.add(x, y, name=name) def _mul_dispatch(x, y, name=None): """Dispatches cwise mul for "Dense*Dense" and "Dense*Sparse".""" is_tensor_y = isinstance(y, ops.Tensor) if is_tensor_y: return gen_math_ops.mul(x, y, name=name) else: assert isinstance(y, sparse_tensor.SparseTensor) # Case: Dense * Sparse. new_vals = gen_sparse_ops.sparse_dense_cwise_mul(y.indices, y.values, y.dense_shape, x, name) return sparse_tensor.SparseTensor(y.indices, new_vals, y.dense_shape) # NOTE(aselle): When integer division is added for sparse_dense_cwise, # div, truediv, and floordiv should be delegated appropriately for # Python sematnics, analogous to dense cwise tensor operations. _OverrideBinaryOperatorHelper(gen_sparse_ops.sparse_dense_cwise_div, "div", sparse_tensor.SparseTensor) _OverrideBinaryOperatorHelper(_sparse_dense_truediv, "truediv", sparse_tensor.SparseTensor) _OverrideBinaryOperatorHelper(gen_sparse_ops.sparse_dense_cwise_mul, "mul", sparse_tensor.SparseTensor) _OverrideBinaryOperatorHelper(_add_dispatch, "add") _OverrideBinaryOperatorHelper(gen_math_ops.sub, "sub") _OverrideBinaryOperatorHelper(_mul_dispatch, "mul") _OverrideBinaryOperatorHelper(_div_python2, "div") _OverrideBinaryOperatorHelper(_truediv_python3, "truediv") _OverrideBinaryOperatorHelper(floordiv, "floordiv") _OverrideBinaryOperatorHelper(gen_math_ops.floor_mod, "mod") _OverrideBinaryOperatorHelper(pow, "pow") @tf_export("math.logical_xor", v1=["math.logical_xor", "logical_xor"]) @dispatch.add_dispatch_support @deprecation.deprecated_endpoints("logical_xor") def logical_xor(x, y, name="LogicalXor"): """Logical XOR function. x ^ y = (x | y) & ~(x & y) Inputs are tensor and if the tensors contains more than one element, an element-wise logical XOR is computed. Usage: ```python x = tf.constant([False, False, True, True], dtype = tf.bool) y = tf.constant([False, True, False, True], dtype = tf.bool) z = tf.logical_xor(x, y, name="LogicalXor") # here z = [False True True False] ``` Args: x: A `Tensor` type bool. y: A `Tensor` of type bool. Returns: A `Tensor` of type bool with the same size as that of x or y. """ # TODO(alemi) Make this a cwise op if people end up relying on it. return gen_math_ops.logical_and( gen_math_ops.logical_or(x, y), gen_math_ops.logical_not(gen_math_ops.logical_and(x, y)), name=name) _OverrideBinaryOperatorHelper(gen_math_ops.logical_and, "and") _OverrideBinaryOperatorHelper(gen_math_ops.logical_or, "or") _OverrideBinaryOperatorHelper(logical_xor, "xor") ops.Tensor._override_operator("__lt__", gen_math_ops.less) ops.Tensor._override_operator("__le__", gen_math_ops.less_equal) ops.Tensor._override_operator("__gt__", gen_math_ops.greater) ops.Tensor._override_operator("__ge__", gen_math_ops.greater_equal) @tf_export("range") def range(start, limit=None, delta=1, dtype=None, name="range"): # pylint: disable=redefined-builtin """Creates a sequence of numbers. Creates a sequence of numbers that begins at `start` and extends by increments of `delta` up to but not including `limit`. The dtype of the resulting tensor is inferred from the inputs unless it is provided explicitly. 
Like the Python builtin `range`, `start` defaults to 0, so that `range(n) = range(0, n)`. For example: ```python start = 3 limit = 18 delta = 3 tf.range(start, limit, delta) # [3, 6, 9, 12, 15] start = 3 limit = 1 delta = -0.5 tf.range(start, limit, delta) # [3, 2.5, 2, 1.5] limit = 5 tf.range(limit) # [0, 1, 2, 3, 4] ``` Args: start: A 0-D `Tensor` (scalar). Acts as first entry in the range if `limit` is not None; otherwise, acts as range limit and first entry defaults to 0. limit: A 0-D `Tensor` (scalar). Upper limit of sequence, exclusive. If None, defaults to the value of `start` while the first entry of the range defaults to 0. delta: A 0-D `Tensor` (scalar). Number that increments `start`. Defaults to 1. dtype: The type of the elements of the resulting tensor. name: A name for the operation. Defaults to "range". Returns: An 1-D `Tensor` of type `dtype`. @compatibility(numpy) Equivalent to np.arange @end_compatibility """ if limit is None: start, limit = 0, start with ops.name_scope(name, "Range", [start, limit, delta]) as name: start = ops.convert_to_tensor(start, dtype=dtype, name="start") limit = ops.convert_to_tensor(limit, dtype=dtype, name="limit") delta = ops.convert_to_tensor(delta, dtype=dtype, name="delta") # infer dtype if not explicitly provided if dtype is None: dtype_hierarchy = [ dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64 ] assert all(arg.dtype in dtype_hierarchy for arg in [start, limit, delta]) inferred_dtype = max([arg.dtype for arg in [start, limit, delta]], key=dtype_hierarchy.index) start = cast(start, inferred_dtype) limit = cast(limit, inferred_dtype) delta = cast(delta, inferred_dtype) return gen_math_ops._range(start, limit, delta, name=name) # Reduction operations def _ReductionDims(x, axis, reduction_indices=None): # pylint: disable=invalid-name """Returns range(0, rank(x)) if reduction_indices is None.""" # TODO(aselle): Remove this after deprecation if reduction_indices is not None: if axis is not None: raise ValueError("Can't specify both axis' and 'reduction_indices'.") axis = reduction_indices if axis is not None: return axis else: # Fast path: avoid creating Rank and Range ops if ndims is known. rank = common_shapes.rank(x) if rank is not None: return constant_op.constant(np.arange(rank), dtype=dtypes.int32) if (isinstance(x, sparse_tensor.SparseTensor) and x.dense_shape.shape.is_fully_defined()): rank = x.dense_shape.shape.dims[0].value # sparse.dense_shape is 1-D. return constant_op.constant(np.arange(rank), dtype=dtypes.int32) # Otherwise, we rely on Range and Rank to do the right thing at run-time. return range(0, array_ops.rank(x)) def _may_reduce_to_scalar(keepdims, axis, output): """Set a reduction's output shape to be a scalar if we are certain.""" if not common_shapes.has_fully_defined_shape(output) and (not keepdims) and ( axis is None): output.set_shape(()) return output @tf_export(v1=["math.reduce_sum", "reduce_sum"]) @deprecation.deprecated_args(None, "keep_dims is deprecated, use keepdims instead", "keep_dims") def reduce_sum_v1(input_tensor, axis=None, keepdims=None, name=None, reduction_indices=None, keep_dims=None): """Computes the sum of elements across dimensions of a tensor. Reduces `input_tensor` along the dimensions given in `axis`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each entry in `axis`. If `keepdims` is true, the reduced dimensions are retained with length 1. If `axis` is None, all dimensions are reduced, and a tensor with a single element is returned. 
For example: ```python x = tf.constant([[1, 1, 1], [1, 1, 1]]) tf.reduce_sum(x) # 6 tf.reduce_sum(x, 0) # [2, 2, 2] tf.reduce_sum(x, 1) # [3, 3] tf.reduce_sum(x, 1, keepdims=True) # [[3], [3]] tf.reduce_sum(x, [0, 1]) # 6 ``` Args: input_tensor: The tensor to reduce. Should have numeric type. axis: The dimensions to reduce. If `None` (the default), reduces all dimensions. Must be in the range `[-rank(input_tensor), rank(input_tensor))`. keepdims: If true, retains reduced dimensions with length 1. name: A name for the operation (optional). reduction_indices: The old (deprecated) name for axis. keep_dims: Deprecated alias for `keepdims`. Returns: The reduced tensor, of the same dtype as the input_tensor. @compatibility(numpy) Equivalent to np.sum apart the fact that numpy upcast uint8 and int32 to int64 while tensorflow returns the same dtype as the input. @end_compatibility """ axis = deprecation.deprecated_argument_lookup("axis", axis, "reduction_indices", reduction_indices) keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims, "keep_dims", keep_dims) return reduce_sum(input_tensor, axis, keepdims, name) @tf_export("math.reduce_sum", "reduce_sum", v1=[]) @dispatch.add_dispatch_support def reduce_sum(input_tensor, axis=None, keepdims=False, name=None): """Computes the sum of elements across dimensions of a tensor. Reduces `input_tensor` along the dimensions given in `axis`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each entry in `axis`. If `keepdims` is true, the reduced dimensions are retained with length 1. If `axis` is None, all dimensions are reduced, and a tensor with a single element is returned. For example: ```python x = tf.constant([[1, 1, 1], [1, 1, 1]]) tf.reduce_sum(x) # 6 tf.reduce_sum(x, 0) # [2, 2, 2] tf.reduce_sum(x, 1) # [3, 3] tf.reduce_sum(x, 1, keepdims=True) # [[3], [3]] tf.reduce_sum(x, [0, 1]) # 6 ``` Args: input_tensor: The tensor to reduce. Should have numeric type. axis: The dimensions to reduce. If `None` (the default), reduces all dimensions. Must be in the range `[-rank(input_tensor), rank(input_tensor))`. keepdims: If true, retains reduced dimensions with length 1. name: A name for the operation (optional). Returns: The reduced tensor, of the same dtype as the input_tensor. @compatibility(numpy) Equivalent to np.sum apart the fact that numpy upcast uint8 and int32 to int64 while tensorflow returns the same dtype as the input. @end_compatibility """ keepdims = False if keepdims is None else keepdims return _may_reduce_to_scalar( keepdims, axis, gen_math_ops._sum( input_tensor, _ReductionDims(input_tensor, axis), keepdims, name=name)) @tf_export("math.reduce_euclidean_norm") def reduce_euclidean_norm(input_tensor, axis=None, keepdims=False, name=None): """Computes the Euclidean norm of elements across dimensions of a tensor. Reduces `input_tensor` along the dimensions given in `axis`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each entry in `axis`. If `keepdims` is true, the reduced dimensions are retained with length 1. If `axis` is None, all dimensions are reduced, and a tensor with a single element is returned. For example: ```python x = tf.constant([[1, 2, 3], [1, 1, 1]]) tf.reduce_euclidean_norm(x) # sqrt(17) tf.reduce_euclidean_norm(x, 0) # [sqrt(2), sqrt(5), sqrt(10)] tf.reduce_euclidean_norm(x, 1) # [sqrt(14), sqrt(3)] tf.reduce_euclidean_norm(x, 1, keepdims=True) # [[sqrt(14)], [sqrt(3)]] tf.reduce_euclidean_norm(x, [0, 1]) # sqrt(17) ``` Args: input_tensor: The tensor to reduce. 
Should have numeric type. axis: The dimensions to reduce. If `None` (the default), reduces all dimensions. Must be in the range `[-rank(input_tensor), rank(input_tensor))`. keepdims: If true, retains reduced dimensions with length 1. name: A name for the operation (optional). Returns: The reduced tensor, of the same dtype as the input_tensor. """ return _may_reduce_to_scalar( keepdims, axis, gen_math_ops.euclidean_norm( input_tensor, _ReductionDims(input_tensor, axis), keepdims, name=name)) @tf_export(v1=["math.count_nonzero", "count_nonzero"]) @deprecation.deprecated_args(None, "keep_dims is deprecated, use keepdims instead", "keep_dims") @deprecation.deprecated_args( None, "reduction_indices is deprecated, use axis instead", "axis") def count_nonzero(input_tensor=None, axis=None, keepdims=None, dtype=dtypes.int64, name=None, reduction_indices=None, keep_dims=None, input=None): # pylint: disable=redefined-builtin """Computes number of nonzero elements across dimensions of a tensor. Reduces `input_tensor` along the dimensions given in `axis`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each entry in `axis`. If `keepdims` is true, the reduced dimensions are retained with length 1. If `axis` has no entries, all dimensions are reduced, and a tensor with a single element is returned. **NOTE** Floating point comparison to zero is done by exact floating point equality check. Small values are **not** rounded to zero for purposes of the nonzero check. For example: ```python x = tf.constant([[0, 1, 0], [1, 1, 0]]) tf.math.count_nonzero(x) # 3 tf.math.count_nonzero(x, 0) # [1, 2, 0] tf.math.count_nonzero(x, 1) # [1, 2] tf.math.count_nonzero(x, 1, keepdims=True) # [[1], [2]] tf.math.count_nonzero(x, [0, 1]) # 3 ``` **NOTE** Strings are compared against zero-length empty string `""`. Any string with a size greater than zero is already considered as nonzero. For example: ```python x = tf.constant(["", "a", " ", "b", ""]) tf.math.count_nonzero(x) # 3, with "a", " ", and "b" as nonzero strings. ``` Args: input_tensor: The tensor to reduce. Should be of numeric type, `bool`, or `string`. axis: The dimensions to reduce. If `None` (the default), reduces all dimensions. Must be in the range `[-rank(input_tensor), rank(input_tensor))`. keepdims: If true, retains reduced dimensions with length 1. dtype: The output dtype; defaults to `tf.int64`. name: A name for the operation (optional). reduction_indices: The old (deprecated) name for axis. keep_dims: Deprecated alias for `keepdims`. input: Overrides input_tensor. For compatibility. Returns: The reduced tensor (number of nonzero values). """ keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims, "keep_dims", keep_dims) input_tensor = deprecation.deprecated_argument_lookup("input", input, "input_tensor", input_tensor) axis = deprecation.deprecated_argument_lookup("axis", axis, "reduction_indices", reduction_indices) return count_nonzero_v2(input_tensor, axis, keepdims, dtype, name) @tf_export("math.count_nonzero", v1=[]) def count_nonzero_v2( input, # pylint: disable=redefined-builtin axis=None, keepdims=None, dtype=dtypes.int64, name=None): """Computes number of nonzero elements across dimensions of a tensor. Reduces `input` along the dimensions given in `axis`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each entry in `axis`. If `keepdims` is true, the reduced dimensions are retained with length 1. 
If `axis` has no entries, all dimensions are reduced, and a tensor with a single element is returned. **NOTE** Floating point comparison to zero is done by exact floating point equality check. Small values are **not** rounded to zero for purposes of the nonzero check. For example: ```python x = tf.constant([[0, 1, 0], [1, 1, 0]]) tf.math.count_nonzero(x) # 3 tf.math.count_nonzero(x, 0) # [1, 2, 0] tf.math.count_nonzero(x, 1) # [1, 2] tf.math.count_nonzero(x, 1, keepdims=True) # [[1], [2]] tf.math.count_nonzero(x, [0, 1]) # 3 ``` **NOTE** Strings are compared against zero-length empty string `""`. Any string with a size greater than zero is already considered as nonzero. For example: ```python x = tf.constant(["", "a", " ", "b", ""]) tf.math.count_nonzero(x) # 3, with "a", " ", and "b" as nonzero strings. ``` Args: input: The tensor to reduce. Should be of numeric type, `bool`, or `string`. axis: The dimensions to reduce. If `None` (the default), reduces all dimensions. Must be in the range `[-rank(input), rank(input))`. keepdims: If true, retains reduced dimensions with length 1. dtype: The output dtype; defaults to `tf.int64`. name: A name for the operation (optional). Returns: The reduced tensor (number of nonzero values). """ if keepdims is None: keepdims = False with ops.name_scope(name, "count_nonzero", [input]): input = ops.convert_to_tensor(input, name="input") # A scalar of 'zero' is enough as `not_equal` will broadcast. zero = array_ops.zeros([], dtype=input.dtype) return cast( reduce_sum( # int64 reduction happens on GPU cast(gen_math_ops.not_equal(input, zero), dtypes.int64), axis=axis, keepdims=keepdims), dtype=dtype) @tf_export(v1=["math.reduce_mean", "reduce_mean"]) def reduce_mean_v1(input_tensor, axis=None, keepdims=None, name=None, reduction_indices=None, keep_dims=None): """Computes the mean of elements across dimensions of a tensor. Reduces `input_tensor` along the dimensions given in `axis`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each entry in `axis`. If `keepdims` is true, the reduced dimensions are retained with length 1. If `axis` is None, all dimensions are reduced, and a tensor with a single element is returned. For example: ```python x = tf.constant([[1., 1.], [2., 2.]]) tf.reduce_mean(x) # 1.5 tf.reduce_mean(x, 0) # [1.5, 1.5] tf.reduce_mean(x, 1) # [1., 2.] ``` Args: input_tensor: The tensor to reduce. Should have numeric type. axis: The dimensions to reduce. If `None` (the default), reduces all dimensions. Must be in the range `[-rank(input_tensor), rank(input_tensor))`. keepdims: If true, retains reduced dimensions with length 1. name: A name for the operation (optional). reduction_indices: The old (deprecated) name for axis. keep_dims: Deprecated alias for `keepdims`. Returns: The reduced tensor. @compatibility(numpy) Equivalent to np.mean Please note that `np.mean` has a `dtype` parameter that could be used to specify the output type. By default this is `dtype=float64`. 
On the other hand, `tf.reduce_mean` has an aggressive type inference from `input_tensor`, for example: ```python x = tf.constant([1, 0, 1, 0]) tf.reduce_mean(x) # 0 y = tf.constant([1., 0., 1., 0.]) tf.reduce_mean(y) # 0.5 ``` @end_compatibility """ axis = deprecation.deprecated_argument_lookup("axis", axis, "reduction_indices", reduction_indices) keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims, "keep_dims", keep_dims) return reduce_mean(input_tensor, axis, keepdims, name) @tf_export("math.reduce_mean", "reduce_mean", v1=[]) @dispatch.add_dispatch_support def reduce_mean(input_tensor, axis=None, keepdims=False, name=None): """Computes the mean of elements across dimensions of a tensor. Reduces `input_tensor` along the dimensions given in `axis`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each entry in `axis`. If `keepdims` is true, the reduced dimensions are retained with length 1. If `axis` is None, all dimensions are reduced, and a tensor with a single element is returned. For example: ```python x = tf.constant([[1., 1.], [2., 2.]]) tf.reduce_mean(x) # 1.5 tf.reduce_mean(x, 0) # [1.5, 1.5] tf.reduce_mean(x, 1) # [1., 2.] ``` Args: input_tensor: The tensor to reduce. Should have numeric type. axis: The dimensions to reduce. If `None` (the default), reduces all dimensions. Must be in the range `[-rank(input_tensor), rank(input_tensor))`. keepdims: If true, retains reduced dimensions with length 1. name: A name for the operation (optional). Returns: The reduced tensor. @compatibility(numpy) Equivalent to np.mean Please note that `np.mean` has a `dtype` parameter that could be used to specify the output type. By default this is `dtype=float64`. On the other hand, `tf.reduce_mean` has an aggressive type inference from `input_tensor`, for example: ```python x = tf.constant([1, 0, 1, 0]) tf.reduce_mean(x) # 0 y = tf.constant([1., 0., 1., 0.]) tf.reduce_mean(y) # 0.5 ``` @end_compatibility """ keepdims = False if keepdims is None else keepdims return _may_reduce_to_scalar( keepdims, axis, gen_math_ops.mean( input_tensor, _ReductionDims(input_tensor, axis), keepdims, name=name)) @tf_export("math.reduce_variance") def reduce_variance(input_tensor, axis=None, keepdims=False, name=None): """Computes the variance of elements across dimensions of a tensor. Reduces `input_tensor` along the dimensions given in `axis`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each entry in `axis`. If `keepdims` is true, the reduced dimensions are retained with length 1. If `axis` is None, all dimensions are reduced, and a tensor with a single element is returned. For example: ```python x = tf.constant([[1., 2.], [3., 4.]]) tf.reduce_variance(x) # 1.25 tf.reduce_variance(x, 0) # [1., 1.] tf.reduce_variance(x, 1) # [0.25, 0.25] ``` Args: input_tensor: The tensor to reduce. Should have numeric type. axis: The dimensions to reduce. If `None` (the default), reduces all dimensions. Must be in the range `[-rank(input_tensor), rank(input_tensor))`. keepdims: If true, retains reduced dimensions with length 1. name: A name scope for the associated operations (optional). Returns: The reduced tensor, of the same dtype as the input_tensor. @compatibility(numpy) Equivalent to np.var Please note that `np.var` has a `dtype` parameter that could be used to specify the output type. By default this is `dtype=float64`. 
On the other hand, `tf.reduce_variance` has an aggressive type inference from `input_tensor`, @end_compatibility """ name = name if name else "reduce_variance" with ops.name_scope(name): means = reduce_mean(input_tensor, axis=axis, keepdims=True) squared_deviations = gen_math_ops.square(input_tensor - means) return reduce_mean(squared_deviations, axis=axis, keepdims=keepdims) @tf_export("math.reduce_std") def reduce_std(input_tensor, axis=None, keepdims=False, name=None): """Computes the standard deviation of elements across dimensions of a tensor. Reduces `input_tensor` along the dimensions given in `axis`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each entry in `axis`. If `keepdims` is true, the reduced dimensions are retained with length 1. If `axis` is None, all dimensions are reduced, and a tensor with a single element is returned. For example: ```python x = tf.constant([[1., 2.], [3., 4.]]) tf.reduce_std(x) # 1.1180339887498949 tf.reduce_std(x, 0) # [1., 1.] tf.reduce_std(x, 1) # [0.5, 0.5] ``` Args: input_tensor: The tensor to reduce. Should have numeric type. axis: The dimensions to reduce. If `None` (the default), reduces all dimensions. Must be in the range `[-rank(input_tensor), rank(input_tensor))`. keepdims: If true, retains reduced dimensions with length 1. name: A name scope for the associated operations (optional). Returns: The reduced tensor, of the same dtype as the input_tensor. @compatibility(numpy) Equivalent to np.std Please note that `np.std` has a `dtype` parameter that could be used to specify the output type. By default this is `dtype=float64`. On the other hand, `tf.reduce_std` has an aggressive type inference from `input_tensor`, @end_compatibility """ name = name if name else "reduce_std" with ops.name_scope(name): variance = reduce_variance(input_tensor, axis=axis, keepdims=keepdims) return gen_math_ops.sqrt(variance) @tf_export("math.reduce_prod", "reduce_prod", v1=[]) @dispatch.add_dispatch_support def reduce_prod(input_tensor, axis=None, keepdims=False, name=None): """Computes the product of elements across dimensions of a tensor. Reduces `input_tensor` along the dimensions given in `axis`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each entry in `axis`. If `keepdims` is true, the reduced dimensions are retained with length 1. If `axis` is None, all dimensions are reduced, and a tensor with a single element is returned. Args: input_tensor: The tensor to reduce. Should have numeric type. axis: The dimensions to reduce. If `None` (the default), reduces all dimensions. Must be in the range `[-rank(input_tensor), rank(input_tensor))`. keepdims: If true, retains reduced dimensions with length 1. name: A name for the operation (optional). Returns: The reduced tensor. @compatibility(numpy) Equivalent to np.prod @end_compatibility """ keepdims = False if keepdims is None else keepdims return _may_reduce_to_scalar( keepdims, axis, gen_math_ops.prod( input_tensor, _ReductionDims(input_tensor, axis), keepdims, name=name)) @tf_export(v1=["math.reduce_prod", "reduce_prod"]) @deprecation.deprecated_args(None, "keep_dims is deprecated, use keepdims instead", "keep_dims") def reduce_prod_v1(input_tensor, axis=None, keepdims=None, name=None, reduction_indices=None, keep_dims=None): """Computes the product of elements across dimensions of a tensor. Reduces `input_tensor` along the dimensions given in `axis`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each entry in `axis`. 
If `keepdims` is true, the reduced dimensions are retained with length 1. If `axis` is None, all dimensions are reduced, and a tensor with a single element is returned. Args: input_tensor: The tensor to reduce. Should have numeric type. axis: The dimensions to reduce. If `None` (the default), reduces all dimensions. Must be in the range `[-rank(input_tensor), rank(input_tensor))`. keepdims: If true, retains reduced dimensions with length 1. name: A name for the operation (optional). reduction_indices: The old (deprecated) name for axis. keep_dims: Deprecated alias for `keepdims`. Returns: The reduced tensor. @compatibility(numpy) Equivalent to np.prod @end_compatibility """ axis = deprecation.deprecated_argument_lookup("axis", axis, "reduction_indices", reduction_indices) keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims, "keep_dims", keep_dims) return reduce_prod(input_tensor, axis, keepdims, name) @tf_export(v1=["math.reduce_min", "reduce_min"]) @deprecation.deprecated_args(None, "keep_dims is deprecated, use keepdims instead", "keep_dims") def reduce_min_v1(input_tensor, axis=None, keepdims=None, name=None, reduction_indices=None, keep_dims=None): """Computes the minimum of elements across dimensions of a tensor. Reduces `input_tensor` along the dimensions given in `axis`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each entry in `axis`. If `keepdims` is true, the reduced dimensions are retained with length 1. If `axis` is None, all dimensions are reduced, and a tensor with a single element is returned. Args: input_tensor: The tensor to reduce. Should have real numeric type. axis: The dimensions to reduce. If `None` (the default), reduces all dimensions. Must be in the range `[-rank(input_tensor), rank(input_tensor))`. keepdims: If true, retains reduced dimensions with length 1. name: A name for the operation (optional). reduction_indices: The old (deprecated) name for axis. keep_dims: Deprecated alias for `keepdims`. Returns: The reduced tensor. @compatibility(numpy) Equivalent to np.min @end_compatibility """ axis = deprecation.deprecated_argument_lookup("axis", axis, "reduction_indices", reduction_indices) keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims, "keep_dims", keep_dims) return reduce_min(input_tensor, axis, keepdims, name) @tf_export("math.reduce_min", "reduce_min", v1=[]) @dispatch.add_dispatch_support def reduce_min(input_tensor, axis=None, keepdims=False, name=None): """Computes the minimum of elements across dimensions of a tensor. Reduces `input_tensor` along the dimensions given in `axis`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each entry in `axis`. If `keepdims` is true, the reduced dimensions are retained with length 1. If `axis` is None, all dimensions are reduced, and a tensor with a single element is returned. Args: input_tensor: The tensor to reduce. Should have real numeric type. axis: The dimensions to reduce. If `None` (the default), reduces all dimensions. Must be in the range `[-rank(input_tensor), rank(input_tensor))`. keepdims: If true, retains reduced dimensions with length 1. name: A name for the operation (optional). Returns: The reduced tensor. 
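For example, a small usage sketch (the commented values assume eager execution):

```python
x = tf.constant([[1., 2.], [3., 4.]])
tf.reduce_min(x)  # 1.0
tf.reduce_min(x, 0)  # [1., 2.]
tf.reduce_min(x, 1)  # [1., 3.]
```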
@compatibility(numpy) Equivalent to np.min @end_compatibility """ keepdims = False if keepdims is None else keepdims return _may_reduce_to_scalar( keepdims, axis, gen_math_ops._min( input_tensor, _ReductionDims(input_tensor, axis), keepdims, name=name)) @tf_export(v1=["math.reduce_max", "reduce_max"]) @deprecation.deprecated_args(None, "keep_dims is deprecated, use keepdims instead", "keep_dims") def reduce_max_v1(input_tensor, axis=None, keepdims=None, name=None, reduction_indices=None, keep_dims=None): """Computes the maximum of elements across dimensions of a tensor. Reduces `input_tensor` along the dimensions given in `axis`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each entry in `axis`. If `keepdims` is true, the reduced dimensions are retained with length 1. If `axis` is None, all dimensions are reduced, and a tensor with a single element is returned. Args: input_tensor: The tensor to reduce. Should have real numeric type. axis: The dimensions to reduce. If `None` (the default), reduces all dimensions. Must be in the range `[-rank(input_tensor), rank(input_tensor))`. keepdims: If true, retains reduced dimensions with length 1. name: A name for the operation (optional). reduction_indices: The old (deprecated) name for axis. keep_dims: Deprecated alias for `keepdims`. Returns: The reduced tensor. @compatibility(numpy) Equivalent to np.max @end_compatibility """ axis = deprecation.deprecated_argument_lookup("axis", axis, "reduction_indices", reduction_indices) keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims, "keep_dims", keep_dims) return reduce_max(input_tensor, axis, keepdims, name) @tf_export("math.reduce_max", "reduce_max", v1=[]) @dispatch.add_dispatch_support def reduce_max(input_tensor, axis=None, keepdims=False, name=None): """Computes the maximum of elements across dimensions of a tensor. Reduces `input_tensor` along the dimensions given in `axis`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each entry in `axis`. If `keepdims` is true, the reduced dimensions are retained with length 1. If `axis` is None, all dimensions are reduced, and a tensor with a single element is returned. Args: input_tensor: The tensor to reduce. Should have real numeric type. axis: The dimensions to reduce. If `None` (the default), reduces all dimensions. Must be in the range `[-rank(input_tensor), rank(input_tensor))`. keepdims: If true, retains reduced dimensions with length 1. name: A name for the operation (optional). Returns: The reduced tensor. @compatibility(numpy) Equivalent to np.max @end_compatibility """ keepdims = False if keepdims is None else keepdims return _may_reduce_to_scalar( keepdims, axis, gen_math_ops._max( input_tensor, _ReductionDims(input_tensor, axis), keepdims, name=name)) @tf_export(v1=["math.reduce_all", "reduce_all"]) @deprecation.deprecated_args(None, "keep_dims is deprecated, use keepdims instead", "keep_dims") def reduce_all_v1(input_tensor, axis=None, keepdims=None, name=None, reduction_indices=None, keep_dims=None): """Computes the "logical and" of elements across dimensions of a tensor. Reduces `input_tensor` along the dimensions given in `axis`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each entry in `axis`. If `keepdims` is true, the reduced dimensions are retained with length 1. If `axis` is None, all dimensions are reduced, and a tensor with a single element is returned. 
For example: ```python x = tf.constant([[True, True], [False, False]]) tf.reduce_all(x) # False tf.reduce_all(x, 0) # [False, False] tf.reduce_all(x, 1) # [True, False] ``` Args: input_tensor: The boolean tensor to reduce. axis: The dimensions to reduce. If `None` (the default), reduces all dimensions. Must be in the range `[-rank(input_tensor), rank(input_tensor))`. keepdims: If true, retains reduced dimensions with length 1. name: A name for the operation (optional). reduction_indices: The old (deprecated) name for axis. keep_dims: Deprecated alias for `keepdims`. Returns: The reduced tensor. @compatibility(numpy) Equivalent to np.all @end_compatibility """ axis = deprecation.deprecated_argument_lookup("axis", axis, "reduction_indices", reduction_indices) keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims, "keep_dims", keep_dims) return reduce_all(input_tensor, axis, keepdims, name) @tf_export("reduce_all", "math.reduce_all", v1=[]) @dispatch.add_dispatch_support def reduce_all(input_tensor, axis=None, keepdims=False, name=None): """Computes the "logical and" of elements across dimensions of a tensor. Reduces `input_tensor` along the dimensions given in `axis`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each entry in `axis`. If `keepdims` is true, the reduced dimensions are retained with length 1. If `axis` is None, all dimensions are reduced, and a tensor with a single element is returned. For example: ```python x = tf.constant([[True, True], [False, False]]) tf.reduce_all(x) # False tf.reduce_all(x, 0) # [False, False] tf.reduce_all(x, 1) # [True, False] ``` Args: input_tensor: The boolean tensor to reduce. axis: The dimensions to reduce. If `None` (the default), reduces all dimensions. Must be in the range `[-rank(input_tensor), rank(input_tensor))`. keepdims: If true, retains reduced dimensions with length 1. name: A name for the operation (optional). Returns: The reduced tensor. @compatibility(numpy) Equivalent to np.all @end_compatibility """ keepdims = False if keepdims is None else keepdims return _may_reduce_to_scalar( keepdims, axis, gen_math_ops._all( input_tensor, _ReductionDims(input_tensor, axis), keepdims, name=name)) @tf_export(v1=["math.reduce_any", "reduce_any"]) @deprecation.deprecated_args(None, "keep_dims is deprecated, use keepdims instead", "keep_dims") def reduce_any_v1(input_tensor, axis=None, keepdims=None, name=None, reduction_indices=None, keep_dims=None): """Computes the "logical or" of elements across dimensions of a tensor. Reduces `input_tensor` along the dimensions given in `axis`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each entry in `axis`. If `keepdims` is true, the reduced dimensions are retained with length 1. If `axis` is None, all dimensions are reduced, and a tensor with a single element is returned. For example: ```python x = tf.constant([[True, True], [False, False]]) tf.reduce_any(x) # True tf.reduce_any(x, 0) # [True, True] tf.reduce_any(x, 1) # [True, False] ``` Args: input_tensor: The boolean tensor to reduce. axis: The dimensions to reduce. If `None` (the default), reduces all dimensions. Must be in the range `[-rank(input_tensor), rank(input_tensor))`. keepdims: If true, retains reduced dimensions with length 1. name: A name for the operation (optional). reduction_indices: The old (deprecated) name for axis. keep_dims: Deprecated alias for `keepdims`. Returns: The reduced tensor. 
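As a sketch of how this relates to `tf.reduce_all` (the equivalence follows from De Morgan's laws; values assume eager execution):

```python
x = tf.constant([[True, False], [False, False]])
tf.reduce_any(x, 1)  # [True, False]
tf.logical_not(tf.reduce_all(tf.logical_not(x), 1))  # [True, False], same result
```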
@compatibility(numpy) Equivalent to np.any @end_compatibility """ axis = deprecation.deprecated_argument_lookup("axis", axis, "reduction_indices", reduction_indices) keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims, "keep_dims", keep_dims) return reduce_any(input_tensor, axis, keepdims, name) @tf_export("math.reduce_any", "reduce_any", v1=[]) @dispatch.add_dispatch_support def reduce_any(input_tensor, axis=None, keepdims=False, name=None): """Computes the "logical or" of elements across dimensions of a tensor. Reduces `input_tensor` along the dimensions given in `axis`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each entry in `axis`. If `keepdims` is true, the reduced dimensions are retained with length 1. If `axis` is None, all dimensions are reduced, and a tensor with a single element is returned. For example: ```python x = tf.constant([[True, True], [False, False]]) tf.reduce_any(x) # True tf.reduce_any(x, 0) # [True, True] tf.reduce_any(x, 1) # [True, False] ``` Args: input_tensor: The boolean tensor to reduce. axis: The dimensions to reduce. If `None` (the default), reduces all dimensions. Must be in the range `[-rank(input_tensor), rank(input_tensor))`. keepdims: If true, retains reduced dimensions with length 1. name: A name for the operation (optional). Returns: The reduced tensor. @compatibility(numpy) Equivalent to np.any @end_compatibility """ keepdims = False if keepdims is None else keepdims return _may_reduce_to_scalar( keepdims, axis, gen_math_ops._any( input_tensor, _ReductionDims(input_tensor, axis), keepdims, name=name)) @tf_export(v1=["math.reduce_logsumexp", "reduce_logsumexp"]) @deprecation.deprecated_args(None, "keep_dims is deprecated, use keepdims instead", "keep_dims") def reduce_logsumexp_v1(input_tensor, axis=None, keepdims=None, name=None, reduction_indices=None, keep_dims=None): """Computes log(sum(exp(elements across dimensions of a tensor))). Reduces `input_tensor` along the dimensions given in `axis`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each entry in `axis`. If `keepdims` is true, the reduced dimensions are retained with length 1. If `axis` has no entries, all dimensions are reduced, and a tensor with a single element is returned. This function is more numerically stable than log(sum(exp(input))). It avoids overflows caused by taking the exp of large inputs and underflows caused by taking the log of small inputs. For example: ```python x = tf.constant([[0., 0., 0.], [0., 0., 0.]]) tf.reduce_logsumexp(x) # log(6) tf.reduce_logsumexp(x, 0) # [log(2), log(2), log(2)] tf.reduce_logsumexp(x, 1) # [log(3), log(3)] tf.reduce_logsumexp(x, 1, keepdims=True) # [[log(3)], [log(3)]] tf.reduce_logsumexp(x, [0, 1]) # log(6) ``` Args: input_tensor: The tensor to reduce. Should have numeric type. axis: The dimensions to reduce. If `None` (the default), reduces all dimensions. Must be in the range `[-rank(input_tensor), rank(input_tensor))`. keepdims: If true, retains reduced dimensions with length 1. name: A name for the operation (optional). reduction_indices: The old (deprecated) name for axis. keep_dims: Deprecated alias for `keepdims`. Returns: The reduced tensor. 
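The stable evaluation can be sketched with the max-shift identity below (the helper is illustrative only, not part of the API):

```python
def _logsumexp_sketch(x):
  m = tf.reduce_max(x)
  return m + tf.math.log(tf.reduce_sum(tf.exp(x - m)))
```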
""" axis = deprecation.deprecated_argument_lookup("axis", axis, "reduction_indices", reduction_indices) keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims, "keep_dims", keep_dims) return reduce_logsumexp(input_tensor, axis, keepdims, name) @tf_export("math.reduce_logsumexp", "reduce_logsumexp", v1=[]) def reduce_logsumexp(input_tensor, axis=None, keepdims=False, name=None): """Computes log(sum(exp(elements across dimensions of a tensor))). Reduces `input_tensor` along the dimensions given in `axis`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each entry in `axis`. If `keepdims` is true, the reduced dimensions are retained with length 1. If `axis` has no entries, all dimensions are reduced, and a tensor with a single element is returned. This function is more numerically stable than log(sum(exp(input))). It avoids overflows caused by taking the exp of large inputs and underflows caused by taking the log of small inputs. For example: ```python x = tf.constant([[0., 0., 0.], [0., 0., 0.]]) tf.reduce_logsumexp(x) # log(6) tf.reduce_logsumexp(x, 0) # [log(2), log(2), log(2)] tf.reduce_logsumexp(x, 1) # [log(3), log(3)] tf.reduce_logsumexp(x, 1, keepdims=True) # [[log(3)], [log(3)]] tf.reduce_logsumexp(x, [0, 1]) # log(6) ``` Args: input_tensor: The tensor to reduce. Should have numeric type. axis: The dimensions to reduce. If `None` (the default), reduces all dimensions. Must be in the range `[-rank(input_tensor), rank(input_tensor))`. keepdims: If true, retains reduced dimensions with length 1. name: A name for the operation (optional). Returns: The reduced tensor. """ keepdims = False if keepdims is None else keepdims input_tensor = ops.convert_to_tensor(input_tensor) with ops.name_scope(name, "ReduceLogSumExp", [input_tensor]) as name: raw_max = reduce_max(input_tensor, axis=axis, keepdims=True) my_max = array_ops.stop_gradient( array_ops.where( gen_math_ops.is_finite(raw_max), raw_max, array_ops.zeros_like(raw_max))) result = gen_math_ops.log( reduce_sum( gen_math_ops.exp(gen_math_ops.sub(input_tensor, my_max)), axis, keepdims=keepdims)) if not keepdims: my_max = array_ops.reshape(my_max, array_ops.shape(result)) result = gen_math_ops.add(result, my_max) return _may_reduce_to_scalar(keepdims, axis, result) @tf_export("linalg.trace", v1=["linalg.trace", "trace"]) @deprecation.deprecated_endpoints("trace") def trace(x, name=None): """Compute the trace of a tensor `x`. `trace(x)` returns the sum along the main diagonal of each inner-most matrix in x. If x is of rank `k` with shape `[I, J, K, ..., L, M, N]`, then output is a tensor of rank `k-2` with dimensions `[I, J, K, ..., L]` where `output[i, j, k, ..., l] = trace(x[i, j, i, ..., l, :, :])` For example: ```python x = tf.constant([[1, 2], [3, 4]]) tf.linalg.trace(x) # 5 x = tf.constant([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) tf.linalg.trace(x) # 15 x = tf.constant([[[1, 2, 3], [4, 5, 6], [7, 8, 9]], [[-1, -2, -3], [-4, -5, -6], [-7, -8, -9]]]) tf.linalg.trace(x) # [15, -15] ``` Args: x: tensor. name: A name for the operation (optional). Returns: The trace of input tensor. """ with ops.name_scope(name, "Trace", [x]) as name: x = ops.convert_to_tensor(x, name="x") return reduce_sum(array_ops.matrix_diag_part(x), [-1], name=name) @tf_export("linalg.matmul", "matmul") @dispatch.add_dispatch_support def matmul(a, b, transpose_a=False, transpose_b=False, adjoint_a=False, adjoint_b=False, a_is_sparse=False, b_is_sparse=False, name=None): """Multiplies matrix `a` by matrix `b`, producing `a` * `b`. 
The inputs must, following any transpositions, be tensors of rank >= 2 where the inner 2 dimensions specify valid matrix multiplication arguments, and any further outer dimensions match. Both matrices must be of the same type. The supported types are: `float16`, `float32`, `float64`, `int32`, `complex64`, `complex128`. Either matrix can be transposed or adjointed (conjugated and transposed) on the fly by setting one of the corresponding flag to `True`. These are `False` by default. If one or both of the matrices contain a lot of zeros, a more efficient multiplication algorithm can be used by setting the corresponding `a_is_sparse` or `b_is_sparse` flag to `True`. These are `False` by default. This optimization is only available for plain matrices (rank-2 tensors) with datatypes `bfloat16` or `float32`. For example: ```python # 2-D tensor `a` # [[1, 2, 3], # [4, 5, 6]] a = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3]) # 2-D tensor `b` # [[ 7, 8], # [ 9, 10], # [11, 12]] b = tf.constant([7, 8, 9, 10, 11, 12], shape=[3, 2]) # `a` * `b` # [[ 58, 64], # [139, 154]] c = tf.matmul(a, b) # 3-D tensor `a` # [[[ 1, 2, 3], # [ 4, 5, 6]], # [[ 7, 8, 9], # [10, 11, 12]]] a = tf.constant(np.arange(1, 13, dtype=np.int32), shape=[2, 2, 3]) # 3-D tensor `b` # [[[13, 14], # [15, 16], # [17, 18]], # [[19, 20], # [21, 22], # [23, 24]]] b = tf.constant(np.arange(13, 25, dtype=np.int32), shape=[2, 3, 2]) # `a` * `b` # [[[ 94, 100], # [229, 244]], # [[508, 532], # [697, 730]]] c = tf.matmul(a, b) # Since python >= 3.5 the @ operator is supported (see PEP 465). # In TensorFlow, it simply calls the `tf.matmul()` function, so the # following lines are equivalent: d = a @ b @ [[10.], [11.]] d = tf.matmul(tf.matmul(a, b), [[10.], [11.]]) ``` Args: a: `Tensor` of type `float16`, `float32`, `float64`, `int32`, `complex64`, `complex128` and rank > 1. b: `Tensor` with same type and rank as `a`. transpose_a: If `True`, `a` is transposed before multiplication. transpose_b: If `True`, `b` is transposed before multiplication. adjoint_a: If `True`, `a` is conjugated and transposed before multiplication. adjoint_b: If `True`, `b` is conjugated and transposed before multiplication. a_is_sparse: If `True`, `a` is treated as a sparse matrix. b_is_sparse: If `True`, `b` is treated as a sparse matrix. name: Name for the operation (optional). Returns: A `Tensor` of the same type as `a` and `b` where each inner-most matrix is the product of the corresponding matrices in `a` and `b`, e.g. if all transpose or adjoint attributes are `False`: `output`[..., i, j] = sum_k (`a`[..., i, k] * `b`[..., k, j]), for all indices i, j. Note: This is matrix product, not element-wise product. Raises: ValueError: If transpose_a and adjoint_a, or transpose_b and adjoint_b are both set to True. """ with ops.name_scope(name, "MatMul", [a, b]) as name: if transpose_a and adjoint_a: raise ValueError("Only one of transpose_a and adjoint_a can be True.") if transpose_b and adjoint_b: raise ValueError("Only one of transpose_b and adjoint_b can be True.") if context.executing_eagerly(): if not isinstance(a, (ops.EagerTensor, _resource_variable_type)): a = ops.convert_to_tensor(a, name="a") if not isinstance(b, (ops.EagerTensor, _resource_variable_type)): b = ops.convert_to_tensor(b, name="b") else: a = ops.convert_to_tensor(a, name="a") b = ops.convert_to_tensor(b, name="b") # TODO(apassos) remove _shape_tuple here when it is not needed. 
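    # Note (a sketch of why the two branches below differ): BatchMatMulV2,
    # selected once the 2019-04-25 forward-compatibility window has passed,
    # broadcasts batch dimensions, so a batched kernel is needed when either
    # operand has rank > 2; the older BatchMatMul expects batch dimensions on
    # both operands, hence the `or` vs. `and` checks below.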
a_shape = a._shape_tuple() # pylint: disable=protected-access b_shape = b._shape_tuple() # pylint: disable=protected-access if fwd_compat.forward_compatible(2019, 4, 25): output_may_have_non_empty_batch_shape = ( (a_shape is None or len(a_shape) > 2) or (b_shape is None or len(b_shape) > 2)) batch_mat_mul_fn = gen_math_ops.batch_mat_mul_v2 else: output_may_have_non_empty_batch_shape = ( (a_shape is None or len(a_shape) > 2) and (b_shape is None or len(b_shape) > 2)) batch_mat_mul_fn = gen_math_ops.batch_mat_mul if (not a_is_sparse and not b_is_sparse) and output_may_have_non_empty_batch_shape: # BatchMatmul does not support transpose, so we conjugate the matrix and # use adjoint instead. Conj() is a noop for real matrices. if transpose_a: a = conj(a) adjoint_a = True if transpose_b: b = conj(b) adjoint_b = True return batch_mat_mul_fn(a, b, adj_x=adjoint_a, adj_y=adjoint_b, name=name) # Neither matmul nor sparse_matmul support adjoint, so we conjugate # the matrix and use transpose instead. Conj() is a noop for real # matrices. if adjoint_a: a = conj(a) transpose_a = True if adjoint_b: b = conj(b) transpose_b = True use_sparse_matmul = False if a_is_sparse or b_is_sparse: sparse_matmul_types = [dtypes.bfloat16, dtypes.float32] use_sparse_matmul = ( a.dtype in sparse_matmul_types and b.dtype in sparse_matmul_types) if ((a.dtype == dtypes.bfloat16 or b.dtype == dtypes.bfloat16) and a.dtype != b.dtype): # matmul currently doesn't handle mixed-precision inputs. use_sparse_matmul = True if use_sparse_matmul: ret = sparse_matmul( a, b, transpose_a=transpose_a, transpose_b=transpose_b, a_is_sparse=a_is_sparse, b_is_sparse=b_is_sparse, name=name) # sparse_matmul always returns float32, even with # bfloat16 inputs. This prevents us from configuring bfloat16 training. # casting to bfloat16 also matches non-sparse matmul behavior better. if a.dtype == dtypes.bfloat16 and b.dtype == dtypes.bfloat16: ret = cast(ret, dtypes.bfloat16) return ret else: return gen_math_ops.mat_mul( a, b, transpose_a=transpose_a, transpose_b=transpose_b, name=name) @tf_export("linalg.matvec") def matvec(a, b, transpose_a=False, adjoint_a=False, a_is_sparse=False, b_is_sparse=False, name=None): """Multiplies matrix `a` by vector `b`, producing `a` * `b`. The matrix `a` must, following any transpositions, be a tensor of rank >= 2, and we must have `shape(b) = shape(a)[:-2] + [shape(a)[-1]]`. Both `a` and `b` must be of the same type. The supported types are: `float16`, `float32`, `float64`, `int32`, `complex64`, `complex128`. Matrix `a` can be transposed or adjointed (conjugated and transposed) on the fly by setting one of the corresponding flag to `True`. These are `False` by default. If one or both of the inputs contain a lot of zeros, a more efficient multiplication algorithm can be used by setting the corresponding `a_is_sparse` or `b_is_sparse` flag to `True`. These are `False` by default. This optimization is only available for plain matrices/vectors (rank-2/1 tensors) with datatypes `bfloat16` or `float32`. 
For example: ```python # 2-D tensor `a` # [[1, 2, 3], # [4, 5, 6]] a = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3]) # 1-D tensor `b` # [7, 9, 11] b = tf.constant([7, 9, 11], shape=[3]) # `a` * `b` # [ 58, 139] c = tf.linalg.matvec(a, b) # 3-D tensor `a` # [[[ 1, 2, 3], # [ 4, 5, 6]], # [[ 7, 8, 9], # [10, 11, 12]]] a = tf.constant(np.arange(1, 13, dtype=np.int32), shape=[2, 2, 3]) # 2-D tensor `b` # [[13, 14, 15], # [16, 17, 18]] b = tf.constant(np.arange(13, 19, dtype=np.int32), shape=[2, 3]) # `a` * `b` # [[ 86, 212], # [410, 563]] c = tf.linalg.matvec(a, b) ``` Args: a: `Tensor` of type `float16`, `float32`, `float64`, `int32`, `complex64`, `complex128` and rank > 1. b: `Tensor` with same type and rank = `rank(a) - 1`. transpose_a: If `True`, `a` is transposed before multiplication. adjoint_a: If `True`, `a` is conjugated and transposed before multiplication. a_is_sparse: If `True`, `a` is treated as a sparse matrix. b_is_sparse: If `True`, `b` is treated as a sparse matrix. name: Name for the operation (optional). Returns: A `Tensor` of the same type as `a` and `b` where each inner-most vector is the product of the corresponding matrices in `a` and vectors in `b`, e.g. if all transpose or adjoint attributes are `False`: `output`[..., i] = sum_k (`a`[..., i, k] * `b`[..., k]), for all indices i. Note: This is matrix-vector product, not element-wise product. Raises: ValueError: If transpose_a and adjoint_a are both set to True. """ with ops.name_scope(name, "MatVec", [a, b]) as name: output = matmul( a, array_ops.expand_dims(b, axis=-1), transpose_a=transpose_a, adjoint_a=adjoint_a, a_is_sparse=a_is_sparse, b_is_sparse=b_is_sparse) return array_ops.squeeze(output, axis=-1) _OverrideBinaryOperatorHelper(matmul, "matmul") sparse_matmul = deprecation.deprecated(None, "Use `tf.linalg.matmul` instead")( gen_math_ops.sparse_mat_mul) tf_export(v1=["sparse_matmul"])(sparse_matmul) @ops.RegisterStatistics("MatMul", "flops") def _calc_mat_mul_flops(graph, node): """Calculates the compute resources needed for MatMul.""" transpose_a = node.attr["transpose_a"].b a_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0]) a_shape.assert_is_fully_defined() if transpose_a: k = int(a_shape[0]) else: k = int(a_shape[1]) output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name) output_shape.assert_is_fully_defined() output_count = np.prod(output_shape.as_list()) return ops.OpStats("flops", (k * output_count * 2)) def _as_indexed_slices(x, optimize=True): """Convert 'x' to IndexedSlices. Convert a dense Tensor to a block-sparse IndexedSlices. Args: x: Either a Tensor object, or an IndexedSlices object. optimize: if true, attempt to optimize the conversion of 'x'. Returns: An IndexedSlices object. Raises: TypeError: If 'x' is not a Tensor or an IndexedSlices object. """ # TODO(touts): op_scope if not isinstance(x, (ops.Tensor, ops.IndexedSlices)): raise TypeError("Not a Tensor or IndexedSlices: %s" % type(x)) if isinstance(x, ops.IndexedSlices): return x x_shape = array_ops.shape_internal(x, optimize=optimize) return ops.IndexedSlices(x, range(0, x_shape[0]), x_shape) def _as_indexed_slices_list(inputs, optimize=True): """Convert all elements of 'inputs' to IndexedSlices. Additionally, homogenize the types of all the indices to either int32 or int64. Args: inputs: List containing either Tensor or IndexedSlices objects. optimize: if true, attempt to optimize the conversion of each input. Returns: A list of IndexedSlices objects. Raises: TypeError: If 'inputs' is not a list or a tuple.
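A minimal sketch of the behaviour (illustrative only):

```python
dense = tf.constant([[1., 2.], [3., 4.]])
outputs = _as_indexed_slices_list([dense])
# outputs[0] is an IndexedSlices whose indices cover every row of `dense`.
```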
""" if not isinstance(inputs, (list, tuple)): raise TypeError("Expected a list or tuple, not a %s" % type(inputs)) outputs = [_as_indexed_slices(i, optimize=optimize) for i in inputs] with_int32_index = [ o.indices for o in outputs if o.indices.dtype == dtypes.int32 ] if not with_int32_index or len(with_int32_index) == len(outputs): return outputs casted_outputs = [] for o in outputs: if o.indices.dtype == dtypes.int32: casted_outputs.append( ops.IndexedSlices(o.values, cast(o.indices, dtypes.int64), o.dense_shape)) else: casted_outputs.append(o) return casted_outputs @tf_export("math.add_n", "add_n") @dispatch.add_dispatch_support def add_n(inputs, name=None): """Adds all input tensors element-wise. Converts `IndexedSlices` objects into dense tensors prior to adding. `tf.math.add_n` performs the same operation as `tf.math.accumulate_n`, but it waits for all of its inputs to be ready before beginning to sum. This buffering can result in higher memory consumption when inputs are ready at different times, since the minimum temporary storage required is proportional to the input size rather than the output size. This op does not [broadcast]( https://docs.scipy.org/doc/numpy-1.13.0/user/basics.broadcasting.html) its inputs. If you need broadcasting, use `tf.math.add` (or the `+` operator) instead. For example: ```python a = tf.constant([[3, 5], [4, 8]]) b = tf.constant([[1, 6], [2, 9]]) tf.math.add_n([a, b, a]) # [[7, 16], [10, 25]] ``` Args: inputs: A list of `tf.Tensor` or `tf.IndexedSlices` objects, each with same shape and type. name: A name for the operation (optional). Returns: A `Tensor` of same shape and type as the elements of `inputs`. Raises: ValueError: If `inputs` don't all have same shape and dtype or the shape cannot be inferred. """ if not inputs or not isinstance(inputs, (list, tuple)): raise ValueError("inputs must be a list of at least one " "Tensor/IndexedSlices with the same dtype and shape") inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs) if not all(isinstance(x, (ops.Tensor, ops.IndexedSlices)) for x in inputs): raise ValueError("inputs must be a list of at least one " "Tensor/IndexedSlices with the same dtype and shape") if len(inputs) == 1: if isinstance(inputs[0], ops.IndexedSlices): values = ops.convert_to_tensor(inputs[0]) else: values = inputs[0] if name: return array_ops.identity(values, name=name) return values return gen_math_ops.add_n(inputs, name=name) @tf_export("math.accumulate_n", v1=["math.accumulate_n", "accumulate_n"]) @deprecation.deprecated_endpoints("accumulate_n") def accumulate_n(inputs, shape=None, tensor_dtype=None, name=None): """Returns the element-wise sum of a list of tensors. Optionally, pass `shape` and `tensor_dtype` for shape and type checking, otherwise, these are inferred. `accumulate_n` performs the same operation as `tf.math.add_n`, but does not wait for all of its inputs to be ready before beginning to sum. This approach can save memory if inputs are ready at different times, since minimum temporary storage is proportional to the output size rather than the inputs' size. `accumulate_n` is differentiable (but wasn't previous to TensorFlow 1.7). For example: ```python a = tf.constant([[1, 2], [3, 4]]) b = tf.constant([[5, 0], [0, 6]]) tf.math.accumulate_n([a, b, a]) # [[7, 4], [6, 14]] # Explicitly pass shape and type tf.math.accumulate_n([a, b, a], shape=[2, 2], tensor_dtype=tf.int32) # [[7, 4], # [6, 14]] ``` Args: inputs: A list of `Tensor` objects, each with same shape and type. 
shape: Expected shape of elements of `inputs` (optional). Also controls the output shape of this op, which may affect type inference in other ops. A value of `None` means "infer the input shape from the shapes in `inputs`". tensor_dtype: Expected data type of `inputs` (optional). A value of `None` means "infer the input dtype from `inputs[0]`". name: A name for the operation (optional). Returns: A `Tensor` of same shape and type as the elements of `inputs`. Raises: ValueError: If `inputs` don't all have same shape and dtype or the shape cannot be inferred. """ def _input_error(): return ValueError("inputs must be a list of at least one Tensor with the " "same dtype and shape") if not inputs or not isinstance(inputs, (list, tuple)): raise _input_error() inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs) if not all(isinstance(x, ops.Tensor) for x in inputs): raise _input_error() if not all(x.dtype == inputs[0].dtype for x in inputs): raise _input_error() if shape is not None: shape = tensor_shape.as_shape(shape) else: shape = tensor_shape.unknown_shape() for input_tensor in inputs: if isinstance(input_tensor, ops.Tensor): shape = shape.merge_with(input_tensor.get_shape()) # tensor_dtype is for safety only; operator's output type computed in C++ if tensor_dtype is not None and tensor_dtype != inputs[0].dtype: raise TypeError("tensor_dtype is {}, but input is of type {}".format( tensor_dtype, inputs[0].dtype)) if len(inputs) == 1 and name is None: return inputs[0] elif len(inputs) == 1 and name is not None: return array_ops.identity(inputs[0], name=name) elif context.executing_eagerly(): # TemporaryVariable not currently supported in eager mode; fall back # onto AddN for now. # TODO(frreiss) remove this once the lifetime of eager variables gets # addressed return add_n(inputs, name=name) else: return gen_math_ops.accumulate_nv2(inputs, name=name, shape=shape) # pylint: disable=protected-access @ops.RegisterGradient("AccumulateNV2") def _accumulate_n_grad(op, grad): """Same as gradient for AddN. Copies the gradient to all inputs.""" # Not broadcasting. return [grad] * len(op.inputs) @tf_export("math.sigmoid", "nn.sigmoid", "sigmoid") def sigmoid(x, name=None): """Computes sigmoid of `x` element-wise. Specifically, `y = 1 / (1 + exp(-x))`. Args: x: A Tensor with type `float16`, `float32`, `float64`, `complex64`, or `complex128`. name: A name for the operation (optional). Returns: A Tensor with the same type as `x`. @compatibility(scipy) Equivalent to scipy.special.expit @end_compatibility """ with ops.name_scope(name, "Sigmoid", [x]) as name: x = ops.convert_to_tensor(x, name="x") return gen_math_ops.sigmoid(x, name=name) @tf_export("math.log_sigmoid", v1=["math.log_sigmoid", "log_sigmoid"]) @dispatch.add_dispatch_support @deprecation.deprecated_endpoints("log_sigmoid") def log_sigmoid(x, name=None): """Computes log sigmoid of `x` element-wise. Specifically, `y = log(1 / (1 + exp(-x)))`. For numerical stability, we use `y = -tf.nn.softplus(-x)`. Args: x: A Tensor with type `float32` or `float64`. name: A name for the operation (optional). Returns: A Tensor with the same type as `x`. """ with ops.name_scope(name, "LogSigmoid", [x]) as name: x = ops.convert_to_tensor(x, name="x") return gen_math_ops.neg(gen_nn_ops.softplus(-x), name=name) @tf_export("math.bincount", v1=[]) def bincount(arr, weights=None, minlength=None, maxlength=None, dtype=dtypes.int32, name=None): """Counts the number of occurrences of each value in an integer array. 
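For example, a small sketch (the commented result assumes eager execution):

```python
values = tf.constant([1, 1, 2, 3, 3, 3])
tf.math.bincount(values)  # [0, 2, 1, 3]
```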
If `minlength` and `maxlength` are not given, returns a vector with length `tf.reduce_max(arr) + 1` if `arr` is non-empty, and length 0 otherwise. If `weights` are non-None, then index `i` of the output stores the sum of the value in `weights` at each index where the corresponding value in `arr` is `i`. Args: arr: An int32 tensor of non-negative values. weights: If non-None, must be the same shape as arr. For each value in `arr`, the bin will be incremented by the corresponding weight instead of 1. minlength: If given, ensures the output has length at least `minlength`, padding with zeros at the end if necessary. maxlength: If given, skips values in `arr` that are equal or greater than `maxlength`, ensuring that the output has length at most `maxlength`. dtype: If `weights` is None, determines the type of the output bins. name: A name scope for the associated operations (optional). Returns: A vector with the same dtype as `weights` or the given `dtype`. The bin values. """ name = "bincount" if name is None else name with ops.name_scope(name): arr = ops.convert_to_tensor(arr, name="arr", dtype=dtypes.int32) array_is_nonempty = reduce_prod(array_ops.shape(arr)) > 0 output_size = cast(array_is_nonempty, dtypes.int32) * (reduce_max(arr) + 1) if minlength is not None: minlength = ops.convert_to_tensor( minlength, name="minlength", dtype=dtypes.int32) output_size = gen_math_ops.maximum(minlength, output_size) if maxlength is not None: maxlength = ops.convert_to_tensor( maxlength, name="maxlength", dtype=dtypes.int32) output_size = gen_math_ops.minimum(maxlength, output_size) if weights is not None: weights = ops.convert_to_tensor(weights, name="weights") return gen_math_ops.unsorted_segment_sum(weights, arr, output_size) weights = constant_op.constant([], dtype) return gen_math_ops.bincount(arr, output_size, weights) @tf_export(v1=["math.bincount", "bincount"]) @deprecation.deprecated_endpoints("bincount") def bincount_v1(arr, weights=None, minlength=None, maxlength=None, dtype=dtypes.int32): """Counts the number of occurrences of each value in an integer array. If `minlength` and `maxlength` are not given, returns a vector with length `tf.reduce_max(arr) + 1` if `arr` is non-empty, and length 0 otherwise. If `weights` are non-None, then index `i` of the output stores the sum of the value in `weights` at each index where the corresponding value in `arr` is `i`. Args: arr: An int32 tensor of non-negative values. weights: If non-None, must be the same shape as arr. For each value in `arr`, the bin will be incremented by the corresponding weight instead of 1. minlength: If given, ensures the output has length at least `minlength`, padding with zeros at the end if necessary. maxlength: If given, skips values in `arr` that are equal or greater than `maxlength`, ensuring that the output has length at most `maxlength`. dtype: If `weights` is None, determines the type of the output bins. Returns: A vector with the same dtype as `weights` or the given `dtype`. The bin values. """ return bincount(arr, weights, minlength, maxlength, dtype) @tf_export("math.cumsum", "cumsum") def cumsum(x, axis=0, exclusive=False, reverse=False, name=None): """Compute the cumulative sum of the tensor `x` along `axis`. 
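For example, with concrete values (assuming eager execution):

```python
tf.cumsum(tf.constant([1, 2, 3]))  # [1, 3, 6]
tf.cumsum(tf.constant([1, 2, 3]), exclusive=True)  # [0, 1, 3]
```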
By default, this op performs an inclusive cumsum, which means that the first element of the input is identical to the first element of the output: ```python tf.cumsum([a, b, c]) # [a, a + b, a + b + c] ``` By setting the `exclusive` kwarg to `True`, an exclusive cumsum is performed instead: ```python tf.cumsum([a, b, c], exclusive=True) # [0, a, a + b] ``` By setting the `reverse` kwarg to `True`, the cumsum is performed in the opposite direction: ```python tf.cumsum([a, b, c], reverse=True) # [a + b + c, b + c, c] ``` This is more efficient than using separate `tf.reverse` ops. The `reverse` and `exclusive` kwargs can also be combined: ```python tf.cumsum([a, b, c], exclusive=True, reverse=True) # [b + c, c, 0] ``` Args: x: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`. axis: A `Tensor` of type `int32` (default: 0). Must be in the range `[-rank(x), rank(x))`. exclusive: If `True`, perform exclusive cumsum. reverse: A `bool` (default: False). name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `x`. """ with ops.name_scope(name, "Cumsum", [x]) as name: x = ops.convert_to_tensor(x, name="x") return gen_math_ops.cumsum( x, axis, exclusive=exclusive, reverse=reverse, name=name) @tf_export("math.cumprod", v1=["math.cumprod", "cumprod"]) @deprecation.deprecated_endpoints("cumprod") def cumprod(x, axis=0, exclusive=False, reverse=False, name=None): """Compute the cumulative product of the tensor `x` along `axis`. By default, this op performs an inclusive cumprod, which means that the first element of the input is identical to the first element of the output: ```python tf.math.cumprod([a, b, c]) # [a, a * b, a * b * c] ``` By setting the `exclusive` kwarg to `True`, an exclusive cumprod is performed instead: ```python tf.math.cumprod([a, b, c], exclusive=True) # [1, a, a * b] ``` By setting the `reverse` kwarg to `True`, the cumprod is performed in the opposite direction: ```python tf.math.cumprod([a, b, c], reverse=True) # [a * b * c, b * c, c] ``` This is more efficient than using separate `tf.reverse` ops. The `reverse` and `exclusive` kwargs can also be combined: ```python tf.math.cumprod([a, b, c], exclusive=True, reverse=True) # [b * c, c, 1] ``` Args: x: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`. axis: A `Tensor` of type `int32` (default: 0). Must be in the range `[-rank(x), rank(x))`. exclusive: If `True`, perform exclusive cumprod. reverse: A `bool` (default: False). name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `x`. """ with ops.name_scope(name, "Cumprod", [x]) as name: x = ops.convert_to_tensor(x, name="x") return gen_math_ops.cumprod( x, axis, exclusive=exclusive, reverse=reverse, name=name) @tf_export("math.conj", v1=["math.conj", "conj"]) @dispatch.add_dispatch_support @deprecation.deprecated_endpoints("conj") def conj(x, name=None): r"""Returns the complex conjugate of a complex number. Given a tensor `input` of complex numbers, this operation returns a tensor of complex numbers that are the complex conjugate of each element in `input`. The complex numbers in `input` must be of the form \\(a + bj\\), where *a* is the real part and *b* is the imaginary part. 
The complex conjugate returned by this operation is of the form \\(a - bj\\). For example: # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] tf.math.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j] If `x` is real, it is returned unchanged. Args: x: `Tensor` to conjugate. Must have numeric or variant type. name: A name for the operation (optional). Returns: A `Tensor` that is the conjugate of `x` (with the same type). Raises: TypeError: If `x` is not a numeric tensor. """ if isinstance(x, ops.Tensor): dt = x.dtype if dt.is_floating or dt.is_integer: return x with ops.name_scope(name, "Conj", [x]) as name: x = ops.convert_to_tensor(x, name="x") if x.dtype.is_complex or x.dtype == dtypes.variant: return gen_math_ops.conj(x, name=name) elif x.dtype.is_floating or x.dtype.is_integer: return x else: raise TypeError("Expected numeric or variant tensor, got dtype %r" % x.dtype) def _BroadcastShape(op): """Common shape function for binary operators that broadcast their inputs.""" return [ common_shapes.broadcast_shape(op.inputs[0].get_shape(), op.inputs[1].get_shape()) ] def reduced_shape(input_shape, axes): """Helper function for reduction ops. Args: input_shape: 1-D Tensor, the shape of the Tensor being reduced. axes: 1-D Tensor, the reduction axes. Returns: A 1-D Tensor, the output shape as if keepdims were set to True. """ # Example: # cast needed for SparseTensor reductions if context.executing_eagerly(): input_shape = input_shape.numpy() axes = axes.numpy() input_shape[axes] = 1 return input_shape input_shape = cast(input_shape, dtypes.int32) # [2, 3, 5, 7] axes = cast(axes, dtypes.int32) # [1, 2] input_rank = array_ops.size(input_shape) # 4 axes = (axes + input_rank) % input_rank axes_shape = array_ops.shape(axes) # [2] return gen_data_flow_ops.dynamic_stitch( # [2, 1, 1, 7] [ range(input_rank), # [0, 1, 2, 3] axes ], # [1, 2] [ input_shape, # [2, 3, 5, 7] array_ops.fill(axes_shape, 1) ]) # [1, 1] def _unsorted_segment_N(data, segment_ids, num_segments): """ Helper function for unsorted_segment_mean/_sqrtN. Computes the number of segment entries with 0-entries set to 1 to allow division by N. """ # bincount doesn't support negative indices so we use unsorted_segment_sum segment_ids_shape = array_ops.shape_internal(segment_ids) ones_tensor = array_ops.ones(segment_ids_shape, dtype=data.dtype) N = gen_math_ops.unsorted_segment_sum(ones_tensor, segment_ids, num_segments) # add dimensions for all non-reduced axes ndims_output = data.shape.ndims - segment_ids.shape.ndims broadcast_shape = [num_segments] + [1] * ndims_output N = array_ops.reshape(N, broadcast_shape) return gen_math_ops.maximum(N, 1) @tf_export( "math.unsorted_segment_mean", v1=["math.unsorted_segment_mean", "unsorted_segment_mean"]) @deprecation.deprecated_endpoints("unsorted_segment_mean") @dispatch.add_dispatch_support def unsorted_segment_mean(data, segment_ids, num_segments, name=None): r"""Computes the mean along segments of a tensor. Read [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) for an explanation of segments. This operator is similar to the unsorted segment sum operator found [here](../../../api_docs/python/math_ops.md#UnsortedSegmentSum). Instead of computing the sum over segments, it computes the mean of all entries belonging to a segment such that: \\(output_i = 1/N_i \sum_{j...} data[j...]\\) where the sum is over tuples `j...` such that `segment_ids[j...] == i` with \\N_i\\ being the number of occurrences of id \\i\\. 
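For instance, a brief sketch (values assume eager execution):

```python
data = tf.constant([1., 2., 3., 4.])
segment_ids = tf.constant([0, 0, 1, 1])
tf.math.unsorted_segment_mean(data, segment_ids, num_segments=2)  # [1.5, 3.5]
```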
If there is no entry for a given segment ID `i`, it outputs 0. If the given segment ID `i` is negative, the value is dropped and will not be added to the sum of the segment. Args: data: A `Tensor` with floating point or complex dtype. segment_ids: An integer tensor whose shape is a prefix of `data.shape`. num_segments: An integer scalar `Tensor`. The number of distinct segment IDs. name: A name for the operation (optional). Returns: A `Tensor`. Has same shape as data, except for the first `segment_ids.rank` dimensions, which are replaced with a single dimension which has size `num_segments`. """ with ops.name_scope(name, "UnsortedSegmentMean"): data = ops.convert_to_tensor(data) segment_ids = ops.convert_to_tensor(segment_ids) N = _unsorted_segment_N(data, segment_ids, num_segments) summed = gen_math_ops.unsorted_segment_sum(data, segment_ids, num_segments) return summed / N @tf_export( "math.unsorted_segment_sqrt_n", v1=["math.unsorted_segment_sqrt_n", "unsorted_segment_sqrt_n"]) @deprecation.deprecated_endpoints("unsorted_segment_sqrt_n") @dispatch.add_dispatch_support def unsorted_segment_sqrt_n(data, segment_ids, num_segments, name=None): r"""Computes the sum along segments of a tensor divided by the sqrt(N). Read [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) for an explanation of segments. This operator is similar to the unsorted segment sum operator found [here](../../../api_docs/python/math_ops.md#UnsortedSegmentSum). Additionally to computing the sum over segments, it divides the results by sqrt(N). \\(output_i = 1/sqrt(N_i) \sum_{j...} data[j...]\\) where the sum is over tuples `j...` such that `segment_ids[j...] == i` with \\N_i\\ being the number of occurrences of id \\i\\. If there is no entry for a given segment ID `i`, it outputs 0. Note that this op only supports floating point and complex dtypes, due to tf.sqrt only supporting these types. If the given segment ID `i` is negative, the value is dropped and will not be added to the sum of the segment. Args: data: A `Tensor` with floating point or complex dtype. segment_ids: An integer tensor whose shape is a prefix of `data.shape`. num_segments: An integer scalar `Tensor`. The number of distinct segment IDs. name: A name for the operation (optional). Returns: A `Tensor`. Has same shape as data, except for the first `segment_ids.rank` dimensions, which are replaced with a single dimension which has size `num_segments`. """ with ops.name_scope(name, "UnsortedSegmentSqrtN"): data = ops.convert_to_tensor(data) segment_ids = ops.convert_to_tensor(segment_ids) N = _unsorted_segment_N(data, segment_ids, num_segments) summed = gen_math_ops.unsorted_segment_sum(data, segment_ids, num_segments) return summed / gen_math_ops.sqrt(N) @tf_export(v1=["sparse.segment_sum", "sparse_segment_sum"]) @deprecation.deprecated_endpoints("sparse_segment_sum") def sparse_segment_sum(data, indices, segment_ids, name=None, num_segments=None): r"""Computes the sum along sparse segments of a tensor. Read [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) for an explanation of segments. Like `tf.math.segment_sum`, but `segment_ids` can have rank less than `data`'s first dimension, selecting a subset of dimension 0, specified by `indices`. `segment_ids` is allowed to have missing ids, in which case the output will be zeros at those indices. In those cases `num_segments` is used to determine the size of the output. 
For example: ```python c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]]) # Select two rows, one segment. tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0])) # => [[0 0 0 0]] # Select two rows, two segment. tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1])) # => [[ 1 2 3 4] # [-1 -2 -3 -4]] # With missing segment ids. tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 2]), num_segments=4) # => [[ 1 2 3 4] # [ 0 0 0 0] # [-1 -2 -3 -4] # [ 0 0 0 0]] # Select all rows, two segments. tf.sparse.segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1])) # => [[0 0 0 0] # [5 6 7 8]] # Which is equivalent to: tf.math.segment_sum(c, tf.constant([0, 0, 1])) ``` Args: data: A `Tensor` with data that will be assembled in the output. indices: A 1-D `Tensor` with indices into `data`. Has same rank as `segment_ids`. segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values should be sorted and can be repeated. name: A name for the operation (optional). num_segments: An optional int32 scalar. Indicates the size of the output `Tensor`. Returns: A `tensor` of the shape as data, except for dimension 0 which has size `k`, the number of segments specified via `num_segments` or inferred for the last element in `segments_ids`. """ if num_segments is not None: return gen_math_ops.sparse_segment_sum_with_num_segments( data=data, indices=indices, segment_ids=segment_ids, num_segments=num_segments, name=name) else: return gen_math_ops.sparse_segment_sum( data=data, indices=indices, segment_ids=segment_ids, name=name) @tf_export("sparse.segment_sum", v1=[]) def sparse_segment_sum_v2(data, indices, segment_ids, num_segments=None, name=None): r"""Computes the sum along sparse segments of a tensor. Read [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) for an explanation of segments. Like `tf.math.segment_sum`, but `segment_ids` can have rank less than `data`'s first dimension, selecting a subset of dimension 0, specified by `indices`. `segment_ids` is allowed to have missing ids, in which case the output will be zeros at those indices. In those cases `num_segments` is used to determine the size of the output. For example: ```python c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]]) # Select two rows, one segment. tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0])) # => [[0 0 0 0]] # Select two rows, two segment. tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1])) # => [[ 1 2 3 4] # [-1 -2 -3 -4]] # With missing segment ids. tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 2]), num_segments=4) # => [[ 1 2 3 4] # [ 0 0 0 0] # [-1 -2 -3 -4] # [ 0 0 0 0]] # Select all rows, two segments. tf.sparse.segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1])) # => [[0 0 0 0] # [5 6 7 8]] # Which is equivalent to: tf.math.segment_sum(c, tf.constant([0, 0, 1])) ``` Args: data: A `Tensor` with data that will be assembled in the output. indices: A 1-D `Tensor` with indices into `data`. Has same rank as `segment_ids`. segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values should be sorted and can be repeated. num_segments: An optional int32 scalar. Indicates the size of the output `Tensor`. name: A name for the operation (optional). Returns: A `tensor` of the shape as data, except for dimension 0 which has size `k`, the number of segments specified via `num_segments` or inferred for the last element in `segments_ids`. 
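When no segment ids are missing, the result can also be sketched as a gather followed by a dense segment sum (an illustrative equivalence, reusing `c` from the example above):

```python
tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1]))
# gives the same result as
tf.math.segment_sum(tf.gather(c, [0, 1]), tf.constant([0, 1]))
```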
""" return sparse_segment_sum( data, indices, segment_ids, name=name, num_segments=num_segments) @tf_export(v1=["sparse.segment_mean", "sparse_segment_mean"]) @deprecation.deprecated_endpoints("sparse_segment_mean") def sparse_segment_mean(data, indices, segment_ids, name=None, num_segments=None): r"""Computes the mean along sparse segments of a tensor. Read [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) for an explanation of segments. Like `tf.math.segment_mean`, but `segment_ids` can have rank less than `data`'s first dimension, selecting a subset of dimension 0, specified by `indices`. `segment_ids` is allowed to have missing ids, in which case the output will be zeros at those indices. In those cases `num_segments` is used to determine the size of the output. Args: data: A `Tensor` with data that will be assembled in the output. indices: A 1-D `Tensor` with indices into `data`. Has same rank as `segment_ids`. segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values should be sorted and can be repeated. name: A name for the operation (optional). num_segments: An optional int32 scalar. Indicates the size of the output `Tensor`. Returns: A `tensor` of the shape as data, except for dimension 0 which has size `k`, the number of segments specified via `num_segments` or inferred for the last element in `segments_ids`. """ if num_segments is not None: return gen_math_ops.sparse_segment_mean_with_num_segments( data=data, indices=indices, segment_ids=segment_ids, num_segments=num_segments, name=name) else: return gen_math_ops.sparse_segment_mean( data=data, indices=indices, segment_ids=segment_ids, name=name) @tf_export("sparse.segment_mean", v1=[]) def sparse_segment_mean_v2(data, indices, segment_ids, num_segments=None, name=None): r"""Computes the mean along sparse segments of a tensor. Read [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) for an explanation of segments. Like `tf.math.segment_mean`, but `segment_ids` can have rank less than `data`'s first dimension, selecting a subset of dimension 0, specified by `indices`. `segment_ids` is allowed to have missing ids, in which case the output will be zeros at those indices. In those cases `num_segments` is used to determine the size of the output. Args: data: A `Tensor` with data that will be assembled in the output. indices: A 1-D `Tensor` with indices into `data`. Has same rank as `segment_ids`. segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values should be sorted and can be repeated. num_segments: An optional int32 scalar. Indicates the size of the output `Tensor`. name: A name for the operation (optional). Returns: A `tensor` of the shape as data, except for dimension 0 which has size `k`, the number of segments specified via `num_segments` or inferred for the last element in `segments_ids`. """ return sparse_segment_mean( data, indices, segment_ids, name=name, num_segments=num_segments) @tf_export(v1=["sparse.segment_sqrt_n", "sparse_segment_sqrt_n"]) @deprecation.deprecated_endpoints("sparse_segment_sqrt_n") def sparse_segment_sqrt_n(data, indices, segment_ids, name=None, num_segments=None): r"""Computes the sum along sparse segments of a tensor divided by the sqrt(N). `N` is the size of the segment being reduced. Args: data: A `Tensor` with data that will be assembled in the output. indices: A 1-D `Tensor` with indices into `data`. Has same rank as `segment_ids`. 
segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values should be sorted and can be repeated. name: A name for the operation (optional). num_segments: An optional int32 scalar. Indicates the size of the output `Tensor`. Returns: A `tensor` of the shape as data, except for dimension 0 which has size `k`, the number of segments specified via `num_segments` or inferred for the last element in `segments_ids`. """ if num_segments is not None: return gen_math_ops.sparse_segment_sqrt_n_with_num_segments( data=data, indices=indices, segment_ids=segment_ids, num_segments=num_segments, name=name) else: return gen_math_ops.sparse_segment_sqrt_n( data=data, indices=indices, segment_ids=segment_ids, name=name) @tf_export("sparse.segment_sqrt_n", v1=[]) def sparse_segment_sqrt_n_v2(data, indices, segment_ids, num_segments=None, name=None): r"""Computes the sum along sparse segments of a tensor divided by the sqrt(N). Read [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) for an explanation of segments. Like `tf.sparse.segment_mean`, but instead of dividing by the size of the segment, `N`, divide by `sqrt(N)` instead. Args: data: A `Tensor` with data that will be assembled in the output. indices: A 1-D `Tensor` with indices into `data`. Has same rank as `segment_ids`. segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values should be sorted and can be repeated. num_segments: An optional int32 scalar. Indicates the size of the output `Tensor`. name: A name for the operation (optional). Returns: A `tensor` of the shape as data, except for dimension 0 which has size `k`, the number of segments specified via `num_segments` or inferred for the last element in `segments_ids`. """ return sparse_segment_sqrt_n( data, indices, segment_ids, name=name, num_segments=num_segments) @tf_export("tensordot", "linalg.tensordot") def tensordot(a, b, axes, name=None): r"""Tensor contraction of a and b along specified axes. Tensordot (also known as tensor contraction) sums the product of elements from `a` and `b` over the indices specified by `a_axes` and `b_axes`. The lists `a_axes` and `b_axes` specify those pairs of axes along which to contract the tensors. The axis `a_axes[i]` of `a` must have the same dimension as axis `b_axes[i]` of `b` for all `i` in `range(0, len(a_axes))`. The lists `a_axes` and `b_axes` must have identical length and consist of unique integers that specify valid axes for each of the tensors. This operation corresponds to `numpy.tensordot(a, b, axes)`. Example 1: When `a` and `b` are matrices (order 2), the case `axes = 1` is equivalent to matrix multiplication. Example 2: When `a` and `b` are matrices (order 2), the case `axes = [[1], [0]]` is equivalent to matrix multiplication. Example 3: Suppose that \\(a_{ijk}\\) and \\(b_{lmn}\\) represent two tensors of order 3. Then, `contract(a, b, [[0], [2]])` is the order 4 tensor \\(c_{jklm}\\) whose entry corresponding to the indices \\((j,k,l,m)\\) is given by: \\( c_{jklm} = \sum_i a_{ijk} b_{lmi} \\). In general, `order(c) = order(a) + order(b) - 2*len(axes[0])`. Args: a: `Tensor` of type `float32` or `float64`. b: `Tensor` with the same type as `a`. axes: Either a scalar `N`, or a list or an `int32` `Tensor` of shape [2, k]. If axes is a scalar, sum over the last N axes of a and the first N axes of b in order. 
If axes is a list or `Tensor` the first and second row contain the set of unique integers specifying axes along which the contraction is computed, for `a` and `b`, respectively. The number of axes for `a` and `b` must be equal. name: A name for the operation (optional). Returns: A `Tensor` with the same type as `a`. Raises: ValueError: If the shapes of `a`, `b`, and `axes` are incompatible. IndexError: If the values in axes exceed the rank of the corresponding tensor. """ def _tensordot_reshape(a, axes, flipped=False): """Helper method to perform transpose and reshape for contraction op. This method is helpful in reducing `math_ops.tensordot` to `math_ops.matmul` using `array_ops.transpose` and `array_ops.reshape`. The method takes a tensor and performs the correct transpose and reshape operation for a given set of indices. It returns the reshaped tensor as well as a list of indices necessary to reshape the tensor again after matrix multiplication. Args: a: `Tensor`. axes: List or `int32` `Tensor` of unique indices specifying valid axes of `a`. flipped: An optional `bool`. Defaults to `False`. If `True`, the method assumes that `a` is the second argument in the contraction operation. Returns: A tuple `(reshaped_a, free_dims, free_dims_static)` where `reshaped_a` is the tensor `a` reshaped to allow contraction via `matmul`, `free_dims` is either a list of integers or an `int32` `Tensor`, depending on whether the shape of a is fully specified, and free_dims_static is either a list of integers and None values, or None, representing the inferred static shape of the free dimensions """ if a.get_shape().is_fully_defined() and isinstance(axes, (list, tuple)): shape_a = a.get_shape().as_list() axes = [i if i >= 0 else i + len(shape_a) for i in axes] free = [i for i in xrange(len(shape_a)) if i not in axes] free_dims = [shape_a[i] for i in free] prod_free = int(np.prod([shape_a[i] for i in free])) prod_axes = int(np.prod([shape_a[i] for i in axes])) perm = list(axes) + free if flipped else free + list(axes) new_shape = [prod_axes, prod_free] if flipped else [prod_free, prod_axes] reshaped_a = array_ops.reshape(array_ops.transpose(a, perm), new_shape) return reshaped_a, free_dims, free_dims else: if a.get_shape().ndims is not None and isinstance(axes, (list, tuple)): shape_a = a.get_shape().as_list() axes = [i if i >= 0 else i + len(shape_a) for i in axes] free = [i for i in xrange(len(shape_a)) if i not in axes] axes_dims = [shape_a[i] for i in axes] free_dims = [shape_a[i] for i in free] free_dims_static = free_dims axes = ops.convert_to_tensor(axes, dtype=dtypes.int32, name="axes") free = ops.convert_to_tensor(free, dtype=dtypes.int32, name="free") shape_a = array_ops.shape(a) else: free_dims_static = None shape_a = array_ops.shape(a) rank_a = array_ops.rank(a) axes = ops.convert_to_tensor(axes, dtype=dtypes.int32, name="axes") axes = array_ops.where(axes >= 0, axes, axes + rank_a) free, _ = array_ops.setdiff1d(range(rank_a), axes) free_dims = array_ops.gather(shape_a, free) axes_dims = array_ops.gather(shape_a, axes) prod_free_dims = reduce_prod(free_dims) prod_axes_dims = reduce_prod(axes_dims) if flipped: perm = array_ops.concat([axes, free], 0) new_shape = array_ops.stack([prod_axes_dims, prod_free_dims]) else: perm = array_ops.concat([free, axes], 0) new_shape = array_ops.stack([prod_free_dims, prod_axes_dims]) reshaped_a = array_ops.reshape(array_ops.transpose(a, perm), new_shape) return reshaped_a, free_dims, free_dims_static def _tensordot_axes(a, axes): """Generates two sets of 
contraction axes for the two tensor arguments.""" a_shape = a.get_shape() if isinstance(axes, compat.integral_types): if axes < 0: raise ValueError("'axes' must be at least 0.") if a_shape.ndims is not None: if axes > a_shape.ndims: raise ValueError("'axes' must not be larger than the number of " "dimensions of tensor %s." % a) return (list(xrange(a_shape.ndims - axes, a_shape.ndims)), list(xrange(axes))) else: rank = array_ops.rank(a) return (range(rank - axes, rank, dtype=dtypes.int32), range(axes, dtype=dtypes.int32)) elif isinstance(axes, (list, tuple)): if len(axes) != 2: raise ValueError("'axes' must be an integer or have length 2.") a_axes = axes[0] b_axes = axes[1] if isinstance(a_axes, compat.integral_types) and \ isinstance(b_axes, compat.integral_types): a_axes = [a_axes] b_axes = [b_axes] if len(a_axes) != len(b_axes): raise ValueError( "Different number of contraction axes 'a' and 'b', %s != %s." % (len(a_axes), len(b_axes))) return a_axes, b_axes else: axes = ops.convert_to_tensor(axes, name="axes", dtype=dtypes.int32) return axes[0], axes[1] with ops.name_scope(name, "Tensordot", [a, b, axes]) as name: a = ops.convert_to_tensor(a, name="a") b = ops.convert_to_tensor(b, name="b") a_axes, b_axes = _tensordot_axes(a, axes) a_reshape, a_free_dims, a_free_dims_static = _tensordot_reshape(a, a_axes) b_reshape, b_free_dims, b_free_dims_static = _tensordot_reshape( b, b_axes, True) ab_matmul = matmul(a_reshape, b_reshape) if isinstance(a_free_dims, list) and isinstance(b_free_dims, list): return array_ops.reshape(ab_matmul, a_free_dims + b_free_dims, name=name) else: a_free_dims = ops.convert_to_tensor(a_free_dims, dtype=dtypes.int32) b_free_dims = ops.convert_to_tensor(b_free_dims, dtype=dtypes.int32) product = array_ops.reshape( ab_matmul, array_ops.concat([a_free_dims, b_free_dims], 0), name=name) if a_free_dims_static is not None and b_free_dims_static is not None: product.set_shape(a_free_dims_static + b_free_dims_static) return product @tf_export("math.polyval") def polyval(coeffs, x, name=None): r"""Computes the elementwise value of a polynomial. If `x` is a tensor and `coeffs` is a list n + 1 tensors, this function returns the value of the n-th order polynomial p(x) = coeffs[n-1] + coeffs[n-2] * x + ... + coeffs[0] * x**(n-1) evaluated using Horner's method, i.e. p(x) = coeffs[n-1] + x * (coeffs[n-2] + ... + x * (coeffs[1] + x * coeffs[0])) Args: coeffs: A list of `Tensor` representing the coefficients of the polynomial. x: A `Tensor` representing the variable of the polynomial. name: A name for the operation (optional). Returns: A `tensor` of the shape as the expression p(x) with usual broadcasting rules for element-wise addition and multiplication applied. @compatibility(numpy) Equivalent to numpy.polyval. @end_compatibility """ with ops.name_scope(name, "polyval", nest.flatten(coeffs) + [x]) as name: x = ops.convert_to_tensor(x, name="x") if len(coeffs) < 1: return array_ops.zeros_like(x, name=name) coeffs = [ ops.convert_to_tensor(coeff, name=("coeff_%d" % index)) for index, coeff in enumerate(coeffs) ] p = coeffs[0] for c in coeffs[1:]: p = c + p * x return p
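# --- Illustrative usage (not part of the original module) ---
# A minimal, hedged sketch of the public APIs defined above; the tensors and
# values below are arbitrary examples chosen purely for illustration.
import tensorflow as tf

# tensordot with a scalar axes=1 contracts the last axis of `a` against the
# first axis of `b`, which for rank-2 tensors is ordinary matrix multiplication.
a = tf.reshape(tf.range(6.0), [2, 3])        # shape [2, 3]
b = tf.reshape(tf.range(12.0), [3, 4])       # shape [3, 4]
c = tf.tensordot(a, b, axes=1)               # shape [2, 4]

# unsorted_segment_sqrt_n divides each segment's sum by sqrt(segment size).
data = tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
seg_ids = tf.constant([0, 0, 1])
s = tf.math.unsorted_segment_sqrt_n(data, seg_ids, num_segments=2)
# segment 0: ([1, 2] + [3, 4]) / sqrt(2); segment 1: [5, 6] / sqrt(1)

# polyval([3., 2., 1.], x) evaluates 3*x**2 + 2*x + 1 via Horner's method.
p = tf.math.polyval([3.0, 2.0, 1.0], tf.constant(2.0))   # 3*4 + 2*2 + 1 = 17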
tensorflow-master
tensorflow/python/ops/math_ops.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilities for using generic resources.""" # pylint: disable=g-bad-name from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import os from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.util import tf_should_use _Resource = collections.namedtuple("_Resource", ["handle", "create", "is_initialized"]) def register_resource(handle, create_op, is_initialized_op, is_shared=True): """Registers a resource into the appropriate collections. This makes the resource findable in either the shared or local resources collection. Args: handle: op which returns a handle for the resource. create_op: op which initializes the resource. is_initialized_op: op which returns a scalar boolean tensor of whether the resource has been initialized. is_shared: if True, the resource gets added to the shared resource collection; otherwise it gets added to the local resource collection. """ resource = _Resource(handle, create_op, is_initialized_op) if is_shared: ops.add_to_collection(ops.GraphKeys.RESOURCES, resource) else: ops.add_to_collection(ops.GraphKeys.LOCAL_RESOURCES, resource) def shared_resources(): """Returns resources visible to all tasks in the cluster.""" return ops.get_collection(ops.GraphKeys.RESOURCES) def local_resources(): """Returns resources intended to be local to this session.""" return ops.get_collection(ops.GraphKeys.LOCAL_RESOURCES) def report_uninitialized_resources(resource_list=None, name="report_uninitialized_resources"): """Returns the names of all uninitialized resources in resource_list. If the returned tensor is empty then all resources have been initialized. Args: resource_list: resources to check. If None, will use shared_resources() + local_resources(). name: name for the resource-checking op. Returns: Tensor containing names of the handles of all resources which have not yet been initialized. """ if resource_list is None: resource_list = shared_resources() + local_resources() with ops.name_scope(name): # Run all operations on CPU local_device = os.environ.get( "TF_DEVICE_FOR_UNINITIALIZED_VARIABLE_REPORTING", "/cpu:0") with ops.device(local_device): if not resource_list: # Return an empty tensor so we only need to check for returned tensor # size being 0 as an indication of model ready. return array_ops.constant([], dtype=dtypes.string) # Get a 1-D boolean tensor listing whether each resource is initialized. variables_mask = math_ops.logical_not( array_ops.stack([r.is_initialized for r in resource_list])) # Get a 1-D string tensor containing all the resource names. 
variable_names_tensor = array_ops.constant( [s.handle.name for s in resource_list]) # Return a 1-D tensor containing all the names of uninitialized resources. return array_ops.boolean_mask(variable_names_tensor, variables_mask) @tf_should_use.should_use_result def initialize_resources(resource_list, name="init"): """Initializes the resources in the given list. Args: resource_list: list of resources to initialize. name: name of the initialization op. Returns: op responsible for initializing all resources. """ if resource_list: return control_flow_ops.group(*[r.create for r in resource_list], name=name) return control_flow_ops.no_op(name=name)
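# --- Illustrative usage (not part of the original module) ---
# A hedged sketch of the registration pattern above, intended for TF1 graph
# mode. A resource variable's handle/initializer/is_initialized() stand in for
# the ops a real resource kernel would provide; that substitution is an
# assumption made purely for illustration.
import tensorflow.compat.v1 as tf

v = tf.get_variable("stand_in", shape=[], initializer=tf.zeros_initializer(),
                    use_resource=True)
register_resource(handle=v.handle,
                  create_op=v.initializer,
                  is_initialized_op=v.is_initialized())

init_op = initialize_resources(shared_resources())
uninitialized = report_uninitialized_resources()
with tf.Session() as sess:
  print(sess.run(uninitialized))  # lists the stand-in resource before init
  sess.run(init_op)
  print(sess.run(uninitialized))  # empty once every resource is initialized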
tensorflow-master
tensorflow/python/ops/resources.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Implementation of tf.metrics module.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.distribute import distribution_strategy_context from tensorflow.python.eager import context from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import confusion_matrix from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn from tensorflow.python.ops import sets from tensorflow.python.ops import sparse_ops from tensorflow.python.ops import state_ops from tensorflow.python.ops import variable_scope from tensorflow.python.ops import weights_broadcast_ops from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util.deprecation import deprecated from tensorflow.python.util.tf_export import tf_export def metric_variable(shape, dtype, validate_shape=True, name=None): """Create variable in `GraphKeys.(LOCAL|METRIC_VARIABLES)` collections. If running in a `DistributionStrategy` context, the variable will be "sync on read". This means: * The returned object will be a container with separate variables per replica of the model. * When writing to the variable, e.g. using `assign_add` in a metric update, the update will be applied to the variable local to the replica. * To get a metric's result value, we need to sum the variable values across the replicas before computing the final answer. Furthermore, the final answer should be computed once instead of in every replica. Both of these are accomplished by running the computation of the final result value inside `distribution_strategy_context.get_replica_context().merge_call(fn)`. Inside the `merge_call()`, ops are only added to the graph once and access to a sync on read variable in a computation returns the sum across all replicas. Args: shape: Shape of the created variable. dtype: Type of the created variable. validate_shape: (Optional) Whether shape validation is enabled for the created variable. name: (Optional) String name of the created variable. Returns: A (non-trainable) variable initialized to zero, or if inside a `DistributionStrategy` scope a sync on read variable container. """ # Note that synchronization "ON_READ" implies trainable=False. 
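  # With aggregation=SUM, reading this variable from a cross-replica context
  # (e.g. inside merge_call) yields the sum of the per-replica values, which is
  # exactly what the metric result computation described above expects.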
return variable_scope.variable( lambda: array_ops.zeros(shape, dtype), trainable=False, collections=[ ops.GraphKeys.LOCAL_VARIABLES, ops.GraphKeys.METRIC_VARIABLES ], validate_shape=validate_shape, synchronization=variable_scope.VariableSynchronization.ON_READ, aggregation=variable_scope.VariableAggregation.SUM, name=name) def _remove_squeezable_dimensions(predictions, labels, weights): """Squeeze or expand last dim if needed. Squeezes last dim of `predictions` or `labels` if their rank differs by 1 (using confusion_matrix.remove_squeezable_dimensions). Squeezes or expands last dim of `weights` if its rank differs by 1 from the new rank of `predictions`. If `weights` is scalar, it is kept scalar. This will use static shape if available. Otherwise, it will add graph operations, which could result in a performance hit. Args: predictions: Predicted values, a `Tensor` of arbitrary dimensions. labels: Optional label `Tensor` whose dimensions match `predictions`. weights: Optional weight scalar or `Tensor` whose dimensions match `predictions`. Returns: Tuple of `predictions`, `labels` and `weights`. Each of them possibly has the last dimension squeezed, `weights` could be extended by one dimension. """ predictions = ops.convert_to_tensor(predictions) if labels is not None: labels, predictions = confusion_matrix.remove_squeezable_dimensions( labels, predictions) predictions.get_shape().assert_is_compatible_with(labels.get_shape()) if weights is None: return predictions, labels, None weights = ops.convert_to_tensor(weights) weights_shape = weights.get_shape() weights_rank = weights_shape.ndims if weights_rank == 0: return predictions, labels, weights predictions_shape = predictions.get_shape() predictions_rank = predictions_shape.ndims if (predictions_rank is not None) and (weights_rank is not None): # Use static rank. if weights_rank - predictions_rank == 1: weights = array_ops.squeeze(weights, [-1]) elif predictions_rank - weights_rank == 1: weights = array_ops.expand_dims(weights, [-1]) else: # Use dynamic rank. weights_rank_tensor = array_ops.rank(weights) rank_diff = weights_rank_tensor - array_ops.rank(predictions) def _maybe_expand_weights(): return control_flow_ops.cond( math_ops.equal(rank_diff, -1), lambda: array_ops.expand_dims(weights, [-1]), lambda: weights) # Don't attempt squeeze if it will fail based on static check. if ((weights_rank is not None) and (not weights_shape.dims[-1].is_compatible_with(1))): maybe_squeeze_weights = lambda: weights else: maybe_squeeze_weights = lambda: array_ops.squeeze(weights, [-1]) def _maybe_adjust_weights(): return control_flow_ops.cond( math_ops.equal(rank_diff, 1), maybe_squeeze_weights, _maybe_expand_weights) # If weights are scalar, do nothing. Otherwise, try to add or remove a # dimension to match predictions. weights = control_flow_ops.cond( math_ops.equal(weights_rank_tensor, 0), lambda: weights, _maybe_adjust_weights) return predictions, labels, weights def _maybe_expand_labels(labels, predictions): """If necessary, expand `labels` along last dimension to match `predictions`. Args: labels: `Tensor` or `SparseTensor` with shape [D1, ... DN, num_labels] or [D1, ... DN]. The latter implies num_labels=1, in which case the result is an expanded `labels` with shape [D1, ... DN, 1]. predictions: `Tensor` with shape [D1, ... DN, num_classes]. Returns: `labels` with the same rank as `predictions`. Raises: ValueError: if `labels` has invalid shape. 
""" with ops.name_scope(None, 'expand_labels', (labels, predictions)) as scope: labels = sparse_tensor.convert_to_tensor_or_sparse_tensor(labels) # If sparse, expand sparse shape. if isinstance(labels, sparse_tensor.SparseTensor): return control_flow_ops.cond( math_ops.equal( array_ops.rank(predictions), array_ops.size(labels.dense_shape) + 1), lambda: sparse_ops.sparse_reshape( # pylint: disable=g-long-lambda labels, shape=array_ops.concat((labels.dense_shape, (1,)), 0), name=scope), lambda: labels) # Otherwise, try to use static shape. labels_rank = labels.get_shape().ndims if labels_rank is not None: predictions_rank = predictions.get_shape().ndims if predictions_rank is not None: if predictions_rank == labels_rank: return labels if predictions_rank == labels_rank + 1: return array_ops.expand_dims(labels, -1, name=scope) raise ValueError( 'Unexpected labels shape %s for predictions shape %s.' % (labels.get_shape(), predictions.get_shape())) # Otherwise, use dynamic shape. return control_flow_ops.cond( math_ops.equal(array_ops.rank(predictions), array_ops.rank(labels) + 1), lambda: array_ops.expand_dims(labels, -1, name=scope), lambda: labels) def _safe_scalar_div(numerator, denominator, name): """Divides two values, returning 0 if the denominator is 0. Args: numerator: A scalar `float64` `Tensor`. denominator: A scalar `float64` `Tensor`. name: Name for the returned op. Returns: 0 if `denominator` == 0, else `numerator` / `denominator` """ numerator.get_shape().with_rank_at_most(1) denominator.get_shape().with_rank_at_most(1) return math_ops.div_no_nan(numerator, denominator, name=name) def _streaming_confusion_matrix(labels, predictions, num_classes, weights=None): """Calculate a streaming confusion matrix. Calculates a confusion matrix. For estimation over a stream of data, the function creates an `update_op` operation. Args: labels: A `Tensor` of ground truth labels with shape [batch size] and of type `int32` or `int64`. The tensor will be flattened if its rank > 1. predictions: A `Tensor` of prediction results for semantic labels, whose shape is [batch size] and type `int32` or `int64`. The tensor will be flattened if its rank > 1. num_classes: The possible number of labels the prediction task can have. This value must be provided, since a confusion matrix of dimension = [num_classes, num_classes] will be allocated. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). Returns: total_cm: A `Tensor` representing the confusion matrix. update_op: An operation that increments the confusion matrix. """ # Local variable to accumulate the predictions in the confusion matrix. total_cm = metric_variable( [num_classes, num_classes], dtypes.float64, name='total_confusion_matrix') # Cast the type to int64 required by confusion_matrix_ops. predictions = math_ops.cast(predictions, dtypes.int64) labels = math_ops.cast(labels, dtypes.int64) num_classes = math_ops.cast(num_classes, dtypes.int64) # Flatten the input if its rank > 1. if predictions.get_shape().ndims > 1: predictions = array_ops.reshape(predictions, [-1]) if labels.get_shape().ndims > 1: labels = array_ops.reshape(labels, [-1]) if (weights is not None) and (weights.get_shape().ndims > 1): weights = array_ops.reshape(weights, [-1]) # Accumulate the prediction to current confusion matrix. 
current_cm = confusion_matrix.confusion_matrix( labels, predictions, num_classes, weights=weights, dtype=dtypes.float64) update_op = state_ops.assign_add(total_cm, current_cm) return total_cm, update_op def _aggregate_across_replicas(metrics_collections, metric_value_fn, *args): """Aggregate metric value across replicas.""" def fn(distribution, *a): """Call `metric_value_fn` in the correct control flow context.""" if hasattr(distribution.extended, '_outer_control_flow_context'): # If there was an outer context captured before this method was called, # then we enter that context to create the metric value op. If the # caputred context is `None`, ops.control_dependencies(None) gives the # desired behavior. Else we use `Enter` and `Exit` to enter and exit the # captured context. # This special handling is needed because sometimes the metric is created # inside a while_loop (and perhaps a TPU rewrite context). But we don't # want the value op to be evaluated every step or on the TPU. So we # create it outside so that it can be evaluated at the end on the host, # once the update ops have been evaluted. # pylint: disable=protected-access if distribution.extended._outer_control_flow_context is None: with ops.control_dependencies(None): metric_value = metric_value_fn(distribution, *a) else: distribution.extended._outer_control_flow_context.Enter() metric_value = metric_value_fn(distribution, *a) distribution.extended._outer_control_flow_context.Exit() # pylint: enable=protected-access else: metric_value = metric_value_fn(distribution, *a) if metrics_collections: ops.add_to_collections(metrics_collections, metric_value) return metric_value return distribution_strategy_context.get_replica_context().merge_call( fn, args=args) @tf_export(v1=['metrics.mean']) def mean(values, weights=None, metrics_collections=None, updates_collections=None, name=None): """Computes the (weighted) mean of the given values. The `mean` function creates two local variables, `total` and `count` that are used to compute the average of `values`. This average is ultimately returned as `mean` which is an idempotent operation that simply divides `total` by `count`. For estimation of the metric over a stream of data, the function creates an `update_op` operation that updates these variables and returns the `mean`. `update_op` increments `total` with the reduced sum of the product of `values` and `weights`, and it increments `count` with the reduced sum of `weights`. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: values: A `Tensor` of arbitrary dimensions. weights: Optional `Tensor` whose rank is either 0, or the same rank as `values`, and must be broadcastable to `values` (i.e., all dimensions must be either `1`, or the same as the corresponding `values` dimension). metrics_collections: An optional list of collections that `mean` should be added to. updates_collections: An optional list of collections that `update_op` should be added to. name: An optional variable_scope name. Returns: mean: A `Tensor` representing the current mean, the value of `total` divided by `count`. update_op: An operation that increments the `total` and `count` variables appropriately and whose value matches `mean_value`. Raises: ValueError: If `weights` is not `None` and its shape doesn't match `values`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled. 
""" if context.executing_eagerly(): raise RuntimeError('tf.metrics.mean is not supported when eager execution ' 'is enabled.') with variable_scope.variable_scope(name, 'mean', (values, weights)): values = math_ops.cast(values, dtypes.float32) total = metric_variable([], dtypes.float32, name='total') count = metric_variable([], dtypes.float32, name='count') if weights is None: num_values = math_ops.cast(array_ops.size(values), dtypes.float32) else: values, _, weights = _remove_squeezable_dimensions( predictions=values, labels=None, weights=weights) weights = weights_broadcast_ops.broadcast_weights( math_ops.cast(weights, dtypes.float32), values) values = math_ops.multiply(values, weights) num_values = math_ops.reduce_sum(weights) update_total_op = state_ops.assign_add(total, math_ops.reduce_sum(values)) with ops.control_dependencies([values]): update_count_op = state_ops.assign_add(count, num_values) def compute_mean(_, t, c): return math_ops.div_no_nan(t, math_ops.maximum(c, 0), name='value') mean_t = _aggregate_across_replicas( metrics_collections, compute_mean, total, count) update_op = math_ops.div_no_nan( update_total_op, math_ops.maximum(update_count_op, 0), name='update_op') if updates_collections: ops.add_to_collections(updates_collections, update_op) return mean_t, update_op @tf_export(v1=['metrics.accuracy']) def accuracy(labels, predictions, weights=None, metrics_collections=None, updates_collections=None, name=None): """Calculates how often `predictions` matches `labels`. The `accuracy` function creates two local variables, `total` and `count` that are used to compute the frequency with which `predictions` matches `labels`. This frequency is ultimately returned as `accuracy`: an idempotent operation that simply divides `total` by `count`. For estimation of the metric over a stream of data, the function creates an `update_op` operation that updates these variables and returns the `accuracy`. Internally, an `is_correct` operation computes a `Tensor` with elements 1.0 where the corresponding elements of `predictions` and `labels` match and 0.0 otherwise. Then `update_op` increments `total` with the reduced sum of the product of `weights` and `is_correct`, and it increments `count` with the reduced sum of `weights`. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: The ground truth values, a `Tensor` whose shape matches `predictions`. predictions: The predicted values, a `Tensor` of any shape. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). metrics_collections: An optional list of collections that `accuracy` should be added to. updates_collections: An optional list of collections that `update_op` should be added to. name: An optional variable_scope name. Returns: accuracy: A `Tensor` representing the accuracy, the value of `total` divided by `count`. update_op: An operation that increments the `total` and `count` variables appropriately and whose value matches `accuracy`. Raises: ValueError: If `predictions` and `labels` have mismatched shapes, or if `weights` is not `None` and its shape doesn't match `predictions`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled. 
""" if context.executing_eagerly(): raise RuntimeError('tf.metrics.accuracy is not supported when eager ' 'execution is enabled.') predictions, labels, weights = _remove_squeezable_dimensions( predictions=predictions, labels=labels, weights=weights) predictions.get_shape().assert_is_compatible_with(labels.get_shape()) if labels.dtype != predictions.dtype: predictions = math_ops.cast(predictions, labels.dtype) is_correct = math_ops.cast( math_ops.equal(predictions, labels), dtypes.float32) return mean(is_correct, weights, metrics_collections, updates_collections, name or 'accuracy') def _confusion_matrix_at_thresholds(labels, predictions, thresholds, weights=None, includes=None): """Computes true_positives, false_negatives, true_negatives, false_positives. This function creates up to four local variables, `true_positives`, `true_negatives`, `false_positives` and `false_negatives`. `true_positive[i]` is defined as the total weight of values in `predictions` above `thresholds[i]` whose corresponding entry in `labels` is `True`. `false_negatives[i]` is defined as the total weight of values in `predictions` at most `thresholds[i]` whose corresponding entry in `labels` is `True`. `true_negatives[i]` is defined as the total weight of values in `predictions` at most `thresholds[i]` whose corresponding entry in `labels` is `False`. `false_positives[i]` is defined as the total weight of values in `predictions` above `thresholds[i]` whose corresponding entry in `labels` is `False`. For estimation of these metrics over a stream of data, for each metric the function respectively creates an `update_op` operation that updates the variable and returns its value. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: A `Tensor` whose shape matches `predictions`. Will be cast to `bool`. predictions: A floating point `Tensor` of arbitrary shape and whose values are in the range `[0, 1]`. thresholds: A python list or tuple of float thresholds in `[0, 1]`. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). includes: Tuple of keys to return, from 'tp', 'fn', 'tn', fp'. If `None`, default to all four. Returns: values: Dict of variables of shape `[len(thresholds)]`. Keys are from `includes`. update_ops: Dict of operations that increments the `values`. Keys are from `includes`. Raises: ValueError: If `predictions` and `labels` have mismatched shapes, or if `weights` is not `None` and its shape doesn't match `predictions`, or if `includes` contains invalid keys. """ all_includes = ('tp', 'fn', 'tn', 'fp') if includes is None: includes = all_includes else: for include in includes: if include not in all_includes: raise ValueError('Invalid key: %s.' % include) with ops.control_dependencies([ check_ops.assert_greater_equal( predictions, math_ops.cast(0.0, dtype=predictions.dtype), message='predictions must be in [0, 1]'), check_ops.assert_less_equal( predictions, math_ops.cast(1.0, dtype=predictions.dtype), message='predictions must be in [0, 1]') ]): predictions, labels, weights = _remove_squeezable_dimensions( predictions=math_ops.cast(predictions, dtypes.float32), labels=math_ops.cast(labels, dtype=dtypes.bool), weights=weights) num_thresholds = len(thresholds) # Reshape predictions and labels. 
predictions_2d = array_ops.reshape(predictions, [-1, 1]) labels_2d = array_ops.reshape( math_ops.cast(labels, dtype=dtypes.bool), [1, -1]) # Use static shape if known. num_predictions = predictions_2d.get_shape().as_list()[0] # Otherwise use dynamic shape. if num_predictions is None: num_predictions = array_ops.shape(predictions_2d)[0] thresh_tiled = array_ops.tile( array_ops.expand_dims(array_ops.constant(thresholds), [1]), array_ops.stack([1, num_predictions])) # Tile the predictions after thresholding them across different thresholds. pred_is_pos = math_ops.greater( array_ops.tile(array_ops.transpose(predictions_2d), [num_thresholds, 1]), thresh_tiled) if ('fn' in includes) or ('tn' in includes): pred_is_neg = math_ops.logical_not(pred_is_pos) # Tile labels by number of thresholds label_is_pos = array_ops.tile(labels_2d, [num_thresholds, 1]) if ('fp' in includes) or ('tn' in includes): label_is_neg = math_ops.logical_not(label_is_pos) if weights is not None: weights = weights_broadcast_ops.broadcast_weights( math_ops.cast(weights, dtypes.float32), predictions) weights_tiled = array_ops.tile( array_ops.reshape(weights, [1, -1]), [num_thresholds, 1]) thresh_tiled.get_shape().assert_is_compatible_with( weights_tiled.get_shape()) else: weights_tiled = None values = {} update_ops = {} if 'tp' in includes: true_p = metric_variable( [num_thresholds], dtypes.float32, name='true_positives') is_true_positive = math_ops.cast( math_ops.logical_and(label_is_pos, pred_is_pos), dtypes.float32) if weights_tiled is not None: is_true_positive *= weights_tiled update_ops['tp'] = state_ops.assign_add(true_p, math_ops.reduce_sum( is_true_positive, 1)) values['tp'] = true_p if 'fn' in includes: false_n = metric_variable( [num_thresholds], dtypes.float32, name='false_negatives') is_false_negative = math_ops.cast( math_ops.logical_and(label_is_pos, pred_is_neg), dtypes.float32) if weights_tiled is not None: is_false_negative *= weights_tiled update_ops['fn'] = state_ops.assign_add(false_n, math_ops.reduce_sum( is_false_negative, 1)) values['fn'] = false_n if 'tn' in includes: true_n = metric_variable( [num_thresholds], dtypes.float32, name='true_negatives') is_true_negative = math_ops.cast( math_ops.logical_and(label_is_neg, pred_is_neg), dtypes.float32) if weights_tiled is not None: is_true_negative *= weights_tiled update_ops['tn'] = state_ops.assign_add(true_n, math_ops.reduce_sum( is_true_negative, 1)) values['tn'] = true_n if 'fp' in includes: false_p = metric_variable( [num_thresholds], dtypes.float32, name='false_positives') is_false_positive = math_ops.cast( math_ops.logical_and(label_is_neg, pred_is_pos), dtypes.float32) if weights_tiled is not None: is_false_positive *= weights_tiled update_ops['fp'] = state_ops.assign_add(false_p, math_ops.reduce_sum( is_false_positive, 1)) values['fp'] = false_p return values, update_ops def _aggregate_variable(v, collections): f = lambda distribution, value: distribution.extended.read_var(value) return _aggregate_across_replicas(collections, f, v) @tf_export(v1=['metrics.auc']) def auc(labels, predictions, weights=None, num_thresholds=200, metrics_collections=None, updates_collections=None, curve='ROC', name=None, summation_method='trapezoidal', thresholds=None): """Computes the approximate AUC via a Riemann sum. The `auc` function creates four local variables, `true_positives`, `true_negatives`, `false_positives` and `false_negatives` that are used to compute the AUC. 
To discretize the AUC curve, a linearly spaced set of thresholds is used to compute pairs of recall and precision values. The area under the ROC-curve is therefore computed using the height of the recall values by the false positive rate, while the area under the PR-curve is the computed using the height of the precision values by the recall. This value is ultimately returned as `auc`, an idempotent operation that computes the area under a discretized curve of precision versus recall values (computed using the aforementioned variables). The `num_thresholds` variable controls the degree of discretization with larger numbers of thresholds more closely approximating the true AUC. The quality of the approximation may vary dramatically depending on `num_thresholds`. For best results, `predictions` should be distributed approximately uniformly in the range [0, 1] and not peaked around 0 or 1. The quality of the AUC approximation may be poor if this is not the case. Setting `summation_method` to 'minoring' or 'majoring' can help quantify the error in the approximation by providing lower or upper bound estimate of the AUC. The `thresholds` parameter can be used to manually specify thresholds which split the predictions more evenly. For estimation of the metric over a stream of data, the function creates an `update_op` operation that updates these variables and returns the `auc`. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: A `Tensor` whose shape matches `predictions`. Will be cast to `bool`. predictions: A floating point `Tensor` of arbitrary shape and whose values are in the range `[0, 1]`. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). num_thresholds: The number of thresholds to use when discretizing the roc curve. metrics_collections: An optional list of collections that `auc` should be added to. updates_collections: An optional list of collections that `update_op` should be added to. curve: Specifies the name of the curve to be computed, 'ROC' [default] or 'PR' for the Precision-Recall-curve. name: An optional variable_scope name. summation_method: Specifies the Riemann summation method used (https://en.wikipedia.org/wiki/Riemann_sum): 'trapezoidal' [default] that applies the trapezoidal rule; 'careful_interpolation', a variant of it differing only by a more correct interpolation scheme for PR-AUC - interpolating (true/false) positives but not the ratio that is precision; 'minoring' that applies left summation for increasing intervals and right summation for decreasing intervals; 'majoring' that does the opposite. Note that 'careful_interpolation' is strictly preferred to 'trapezoidal' (to be deprecated soon) as it applies the same method for ROC, and a better one (see Davis & Goadrich 2006 for details) for the PR curve. thresholds: An optional list of floating point values to use as the thresholds for discretizing the curve. If set, the `num_thresholds` parameter is ignored. Values should be in [0, 1]. Endpoint thresholds equal to {-epsilon, 1+epsilon} for a small positive epsilon value will be automatically included with these to correctly handle predictions equal to exactly 0 or 1. Returns: auc: A scalar `Tensor` representing the current area-under-curve. 
update_op: An operation that increments the `true_positives`, `true_negatives`, `false_positives` and `false_negatives` variables appropriately and whose value matches `auc`. Raises: ValueError: If `predictions` and `labels` have mismatched shapes, or if `weights` is not `None` and its shape doesn't match `predictions`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled. """ if context.executing_eagerly(): raise RuntimeError('tf.metrics.auc is not supported when eager execution ' 'is enabled.') with variable_scope.variable_scope(name, 'auc', (labels, predictions, weights)): if curve != 'ROC' and curve != 'PR': raise ValueError('curve must be either ROC or PR, %s unknown' % (curve)) kepsilon = 1e-7 # To account for floating point imprecisions. if thresholds is not None: # If specified, use the supplied thresholds. thresholds = sorted(thresholds) num_thresholds = len(thresholds) + 2 else: # Otherwise, linearly interpolate (num_thresholds - 2) thresholds in # (0, 1). thresholds = [(i + 1) * 1.0 / (num_thresholds - 1) for i in range(num_thresholds - 2)] # Add an endpoint "threshold" below zero and above one for either threshold # method. thresholds = [0.0 - kepsilon] + thresholds + [1.0 + kepsilon] values, update_ops = _confusion_matrix_at_thresholds( labels, predictions, thresholds, weights) # Add epsilons to avoid dividing by 0. epsilon = 1.0e-6 def interpolate_pr_auc(tp, fp, fn): """Interpolation formula inspired by section 4 of Davis & Goadrich 2006. Note here we derive & use a closed formula not present in the paper - as follows: Modeling all of TP (true positive weight), FP (false positive weight) and their sum P = TP + FP (positive weight) as varying linearly within each interval [A, B] between successive thresholds, we get Precision = (TP_A + slope * (P - P_A)) / P with slope = dTP / dP = (TP_B - TP_A) / (P_B - P_A). The area within the interval is thus (slope / total_pos_weight) times int_A^B{Precision.dP} = int_A^B{(TP_A + slope * (P - P_A)) * dP / P} int_A^B{Precision.dP} = int_A^B{slope * dP + intercept * dP / P} where intercept = TP_A - slope * P_A = TP_B - slope * P_B, resulting in int_A^B{Precision.dP} = TP_B - TP_A + intercept * log(P_B / P_A) Bringing back the factor (slope / total_pos_weight) we'd put aside, we get slope * [dTP + intercept * log(P_B / P_A)] / total_pos_weight where dTP == TP_B - TP_A. Note that when P_A == 0 the above calculation simplifies into int_A^B{Precision.dTP} = int_A^B{slope * dTP} = slope * (TP_B - TP_A) which is really equivalent to imputing constant precision throughout the first bucket having >0 true positives. Args: tp: true positive counts fp: false positive counts fn: false negative counts Returns: pr_auc: an approximation of the area under the P-R curve. 
""" dtp = tp[:num_thresholds - 1] - tp[1:] p = tp + fp prec_slope = math_ops.div_no_nan( dtp, math_ops.maximum(p[:num_thresholds - 1] - p[1:], 0), name='prec_slope') intercept = tp[1:] - math_ops.multiply(prec_slope, p[1:]) safe_p_ratio = array_ops.where( math_ops.logical_and(p[:num_thresholds - 1] > 0, p[1:] > 0), math_ops.div_no_nan( p[:num_thresholds - 1], math_ops.maximum(p[1:], 0), name='recall_relative_ratio'), array_ops.ones_like(p[1:])) return math_ops.reduce_sum( math_ops.div_no_nan( prec_slope * (dtp + intercept * math_ops.log(safe_p_ratio)), math_ops.maximum(tp[1:] + fn[1:], 0), name='pr_auc_increment'), name='interpolate_pr_auc') def compute_auc(tp, fn, tn, fp, name): """Computes the roc-auc or pr-auc based on confusion counts.""" if curve == 'PR': if summation_method == 'trapezoidal': logging.warning( 'Trapezoidal rule is known to produce incorrect PR-AUCs; ' 'please switch to "careful_interpolation" instead.') elif summation_method == 'careful_interpolation': # This one is a bit tricky and is handled separately. return interpolate_pr_auc(tp, fp, fn) rec = math_ops.div(tp + epsilon, tp + fn + epsilon) if curve == 'ROC': fp_rate = math_ops.div(fp, fp + tn + epsilon) x = fp_rate y = rec else: # curve == 'PR'. prec = math_ops.div(tp + epsilon, tp + fp + epsilon) x = rec y = prec if summation_method in ('trapezoidal', 'careful_interpolation'): # Note that the case ('PR', 'careful_interpolation') has been handled # above. return math_ops.reduce_sum( math_ops.multiply(x[:num_thresholds - 1] - x[1:], (y[:num_thresholds - 1] + y[1:]) / 2.), name=name) elif summation_method == 'minoring': return math_ops.reduce_sum( math_ops.multiply(x[:num_thresholds - 1] - x[1:], math_ops.minimum(y[:num_thresholds - 1], y[1:])), name=name) elif summation_method == 'majoring': return math_ops.reduce_sum( math_ops.multiply(x[:num_thresholds - 1] - x[1:], math_ops.maximum(y[:num_thresholds - 1], y[1:])), name=name) else: raise ValueError('Invalid summation_method: %s' % summation_method) # sum up the areas of all the trapeziums def compute_auc_value(_, values): return compute_auc(values['tp'], values['fn'], values['tn'], values['fp'], 'value') auc_value = _aggregate_across_replicas( metrics_collections, compute_auc_value, values) update_op = compute_auc(update_ops['tp'], update_ops['fn'], update_ops['tn'], update_ops['fp'], 'update_op') if updates_collections: ops.add_to_collections(updates_collections, update_op) return auc_value, update_op @tf_export(v1=['metrics.mean_absolute_error']) def mean_absolute_error(labels, predictions, weights=None, metrics_collections=None, updates_collections=None, name=None): """Computes the mean absolute error between the labels and predictions. The `mean_absolute_error` function creates two local variables, `total` and `count` that are used to compute the mean absolute error. This average is weighted by `weights`, and it is ultimately returned as `mean_absolute_error`: an idempotent operation that simply divides `total` by `count`. For estimation of the metric over a stream of data, the function creates an `update_op` operation that updates these variables and returns the `mean_absolute_error`. Internally, an `absolute_errors` operation computes the absolute value of the differences between `predictions` and `labels`. Then `update_op` increments `total` with the reduced sum of the product of `weights` and `absolute_errors`, and it increments `count` with the reduced sum of `weights` If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. 
Args: labels: A `Tensor` of the same shape as `predictions`. predictions: A `Tensor` of arbitrary shape. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). metrics_collections: An optional list of collections that `mean_absolute_error` should be added to. updates_collections: An optional list of collections that `update_op` should be added to. name: An optional variable_scope name. Returns: mean_absolute_error: A `Tensor` representing the current mean, the value of `total` divided by `count`. update_op: An operation that increments the `total` and `count` variables appropriately and whose value matches `mean_absolute_error`. Raises: ValueError: If `predictions` and `labels` have mismatched shapes, or if `weights` is not `None` and its shape doesn't match `predictions`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled. """ if context.executing_eagerly(): raise RuntimeError('tf.metrics.mean_absolute_error is not supported ' 'when eager execution is enabled.') predictions, labels, weights = _remove_squeezable_dimensions( predictions=predictions, labels=labels, weights=weights) absolute_errors = math_ops.abs(predictions - labels) return mean(absolute_errors, weights, metrics_collections, updates_collections, name or 'mean_absolute_error') @tf_export(v1=['metrics.mean_cosine_distance']) def mean_cosine_distance(labels, predictions, dim, weights=None, metrics_collections=None, updates_collections=None, name=None): """Computes the cosine distance between the labels and predictions. The `mean_cosine_distance` function creates two local variables, `total` and `count` that are used to compute the average cosine distance between `predictions` and `labels`. This average is weighted by `weights`, and it is ultimately returned as `mean_distance`, which is an idempotent operation that simply divides `total` by `count`. For estimation of the metric over a stream of data, the function creates an `update_op` operation that updates these variables and returns the `mean_distance`. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: A `Tensor` of arbitrary shape. predictions: A `Tensor` of the same shape as `labels`. dim: The dimension along which the cosine distance is computed. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). Also, dimension `dim` must be `1`. metrics_collections: An optional list of collections that the metric value variable should be added to. updates_collections: An optional list of collections that the metric update ops should be added to. name: An optional variable_scope name. Returns: mean_distance: A `Tensor` representing the current mean, the value of `total` divided by `count`. update_op: An operation that increments the `total` and `count` variables appropriately. Raises: ValueError: If `predictions` and `labels` have mismatched shapes, or if `weights` is not `None` and its shape doesn't match `predictions`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled. 
""" if context.executing_eagerly(): raise RuntimeError('tf.metrics.mean_cosine_distance is not supported when ' 'eager execution is enabled.') predictions, labels, weights = _remove_squeezable_dimensions( predictions=predictions, labels=labels, weights=weights) radial_diffs = math_ops.multiply(predictions, labels) radial_diffs = math_ops.reduce_sum( radial_diffs, axis=[ dim, ], keepdims=True) mean_distance, update_op = mean(radial_diffs, weights, None, None, name or 'mean_cosine_distance') mean_distance = math_ops.subtract(1.0, mean_distance) update_op = math_ops.subtract(1.0, update_op) if metrics_collections: ops.add_to_collections(metrics_collections, mean_distance) if updates_collections: ops.add_to_collections(updates_collections, update_op) return mean_distance, update_op @tf_export(v1=['metrics.mean_per_class_accuracy']) def mean_per_class_accuracy(labels, predictions, num_classes, weights=None, metrics_collections=None, updates_collections=None, name=None): """Calculates the mean of the per-class accuracies. Calculates the accuracy for each class, then takes the mean of that. For estimation of the metric over a stream of data, the function creates an `update_op` operation that updates the accuracy of each class and returns them. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: A `Tensor` of ground truth labels with shape [batch size] and of type `int32` or `int64`. The tensor will be flattened if its rank > 1. predictions: A `Tensor` of prediction results for semantic labels, whose shape is [batch size] and type `int32` or `int64`. The tensor will be flattened if its rank > 1. num_classes: The possible number of labels the prediction task can have. This value must be provided, since two variables with shape = [num_classes] will be allocated. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). metrics_collections: An optional list of collections that `mean_per_class_accuracy' should be added to. updates_collections: An optional list of collections `update_op` should be added to. name: An optional variable_scope name. Returns: mean_accuracy: A `Tensor` representing the mean per class accuracy. update_op: An operation that updates the accuracy tensor. Raises: ValueError: If `predictions` and `labels` have mismatched shapes, or if `weights` is not `None` and its shape doesn't match `predictions`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled. """ if context.executing_eagerly(): raise RuntimeError('tf.metrics.mean_per_class_accuracy is not supported ' 'when eager execution is enabled.') with variable_scope.variable_scope(name, 'mean_accuracy', (predictions, labels, weights)): labels = math_ops.cast(labels, dtypes.int64) # Flatten the input if its rank > 1. if labels.get_shape().ndims > 1: labels = array_ops.reshape(labels, [-1]) if predictions.get_shape().ndims > 1: predictions = array_ops.reshape(predictions, [-1]) # Check if shape is compatible. 
predictions.get_shape().assert_is_compatible_with(labels.get_shape()) total = metric_variable([num_classes], dtypes.float32, name='total') count = metric_variable([num_classes], dtypes.float32, name='count') ones = array_ops.ones([array_ops.size(labels)], dtypes.float32) if labels.dtype != predictions.dtype: predictions = math_ops.cast(predictions, labels.dtype) is_correct = math_ops.cast( math_ops.equal(predictions, labels), dtypes.float32) if weights is not None: if weights.get_shape().ndims > 1: weights = array_ops.reshape(weights, [-1]) weights = math_ops.cast(weights, dtypes.float32) is_correct *= weights ones *= weights update_total_op = state_ops.scatter_add(total, labels, ones) update_count_op = state_ops.scatter_add(count, labels, is_correct) def compute_mean_accuracy(_, count, total): per_class_accuracy = math_ops.div_no_nan( count, math_ops.maximum(total, 0), name=None) mean_accuracy_v = math_ops.reduce_mean( per_class_accuracy, name='mean_accuracy') return mean_accuracy_v mean_accuracy_v = _aggregate_across_replicas( metrics_collections, compute_mean_accuracy, count, total) update_op = math_ops.div_no_nan( update_count_op, math_ops.maximum(update_total_op, 0), name='update_op') if updates_collections: ops.add_to_collections(updates_collections, update_op) return mean_accuracy_v, update_op @tf_export(v1=['metrics.mean_iou']) def mean_iou(labels, predictions, num_classes, weights=None, metrics_collections=None, updates_collections=None, name=None): """Calculate per-step mean Intersection-Over-Union (mIOU). Mean Intersection-Over-Union is a common evaluation metric for semantic image segmentation, which first computes the IOU for each semantic class and then computes the average over classes. IOU is defined as follows: IOU = true_positive / (true_positive + false_positive + false_negative). The predictions are accumulated in a confusion matrix, weighted by `weights`, and mIOU is then calculated from it. For estimation of the metric over a stream of data, the function creates an `update_op` operation that updates these variables and returns the `mean_iou`. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: A `Tensor` of ground truth labels with shape [batch size] and of type `int32` or `int64`. The tensor will be flattened if its rank > 1. predictions: A `Tensor` of prediction results for semantic labels, whose shape is [batch size] and type `int32` or `int64`. The tensor will be flattened if its rank > 1. num_classes: The possible number of labels the prediction task can have. This value must be provided, since a confusion matrix of dimension = [num_classes, num_classes] will be allocated. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). metrics_collections: An optional list of collections that `mean_iou` should be added to. updates_collections: An optional list of collections `update_op` should be added to. name: An optional variable_scope name. Returns: mean_iou: A `Tensor` representing the mean intersection-over-union. update_op: An operation that increments the confusion matrix. Raises: ValueError: If `predictions` and `labels` have mismatched shapes, or if `weights` is not `None` and its shape doesn't match `predictions`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled. 
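  Example usage (a minimal sketch assuming TF 1.x graph mode and a
  `tf.Session`; the class IDs below are illustrative only):

    import tensorflow as tf

    labels = tf.constant([0, 1, 1, 2], dtype=tf.int64)
    predictions = tf.constant([0, 1, 2, 2], dtype=tf.int64)
    miou, update_op = tf.metrics.mean_iou(labels, predictions, num_classes=3)

    with tf.Session() as sess:
      sess.run(tf.local_variables_initializer())  # confusion-matrix variable
      sess.run(update_op)    # accumulate this batch into the confusion matrix
      print(sess.run(miou))  # mIOU computed from the accumulated matrix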
""" if context.executing_eagerly(): raise RuntimeError('tf.metrics.mean_iou is not supported when ' 'eager execution is enabled.') with variable_scope.variable_scope(name, 'mean_iou', (predictions, labels, weights)): # Check if shape is compatible. predictions.get_shape().assert_is_compatible_with(labels.get_shape()) total_cm, update_op = _streaming_confusion_matrix(labels, predictions, num_classes, weights) def compute_mean_iou(_, total_cm): """Compute the mean intersection-over-union via the confusion matrix.""" sum_over_row = math_ops.cast( math_ops.reduce_sum(total_cm, 0), dtypes.float32) sum_over_col = math_ops.cast( math_ops.reduce_sum(total_cm, 1), dtypes.float32) cm_diag = math_ops.cast(array_ops.diag_part(total_cm), dtypes.float32) denominator = sum_over_row + sum_over_col - cm_diag # The mean is only computed over classes that appear in the # label or prediction tensor. If the denominator is 0, we need to # ignore the class. num_valid_entries = math_ops.reduce_sum( math_ops.cast( math_ops.not_equal(denominator, 0), dtype=dtypes.float32)) # If the value of the denominator is 0, set it to 1 to avoid # zero division. denominator = array_ops.where( math_ops.greater(denominator, 0), denominator, array_ops.ones_like(denominator)) iou = math_ops.div(cm_diag, denominator) # If the number of valid entries is 0 (no classes) we return 0. result = array_ops.where( math_ops.greater(num_valid_entries, 0), math_ops.reduce_sum(iou, name='mean_iou') / num_valid_entries, 0) return result # TODO(priyag): Use outside_compilation if in TPU context. mean_iou_v = _aggregate_across_replicas( metrics_collections, compute_mean_iou, total_cm) if updates_collections: ops.add_to_collections(updates_collections, update_op) return mean_iou_v, update_op @tf_export(v1=['metrics.mean_relative_error']) def mean_relative_error(labels, predictions, normalizer, weights=None, metrics_collections=None, updates_collections=None, name=None): """Computes the mean relative error by normalizing with the given values. The `mean_relative_error` function creates two local variables, `total` and `count` that are used to compute the mean relative absolute error. This average is weighted by `weights`, and it is ultimately returned as `mean_relative_error`: an idempotent operation that simply divides `total` by `count`. For estimation of the metric over a stream of data, the function creates an `update_op` operation that updates these variables and returns the `mean_reative_error`. Internally, a `relative_errors` operation divides the absolute value of the differences between `predictions` and `labels` by the `normalizer`. Then `update_op` increments `total` with the reduced sum of the product of `weights` and `relative_errors`, and it increments `count` with the reduced sum of `weights`. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: A `Tensor` of the same shape as `predictions`. predictions: A `Tensor` of arbitrary shape. normalizer: A `Tensor` of the same shape as `predictions`. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). metrics_collections: An optional list of collections that `mean_relative_error` should be added to. updates_collections: An optional list of collections that `update_op` should be added to. name: An optional variable_scope name. 
Returns: mean_relative_error: A `Tensor` representing the current mean, the value of `total` divided by `count`. update_op: An operation that increments the `total` and `count` variables appropriately and whose value matches `mean_relative_error`. Raises: ValueError: If `predictions` and `labels` have mismatched shapes, or if `weights` is not `None` and its shape doesn't match `predictions`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled. """ if context.executing_eagerly(): raise RuntimeError('tf.metrics.mean_relative_error is not supported when ' 'eager execution is enabled.') predictions, labels, weights = _remove_squeezable_dimensions( predictions=predictions, labels=labels, weights=weights) predictions, normalizer = confusion_matrix.remove_squeezable_dimensions( predictions, normalizer) predictions.get_shape().assert_is_compatible_with(normalizer.get_shape()) relative_errors = array_ops.where( math_ops.equal(normalizer, 0.0), array_ops.zeros_like(labels), math_ops.div(math_ops.abs(labels - predictions), normalizer)) return mean(relative_errors, weights, metrics_collections, updates_collections, name or 'mean_relative_error') @tf_export(v1=['metrics.mean_squared_error']) def mean_squared_error(labels, predictions, weights=None, metrics_collections=None, updates_collections=None, name=None): """Computes the mean squared error between the labels and predictions. The `mean_squared_error` function creates two local variables, `total` and `count` that are used to compute the mean squared error. This average is weighted by `weights`, and it is ultimately returned as `mean_squared_error`: an idempotent operation that simply divides `total` by `count`. For estimation of the metric over a stream of data, the function creates an `update_op` operation that updates these variables and returns the `mean_squared_error`. Internally, a `squared_error` operation computes the element-wise square of the difference between `predictions` and `labels`. Then `update_op` increments `total` with the reduced sum of the product of `weights` and `squared_error`, and it increments `count` with the reduced sum of `weights`. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: A `Tensor` of the same shape as `predictions`. predictions: A `Tensor` of arbitrary shape. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). metrics_collections: An optional list of collections that `mean_squared_error` should be added to. updates_collections: An optional list of collections that `update_op` should be added to. name: An optional variable_scope name. Returns: mean_squared_error: A `Tensor` representing the current mean, the value of `total` divided by `count`. update_op: An operation that increments the `total` and `count` variables appropriately and whose value matches `mean_squared_error`. Raises: ValueError: If `predictions` and `labels` have mismatched shapes, or if `weights` is not `None` and its shape doesn't match `predictions`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled. 
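  Example usage (a minimal sketch assuming TF 1.x graph mode and a
  `tf.Session`; the constants below are illustrative only):

    import tensorflow as tf

    labels = tf.constant([1.0, 2.0, 3.0])
    predictions = tf.constant([1.0, 2.5, 2.0])
    mse, update_op = tf.metrics.mean_squared_error(labels, predictions)

    with tf.Session() as sess:
      sess.run(tf.local_variables_initializer())  # `total`/`count` are local
      sess.run(update_op)                         # accumulate this batch
      print(sess.run(mse))                        # running mean squared error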
""" if context.executing_eagerly(): raise RuntimeError('tf.metrics.mean_squared_error is not supported when ' 'eager execution is enabled.') predictions, labels, weights = _remove_squeezable_dimensions( predictions=predictions, labels=labels, weights=weights) squared_error = math_ops.squared_difference(labels, predictions) return mean(squared_error, weights, metrics_collections, updates_collections, name or 'mean_squared_error') @tf_export(v1=['metrics.mean_tensor']) def mean_tensor(values, weights=None, metrics_collections=None, updates_collections=None, name=None): """Computes the element-wise (weighted) mean of the given tensors. In contrast to the `mean` function which returns a scalar with the mean, this function returns an average tensor with the same shape as the input tensors. The `mean_tensor` function creates two local variables, `total_tensor` and `count_tensor` that are used to compute the average of `values`. This average is ultimately returned as `mean` which is an idempotent operation that simply divides `total` by `count`. For estimation of the metric over a stream of data, the function creates an `update_op` operation that updates these variables and returns the `mean`. `update_op` increments `total` with the reduced sum of the product of `values` and `weights`, and it increments `count` with the reduced sum of `weights`. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: values: A `Tensor` of arbitrary dimensions. weights: Optional `Tensor` whose rank is either 0, or the same rank as `values`, and must be broadcastable to `values` (i.e., all dimensions must be either `1`, or the same as the corresponding `values` dimension). metrics_collections: An optional list of collections that `mean` should be added to. updates_collections: An optional list of collections that `update_op` should be added to. name: An optional variable_scope name. Returns: mean: A float `Tensor` representing the current mean, the value of `total` divided by `count`. update_op: An operation that increments the `total` and `count` variables appropriately and whose value matches `mean_value`. Raises: ValueError: If `weights` is not `None` and its shape doesn't match `values`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled. 
""" if context.executing_eagerly(): raise RuntimeError('tf.metrics.mean_tensor is not supported when ' 'eager execution is enabled.') with variable_scope.variable_scope(name, 'mean', (values, weights)): values = math_ops.cast(values, dtypes.float32) total = metric_variable( values.get_shape(), dtypes.float32, name='total_tensor') count = metric_variable( values.get_shape(), dtypes.float32, name='count_tensor') num_values = array_ops.ones_like(values) if weights is not None: values, _, weights = _remove_squeezable_dimensions( predictions=values, labels=None, weights=weights) weights = weights_broadcast_ops.broadcast_weights( math_ops.cast(weights, dtypes.float32), values) values = math_ops.multiply(values, weights) num_values = math_ops.multiply(num_values, weights) update_total_op = state_ops.assign_add(total, values) with ops.control_dependencies([values]): update_count_op = state_ops.assign_add(count, num_values) compute_mean = lambda _, t, c: math_ops.div_no_nan( # pylint: disable=g-long-lambda t, math_ops.maximum(c, 0), name='value') mean_t = _aggregate_across_replicas( metrics_collections, compute_mean, total, count) update_op = math_ops.div_no_nan( update_total_op, math_ops.maximum(update_count_op, 0), name='update_op') if updates_collections: ops.add_to_collections(updates_collections, update_op) return mean_t, update_op @tf_export(v1=['metrics.percentage_below']) def percentage_below(values, threshold, weights=None, metrics_collections=None, updates_collections=None, name=None): """Computes the percentage of values less than the given threshold. The `percentage_below` function creates two local variables, `total` and `count` that are used to compute the percentage of `values` that fall below `threshold`. This rate is weighted by `weights`, and it is ultimately returned as `percentage` which is an idempotent operation that simply divides `total` by `count`. For estimation of the metric over a stream of data, the function creates an `update_op` operation that updates these variables and returns the `percentage`. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: values: A numeric `Tensor` of arbitrary size. threshold: A scalar threshold. weights: Optional `Tensor` whose rank is either 0, or the same rank as `values`, and must be broadcastable to `values` (i.e., all dimensions must be either `1`, or the same as the corresponding `values` dimension). metrics_collections: An optional list of collections that the metric value variable should be added to. updates_collections: An optional list of collections that the metric update ops should be added to. name: An optional variable_scope name. Returns: percentage: A `Tensor` representing the current mean, the value of `total` divided by `count`. update_op: An operation that increments the `total` and `count` variables appropriately. Raises: ValueError: If `weights` is not `None` and its shape doesn't match `values`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled. 
""" if context.executing_eagerly(): raise RuntimeError('tf.metrics.percentage_below is not supported when ' 'eager execution is enabled.') is_below_threshold = math_ops.cast( math_ops.less(values, threshold), dtypes.float32) return mean(is_below_threshold, weights, metrics_collections, updates_collections, name or 'percentage_below_threshold') def _count_condition(values, weights=None, metrics_collections=None, updates_collections=None): """Sums the weights of cases where the given values are True. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: values: A `bool` `Tensor` of arbitrary size. weights: Optional `Tensor` whose rank is either 0, or the same rank as `values`, and must be broadcastable to `values` (i.e., all dimensions must be either `1`, or the same as the corresponding `values` dimension). metrics_collections: An optional list of collections that the metric value variable should be added to. updates_collections: An optional list of collections that the metric update ops should be added to. Returns: value_tensor: A `Tensor` representing the current value of the metric. update_op: An operation that accumulates the error from a batch of data. Raises: ValueError: If `weights` is not `None` and its shape doesn't match `values`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. """ check_ops.assert_type(values, dtypes.bool) count = metric_variable([], dtypes.float32, name='count') values = math_ops.cast(values, dtypes.float32) if weights is not None: with ops.control_dependencies((check_ops.assert_rank_in( weights, (0, array_ops.rank(values))),)): weights = math_ops.cast(weights, dtypes.float32) values = math_ops.multiply(values, weights) value_tensor = _aggregate_variable(count, metrics_collections) update_op = state_ops.assign_add(count, math_ops.reduce_sum(values)) if updates_collections: ops.add_to_collections(updates_collections, update_op) return value_tensor, update_op @tf_export(v1=['metrics.false_negatives']) def false_negatives(labels, predictions, weights=None, metrics_collections=None, updates_collections=None, name=None): """Computes the total number of false negatives. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: The ground truth values, a `Tensor` whose dimensions must match `predictions`. Will be cast to `bool`. predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will be cast to `bool`. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). metrics_collections: An optional list of collections that the metric value variable should be added to. updates_collections: An optional list of collections that the metric update ops should be added to. name: An optional variable_scope name. Returns: value_tensor: A `Tensor` representing the current value of the metric. update_op: An operation that accumulates the error from a batch of data. Raises: ValueError: If `weights` is not `None` and its shape doesn't match `values`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled. 
""" if context.executing_eagerly(): raise RuntimeError('tf.metrics.false_negatives is not supported when ' 'eager execution is enabled.') with variable_scope.variable_scope(name, 'false_negatives', (predictions, labels, weights)): predictions, labels, weights = _remove_squeezable_dimensions( predictions=math_ops.cast(predictions, dtype=dtypes.bool), labels=math_ops.cast(labels, dtype=dtypes.bool), weights=weights) is_false_negative = math_ops.logical_and( math_ops.equal(labels, True), math_ops.equal(predictions, False)) return _count_condition(is_false_negative, weights, metrics_collections, updates_collections) @tf_export(v1=['metrics.false_negatives_at_thresholds']) def false_negatives_at_thresholds(labels, predictions, thresholds, weights=None, metrics_collections=None, updates_collections=None, name=None): """Computes false negatives at provided threshold values. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: A `Tensor` whose shape matches `predictions`. Will be cast to `bool`. predictions: A floating point `Tensor` of arbitrary shape and whose values are in the range `[0, 1]`. thresholds: A python list or tuple of float thresholds in `[0, 1]`. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). metrics_collections: An optional list of collections that `false_negatives` should be added to. updates_collections: An optional list of collections that `update_op` should be added to. name: An optional variable_scope name. Returns: false_negatives: A float `Tensor` of shape `[len(thresholds)]`. update_op: An operation that updates the `false_negatives` variable and returns its current value. Raises: ValueError: If `predictions` and `labels` have mismatched shapes, or if `weights` is not `None` and its shape doesn't match `predictions`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled. """ if context.executing_eagerly(): raise RuntimeError('tf.metrics.false_negatives_at_thresholds is not ' 'supported when eager execution is enabled.') with variable_scope.variable_scope(name, 'false_negatives', (predictions, labels, weights)): values, update_ops = _confusion_matrix_at_thresholds( labels, predictions, thresholds, weights=weights, includes=('fn',)) fn_value = _aggregate_variable(values['fn'], metrics_collections) if updates_collections: ops.add_to_collections(updates_collections, update_ops['fn']) return fn_value, update_ops['fn'] @tf_export(v1=['metrics.false_positives']) def false_positives(labels, predictions, weights=None, metrics_collections=None, updates_collections=None, name=None): """Sum the weights of false positives. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: The ground truth values, a `Tensor` whose dimensions must match `predictions`. Will be cast to `bool`. predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will be cast to `bool`. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). metrics_collections: An optional list of collections that the metric value variable should be added to. 
updates_collections: An optional list of collections that the metric update ops should be added to. name: An optional variable_scope name. Returns: value_tensor: A `Tensor` representing the current value of the metric. update_op: An operation that accumulates the error from a batch of data. Raises: ValueError: If `predictions` and `labels` have mismatched shapes, or if `weights` is not `None` and its shape doesn't match `predictions`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled. """ if context.executing_eagerly(): raise RuntimeError('tf.metrics.false_positives is not supported when ' 'eager execution is enabled.') with variable_scope.variable_scope(name, 'false_positives', (predictions, labels, weights)): predictions, labels, weights = _remove_squeezable_dimensions( predictions=math_ops.cast(predictions, dtype=dtypes.bool), labels=math_ops.cast(labels, dtype=dtypes.bool), weights=weights) is_false_positive = math_ops.logical_and( math_ops.equal(labels, False), math_ops.equal(predictions, True)) return _count_condition(is_false_positive, weights, metrics_collections, updates_collections) @tf_export(v1=['metrics.false_positives_at_thresholds']) def false_positives_at_thresholds(labels, predictions, thresholds, weights=None, metrics_collections=None, updates_collections=None, name=None): """Computes false positives at provided threshold values. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: A `Tensor` whose shape matches `predictions`. Will be cast to `bool`. predictions: A floating point `Tensor` of arbitrary shape and whose values are in the range `[0, 1]`. thresholds: A python list or tuple of float thresholds in `[0, 1]`. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). metrics_collections: An optional list of collections that `false_positives` should be added to. updates_collections: An optional list of collections that `update_op` should be added to. name: An optional variable_scope name. Returns: false_positives: A float `Tensor` of shape `[len(thresholds)]`. update_op: An operation that updates the `false_positives` variable and returns its current value. Raises: ValueError: If `predictions` and `labels` have mismatched shapes, or if `weights` is not `None` and its shape doesn't match `predictions`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled. """ if context.executing_eagerly(): raise RuntimeError('tf.metrics.false_positives_at_thresholds is not ' 'supported when eager execution is enabled.') with variable_scope.variable_scope(name, 'false_positives', (predictions, labels, weights)): values, update_ops = _confusion_matrix_at_thresholds( labels, predictions, thresholds, weights=weights, includes=('fp',)) fp_value = _aggregate_variable(values['fp'], metrics_collections) if updates_collections: ops.add_to_collections(updates_collections, update_ops['fp']) return fp_value, update_ops['fp'] @tf_export(v1=['metrics.true_negatives']) def true_negatives(labels, predictions, weights=None, metrics_collections=None, updates_collections=None, name=None): """Sum the weights of true_negatives. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. 
Args: labels: The ground truth values, a `Tensor` whose dimensions must match `predictions`. Will be cast to `bool`. predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will be cast to `bool`. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). metrics_collections: An optional list of collections that the metric value variable should be added to. updates_collections: An optional list of collections that the metric update ops should be added to. name: An optional variable_scope name. Returns: value_tensor: A `Tensor` representing the current value of the metric. update_op: An operation that accumulates the error from a batch of data. Raises: ValueError: If `predictions` and `labels` have mismatched shapes, or if `weights` is not `None` and its shape doesn't match `predictions`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled. """ if context.executing_eagerly(): raise RuntimeError('tf.metrics.true_negatives is not ' 'supported when eager execution is enabled.') with variable_scope.variable_scope(name, 'true_negatives', (predictions, labels, weights)): predictions, labels, weights = _remove_squeezable_dimensions( predictions=math_ops.cast(predictions, dtype=dtypes.bool), labels=math_ops.cast(labels, dtype=dtypes.bool), weights=weights) is_true_negative = math_ops.logical_and( math_ops.equal(labels, False), math_ops.equal(predictions, False)) return _count_condition(is_true_negative, weights, metrics_collections, updates_collections) @tf_export(v1=['metrics.true_negatives_at_thresholds']) def true_negatives_at_thresholds(labels, predictions, thresholds, weights=None, metrics_collections=None, updates_collections=None, name=None): """Computes true negatives at provided threshold values. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: A `Tensor` whose shape matches `predictions`. Will be cast to `bool`. predictions: A floating point `Tensor` of arbitrary shape and whose values are in the range `[0, 1]`. thresholds: A python list or tuple of float thresholds in `[0, 1]`. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). metrics_collections: An optional list of collections that `true_negatives` should be added to. updates_collections: An optional list of collections that `update_op` should be added to. name: An optional variable_scope name. Returns: true_negatives: A float `Tensor` of shape `[len(thresholds)]`. update_op: An operation that updates the `true_negatives` variable and returns its current value. Raises: ValueError: If `predictions` and `labels` have mismatched shapes, or if `weights` is not `None` and its shape doesn't match `predictions`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled. 
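  Example usage (a minimal sketch assuming TF 1.x graph mode and a
  `tf.Session`; the scores and thresholds are illustrative only):

    import tensorflow as tf

    labels = tf.constant([False, False, True, False])
    predictions = tf.constant([0.1, 0.6, 0.9, 0.3])
    tn, update_op = tf.metrics.true_negatives_at_thresholds(
        labels, predictions, thresholds=[0.25, 0.5, 0.75])

    with tf.Session() as sess:
      sess.run(tf.local_variables_initializer())
      sess.run(update_op)  # accumulate counts for every threshold
      print(sess.run(tn))  # one true-negative count per threshold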
""" if context.executing_eagerly(): raise RuntimeError('tf.metrics.true_negatives_at_thresholds is not ' 'supported when eager execution is enabled.') with variable_scope.variable_scope(name, 'true_negatives', (predictions, labels, weights)): values, update_ops = _confusion_matrix_at_thresholds( labels, predictions, thresholds, weights=weights, includes=('tn',)) tn_value = _aggregate_variable(values['tn'], metrics_collections) if updates_collections: ops.add_to_collections(updates_collections, update_ops['tn']) return tn_value, update_ops['tn'] @tf_export(v1=['metrics.true_positives']) def true_positives(labels, predictions, weights=None, metrics_collections=None, updates_collections=None, name=None): """Sum the weights of true_positives. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: The ground truth values, a `Tensor` whose dimensions must match `predictions`. Will be cast to `bool`. predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will be cast to `bool`. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). metrics_collections: An optional list of collections that the metric value variable should be added to. updates_collections: An optional list of collections that the metric update ops should be added to. name: An optional variable_scope name. Returns: value_tensor: A `Tensor` representing the current value of the metric. update_op: An operation that accumulates the error from a batch of data. Raises: ValueError: If `predictions` and `labels` have mismatched shapes, or if `weights` is not `None` and its shape doesn't match `predictions`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled. """ if context.executing_eagerly(): raise RuntimeError('tf.metrics.true_positives is not ' 'supported when eager execution is enabled.') with variable_scope.variable_scope(name, 'true_positives', (predictions, labels, weights)): predictions, labels, weights = _remove_squeezable_dimensions( predictions=math_ops.cast(predictions, dtype=dtypes.bool), labels=math_ops.cast(labels, dtype=dtypes.bool), weights=weights) is_true_positive = math_ops.logical_and( math_ops.equal(labels, True), math_ops.equal(predictions, True)) return _count_condition(is_true_positive, weights, metrics_collections, updates_collections) @tf_export(v1=['metrics.true_positives_at_thresholds']) def true_positives_at_thresholds(labels, predictions, thresholds, weights=None, metrics_collections=None, updates_collections=None, name=None): """Computes true positives at provided threshold values. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: A `Tensor` whose shape matches `predictions`. Will be cast to `bool`. predictions: A floating point `Tensor` of arbitrary shape and whose values are in the range `[0, 1]`. thresholds: A python list or tuple of float thresholds in `[0, 1]`. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). metrics_collections: An optional list of collections that `true_positives` should be added to. updates_collections: An optional list of collections that `update_op` should be added to. 
name: An optional variable_scope name. Returns: true_positives: A float `Tensor` of shape `[len(thresholds)]`. update_op: An operation that updates the `true_positives` variable and returns its current value. Raises: ValueError: If `predictions` and `labels` have mismatched shapes, or if `weights` is not `None` and its shape doesn't match `predictions`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled. """ if context.executing_eagerly(): raise RuntimeError('tf.metrics.true_positives_at_thresholds is not ' 'supported when eager execution is enabled.') with variable_scope.variable_scope(name, 'true_positives', (predictions, labels, weights)): values, update_ops = _confusion_matrix_at_thresholds( labels, predictions, thresholds, weights=weights, includes=('tp',)) tp_value = _aggregate_variable(values['tp'], metrics_collections) if updates_collections: ops.add_to_collections(updates_collections, update_ops['tp']) return tp_value, update_ops['tp'] @tf_export(v1=['metrics.precision']) def precision(labels, predictions, weights=None, metrics_collections=None, updates_collections=None, name=None): """Computes the precision of the predictions with respect to the labels. The `precision` function creates two local variables, `true_positives` and `false_positives`, that are used to compute the precision. This value is ultimately returned as `precision`, an idempotent operation that simply divides `true_positives` by the sum of `true_positives` and `false_positives`. For estimation of the metric over a stream of data, the function creates an `update_op` operation that updates these variables and returns the `precision`. `update_op` weights each prediction by the corresponding value in `weights`. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: The ground truth values, a `Tensor` whose dimensions must match `predictions`. Will be cast to `bool`. predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will be cast to `bool`. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). metrics_collections: An optional list of collections that `precision` should be added to. updates_collections: An optional list of collections that `update_op` should be added to. name: An optional variable_scope name. Returns: precision: Scalar float `Tensor` with the value of `true_positives` divided by the sum of `true_positives` and `false_positives`. update_op: `Operation` that increments `true_positives` and `false_positives` variables appropriately and whose value matches `precision`. Raises: ValueError: If `predictions` and `labels` have mismatched shapes, or if `weights` is not `None` and its shape doesn't match `predictions`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled. 
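  Example usage (a minimal sketch assuming TF 1.x graph mode and a
  `tf.Session`; the boolean constants are illustrative only):

    import tensorflow as tf

    labels = tf.constant([True, False, True, True])
    predictions = tf.constant([True, True, True, False])
    prec, update_op = tf.metrics.precision(labels, predictions)

    with tf.Session() as sess:
      sess.run(tf.local_variables_initializer())
      sess.run(update_op)    # update true_positives and false_positives
      print(sess.run(prec))  # running precision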
""" if context.executing_eagerly(): raise RuntimeError('tf.metrics.precision is not ' 'supported when eager execution is enabled.') with variable_scope.variable_scope(name, 'precision', (predictions, labels, weights)): predictions, labels, weights = _remove_squeezable_dimensions( predictions=math_ops.cast(predictions, dtype=dtypes.bool), labels=math_ops.cast(labels, dtype=dtypes.bool), weights=weights) true_p, true_positives_update_op = true_positives( labels, predictions, weights, metrics_collections=None, updates_collections=None, name=None) false_p, false_positives_update_op = false_positives( labels, predictions, weights, metrics_collections=None, updates_collections=None, name=None) def compute_precision(tp, fp, name): return array_ops.where( math_ops.greater(tp + fp, 0), math_ops.div(tp, tp + fp), 0, name) def once_across_replicas(_, true_p, false_p): return compute_precision(true_p, false_p, 'value') p = _aggregate_across_replicas(metrics_collections, once_across_replicas, true_p, false_p) update_op = compute_precision(true_positives_update_op, false_positives_update_op, 'update_op') if updates_collections: ops.add_to_collections(updates_collections, update_op) return p, update_op @tf_export(v1=['metrics.precision_at_thresholds']) def precision_at_thresholds(labels, predictions, thresholds, weights=None, metrics_collections=None, updates_collections=None, name=None): """Computes precision values for different `thresholds` on `predictions`. The `precision_at_thresholds` function creates four local variables, `true_positives`, `true_negatives`, `false_positives` and `false_negatives` for various values of thresholds. `precision[i]` is defined as the total weight of values in `predictions` above `thresholds[i]` whose corresponding entry in `labels` is `True`, divided by the total weight of values in `predictions` above `thresholds[i]` (`true_positives[i] / (true_positives[i] + false_positives[i])`). For estimation of the metric over a stream of data, the function creates an `update_op` operation that updates these variables and returns the `precision`. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: The ground truth values, a `Tensor` whose dimensions must match `predictions`. Will be cast to `bool`. predictions: A floating point `Tensor` of arbitrary shape and whose values are in the range `[0, 1]`. thresholds: A python list or tuple of float thresholds in `[0, 1]`. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). metrics_collections: An optional list of collections that `auc` should be added to. updates_collections: An optional list of collections that `update_op` should be added to. name: An optional variable_scope name. Returns: precision: A float `Tensor` of shape `[len(thresholds)]`. update_op: An operation that increments the `true_positives`, `true_negatives`, `false_positives` and `false_negatives` variables that are used in the computation of `precision`. Raises: ValueError: If `predictions` and `labels` have mismatched shapes, or if `weights` is not `None` and its shape doesn't match `predictions`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled. 
""" if context.executing_eagerly(): raise RuntimeError('tf.metrics.precision_at_thresholds is not ' 'supported when eager execution is enabled.') with variable_scope.variable_scope(name, 'precision_at_thresholds', (predictions, labels, weights)): values, update_ops = _confusion_matrix_at_thresholds( labels, predictions, thresholds, weights, includes=('tp', 'fp')) # Avoid division by zero. epsilon = 1e-7 def compute_precision(tp, fp, name): return math_ops.div(tp, epsilon + tp + fp, name='precision_' + name) def precision_across_replicas(_, values): return compute_precision(values['tp'], values['fp'], 'value') prec = _aggregate_across_replicas( metrics_collections, precision_across_replicas, values) update_op = compute_precision(update_ops['tp'], update_ops['fp'], 'update_op') if updates_collections: ops.add_to_collections(updates_collections, update_op) return prec, update_op @tf_export(v1=['metrics.recall']) def recall(labels, predictions, weights=None, metrics_collections=None, updates_collections=None, name=None): """Computes the recall of the predictions with respect to the labels. The `recall` function creates two local variables, `true_positives` and `false_negatives`, that are used to compute the recall. This value is ultimately returned as `recall`, an idempotent operation that simply divides `true_positives` by the sum of `true_positives` and `false_negatives`. For estimation of the metric over a stream of data, the function creates an `update_op` that updates these variables and returns the `recall`. `update_op` weights each prediction by the corresponding value in `weights`. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: The ground truth values, a `Tensor` whose dimensions must match `predictions`. Will be cast to `bool`. predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will be cast to `bool`. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). metrics_collections: An optional list of collections that `recall` should be added to. updates_collections: An optional list of collections that `update_op` should be added to. name: An optional variable_scope name. Returns: recall: Scalar float `Tensor` with the value of `true_positives` divided by the sum of `true_positives` and `false_negatives`. update_op: `Operation` that increments `true_positives` and `false_negatives` variables appropriately and whose value matches `recall`. Raises: ValueError: If `predictions` and `labels` have mismatched shapes, or if `weights` is not `None` and its shape doesn't match `predictions`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled. 
""" if context.executing_eagerly(): raise RuntimeError('tf.metrics.recall is not supported is not ' 'supported when eager execution is enabled.') with variable_scope.variable_scope(name, 'recall', (predictions, labels, weights)): predictions, labels, weights = _remove_squeezable_dimensions( predictions=math_ops.cast(predictions, dtype=dtypes.bool), labels=math_ops.cast(labels, dtype=dtypes.bool), weights=weights) true_p, true_positives_update_op = true_positives( labels, predictions, weights, metrics_collections=None, updates_collections=None, name=None) false_n, false_negatives_update_op = false_negatives( labels, predictions, weights, metrics_collections=None, updates_collections=None, name=None) def compute_recall(true_p, false_n, name): return array_ops.where( math_ops.greater(true_p + false_n, 0), math_ops.div(true_p, true_p + false_n), 0, name) def once_across_replicas(_, true_p, false_n): return compute_recall(true_p, false_n, 'value') rec = _aggregate_across_replicas( metrics_collections, once_across_replicas, true_p, false_n) update_op = compute_recall(true_positives_update_op, false_negatives_update_op, 'update_op') if updates_collections: ops.add_to_collections(updates_collections, update_op) return rec, update_op def _at_k_name(name, k=None, class_id=None): if k is not None: name = '%s_at_%d' % (name, k) else: name = '%s_at_k' % (name) if class_id is not None: name = '%s_class%d' % (name, class_id) return name def _select_class_id(ids, selected_id): """Filter all but `selected_id` out of `ids`. Args: ids: `int64` `Tensor` or `SparseTensor` of IDs. selected_id: Int id to select. Returns: `SparseTensor` of same dimensions as `ids`. This contains only the entries equal to `selected_id`. """ ids = sparse_tensor.convert_to_tensor_or_sparse_tensor(ids) if isinstance(ids, sparse_tensor.SparseTensor): return sparse_ops.sparse_retain(ids, math_ops.equal(ids.values, selected_id)) # TODO(ptucker): Make this more efficient, maybe add a sparse version of # tf.equal and tf.reduce_any? # Shape of filled IDs is the same as `ids` with the last dim collapsed to 1. ids_shape = array_ops.shape(ids, out_type=dtypes.int64) ids_last_dim = array_ops.size(ids_shape) - 1 filled_selected_id_shape = math_ops.reduced_shape(ids_shape, array_ops.reshape( ids_last_dim, [1])) # Intersect `ids` with the selected ID. filled_selected_id = array_ops.fill(filled_selected_id_shape, math_ops.cast(selected_id, dtypes.int64)) result = sets.set_intersection(filled_selected_id, ids) return sparse_tensor.SparseTensor( indices=result.indices, values=result.values, dense_shape=ids_shape) def _maybe_select_class_id(labels, predictions_idx, selected_id=None): """If class ID is specified, filter all other classes. Args: labels: `int64` `Tensor` or `SparseTensor` with shape [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of target classes for the associated prediction. Commonly, N=1 and `labels` has shape [batch_size, num_labels]. [D1, ... DN] must match `predictions_idx`. predictions_idx: `int64` `Tensor` of class IDs, with shape [D1, ... DN, k] where N >= 1. Commonly, N=1 and `predictions_idx` has shape [batch size, k]. selected_id: Int id to select. Returns: Tuple of `labels` and `predictions_idx`, possibly with classes removed. 
""" if selected_id is None: return labels, predictions_idx return (_select_class_id(labels, selected_id), _select_class_id(predictions_idx, selected_id)) def _sparse_true_positive_at_k(labels, predictions_idx, class_id=None, weights=None, name=None): """Calculates true positives for recall@k and precision@k. If `class_id` is specified, calculate binary true positives for `class_id` only. If `class_id` is not specified, calculate metrics for `k` predicted vs `n` label classes, where `n` is the 2nd dimension of `labels_sparse`. Args: labels: `int64` `Tensor` or `SparseTensor` with shape [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of target classes for the associated prediction. Commonly, N=1 and `labels` has shape [batch_size, num_labels]. [D1, ... DN] must match `predictions_idx`. predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`, top `k` predicted classes. For rank `n`, the first `n-1` dimensions must match `labels`. class_id: Class for which we want binary metrics. weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of `labels`. If the latter, it must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). name: Name of operation. Returns: A [D1, ... DN] `Tensor` of true positive counts. """ with ops.name_scope(name, 'true_positives', (predictions_idx, labels, weights)): labels, predictions_idx = _maybe_select_class_id(labels, predictions_idx, class_id) tp = sets.set_size(sets.set_intersection(predictions_idx, labels)) tp = math_ops.cast(tp, dtypes.float64) if weights is not None: with ops.control_dependencies((weights_broadcast_ops.assert_broadcastable( weights, tp),)): weights = math_ops.cast(weights, dtypes.float64) tp = math_ops.multiply(tp, weights) return tp def _streaming_sparse_true_positive_at_k(labels, predictions_idx, k=None, class_id=None, weights=None, name=None): """Calculates weighted per step true positives for recall@k and precision@k. If `class_id` is specified, calculate binary true positives for `class_id` only. If `class_id` is not specified, calculate metrics for `k` predicted vs `n` label classes, where `n` is the 2nd dimension of `labels`. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: `int64` `Tensor` or `SparseTensor` with shape [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of target classes for the associated prediction. Commonly, N=1 and `labels` has shape [batch_size, num_labels]. [D1, ... DN] must match `predictions_idx`. predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`, top `k` predicted classes. For rank `n`, the first `n-1` dimensions must match `labels`. k: Integer, k for @k metric. This is only used for default op name. class_id: Class for which we want binary metrics. weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of `labels`. If the latter, it must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). name: Name of new variable, and namespace for other dependent ops. Returns: A tuple of `Variable` and update `Operation`. Raises: ValueError: If `weights` is not `None` and has an incompatible shape. 
""" with ops.name_scope(name, _at_k_name('true_positive', k, class_id=class_id), (predictions_idx, labels, weights)) as scope: tp = _sparse_true_positive_at_k( predictions_idx=predictions_idx, labels=labels, class_id=class_id, weights=weights) batch_total_tp = math_ops.cast(math_ops.reduce_sum(tp), dtypes.float64) var = metric_variable([], dtypes.float64, name=scope) return var, state_ops.assign_add(var, batch_total_tp, name='update') def _sparse_false_negative_at_k(labels, predictions_idx, class_id=None, weights=None): """Calculates false negatives for recall@k. If `class_id` is specified, calculate binary true positives for `class_id` only. If `class_id` is not specified, calculate metrics for `k` predicted vs `n` label classes, where `n` is the 2nd dimension of `labels_sparse`. Args: labels: `int64` `Tensor` or `SparseTensor` with shape [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of target classes for the associated prediction. Commonly, N=1 and `labels` has shape [batch_size, num_labels]. [D1, ... DN] must match `predictions_idx`. predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`, top `k` predicted classes. For rank `n`, the first `n-1` dimensions must match `labels`. class_id: Class for which we want binary metrics. weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of `labels`. If the latter, it must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). Returns: A [D1, ... DN] `Tensor` of false negative counts. """ with ops.name_scope(None, 'false_negatives', (predictions_idx, labels, weights)): labels, predictions_idx = _maybe_select_class_id(labels, predictions_idx, class_id) fn = sets.set_size( sets.set_difference(predictions_idx, labels, aminusb=False)) fn = math_ops.cast(fn, dtypes.float64) if weights is not None: with ops.control_dependencies((weights_broadcast_ops.assert_broadcastable( weights, fn),)): weights = math_ops.cast(weights, dtypes.float64) fn = math_ops.multiply(fn, weights) return fn def _streaming_sparse_false_negative_at_k(labels, predictions_idx, k, class_id=None, weights=None, name=None): """Calculates weighted per step false negatives for recall@k. If `class_id` is specified, calculate binary true positives for `class_id` only. If `class_id` is not specified, calculate metrics for `k` predicted vs `n` label classes, where `n` is the 2nd dimension of `labels`. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: `int64` `Tensor` or `SparseTensor` with shape [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of target classes for the associated prediction. Commonly, N=1 and `labels` has shape [batch_size, num_labels]. [D1, ... DN] must match `predictions_idx`. predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`, top `k` predicted classes. For rank `n`, the first `n-1` dimensions must match `labels`. k: Integer, k for @k metric. This is only used for default op name. class_id: Class for which we want binary metrics. weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of `labels`. If the latter, it must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). name: Name of new variable, and namespace for other dependent ops. Returns: A tuple of `Variable` and update `Operation`. Raises: ValueError: If `weights` is not `None` and has an incompatible shape. 
""" with ops.name_scope(name, _at_k_name('false_negative', k, class_id=class_id), (predictions_idx, labels, weights)) as scope: fn = _sparse_false_negative_at_k( predictions_idx=predictions_idx, labels=labels, class_id=class_id, weights=weights) batch_total_fn = math_ops.cast(math_ops.reduce_sum(fn), dtypes.float64) var = metric_variable([], dtypes.float64, name=scope) return var, state_ops.assign_add(var, batch_total_fn, name='update') @tf_export(v1=['metrics.recall_at_k']) def recall_at_k(labels, predictions, k, class_id=None, weights=None, metrics_collections=None, updates_collections=None, name=None): """Computes recall@k of the predictions with respect to sparse labels. If `class_id` is specified, we calculate recall by considering only the entries in the batch for which `class_id` is in the label, and computing the fraction of them for which `class_id` is in the top-k `predictions`. If `class_id` is not specified, we'll calculate recall as how often on average a class among the labels of a batch entry is in the top-k `predictions`. `sparse_recall_at_k` creates two local variables, `true_positive_at_<k>` and `false_negative_at_<k>`, that are used to compute the recall_at_k frequency. This frequency is ultimately returned as `recall_at_<k>`: an idempotent operation that simply divides `true_positive_at_<k>` by total (`true_positive_at_<k>` + `false_negative_at_<k>`). For estimation of the metric over a stream of data, the function creates an `update_op` operation that updates these variables and returns the `recall_at_<k>`. Internally, a `top_k` operation computes a `Tensor` indicating the top `k` `predictions`. Set operations applied to `top_k` and `labels` calculate the true positives and false negatives weighted by `weights`. Then `update_op` increments `true_positive_at_<k>` and `false_negative_at_<k>` using these values. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: `int64` `Tensor` or `SparseTensor` with shape [D1, ... DN, num_labels] or [D1, ... DN], where the latter implies num_labels=1. N >= 1 and num_labels is the number of target classes for the associated prediction. Commonly, N=1 and `labels` has shape [batch_size, num_labels]. [D1, ... DN] must match `predictions`. Values should be in range [0, num_classes), where num_classes is the last dimension of `predictions`. Values outside this range always count towards `false_negative_at_<k>`. predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where N >= 1. Commonly, N=1 and predictions has shape [batch size, num_classes]. The final dimension contains the logit values for each class. [D1, ... DN] must match `labels`. k: Integer, k for @k metric. class_id: Integer class ID for which we want binary metrics. This should be in range [0, num_classes), where num_classes is the last dimension of `predictions`. If class_id is outside this range, the method returns NAN. weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of `labels`. If the latter, it must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). metrics_collections: An optional list of collections that values should be added to. updates_collections: An optional list of collections that updates should be added to. name: Name of new update operation, and namespace for other dependent ops. Returns: recall: Scalar `float64` `Tensor` with the value of `true_positives` divided by the sum of `true_positives` and `false_negatives`. 
update_op: `Operation` that increments `true_positives` and `false_negatives` variables appropriately, and whose value matches `recall`. Raises: ValueError: If `weights` is not `None` and its shape doesn't match `predictions`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled. """ if context.executing_eagerly(): raise RuntimeError('tf.metrics.recall_at_k is not ' 'supported when eager execution is enabled.') with ops.name_scope(name, _at_k_name('recall', k, class_id=class_id), (predictions, labels, weights)) as scope: _, top_k_idx = nn.top_k(predictions, k) return recall_at_top_k( labels=labels, predictions_idx=top_k_idx, k=k, class_id=class_id, weights=weights, metrics_collections=metrics_collections, updates_collections=updates_collections, name=scope) @tf_export(v1=['metrics.recall_at_top_k']) def recall_at_top_k(labels, predictions_idx, k=None, class_id=None, weights=None, metrics_collections=None, updates_collections=None, name=None): """Computes recall@k of top-k predictions with respect to sparse labels. Differs from `recall_at_k` in that predictions must be in the form of top `k` class indices, whereas `recall_at_k` expects logits. Refer to `recall_at_k` for more details. Args: labels: `int64` `Tensor` or `SparseTensor` with shape [D1, ... DN, num_labels] or [D1, ... DN], where the latter implies num_labels=1. N >= 1 and num_labels is the number of target classes for the associated prediction. Commonly, N=1 and `labels` has shape [batch_size, num_labels]. [D1, ... DN] must match `predictions`. Values should be in range [0, num_classes), where num_classes is the last dimension of `predictions`. Values outside this range always count towards `false_negative_at_<k>`. predictions_idx: Integer `Tensor` with shape [D1, ... DN, k] where N >= 1. Commonly, N=1 and predictions has shape [batch size, k]. The final dimension contains the top `k` predicted class indices. [D1, ... DN] must match `labels`. k: Integer, k for @k metric. Only used for the default op name. class_id: Integer class ID for which we want binary metrics. This should be in range [0, num_classes), where num_classes is the last dimension of `predictions`. If class_id is outside this range, the method returns NAN. weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of `labels`. If the latter, it must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). metrics_collections: An optional list of collections that values should be added to. updates_collections: An optional list of collections that updates should be added to. name: Name of new update operation, and namespace for other dependent ops. Returns: recall: Scalar `float64` `Tensor` with the value of `true_positives` divided by the sum of `true_positives` and `false_negatives`. update_op: `Operation` that increments `true_positives` and `false_negatives` variables appropriately, and whose value matches `recall`. Raises: ValueError: If `weights` is not `None` and its shape doesn't match `predictions`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. 
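  Example usage (a minimal sketch assuming TF 1.x graph mode and a
  `tf.Session`; the class indices below are illustrative only):

    import tensorflow as tf

    # Top-2 predicted class indices for a batch of two examples.
    predictions_idx = tf.constant([[0, 2], [1, 3]], dtype=tf.int64)
    labels = tf.constant([[2], [0]], dtype=tf.int64)
    rec, update_op = tf.metrics.recall_at_top_k(labels, predictions_idx, k=2)

    with tf.Session() as sess:
      sess.run(tf.local_variables_initializer())
      sess.run(update_op)   # update true_positive_at_2 / false_negative_at_2
      print(sess.run(rec))  # running recall@2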
""" with ops.name_scope(name, _at_k_name('recall', k, class_id=class_id), (predictions_idx, labels, weights)) as scope: labels = _maybe_expand_labels(labels, predictions_idx) top_k_idx = math_ops.cast(predictions_idx, dtypes.int64) tp, tp_update = _streaming_sparse_true_positive_at_k( predictions_idx=top_k_idx, labels=labels, k=k, class_id=class_id, weights=weights) fn, fn_update = _streaming_sparse_false_negative_at_k( predictions_idx=top_k_idx, labels=labels, k=k, class_id=class_id, weights=weights) def compute_recall(_, tp, fn): return math_ops.div(tp, math_ops.add(tp, fn), name=scope) metric = _aggregate_across_replicas( metrics_collections, compute_recall, tp, fn) update = math_ops.div( tp_update, math_ops.add(tp_update, fn_update), name='update') if updates_collections: ops.add_to_collections(updates_collections, update) return metric, update @tf_export(v1=['metrics.recall_at_thresholds']) def recall_at_thresholds(labels, predictions, thresholds, weights=None, metrics_collections=None, updates_collections=None, name=None): """Computes various recall values for different `thresholds` on `predictions`. The `recall_at_thresholds` function creates four local variables, `true_positives`, `true_negatives`, `false_positives` and `false_negatives` for various values of thresholds. `recall[i]` is defined as the total weight of values in `predictions` above `thresholds[i]` whose corresponding entry in `labels` is `True`, divided by the total weight of `True` values in `labels` (`true_positives[i] / (true_positives[i] + false_negatives[i])`). For estimation of the metric over a stream of data, the function creates an `update_op` operation that updates these variables and returns the `recall`. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: The ground truth values, a `Tensor` whose dimensions must match `predictions`. Will be cast to `bool`. predictions: A floating point `Tensor` of arbitrary shape and whose values are in the range `[0, 1]`. thresholds: A python list or tuple of float thresholds in `[0, 1]`. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). metrics_collections: An optional list of collections that `recall` should be added to. updates_collections: An optional list of collections that `update_op` should be added to. name: An optional variable_scope name. Returns: recall: A float `Tensor` of shape `[len(thresholds)]`. update_op: An operation that increments the `true_positives`, `true_negatives`, `false_positives` and `false_negatives` variables that are used in the computation of `recall`. Raises: ValueError: If `predictions` and `labels` have mismatched shapes, or if `weights` is not `None` and its shape doesn't match `predictions`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled. """ if context.executing_eagerly(): raise RuntimeError('tf.metrics.recall_at_thresholds is not ' 'supported when eager execution is enabled.') with variable_scope.variable_scope(name, 'recall_at_thresholds', (predictions, labels, weights)): values, update_ops = _confusion_matrix_at_thresholds( labels, predictions, thresholds, weights, includes=('tp', 'fn')) # Avoid division by zero. 
epsilon = 1e-7 def compute_recall(tp, fn, name): return math_ops.div(tp, epsilon + tp + fn, name='recall_' + name) def recall_across_replicas(_, values): return compute_recall(values['tp'], values['fn'], 'value') rec = _aggregate_across_replicas( metrics_collections, recall_across_replicas, values) update_op = compute_recall(update_ops['tp'], update_ops['fn'], 'update_op') if updates_collections: ops.add_to_collections(updates_collections, update_op) return rec, update_op @tf_export(v1=['metrics.root_mean_squared_error']) def root_mean_squared_error(labels, predictions, weights=None, metrics_collections=None, updates_collections=None, name=None): """Computes the root mean squared error between the labels and predictions. The `root_mean_squared_error` function creates two local variables, `total` and `count` that are used to compute the root mean squared error. This average is weighted by `weights`, and it is ultimately returned as `root_mean_squared_error`: an idempotent operation that takes the square root of the division of `total` by `count`. For estimation of the metric over a stream of data, the function creates an `update_op` operation that updates these variables and returns the `root_mean_squared_error`. Internally, a `squared_error` operation computes the element-wise square of the difference between `predictions` and `labels`. Then `update_op` increments `total` with the reduced sum of the product of `weights` and `squared_error`, and it increments `count` with the reduced sum of `weights`. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: A `Tensor` of the same shape as `predictions`. predictions: A `Tensor` of arbitrary shape. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). metrics_collections: An optional list of collections that `root_mean_squared_error` should be added to. updates_collections: An optional list of collections that `update_op` should be added to. name: An optional variable_scope name. Returns: root_mean_squared_error: A `Tensor` representing the current mean, the value of `total` divided by `count`. update_op: An operation that increments the `total` and `count` variables appropriately and whose value matches `root_mean_squared_error`. Raises: ValueError: If `predictions` and `labels` have mismatched shapes, or if `weights` is not `None` and its shape doesn't match `predictions`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled. 
""" if context.executing_eagerly(): raise RuntimeError('tf.metrics.root_mean_squared_error is not ' 'supported when eager execution is enabled.') predictions, labels, weights = _remove_squeezable_dimensions( predictions=predictions, labels=labels, weights=weights) mse, update_mse_op = mean_squared_error(labels, predictions, weights, None, None, name or 'root_mean_squared_error') once_across_replicas = lambda _, mse: math_ops.sqrt(mse) rmse = _aggregate_across_replicas( metrics_collections, once_across_replicas, mse) update_rmse_op = math_ops.sqrt(update_mse_op) if updates_collections: ops.add_to_collections(updates_collections, update_rmse_op) return rmse, update_rmse_op @tf_export(v1=['metrics.sensitivity_at_specificity']) def sensitivity_at_specificity(labels, predictions, specificity, weights=None, num_thresholds=200, metrics_collections=None, updates_collections=None, name=None): """Computes the specificity at a given sensitivity. The `sensitivity_at_specificity` function creates four local variables, `true_positives`, `true_negatives`, `false_positives` and `false_negatives` that are used to compute the sensitivity at the given specificity value. The threshold for the given specificity value is computed and used to evaluate the corresponding sensitivity. For estimation of the metric over a stream of data, the function creates an `update_op` operation that updates these variables and returns the `sensitivity`. `update_op` increments the `true_positives`, `true_negatives`, `false_positives` and `false_negatives` counts with the weight of each case found in the `predictions` and `labels`. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. For additional information about specificity and sensitivity, see the following: https://en.wikipedia.org/wiki/Sensitivity_and_specificity Args: labels: The ground truth values, a `Tensor` whose dimensions must match `predictions`. Will be cast to `bool`. predictions: A floating point `Tensor` of arbitrary shape and whose values are in the range `[0, 1]`. specificity: A scalar value in range `[0, 1]`. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). num_thresholds: The number of thresholds to use for matching the given specificity. metrics_collections: An optional list of collections that `sensitivity` should be added to. updates_collections: An optional list of collections that `update_op` should be added to. name: An optional variable_scope name. Returns: sensitivity: A scalar `Tensor` representing the sensitivity at the given `specificity` value. update_op: An operation that increments the `true_positives`, `true_negatives`, `false_positives` and `false_negatives` variables appropriately and whose value matches `sensitivity`. Raises: ValueError: If `predictions` and `labels` have mismatched shapes, if `weights` is not `None` and its shape doesn't match `predictions`, or if `specificity` is not between 0 and 1, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled. 
""" if context.executing_eagerly(): raise RuntimeError('tf.metrics.sensitivity_at_specificity is not ' 'supported when eager execution is enabled.') if specificity < 0 or specificity > 1: raise ValueError('`specificity` must be in the range [0, 1].') with variable_scope.variable_scope(name, 'sensitivity_at_specificity', (predictions, labels, weights)): kepsilon = 1e-7 # to account for floating point imprecisions thresholds = [ (i + 1) * 1.0 / (num_thresholds - 1) for i in range(num_thresholds - 2) ] thresholds = [0.0 - kepsilon] + thresholds + [1.0 + kepsilon] values, update_ops = _confusion_matrix_at_thresholds( labels, predictions, thresholds, weights) def compute_sensitivity_at_specificity(tp, tn, fp, fn, name): specificities = math_ops.div(tn, tn + fp + kepsilon) tf_index = math_ops.argmin(math_ops.abs(specificities - specificity), 0) tf_index = math_ops.cast(tf_index, dtypes.int32) # Now, we have the implicit threshold, so compute the sensitivity: return math_ops.div(tp[tf_index], tp[tf_index] + fn[tf_index] + kepsilon, name) def sensitivity_across_replicas(_, values): return compute_sensitivity_at_specificity( values['tp'], values['tn'], values['fp'], values['fn'], 'value') sensitivity = _aggregate_across_replicas( metrics_collections, sensitivity_across_replicas, values) update_op = compute_sensitivity_at_specificity( update_ops['tp'], update_ops['tn'], update_ops['fp'], update_ops['fn'], 'update_op') if updates_collections: ops.add_to_collections(updates_collections, update_op) return sensitivity, update_op def _expand_and_tile(tensor, multiple, dim=0, name=None): """Slice `tensor` shape in 2, then tile along the sliced dimension. A new dimension is inserted in shape of `tensor` before `dim`, then values are tiled `multiple` times along the new dimension. Args: tensor: Input `Tensor` or `SparseTensor`. multiple: Integer, number of times to tile. dim: Integer, dimension along which to tile. name: Name of operation. Returns: `Tensor` result of expanding and tiling `tensor`. Raises: ValueError: if `multiple` is less than 1, or `dim` is not in `[-rank(tensor), rank(tensor)]`. """ if multiple < 1: raise ValueError('Invalid multiple %s, must be > 0.' % multiple) with ops.name_scope(name, 'expand_and_tile', (tensor, multiple, dim)) as scope: # Sparse. tensor = sparse_tensor.convert_to_tensor_or_sparse_tensor(tensor) if isinstance(tensor, sparse_tensor.SparseTensor): if dim < 0: expand_dims = array_ops.reshape( array_ops.size(tensor.dense_shape) + dim, [1]) else: expand_dims = [dim] expanded_shape = array_ops.concat( (array_ops.slice(tensor.dense_shape, [0], expand_dims), [1], array_ops.slice(tensor.dense_shape, expand_dims, [-1])), 0, name='expanded_shape') expanded = sparse_ops.sparse_reshape( tensor, shape=expanded_shape, name='expand') if multiple == 1: return expanded return sparse_ops.sparse_concat( dim - 1 if dim < 0 else dim, [expanded] * multiple, name=scope) # Dense. expanded = array_ops.expand_dims( tensor, dim if (dim >= 0) else (dim - 1), name='expand') if multiple == 1: return expanded ones = array_ops.ones_like(array_ops.shape(tensor)) tile_multiples = array_ops.concat( (ones[:dim], (multiple,), ones[dim:]), 0, name='multiples') return array_ops.tile(expanded, tile_multiples, name=scope) def _num_relevant(labels, k): """Computes number of relevant values for each row in labels. For labels with shape [D1, ... DN, num_labels], this is the minimum of `num_labels` and `k`. Args: labels: `int64` `Tensor` or `SparseTensor` with shape [D1, ... 
DN, num_labels], where N >= 1 and num_labels is the number of target classes for the associated prediction. Commonly, N=1 and `labels` has shape [batch_size, num_labels]. k: Integer, k for @k metric. Returns: Integer `Tensor` of shape [D1, ... DN], where each value is the number of relevant values for that row. Raises: ValueError: if inputs have invalid dtypes or values. """ if k < 1: raise ValueError('Invalid k=%s.' % k) with ops.name_scope(None, 'num_relevant', (labels,)) as scope: # For SparseTensor, calculate separate count for each row. labels = sparse_tensor.convert_to_tensor_or_sparse_tensor(labels) if isinstance(labels, sparse_tensor.SparseTensor): return math_ops.minimum(sets.set_size(labels), k, name=scope) # For dense Tensor, calculate scalar count based on last dimension, and # tile across labels shape. labels_shape = array_ops.shape(labels) labels_size = labels_shape[-1] num_relevant_scalar = math_ops.minimum(labels_size, k) return array_ops.fill(labels_shape[0:-1], num_relevant_scalar, name=scope) def _sparse_average_precision_at_top_k(labels, predictions_idx): """Computes average precision@k of predictions with respect to sparse labels. From en.wikipedia.org/wiki/Information_retrieval#Average_precision, formula for each row is: AveP = sum_{i=1...k} P_{i} * rel_{i} / num_relevant_items A "row" is the elements in dimension [D1, ... DN] of `predictions_idx`, `labels`, and the result `Tensors`. In the common case, this is [batch_size]. Each row of the results contains the average precision for that row. Args: labels: `int64` `Tensor` or `SparseTensor` with shape [D1, ... DN, num_labels] or [D1, ... DN], where the latter implies num_labels=1. N >= 1 and num_labels is the number of target classes for the associated prediction. Commonly, N=1 and `labels` has shape [batch_size, num_labels]. [D1, ... DN] must match `predictions_idx`. Values should be in range [0, num_classes). predictions_idx: Integer `Tensor` with shape [D1, ... DN, k] where N >= 1. Commonly, N=1 and `predictions_idx` has shape [batch size, k]. The final dimension must be set and contains the top `k` predicted class indices. [D1, ... DN] must match `labels`. Values should be in range [0, num_classes). Returns: `float64` `Tensor` of shape [D1, ... DN], where each value is the average precision for that row. Raises: ValueError: if the last dimension of predictions_idx is not set. """ with ops.name_scope(None, 'average_precision', (predictions_idx, labels)) as scope: predictions_idx = math_ops.cast( predictions_idx, dtypes.int64, name='predictions_idx') if predictions_idx.get_shape().ndims == 0: raise ValueError('The rank of predictions_idx must be at least 1.') k = predictions_idx.get_shape().as_list()[-1] if k is None: raise ValueError('The last dimension of predictions_idx must be set.') labels = _maybe_expand_labels(labels, predictions_idx) # Expand dims to produce [D1, ... DN, k, 1] tensor. This gives us a separate # prediction for each k, so we can calculate separate true positive values # for each k. predictions_idx_per_k = array_ops.expand_dims( predictions_idx, -1, name='predictions_idx_per_k') # Replicate labels k times to produce [D1, ... DN, k, num_labels] tensor. labels_per_k = _expand_and_tile( labels, multiple=k, dim=-1, name='labels_per_k') # The following tensors are all of shape [D1, ... DN, k], containing values # per row, per k value. # `relevant_per_k` (int32) - Relevance indicator, 1 if the prediction at # that k value is correct, 0 otherwise. This is the "rel_{i}" term from # the formula above. 
# `tp_per_k` (int32) - True positive counts. # `retrieved_per_k` (int32) - Number of predicted values at each k. This is # the precision denominator. # `precision_per_k` (float64) - Precision at each k. This is the "P_{i}" # term from the formula above. # `relevant_precision_per_k` (float64) - Relevant precisions; i.e., # precisions at all k for which relevance indicator is true. relevant_per_k = _sparse_true_positive_at_k( labels_per_k, predictions_idx_per_k, name='relevant_per_k') tp_per_k = math_ops.cumsum(relevant_per_k, axis=-1, name='tp_per_k') retrieved_per_k = math_ops.cumsum( array_ops.ones_like(relevant_per_k), axis=-1, name='retrieved_per_k') precision_per_k = math_ops.div( math_ops.cast(tp_per_k, dtypes.float64), math_ops.cast(retrieved_per_k, dtypes.float64), name='precision_per_k') relevant_precision_per_k = math_ops.multiply( precision_per_k, math_ops.cast(relevant_per_k, dtypes.float64), name='relevant_precision_per_k') # Reduce along k dimension to get the sum, yielding a [D1, ... DN] tensor. precision_sum = math_ops.reduce_sum( relevant_precision_per_k, axis=(-1,), name='precision_sum') # Divide by number of relevant items to get average precision. These are # the "num_relevant_items" and "AveP" terms from the formula above. num_relevant_items = math_ops.cast(_num_relevant(labels, k), dtypes.float64) return math_ops.div(precision_sum, num_relevant_items, name=scope) def _streaming_sparse_average_precision_at_top_k(labels, predictions_idx, weights=None, metrics_collections=None, updates_collections=None, name=None): """Computes average precision@k of predictions with respect to sparse labels. `sparse_average_precision_at_top_k` creates two local variables, `average_precision_at_<k>/total` and `average_precision_at_<k>/max`, that are used to compute the frequency. This frequency is ultimately returned as `average_precision_at_<k>`: an idempotent operation that simply divides `average_precision_at_<k>/total` by `average_precision_at_<k>/max`. For estimation of the metric over a stream of data, the function creates an `update_op` operation that updates these variables and returns the `precision_at_<k>`. Set operations applied to `top_k` and `labels` calculate the true positives and false positives weighted by `weights`. Then `update_op` increments `true_positive_at_<k>` and `false_positive_at_<k>` using these values. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: `int64` `Tensor` or `SparseTensor` with shape [D1, ... DN, num_labels] or [D1, ... DN], where the latter implies num_labels=1. N >= 1 and num_labels is the number of target classes for the associated prediction. Commonly, N=1 and `labels` has shape [batch_size, num_labels]. [D1, ... DN] must match `predictions_idx`. Values should be in range [0, num_classes). predictions_idx: Integer `Tensor` with shape [D1, ... DN, k] where N >= 1. Commonly, N=1 and `predictions_idx` has shape [batch size, k]. The final dimension contains the top `k` predicted class indices. [D1, ... DN] must match `labels`. Values should be in range [0, num_classes). weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of `labels`. If the latter, it must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). metrics_collections: An optional list of collections that values should be added to. updates_collections: An optional list of collections that updates should be added to. 
name: Name of new update operation, and namespace for other dependent ops. Returns: mean_average_precision: Scalar `float64` `Tensor` with the mean average precision values. update: `Operation` that increments variables appropriately, and whose value matches `metric`. """ with ops.name_scope(name, 'average_precision_at_top_k', (predictions_idx, labels, weights)) as scope: # Calculate per-example average precision, and apply weights. average_precision = _sparse_average_precision_at_top_k( predictions_idx=predictions_idx, labels=labels) if weights is not None: weights = weights_broadcast_ops.broadcast_weights( math_ops.cast(weights, dtypes.float64), average_precision) average_precision = math_ops.multiply(average_precision, weights) # Create accumulation variables and update ops for max average precision and # total average precision. with ops.name_scope(None, 'max', (average_precision,)) as max_scope: # `max` is the max possible precision. Since max for any row is 1.0: # - For the unweighted case, this is just the number of rows. # - For the weighted case, it's the sum of the weights broadcast across # `average_precision` rows. max_var = metric_variable([], dtypes.float64, name=max_scope) if weights is None: batch_max = math_ops.cast( array_ops.size(average_precision, name='batch_max'), dtypes.float64) else: batch_max = math_ops.reduce_sum(weights, name='batch_max') max_update = state_ops.assign_add(max_var, batch_max, name='update') with ops.name_scope(None, 'total', (average_precision,)) as total_scope: total_var = metric_variable([], dtypes.float64, name=total_scope) batch_total = math_ops.reduce_sum(average_precision, name='batch_total') total_update = state_ops.assign_add(total_var, batch_total, name='update') # Divide total by max to get mean, for both vars and the update ops. def precision_across_replicas(_, total_var, max_var): return _safe_scalar_div(total_var, max_var, name='mean') mean_average_precision = _aggregate_across_replicas( metrics_collections, precision_across_replicas, total_var, max_var) update = _safe_scalar_div(total_update, max_update, name=scope) if updates_collections: ops.add_to_collections(updates_collections, update) return mean_average_precision, update @tf_export(v1=['metrics.sparse_average_precision_at_k']) @deprecated(None, 'Use average_precision_at_k instead') def sparse_average_precision_at_k(labels, predictions, k, weights=None, metrics_collections=None, updates_collections=None, name=None): """Renamed to `average_precision_at_k`, please use that method instead.""" return average_precision_at_k( labels=labels, predictions=predictions, k=k, weights=weights, metrics_collections=metrics_collections, updates_collections=updates_collections, name=name) @tf_export(v1=['metrics.average_precision_at_k']) def average_precision_at_k(labels, predictions, k, weights=None, metrics_collections=None, updates_collections=None, name=None): """Computes average precision@k of predictions with respect to sparse labels. `average_precision_at_k` creates two local variables, `average_precision_at_<k>/total` and `average_precision_at_<k>/max`, that are used to compute the frequency. This frequency is ultimately returned as `average_precision_at_<k>`: an idempotent operation that simply divides `average_precision_at_<k>/total` by `average_precision_at_<k>/max`. For estimation of the metric over a stream of data, the function creates an `update_op` operation that updates these variables and returns the `precision_at_<k>`. 
Internally, a `top_k` operation computes a `Tensor` indicating the top `k` `predictions`. Set operations applied to `top_k` and `labels` calculate the true positives and false positives weighted by `weights`. Then `update_op` increments `true_positive_at_<k>` and `false_positive_at_<k>` using these values. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: `int64` `Tensor` or `SparseTensor` with shape [D1, ... DN, num_labels] or [D1, ... DN], where the latter implies num_labels=1. N >= 1 and num_labels is the number of target classes for the associated prediction. Commonly, N=1 and `labels` has shape [batch_size, num_labels]. [D1, ... DN] must match `predictions`. Values should be in range [0, num_classes), where num_classes is the last dimension of `predictions`. Values outside this range are ignored. predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where N >= 1. Commonly, N=1 and `predictions` has shape [batch size, num_classes]. The final dimension contains the logit values for each class. [D1, ... DN] must match `labels`. k: Integer, k for @k metric. This will calculate an average precision for range `[1,k]`, as documented above. weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of `labels`. If the latter, it must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). metrics_collections: An optional list of collections that values should be added to. updates_collections: An optional list of collections that updates should be added to. name: Name of new update operation, and namespace for other dependent ops. Returns: mean_average_precision: Scalar `float64` `Tensor` with the mean average precision values. update: `Operation` that increments variables appropriately, and whose value matches `metric`. Raises: ValueError: if k is invalid. RuntimeError: If eager execution is enabled. """ if context.executing_eagerly(): raise RuntimeError('tf.metrics.sparse_average_precision_at_k is not ' 'supported when eager execution is enabled.') if k < 1: raise ValueError('Invalid k=%s.' % k) with ops.name_scope(name, _at_k_name('average_precision', k), (predictions, labels, weights)) as scope: # Calculate top k indices to produce [D1, ... DN, k] tensor. _, predictions_idx = nn.top_k(predictions, k) return _streaming_sparse_average_precision_at_top_k( labels=labels, predictions_idx=predictions_idx, weights=weights, metrics_collections=metrics_collections, updates_collections=updates_collections, name=scope) def _sparse_false_positive_at_k(labels, predictions_idx, class_id=None, weights=None): """Calculates false positives for precision@k. If `class_id` is specified, calculate binary true positives for `class_id` only. If `class_id` is not specified, calculate metrics for `k` predicted vs `n` label classes, where `n` is the 2nd dimension of `labels_sparse`. Args: labels: `int64` `Tensor` or `SparseTensor` with shape [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of target classes for the associated prediction. Commonly, N=1 and `labels` has shape [batch_size, num_labels]. [D1, ... DN] must match `predictions_idx`. predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`, top `k` predicted classes. For rank `n`, the first `n-1` dimensions must match `labels`. class_id: Class for which we want binary metrics. weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of `labels`. 
If the latter, it must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). Returns: A [D1, ... DN] `Tensor` of false positive counts. """ with ops.name_scope(None, 'false_positives', (predictions_idx, labels, weights)): labels, predictions_idx = _maybe_select_class_id(labels, predictions_idx, class_id) fp = sets.set_size( sets.set_difference(predictions_idx, labels, aminusb=True)) fp = math_ops.cast(fp, dtypes.float64) if weights is not None: with ops.control_dependencies((weights_broadcast_ops.assert_broadcastable( weights, fp),)): weights = math_ops.cast(weights, dtypes.float64) fp = math_ops.multiply(fp, weights) return fp def _streaming_sparse_false_positive_at_k(labels, predictions_idx, k=None, class_id=None, weights=None, name=None): """Calculates weighted per step false positives for precision@k. If `class_id` is specified, calculate binary true positives for `class_id` only. If `class_id` is not specified, calculate metrics for `k` predicted vs `n` label classes, where `n` is the 2nd dimension of `labels`. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: `int64` `Tensor` or `SparseTensor` with shape [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of target classes for the associated prediction. Commonly, N=1 and `labels` has shape [batch_size, num_labels]. [D1, ... DN] must match `predictions_idx`. predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`, top `k` predicted classes. For rank `n`, the first `n-1` dimensions must match `labels`. k: Integer, k for @k metric. This is only used for default op name. class_id: Class for which we want binary metrics. weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of `labels`. If the latter, it must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). name: Name of new variable, and namespace for other dependent ops. Returns: A tuple of `Variable` and update `Operation`. Raises: ValueError: If `weights` is not `None` and has an incompatible shape. """ with ops.name_scope(name, _at_k_name('false_positive', k, class_id=class_id), (predictions_idx, labels, weights)) as scope: fp = _sparse_false_positive_at_k( predictions_idx=predictions_idx, labels=labels, class_id=class_id, weights=weights) batch_total_fp = math_ops.cast(math_ops.reduce_sum(fp), dtypes.float64) var = metric_variable([], dtypes.float64, name=scope) return var, state_ops.assign_add(var, batch_total_fp, name='update') @tf_export(v1=['metrics.precision_at_top_k']) def precision_at_top_k(labels, predictions_idx, k=None, class_id=None, weights=None, metrics_collections=None, updates_collections=None, name=None): """Computes precision@k of the predictions with respect to sparse labels. Differs from `sparse_precision_at_k` in that predictions must be in the form of top `k` class indices, whereas `sparse_precision_at_k` expects logits. Refer to `sparse_precision_at_k` for more details. Args: labels: `int64` `Tensor` or `SparseTensor` with shape [D1, ... DN, num_labels] or [D1, ... DN], where the latter implies num_labels=1. N >= 1 and num_labels is the number of target classes for the associated prediction. Commonly, N=1 and `labels` has shape [batch_size, num_labels]. [D1, ... DN] must match `predictions`. Values should be in range [0, num_classes), where num_classes is the last dimension of `predictions`. 
Values outside this range are ignored. predictions_idx: Integer `Tensor` with shape [D1, ... DN, k] where N >= 1. Commonly, N=1 and predictions has shape [batch size, k]. The final dimension contains the top `k` predicted class indices. [D1, ... DN] must match `labels`. k: Integer, k for @k metric. Only used for the default op name. class_id: Integer class ID for which we want binary metrics. This should be in range [0, num_classes], where num_classes is the last dimension of `predictions`. If `class_id` is outside this range, the method returns NAN. weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of `labels`. If the latter, it must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). metrics_collections: An optional list of collections that values should be added to. updates_collections: An optional list of collections that updates should be added to. name: Name of new update operation, and namespace for other dependent ops. Returns: precision: Scalar `float64` `Tensor` with the value of `true_positives` divided by the sum of `true_positives` and `false_positives`. update_op: `Operation` that increments `true_positives` and `false_positives` variables appropriately, and whose value matches `precision`. Raises: ValueError: If `weights` is not `None` and its shape doesn't match `predictions`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled. """ if context.executing_eagerly(): raise RuntimeError('tf.metrics.precision_at_top_k is not ' 'supported when eager execution is enabled.') with ops.name_scope(name, _at_k_name('precision', k, class_id=class_id), (predictions_idx, labels, weights)) as scope: labels = _maybe_expand_labels(labels, predictions_idx) top_k_idx = math_ops.cast(predictions_idx, dtypes.int64) tp, tp_update = _streaming_sparse_true_positive_at_k( predictions_idx=top_k_idx, labels=labels, k=k, class_id=class_id, weights=weights) fp, fp_update = _streaming_sparse_false_positive_at_k( predictions_idx=top_k_idx, labels=labels, k=k, class_id=class_id, weights=weights) def precision_across_replicas(_, tp, fp): return math_ops.div(tp, math_ops.add(tp, fp), name=scope) metric = _aggregate_across_replicas( metrics_collections, precision_across_replicas, tp, fp) update = math_ops.div( tp_update, math_ops.add(tp_update, fp_update), name='update') if updates_collections: ops.add_to_collections(updates_collections, update) return metric, update @tf_export(v1=['metrics.sparse_precision_at_k']) @deprecated(None, 'Use precision_at_k instead') def sparse_precision_at_k(labels, predictions, k, class_id=None, weights=None, metrics_collections=None, updates_collections=None, name=None): """Renamed to `precision_at_k`, please use that method instead.""" return precision_at_k( labels=labels, predictions=predictions, k=k, class_id=class_id, weights=weights, metrics_collections=metrics_collections, updates_collections=updates_collections, name=name) @tf_export(v1=['metrics.precision_at_k']) def precision_at_k(labels, predictions, k, class_id=None, weights=None, metrics_collections=None, updates_collections=None, name=None): """Computes precision@k of the predictions with respect to sparse labels. If `class_id` is specified, we calculate precision by considering only the entries in the batch for which `class_id` is in the top-k highest `predictions`, and computing the fraction of them for which `class_id` is indeed a correct label. 
If `class_id` is not specified, we'll calculate precision as how often on average a class among the top-k classes with the highest predicted values of a batch entry is correct and can be found in the label for that entry. `precision_at_k` creates two local variables, `true_positive_at_<k>` and `false_positive_at_<k>`, that are used to compute the precision@k frequency. This frequency is ultimately returned as `precision_at_<k>`: an idempotent operation that simply divides `true_positive_at_<k>` by total (`true_positive_at_<k>` + `false_positive_at_<k>`). For estimation of the metric over a stream of data, the function creates an `update_op` operation that updates these variables and returns the `precision_at_<k>`. Internally, a `top_k` operation computes a `Tensor` indicating the top `k` `predictions`. Set operations applied to `top_k` and `labels` calculate the true positives and false positives weighted by `weights`. Then `update_op` increments `true_positive_at_<k>` and `false_positive_at_<k>` using these values. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: `int64` `Tensor` or `SparseTensor` with shape [D1, ... DN, num_labels] or [D1, ... DN], where the latter implies num_labels=1. N >= 1 and num_labels is the number of target classes for the associated prediction. Commonly, N=1 and `labels` has shape [batch_size, num_labels]. [D1, ... DN] must match `predictions`. Values should be in range [0, num_classes), where num_classes is the last dimension of `predictions`. Values outside this range are ignored. predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where N >= 1. Commonly, N=1 and predictions has shape [batch size, num_classes]. The final dimension contains the logit values for each class. [D1, ... DN] must match `labels`. k: Integer, k for @k metric. class_id: Integer class ID for which we want binary metrics. This should be in range [0, num_classes], where num_classes is the last dimension of `predictions`. If `class_id` is outside this range, the method returns NAN. weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of `labels`. If the latter, it must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). metrics_collections: An optional list of collections that values should be added to. updates_collections: An optional list of collections that updates should be added to. name: Name of new update operation, and namespace for other dependent ops. Returns: precision: Scalar `float64` `Tensor` with the value of `true_positives` divided by the sum of `true_positives` and `false_positives`. update_op: `Operation` that increments `true_positives` and `false_positives` variables appropriately, and whose value matches `precision`. Raises: ValueError: If `weights` is not `None` and its shape doesn't match `predictions`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled. 
""" if context.executing_eagerly(): raise RuntimeError('tf.metrics.sparse_precision_at_k is not ' 'supported when eager execution is enabled.') with ops.name_scope(name, _at_k_name('precision', k, class_id=class_id), (predictions, labels, weights)) as scope: _, top_k_idx = nn.top_k(predictions, k) return precision_at_top_k( labels=labels, predictions_idx=top_k_idx, k=k, class_id=class_id, weights=weights, metrics_collections=metrics_collections, updates_collections=updates_collections, name=scope) @tf_export(v1=['metrics.specificity_at_sensitivity']) def specificity_at_sensitivity(labels, predictions, sensitivity, weights=None, num_thresholds=200, metrics_collections=None, updates_collections=None, name=None): """Computes the specificity at a given sensitivity. The `specificity_at_sensitivity` function creates four local variables, `true_positives`, `true_negatives`, `false_positives` and `false_negatives` that are used to compute the specificity at the given sensitivity value. The threshold for the given sensitivity value is computed and used to evaluate the corresponding specificity. For estimation of the metric over a stream of data, the function creates an `update_op` operation that updates these variables and returns the `specificity`. `update_op` increments the `true_positives`, `true_negatives`, `false_positives` and `false_negatives` counts with the weight of each case found in the `predictions` and `labels`. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. For additional information about specificity and sensitivity, see the following: https://en.wikipedia.org/wiki/Sensitivity_and_specificity Args: labels: The ground truth values, a `Tensor` whose dimensions must match `predictions`. Will be cast to `bool`. predictions: A floating point `Tensor` of arbitrary shape and whose values are in the range `[0, 1]`. sensitivity: A scalar value in range `[0, 1]`. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). num_thresholds: The number of thresholds to use for matching the given sensitivity. metrics_collections: An optional list of collections that `specificity` should be added to. updates_collections: An optional list of collections that `update_op` should be added to. name: An optional variable_scope name. Returns: specificity: A scalar `Tensor` representing the specificity at the given `sensitivity` value. update_op: An operation that increments the `true_positives`, `true_negatives`, `false_positives` and `false_negatives` variables appropriately and whose value matches `specificity`. Raises: ValueError: If `predictions` and `labels` have mismatched shapes, if `weights` is not `None` and its shape doesn't match `predictions`, or if `sensitivity` is not between 0 and 1, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled. 
""" if context.executing_eagerly(): raise RuntimeError('tf.metrics.specificity_at_sensitivity is not ' 'supported when eager execution is enabled.') if sensitivity < 0 or sensitivity > 1: raise ValueError('`sensitivity` must be in the range [0, 1].') with variable_scope.variable_scope(name, 'specificity_at_sensitivity', (predictions, labels, weights)): kepsilon = 1e-7 # to account for floating point imprecisions thresholds = [ (i + 1) * 1.0 / (num_thresholds - 1) for i in range(num_thresholds - 2) ] thresholds = [0.0 - kepsilon] + thresholds + [1.0 - kepsilon] values, update_ops = _confusion_matrix_at_thresholds( labels, predictions, thresholds, weights) def compute_specificity_at_sensitivity(tp, tn, fp, fn, name): """Computes the specificity at the given sensitivity. Args: tp: True positives. tn: True negatives. fp: False positives. fn: False negatives. name: The name of the operation. Returns: The specificity using the aggregated values. """ sensitivities = math_ops.div(tp, tp + fn + kepsilon) # We'll need to use this trick until tf.argmax allows us to specify # whether we should use the first or last index in case of ties. min_val = math_ops.reduce_min(math_ops.abs(sensitivities - sensitivity)) indices_at_minval = math_ops.equal( math_ops.abs(sensitivities - sensitivity), min_val) indices_at_minval = math_ops.cast(indices_at_minval, dtypes.int64) indices_at_minval = math_ops.cumsum(indices_at_minval) tf_index = math_ops.argmax(indices_at_minval, 0) tf_index = math_ops.cast(tf_index, dtypes.int32) # Now, we have the implicit threshold, so compute the specificity: return math_ops.div(tn[tf_index], tn[tf_index] + fp[tf_index] + kepsilon, name) def specificity_across_replicas(_, values): return compute_specificity_at_sensitivity( values['tp'], values['tn'], values['fp'], values['fn'], 'value') specificity = _aggregate_across_replicas( metrics_collections, specificity_across_replicas, values) update_op = compute_specificity_at_sensitivity( update_ops['tp'], update_ops['tn'], update_ops['fp'], update_ops['fn'], 'update_op') if updates_collections: ops.add_to_collections(updates_collections, update_op) return specificity, update_op
tensorflow-master
tensorflow/python/ops/metrics_impl.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # pylint: disable=g-short-docstring-punctuation """Image processing and decoding ops. See the [Images](https://tensorflow.org/api_guides/python/image) guide. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function # go/tf-wildcard-import # pylint: disable=wildcard-import from tensorflow.python.ops.gen_image_ops import * from tensorflow.python.ops.image_ops_impl import * # pylint: enable=wildcard-import # TODO(drpng): remove these once internal use has discontinued. # pylint: disable=unused-import from tensorflow.python.ops.image_ops_impl import _Check3DImage from tensorflow.python.ops.image_ops_impl import _ImageDimensions # pylint: enable=unused-import
tensorflow-master
tensorflow/python/ops/image_ops.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # pylint: disable=g-short-docstring-punctuation """Histograms. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import clip_ops from tensorflow.python.ops import gen_math_ops from tensorflow.python.ops import math_ops from tensorflow.python.util.tf_export import tf_export @tf_export('histogram_fixed_width_bins') def histogram_fixed_width_bins(values, value_range, nbins=100, dtype=dtypes.int32, name=None): """Bins the given values for use in a histogram. Given the tensor `values`, this operation returns a rank 1 `Tensor` representing the indices of a histogram into which each element of `values` would be binned. The bins are equal width and determined by the arguments `value_range` and `nbins`. Args: values: Numeric `Tensor`. value_range: Shape [2] `Tensor` of same `dtype` as `values`. values <= value_range[0] will be mapped to hist[0], values >= value_range[1] will be mapped to hist[-1]. nbins: Scalar `int32 Tensor`. Number of histogram bins. dtype: dtype for returned histogram. name: A name for this operation (defaults to 'histogram_fixed_width'). Returns: A `Tensor` holding the indices of the binned values whose shape matches `values`. Examples: ```python # Bins will be: (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf) nbins = 5 value_range = [0.0, 5.0] new_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15] with tf.compat.v1.get_default_session() as sess: indices = tf.histogram_fixed_width_bins(new_values, value_range, nbins=5) variables.global_variables_initializer().run() sess.run(indices) => [0, 0, 1, 2, 4] ``` """ with ops.name_scope(name, 'histogram_fixed_width_bins', [values, value_range, nbins]): values = ops.convert_to_tensor(values, name='values') shape = array_ops.shape(values) values = array_ops.reshape(values, [-1]) value_range = ops.convert_to_tensor(value_range, name='value_range') nbins = ops.convert_to_tensor(nbins, dtype=dtypes.int32, name='nbins') nbins_float = math_ops.cast(nbins, values.dtype) # Map tensor values that fall within value_range to [0, 1]. scaled_values = math_ops.truediv( values - value_range[0], value_range[1] - value_range[0], name='scaled_values') # map tensor values within the open interval value_range to {0,.., nbins-1}, # values outside the open interval will be zero or less, or nbins or more. indices = math_ops.floor(nbins_float * scaled_values, name='indices') # Clip edge cases (e.g. value = value_range[1]) or "outliers." 
indices = math_ops.cast( clip_ops.clip_by_value(indices, 0, nbins_float - 1), dtypes.int32) return array_ops.reshape(indices, shape) @tf_export('histogram_fixed_width') def histogram_fixed_width(values, value_range, nbins=100, dtype=dtypes.int32, name=None): """Return histogram of values. Given the tensor `values`, this operation returns a rank 1 histogram counting the number of entries in `values` that fell into every bin. The bins are equal width and determined by the arguments `value_range` and `nbins`. Args: values: Numeric `Tensor`. value_range: Shape [2] `Tensor` of same `dtype` as `values`. values <= value_range[0] will be mapped to hist[0], values >= value_range[1] will be mapped to hist[-1]. nbins: Scalar `int32 Tensor`. Number of histogram bins. dtype: dtype for returned histogram. name: A name for this operation (defaults to 'histogram_fixed_width'). Returns: A 1-D `Tensor` holding histogram of values. Examples: ```python # Bins will be: (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf) nbins = 5 value_range = [0.0, 5.0] new_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15] with tf.compat.v1.get_default_session() as sess: hist = tf.histogram_fixed_width(new_values, value_range, nbins=5) variables.global_variables_initializer().run() sess.run(hist) => [2, 1, 1, 0, 2] ``` """ with ops.name_scope(name, 'histogram_fixed_width', [values, value_range, nbins]) as name: # pylint: disable=protected-access return gen_math_ops._histogram_fixed_width( values, value_range, nbins, dtype=dtype, name=name) # pylint: enable=protected-access
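

# A minimal usage sketch of the two ops above, assuming TF 1.x-style graph
# execution. With the 6-element input used in the docstring examples, the
# per-element bin indices come out as [0, 0, 1, 2, 4, 4] (one index per input
# value) and the counts as [2, 1, 1, 0, 2]; `_example_histogram_usage` is
# hypothetical and exists only for illustration.
def _example_histogram_usage():
  """Minimal, hypothetical sketch of histogram_fixed_width(_bins)."""
  import tensorflow as tf

  graph = tf.Graph()
  with graph.as_default():
    values = tf.constant([-1.0, 0.0, 1.5, 2.0, 5.0, 15.0])
    value_range = [0.0, 5.0]
    # Per-element bin index: values below the range map to bin 0, values at
    # or above the top edge map to the last bin.
    indices = tf.histogram_fixed_width_bins(values, value_range, nbins=5)
    # Per-bin counts over the same values.
    counts = tf.histogram_fixed_width(values, value_range, nbins=5)
  with tf.compat.v1.Session(graph=graph) as sess:
    return sess.run([indices, counts])  # [0 0 1 2 4 4], [2 1 1 0 2]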
tensorflow-master
tensorflow/python/ops/histogram_ops.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A Dual Coordinate Ascent optimizer library for training fast linear models.
"""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.framework import ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_sdca_ops import *
# pylint: enable=wildcard-import

ops.NotDifferentiable("SdcaFprint")
ops.NotDifferentiable("SdcaOptimizer")
ops.NotDifferentiable("SdcaOptimizerV2")
ops.NotDifferentiable("SdcaShrinkL1")
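

# `ops.NotDifferentiable` (exposed publicly as `tf.no_gradient`) registers an
# op type as having no gradient, so backprop fails loudly instead of silently
# producing wrong gradients. A minimal sketch with a hypothetical op type
# name "MyLookupOp"; the name is illustrative only.
def _example_not_differentiable_registration():
  """Minimal, hypothetical sketch of marking an op type as non-differentiable."""
  from tensorflow.python.framework import ops as _ops

  # Each op type may be registered at most once per process.
  _ops.NotDifferentiable("MyLookupOp")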
tensorflow-master
tensorflow/python/ops/sdca_ops.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Ops for GPU collective operations implemented using NVIDIA nccl.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import threading from tensorflow.python.eager import context from tensorflow.python.eager import def_function from tensorflow.python.framework import device from tensorflow.python.framework import ops from tensorflow.python.ops import gen_nccl_ops _module_lock = threading.Lock() _shared_name_counter = 0 def all_sum(tensors): """Returns a list of tensors with the all-reduce sum across `tensors`. The computation is done with an all-reduce operation, so if only some of the returned tensors are evaluated then the computation will hang. Args: tensors: The input tensors across which to sum; must be assigned to GPU devices. Returns: List of tensors, each with the sum of the input tensors, where tensor i has the same device as `tensors[i]`. """ return _apply_all_reduce('sum', tensors) @ops.RegisterGradient('NcclAllReduce') def _all_sum_grad(op, grad): """The gradients for `all_sum`. Args: op: The `all_sum` `Operation` that we are differentiating. grad: Gradient with respect to the output of the `all_sum` op. Returns: The gradient with respect to the output of `all_sum`. Raises: LookupError: If `reduction` is not `sum`. """ if op.get_attr('reduction') != b'sum': raise LookupError('No gradient defined for NcclAllReduce except sum.') _check_device(grad, expected=op.device) num_devices = op.get_attr('num_devices') shared_name = op.get_attr('shared_name') + b'_grad' with ops.device(op.device): return gen_nccl_ops.nccl_all_reduce( input=grad, reduction='sum', num_devices=num_devices, shared_name=shared_name) def all_prod(tensors): """Returns a list of tensors with the all-reduce product across `tensors`. The computation is done with an all-reduce operation, so if only some of the returned tensors are evaluated then the computation will hang. Args: tensors: The input tensors across which to multiply; must be assigned to GPU devices. Returns: List of tensors, each with the product of the input tensors, where tensor i has the same device as `tensors[i]`. """ return _apply_all_reduce('prod', tensors) def all_min(tensors): """Returns a list of tensors with the all-reduce min across `tensors`. The computation is done with an all-reduce operation, so if only some of the returned tensors are evaluated then the computation will hang. Args: tensors: The input tensors across which to reduce; must be assigned to GPU devices. Returns: List of tensors, each with the minimum of the input tensors, where tensor i has the same device as `tensors[i]`. """ return _apply_all_reduce('min', tensors) def all_max(tensors): """Returns a list of tensors with the all-reduce max across `tensors`. 
The computation is done with an all-reduce operation, so if only some of the returned tensors are evaluated then the computation will hang. Args: tensors: The input tensors across which to reduce; must be assigned to GPU devices. Returns: List of tensors, each with the maximum of the input tensors, where tensor i has the same device as `tensors[i]`. """ return _apply_all_reduce('max', tensors) def reduce_sum(tensors): """Returns a tensor with the reduce sum across `tensors`. The computation is done with a reduce operation, so only one tensor is returned. Args: tensors: The input tensors across which to sum; must be assigned to GPU devices. Returns: A tensor containing the sum of the input tensors. Raises: LookupError: If context is not currently using a GPU device. """ return _apply_reduce('sum', tensors) @ops.RegisterGradient('NcclReduce') def _reduce_sum_grad(op, grad): """The gradients for input `Operation` of `reduce_sum`. Args: op: The `sum send` `Operation` that we are differentiating. grad: Gradient with respect to the output of the `reduce_sum` op. Returns: The gradient with respect to the input of `reduce_sum` op. Raises: LookupError: If the reduction attribute of op is not `sum`. """ if op.get_attr('reduction') != b'sum': raise LookupError('No gradient defined for NcclReduce except sum.') _check_device(grad, expected=op.device) with ops.device(op.device): result = gen_nccl_ops.nccl_broadcast(input=grad, shape=grad.shape) return [result] * len(op.inputs) def broadcast(tensor): """Returns a tensor that can be efficiently transferred to other devices. Args: tensor: The tensor to send; must be assigned to a GPU device. Returns: A tensor with the value of `src_tensor`, which can be used as input to ops on other GPU devices. """ _check_device(tensor) with ops.device(tensor.device): return gen_nccl_ops.nccl_broadcast(input=tensor, shape=tensor.shape) @ops.RegisterGradient('NcclBroadcast') def _broadcast_grad(op, accumulated_grad): """The gradients for input `Operation` of `broadcast`. Args: op: The `broadcast send` `Operation` that we are differentiating. accumulated_grad: Accumulated gradients with respect to the output of the `broadcast` op. Returns: Gradients with respect to the input of `broadcast`. """ # Grab inputs of accumulated_grad and replace accumulation with reduce_sum. grads = [t for t in accumulated_grad.op.inputs] for t in grads: _check_device(t) with ops.device(op.device): return gen_nccl_ops.nccl_reduce(input=grads, reduction='sum') def _apply_all_reduce(reduction, tensors): """Helper function for all_* functions.""" if not tensors: raise ValueError('Must pass >0 tensors to all reduce operations') shared_name = _get_shared_name() def _all_reduce(): """Call nccl allreduce.""" res = [] for t in tensors: _check_device(t) with ops.device(t.device): res.append( gen_nccl_ops.nccl_all_reduce( input=t, reduction=reduction, num_devices=len(tensors), shared_name=shared_name)) return res if context.executing_eagerly(): # Nccl ops will block unless they are executed concurrently such as in a # graph or a defun. 
return def_function.function(_all_reduce)() else: return _all_reduce() def _apply_reduce(reduction, tensors): """Helper function for reduce_* functions.""" if not tensors: raise ValueError('Must pass >0 tensors to reduce operations') for t in tensors: _check_device(t) result = gen_nccl_ops.nccl_reduce(input=tensors, reduction=reduction) try: next(t for t in tensors if t.device == result.device) except StopIteration: raise ValueError('One input tensor must be assigned to current device') return result def _get_shared_name(): global _shared_name_counter with _module_lock: val = _shared_name_counter _shared_name_counter += 1 return 'c%s' % val def _check_device(tensor, expected=None): if not device.canonical_name(tensor.device): raise ValueError('Device assignment required for nccl collective ops') if expected and expected != tensor.device: raise ValueError('Expected device %s, got %s' % (expected, tensor.device))
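

# A minimal usage sketch of `all_sum`, assuming a machine with at least two
# GPUs, an NCCL-enabled TensorFlow build, and TF 1.x-style graph execution.
# Device strings and tensor values are illustrative only; because the outputs
# come from a single all-reduce, they must all be evaluated together.
def _example_all_sum_usage():
  """Minimal, hypothetical sketch of an NCCL all-reduce sum across two GPUs."""
  import tensorflow as tf
  from tensorflow.python.ops import nccl_ops

  graph = tf.Graph()
  with graph.as_default():
    per_gpu = []
    for i in range(2):
      with tf.device('/gpu:%d' % i):
        # Each input tensor must be assigned to a GPU device.
        per_gpu.append(tf.constant([1.0, 2.0]) * float(i + 1))
    # One output per input, each placed on the same GPU as its input and
    # holding the elementwise sum across all inputs.
    summed = nccl_ops.all_sum(per_gpu)
  with tf.compat.v1.Session(graph=graph) as sess:
    return sess.run(summed)  # both entries are [3., 6.]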
tensorflow-master
tensorflow/python/ops/nccl_ops.py
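The NCCL helpers above (`reduce_sum`, `broadcast`, and the `_apply_all_reduce`/`_apply_reduce` internals) are normally driven by code that builds one tensor per GPU and then calls the public wrappers. The following is a minimal graph-mode sketch of that pattern; the two-GPU setup, device strings, and constant values are illustrative assumptions, and actually running it requires NCCL-capable GPUs.

```python
import tensorflow.compat.v1 as tf
from tensorflow.python.ops import nccl_ops

with tf.Graph().as_default():
  # One tensor per GPU; every input must carry an explicit device
  # assignment, otherwise _check_device raises a ValueError.
  per_gpu = []
  for i in range(2):
    with tf.device('/gpu:%d' % i):
      per_gpu.append(tf.constant([1.0, 2.0]) * (i + 1))

  # Single-output reduction: the sum is placed on the device of one input.
  total = nccl_ops.reduce_sum(per_gpu)

  # One-to-many transfer of a GPU-resident tensor to other GPU devices.
  with tf.device('/gpu:0'):
    source = tf.constant([3.0, 4.0])
  sent = nccl_ops.broadcast(source)
```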
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Decorator to overrides the gradient for a function.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python import pywrap_tensorflow from tensorflow.python.eager import backprop from tensorflow.python.eager import context from tensorflow.python.eager import tape as tape_lib from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import gen_array_ops from tensorflow.python.ops import resource_variable_ops from tensorflow.python.ops import variable_scope from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util import nest from tensorflow.python.util import tf_decorator from tensorflow.python.util import tf_inspect from tensorflow.python.util.tf_export import tf_export def copy_handle_data(source_t, target_t): """Copies HandleData for variant and resource type tensors if available. The CppShapeInferenceResult::HandleData proto contains information about the shapes and types of the element tensors of resource/variant type tensors. We need to copy this across function boundaries, i.e., when capturing a placeholder or when returning a function tensor as output. If we don't do this the element tensors will have unknown shapes, e.g., if a TensorList variant tensor is captured as a placeholder, elements popped from that list would have unknown shape. Args: source_t: The tensor to copy HandleData from. target_t: The tensor to copy HandleData to. """ if (target_t.dtype == dtypes.resource or target_t.dtype == dtypes.variant): if isinstance(source_t, ops.EagerTensor): handle_data = source_t._handle_data # pylint: disable=protected-access else: handle_data = resource_variable_ops.get_resource_handle_data(source_t) if (handle_data is not None and handle_data.is_set and handle_data.shape_and_type): # pylint: disable=protected-access pywrap_tensorflow.SetHandleShapeAndType(target_t.graph._c_graph, target_t._as_tf_output(), handle_data.SerializeToString()) # pylint: enable=protected-access # Ensure that shapes and dtypes are propagated. shapes, types = zip(*[(pair.shape, pair.dtype) for pair in handle_data.shape_and_type]) ranks = [len(s.dim) if not s.unknown_rank else -1 for s in shapes] shapes = [[d.size for d in s.dim] if not s.unknown_rank else None for s in shapes] pywrap_tensorflow.TF_GraphSetOutputHandleShapesAndTypes_wrapper( target_t._op._graph._c_graph, # pylint: disable=protected-access target_t._as_tf_output(), # pylint: disable=protected-access shapes, ranks, types) @tf_export("custom_gradient") def custom_gradient(f): """Decorator to define a function with a custom gradient. This decorator allows fine grained control over the gradients of a sequence for operations. 
This may be useful for multiple reasons, including providing a more efficient or numerically stable gradient for a sequence of operations. For example, consider the following function that commonly occurs in the computation of cross entropy and log likelihoods: ```python def log1pexp(x): return tf.math.log(1 + tf.exp(x)) ``` Due to numerical instability, the gradient this function evaluated at x=100 is NaN. For example: ```python x = tf.constant(100.) y = log1pexp(x) dy = tf.gradients(y, x) # Will be NaN when evaluated. ``` The gradient expression can be analytically simplified to provide numerical stability: ```python @tf.custom_gradient def log1pexp(x): e = tf.exp(x) def grad(dy): return dy * (1 - 1 / (1 + e)) return tf.math.log(1 + e), grad ``` With this definition, the gradient at x=100 will be correctly evaluated as 1.0. See also `tf.RegisterGradient` which registers a gradient function for a primitive TensorFlow operation. `tf.custom_gradient` on the other hand allows for fine grained control over the gradient computation of a sequence of operations. Note that if the decorated function uses `Variable`s, the enclosing variable scope must be using `ResourceVariable`s. Args: f: function `f(*x)` that returns a tuple `(y, grad_fn)` where: - `x` is a sequence of `Tensor` inputs to the function. - `y` is a `Tensor` or sequence of `Tensor` outputs of applying TensorFlow operations in `f` to `x`. - `grad_fn` is a function with the signature `g(*grad_ys)` which returns a list of `Tensor`s - the derivatives of `Tensor`s in `y` with respect to the `Tensor`s in `x`. `grad_ys` is a `Tensor` or sequence of `Tensor`s the same size as `y` holding the initial value gradients for each `Tensor` in `y`. In a pure mathematical sense, a vector-argument vector-valued function `f`'s derivatives should be its Jacobian matrix `J`. Here we are expressing the Jacobian `J` as a function `grad_fn` which defines how `J` will transform a vector `grad_ys` when left-multiplied with it (`grad_ys * J`). This functional representation of a matrix is convenient to use for chain-rule calculation (in e.g. the back-propagation algorithm). If `f` uses `Variable`s (that are not part of the inputs), i.e. through `get_variable`, then `grad_fn` should have signature `g(*grad_ys, variables=None)`, where `variables` is a list of the `Variable`s, and return a 2-tuple `(grad_xs, grad_vars)`, where `grad_xs` is the same as above, and `grad_vars` is a `list<Tensor>` with the derivatives of `Tensor`s in `y` with respect to the variables (that is, grad_vars has one Tensor per variable in variables). Returns: A function `h(x)` which returns the same value as `f(x)[0]` and whose gradient (as calculated by `tf.gradients`) is determined by `f(x)[1]`. """ def decorated(*args, **kwargs): """Decorated function with custom gradient.""" if context.executing_eagerly(): return _eager_mode_decorator(f, *args, **kwargs) else: return _graph_mode_decorator(f, *args, **kwargs) return tf_decorator.make_decorator(f, decorated) def _graph_mode_decorator(f, *args, **kwargs): """Implement custom gradient decorator for graph mode.""" # TODO(rsepassi): Add support for kwargs if kwargs: raise ValueError( "The custom_gradient decorator currently supports keywords " "arguments only when eager execution is enabled.") name = "CustomGradient-%s" % ops.uid() args = [ops.convert_to_tensor(x) for x in args] # Checking global and local variables attempts to ensure that no non-resource # Variables are added to the graph. 
current_var_scope = variable_scope.get_variable_scope() before_vars = set(current_var_scope.global_variables() + current_var_scope.local_variables()) with backprop.GradientTape() as tape: result, grad_fn = f(*args) after_vars = set(current_var_scope.global_variables() + current_var_scope.local_variables()) new_vars = after_vars - before_vars for v in new_vars: if not resource_variable_ops.is_resource_variable(v): raise TypeError( "All variables used by a function wrapped with @custom_gradient must " "be `ResourceVariable`s. Ensure that no `variable_scope` is created " "with `use_resource=False`.") # The variables that grad_fn needs to return gradients for are the set of # variables used that are *not* part of the inputs. variables = list(set(tape.watched_variables()) - set(args)) grad_argspec = tf_inspect.getfullargspec(grad_fn) variables_in_signature = ("variables" in grad_argspec.args or grad_argspec.varkw) if variables and not variables_in_signature: raise TypeError("If using @custom_gradient with a function that " "uses variables, then grad_fn must accept a keyword " "argument 'variables'.") if variables_in_signature and not variables: # User seems to intend to use variables but none were captured. if not variable_scope.get_variable_scope().use_resource: raise TypeError("If using @custom_gradient with a function that " "uses variables, the enclosing variable scope must " "have use_resource=True.") else: logging.warn("@custom_gradient grad_fn has 'variables' in signature, but " "no ResourceVariables were used on the forward pass.") flat_result = nest.flatten(result) flat_result_len = len(flat_result) all_tensors = flat_result + args + variables def tape_grad_fn(*result_grads): """Custom grad fn wrapper.""" result_grads = result_grads[:flat_result_len] if variables: input_grads, variable_grads = grad_fn(*result_grads, variables=variables) if len(variable_grads) != len(variables): raise ValueError("Must return gradient for each variable from " "@custom_gradient grad_fn.") else: input_grads = grad_fn(*result_grads) variable_grads = [] # Need to return one value per input to the IdentityN, so pad the # gradients of the inputs of the custom_gradient function with the # gradients of the outputs as well. input_grads = nest.flatten(input_grads) return ([None] * flat_result_len) + input_grads + variable_grads @ops.RegisterGradient(name) def internal_grad_fn(unused_op, *result_grads): # pylint: disable=unused-variable """Custom grad fn wrapper.""" return tape_grad_fn(*result_grads) original_tensors = all_tensors with ops.get_default_graph().gradient_override_map({"IdentityN": name}): all_tensors = array_ops.identity_n(all_tensors) original_tensors = [ops.convert_to_tensor(x) for x in original_tensors] # Propagate handle data for happier shape inference for resource variables. 
for i, t in enumerate(original_tensors): if t.dtype == dtypes.resource and hasattr(t, "_handle_data"): all_tensors[i]._handle_data = t._handle_data # pylint: disable=protected-access tape_lib.record_operation( f.__name__, all_tensors, original_tensors, tape_grad_fn) for ot, t in zip(original_tensors, all_tensors): copy_handle_data(ot, t) return nest.pack_sequence_as( structure=result, flat_sequence=all_tensors[:flat_result_len]) def _eager_mode_decorator(f, *args, **kwargs): """Implement custom gradient decorator for eager mode.""" with backprop.GradientTape() as tape: result, grad_fn = f(*args, **kwargs) all_inputs = list(args) + list(kwargs.values()) # The variables that grad_fn needs to return gradients for are the set of # variables used that are *not* part of the inputs. variables = [v for v in set(tape.watched_variables()) if v not in all_inputs] grad_argspec = tf_inspect.getfullargspec(grad_fn) if (variables and ("variables" not in grad_argspec.args) and not grad_argspec.varkw): raise TypeError("If using @custom_gradient with a function that " "uses variables, then grad_fn must accept a keyword " "argument 'variables'.") flat_result = nest.flatten(result) # TODO(apassos) consider removing the identity below. flat_result = [gen_array_ops.identity(x) for x in flat_result] input_tensors = [ops.convert_to_tensor(x) for x in list(args) + list(variables)] arg_count = len(args) def actual_grad_fn(*result_grads): """Custom grad fn wrapper.""" if variables: input_grads, variable_grads = grad_fn(*result_grads, variables=variables) if len(variable_grads) != len(variables): raise ValueError("Must return gradient for each variable from " "@custom_gradient grad_fn.") else: input_grads = grad_fn(*result_grads) variable_grads = [] flat_grads = nest.flatten(input_grads) if len(flat_grads) != arg_count: raise ValueError( "custom_gradient function expected to return", arg_count, "gradients but returned", len(flat_grads), "instead.") return nest.flatten(input_grads) + variable_grads tape_lib.record_operation(f.__name__, flat_result, input_tensors, actual_grad_fn) flat_result = list(flat_result) return nest.pack_sequence_as(result, flat_result) @tf_export("recompute_grad") def recompute_grad(f): """An eager-compatible version of recompute_grad. For f(*args, **kwargs), this supports gradients with respect to args, or to gradients with respect to any variables residing in the kwarg 'variables'. Note that for keras layer and model objects, this is handled automatically. Warning: If `f` was originally a tf.keras Model or Layer object, `g` will not be able to access the member variables of that object, because `g` returns through the wrapper function `inner`. When recomputing gradients through objects that inherit from keras, we suggest keeping a reference to the underlying object around for the purpose of accessing these variables. Args: f: function `f(*x)` that returns a `Tensor` or sequence of `Tensor` outputs. Returns: A function `g` that wraps `f`, but which recomputes `f` on the backwards pass of a gradient call. 
""" # TODO(cdfreeman) Add is_recomputing functionality from graph mode version @custom_gradient def inner(*args, **kwargs): """Inner function closure for calculating gradients.""" result = f(*args, **kwargs) def grad(dresult, variables=None): """Gradient function calculation for inner function.""" with backprop.GradientTape() as t: t.watch(args) if variables is not None: t.watch(variables) with ops.control_dependencies([dresult]): result = f(*args, **kwargs) kw_vars = [] if variables is not None: kw_vars = list(variables) grads = t.gradient( result, list(args) + kw_vars, output_gradients=[dresult]) return grads[:len(args)], grads[len(args):] return result, grad return inner
tensorflow-master
tensorflow/python/ops/custom_gradient.py
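The `variables` keyword described in the `custom_gradient` docstring is easiest to see in a small end-to-end sketch. The example below is illustrative only: the function name `scale_by_v` and the constants are made up, but the shape of the API (forward pass reading a `ResourceVariable`, `grad_fn` returning a `(grad_xs, grad_vars)` pair) follows the contract documented above.

```python
import tensorflow as tf

v = tf.Variable(2.0)  # A ResourceVariable, as the docstring requires.

@tf.custom_gradient
def scale_by_v(x):
  """Forward pass uses a variable that is not one of the inputs."""
  y = x * v

  def grad(dy, variables=None):
    # First element: gradient w.r.t. the input `x`.
    # Second element: one gradient per captured variable in `variables`.
    grad_x = dy * v
    grad_vars = [dy * x] if variables else []
    return grad_x, grad_vars

  return y, grad

x = tf.constant(3.0)
with tf.GradientTape() as tape:
  tape.watch(x)
  y = scale_by_v(x)
dx, dv = tape.gradient(y, [x, v])  # dx == 2.0 (= v), dv == 3.0 (= x)
```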
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Helper functions for creating partitioned variables. This is a convenient abstraction to partition a large variable across multiple smaller variables that can be assigned to different devices. The full variable can be reconstructed by concatenating the smaller variables. Using partitioned variables instead of a single variable is mostly a performance choice. It however also has an impact on: 1. Random initialization, as the random number generator is called once per slice 2. Updates, as they happen in parallel across slices A key design goal is to allow a different graph to repartition a variable with the same name but different slicings, including possibly no partitions. TODO(touts): If an initializer provides a seed, the seed must be changed deterministically for each slice, maybe by adding one to it, otherwise each slice will use the same values. Maybe this can be done by passing the slice offsets to the initializer functions. Typical usage: ```python # Create a list of partitioned variables with: vs = create_partitioned_variables( <shape>, <slicing>, <initializer>, name=<optional-name>) # Pass the list as inputs to embedding_lookup for sharded, parallel lookup: y = embedding_lookup(vs, ids, partition_strategy="div") # Or fetch the variables in parallel to speed up large matmuls: z = matmul(x, concat(slice_dim, vs)) ``` """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import math from tensorflow.python.framework import dtypes from tensorflow.python.framework import tensor_shape from tensorflow.python.ops import variable_scope from tensorflow.python.util import deprecation from tensorflow.python.util.tf_export import tf_export __all__ = [ "create_partitioned_variables", "variable_axis_size_partitioner", "min_max_variable_partitioner", "fixed_size_partitioner", ] @tf_export(v1=["variable_axis_size_partitioner"]) def variable_axis_size_partitioner( max_shard_bytes, axis=0, bytes_per_string_element=16, max_shards=None): """Get a partitioner for VariableScope to keep shards below `max_shard_bytes`. This partitioner will shard a Variable along one axis, attempting to keep the maximum shard size below `max_shard_bytes`. In practice, this is not always possible when sharding along only one axis. When this happens, this axis is sharded as much as possible (i.e., every dimension becomes a separate shard). If the partitioner hits the `max_shards` limit, then each shard may end up larger than `max_shard_bytes`. By default `max_shards` equals `None` and no limit on the number of shards is enforced. One reasonable value for `max_shard_bytes` is `(64 << 20) - 1`, or almost `64MB`, to keep below the protobuf byte limit. Args: max_shard_bytes: The maximum size any given shard is allowed to be. axis: The axis to partition along. Default: outermost axis. 
bytes_per_string_element: If the `Variable` is of type string, this provides an estimate of how large each scalar in the `Variable` is. max_shards: The maximum number of shards in int created taking precedence over `max_shard_bytes`. Returns: A partition function usable as the `partitioner` argument to `variable_scope` and `get_variable`. Raises: ValueError: If any of the byte counts are non-positive. """ if max_shard_bytes < 1 or bytes_per_string_element < 1: raise ValueError( "Both max_shard_bytes and bytes_per_string_element must be positive.") if max_shards and max_shards < 1: raise ValueError( "max_shards must be positive.") def _partitioner(shape, dtype): """Partitioner that partitions shards to have max_shard_bytes total size. Args: shape: A `TensorShape`. dtype: A `DType`. Returns: A tuple representing how much to slice each axis in shape. Raises: ValueError: If shape is not a fully defined `TensorShape` or dtype is not a `DType`. """ if not isinstance(shape, tensor_shape.TensorShape): raise ValueError("shape is not a TensorShape: %s" % shape) if not shape.is_fully_defined(): raise ValueError("shape is not fully defined: %s" % shape) if not isinstance(dtype, dtypes.DType): raise ValueError("dtype is not a DType: %s" % dtype) if dtype.base_dtype == dtypes.string: element_size = bytes_per_string_element else: element_size = dtype.size partitions = [1] * shape.ndims bytes_per_slice = 1.0 * ( shape.num_elements() / shape.dims[axis].value) * element_size # How many slices can we fit on one shard of size at most max_shard_bytes? # At least one slice is required. slices_per_shard = max(1, math.floor(max_shard_bytes / bytes_per_slice)) # How many shards do we need for axis given that each shard fits # slices_per_shard slices from a total of shape[axis] slices? axis_shards = int(math.ceil( 1.0 * shape.dims[axis].value / slices_per_shard)) if max_shards: axis_shards = min(max_shards, axis_shards) partitions[axis] = axis_shards return partitions return _partitioner @tf_export(v1=["min_max_variable_partitioner"]) def min_max_variable_partitioner(max_partitions=1, axis=0, min_slice_size=256 << 10, bytes_per_string_element=16): """Partitioner to allocate minimum size per slice. Returns a partitioner that partitions the variable of given shape and dtype such that each partition has a minimum of `min_slice_size` slice of the variable. The maximum number of such partitions (upper bound) is given by `max_partitions`. Args: max_partitions: Upper bound on the number of partitions. Defaults to 1. axis: Axis along which to partition the variable. Defaults to 0. min_slice_size: Minimum size of the variable slice per partition. Defaults to 256K. bytes_per_string_element: If the `Variable` is of type string, this provides an estimate of how large each scalar in the `Variable` is. Returns: A partition function usable as the `partitioner` argument to `variable_scope` and `get_variable`. """ def _partitioner(shape, dtype): """Partitioner that partitions list for a variable of given shape and type. Ex: Consider partitioning a variable of type float32 with shape=[1024, 1024]. If `max_partitions` >= 16, this function would return [(1024 * 1024 * 4) / (256 * 1024), 1] = [16, 1]. If `max_partitions` < 16, this function would return [`max_partitions`, 1]. Args: shape: Shape of the variable. dtype: Type of the variable. Returns: List of partitions for each axis (currently only one axis can be partitioned). Raises: ValueError: If axis to partition along does not exist for the variable. 
""" if axis >= len(shape): raise ValueError("Can not partition variable along axis %d when shape is " "only %s" % (axis, shape)) if dtype.base_dtype == dtypes.string: bytes_per_element = bytes_per_string_element else: bytes_per_element = dtype.size total_size_bytes = shape.num_elements() * bytes_per_element partitions = total_size_bytes / min_slice_size partitions_list = [1] * len(shape) # We can not partition the variable beyond what its shape or # `max_partitions` allows. partitions_list[axis] = max(1, min(shape.dims[axis].value, max_partitions, int(math.ceil(partitions)))) return partitions_list return _partitioner @tf_export(v1=["fixed_size_partitioner"]) def fixed_size_partitioner(num_shards, axis=0): """Partitioner to specify a fixed number of shards along given axis. Args: num_shards: `int`, number of shards to partition variable. axis: `int`, axis to partition on. Returns: A partition function usable as the `partitioner` argument to `variable_scope` and `get_variable`. """ def _partitioner(shape, **unused_args): partitions_list = [1] * len(shape) partitions_list[axis] = min(num_shards, shape.dims[axis].value) return partitions_list return _partitioner @tf_export(v1=["create_partitioned_variables"]) @deprecation.deprecated( date=None, instructions="Use `tf.get_variable` with a partitioner set.") def create_partitioned_variables( shape, slicing, initializer, dtype=dtypes.float32, trainable=True, collections=None, name=None, reuse=None): """Create a list of partitioned variables according to the given `slicing`. Currently only one dimension of the full variable can be sliced, and the full variable can be reconstructed by the concatenation of the returned list along that dimension. Args: shape: List of integers. The shape of the full variable. slicing: List of integers. How to partition the variable. Must be of the same length as `shape`. Each value indicate how many slices to create in the corresponding dimension. Presently only one of the values can be more than 1; that is, the variable can only be sliced along one dimension. For convenience, The requested number of partitions does not have to divide the corresponding dimension evenly. If it does not, the shapes of the partitions are incremented by 1 starting from partition 0 until all slack is absorbed. The adjustment rules may change in the future, but as you can save/restore these variables with different slicing specifications this should not be a problem. initializer: A `Tensor` of shape `shape` or a variable initializer function. If a function, it will be called once for each slice, passing the shape and data type of the slice as parameters. The function must return a tensor with the same shape as the slice. dtype: Type of the variables. Ignored if `initializer` is a `Tensor`. trainable: If True also add all the variables to the graph collection `GraphKeys.TRAINABLE_VARIABLES`. collections: List of graph collections keys to add the variables to. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`. name: Optional name for the full variable. Defaults to `"PartitionedVariable"` and gets uniquified automatically. reuse: Boolean or `None`; if `True` and name is set, it would reuse previously created variables. if `False` it will create new variables. if `None`, it would inherit the parent scope reuse. Returns: A list of Variables corresponding to the slicing. Raises: ValueError: If any of the arguments is malformed. 
""" if len(shape) != len(slicing): raise ValueError("The 'shape' and 'slicing' of a partitioned Variable " "must have the length: shape: %s, slicing: %s" % (shape, slicing)) if len(shape) < 1: raise ValueError("A partitioned Variable must have rank at least 1: " "shape: %s" % shape) # Legacy: we are provided the slicing directly, so just pass it to # the partitioner. partitioner = lambda **unused_kwargs: slicing with variable_scope.variable_scope( name, "PartitionedVariable", reuse=reuse): # pylint: disable=protected-access partitioned_var = variable_scope._get_partitioned_variable( name=None, shape=shape, dtype=dtype, initializer=initializer, trainable=trainable, partitioner=partitioner, collections=collections) return list(partitioned_var) # pylint: enable=protected-access
tensorflow-master
tensorflow/python/ops/partitioned_variables.py
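The partitioners above are normally passed to `variable_scope`/`get_variable` rather than called directly, as their docstrings note. Below is a minimal sketch of that wiring under the TF 1.x API surface these symbols are exported to; the scope names, variable names, and shapes are illustrative assumptions.

```python
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

# Keep every shard of a large embedding matrix under roughly 64MB.
axis_partitioner = tf.variable_axis_size_partitioner(
    max_shard_bytes=(64 << 20) - 1)

with tf.variable_scope("embeddings", partitioner=axis_partitioner):
  # With a partitioner set, get_variable returns a partitioned variable
  # sliced along axis 0.
  table = tf.get_variable("weights", shape=[100000, 128], dtype=tf.float32)

# Alternatively, request an exact number of shards along axis 0.
with tf.variable_scope("output", partitioner=tf.fixed_size_partitioner(4)):
  kernel = tf.get_variable("kernel", shape=[128, 1000])
```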
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Module implementing RNN Cells. This module provides a number of basic commonly used RNN cells, such as LSTM (Long Short Term Memory) or GRU (Gated Recurrent Unit), and a number of operators that allow adding dropouts, projections, or embeddings for inputs. Constructing multi-layer cells is supported by the class `MultiRNNCell`, or by calling the `rnn` ops several times. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections from tensorflow.python.eager import context from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import tensor_util from tensorflow.python.keras import activations from tensorflow.python.keras import initializers from tensorflow.python.keras.engine import input_spec from tensorflow.python.keras.utils import tf_utils from tensorflow.python.layers import base as base_layer from tensorflow.python.ops import array_ops from tensorflow.python.ops import clip_ops from tensorflow.python.ops import init_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn_ops from tensorflow.python.ops import partitioned_variables from tensorflow.python.ops import rnn_cell_wrapper_impl from tensorflow.python.ops import variable_scope as vs from tensorflow.python.ops import variables as tf_variables from tensorflow.python.platform import tf_logging as logging from tensorflow.python.training.tracking import base as trackable from tensorflow.python.util import nest from tensorflow.python.util.deprecation import deprecated from tensorflow.python.util.tf_export import tf_export _BIAS_VARIABLE_NAME = "bias" _WEIGHTS_VARIABLE_NAME = "kernel" # This can be used with self.assertRaisesRegexp for assert_like_rnncell. ASSERT_LIKE_RNNCELL_ERROR_REGEXP = "is not an RNNCell" def _hasattr(obj, attr_name): try: getattr(obj, attr_name) except AttributeError: return False else: return True def assert_like_rnncell(cell_name, cell): """Raises a TypeError if cell is not like an RNNCell. NOTE: Do not rely on the error message (in particular in tests) which can be subject to change to increase readability. Use ASSERT_LIKE_RNNCELL_ERROR_REGEXP. Args: cell_name: A string to give a meaningful error referencing to the name of the functionargument. cell: The object which should behave like an RNNCell. Raises: TypeError: A human-friendly exception. 
""" conditions = [ _hasattr(cell, "output_size"), _hasattr(cell, "state_size"), _hasattr(cell, "get_initial_state") or _hasattr(cell, "zero_state"), callable(cell), ] errors = [ "'output_size' property is missing", "'state_size' property is missing", "either 'zero_state' or 'get_initial_state' method is required", "is not callable" ] if not all(conditions): errors = [error for error, cond in zip(errors, conditions) if not cond] raise TypeError("The argument {!r} ({}) is not an RNNCell: {}.".format( cell_name, cell, ", ".join(errors))) def _concat(prefix, suffix, static=False): """Concat that enables int, Tensor, or TensorShape values. This function takes a size specification, which can be an integer, a TensorShape, or a Tensor, and converts it into a concatenated Tensor (if static = False) or a list of integers (if static = True). Args: prefix: The prefix; usually the batch size (and/or time step size). (TensorShape, int, or Tensor.) suffix: TensorShape, int, or Tensor. static: If `True`, return a python list with possibly unknown dimensions. Otherwise return a `Tensor`. Returns: shape: the concatenation of prefix and suffix. Raises: ValueError: if `suffix` is not a scalar or vector (or TensorShape). ValueError: if prefix or suffix was `None` and asked for dynamic Tensors out. """ if isinstance(prefix, ops.Tensor): p = prefix p_static = tensor_util.constant_value(prefix) if p.shape.ndims == 0: p = array_ops.expand_dims(p, 0) elif p.shape.ndims != 1: raise ValueError("prefix tensor must be either a scalar or vector, " "but saw tensor: %s" % p) else: p = tensor_shape.as_shape(prefix) p_static = p.as_list() if p.ndims is not None else None p = ( constant_op.constant(p.as_list(), dtype=dtypes.int32) if p.is_fully_defined() else None) if isinstance(suffix, ops.Tensor): s = suffix s_static = tensor_util.constant_value(suffix) if s.shape.ndims == 0: s = array_ops.expand_dims(s, 0) elif s.shape.ndims != 1: raise ValueError("suffix tensor must be either a scalar or vector, " "but saw tensor: %s" % s) else: s = tensor_shape.as_shape(suffix) s_static = s.as_list() if s.ndims is not None else None s = ( constant_op.constant(s.as_list(), dtype=dtypes.int32) if s.is_fully_defined() else None) if static: shape = tensor_shape.as_shape(p_static).concatenate(s_static) shape = shape.as_list() if shape.ndims is not None else None else: if p is None or s is None: raise ValueError("Provided a prefix or suffix of None: %s and %s" % (prefix, suffix)) shape = array_ops.concat((p, s), 0) return shape def _zero_state_tensors(state_size, batch_size, dtype): """Create tensors of zeros based on state_size, batch_size, and dtype.""" def get_state_shape(s): """Combine s with batch_size to get a proper tensor shape.""" c = _concat(batch_size, s) size = array_ops.zeros(c, dtype=dtype) if not context.executing_eagerly(): c_static = _concat(batch_size, s, static=True) size.set_shape(c_static) return size return nest.map_structure(get_state_shape, state_size) @tf_export(v1=["nn.rnn_cell.RNNCell"]) class RNNCell(base_layer.Layer): """Abstract object representing an RNN cell. Every `RNNCell` must have the properties below and implement `call` with the signature `(output, next_state) = call(input, state)`. The optional third input argument, `scope`, is allowed for backwards compatibility purposes; but should be left off for new subclasses. This definition of cell differs from the definition used in the literature. In the literature, 'cell' refers to an object with a single scalar output. 
This definition refers to a horizontal array of such units. An RNN cell, in the most abstract setting, is anything that has a state and performs some operation that takes a matrix of inputs. This operation results in an output matrix with `self.output_size` columns. If `self.state_size` is an integer, this operation also results in a new state matrix with `self.state_size` columns. If `self.state_size` is a (possibly nested tuple of) TensorShape object(s), then it should return a matching structure of Tensors having shape `[batch_size].concatenate(s)` for each `s` in `self.batch_size`. """ def __init__(self, trainable=True, name=None, dtype=None, **kwargs): super(RNNCell, self).__init__( trainable=trainable, name=name, dtype=dtype, **kwargs) # Attribute that indicates whether the cell is a TF RNN cell, due the slight # difference between TF and Keras RNN cell. Notably the state is not wrapped # in a list for TF cell where they are single tensor state, whereas keras # cell will wrap the state into a list, and call() will have to unwrap them. self._is_tf_rnn_cell = True def __call__(self, inputs, state, scope=None): """Run this RNN cell on inputs, starting from the given state. Args: inputs: `2-D` tensor with shape `[batch_size, input_size]`. state: if `self.state_size` is an integer, this should be a `2-D Tensor` with shape `[batch_size, self.state_size]`. Otherwise, if `self.state_size` is a tuple of integers, this should be a tuple with shapes `[batch_size, s] for s in self.state_size`. scope: VariableScope for the created subgraph; defaults to class name. Returns: A pair containing: - Output: A `2-D` tensor with shape `[batch_size, self.output_size]`. - New state: Either a single `2-D` tensor, or a tuple of tensors matching the arity and shapes of `state`. """ if scope is not None: with vs.variable_scope( scope, custom_getter=self._rnn_get_variable) as scope: return super(RNNCell, self).__call__(inputs, state, scope=scope) else: scope_attrname = "rnncell_scope" scope = getattr(self, scope_attrname, None) if scope is None: scope = vs.variable_scope( vs.get_variable_scope(), custom_getter=self._rnn_get_variable) setattr(self, scope_attrname, scope) with scope: return super(RNNCell, self).__call__(inputs, state) def _rnn_get_variable(self, getter, *args, **kwargs): variable = getter(*args, **kwargs) if context.executing_eagerly(): trainable = variable._trainable # pylint: disable=protected-access else: trainable = ( variable in tf_variables.trainable_variables() or (isinstance(variable, tf_variables.PartitionedVariable) and list(variable)[0] in tf_variables.trainable_variables())) if trainable and variable not in self._trainable_weights: self._trainable_weights.append(variable) elif not trainable and variable not in self._non_trainable_weights: self._non_trainable_weights.append(variable) return variable @property def state_size(self): """size(s) of state(s) used by this cell. It can be represented by an Integer, a TensorShape or a tuple of Integers or TensorShapes. """ raise NotImplementedError("Abstract method") @property def output_size(self): """Integer or TensorShape: size of outputs produced by this cell.""" raise NotImplementedError("Abstract method") def build(self, _): # This tells the parent Layer object that it's OK to call # self.add_variable() inside the call() method. pass def get_initial_state(self, inputs=None, batch_size=None, dtype=None): if inputs is not None: # Validate the given batch_size and dtype against inputs if provided. 
inputs = ops.convert_to_tensor(inputs, name="inputs") if batch_size is not None: if tensor_util.is_tensor(batch_size): static_batch_size = tensor_util.constant_value( batch_size, partial=True) else: static_batch_size = batch_size if inputs.shape.dims[0].value != static_batch_size: raise ValueError( "batch size from input tensor is different from the " "input param. Input tensor batch: {}, batch_size: {}".format( inputs.shape.dims[0].value, batch_size)) if dtype is not None and inputs.dtype != dtype: raise ValueError( "dtype from input tensor is different from the " "input param. Input tensor dtype: {}, dtype: {}".format( inputs.dtype, dtype)) batch_size = inputs.shape.dims[0].value or array_ops.shape(inputs)[0] dtype = inputs.dtype if None in [batch_size, dtype]: raise ValueError( "batch_size and dtype cannot be None while constructing initial " "state: batch_size={}, dtype={}".format(batch_size, dtype)) return self.zero_state(batch_size, dtype) def zero_state(self, batch_size, dtype): """Return zero-filled state tensor(s). Args: batch_size: int, float, or unit Tensor representing the batch size. dtype: the data type to use for the state. Returns: If `state_size` is an int or TensorShape, then the return value is a `N-D` tensor of shape `[batch_size, state_size]` filled with zeros. If `state_size` is a nested list or tuple, then the return value is a nested list or tuple (of the same structure) of `2-D` tensors with the shapes `[batch_size, s]` for each s in `state_size`. """ # Try to use the last cached zero_state. This is done to avoid recreating # zeros, especially when eager execution is enabled. state_size = self.state_size is_eager = context.executing_eagerly() if is_eager and _hasattr(self, "_last_zero_state"): (last_state_size, last_batch_size, last_dtype, last_output) = getattr(self, "_last_zero_state") if (last_batch_size == batch_size and last_dtype == dtype and last_state_size == state_size): return last_output with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]): output = _zero_state_tensors(state_size, batch_size, dtype) if is_eager: self._last_zero_state = (state_size, batch_size, dtype, output) return output class LayerRNNCell(RNNCell): """Subclass of RNNCells that act like proper `tf.Layer` objects. For backwards compatibility purposes, most `RNNCell` instances allow their `call` methods to instantiate variables via `tf.compat.v1.get_variable`. The underlying variable scope thus keeps track of any variables, and returning cached versions. This is atypical of `tf.layer` objects, which separate this part of layer building into a `build` method that is only called once. Here we provide a subclass for `RNNCell` objects that act exactly as `Layer` objects do. They must provide a `build` method and their `call` methods do not access Variables `tf.compat.v1.get_variable`. """ def __call__(self, inputs, state, scope=None, *args, **kwargs): """Run this RNN cell on inputs, starting from the given state. Args: inputs: `2-D` tensor with shape `[batch_size, input_size]`. state: if `self.state_size` is an integer, this should be a `2-D Tensor` with shape `[batch_size, self.state_size]`. Otherwise, if `self.state_size` is a tuple of integers, this should be a tuple with shapes `[batch_size, s] for s in self.state_size`. scope: optional cell scope. *args: Additional positional arguments. **kwargs: Additional keyword arguments. Returns: A pair containing: - Output: A `2-D` tensor with shape `[batch_size, self.output_size]`. 
- New state: Either a single `2-D` tensor, or a tuple of tensors matching the arity and shapes of `state`. """ # Bypass RNNCell's variable capturing semantics for LayerRNNCell. # Instead, it is up to subclasses to provide a proper build # method. See the class docstring for more details. return base_layer.Layer.__call__( self, inputs, state, scope=scope, *args, **kwargs) @tf_export(v1=["nn.rnn_cell.BasicRNNCell"]) class BasicRNNCell(LayerRNNCell): """The most basic RNN cell. Note that this cell is not optimized for performance. Please use `tf.contrib.cudnn_rnn.CudnnRNNTanh` for better performance on GPU. Args: num_units: int, The number of units in the RNN cell. activation: Nonlinearity to use. Default: `tanh`. It could also be string that is within Keras activation function names. reuse: (optional) Python boolean describing whether to reuse variables in an existing scope. If not `True`, and the existing scope already has the given variables, an error is raised. name: String, the name of the layer. Layers with the same name will share weights, but to avoid mistakes we require reuse=True in such cases. dtype: Default dtype of the layer (default of `None` means use the type of the first input). Required when `build` is called before `call`. **kwargs: Dict, keyword named properties for common layer attributes, like `trainable` etc when constructing the cell from configs of get_config(). """ @deprecated(None, "This class is equivalent as tf.keras.layers.SimpleRNNCell," " and will be replaced by that in Tensorflow 2.0.") def __init__(self, num_units, activation=None, reuse=None, name=None, dtype=None, **kwargs): super(BasicRNNCell, self).__init__( _reuse=reuse, name=name, dtype=dtype, **kwargs) _check_supported_dtypes(self.dtype) if context.executing_eagerly() and context.num_gpus() > 0: logging.warn( "%s: Note that this cell is not optimized for performance. " "Please use tf.contrib.cudnn_rnn.CudnnRNNTanh for better " "performance on GPU.", self) # Inputs must be 2-dimensional. self.input_spec = input_spec.InputSpec(ndim=2) self._num_units = num_units if activation: self._activation = activations.get(activation) else: self._activation = math_ops.tanh @property def state_size(self): return self._num_units @property def output_size(self): return self._num_units @tf_utils.shape_type_conversion def build(self, inputs_shape): if inputs_shape[-1] is None: raise ValueError("Expected inputs.shape[-1] to be known, saw shape: %s" % str(inputs_shape)) _check_supported_dtypes(self.dtype) input_depth = inputs_shape[-1] self._kernel = self.add_variable( _WEIGHTS_VARIABLE_NAME, shape=[input_depth + self._num_units, self._num_units]) self._bias = self.add_variable( _BIAS_VARIABLE_NAME, shape=[self._num_units], initializer=init_ops.zeros_initializer(dtype=self.dtype)) self.built = True def call(self, inputs, state): """Most basic RNN: output = new_state = act(W * input + U * state + B).""" _check_rnn_cell_input_dtypes([inputs, state]) gate_inputs = math_ops.matmul( array_ops.concat([inputs, state], 1), self._kernel) gate_inputs = nn_ops.bias_add(gate_inputs, self._bias) output = self._activation(gate_inputs) return output, output def get_config(self): config = { "num_units": self._num_units, "activation": activations.serialize(self._activation), "reuse": self._reuse, } base_config = super(BasicRNNCell, self).get_config() return dict(list(base_config.items()) + list(config.items())) @tf_export(v1=["nn.rnn_cell.GRUCell"]) class GRUCell(LayerRNNCell): """Gated Recurrent Unit cell (cf. 
http://arxiv.org/abs/1406.1078). Note that this cell is not optimized for performance. Please use `tf.contrib.cudnn_rnn.CudnnGRU` for better performance on GPU, or `tf.contrib.rnn.GRUBlockCellV2` for better performance on CPU. Args: num_units: int, The number of units in the GRU cell. activation: Nonlinearity to use. Default: `tanh`. reuse: (optional) Python boolean describing whether to reuse variables in an existing scope. If not `True`, and the existing scope already has the given variables, an error is raised. kernel_initializer: (optional) The initializer to use for the weight and projection matrices. bias_initializer: (optional) The initializer to use for the bias. name: String, the name of the layer. Layers with the same name will share weights, but to avoid mistakes we require reuse=True in such cases. dtype: Default dtype of the layer (default of `None` means use the type of the first input). Required when `build` is called before `call`. **kwargs: Dict, keyword named properties for common layer attributes, like `trainable` etc when constructing the cell from configs of get_config(). """ @deprecated(None, "This class is equivalent as tf.keras.layers.GRUCell," " and will be replaced by that in Tensorflow 2.0.") def __init__(self, num_units, activation=None, reuse=None, kernel_initializer=None, bias_initializer=None, name=None, dtype=None, **kwargs): super(GRUCell, self).__init__( _reuse=reuse, name=name, dtype=dtype, **kwargs) _check_supported_dtypes(self.dtype) if context.executing_eagerly() and context.num_gpus() > 0: logging.warn( "%s: Note that this cell is not optimized for performance. " "Please use tf.contrib.cudnn_rnn.CudnnGRU for better " "performance on GPU.", self) # Inputs must be 2-dimensional. self.input_spec = input_spec.InputSpec(ndim=2) self._num_units = num_units if activation: self._activation = activations.get(activation) else: self._activation = math_ops.tanh self._kernel_initializer = initializers.get(kernel_initializer) self._bias_initializer = initializers.get(bias_initializer) @property def state_size(self): return self._num_units @property def output_size(self): return self._num_units @tf_utils.shape_type_conversion def build(self, inputs_shape): if inputs_shape[-1] is None: raise ValueError("Expected inputs.shape[-1] to be known, saw shape: %s" % str(inputs_shape)) _check_supported_dtypes(self.dtype) input_depth = inputs_shape[-1] self._gate_kernel = self.add_variable( "gates/%s" % _WEIGHTS_VARIABLE_NAME, shape=[input_depth + self._num_units, 2 * self._num_units], initializer=self._kernel_initializer) self._gate_bias = self.add_variable( "gates/%s" % _BIAS_VARIABLE_NAME, shape=[2 * self._num_units], initializer=(self._bias_initializer if self._bias_initializer is not None else init_ops.constant_initializer(1.0, dtype=self.dtype))) self._candidate_kernel = self.add_variable( "candidate/%s" % _WEIGHTS_VARIABLE_NAME, shape=[input_depth + self._num_units, self._num_units], initializer=self._kernel_initializer) self._candidate_bias = self.add_variable( "candidate/%s" % _BIAS_VARIABLE_NAME, shape=[self._num_units], initializer=(self._bias_initializer if self._bias_initializer is not None else init_ops.zeros_initializer(dtype=self.dtype))) self.built = True def call(self, inputs, state): """Gated recurrent unit (GRU) with nunits cells.""" _check_rnn_cell_input_dtypes([inputs, state]) gate_inputs = math_ops.matmul( array_ops.concat([inputs, state], 1), self._gate_kernel) gate_inputs = nn_ops.bias_add(gate_inputs, self._gate_bias) value = 
math_ops.sigmoid(gate_inputs) r, u = array_ops.split(value=value, num_or_size_splits=2, axis=1) r_state = r * state candidate = math_ops.matmul( array_ops.concat([inputs, r_state], 1), self._candidate_kernel) candidate = nn_ops.bias_add(candidate, self._candidate_bias) c = self._activation(candidate) new_h = u * state + (1 - u) * c return new_h, new_h def get_config(self): config = { "num_units": self._num_units, "kernel_initializer": initializers.serialize(self._kernel_initializer), "bias_initializer": initializers.serialize(self._bias_initializer), "activation": activations.serialize(self._activation), "reuse": self._reuse, } base_config = super(GRUCell, self).get_config() return dict(list(base_config.items()) + list(config.items())) _LSTMStateTuple = collections.namedtuple("LSTMStateTuple", ("c", "h")) @tf_export(v1=["nn.rnn_cell.LSTMStateTuple"]) class LSTMStateTuple(_LSTMStateTuple): """Tuple used by LSTM Cells for `state_size`, `zero_state`, and output state. Stores two elements: `(c, h)`, in that order. Where `c` is the hidden state and `h` is the output. Only used when `state_is_tuple=True`. """ __slots__ = () @property def dtype(self): (c, h) = self if c.dtype != h.dtype: raise TypeError("Inconsistent internal state: %s vs %s" % (str(c.dtype), str(h.dtype))) return c.dtype @tf_export(v1=["nn.rnn_cell.BasicLSTMCell"]) class BasicLSTMCell(LayerRNNCell): """DEPRECATED: Please use `tf.compat.v1.nn.rnn_cell.LSTMCell` instead. Basic LSTM recurrent network cell. The implementation is based on: http://arxiv.org/abs/1409.2329. We add forget_bias (default: 1) to the biases of the forget gate in order to reduce the scale of forgetting in the beginning of the training. It does not allow cell clipping, a projection layer, and does not use peep-hole connections: it is the basic baseline. For advanced models, please use the full `tf.compat.v1.nn.rnn_cell.LSTMCell` that follows. Note that this cell is not optimized for performance. Please use `tf.contrib.cudnn_rnn.CudnnLSTM` for better performance on GPU, or `tf.contrib.rnn.LSTMBlockCell` and `tf.contrib.rnn.LSTMBlockFusedCell` for better performance on CPU. """ @deprecated(None, "This class is equivalent as tf.keras.layers.LSTMCell," " and will be replaced by that in Tensorflow 2.0.") def __init__(self, num_units, forget_bias=1.0, state_is_tuple=True, activation=None, reuse=None, name=None, dtype=None, **kwargs): """Initialize the basic LSTM cell. Args: num_units: int, The number of units in the LSTM cell. forget_bias: float, The bias added to forget gates (see above). Must set to `0.0` manually when restoring from CudnnLSTM-trained checkpoints. state_is_tuple: If True, accepted and returned states are 2-tuples of the `c_state` and `m_state`. If False, they are concatenated along the column axis. The latter behavior will soon be deprecated. activation: Activation function of the inner states. Default: `tanh`. It could also be string that is within Keras activation function names. reuse: (optional) Python boolean describing whether to reuse variables in an existing scope. If not `True`, and the existing scope already has the given variables, an error is raised. name: String, the name of the layer. Layers with the same name will share weights, but to avoid mistakes we require reuse=True in such cases. dtype: Default dtype of the layer (default of `None` means use the type of the first input). Required when `build` is called before `call`. 
**kwargs: Dict, keyword named properties for common layer attributes, like `trainable` etc when constructing the cell from configs of get_config(). When restoring from CudnnLSTM-trained checkpoints, must use `CudnnCompatibleLSTMCell` instead. """ super(BasicLSTMCell, self).__init__( _reuse=reuse, name=name, dtype=dtype, **kwargs) _check_supported_dtypes(self.dtype) if not state_is_tuple: logging.warn( "%s: Using a concatenated state is slower and will soon be " "deprecated. Use state_is_tuple=True.", self) if context.executing_eagerly() and context.num_gpus() > 0: logging.warn( "%s: Note that this cell is not optimized for performance. " "Please use tf.contrib.cudnn_rnn.CudnnLSTM for better " "performance on GPU.", self) # Inputs must be 2-dimensional. self.input_spec = input_spec.InputSpec(ndim=2) self._num_units = num_units self._forget_bias = forget_bias self._state_is_tuple = state_is_tuple if activation: self._activation = activations.get(activation) else: self._activation = math_ops.tanh @property def state_size(self): return (LSTMStateTuple(self._num_units, self._num_units) if self._state_is_tuple else 2 * self._num_units) @property def output_size(self): return self._num_units @tf_utils.shape_type_conversion def build(self, inputs_shape): if inputs_shape[-1] is None: raise ValueError("Expected inputs.shape[-1] to be known, saw shape: %s" % str(inputs_shape)) _check_supported_dtypes(self.dtype) input_depth = inputs_shape[-1] h_depth = self._num_units self._kernel = self.add_variable( _WEIGHTS_VARIABLE_NAME, shape=[input_depth + h_depth, 4 * self._num_units]) self._bias = self.add_variable( _BIAS_VARIABLE_NAME, shape=[4 * self._num_units], initializer=init_ops.zeros_initializer(dtype=self.dtype)) self.built = True def call(self, inputs, state): """Long short-term memory cell (LSTM). Args: inputs: `2-D` tensor with shape `[batch_size, input_size]`. state: An `LSTMStateTuple` of state tensors, each shaped `[batch_size, num_units]`, if `state_is_tuple` has been set to `True`. Otherwise, a `Tensor` shaped `[batch_size, 2 * num_units]`. Returns: A pair containing the new hidden state, and the new state (either a `LSTMStateTuple` or a concatenated state, depending on `state_is_tuple`). """ _check_rnn_cell_input_dtypes([inputs, state]) sigmoid = math_ops.sigmoid one = constant_op.constant(1, dtype=dtypes.int32) # Parameters of gates are concatenated into one multiply for efficiency. if self._state_is_tuple: c, h = state else: c, h = array_ops.split(value=state, num_or_size_splits=2, axis=one) gate_inputs = math_ops.matmul( array_ops.concat([inputs, h], 1), self._kernel) gate_inputs = nn_ops.bias_add(gate_inputs, self._bias) # i = input_gate, j = new_input, f = forget_gate, o = output_gate i, j, f, o = array_ops.split( value=gate_inputs, num_or_size_splits=4, axis=one) forget_bias_tensor = constant_op.constant(self._forget_bias, dtype=f.dtype) # Note that using `add` and `multiply` instead of `+` and `*` gives a # performance improvement. So using those at the cost of readability. 
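    # In equation form, the update computed below is:
    #   new_c = c * sigmoid(f + forget_bias) + sigmoid(i) * activation(j)
    #   new_h = activation(new_c) * sigmoid(o)
    # where `activation` is self._activation (tanh by default).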
add = math_ops.add multiply = math_ops.multiply new_c = add( multiply(c, sigmoid(add(f, forget_bias_tensor))), multiply(sigmoid(i), self._activation(j))) new_h = multiply(self._activation(new_c), sigmoid(o)) if self._state_is_tuple: new_state = LSTMStateTuple(new_c, new_h) else: new_state = array_ops.concat([new_c, new_h], 1) return new_h, new_state def get_config(self): config = { "num_units": self._num_units, "forget_bias": self._forget_bias, "state_is_tuple": self._state_is_tuple, "activation": activations.serialize(self._activation), "reuse": self._reuse, } base_config = super(BasicLSTMCell, self).get_config() return dict(list(base_config.items()) + list(config.items())) @tf_export(v1=["nn.rnn_cell.LSTMCell"]) class LSTMCell(LayerRNNCell): """Long short-term memory unit (LSTM) recurrent network cell. The default non-peephole implementation is based on: https://pdfs.semanticscholar.org/1154/0131eae85b2e11d53df7f1360eeb6476e7f4.pdf Felix Gers, Jurgen Schmidhuber, and Fred Cummins. "Learning to forget: Continual prediction with LSTM." IET, 850-855, 1999. The peephole implementation is based on: https://research.google.com/pubs/archive/43905.pdf Hasim Sak, Andrew Senior, and Francoise Beaufays. "Long short-term memory recurrent neural network architectures for large scale acoustic modeling." INTERSPEECH, 2014. The class uses optional peep-hole connections, optional cell clipping, and an optional projection layer. Note that this cell is not optimized for performance. Please use `tf.contrib.cudnn_rnn.CudnnLSTM` for better performance on GPU, or `tf.contrib.rnn.LSTMBlockCell` and `tf.contrib.rnn.LSTMBlockFusedCell` for better performance on CPU. """ @deprecated(None, "This class is equivalent as tf.keras.layers.LSTMCell," " and will be replaced by that in Tensorflow 2.0.") def __init__(self, num_units, use_peepholes=False, cell_clip=None, initializer=None, num_proj=None, proj_clip=None, num_unit_shards=None, num_proj_shards=None, forget_bias=1.0, state_is_tuple=True, activation=None, reuse=None, name=None, dtype=None, **kwargs): """Initialize the parameters for an LSTM cell. Args: num_units: int, The number of units in the LSTM cell. use_peepholes: bool, set True to enable diagonal/peephole connections. cell_clip: (optional) A float value, if provided the cell state is clipped by this value prior to the cell output activation. initializer: (optional) The initializer to use for the weight and projection matrices. num_proj: (optional) int, The output dimensionality for the projection matrices. If None, no projection is performed. proj_clip: (optional) A float value. If `num_proj > 0` and `proj_clip` is provided, then the projected values are clipped elementwise to within `[-proj_clip, proj_clip]`. num_unit_shards: Deprecated, will be removed by Jan. 2017. Use a variable_scope partitioner instead. num_proj_shards: Deprecated, will be removed by Jan. 2017. Use a variable_scope partitioner instead. forget_bias: Biases of the forget gate are initialized by default to 1 in order to reduce the scale of forgetting at the beginning of the training. Must set it manually to `0.0` when restoring from CudnnLSTM trained checkpoints. state_is_tuple: If True, accepted and returned states are 2-tuples of the `c_state` and `m_state`. If False, they are concatenated along the column axis. This latter behavior will soon be deprecated. activation: Activation function of the inner states. Default: `tanh`. It could also be string that is within Keras activation function names. 
reuse: (optional) Python boolean describing whether to reuse variables in an existing scope. If not `True`, and the existing scope already has the given variables, an error is raised. name: String, the name of the layer. Layers with the same name will share weights, but to avoid mistakes we require reuse=True in such cases. dtype: Default dtype of the layer (default of `None` means use the type of the first input). Required when `build` is called before `call`. **kwargs: Dict, keyword named properties for common layer attributes, like `trainable` etc when constructing the cell from configs of get_config(). When restoring from CudnnLSTM-trained checkpoints, use `CudnnCompatibleLSTMCell` instead. """ super(LSTMCell, self).__init__( _reuse=reuse, name=name, dtype=dtype, **kwargs) _check_supported_dtypes(self.dtype) if not state_is_tuple: logging.warn( "%s: Using a concatenated state is slower and will soon be " "deprecated. Use state_is_tuple=True.", self) if num_unit_shards is not None or num_proj_shards is not None: logging.warn( "%s: The num_unit_shards and proj_unit_shards parameters are " "deprecated and will be removed in Jan 2017. " "Use a variable scope with a partitioner instead.", self) if context.executing_eagerly() and context.num_gpus() > 0: logging.warn( "%s: Note that this cell is not optimized for performance. " "Please use tf.contrib.cudnn_rnn.CudnnLSTM for better " "performance on GPU.", self) # Inputs must be 2-dimensional. self.input_spec = input_spec.InputSpec(ndim=2) self._num_units = num_units self._use_peepholes = use_peepholes self._cell_clip = cell_clip self._initializer = initializers.get(initializer) self._num_proj = num_proj self._proj_clip = proj_clip self._num_unit_shards = num_unit_shards self._num_proj_shards = num_proj_shards self._forget_bias = forget_bias self._state_is_tuple = state_is_tuple if activation: self._activation = activations.get(activation) else: self._activation = math_ops.tanh if num_proj: self._state_size = ( LSTMStateTuple(num_units, num_proj) if state_is_tuple else num_units + num_proj) self._output_size = num_proj else: self._state_size = ( LSTMStateTuple(num_units, num_units) if state_is_tuple else 2 * num_units) self._output_size = num_units @property def state_size(self): return self._state_size @property def output_size(self): return self._output_size @tf_utils.shape_type_conversion def build(self, inputs_shape): if inputs_shape[-1] is None: raise ValueError("Expected inputs.shape[-1] to be known, saw shape: %s" % str(inputs_shape)) _check_supported_dtypes(self.dtype) input_depth = inputs_shape[-1] h_depth = self._num_units if self._num_proj is None else self._num_proj maybe_partitioner = ( partitioned_variables.fixed_size_partitioner(self._num_unit_shards) if self._num_unit_shards is not None else None) self._kernel = self.add_variable( _WEIGHTS_VARIABLE_NAME, shape=[input_depth + h_depth, 4 * self._num_units], initializer=self._initializer, partitioner=maybe_partitioner) if self.dtype is None: initializer = init_ops.zeros_initializer else: initializer = init_ops.zeros_initializer(dtype=self.dtype) self._bias = self.add_variable( _BIAS_VARIABLE_NAME, shape=[4 * self._num_units], initializer=initializer) if self._use_peepholes: self._w_f_diag = self.add_variable( "w_f_diag", shape=[self._num_units], initializer=self._initializer) self._w_i_diag = self.add_variable( "w_i_diag", shape=[self._num_units], initializer=self._initializer) self._w_o_diag = self.add_variable( "w_o_diag", shape=[self._num_units], initializer=self._initializer) 
if self._num_proj is not None: maybe_proj_partitioner = ( partitioned_variables.fixed_size_partitioner(self._num_proj_shards) if self._num_proj_shards is not None else None) self._proj_kernel = self.add_variable( "projection/%s" % _WEIGHTS_VARIABLE_NAME, shape=[self._num_units, self._num_proj], initializer=self._initializer, partitioner=maybe_proj_partitioner) self.built = True def call(self, inputs, state): """Run one step of LSTM. Args: inputs: input Tensor, must be 2-D, `[batch, input_size]`. state: if `state_is_tuple` is False, this must be a state Tensor, `2-D, [batch, state_size]`. If `state_is_tuple` is True, this must be a tuple of state Tensors, both `2-D`, with column sizes `c_state` and `m_state`. Returns: A tuple containing: - A `2-D, [batch, output_dim]`, Tensor representing the output of the LSTM after reading `inputs` when previous state was `state`. Here output_dim is: num_proj if num_proj was set, num_units otherwise. - Tensor(s) representing the new state of LSTM after reading `inputs` when the previous state was `state`. Same type and shape(s) as `state`. Raises: ValueError: If input size cannot be inferred from inputs via static shape inference. """ _check_rnn_cell_input_dtypes([inputs, state]) num_proj = self._num_units if self._num_proj is None else self._num_proj sigmoid = math_ops.sigmoid if self._state_is_tuple: (c_prev, m_prev) = state else: c_prev = array_ops.slice(state, [0, 0], [-1, self._num_units]) m_prev = array_ops.slice(state, [0, self._num_units], [-1, num_proj]) input_size = inputs.get_shape().with_rank(2).dims[1].value if input_size is None: raise ValueError("Could not infer input size from inputs.get_shape()[-1]") # i = input_gate, j = new_input, f = forget_gate, o = output_gate lstm_matrix = math_ops.matmul( array_ops.concat([inputs, m_prev], 1), self._kernel) lstm_matrix = nn_ops.bias_add(lstm_matrix, self._bias) i, j, f, o = array_ops.split( value=lstm_matrix, num_or_size_splits=4, axis=1) # Diagonal connections if self._use_peepholes: c = ( sigmoid(f + self._forget_bias + self._w_f_diag * c_prev) * c_prev + sigmoid(i + self._w_i_diag * c_prev) * self._activation(j)) else: c = ( sigmoid(f + self._forget_bias) * c_prev + sigmoid(i) * self._activation(j)) if self._cell_clip is not None: # pylint: disable=invalid-unary-operand-type c = clip_ops.clip_by_value(c, -self._cell_clip, self._cell_clip) # pylint: enable=invalid-unary-operand-type if self._use_peepholes: m = sigmoid(o + self._w_o_diag * c) * self._activation(c) else: m = sigmoid(o) * self._activation(c) if self._num_proj is not None: m = math_ops.matmul(m, self._proj_kernel) if self._proj_clip is not None: # pylint: disable=invalid-unary-operand-type m = clip_ops.clip_by_value(m, -self._proj_clip, self._proj_clip) # pylint: enable=invalid-unary-operand-type new_state = ( LSTMStateTuple(c, m) if self._state_is_tuple else array_ops.concat([c, m], 1)) return m, new_state def get_config(self): config = { "num_units": self._num_units, "use_peepholes": self._use_peepholes, "cell_clip": self._cell_clip, "initializer": initializers.serialize(self._initializer), "num_proj": self._num_proj, "proj_clip": self._proj_clip, "num_unit_shards": self._num_unit_shards, "num_proj_shards": self._num_proj_shards, "forget_bias": self._forget_bias, "state_is_tuple": self._state_is_tuple, "activation": activations.serialize(self._activation), "reuse": self._reuse, } base_config = super(LSTMCell, self).get_config() return dict(list(base_config.items()) + list(config.items())) class _RNNCellWrapperV1(RNNCell): """Base 
class for cells wrappers V1 compatibility. This class along with `_RNNCellWrapperV2` allows to define cells wrappers that are compatible with V1 and V2, and defines helper methods for this purpose. """ def __init__(self, cell, *args, **kwargs): super(_RNNCellWrapperV1, self).__init__(*args, **kwargs) assert_like_rnncell("cell", cell) self.cell = cell if isinstance(cell, trackable.Trackable): self._track_trackable(self.cell, name="cell") def _call_wrapped_cell(self, inputs, state, cell_call_fn, **kwargs): """Calls the wrapped cell and performs the wrapping logic. This method is called from the wrapper's `call` or `__call__` methods. Args: inputs: A tensor with wrapped cell's input. state: A tensor or tuple of tensors with wrapped cell's state. cell_call_fn: Wrapped cell's method to use for step computation (cell's `__call__` or 'call' method). **kwargs: Additional arguments. Returns: A pair containing: - Output: A tensor with cell's output. - New state: A tensor or tuple of tensors with new wrapped cell's state. """ raise NotImplementedError def __call__(self, inputs, state, scope=None): """Runs the RNN cell step computation. We assume that the wrapped RNNCell is being built within its `__call__` method. We directly use the wrapped cell's `__call__` in the overridden wrapper `__call__` method. This allows to use the wrapped cell and the non-wrapped cell equivalently when using `__call__`. Args: inputs: A tensor with wrapped cell's input. state: A tensor or tuple of tensors with wrapped cell's state. scope: VariableScope for the subgraph created in the wrapped cells' `__call__`. Returns: A pair containing: - Output: A tensor with cell's output. - New state: A tensor or tuple of tensors with new wrapped cell's state. """ return self._call_wrapped_cell( inputs, state, cell_call_fn=self.cell.__call__, scope=scope) def get_config(self): config = { "cell": { "class_name": self.cell.__class__.__name__, "config": self.cell.get_config() }, } base_config = super(_RNNCellWrapperV1, self).get_config() return dict(list(base_config.items()) + list(config.items())) @classmethod def from_config(cls, config, custom_objects=None): config = config.copy() cell = config.pop("cell") try: assert_like_rnncell("cell", cell) return cls(cell, **config) except TypeError: raise ValueError("RNNCellWrapper cannot reconstruct the wrapped cell. 
" "Please overwrite the cell in the config with a RNNCell " "instance.") @tf_export(v1=["nn.rnn_cell.DropoutWrapper"]) class DropoutWrapper(rnn_cell_wrapper_impl.DropoutWrapperBase, _RNNCellWrapperV1): """Operator adding dropout to inputs and outputs of the given cell.""" def __init__(self, *args, **kwargs): # pylint: disable=useless-super-delegation super(DropoutWrapper, self).__init__(*args, **kwargs) __init__.__doc__ = rnn_cell_wrapper_impl.DropoutWrapperBase.__init__.__doc__ @tf_export(v1=["nn.rnn_cell.ResidualWrapper"]) class ResidualWrapper(rnn_cell_wrapper_impl.ResidualWrapperBase, _RNNCellWrapperV1): """RNNCell wrapper that ensures cell inputs are added to the outputs.""" def __init__(self, *args, **kwargs): # pylint: disable=useless-super-delegation super(ResidualWrapper, self).__init__(*args, **kwargs) __init__.__doc__ = rnn_cell_wrapper_impl.ResidualWrapperBase.__init__.__doc__ @tf_export(v1=["nn.rnn_cell.DeviceWrapper"]) class DeviceWrapper(rnn_cell_wrapper_impl.DeviceWrapperBase, _RNNCellWrapperV1): def __init__(self, *args, **kwargs): # pylint: disable=useless-super-delegation super(DeviceWrapper, self).__init__(*args, **kwargs) __init__.__doc__ = rnn_cell_wrapper_impl.DeviceWrapperBase.__init__.__doc__ @tf_export(v1=["nn.rnn_cell.MultiRNNCell"]) class MultiRNNCell(RNNCell): """RNN cell composed sequentially of multiple simple cells. Example: ```python num_units = [128, 64] cells = [BasicLSTMCell(num_units=n) for n in num_units] stacked_rnn_cell = MultiRNNCell(cells) ``` """ @deprecated(None, "This class is equivalent as " "tf.keras.layers.StackedRNNCells, and will be replaced by " "that in Tensorflow 2.0.") def __init__(self, cells, state_is_tuple=True): """Create a RNN cell composed sequentially of a number of RNNCells. Args: cells: list of RNNCells that will be composed in this order. state_is_tuple: If True, accepted and returned states are n-tuples, where `n = len(cells)`. If False, the states are all concatenated along the column axis. This latter behavior will soon be deprecated. Raises: ValueError: if cells is empty (not allowed), or at least one of the cells returns a state tuple but the flag `state_is_tuple` is `False`. """ super(MultiRNNCell, self).__init__() if not cells: raise ValueError("Must specify at least one cell for MultiRNNCell.") if not nest.is_sequence(cells): raise TypeError("cells must be a list or tuple, but saw: %s." % cells) if len(set([id(cell) for cell in cells])) < len(cells): logging.log_first_n( logging.WARN, "At least two cells provided to MultiRNNCell " "are the same object and will share weights.", 1) self._cells = cells for cell_number, cell in enumerate(self._cells): # Add Trackable dependencies on these cells so their variables get # saved with this object when using object-based saving. if isinstance(cell, trackable.Trackable): # TODO(allenl): Track down non-Trackable callers. self._track_trackable(cell, name="cell-%d" % (cell_number,)) self._state_is_tuple = state_is_tuple if not state_is_tuple: if any(nest.is_sequence(c.state_size) for c in self._cells): raise ValueError("Some cells return tuples of states, but the flag " "state_is_tuple is not set. 
State sizes are: %s" % str([c.state_size for c in self._cells])) @property def state_size(self): if self._state_is_tuple: return tuple(cell.state_size for cell in self._cells) else: return sum(cell.state_size for cell in self._cells) @property def output_size(self): return self._cells[-1].output_size def zero_state(self, batch_size, dtype): with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]): if self._state_is_tuple: return tuple(cell.zero_state(batch_size, dtype) for cell in self._cells) else: # We know here that state_size of each cell is not a tuple and # presumably does not contain TensorArrays or anything else fancy return super(MultiRNNCell, self).zero_state(batch_size, dtype) @property def trainable_weights(self): if not self.trainable: return [] weights = [] for cell in self._cells: if isinstance(cell, base_layer.Layer): weights += cell.trainable_weights return weights @property def non_trainable_weights(self): weights = [] for cell in self._cells: if isinstance(cell, base_layer.Layer): weights += cell.non_trainable_weights if not self.trainable: trainable_weights = [] for cell in self._cells: if isinstance(cell, base_layer.Layer): trainable_weights += cell.trainable_weights return trainable_weights + weights return weights def call(self, inputs, state): """Run this multi-layer cell on inputs, starting from state.""" cur_state_pos = 0 cur_inp = inputs new_states = [] for i, cell in enumerate(self._cells): with vs.variable_scope("cell_%d" % i): if self._state_is_tuple: if not nest.is_sequence(state): raise ValueError( "Expected state to be a tuple of length %d, but received: %s" % (len(self.state_size), state)) cur_state = state[i] else: cur_state = array_ops.slice(state, [0, cur_state_pos], [-1, cell.state_size]) cur_state_pos += cell.state_size cur_inp, new_state = cell(cur_inp, cur_state) new_states.append(new_state) new_states = ( tuple(new_states) if self._state_is_tuple else array_ops.concat( new_states, 1)) return cur_inp, new_states def _check_rnn_cell_input_dtypes(inputs): """Check whether the input tensors are with supported dtypes. Default RNN cells only support floats and complex as its dtypes since the activation function (tanh and sigmoid) only allow those types. This function will throw a proper error message if the inputs is not in a supported type. Args: inputs: tensor or nested structure of tensors that are feed to RNN cell as input or state. Raises: ValueError: if any of the input tensor are not having dtypes of float or complex. """ for t in nest.flatten(inputs): _check_supported_dtypes(t.dtype) def _check_supported_dtypes(dtype): if dtype is None: return dtype = dtypes.as_dtype(dtype) if not (dtype.is_floating or dtype.is_complex): raise ValueError("RNN cell only supports floating point inputs, " "but saw dtype: %s" % dtype)
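The `LSTMCell.build`/`call` pair above creates the peephole and projection variables and then applies them in one recurrence step. Below is a minimal usage sketch, assuming a TF 1.x graph-mode environment; the batch and feature sizes are illustrative and not taken from the source.

```python
# Minimal sketch (assumed TF 1.x graph mode; sizes are illustrative only).
import tensorflow as tf

batch_size, input_size = 4, 8
num_units, num_proj = 16, 5

cell = tf.nn.rnn_cell.LSTMCell(
    num_units=num_units,
    use_peepholes=True,   # build() then creates w_f_diag / w_i_diag / w_o_diag
    num_proj=num_proj,    # call() projects m down to num_proj columns
    state_is_tuple=True)

inputs = tf.placeholder(tf.float32, [batch_size, input_size])
state = cell.zero_state(batch_size, tf.float32)   # LSTMStateTuple(c, h)
output, new_state = cell(inputs, state)
# output:      [batch_size, num_proj]  (num_proj is set, so output_size == 5)
# new_state.c: [batch_size, num_units], new_state.h: [batch_size, num_proj]
```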
tensorflow-master
tensorflow/python/ops/rnn_cell_impl.py
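For the wrapper classes and `MultiRNNCell` defined in the file above, here is a hedged stacking sketch, assuming the TF 1.x `tf.nn.rnn_cell` endpoints; layer sizes and the keep probability are illustrative choices, not values from the source.

```python
# Stacking sketch (assumed TF 1.x API; layer sizes and keep_prob illustrative).
import tensorflow as tf

cells = [
    tf.nn.rnn_cell.DropoutWrapper(
        tf.nn.rnn_cell.LSTMCell(num_units=n), output_keep_prob=0.9)
    for n in (128, 64)
]
stacked = tf.nn.rnn_cell.MultiRNNCell(cells, state_is_tuple=True)

inputs = tf.placeholder(tf.float32, [None, 20, 32])  # [batch, time, features]
outputs, final_state = tf.nn.dynamic_rnn(stacked, inputs, dtype=tf.float32)
# outputs:     [batch, 20, 64]  (output_size of the last cell in the stack)
# final_state: a 2-tuple of LSTMStateTuple, one per layer
```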
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Weight broadcasting operations. In `tf.losses` and `tf.metrics`, we support limited weight broadcasting. This file includes operations for those broadcasting rules. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import sets def _has_valid_dims(weights_shape, values_shape): with ops.name_scope( None, "has_invalid_dims", (weights_shape, values_shape)) as scope: values_shape_2d = array_ops.expand_dims(values_shape, -1) valid_dims = array_ops.concat( (values_shape_2d, array_ops.ones_like(values_shape_2d)), axis=1) weights_shape_2d = array_ops.expand_dims(weights_shape, -1) invalid_dims = sets.set_difference(weights_shape_2d, valid_dims) num_invalid_dims = array_ops.size( invalid_dims.values, name="num_invalid_dims") return math_ops.equal(0, num_invalid_dims, name=scope) def _has_valid_nonscalar_shape( weights_rank, weights_shape, values_rank, values_shape): with ops.name_scope( None, "has_valid_nonscalar_shape", (weights_rank, weights_shape, values_rank, values_shape)) as scope: is_same_rank = math_ops.equal( values_rank, weights_rank, name="is_same_rank") return control_flow_ops.cond( is_same_rank, lambda: _has_valid_dims(weights_shape, values_shape), lambda: is_same_rank, name=scope) _ASSERT_BROADCASTABLE_ERROR_PREFIX = "weights can not be broadcast to values." def assert_broadcastable(weights, values): """Asserts `weights` can be broadcast to `values`. In `tf.losses` and `tf.metrics`, we support limited weight broadcasting. We let weights be either scalar, or the same rank as the target values, with each dimension either 1, or the same as the corresponding values dimension. Args: weights: `Tensor` of weights. values: `Tensor` of values to which weights are applied. Returns: `Operation` raising `InvalidArgumentError` if `weights` has incorrect shape. `no_op` if static checks determine `weights` has correct shape. Raises: ValueError: If static checks determine `weights` has incorrect shape. 
""" with ops.name_scope(None, "assert_broadcastable", (weights, values)) as scope: with ops.name_scope(None, "weights", (weights,)) as weights_scope: weights = ops.convert_to_tensor(weights, name=weights_scope) weights_shape = array_ops.shape(weights, name="shape") weights_rank = array_ops.rank(weights, name="rank") weights_rank_static = tensor_util.constant_value(weights_rank) with ops.name_scope(None, "values", (values,)) as values_scope: values = ops.convert_to_tensor(values, name=values_scope) values_shape = array_ops.shape(values, name="shape") values_rank = array_ops.rank(values, name="rank") values_rank_static = tensor_util.constant_value(values_rank) # Try static checks. if weights_rank_static is not None and values_rank_static is not None: if weights_rank_static == 0: return control_flow_ops.no_op(name="static_scalar_check_success") if weights_rank_static != values_rank_static: raise ValueError( "%s values.rank=%s. weights.rank=%s." " values.shape=%s. weights.shape=%s." % ( _ASSERT_BROADCASTABLE_ERROR_PREFIX, values_rank_static, weights_rank_static, values.shape, weights.shape)) weights_shape_static = tensor_util.constant_value(weights_shape) values_shape_static = tensor_util.constant_value(values_shape) if weights_shape_static is not None and values_shape_static is not None: # Sanity check, this should always be true since we checked rank above. ndims = len(values_shape_static) assert ndims == len(weights_shape_static) for i in range(ndims): if weights_shape_static[i] not in (1, values_shape_static[i]): raise ValueError( "%s Mismatch at dim %s. values.shape=%s weights.shape=%s." % ( _ASSERT_BROADCASTABLE_ERROR_PREFIX, i, values_shape_static, weights_shape_static)) return control_flow_ops.no_op(name="static_dims_check_success") # Dynamic checks. is_scalar = math_ops.equal(0, weights_rank, name="is_scalar") data = ( _ASSERT_BROADCASTABLE_ERROR_PREFIX, "weights.shape=", weights.name, weights_shape, "values.shape=", values.name, values_shape, "is_scalar=", is_scalar, ) is_valid_shape = control_flow_ops.cond( is_scalar, lambda: is_scalar, lambda: _has_valid_nonscalar_shape( # pylint: disable=g-long-lambda weights_rank, weights_shape, values_rank, values_shape), name="is_valid_shape") return control_flow_ops.Assert(is_valid_shape, data, name=scope) def broadcast_weights(weights, values): """Broadcast `weights` to the same shape as `values`. This returns a version of `weights` following the same broadcast rules as `mul(weights, values)`, but limited to the weights shapes allowed by `assert_broadcastable`. When computing a weighted average, use this function to broadcast `weights` before summing them; e.g., `reduce_sum(w * v) / reduce_sum(_broadcast_weights(w, v))`. Args: weights: `Tensor` whose shape is broadcastable to `values` according to the rules of `assert_broadcastable`. values: `Tensor` of any shape. Returns: `weights` broadcast to `values` shape according to the rules of `assert_broadcastable`. """ with ops.name_scope(None, "broadcast_weights", (weights, values)) as scope: values = ops.convert_to_tensor(values, name="values") weights = ops.convert_to_tensor( weights, dtype=values.dtype.base_dtype, name="weights") # Try static check for exact match. 
weights_shape = weights.get_shape() values_shape = values.get_shape() if (weights_shape.is_fully_defined() and values_shape.is_fully_defined() and weights_shape.is_compatible_with(values_shape)): return weights with ops.control_dependencies((assert_broadcastable(weights, values),)): return math_ops.multiply( weights, array_ops.ones_like(values), name=scope)
tensorflow-master
tensorflow/python/ops/weights_broadcast_ops.py
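`assert_broadcastable` and `broadcast_weights` above implement the limited broadcasting rule used by `tf.losses` and `tf.metrics`: weights must be scalar, or have the same rank as the values with every dimension either 1 or equal. A small sketch under the assumption that the internal module path is importable exactly as in the source:

```python
# Sketch of the allowed weight shapes (assumed TF 1.x; internal module path).
import tensorflow as tf
from tensorflow.python.ops import weights_broadcast_ops

values = tf.ones([2, 3, 4])
weights = tf.constant([[[1.], [0.], [1.]]])   # shape [1, 3, 1]: rank matches,
                                              # every dim is 1 or equal
broadcast = weights_broadcast_ops.broadcast_weights(weights, values)
# broadcast has shape [2, 3, 4] (weights * ones_like(values)).

# A rank mismatch is rejected statically:
# weights_broadcast_ops.assert_broadcastable(tf.ones([3, 4]), values)
#   -> ValueError("weights can not be broadcast to values. ...")
```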
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tensorflow set operations.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function # go/tf-wildcard-import # pylint: disable=wildcard-import from tensorflow.python.ops.sets_impl import * # pylint: enable=wildcard-import
tensorflow-master
tensorflow/python/ops/sets.py
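The wildcard re-export above is what backs calls such as `sets.set_difference` in `_has_valid_dims` earlier. A minimal sketch of that per-row set-difference trick, assuming the TF 1.x `tf.sets.set_difference` API with dense integer inputs whose last dimension is the set dimension:

```python
# Per-row set-difference sketch (assumed TF 1.x tf.sets API; mirrors
# _has_valid_dims in weights_broadcast_ops.py above).
import tensorflow as tf

values_shape = tf.constant([[2], [3], [4]])                  # shape [n, 1]
valid_dims = tf.concat(
    [values_shape, tf.ones_like(values_shape)], axis=1)      # row i: {dim_i, 1}

weights_shape = tf.constant([[2], [1], [4]])                 # candidate dims
invalid = tf.sets.set_difference(weights_shape, valid_dims)  # SparseTensor
# tf.size(invalid.values) == 0 here, so these weights dims are valid:
# every entry is either 1 or equal to the corresponding values dimension.
```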
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Operations for generating random numbers.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import random_seed from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import gen_random_ops from tensorflow.python.ops import math_ops # go/tf-wildcard-import # pylint: disable=wildcard-import from tensorflow.python.ops.gen_random_ops import * from tensorflow.python.util import deprecation from tensorflow.python.util.tf_export import tf_export # pylint: enable=wildcard-import def _ShapeTensor(shape): """Convert to an int32 or int64 tensor, defaulting to int32 if empty.""" if isinstance(shape, (tuple, list)) and not shape: dtype = dtypes.int32 else: dtype = None return ops.convert_to_tensor(shape, dtype=dtype, name="shape") @tf_export("random.normal", v1=["random.normal", "random_normal"]) @deprecation.deprecated_endpoints("random_normal") def random_normal(shape, mean=0.0, stddev=1.0, dtype=dtypes.float32, seed=None, name=None): """Outputs random values from a normal distribution. Args: shape: A 1-D integer Tensor or Python array. The shape of the output tensor. mean: A 0-D Tensor or Python value of type `dtype`. The mean of the normal distribution. stddev: A 0-D Tensor or Python value of type `dtype`. The standard deviation of the normal distribution. dtype: The type of the output. seed: A Python integer. Used to create a random seed for the distribution. See `tf.compat.v1.set_random_seed` for behavior. name: A name for the operation (optional). Returns: A tensor of the specified shape filled with random normal values. """ with ops.name_scope(name, "random_normal", [shape, mean, stddev]) as name: shape_tensor = _ShapeTensor(shape) mean_tensor = ops.convert_to_tensor(mean, dtype=dtype, name="mean") stddev_tensor = ops.convert_to_tensor(stddev, dtype=dtype, name="stddev") seed1, seed2 = random_seed.get_seed(seed) rnd = gen_random_ops.random_standard_normal( shape_tensor, dtype, seed=seed1, seed2=seed2) mul = rnd * stddev_tensor value = math_ops.add(mul, mean_tensor, name=name) return value ops.NotDifferentiable("RandomStandardNormal") def parameterized_truncated_normal(shape, means=0.0, stddevs=1.0, minvals=-2.0, maxvals=2.0, dtype=dtypes.float32, seed=None, name=None): """Outputs random values from a truncated normal distribution. The generated values follow a normal distribution with specified mean and standard deviation, except that values whose magnitude is more than 2 standard deviations from the mean are dropped and re-picked. Args: shape: A 1-D integer Tensor or Python array. The shape of the output tensor. 
means: A 0-D Tensor or Python value of type `dtype`. The mean of the truncated normal distribution. stddevs: A 0-D Tensor or Python value of type `dtype`. The standard deviation of the truncated normal distribution. minvals: A 0-D Tensor or Python value of type `dtype`. The minimum value of the truncated normal distribution. maxvals: A 0-D Tensor or Python value of type `dtype`. The maximum value of the truncated normal distribution. dtype: The type of the output. seed: A Python integer. Used to create a random seed for the distribution. See `tf.compat.v1.set_random_seed` for behavior. name: A name for the operation (optional). Returns: A tensor of the specified shape filled with random truncated normal values. """ with ops.name_scope(name, "parameterized_truncated_normal", [shape, means, stddevs, minvals, maxvals]) as name: shape_tensor = _ShapeTensor(shape) means_tensor = ops.convert_to_tensor(means, dtype=dtype, name="means") stddevs_tensor = ops.convert_to_tensor(stddevs, dtype=dtype, name="stddevs") minvals_tensor = ops.convert_to_tensor(minvals, dtype=dtype, name="minvals") maxvals_tensor = ops.convert_to_tensor(maxvals, dtype=dtype, name="maxvals") seed1, seed2 = random_seed.get_seed(seed) rnd = gen_random_ops.parameterized_truncated_normal( shape_tensor, means_tensor, stddevs_tensor, minvals_tensor, maxvals_tensor, seed=seed1, seed2=seed2) return rnd @tf_export("random.truncated_normal", v1=["random.truncated_normal", "truncated_normal"]) @deprecation.deprecated_endpoints("truncated_normal") def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=dtypes.float32, seed=None, name=None): """Outputs random values from a truncated normal distribution. The generated values follow a normal distribution with specified mean and standard deviation, except that values whose magnitude is more than 2 standard deviations from the mean are dropped and re-picked. Args: shape: A 1-D integer Tensor or Python array. The shape of the output tensor. mean: A 0-D Tensor or Python value of type `dtype`. The mean of the truncated normal distribution. stddev: A 0-D Tensor or Python value of type `dtype`. The standard deviation of the normal distribution, before truncation. dtype: The type of the output. seed: A Python integer. Used to create a random seed for the distribution. See `tf.compat.v1.set_random_seed` for behavior. name: A name for the operation (optional). Returns: A tensor of the specified shape filled with random truncated normal values. """ with ops.name_scope(name, "truncated_normal", [shape, mean, stddev]) as name: shape_tensor = _ShapeTensor(shape) mean_tensor = ops.convert_to_tensor(mean, dtype=dtype, name="mean") stddev_tensor = ops.convert_to_tensor(stddev, dtype=dtype, name="stddev") seed1, seed2 = random_seed.get_seed(seed) rnd = gen_random_ops.truncated_normal( shape_tensor, dtype, seed=seed1, seed2=seed2) mul = rnd * stddev_tensor value = math_ops.add(mul, mean_tensor, name=name) return value ops.NotDifferentiable("ParameterizedTruncatedNormal") ops.NotDifferentiable("TruncatedNormal") @tf_export("random.uniform", v1=["random.uniform", "random_uniform"]) @deprecation.deprecated_endpoints("random_uniform") def random_uniform(shape, minval=0, maxval=None, dtype=dtypes.float32, seed=None, name=None): """Outputs random values from a uniform distribution. The generated values follow a uniform distribution in the range `[minval, maxval)`. The lower bound `minval` is included in the range, while the upper bound `maxval` is excluded. For floats, the default range is `[0, 1)`. 
For ints, at least `maxval` must be specified explicitly. In the integer case, the random integers are slightly biased unless `maxval - minval` is an exact power of two. The bias is small for values of `maxval - minval` significantly smaller than the range of the output (either `2**32` or `2**64`). Args: shape: A 1-D integer Tensor or Python array. The shape of the output tensor. minval: A 0-D Tensor or Python value of type `dtype`. The lower bound on the range of random values to generate. Defaults to 0. maxval: A 0-D Tensor or Python value of type `dtype`. The upper bound on the range of random values to generate. Defaults to 1 if `dtype` is floating point. dtype: The type of the output: `float16`, `float32`, `float64`, `int32`, or `int64`. seed: A Python integer. Used to create a random seed for the distribution. See `tf.compat.v1.set_random_seed` for behavior. name: A name for the operation (optional). Returns: A tensor of the specified shape filled with random uniform values. Raises: ValueError: If `dtype` is integral and `maxval` is not specified. """ dtype = dtypes.as_dtype(dtype) if dtype not in (dtypes.float16, dtypes.bfloat16, dtypes.float32, dtypes.float64, dtypes.int32, dtypes.int64): raise ValueError("Invalid dtype %r" % dtype) if maxval is None: if dtype.is_integer: raise ValueError("Must specify maxval for integer dtype %r" % dtype) maxval = 1 with ops.name_scope(name, "random_uniform", [shape, minval, maxval]) as name: shape = _ShapeTensor(shape) minval = ops.convert_to_tensor(minval, dtype=dtype, name="min") maxval = ops.convert_to_tensor(maxval, dtype=dtype, name="max") seed1, seed2 = random_seed.get_seed(seed) if dtype.is_integer: return gen_random_ops.random_uniform_int( shape, minval, maxval, seed=seed1, seed2=seed2, name=name) else: rnd = gen_random_ops.random_uniform(shape, dtype, seed=seed1, seed2=seed2) return math_ops.add(rnd * (maxval - minval), minval, name=name) ops.NotDifferentiable("RandomUniform") @tf_export("random.shuffle", v1=["random.shuffle", "random_shuffle"]) @deprecation.deprecated_endpoints("random_shuffle") def random_shuffle(value, seed=None, name=None): """Randomly shuffles a tensor along its first dimension. The tensor is shuffled along dimension 0, such that each `value[j]` is mapped to one and only one `output[i]`. For example, a mapping that might occur for a 3x2 tensor is: ```python [[1, 2], [[5, 6], [3, 4], ==> [1, 2], [5, 6]] [3, 4]] ``` Args: value: A Tensor to be shuffled. seed: A Python integer. Used to create a random seed for the distribution. See `tf.compat.v1.set_random_seed` for behavior. name: A name for the operation (optional). Returns: A tensor of same shape and type as `value`, shuffled along its first dimension. """ seed1, seed2 = random_seed.get_seed(seed) return gen_random_ops.random_shuffle( value, seed=seed1, seed2=seed2, name=name) @tf_export("image.random_crop", v1=["image.random_crop", "random_crop"]) @deprecation.deprecated_endpoints("random_crop") def random_crop(value, size, seed=None, name=None): """Randomly crops a tensor to a given size. Slices a shape `size` portion out of `value` at a uniformly chosen offset. Requires `value.shape >= size`. If a dimension should not be cropped, pass the full size of that dimension. For example, RGB images can be cropped with `size = [crop_height, crop_width, 3]`. Args: value: Input tensor to crop. size: 1-D tensor with size the rank of `value`. seed: Python integer. Used to create a random seed. See `tf.compat.v1.set_random_seed` for behavior. 
name: A name for this operation (optional). Returns: A cropped tensor of the same rank as `value` and shape `size`. """ # TODO(shlens): Implement edge case to guarantee output size dimensions. # If size > value.shape, zero pad the result so that it always has shape # exactly size. with ops.name_scope(name, "random_crop", [value, size]) as name: value = ops.convert_to_tensor(value, name="value") size = ops.convert_to_tensor(size, dtype=dtypes.int32, name="size") shape = array_ops.shape(value) check = control_flow_ops.Assert( math_ops.reduce_all(shape >= size), ["Need value.shape >= size, got ", shape, size], summarize=1000) shape = control_flow_ops.with_dependencies([check], shape) limit = shape - size + 1 offset = random_uniform( array_ops.shape(shape), dtype=size.dtype, maxval=size.dtype.max, seed=seed) % limit return array_ops.slice(value, offset, size, name=name) @tf_export(v1=["random.multinomial", "multinomial"]) @deprecation.deprecated( date=None, instructions="Use `tf.random.categorical` instead.") def multinomial(logits, num_samples, seed=None, name=None, output_dtype=None): """Draws samples from a multinomial distribution. Example: ```python # samples has shape [1, 5], where each value is either 0 or 1 with equal # probability. samples = tf.random.categorical(tf.math.log([[10., 10.]]), 5) ``` Args: logits: 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, :]` represents the unnormalized log-probabilities for all classes. num_samples: 0-D. Number of independent samples to draw for each row slice. seed: A Python integer. Used to create a random seed for the distribution. See `tf.compat.v1.set_random_seed` for behavior. name: Optional name for the operation. output_dtype: integer type to use for the output. Defaults to int64. Returns: The drawn samples of shape `[batch_size, num_samples]`. """ with ops.name_scope(name, "multinomial", [logits]): return multinomial_categorical_impl(logits, num_samples, output_dtype, seed) @tf_export("random.categorical") def categorical(logits, num_samples, dtype=None, seed=None, name=None): """Draws samples from a categorical distribution. Example: ```python # samples has shape [1, 5], where each value is either 0 or 1 with equal # probability. samples = tf.random.categorical(tf.math.log([[10., 10.]]), 5) ``` Args: logits: 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, :]` represents the unnormalized log-probabilities for all classes. num_samples: 0-D. Number of independent samples to draw for each row slice. dtype: integer type to use for the output. Defaults to int64. seed: A Python integer. Used to create a random seed for the distribution. See `tf.compat.v1.set_random_seed` for behavior. name: Optional name for the operation. Returns: The drawn samples of shape `[batch_size, num_samples]`. 
""" with ops.name_scope(name, "categorical", [logits]): return multinomial_categorical_impl(logits, num_samples, dtype, seed) def multinomial_categorical_impl(logits, num_samples, dtype, seed): """Implementation for random.categorical (v1) and random.categorical (v2).""" logits = ops.convert_to_tensor(logits, name="logits") seed1, seed2 = random_seed.get_seed(seed) return gen_random_ops.multinomial( logits, num_samples, seed=seed1, seed2=seed2, output_dtype=dtype) ops.NotDifferentiable("Multinomial") @tf_export("random.gamma", v1=["random.gamma", "random_gamma"]) @deprecation.deprecated_endpoints("random_gamma") def random_gamma(shape, alpha, beta=None, dtype=dtypes.float32, seed=None, name=None): """Draws `shape` samples from each of the given Gamma distribution(s). `alpha` is the shape parameter describing the distribution(s), and `beta` is the inverse scale parameter(s). Note: Because internal calculations are done using `float64` and casting has `floor` semantics, we must manually map zero outcomes to the smallest possible positive floating-point value, i.e., `np.finfo(dtype).tiny`. This means that `np.finfo(dtype).tiny` occurs more frequently than it otherwise should. This bias can only happen for small values of `alpha`, i.e., `alpha << 1` or large values of `beta`, i.e., `beta >> 1`. The samples are differentiable w.r.t. alpha and beta. The derivatives are computed using the approach described in the paper [Michael Figurnov, Shakir Mohamed, Andriy Mnih. Implicit Reparameterization Gradients, 2018](https://arxiv.org/abs/1805.08498) Example: ```python samples = tf.random.gamma([10], [0.5, 1.5]) # samples has shape [10, 2], where each slice [:, 0] and [:, 1] represents # the samples drawn from each distribution samples = tf.random.gamma([7, 5], [0.5, 1.5]) # samples has shape [7, 5, 2], where each slice [:, :, 0] and [:, :, 1] # represents the 7x5 samples drawn from each of the two distributions alpha = tf.constant([[1.],[3.],[5.]]) beta = tf.constant([[3., 4.]]) samples = tf.random.gamma([30], alpha=alpha, beta=beta) # samples has shape [30, 3, 2], with 30 samples each of 3x2 distributions. loss = tf.reduce_mean(tf.square(samples)) dloss_dalpha, dloss_dbeta = tf.gradients(loss, [alpha, beta]) # unbiased stochastic derivatives of the loss function alpha.shape == dloss_dalpha.shape # True beta.shape == dloss_dbeta.shape # True ``` Args: shape: A 1-D integer Tensor or Python array. The shape of the output samples to be drawn per alpha/beta-parameterized distribution. alpha: A Tensor or Python value or N-D array of type `dtype`. `alpha` provides the shape parameter(s) describing the gamma distribution(s) to sample. Must be broadcastable with `beta`. beta: A Tensor or Python value or N-D array of type `dtype`. Defaults to 1. `beta` provides the inverse scale parameter(s) of the gamma distribution(s) to sample. Must be broadcastable with `alpha`. dtype: The type of alpha, beta, and the output: `float16`, `float32`, or `float64`. seed: A Python integer. Used to create a random seed for the distributions. See `tf.compat.v1.set_random_seed` for behavior. name: Optional name for the operation. Returns: samples: a `Tensor` of shape `tf.concat([shape, tf.shape(alpha + beta)], axis=0)` with values of type `dtype`. 
""" with ops.name_scope(name, "random_gamma", [shape, alpha, beta]): shape = ops.convert_to_tensor(shape, name="shape", dtype=dtypes.int32) alpha = ops.convert_to_tensor(alpha, name="alpha", dtype=dtype) beta = ops.convert_to_tensor( beta if beta is not None else 1, name="beta", dtype=dtype) alpha_broadcast = alpha + array_ops.zeros_like(beta) seed1, seed2 = random_seed.get_seed(seed) return math_ops.maximum( np.finfo(dtype.as_numpy_dtype).tiny, gen_random_ops.random_gamma( shape, alpha_broadcast, seed=seed1, seed2=seed2) / beta) @tf_export(v1=["random.poisson", "random_poisson"]) @deprecation.deprecated_endpoints("random_poisson") def random_poisson(lam, shape, dtype=dtypes.float32, seed=None, name=None): """Draws `shape` samples from each of the given Poisson distribution(s). `lam` is the rate parameter describing the distribution(s). Example: ```python samples = tf.random.poisson([0.5, 1.5], [10]) # samples has shape [10, 2], where each slice [:, 0] and [:, 1] represents # the samples drawn from each distribution samples = tf.random.poisson([12.2, 3.3], [7, 5]) # samples has shape [7, 5, 2], where each slice [:, :, 0] and [:, :, 1] # represents the 7x5 samples drawn from each of the two distributions ``` Args: lam: A Tensor or Python value or N-D array of type `dtype`. `lam` provides the rate parameter(s) describing the poisson distribution(s) to sample. shape: A 1-D integer Tensor or Python array. The shape of the output samples to be drawn per "rate"-parameterized distribution. dtype: The type of the output: `float16`, `float32`, `float64`, `int32` or `int64`. seed: A Python integer. Used to create a random seed for the distributions. See `tf.compat.v1.set_random_seed` for behavior. name: Optional name for the operation. Returns: samples: a `Tensor` of shape `tf.concat([shape, tf.shape(lam)], axis=0)` with values of type `dtype`. """ return random_poisson_v2(shape, lam, dtype, seed, name) @tf_export("random.poisson", v1=[]) def random_poisson_v2(shape, lam, dtype=dtypes.float32, seed=None, name=None): """Draws `shape` samples from each of the given Poisson distribution(s). `lam` is the rate parameter describing the distribution(s). Example: ```python samples = tf.random.poisson([10], [0.5, 1.5]) # samples has shape [10, 2], where each slice [:, 0] and [:, 1] represents # the samples drawn from each distribution samples = tf.random.poisson([7, 5], [12.2, 3.3]) # samples has shape [7, 5, 2], where each slice [:, :, 0] and [:, :, 1] # represents the 7x5 samples drawn from each of the two distributions ``` Args: shape: A 1-D integer Tensor or Python array. The shape of the output samples to be drawn per "rate"-parameterized distribution. lam: A Tensor or Python value or N-D array of type `dtype`. `lam` provides the rate parameter(s) describing the poisson distribution(s) to sample. dtype: The type of the output: `float16`, `float32`, `float64`, `int32` or `int64`. seed: A Python integer. Used to create a random seed for the distributions. See `tf.compat.v1.set_random_seed` for behavior. name: Optional name for the operation. Returns: samples: a `Tensor` of shape `tf.concat([shape, tf.shape(lam)], axis=0)` with values of type `dtype`. """ with ops.name_scope(name, "random_poisson", [lam, shape]): shape = ops.convert_to_tensor(shape, name="shape", dtype=dtypes.int32) seed1, seed2 = random_seed.get_seed(seed) return gen_random_ops.random_poisson_v2( shape, lam, dtype=dtype, seed=seed1, seed2=seed2)
tensorflow-master
tensorflow/python/ops/random_ops.py
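A further usage sketch for the uniform and categorical samplers defined in the file above, assuming the public `tf.random.*` endpoints registered there; shapes and seeds are illustrative only:

```python
# Sampling sketch (assumed TF 1.x public endpoints; values illustrative).
import tensorflow as tf

floats = tf.random.uniform([2, 3], seed=1)                  # float32 in [0, 1)
ints = tf.random.uniform([2, 3], minval=0, maxval=10,
                         dtype=tf.int32, seed=1)            # maxval is required
                                                            # for integer dtypes

# tf.random.categorical draws class indices from unnormalized log-probs.
logits = tf.math.log([[10., 10.]])      # one row, two equally likely classes
samples = tf.random.categorical(logits, num_samples=5, seed=1)
# samples: shape [1, 5], each value 0 or 1.
```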
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Python ops defined in math_grad.py.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.eager import backprop from tensorflow.python.eager import context from tensorflow.python.eager import execution_callbacks from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import gradient_checker from tensorflow.python.ops import gradient_checker_v2 from tensorflow.python.ops import gradients from tensorflow.python.ops import math_ops from tensorflow.python.platform import test RAISE = execution_callbacks.ExecutionCallback.RAISE class SquaredDifferenceOpTest(test.TestCase): def _testGrad(self, left_shape, right_shape): if len(left_shape) > len(right_shape): output_shape = left_shape else: output_shape = right_shape l = np.random.randn(*left_shape) r = np.random.randn(*right_shape) with self.cached_session(use_gpu=True): left_tensor = constant_op.constant(l, shape=left_shape) right_tensor = constant_op.constant(r, shape=right_shape) output = math_ops.squared_difference(left_tensor, right_tensor) left_err = gradient_checker.compute_gradient_error( left_tensor, left_shape, output, output_shape, x_init_value=l) right_err = gradient_checker.compute_gradient_error( right_tensor, right_shape, output, output_shape, x_init_value=r) self.assertLess(left_err, 1e-10) self.assertLess(right_err, 1e-10) @test_util.run_deprecated_v1 def testGrad(self): self._testGrad([1, 2, 3, 2], [3, 2]) self._testGrad([2, 4], [3, 2, 4]) class AbsOpTest(test.TestCase): def _biasedRandN(self, shape, bias=0.1, sigma=1.0): """Returns samples from a normal distribution shifted `bias` away from 0.""" value = np.random.randn(*shape) * sigma return value + np.sign(value) * bias def _testGrad(self, shape, dtype=None, max_error=None, bias=None, sigma=None): np.random.seed(7) if dtype in (dtypes.complex64, dtypes.complex128): value = math_ops.complex( self._biasedRandN( shape, bias=bias, sigma=sigma), self._biasedRandN( shape, bias=bias, sigma=sigma)) else: value = ops.convert_to_tensor( self._biasedRandN( shape, bias=bias), dtype=dtype) with self.cached_session(use_gpu=True): output = math_ops.abs(value) error = gradient_checker.compute_gradient_error( value, shape, output, output.get_shape().as_list()) self.assertLess(error, max_error) @test_util.run_deprecated_v1 def testComplexAbs(self): # Bias random test values away from zero to avoid numeric instabilities. 
self._testGrad( [3, 3], dtype=dtypes.float32, max_error=2e-5, bias=0.1, sigma=1.0) self._testGrad( [3, 3], dtype=dtypes.complex64, max_error=2e-5, bias=0.1, sigma=1.0) # Ensure stability near the pole at zero. self._testGrad( [3, 3], dtype=dtypes.float32, max_error=100.0, bias=0.0, sigma=0.1) self._testGrad( [3, 3], dtype=dtypes.complex64, max_error=100.0, bias=0.0, sigma=0.1) class MinOrMaxGradientTest(test.TestCase): @test_util.run_deprecated_v1 def testMinGradient(self): inputs = constant_op.constant([1.0], dtype=dtypes.float32) outputs = math_ops.reduce_min(array_ops.concat([inputs, inputs], 0)) with self.cached_session(): error = gradient_checker.compute_gradient_error(inputs, [1], outputs, []) self.assertLess(error, 1e-4) @test_util.run_deprecated_v1 def testMaxGradient(self): inputs = constant_op.constant([1.0], dtype=dtypes.float32) outputs = math_ops.reduce_max(array_ops.concat([inputs, inputs], 0)) with self.cached_session(): error = gradient_checker.compute_gradient_error(inputs, [1], outputs, []) self.assertLess(error, 1e-4) class MaximumOrMinimumGradientTest(test.TestCase): @test_util.run_deprecated_v1 def testMaximumGradient(self): inputs = constant_op.constant([1.0, 2.0, 3.0, 4.0], dtype=dtypes.float32) outputs = math_ops.maximum(inputs, 3.0) with self.cached_session(): error = gradient_checker.compute_gradient_error(inputs, [4], outputs, [4]) self.assertLess(error, 1e-4) @test_util.run_deprecated_v1 def testMinimumGradient(self): inputs = constant_op.constant([1.0, 2.0, 3.0, 4.0], dtype=dtypes.float32) outputs = math_ops.minimum(inputs, 2.0) with self.cached_session(): error = gradient_checker.compute_gradient_error(inputs, [4], outputs, [4]) self.assertLess(error, 1e-4) class ProdGradientTest(test.TestCase): @test_util.run_deprecated_v1 def testProdGradient(self): inputs = constant_op.constant([[1., 2.], [3., 4.]], dtype=dtypes.float32) outputs = math_ops.reduce_prod(inputs) with self.cached_session(): error = gradient_checker.compute_gradient_error( inputs, inputs.get_shape().as_list(), outputs, outputs.get_shape().as_list()) self.assertLess(error, 1e-4) @test_util.run_deprecated_v1 def testProdGradientForNegativeAxis(self): inputs = constant_op.constant([[1., 2.], [3., 4.]], dtype=dtypes.float32) outputs = math_ops.reduce_prod(inputs, -1) with self.cached_session(): error = gradient_checker.compute_gradient_error( inputs, inputs.get_shape().as_list(), outputs, outputs.get_shape().as_list()) self.assertLess(error, 1e-4) @test_util.run_deprecated_v1 def testProdGradientComplex(self): for dtype in dtypes.complex64, dtypes.complex128: inputs = constant_op.constant([[1 + 3j, 2 - 1j], [3j, 4]], dtype=dtype) outputs = math_ops.reduce_prod(inputs) with self.cached_session(): error = gradient_checker.compute_gradient_error( inputs, inputs.get_shape().as_list(), outputs, outputs.get_shape().as_list()) self.assertLess(error, 1e-4) @test_util.run_deprecated_v1 def testProdGradientForNegativeAxisComplex(self): for dtype in dtypes.complex64, dtypes.complex128: inputs = constant_op.constant([[1 + 3j, 2 - 1j], [3j, 4]], dtype=dtype) outputs = math_ops.reduce_prod(inputs, -1) with self.cached_session(): error = gradient_checker.compute_gradient_error( inputs, inputs.get_shape().as_list(), outputs, outputs.get_shape().as_list()) self.assertLess(error, 1e-4) class SegmentMinOrMaxGradientTest(test.TestCase): @test_util.run_deprecated_v1 def testSegmentMinGradient(self): data = constant_op.constant([1.0, 2.0, 3.0], dtype=dtypes.float32) segment_ids = constant_op.constant([0, 0, 1], 
dtype=dtypes.int64) segment_min = math_ops.segment_min(data, segment_ids) with self.cached_session(): error = gradient_checker.compute_gradient_error(data, [3], segment_min, [2]) self.assertLess(error, 1e-4) @test_util.run_deprecated_v1 def testSegmentMaxGradient(self): data = constant_op.constant([1.0, 2.0, 3.0], dtype=dtypes.float32) segment_ids = constant_op.constant([0, 0, 1], dtype=dtypes.int64) segment_max = math_ops.segment_max(data, segment_ids) with self.cached_session(): error = gradient_checker.compute_gradient_error(data, [3], segment_max, [2]) self.assertLess(error, 1e-4) @test_util.run_deprecated_v1 def testSegmentMinGradientWithTies(self): inputs = constant_op.constant([1.0], dtype=dtypes.float32) data = array_ops.concat([inputs, inputs], 0) segment_ids = constant_op.constant([0, 0], dtype=dtypes.int64) segment_min = math_ops.segment_min(data, segment_ids) with self.cached_session(): error = gradient_checker.compute_gradient_error(inputs, [1], segment_min, [1]) self.assertLess(error, 1e-4) @test_util.run_deprecated_v1 def testSegmentMaxGradientWithTies(self): inputs = constant_op.constant([1.0], dtype=dtypes.float32) data = array_ops.concat([inputs, inputs], 0) segment_ids = constant_op.constant([0, 0], dtype=dtypes.int64) segment_max = math_ops.segment_max(data, segment_ids) with self.cached_session(): error = gradient_checker.compute_gradient_error(inputs, [1], segment_max, [1]) self.assertLess(error, 1e-4) class FloorModGradientTest(test.TestCase): @test_util.run_deprecated_v1 def testFloorModGradient(self): # Making sure the input is not near the discontinuity point where # x/y == floor(x/y) ns = constant_op.constant([17.], dtype=dtypes.float32) inputs = constant_op.constant([131.], dtype=dtypes.float32) floor_mod = math_ops.floormod(inputs, ns) with self.cached_session(): error = gradient_checker.compute_gradient_error(inputs, [1], floor_mod, [1]) self.assertLess(error, 1e-4) class DivNoNanGradientTest(test.TestCase): @test_util.run_deprecated_v1 def testBasicGradient(self): inputs = constant_op.constant(np.arange(-3, 3), dtype=dtypes.float32) outputs = math_ops.div_no_nan(inputs, 1 + math_ops.abs(inputs)) with self.cached_session(): error = gradient_checker.compute_gradient_error( inputs, inputs.get_shape().as_list(), outputs, outputs.get_shape().as_list()) self.assertLess(error, 1e-4) @test_util.run_deprecated_v1 def testGradientWithDenominatorIsZero(self): x = constant_op.constant(np.arange(-3, 3), dtype=dtypes.float32) y = array_ops.zeros_like(x, dtype=dtypes.float32) outputs = math_ops.div_no_nan(x, y) with self.cached_session(): dx, dy = gradients.gradients(outputs, [x, y]) self.assertAllClose(dx.eval(), np.zeros(x.shape.as_list())) self.assertAllClose(dy.eval(), np.zeros(y.shape.as_list())) class MulNoNanGradientTest(test.TestCase): @test_util.run_deprecated_v1 def testBasicGradient(self): inputs = constant_op.constant(np.arange(-3, 3), dtype=dtypes.float32) outputs = math_ops.mul_no_nan(inputs, 1 + math_ops.abs(inputs)) with self.cached_session(): error = gradient_checker.compute_gradient_error( inputs, inputs.get_shape().as_list(), outputs, outputs.get_shape().as_list()) self.assertLess(error, 1e-4) @test_util.run_deprecated_v1 def testGradientWithRhsIsZero(self): x_vals = [0, 1.0, np.nan, np.inf, np.NINF] x = constant_op.constant(x_vals, dtype=dtypes.float32) y = array_ops.zeros_like(x, dtype=dtypes.float32) outputs = math_ops.mul_no_nan(x, y) with self.cached_session(): dx, dy = gradients.gradients(outputs, [x, y]) self.assertAllClose(dx.eval(), 
np.zeros(x.shape.as_list())) self.assertAllClose(dy.eval(), x_vals) class XlogyTest(test.TestCase): def _xlogy_gradients(self, x, y): xlogy_xgrad = self.evaluate(gradients.gradients(math_ops.xlogy(x, y), x)[0]) xlogy_ygrad = self.evaluate(gradients.gradients(math_ops.xlogy(x, y), y)[0]) return xlogy_xgrad, xlogy_ygrad @test_util.run_deprecated_v1 def testNonZeroValuesGrad(self): for dtype in [dtypes.float16, dtypes.float32, dtypes.float64]: x = constant_op.constant(0.1, dtype=dtype) y = constant_op.constant(3.1, dtype=dtype) xlogy_xgrad, xlogy_ygrad = self._xlogy_gradients(x, y) xlogy_expected_xgrad = self.evaluate(math_ops.log(y)) xlogy_expected_ygrad = self.evaluate(x / y) self.assertAllClose(xlogy_expected_xgrad, xlogy_xgrad) self.assertAllClose(xlogy_expected_ygrad, xlogy_ygrad) @test_util.run_deprecated_v1 def testZeroXGrad(self): for dtype in [dtypes.float16, dtypes.float32, dtypes.float64]: x = constant_op.constant(0., dtype=dtype) y = constant_op.constant(3.1, dtype=dtype) xlogy_xgrad, xlogy_ygrad = self._xlogy_gradients(x, y) zero = self.evaluate(x) self.assertAllClose(zero, xlogy_xgrad) self.assertAllClose(zero, xlogy_ygrad) @test_util.run_deprecated_v1 def testZeroYGrad(self): for dtype in [dtypes.float16, dtypes.float32, dtypes.float64]: x = constant_op.constant(0.1, dtype=dtype) y = constant_op.constant(0., dtype=dtype) xlogy_xgrad, xlogy_ygrad = self._xlogy_gradients(x, y) self.assertAllClose(-np.inf, xlogy_xgrad) self.assertAllClose(np.inf, xlogy_ygrad) @test_util.run_deprecated_v1 def testZeroXYGrad(self): for dtype in [dtypes.float16, dtypes.float32, dtypes.float64]: x = constant_op.constant(0., dtype=dtype) y = constant_op.constant(0., dtype=dtype) xlogy_xgrad, xlogy_ygrad = self._xlogy_gradients(x, y) zero = self.evaluate(x) self.assertAllClose(zero, xlogy_xgrad) self.assertAllClose(zero, xlogy_ygrad) class XdivyTest(test.TestCase): def _xdivy_gradients(self, x, y): xdivy_xgrad = self.evaluate(gradients.gradients(math_ops.xdivy(x, y), x)[0]) xdivy_ygrad = self.evaluate(gradients.gradients(math_ops.xdivy(x, y), y)[0]) return xdivy_xgrad, xdivy_ygrad @test_util.run_deprecated_v1 def testNonZeroValuesGrad(self): for dtype in [dtypes.float16, dtypes.float32, dtypes.float64]: x = constant_op.constant(0.1, dtype=dtype) y = constant_op.constant(3.1, dtype=dtype) xdivy_xgrad, xdivy_ygrad = self._xdivy_gradients(x, y) xdivy_expected_xgrad = self.evaluate(1 / y) xdivy_expected_ygrad = self.evaluate(-x / y**2) self.assertAllClose(xdivy_expected_xgrad, xdivy_xgrad) self.assertAllClose(xdivy_expected_ygrad, xdivy_ygrad) @test_util.run_deprecated_v1 def testZeroXGrad(self): for dtype in [dtypes.float16, dtypes.float32, dtypes.float64]: x = constant_op.constant(0., dtype=dtype) y = constant_op.constant(3.1, dtype=dtype) xdivy_xgrad, xdivy_ygrad = self._xdivy_gradients(x, y) zero = self.evaluate(x) self.assertAllClose(zero, xdivy_xgrad) self.assertAllClose(zero, xdivy_ygrad) @test_util.run_deprecated_v1 def testZeroYGrad(self): for dtype in [dtypes.float16, dtypes.float32, dtypes.float64]: x = constant_op.constant(0.1, dtype=dtype) y = constant_op.constant(0., dtype=dtype) xdivy_xgrad, xdivy_ygrad = self._xdivy_gradients(x, y) self.assertAllClose(np.inf, xdivy_xgrad) self.assertAllClose(-np.inf, xdivy_ygrad) @test_util.run_deprecated_v1 def testZeroXYGrad(self): for dtype in [dtypes.float16, dtypes.float32, dtypes.float64]: x = constant_op.constant(0., dtype=dtype) y = constant_op.constant(0., dtype=dtype) xdivy_xgrad, xdivy_ygrad = self._xdivy_gradients(x, y) zero = self.evaluate(x) 
self.assertAllClose(zero, xdivy_xgrad) self.assertAllClose(zero, xdivy_ygrad) @test_util.run_all_in_graph_and_eager_modes class PowGradTest(test.TestCase): def test_zero_grad_tf_gradients(self): if context.executing_eagerly(): self.skipTest("tf.gradients not supported in eager.") x = constant_op.constant([-1., 0., 1.]) g = self.evaluate(gradients.gradients(math_ops.pow(x, 2), x)[0]) self.assertAllClose([-2., 0., 2.], g) def test_zero_grad_tape(self): with execution_callbacks.errstate(inf_or_nan=RAISE): x = constant_op.constant([-1, 0., 1.]) with backprop.GradientTape() as tape: tape.watch(x) g = tape.gradient(math_ops.pow(x, 2), x) g = self.evaluate(g) self.assertAllClose([-2., 0., 2.], g) @test_util.run_all_in_graph_and_eager_modes class NextAfterTest(test.TestCase): def _nextafter_gradient(self, x1, x2): with backprop.GradientTape() as tape: tape.watch(x1) tape.watch(x2) y = math_ops.nextafter(x1, x2) return tape.gradient(y, [x1, x2]) def testBasic(self): for dtype in [dtypes.float32, dtypes.float64]: x1 = constant_op.constant(0.1, dtype=dtype) x2 = constant_op.constant(3.1, dtype=dtype) dx1, dx2 = self._nextafter_gradient(x1, x2) expected_dx1 = constant_op.constant(1, dtype=dtype) expected_dx2 = constant_op.constant(0, dtype=dtype) self.assertAllClose(expected_dx1, dx1) self.assertAllClose(expected_dx2, dx2) def testDynamicShapes(self): for dtype in [dtypes.float32, dtypes.float64]: default_x1 = constant_op.constant(0.1, dtype=dtype) default_x2 = constant_op.constant(3.1, dtype=dtype) x1 = array_ops.placeholder_with_default(default_x1, shape=None) x2 = array_ops.placeholder_with_default(default_x2, shape=None) dx1, dx2 = self._nextafter_gradient(x1, x2) expected_dx1 = constant_op.constant(1, dtype=dtype) expected_dx2 = constant_op.constant(0, dtype=dtype) self.assertAllClose(expected_dx1, dx1) self.assertAllClose(expected_dx2, dx2) def testWithGradientChecker(self): for dtype in [dtypes.float32, dtypes.float64]: with self.cached_session(): x1 = np.array([-1, 0, 1, 2, 3], dtype=dtype.as_numpy_dtype) x2 = np.array([2, 2, 2, 2, 2], dtype=dtype.as_numpy_dtype) err = gradient_checker_v2.max_error( *gradient_checker_v2.compute_gradient( lambda x: math_ops.nextafter(x, x2), [x1])) # pylint: disable=cell-var-from-loop self.assertLess(err, 1e-3) def testBroadcastingWithGradientChecker(self): for dtype in [dtypes.float32, dtypes.float64]: with self.cached_session(): x1 = np.array([-1, 0, 1, 2, 3], dtype=dtype.as_numpy_dtype) x2 = np.array([2], dtype=dtype.as_numpy_dtype) err = gradient_checker_v2.max_error( *gradient_checker_v2.compute_gradient( lambda x: math_ops.nextafter(x, x2), [x1])) # pylint: disable=cell-var-from-loop self.assertLess(err, 1e-3) if __name__ == "__main__": test.main()
tensorflow-master
tensorflow/python/ops/math_grad_test.py
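The tests in the file above repeatedly use the same `compute_gradient_error` pattern. A stripped-down sketch of that pattern outside the test harness, assuming TF 1.x graph mode and that the internal `gradient_checker` module is importable as in the tests; the tolerance is illustrative:

```python
# Gradient-check sketch (assumed TF 1.x graph mode; internal module path).
import tensorflow as tf
from tensorflow.python.ops import gradient_checker

with tf.Graph().as_default(), tf.Session():
    inputs = tf.constant([1.0, 2.0, 3.0, 4.0], dtype=tf.float32)
    outputs = tf.maximum(inputs, 3.0)
    # Compares the analytic Jacobian against a finite-difference estimate.
    error = gradient_checker.compute_gradient_error(inputs, [4], outputs, [4])
    assert error < 1e-4
```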
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Operators for manipulating tensors.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.ops import gen_manip_ops as _gen_manip_ops from tensorflow.python.util import deprecation from tensorflow.python.util.tf_export import tf_export # pylint: disable=protected-access @tf_export('roll', v1=['roll', 'manip.roll']) @deprecation.deprecated_endpoints('manip.roll') def roll(input, shift, axis, name=None): # pylint: disable=redefined-builtin return _gen_manip_ops.roll(input, shift, axis, name) roll.__doc__ = _gen_manip_ops.roll.__doc__ # pylint: enable=protected-access
tensorflow-master
tensorflow/python/ops/manip_ops.py
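# A small usage sketch for the tf.roll wrapper defined above (not part of the
# original file). Values are illustrative; the semantics shown, elements
# shifted toward larger indices along `axis` with wrap-around like np.roll,
# are those of the underlying gen_manip_ops kernel.
import tensorflow as tf

t = tf.constant([0, 1, 2, 3, 4])
print(tf.roll(t, shift=2, axis=0))  # [3, 4, 0, 1, 2]

m = tf.constant([[1, 2, 3], [4, 5, 6]])
# Multiple shifts and axes can be given as lists: roll rows by 1, columns by -1.
print(tf.roll(m, shift=[1, -1], axis=[0, 1]))  # [[5, 6, 4], [2, 3, 1]]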
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Logging and Summary Operations.""" # pylint: disable=protected-access from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import pprint import random import sys import six from tensorflow.python import pywrap_tensorflow from tensorflow.python.compat import compat from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import tensor_util from tensorflow.python.ops import gen_logging_ops from tensorflow.python.ops import string_ops # go/tf-wildcard-import # pylint: disable=wildcard-import from tensorflow.python.ops.gen_logging_ops import * # pylint: enable=wildcard-import from tensorflow.python.platform import tf_logging from tensorflow.python.util import nest from tensorflow.python.util.deprecation import deprecated from tensorflow.python.util.tf_export import tf_export # Register printing to the cell output if we are in a Colab or Jupyter Notebook. try: get_ipython() # Exists in an ipython env like Jupyter or Colab pywrap_tensorflow.TFE_Py_EnableInteractivePythonLogging() except NameError: pass # The python wrapper for Assert is in control_flow_ops, as the Assert # call relies on certain conditionals for its dependencies. Use # control_flow_ops.Assert. # Assert and Print are special symbols in python, so we must # have an upper-case version of them. # # For users with Python 3 or Python 2.7 # with `from __future__ import print_function`, we could also allow lowercase. # See https://github.com/tensorflow/tensorflow/issues/18053 # pylint: disable=invalid-name @deprecated("2018-08-20", "Use tf.print instead of tf.Print. Note that " "tf.print returns a no-output operator that directly " "prints the output. Outside of defuns or eager mode, " "this operator will not be executed unless it is " "directly specified in session.run or used as a " "control dependency for other operators. This is " "only a concern in graph mode. Below is an example " "of how to ensure tf.print executes in graph mode:\n" """```python sess = tf.compat.v1.Session() with sess.as_default(): tensor = tf.range(10) print_op = tf.print(tensor) with tf.control_dependencies([print_op]): out = tf.add(tensor, tensor) sess.run(out) ``` Additionally, to use tf.print in python 2.7, users must make sure to import the following: `from __future__ import print_function` """) @tf_export(v1=["Print"]) def Print(input_, data, message=None, first_n=None, summarize=None, name=None): """Prints a list of tensors. This is an identity op (behaves like `tf.identity`) with the side effect of printing `data` when evaluating. Note: This op prints to the standard error. It is not currently compatible with jupyter notebook (printing to the notebook *server's* output, not into the notebook). 
Args: input_: A tensor passed through this op. data: A list of tensors to print out when op is evaluated. message: A string, prefix of the error message. first_n: Only log `first_n` number of times. Negative numbers log always; this is the default. summarize: Only print this many entries of each tensor. If None, then a maximum of 3 elements are printed per input tensor. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type and contents as `input_`. """ return gen_logging_ops._print(input_, data, message, first_n, summarize, name) # pylint: enable=invalid-name def _generate_placeholder_string(x, default_placeholder="{}"): """Generate and return a string that does not appear in `x`.""" placeholder = default_placeholder rng = random.Random(5) while placeholder in x: placeholder = placeholder + str(rng.randint(0, 9)) return placeholder def _is_filepath(output_stream): """Returns True if output_stream is a file path.""" return isinstance(output_stream, str) and output_stream.startswith("file://") # Temporarily disable pylint g-doc-args error to allow giving more context # about what the kwargs are. # Because we are using arbitrary-length positional arguments, python 2 # does not support explicitly specifying the keyword arguments in the # function definition. # pylint: disable=g-doc-args @tf_export("print") def print_v2(*inputs, **kwargs): """Print the specified inputs. Returns an operator that prints the specified inputs to a desired output stream or logging level. The inputs may be dense or sparse Tensors, primitive python objects, data structures that contain Tensors, and printable python objects. Printed tensors will recursively show the first and last `summarize` elements of each dimension. With eager execution enabled and/or inside a `tf.contrib.eager.defun` this operator will automatically execute, and users only need to call `tf.print` without using the return value. When constructing graphs outside of a `tf.contrib.eager.defun`, one must either include the returned op in the input to `session.run`, or use the operator as a control dependency for executed ops by specifying `with tf.control_dependencies([print_op])`. @compatibility(python2) In python 2.7, make sure to import the following: `from __future__ import print_function` @end_compatibility Example: Single-input usage: ```python tf.compat.v1.enable_eager_execution() tensor = tf.range(10) tf.print(tensor, output_stream=sys.stderr) ``` (This prints "[0 1 2 ... 7 8 9]" to sys.stderr) Multi-input usage: ```python tf.compat.v1.enable_eager_execution() tensor = tf.range(10) tf.print("tensors:", tensor, {2: tensor * 2}, output_stream=sys.stdout) ``` (This prints "tensors: [0 1 2 ... 7 8 9] {2: [0 2 4 ... 14 16 18]}" to sys.stdout) Usage in a defun: ```python tf.compat.v1.enable_eager_execution() @tf.contrib.eager.defun def f(): tensor = tf.range(10) tf.print(tensor, output_stream=sys.stderr) return tensor range_tensor = f() ``` (This prints "[0 1 2 ... 7 8 9]" to sys.stderr) Usage when constructing graphs: ```python sess = tf.compat.v1.Session() with sess.as_default(): tensor = tf.range(10) print_op = tf.print("tensors:", tensor, {2: tensor * 2}, output_stream=sys.stdout) with tf.control_dependencies([print_op]): tripled_tensor = tensor * 3 sess.run(tripled_tensor) ``` (This prints "tensors: [0 1 2 ... 7 8 9] {2: [0 2 4 ... 14 16 18]}" to sys.stdout) Note: In Jupyter notebooks and colabs, this operator prints to the notebook cell outputs. It will not write to the notebook kernel's console logs. 
Args: *inputs: Positional arguments that are the inputs to print. Inputs in the printed output will be separated by spaces. Inputs may be python primitives, tensors, data structures such as dicts and lists that may contain tensors (with the data structures possibly nested in arbitrary ways), and printable python objects. output_stream: The output stream, logging level, or file to print to. Defaults to sys.stderr, but sys.stdout, tf.compat.v1.logging.info, tf.compat.v1.logging.warning, and tf.compat.v1.logging.error are also supported. To print to a file, pass a string started with "file://" followed by the file path, e.g., "file:///tmp/foo.out". summarize: The first and last `summarize` elements within each dimension are recursively printed per Tensor. If None, then the first 3 and last 3 elements of each dimension are printed for each tensor. If set to -1, it will print all elements of every tensor. sep: The string to use to separate the inputs. Defaults to " ". end: End character that is appended at the end the printed string. Defaults to the newline character. name: A name for the operation (optional). Returns: A print operator that prints the specified inputs in the specified output stream or logging level. Raises: ValueError: If an unsupported output stream is specified. """ # Because we are using arbitrary-length positional arguments, python 2 # does not support explicitly specifying the keyword arguments in the # function definition. So, we manually get the keyword arguments w/ default # values here. output_stream = kwargs.pop("output_stream", sys.stderr) name = kwargs.pop("name", None) summarize = kwargs.pop("summarize", 3) sep = kwargs.pop("sep", " ") end = kwargs.pop("end", os.linesep) if kwargs: raise ValueError("Unrecognized keyword arguments for tf.print: %s" % kwargs) format_name = None if name: format_name = name + "_format" # Match the C++ string constants representing the different output streams. # Keep this updated! output_stream_to_constant = { sys.stdout: "stdout", sys.stderr: "stderr", tf_logging.INFO: "log(info)", tf_logging.info: "log(info)", tf_logging.WARN: "log(warning)", tf_logging.warning: "log(warning)", tf_logging.warn: "log(warning)", tf_logging.ERROR: "log(error)", tf_logging.error: "log(error)", } if _is_filepath(output_stream): output_stream_string = output_stream else: output_stream_string = output_stream_to_constant.get(output_stream) if not output_stream_string: raise ValueError("Unsupported output stream, logging level, or file." + str(output_stream) + ". Supported streams are sys.stdout, " "sys.stderr, tf.logging.info, " "tf.logging.warning, tf.logging.error. " + "File needs to be in the form of 'file://<filepath>'.") # If we are only printing a single string scalar, there is no need to format if (len(inputs) == 1 and tensor_util.is_tensor(inputs[0]) and (not isinstance(inputs[0], sparse_tensor.SparseTensor)) and (inputs[0].shape.ndims == 0) and (inputs[0].dtype == dtypes.string)): formatted_string = inputs[0] # Otherwise, we construct an appropriate template for the tensors we are # printing, and format the template using those tensors. else: # For each input to this print function, we extract any nested tensors, # and construct an appropriate template to format representing the # printed input. 
templates = [] tensors = [] tensor_free_structure = nest.map_structure( lambda x: "" if tensor_util.is_tensor(x) else x, inputs) tensor_free_template = " ".join( pprint.pformat(x) for x in tensor_free_structure) placeholder = _generate_placeholder_string(tensor_free_template) for input_ in inputs: placeholders = [] # Use the nest utilities to flatten & process any nested elements in this # input. The placeholder for a tensor in the template should be the # placeholder string, and the placeholder for a non-tensor can just be # the printed value of the non-tensor itself. for x in nest.flatten(input_): # support sparse tensors if isinstance(x, sparse_tensor.SparseTensor): tensors.extend([x.indices, x.values, x.dense_shape]) placeholders.append( "SparseTensor(indices={}, values={}, shape={})".format( placeholder, placeholder, placeholder)) elif tensor_util.is_tensor(x): tensors.append(x) placeholders.append(placeholder) else: placeholders.append(x) if isinstance(input_, six.string_types): # If the current input to format/print is a normal string, that string # can act as the template. cur_template = input_ else: # We pack the placeholders into a data structure that matches the # input data structure format, then format that data structure # into a string template. # # NOTE: We must use pprint.pformat here for building the template for # unordered data structures such as `dict`, because `str` doesn't # guarantee orderings, while pprint prints in sorted order. pprint # will match the ordering of `nest.flatten`. # This even works when nest.flatten reorders OrderedDicts, because # pprint is printing *after* the OrderedDicts have been reordered. cur_template = pprint.pformat( nest.pack_sequence_as(input_, placeholders)) templates.append(cur_template) # We join the templates for the various inputs into a single larger # template. We also remove all quotes surrounding the placeholders, so that # the formatted/printed output will not contain quotes around tensors. # (example of where these quotes might appear: if we have added a # placeholder string into a list, then pretty-formatted that list) template = sep.join(templates) template = template.replace("'" + placeholder + "'", placeholder) formatted_string = string_ops.string_format( inputs=tensors, template=template, placeholder=placeholder, summarize=summarize, name=format_name) if compat.forward_compatible(2019, 5, 27): return gen_logging_ops.print_v2( formatted_string, output_stream=output_stream_string, name=name, end=end) else: if end == os.linesep: end = "" return gen_logging_ops.print_v2( formatted_string + end, output_stream=output_stream_string, name=name) # pylint: enable=g-doc-args @ops.RegisterGradient("Print") def _PrintGrad(op, *grad): return list(grad) + [None] * (len(op.inputs) - 1) def _Collect(val, collections, default_collections): if collections is None: collections = default_collections for key in collections: ops.add_to_collection(key, val) @deprecated( "2016-11-30", "Please switch to tf.summary.histogram. Note that " "tf.summary.histogram uses the node name instead of the tag. " "This means that TensorFlow will automatically de-duplicate summary " "names based on the scope they are created in.") def histogram_summary(tag, values, collections=None, name=None): # pylint: disable=line-too-long """Outputs a `Summary` protocol buffer with a histogram. This ops is deprecated. Please switch to tf.summary.histogram. 
For an explanation of why this op was deprecated, and information on how to migrate, look ['here'](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/deprecated/__init__.py) The generated [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto) has one summary value containing a histogram for `values`. This op reports an `InvalidArgument` error if any value is not finite. Args: tag: A `string` `Tensor`. 0-D. Tag to use for the summary value. values: A real numeric `Tensor`. Any shape. Values to use to build the histogram. collections: Optional list of graph collections keys. The new summary op is added to these collections. Defaults to `[GraphKeys.SUMMARIES]`. name: A name for the operation (optional). Returns: A scalar `Tensor` of type `string`. The serialized `Summary` protocol buffer. """ with ops.name_scope(name, "HistogramSummary", [tag, values]) as scope: val = gen_logging_ops.histogram_summary(tag=tag, values=values, name=scope) _Collect(val, collections, [ops.GraphKeys.SUMMARIES]) return val @deprecated( "2016-11-30", "Please switch to tf.summary.image. Note that " "tf.summary.image uses the node name instead of the tag. " "This means that TensorFlow will automatically de-duplicate summary " "names based on the scope they are created in. Also, the max_images " "argument was renamed to max_outputs.") def image_summary(tag, tensor, max_images=3, collections=None, name=None): # pylint: disable=line-too-long """Outputs a `Summary` protocol buffer with images. For an explanation of why this op was deprecated, and information on how to migrate, look ['here'](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/deprecated/__init__.py) The summary has up to `max_images` summary values containing images. The images are built from `tensor` which must be 4-D with shape `[batch_size, height, width, channels]` and where `channels` can be: * 1: `tensor` is interpreted as Grayscale. * 3: `tensor` is interpreted as RGB. * 4: `tensor` is interpreted as RGBA. The images have the same number of channels as the input tensor. For float input, the values are normalized one image at a time to fit in the range `[0, 255]`. `uint8` values are unchanged. The op uses two different normalization algorithms: * If the input values are all positive, they are rescaled so the largest one is 255. * If any input value is negative, the values are shifted so input value 0.0 is at 127. They are then rescaled so that either the smallest value is 0, or the largest one is 255. The `tag` argument is a scalar `Tensor` of type `string`. It is used to build the `tag` of the summary values: * If `max_images` is 1, the summary value tag is '*tag*/image'. * If `max_images` is greater than 1, the summary value tags are generated sequentially as '*tag*/image/0', '*tag*/image/1', etc. Args: tag: A scalar `Tensor` of type `string`. Used to build the `tag` of the summary values. tensor: A 4-D `uint8` or `float32` `Tensor` of shape `[batch_size, height, width, channels]` where `channels` is 1, 3, or 4. max_images: Max number of batch elements to generate images for. collections: Optional list of ops.GraphKeys. The collections to add the summary to. Defaults to [ops.GraphKeys.SUMMARIES] name: A name for the operation (optional). Returns: A scalar `Tensor` of type `string`. The serialized `Summary` protocol buffer. 
""" with ops.name_scope(name, "ImageSummary", [tag, tensor]) as scope: val = gen_logging_ops.image_summary( tag=tag, tensor=tensor, max_images=max_images, name=scope) _Collect(val, collections, [ops.GraphKeys.SUMMARIES]) return val @deprecated( "2016-11-30", "Please switch to tf.summary.audio. Note that " "tf.summary.audio uses the node name instead of the tag. " "This means that TensorFlow will automatically de-duplicate summary " "names based on the scope they are created in.") def audio_summary(tag, tensor, sample_rate, max_outputs=3, collections=None, name=None): # pylint: disable=line-too-long """Outputs a `Summary` protocol buffer with audio. This op is deprecated. Please switch to tf.summary.audio. For an explanation of why this op was deprecated, and information on how to migrate, look ['here'](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/deprecated/__init__.py) The summary has up to `max_outputs` summary values containing audio. The audio is built from `tensor` which must be 3-D with shape `[batch_size, frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are assumed to be in the range of `[-1.0, 1.0]` with a sample rate of `sample_rate`. The `tag` argument is a scalar `Tensor` of type `string`. It is used to build the `tag` of the summary values: * If `max_outputs` is 1, the summary value tag is '*tag*/audio'. * If `max_outputs` is greater than 1, the summary value tags are generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc. Args: tag: A scalar `Tensor` of type `string`. Used to build the `tag` of the summary values. tensor: A 3-D `float32` `Tensor` of shape `[batch_size, frames, channels]` or a 2-D `float32` `Tensor` of shape `[batch_size, frames]`. sample_rate: A Scalar `float32` `Tensor` indicating the sample rate of the signal in hertz. max_outputs: Max number of batch elements to generate audio for. collections: Optional list of ops.GraphKeys. The collections to add the summary to. Defaults to [ops.GraphKeys.SUMMARIES] name: A name for the operation (optional). Returns: A scalar `Tensor` of type `string`. The serialized `Summary` protocol buffer. """ with ops.name_scope(name, "AudioSummary", [tag, tensor]) as scope: sample_rate = ops.convert_to_tensor( sample_rate, dtype=dtypes.float32, name="sample_rate") val = gen_logging_ops.audio_summary_v2( tag=tag, tensor=tensor, max_outputs=max_outputs, sample_rate=sample_rate, name=scope) _Collect(val, collections, [ops.GraphKeys.SUMMARIES]) return val @deprecated("2016-11-30", "Please switch to tf.summary.merge.") def merge_summary(inputs, collections=None, name=None): # pylint: disable=line-too-long """Merges summaries. This op is deprecated. Please switch to tf.compat.v1.summary.merge, which has identical behavior. This op creates a [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto) protocol buffer that contains the union of all the values in the input summaries. When the Op is run, it reports an `InvalidArgument` error if multiple values in the summaries to merge use the same tag. Args: inputs: A list of `string` `Tensor` objects containing serialized `Summary` protocol buffers. collections: Optional list of graph collections keys. The new summary op is added to these collections. Defaults to `[GraphKeys.SUMMARIES]`. name: A name for the operation (optional). Returns: A scalar `Tensor` of type `string`. The serialized `Summary` protocol buffer resulting from the merging. 
""" with ops.name_scope(name, "MergeSummary", inputs): val = gen_logging_ops.merge_summary(inputs=inputs, name=name) _Collect(val, collections, []) return val @deprecated("2016-11-30", "Please switch to tf.summary.merge_all.") def merge_all_summaries(key=ops.GraphKeys.SUMMARIES): """Merges all summaries collected in the default graph. This op is deprecated. Please switch to tf.compat.v1.summary.merge_all, which has identical behavior. Args: key: `GraphKey` used to collect the summaries. Defaults to `GraphKeys.SUMMARIES`. Returns: If no summaries were collected, returns None. Otherwise returns a scalar `Tensor` of type `string` containing the serialized `Summary` protocol buffer resulting from the merging. """ summary_ops = ops.get_collection(key) if not summary_ops: return None else: return merge_summary(summary_ops) def get_summary_op(): """Returns a single Summary op that would run all summaries. Either existing one from `SUMMARY_OP` collection or merges all existing summaries. Returns: If no summaries were collected, returns None. Otherwise returns a scalar `Tensor` of type `string` containing the serialized `Summary` protocol buffer resulting from the merging. """ summary_op = ops.get_collection(ops.GraphKeys.SUMMARY_OP) if summary_op is not None: if summary_op: summary_op = summary_op[0] else: summary_op = None if summary_op is None: summary_op = merge_all_summaries() if summary_op is not None: ops.add_to_collection(ops.GraphKeys.SUMMARY_OP, summary_op) return summary_op @deprecated( "2016-11-30", "Please switch to tf.summary.scalar. Note that " "tf.summary.scalar uses the node name instead of the tag. " "This means that TensorFlow will automatically de-duplicate summary " "names based on the scope they are created in. Also, passing a " "tensor or list of tags to a scalar summary op is no longer " "supported.") def scalar_summary(tags, values, collections=None, name=None): # pylint: disable=line-too-long """Outputs a `Summary` protocol buffer with scalar values. This ops is deprecated. Please switch to tf.summary.scalar. For an explanation of why this op was deprecated, and information on how to migrate, look ['here'](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/deprecated/__init__.py) The input `tags` and `values` must have the same shape. The generated summary has a summary value for each tag-value pair in `tags` and `values`. Args: tags: A `string` `Tensor`. Tags for the summaries. values: A real numeric Tensor. Values for the summaries. collections: Optional list of graph collections keys. The new summary op is added to these collections. Defaults to `[GraphKeys.SUMMARIES]`. name: A name for the operation (optional). Returns: A scalar `Tensor` of type `string`. The serialized `Summary` protocol buffer. """ with ops.name_scope(name, "ScalarSummary", [tags, values]) as scope: val = gen_logging_ops.scalar_summary(tags=tags, values=values, name=scope) _Collect(val, collections, [ops.GraphKeys.SUMMARIES]) return val ops.NotDifferentiable("HistogramSummary") ops.NotDifferentiable("ImageSummary") ops.NotDifferentiable("AudioSummary") ops.NotDifferentiable("AudioSummaryV2") ops.NotDifferentiable("MergeSummary") ops.NotDifferentiable("ScalarSummary") ops.NotDifferentiable("TensorSummary") ops.NotDifferentiable("TensorSummaryV2") ops.NotDifferentiable("Timestamp")
tensorflow-master
tensorflow/python/ops/logging_ops.py
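# A short usage sketch for tf.print as wrapped above (not part of the original
# file); paths and values are illustrative only. In eager mode the op executes
# immediately; in graph mode the returned op must be run or used as a control
# dependency, as the docstring examples above show.
import sys
import tensorflow as tf

tensor = tf.range(10)
# Nested Python structures containing tensors are rendered through the
# placeholder/template mechanism implemented in print_v2 above.
tf.print("tensors:", tensor, {"doubled": tensor * 2}, output_stream=sys.stdout)

# Printing to a file: output_stream may be a string of the form
# "file://<filepath>"; the path below is just an example. summarize=-1 prints
# every element instead of the first and last 3 per dimension.
tf.print(tensor, output_stream="file:///tmp/tensor_log.out", summarize=-1)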
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """RNN helpers for TensorFlow models.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.eager import context from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import tensor_util from tensorflow.python.keras.engine import base_layer from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import control_flow_util from tensorflow.python.ops import math_ops from tensorflow.python.ops import rnn_cell_impl from tensorflow.python.ops import tensor_array_ops from tensorflow.python.ops import variable_scope as vs from tensorflow.python.util import deprecation from tensorflow.python.util import nest from tensorflow.python.util.tf_export import tf_export # pylint: disable=protected-access _concat = rnn_cell_impl._concat # pylint: enable=protected-access def _transpose_batch_time(x): """Transposes the batch and time dimensions of a Tensor. If the input tensor has rank < 2 it returns the original tensor. Retains as much of the static shape information as possible. Args: x: A Tensor. Returns: x transposed along the first two dimensions. """ x_static_shape = x.get_shape() if x_static_shape.rank is not None and x_static_shape.rank < 2: return x x_rank = array_ops.rank(x) x_t = array_ops.transpose( x, array_ops.concat(([1, 0], math_ops.range(2, x_rank)), axis=0)) x_t.set_shape( tensor_shape.TensorShape( [x_static_shape.dims[1].value, x_static_shape.dims[0].value]).concatenate(x_static_shape[2:])) return x_t def _best_effort_input_batch_size(flat_input): """Get static input batch size if available, with fallback to the dynamic one. Args: flat_input: An iterable of time major input Tensors of shape `[max_time, batch_size, ...]`. All inputs should have compatible batch sizes. Returns: The batch size in Python integer if available, or a scalar Tensor otherwise. Raises: ValueError: if there is any input with an invalid shape. """ for input_ in flat_input: shape = input_.shape if shape.rank is None: continue if shape.rank < 2: raise ValueError("Expected input tensor %s to have rank at least 2" % input_) batch_size = shape.dims[1].value if batch_size is not None: return batch_size # Fallback to the dynamic batch size of the first input. return array_ops.shape(flat_input[0])[1] def _infer_state_dtype(explicit_dtype, state): """Infer the dtype of an RNN state. Args: explicit_dtype: explicitly declared dtype or None. state: RNN's hidden state. Must be a Tensor or a nested iterable containing Tensors. Returns: dtype: inferred dtype of hidden state. Raises: ValueError: if `state` has heterogeneous dtypes or is empty. 
""" if explicit_dtype is not None: return explicit_dtype elif nest.is_sequence(state): inferred_dtypes = [element.dtype for element in nest.flatten(state)] if not inferred_dtypes: raise ValueError("Unable to infer dtype from empty state.") all_same = all(x == inferred_dtypes[0] for x in inferred_dtypes) if not all_same: raise ValueError( "State has tensors of different inferred_dtypes. Unable to infer a " "single representative dtype.") return inferred_dtypes[0] else: return state.dtype def _maybe_tensor_shape_from_tensor(shape): if isinstance(shape, ops.Tensor): return tensor_shape.as_shape(tensor_util.constant_value(shape)) else: return shape def _should_cache(): """Returns True if a default caching device should be set, otherwise False.""" if context.executing_eagerly(): return False # Don't set a caching device when running in a loop, since it is possible that # train steps could be wrapped in a tf.while_loop. In that scenario caching # prevents forward computations in loop iterations from re-reading the # updated weights. ctxt = ops.get_default_graph()._get_control_flow_context() # pylint: disable=protected-access return control_flow_util.GetContainingWhileContext(ctxt) is None def _is_keras_rnn_cell(rnn_cell): """Check whether the cell is a Keras RNN cell. The Keras RNN cell accept the state as a list even the state is a single tensor, whereas the TF RNN cell does not wrap single state tensor in list. This behavior difference should be unified in future version. Args: rnn_cell: An RNN cell instance that either follow the Keras interface or TF RNN interface. Returns: Boolean, whether the cell is an Keras RNN cell. """ # Cell type check is not strict enough since there are cells created by other # library like Deepmind that didn't inherit tf.nn.rnn_cell.RNNCell. # Keras cells never had zero_state method, which was from the original # interface from TF RNN cell. return (not isinstance(rnn_cell, rnn_cell_impl.RNNCell) and isinstance(rnn_cell, base_layer.Layer) and getattr(rnn_cell, "zero_state", None) is None) # pylint: disable=unused-argument def _rnn_step(time, sequence_length, min_sequence_length, max_sequence_length, zero_output, state, call_cell, state_size, skip_conditionals=False): """Calculate one step of a dynamic RNN minibatch. Returns an (output, state) pair conditioned on `sequence_length`. When skip_conditionals=False, the pseudocode is something like: if t >= max_sequence_length: return (zero_output, state) if t < min_sequence_length: return call_cell() # Selectively output zeros or output, old state or new state depending # on whether we've finished calculating each row. new_output, new_state = call_cell() final_output = np.vstack([ zero_output if time >= sequence_length[r] else new_output_r for r, new_output_r in enumerate(new_output) ]) final_state = np.vstack([ state[r] if time >= sequence_length[r] else new_state_r for r, new_state_r in enumerate(new_state) ]) return (final_output, final_state) Args: time: int32 `Tensor` scalar. sequence_length: int32 `Tensor` vector of size [batch_size]. min_sequence_length: int32 `Tensor` scalar, min of sequence_length. max_sequence_length: int32 `Tensor` scalar, max of sequence_length. zero_output: `Tensor` vector of shape [output_size]. state: Either a single `Tensor` matrix of shape `[batch_size, state_size]`, or a list/tuple of such tensors. call_cell: lambda returning tuple of (new_output, new_state) where new_output is a `Tensor` matrix of shape `[batch_size, output_size]`. 
new_state is a `Tensor` matrix of shape `[batch_size, state_size]`. state_size: The `cell.state_size` associated with the state. skip_conditionals: Python bool, whether to skip using the conditional calculations. This is useful for `dynamic_rnn`, where the input tensor matches `max_sequence_length`, and using conditionals just slows everything down. Returns: A tuple of (`final_output`, `final_state`) as given by the pseudocode above: final_output is a `Tensor` matrix of shape [batch_size, output_size] final_state is either a single `Tensor` matrix, or a tuple of such matrices (matching length and shapes of input `state`). Raises: ValueError: If the cell returns a state tuple whose length does not match that returned by `state_size`. """ # Convert state to a list for ease of use flat_state = nest.flatten(state) flat_zero_output = nest.flatten(zero_output) # Vector describing which batch entries are finished. copy_cond = time >= sequence_length def _copy_one_through(output, new_output): # TensorArray and scalar get passed through. if isinstance(output, tensor_array_ops.TensorArray): return new_output if output.shape.rank == 0: return new_output # Otherwise propagate the old or the new value. with ops.colocate_with(new_output): return array_ops.where(copy_cond, output, new_output) def _copy_some_through(flat_new_output, flat_new_state): # Use broadcasting select to determine which values should get # the previous state & zero output, and which values should get # a calculated state & output. flat_new_output = [ _copy_one_through(zero_output, new_output) for zero_output, new_output in zip(flat_zero_output, flat_new_output) ] flat_new_state = [ _copy_one_through(state, new_state) for state, new_state in zip(flat_state, flat_new_state) ] return flat_new_output + flat_new_state def _maybe_copy_some_through(): """Run RNN step. Pass through either no or some past state.""" new_output, new_state = call_cell() nest.assert_same_structure(zero_output, new_output) nest.assert_same_structure(state, new_state) flat_new_state = nest.flatten(new_state) flat_new_output = nest.flatten(new_output) return control_flow_ops.cond( # if t < min_seq_len: calculate and return everything time < min_sequence_length, lambda: flat_new_output + flat_new_state, # else copy some of it through lambda: _copy_some_through(flat_new_output, flat_new_state)) # TODO(ebrevdo): skipping these conditionals may cause a slowdown, # but benefits from removing cond() and its gradient. We should # profile with and without this switch here. if skip_conditionals: # Instead of using conditionals, perform the selective copy at all time # steps. This is faster when max_seq_len is equal to the number of unrolls # (which is typical for dynamic_rnn). 
new_output, new_state = call_cell() nest.assert_same_structure(zero_output, new_output) nest.assert_same_structure(state, new_state) new_state = nest.flatten(new_state) new_output = nest.flatten(new_output) final_output_and_state = _copy_some_through(new_output, new_state) else: empty_update = lambda: flat_zero_output + flat_state final_output_and_state = control_flow_ops.cond( # if t >= max_seq_len: copy all state through, output zeros time >= max_sequence_length, empty_update, # otherwise calculation is required: copy some or all of it through _maybe_copy_some_through) if len(final_output_and_state) != len(flat_zero_output) + len(flat_state): raise ValueError("Internal error: state and output were not concatenated " "correctly.") final_output = final_output_and_state[:len(flat_zero_output)] final_state = final_output_and_state[len(flat_zero_output):] for output, flat_output in zip(final_output, flat_zero_output): output.set_shape(flat_output.get_shape()) for substate, flat_substate in zip(final_state, flat_state): if not isinstance(substate, tensor_array_ops.TensorArray): substate.set_shape(flat_substate.get_shape()) final_output = nest.pack_sequence_as( structure=zero_output, flat_sequence=final_output) final_state = nest.pack_sequence_as( structure=state, flat_sequence=final_state) return final_output, final_state def _reverse_seq(input_seq, lengths): """Reverse a list of Tensors up to specified lengths. Args: input_seq: Sequence of seq_len tensors of dimension (batch_size, n_features) or nested tuples of tensors. lengths: A `Tensor` of dimension batch_size, containing lengths for each sequence in the batch. If "None" is specified, simply reverses the list. Returns: time-reversed sequence """ if lengths is None: return list(reversed(input_seq)) flat_input_seq = tuple(nest.flatten(input_) for input_ in input_seq) flat_results = [[] for _ in range(len(input_seq))] for sequence in zip(*flat_input_seq): input_shape = tensor_shape.unknown_shape(rank=sequence[0].get_shape().rank) for input_ in sequence: input_shape.merge_with(input_.get_shape()) input_.set_shape(input_shape) # Join into (time, batch_size, depth) s_joined = array_ops.stack(sequence) # Reverse along dimension 0 s_reversed = array_ops.reverse_sequence(s_joined, lengths, 0, 1) # Split again into list result = array_ops.unstack(s_reversed) for r, flat_result in zip(result, flat_results): r.set_shape(input_shape) flat_result.append(r) results = [ nest.pack_sequence_as(structure=input_, flat_sequence=flat_result) for input_, flat_result in zip(input_seq, flat_results) ] return results @deprecation.deprecated(None, "Please use `keras.layers.Bidirectional(" "keras.layers.RNN(cell))`, which is equivalent to " "this API") @tf_export(v1=["nn.bidirectional_dynamic_rnn"]) def bidirectional_dynamic_rnn(cell_fw, cell_bw, inputs, sequence_length=None, initial_state_fw=None, initial_state_bw=None, dtype=None, parallel_iterations=None, swap_memory=False, time_major=False, scope=None): """Creates a dynamic version of bidirectional recurrent neural network. Takes input and builds independent forward and backward RNNs. The input_size of forward and backward cell must match. The initial state for both directions is zero by default (but can be set optionally) and no intermediate states are ever returned -- the network is fully unrolled for the given (passed in) length(s) of the sequence(s) or completely unrolled if length(s) is not given. Args: cell_fw: An instance of RNNCell, to be used for forward direction. 
cell_bw: An instance of RNNCell, to be used for backward direction. inputs: The RNN inputs. If time_major == False (default), this must be a tensor of shape: `[batch_size, max_time, ...]`, or a nested tuple of such elements. If time_major == True, this must be a tensor of shape: `[max_time, batch_size, ...]`, or a nested tuple of such elements. sequence_length: (optional) An int32/int64 vector, size `[batch_size]`, containing the actual lengths for each of the sequences in the batch. If not provided, all batch entries are assumed to be full sequences; and time reversal is applied from time `0` to `max_time` for each sequence. initial_state_fw: (optional) An initial state for the forward RNN. This must be a tensor of appropriate type and shape `[batch_size, cell_fw.state_size]`. If `cell_fw.state_size` is a tuple, this should be a tuple of tensors having shapes `[batch_size, s] for s in cell_fw.state_size`. initial_state_bw: (optional) Same as for `initial_state_fw`, but using the corresponding properties of `cell_bw`. dtype: (optional) The data type for the initial states and expected output. Required if initial_states are not provided or RNN states have a heterogeneous dtype. parallel_iterations: (Default: 32). The number of iterations to run in parallel. Those operations which do not have any temporal dependency and can be run in parallel, will be. This parameter trades off time for space. Values >> 1 use more memory but take less time, while smaller values use less memory but computations take longer. swap_memory: Transparently swap the tensors produced in forward inference but needed for back prop from GPU to CPU. This allows training RNNs which would typically not fit on a single GPU, with very minimal (or no) performance penalty. time_major: The shape format of the `inputs` and `outputs` Tensors. If true, these `Tensors` must be shaped `[max_time, batch_size, depth]`. If false, these `Tensors` must be shaped `[batch_size, max_time, depth]`. Using `time_major = True` is a bit more efficient because it avoids transposes at the beginning and end of the RNN calculation. However, most TensorFlow data is batch-major, so by default this function accepts input and emits output in batch-major form. scope: VariableScope for the created subgraph; defaults to "bidirectional_rnn" Returns: A tuple (outputs, output_states) where: outputs: A tuple (output_fw, output_bw) containing the forward and the backward rnn output `Tensor`. If time_major == False (default), output_fw will be a `Tensor` shaped: `[batch_size, max_time, cell_fw.output_size]` and output_bw will be a `Tensor` shaped: `[batch_size, max_time, cell_bw.output_size]`. If time_major == True, output_fw will be a `Tensor` shaped: `[max_time, batch_size, cell_fw.output_size]` and output_bw will be a `Tensor` shaped: `[max_time, batch_size, cell_bw.output_size]`. It returns a tuple instead of a single concatenated `Tensor`, unlike in the `bidirectional_rnn`. If the concatenated one is preferred, the forward and backward outputs can be concatenated as `tf.concat(outputs, 2)`. output_states: A tuple (output_state_fw, output_state_bw) containing the forward and the backward final states of bidirectional rnn. Raises: TypeError: If `cell_fw` or `cell_bw` is not an instance of `RNNCell`. 
""" rnn_cell_impl.assert_like_rnncell("cell_fw", cell_fw) rnn_cell_impl.assert_like_rnncell("cell_bw", cell_bw) with vs.variable_scope(scope or "bidirectional_rnn"): # Forward direction with vs.variable_scope("fw") as fw_scope: output_fw, output_state_fw = dynamic_rnn( cell=cell_fw, inputs=inputs, sequence_length=sequence_length, initial_state=initial_state_fw, dtype=dtype, parallel_iterations=parallel_iterations, swap_memory=swap_memory, time_major=time_major, scope=fw_scope) # Backward direction if not time_major: time_axis = 1 batch_axis = 0 else: time_axis = 0 batch_axis = 1 def _reverse(input_, seq_lengths, seq_axis, batch_axis): if seq_lengths is not None: return array_ops.reverse_sequence( input=input_, seq_lengths=seq_lengths, seq_axis=seq_axis, batch_axis=batch_axis) else: return array_ops.reverse(input_, axis=[seq_axis]) with vs.variable_scope("bw") as bw_scope: def _map_reverse(inp): return _reverse( inp, seq_lengths=sequence_length, seq_axis=time_axis, batch_axis=batch_axis) inputs_reverse = nest.map_structure(_map_reverse, inputs) tmp, output_state_bw = dynamic_rnn( cell=cell_bw, inputs=inputs_reverse, sequence_length=sequence_length, initial_state=initial_state_bw, dtype=dtype, parallel_iterations=parallel_iterations, swap_memory=swap_memory, time_major=time_major, scope=bw_scope) output_bw = _reverse( tmp, seq_lengths=sequence_length, seq_axis=time_axis, batch_axis=batch_axis) outputs = (output_fw, output_bw) output_states = (output_state_fw, output_state_bw) return (outputs, output_states) @deprecation.deprecated( None, "Please use `keras.layers.RNN(cell)`, which is equivalent to this API") @tf_export(v1=["nn.dynamic_rnn"]) def dynamic_rnn(cell, inputs, sequence_length=None, initial_state=None, dtype=None, parallel_iterations=None, swap_memory=False, time_major=False, scope=None): """Creates a recurrent neural network specified by RNNCell `cell`. Performs fully dynamic unrolling of `inputs`. Example: ```python # create a BasicRNNCell rnn_cell = tf.compat.v1.nn.rnn_cell.BasicRNNCell(hidden_size) # 'outputs' is a tensor of shape [batch_size, max_time, cell_state_size] # defining initial state initial_state = rnn_cell.zero_state(batch_size, dtype=tf.float32) # 'state' is a tensor of shape [batch_size, cell_state_size] outputs, state = tf.compat.v1.nn.dynamic_rnn(rnn_cell, input_data, initial_state=initial_state, dtype=tf.float32) ``` ```python # create 2 LSTMCells rnn_layers = [tf.compat.v1.nn.rnn_cell.LSTMCell(size) for size in [128, 256]] # create a RNN cell composed sequentially of a number of RNNCells multi_rnn_cell = tf.compat.v1.nn.rnn_cell.MultiRNNCell(rnn_layers) # 'outputs' is a tensor of shape [batch_size, max_time, 256] # 'state' is a N-tuple where N is the number of LSTMCells containing a # tf.nn.rnn_cell.LSTMStateTuple for each cell outputs, state = tf.compat.v1.nn.dynamic_rnn(cell=multi_rnn_cell, inputs=data, dtype=tf.float32) ``` Args: cell: An instance of RNNCell. inputs: The RNN inputs. If `time_major == False` (default), this must be a `Tensor` of shape: `[batch_size, max_time, ...]`, or a nested tuple of such elements. If `time_major == True`, this must be a `Tensor` of shape: `[max_time, batch_size, ...]`, or a nested tuple of such elements. This may also be a (possibly nested) tuple of Tensors satisfying this property. The first two dimensions must match across all the inputs, but otherwise the ranks and other shape components may differ. 
In this case, input to `cell` at each time-step will replicate the structure of these tuples, except for the time dimension (from which the time is taken). The input to `cell` at each time step will be a `Tensor` or (possibly nested) tuple of Tensors each with dimensions `[batch_size, ...]`. sequence_length: (optional) An int32/int64 vector sized `[batch_size]`. Used to copy-through state and zero-out outputs when past a batch element's sequence length. This parameter enables users to extract the last valid state and properly padded outputs, so it is provided for correctness. initial_state: (optional) An initial state for the RNN. If `cell.state_size` is an integer, this must be a `Tensor` of appropriate type and shape `[batch_size, cell.state_size]`. If `cell.state_size` is a tuple, this should be a tuple of tensors having shapes `[batch_size, s] for s in cell.state_size`. dtype: (optional) The data type for the initial state and expected output. Required if initial_state is not provided or RNN state has a heterogeneous dtype. parallel_iterations: (Default: 32). The number of iterations to run in parallel. Those operations which do not have any temporal dependency and can be run in parallel, will be. This parameter trades off time for space. Values >> 1 use more memory but take less time, while smaller values use less memory but computations take longer. swap_memory: Transparently swap the tensors produced in forward inference but needed for back prop from GPU to CPU. This allows training RNNs which would typically not fit on a single GPU, with very minimal (or no) performance penalty. time_major: The shape format of the `inputs` and `outputs` Tensors. If true, these `Tensors` must be shaped `[max_time, batch_size, depth]`. If false, these `Tensors` must be shaped `[batch_size, max_time, depth]`. Using `time_major = True` is a bit more efficient because it avoids transposes at the beginning and end of the RNN calculation. However, most TensorFlow data is batch-major, so by default this function accepts input and emits output in batch-major form. scope: VariableScope for the created subgraph; defaults to "rnn". Returns: A pair (outputs, state) where: outputs: The RNN output `Tensor`. If time_major == False (default), this will be a `Tensor` shaped: `[batch_size, max_time, cell.output_size]`. If time_major == True, this will be a `Tensor` shaped: `[max_time, batch_size, cell.output_size]`. Note, if `cell.output_size` is a (possibly nested) tuple of integers or `TensorShape` objects, then `outputs` will be a tuple having the same structure as `cell.output_size`, containing Tensors having shapes corresponding to the shape data in `cell.output_size`. state: The final state. If `cell.state_size` is an int, this will be shaped `[batch_size, cell.state_size]`. If it is a `TensorShape`, this will be shaped `[batch_size] + cell.state_size`. If it is a (possibly nested) tuple of ints or `TensorShape`, this will be a tuple having the corresponding shapes. If cells are `LSTMCells` `state` will be a tuple containing a `LSTMStateTuple` for each cell. Raises: TypeError: If `cell` is not an instance of RNNCell. ValueError: If inputs is None or an empty list. """ rnn_cell_impl.assert_like_rnncell("cell", cell) with vs.variable_scope(scope or "rnn") as varscope: # Create a new scope in which the caching device is either # determined by the parent scope, or is set to place the cached # Variable using the same placement as for the rest of the RNN. 
if _should_cache(): if varscope.caching_device is None: varscope.set_caching_device(lambda op: op.device) # By default, time_major==False and inputs are batch-major: shaped # [batch, time, depth] # For internal calculations, we transpose to [time, batch, depth] flat_input = nest.flatten(inputs) if not time_major: # (B,T,D) => (T,B,D) flat_input = [ops.convert_to_tensor(input_) for input_ in flat_input] flat_input = tuple(_transpose_batch_time(input_) for input_ in flat_input) parallel_iterations = parallel_iterations or 32 if sequence_length is not None: sequence_length = math_ops.cast(sequence_length, dtypes.int32) if sequence_length.get_shape().rank not in (None, 1): raise ValueError( "sequence_length must be a vector of length batch_size, " "but saw shape: %s" % sequence_length.get_shape()) sequence_length = array_ops.identity( # Just to find it in the graph. sequence_length, name="sequence_length") batch_size = _best_effort_input_batch_size(flat_input) if initial_state is not None: state = initial_state else: if not dtype: raise ValueError("If there is no initial_state, you must give a dtype.") if getattr(cell, "get_initial_state", None) is not None: state = cell.get_initial_state( inputs=None, batch_size=batch_size, dtype=dtype) else: state = cell.zero_state(batch_size, dtype) def _assert_has_shape(x, shape): x_shape = array_ops.shape(x) packed_shape = array_ops.stack(shape) return control_flow_ops.Assert( math_ops.reduce_all(math_ops.equal(x_shape, packed_shape)), [ "Expected shape for Tensor %s is " % x.name, packed_shape, " but saw shape: ", x_shape ]) if not context.executing_eagerly() and sequence_length is not None: # Perform some shape validation with ops.control_dependencies( [_assert_has_shape(sequence_length, [batch_size])]): sequence_length = array_ops.identity( sequence_length, name="CheckSeqLen") inputs = nest.pack_sequence_as(structure=inputs, flat_sequence=flat_input) (outputs, final_state) = _dynamic_rnn_loop( cell, inputs, state, parallel_iterations=parallel_iterations, swap_memory=swap_memory, sequence_length=sequence_length, dtype=dtype) # Outputs of _dynamic_rnn_loop are always shaped [time, batch, depth]. # If we are performing batch-major calculations, transpose output back # to shape [batch, time, depth] if not time_major: # (T,B,D) => (B,T,D) outputs = nest.map_structure(_transpose_batch_time, outputs) return (outputs, final_state) def _dynamic_rnn_loop(cell, inputs, initial_state, parallel_iterations, swap_memory, sequence_length=None, dtype=None): """Internal implementation of Dynamic RNN. Args: cell: An instance of RNNCell. inputs: A `Tensor` of shape [time, batch_size, input_size], or a nested tuple of such elements. initial_state: A `Tensor` of shape `[batch_size, state_size]`, or if `cell.state_size` is a tuple, then this should be a tuple of tensors having shapes `[batch_size, s] for s in cell.state_size`. parallel_iterations: Positive Python int. swap_memory: A Python boolean sequence_length: (optional) An `int32` `Tensor` of shape [batch_size]. dtype: (optional) Expected dtype of output. If not specified, inferred from initial_state. Returns: Tuple `(final_outputs, final_state)`. final_outputs: A `Tensor` of shape `[time, batch_size, cell.output_size]`. If `cell.output_size` is a (possibly nested) tuple of ints or `TensorShape` objects, then this returns a (possibly nested) tuple of Tensors matching the corresponding shapes. final_state: A `Tensor`, or possibly nested tuple of Tensors, matching in length and shapes to `initial_state`. 
Raises: ValueError: If the input depth cannot be inferred via shape inference from the inputs. ValueError: If time_step is not the same for all the elements in the inputs. ValueError: If batch_size is not the same for all the elements in the inputs. """ state = initial_state assert isinstance(parallel_iterations, int), "parallel_iterations must be int" state_size = cell.state_size flat_input = nest.flatten(inputs) flat_output_size = nest.flatten(cell.output_size) # Construct an initial output input_shape = array_ops.shape(flat_input[0]) time_steps = input_shape[0] batch_size = _best_effort_input_batch_size(flat_input) inputs_got_shape = tuple( input_.get_shape().with_rank_at_least(3) for input_ in flat_input) const_time_steps, const_batch_size = inputs_got_shape[0].as_list()[:2] for shape in inputs_got_shape: if not shape[2:].is_fully_defined(): raise ValueError( "Input size (depth of inputs) must be accessible via shape inference," " but saw value None.") got_time_steps = shape.dims[0].value got_batch_size = shape.dims[1].value if const_time_steps != got_time_steps: raise ValueError( "Time steps is not the same for all the elements in the input in a " "batch.") if const_batch_size != got_batch_size: raise ValueError( "Batch_size is not the same for all the elements in the input.") # Prepare dynamic conditional copying of state & output def _create_zero_arrays(size): size = _concat(batch_size, size) return array_ops.zeros( array_ops.stack(size), _infer_state_dtype(dtype, state)) flat_zero_output = tuple( _create_zero_arrays(output) for output in flat_output_size) zero_output = nest.pack_sequence_as( structure=cell.output_size, flat_sequence=flat_zero_output) if sequence_length is not None: min_sequence_length = math_ops.reduce_min(sequence_length) max_sequence_length = math_ops.reduce_max(sequence_length) else: max_sequence_length = time_steps time = array_ops.constant(0, dtype=dtypes.int32, name="time") with ops.name_scope("dynamic_rnn") as scope: base_name = scope def _create_ta(name, element_shape, dtype): return tensor_array_ops.TensorArray( dtype=dtype, size=time_steps, element_shape=element_shape, tensor_array_name=base_name + name) in_graph_mode = not context.executing_eagerly() if in_graph_mode: output_ta = tuple( _create_ta( "output_%d" % i, element_shape=( tensor_shape.TensorShape([const_batch_size]).concatenate( _maybe_tensor_shape_from_tensor(out_size))), dtype=_infer_state_dtype(dtype, state)) for i, out_size in enumerate(flat_output_size)) input_ta = tuple( _create_ta( "input_%d" % i, element_shape=flat_input_i.shape[1:], dtype=flat_input_i.dtype) for i, flat_input_i in enumerate(flat_input)) input_ta = tuple( ta.unstack(input_) for ta, input_ in zip(input_ta, flat_input)) else: output_ta = tuple([0 for _ in range(time_steps.numpy())] for i in range(len(flat_output_size))) input_ta = flat_input def _time_step(time, output_ta_t, state): """Take a time step of the dynamic RNN. Args: time: int32 scalar Tensor. output_ta_t: List of `TensorArray`s that represent the output. state: nested tuple of vector tensors that represent the state. Returns: The tuple (time + 1, output_ta_t with updated flow, new_state). 
""" if in_graph_mode: input_t = tuple(ta.read(time) for ta in input_ta) # Restore some shape information for input_, shape in zip(input_t, inputs_got_shape): input_.set_shape(shape[1:]) else: input_t = tuple(ta[time.numpy()] for ta in input_ta) input_t = nest.pack_sequence_as(structure=inputs, flat_sequence=input_t) # Keras RNN cells only accept state as list, even if it's a single tensor. is_keras_rnn_cell = _is_keras_rnn_cell(cell) if is_keras_rnn_cell and not nest.is_sequence(state): state = [state] call_cell = lambda: cell(input_t, state) if sequence_length is not None: (output, new_state) = _rnn_step( time=time, sequence_length=sequence_length, min_sequence_length=min_sequence_length, max_sequence_length=max_sequence_length, zero_output=zero_output, state=state, call_cell=call_cell, state_size=state_size, skip_conditionals=True) else: (output, new_state) = call_cell() # Keras cells always wrap state as list, even if it's a single tensor. if is_keras_rnn_cell and len(new_state) == 1: new_state = new_state[0] # Pack state if using state tuples output = nest.flatten(output) if in_graph_mode: output_ta_t = tuple( ta.write(time, out) for ta, out in zip(output_ta_t, output)) else: for ta, out in zip(output_ta_t, output): ta[time.numpy()] = out return (time + 1, output_ta_t, new_state) if in_graph_mode: # Make sure that we run at least 1 step, if necessary, to ensure # the TensorArrays pick up the dynamic shape. loop_bound = math_ops.minimum(time_steps, math_ops.maximum(1, max_sequence_length)) else: # Using max_sequence_length isn't currently supported in the Eager branch. loop_bound = time_steps _, output_final_ta, final_state = control_flow_ops.while_loop( cond=lambda time, *_: time < loop_bound, body=_time_step, loop_vars=(time, output_ta, state), parallel_iterations=parallel_iterations, maximum_iterations=time_steps, swap_memory=swap_memory) # Unpack final output if not using output tuples. if in_graph_mode: final_outputs = tuple(ta.stack() for ta in output_final_ta) # Restore some shape information for output, output_size in zip(final_outputs, flat_output_size): shape = _concat([const_time_steps, const_batch_size], output_size, static=True) output.set_shape(shape) else: final_outputs = output_final_ta final_outputs = nest.pack_sequence_as( structure=cell.output_size, flat_sequence=final_outputs) if not in_graph_mode: final_outputs = nest.map_structure_up_to( cell.output_size, lambda x: array_ops.stack(x, axis=0), final_outputs) return (final_outputs, final_state) @tf_export(v1=["nn.raw_rnn"]) def raw_rnn(cell, loop_fn, parallel_iterations=None, swap_memory=False, scope=None): """Creates an `RNN` specified by RNNCell `cell` and loop function `loop_fn`. **NOTE: This method is still in testing, and the API may change.** This function is a more primitive version of `dynamic_rnn` that provides more direct access to the inputs each iteration. It also provides more control over when to start and finish reading the sequence, and what to emit for the output. For example, it can be used to implement the dynamic decoder of a seq2seq model. Instead of working with `Tensor` objects, most operations work with `TensorArray` objects directly. 
The operation of `raw_rnn`, in pseudo-code, is basically the following: ```python time = tf.constant(0, dtype=tf.int32) (finished, next_input, initial_state, emit_structure, loop_state) = loop_fn( time=time, cell_output=None, cell_state=None, loop_state=None) emit_ta = TensorArray(dynamic_size=True, dtype=initial_state.dtype) state = initial_state while not all(finished): (output, cell_state) = cell(next_input, state) (next_finished, next_input, next_state, emit, loop_state) = loop_fn( time=time + 1, cell_output=output, cell_state=cell_state, loop_state=loop_state) # Emit zeros and copy forward state for minibatch entries that are finished. state = tf.where(finished, state, next_state) emit = tf.where(finished, tf.zeros_like(emit_structure), emit) emit_ta = emit_ta.write(time, emit) # If any new minibatch entries are marked as finished, mark these. finished = tf.logical_or(finished, next_finished) time += 1 return (emit_ta, state, loop_state) ``` with the additional properties that output and state may be (possibly nested) tuples, as determined by `cell.output_size` and `cell.state_size`, and as a result the final `state` and `emit_ta` may themselves be tuples. A simple implementation of `dynamic_rnn` via `raw_rnn` looks like this: ```python inputs = tf.compat.v1.placeholder(shape=(max_time, batch_size, input_depth), dtype=tf.float32) sequence_length = tf.compat.v1.placeholder(shape=(batch_size,), dtype=tf.int32) inputs_ta = tf.TensorArray(dtype=tf.float32, size=max_time) inputs_ta = inputs_ta.unstack(inputs) cell = tf.compat.v1.nn.rnn_cell.LSTMCell(num_units) def loop_fn(time, cell_output, cell_state, loop_state): emit_output = cell_output # == None for time == 0 if cell_output is None: # time == 0 next_cell_state = cell.zero_state(batch_size, tf.float32) else: next_cell_state = cell_state elements_finished = (time >= sequence_length) finished = tf.reduce_all(elements_finished) next_input = tf.cond( finished, lambda: tf.zeros([batch_size, input_depth], dtype=tf.float32), lambda: inputs_ta.read(time)) next_loop_state = None return (elements_finished, next_input, next_cell_state, emit_output, next_loop_state) outputs_ta, final_state, _ = raw_rnn(cell, loop_fn) outputs = outputs_ta.stack() ``` Args: cell: An instance of RNNCell. loop_fn: A callable that takes inputs `(time, cell_output, cell_state, loop_state)` and returns the tuple `(finished, next_input, next_cell_state, emit_output, next_loop_state)`. Here `time` is an int32 scalar `Tensor`, `cell_output` is a `Tensor` or (possibly nested) tuple of tensors as determined by `cell.output_size`, and `cell_state` is a `Tensor` or (possibly nested) tuple of tensors, as determined by the `loop_fn` on its first call (and should match `cell.state_size`). The outputs are: `finished`, a boolean `Tensor` of shape `[batch_size]`, `next_input`: the next input to feed to `cell`, `next_cell_state`: the next state to feed to `cell`, and `emit_output`: the output to store for this iteration. Note that `emit_output` should be a `Tensor` or (possibly nested) tuple of tensors which is aggregated in the `emit_ta` inside the `while_loop`. For the first call to `loop_fn`, the `emit_output` corresponds to the `emit_structure` which is then used to determine the size of the `zero_tensor` for the `emit_ta` (defaults to `cell.output_size`). For the subsequent calls to the `loop_fn`, the `emit_output` corresponds to the actual output tensor that is to be aggregated in the `emit_ta`. 
The parameter `cell_state` and output `next_cell_state` may be either a single or (possibly nested) tuple of tensors. The parameter `loop_state` and output `next_loop_state` may be either a single or (possibly nested) tuple of `Tensor` and `TensorArray` objects. This last parameter may be ignored by `loop_fn` and the return value may be `None`. If it is not `None`, then the `loop_state` will be propagated through the RNN loop, for use purely by `loop_fn` to keep track of its own state. The `next_loop_state` parameter returned may be `None`. The first call to `loop_fn` will be `time = 0`, `cell_output = None`, `cell_state = None`, and `loop_state = None`. For this call: The `next_cell_state` value should be the value with which to initialize the cell's state. It may be a final state from a previous RNN or it may be the output of `cell.zero_state()`. It should be a (possibly nested) tuple structure of tensors. If `cell.state_size` is an integer, this must be a `Tensor` of appropriate type and shape `[batch_size, cell.state_size]`. If `cell.state_size` is a `TensorShape`, this must be a `Tensor` of appropriate type and shape `[batch_size] + cell.state_size`. If `cell.state_size` is a (possibly nested) tuple of ints or `TensorShape`, this will be a tuple having the corresponding shapes. The `emit_output` value may be either `None` or a (possibly nested) tuple structure of tensors, e.g., `(tf.zeros(shape_0, dtype=dtype_0), tf.zeros(shape_1, dtype=dtype_1))`. If this first `emit_output` return value is `None`, then the `emit_ta` result of `raw_rnn` will have the same structure and dtypes as `cell.output_size`. Otherwise `emit_ta` will have the same structure, shapes (prepended with a `batch_size` dimension), and dtypes as `emit_output`. The actual values returned for `emit_output` at this initializing call are ignored. Note, this emit structure must be consistent across all time steps. parallel_iterations: (Default: 32). The number of iterations to run in parallel. Those operations which do not have any temporal dependency and can be run in parallel, will be. This parameter trades off time for space. Values >> 1 use more memory but take less time, while smaller values use less memory but computations take longer. swap_memory: Transparently swap the tensors produced in forward inference but needed for back prop from GPU to CPU. This allows training RNNs which would typically not fit on a single GPU, with very minimal (or no) performance penalty. scope: VariableScope for the created subgraph; defaults to "rnn". Returns: A tuple `(emit_ta, final_state, final_loop_state)` where: `emit_ta`: The RNN output `TensorArray`. If `loop_fn` returns a (possibly nested) set of Tensors for `emit_output` during initialization, (inputs `time = 0`, `cell_output = None`, and `loop_state = None`), then `emit_ta` will have the same structure, dtypes, and shapes as `emit_output` instead. If `loop_fn` returns `emit_output = None` during this call, the structure of `cell.output_size` is used: If `cell.output_size` is a (possibly nested) tuple of integers or `TensorShape` objects, then `emit_ta` will be a tuple having the same structure as `cell.output_size`, containing TensorArrays whose elements' shapes correspond to the shape data in `cell.output_size`. `final_state`: The final cell state. If `cell.state_size` is an int, this will be shaped `[batch_size, cell.state_size]`. If it is a `TensorShape`, this will be shaped `[batch_size] + cell.state_size`. 
If it is a (possibly nested) tuple of ints or `TensorShape`, this will be a tuple having the corresponding shapes. `final_loop_state`: The final loop state as returned by `loop_fn`. Raises: TypeError: If `cell` is not an instance of RNNCell, or `loop_fn` is not a `callable`. """ rnn_cell_impl.assert_like_rnncell("cell", cell) if not callable(loop_fn): raise TypeError("loop_fn must be a callable") parallel_iterations = parallel_iterations or 32 # Create a new scope in which the caching device is either # determined by the parent scope, or is set to place the cached # Variable using the same placement as for the rest of the RNN. with vs.variable_scope(scope or "rnn") as varscope: if _should_cache(): if varscope.caching_device is None: varscope.set_caching_device(lambda op: op.device) time = constant_op.constant(0, dtype=dtypes.int32) (elements_finished, next_input, initial_state, emit_structure, init_loop_state) = loop_fn( time, None, None, None) # time, cell_output, cell_state, loop_state flat_input = nest.flatten(next_input) # Need a surrogate loop state for the while_loop if none is available. loop_state = ( init_loop_state if init_loop_state is not None else constant_op.constant(0, dtype=dtypes.int32)) input_shape = [input_.get_shape() for input_ in flat_input] static_batch_size = tensor_shape.dimension_at_index(input_shape[0], 0) for input_shape_i in input_shape: # Static verification that batch sizes all match static_batch_size.merge_with( tensor_shape.dimension_at_index(input_shape_i, 0)) batch_size = tensor_shape.dimension_value(static_batch_size) const_batch_size = batch_size if batch_size is None: batch_size = array_ops.shape(flat_input[0])[0] nest.assert_same_structure(initial_state, cell.state_size) state = initial_state flat_state = nest.flatten(state) flat_state = [ops.convert_to_tensor(s) for s in flat_state] state = nest.pack_sequence_as(structure=state, flat_sequence=flat_state) if emit_structure is not None: flat_emit_structure = nest.flatten(emit_structure) flat_emit_size = [ emit.shape if emit.shape.is_fully_defined() else array_ops.shape(emit) for emit in flat_emit_structure ] flat_emit_dtypes = [emit.dtype for emit in flat_emit_structure] else: emit_structure = cell.output_size flat_emit_size = nest.flatten(emit_structure) flat_emit_dtypes = [flat_state[0].dtype] * len(flat_emit_size) flat_emit_ta = [ tensor_array_ops.TensorArray( dtype=dtype_i, dynamic_size=True, element_shape=(tensor_shape.TensorShape([ const_batch_size ]).concatenate(_maybe_tensor_shape_from_tensor(size_i))), size=0, name="rnn_output_%d" % i) for i, (dtype_i, size_i) in enumerate(zip(flat_emit_dtypes, flat_emit_size)) ] emit_ta = nest.pack_sequence_as( structure=emit_structure, flat_sequence=flat_emit_ta) flat_zero_emit = [ array_ops.zeros(_concat(batch_size, size_i), dtype_i) for size_i, dtype_i in zip(flat_emit_size, flat_emit_dtypes) ] zero_emit = nest.pack_sequence_as( structure=emit_structure, flat_sequence=flat_zero_emit) def condition(unused_time, elements_finished, *_): return math_ops.logical_not(math_ops.reduce_all(elements_finished)) def body(time, elements_finished, current_input, emit_ta, state, loop_state): """Internal while loop body for raw_rnn. Args: time: time scalar. elements_finished: batch-size vector. current_input: possibly nested tuple of input tensors. emit_ta: possibly nested tuple of output TensorArrays. state: possibly nested tuple of state tensors. loop_state: possibly nested tuple of loop state tensors. Returns: Tuple having the same size as Args but with updated values. 
""" (next_output, cell_state) = cell(current_input, state) nest.assert_same_structure(state, cell_state) nest.assert_same_structure(cell.output_size, next_output) next_time = time + 1 (next_finished, next_input, next_state, emit_output, next_loop_state) = loop_fn(next_time, next_output, cell_state, loop_state) nest.assert_same_structure(state, next_state) nest.assert_same_structure(current_input, next_input) nest.assert_same_structure(emit_ta, emit_output) # If loop_fn returns None for next_loop_state, just reuse the # previous one. loop_state = loop_state if next_loop_state is None else next_loop_state def _copy_some_through(current, candidate): """Copy some tensors through via array_ops.where.""" def copy_fn(cur_i, cand_i): # TensorArray and scalar get passed through. if isinstance(cur_i, tensor_array_ops.TensorArray): return cand_i if cur_i.shape.rank == 0: return cand_i # Otherwise propagate the old or the new value. with ops.colocate_with(cand_i): return array_ops.where(elements_finished, cur_i, cand_i) return nest.map_structure(copy_fn, current, candidate) emit_output = _copy_some_through(zero_emit, emit_output) next_state = _copy_some_through(state, next_state) emit_ta = nest.map_structure(lambda ta, emit: ta.write(time, emit), emit_ta, emit_output) elements_finished = math_ops.logical_or(elements_finished, next_finished) return (next_time, elements_finished, next_input, emit_ta, next_state, loop_state) returned = control_flow_ops.while_loop( condition, body, loop_vars=[ time, elements_finished, next_input, emit_ta, state, loop_state ], parallel_iterations=parallel_iterations, swap_memory=swap_memory) (emit_ta, final_state, final_loop_state) = returned[-3:] if init_loop_state is None: final_loop_state = None return (emit_ta, final_state, final_loop_state) @deprecation.deprecated(None, "Please use `keras.layers.RNN(cell, unroll=True)`, " "which is equivalent to this API") @tf_export(v1=["nn.static_rnn"]) def static_rnn(cell, inputs, initial_state=None, dtype=None, sequence_length=None, scope=None): """Creates a recurrent neural network specified by RNNCell `cell`. The simplest form of RNN network generated is: ```python state = cell.zero_state(...) outputs = [] for input_ in inputs: output, state = cell(input_, state) outputs.append(output) return (outputs, state) ``` However, a few other options are available: An initial state can be provided. If the sequence_length vector is provided, dynamic calculation is performed. This method of calculation does not compute the RNN steps past the maximum sequence length of the minibatch (thus saving computational time), and properly propagates the state at an example's sequence length to the final state output. The dynamic calculation performed is, at time `t` for batch row `b`, ```python (output, state)(b, t) = (t >= sequence_length(b)) ? (zeros(cell.output_size), states(b, sequence_length(b) - 1)) : cell(input(b, t), state(b, t - 1)) ``` Args: cell: An instance of RNNCell. inputs: A length T list of inputs, each a `Tensor` of shape `[batch_size, input_size]`, or a nested tuple of such elements. initial_state: (optional) An initial state for the RNN. If `cell.state_size` is an integer, this must be a `Tensor` of appropriate type and shape `[batch_size, cell.state_size]`. If `cell.state_size` is a tuple, this should be a tuple of tensors having shapes `[batch_size, s] for s in cell.state_size`. dtype: (optional) The data type for the initial state and expected output. 
Required if initial_state is not provided or RNN state has a heterogeneous dtype. sequence_length: Specifies the length of each sequence in inputs. An int32 or int64 vector (tensor) size `[batch_size]`, values in `[0, T)`. scope: VariableScope for the created subgraph; defaults to "rnn". Returns: A pair (outputs, state) where: - outputs is a length T list of outputs (one for each input), or a nested tuple of such elements. - state is the final state Raises: TypeError: If `cell` is not an instance of RNNCell. ValueError: If `inputs` is `None` or an empty list, or if the input depth (column size) cannot be inferred from inputs via shape inference. """ rnn_cell_impl.assert_like_rnncell("cell", cell) if not nest.is_sequence(inputs): raise TypeError("inputs must be a sequence") if not inputs: raise ValueError("inputs must not be empty") outputs = [] # Create a new scope in which the caching device is either # determined by the parent scope, or is set to place the cached # Variable using the same placement as for the rest of the RNN. with vs.variable_scope(scope or "rnn") as varscope: if _should_cache(): if varscope.caching_device is None: varscope.set_caching_device(lambda op: op.device) # Obtain the first sequence of the input first_input = inputs while nest.is_sequence(first_input): first_input = first_input[0] # Temporarily avoid EmbeddingWrapper and seq2seq badness # TODO(lukaszkaiser): remove EmbeddingWrapper if first_input.get_shape().rank != 1: input_shape = first_input.get_shape().with_rank_at_least(2) fixed_batch_size = input_shape.dims[0] flat_inputs = nest.flatten(inputs) for flat_input in flat_inputs: input_shape = flat_input.get_shape().with_rank_at_least(2) batch_size, input_size = tensor_shape.dimension_at_index( input_shape, 0), input_shape[1:] fixed_batch_size.merge_with(batch_size) for i, size in enumerate(input_size.dims): if tensor_shape.dimension_value(size) is None: raise ValueError( "Input size (dimension %d of inputs) must be accessible via " "shape inference, but saw value None." 
% i) else: fixed_batch_size = first_input.get_shape().with_rank_at_least(1)[0] if tensor_shape.dimension_value(fixed_batch_size): batch_size = tensor_shape.dimension_value(fixed_batch_size) else: batch_size = array_ops.shape(first_input)[0] if initial_state is not None: state = initial_state else: if not dtype: raise ValueError("If no initial_state is provided, " "dtype must be specified") if getattr(cell, "get_initial_state", None) is not None: state = cell.get_initial_state( inputs=None, batch_size=batch_size, dtype=dtype) else: state = cell.zero_state(batch_size, dtype) if sequence_length is not None: # Prepare variables sequence_length = ops.convert_to_tensor( sequence_length, name="sequence_length") if sequence_length.get_shape().rank not in (None, 1): raise ValueError( "sequence_length must be a vector of length batch_size") def _create_zero_output(output_size): # convert int to TensorShape if necessary size = _concat(batch_size, output_size) output = array_ops.zeros( array_ops.stack(size), _infer_state_dtype(dtype, state)) shape = _concat( tensor_shape.dimension_value(fixed_batch_size), output_size, static=True) output.set_shape(tensor_shape.TensorShape(shape)) return output output_size = cell.output_size flat_output_size = nest.flatten(output_size) flat_zero_output = tuple( _create_zero_output(size) for size in flat_output_size) zero_output = nest.pack_sequence_as( structure=output_size, flat_sequence=flat_zero_output) sequence_length = math_ops.cast(sequence_length, dtypes.int32) min_sequence_length = math_ops.reduce_min(sequence_length) max_sequence_length = math_ops.reduce_max(sequence_length) # Keras RNN cells only accept state as list, even if it's a single tensor. is_keras_rnn_cell = _is_keras_rnn_cell(cell) if is_keras_rnn_cell and not nest.is_sequence(state): state = [state] for time, input_ in enumerate(inputs): if time > 0: varscope.reuse_variables() # pylint: disable=cell-var-from-loop call_cell = lambda: cell(input_, state) # pylint: enable=cell-var-from-loop if sequence_length is not None: (output, state) = _rnn_step( time=time, sequence_length=sequence_length, min_sequence_length=min_sequence_length, max_sequence_length=max_sequence_length, zero_output=zero_output, state=state, call_cell=call_cell, state_size=cell.state_size) else: (output, state) = call_cell() outputs.append(output) # Keras RNN cells only return state as list, even if it's a single tensor. if is_keras_rnn_cell and len(state) == 1: state = state[0] return (outputs, state) @deprecation.deprecated(None, "Please use `keras.layers.RNN(cell, stateful=True)`, " "which is equivalent to this API") @tf_export(v1=["nn.static_state_saving_rnn"]) def static_state_saving_rnn(cell, inputs, state_saver, state_name, sequence_length=None, scope=None): """RNN that accepts a state saver for time-truncated RNN calculation. Args: cell: An instance of `RNNCell`. inputs: A length T list of inputs, each a `Tensor` of shape `[batch_size, input_size]`. state_saver: A state saver object with methods `state` and `save_state`. state_name: Python string or tuple of strings. The name to use with the state_saver. If the cell returns tuples of states (i.e., `cell.state_size` is a tuple) then `state_name` should be a tuple of strings having the same length as `cell.state_size`. Otherwise it should be a single string. sequence_length: (optional) An int32/int64 vector size [batch_size]. See the documentation for rnn() for more details about sequence_length. scope: VariableScope for the created subgraph; defaults to "rnn". 
Returns: A pair (outputs, state) where: outputs is a length T list of outputs (one for each input) states is the final state Raises: TypeError: If `cell` is not an instance of RNNCell. ValueError: If `inputs` is `None` or an empty list, or if the arity and type of `state_name` does not match that of `cell.state_size`. """ state_size = cell.state_size state_is_tuple = nest.is_sequence(state_size) state_name_tuple = nest.is_sequence(state_name) if state_is_tuple != state_name_tuple: raise ValueError("state_name should be the same type as cell.state_size. " "state_name: %s, cell.state_size: %s" % (str(state_name), str(state_size))) if state_is_tuple: state_name_flat = nest.flatten(state_name) state_size_flat = nest.flatten(state_size) if len(state_name_flat) != len(state_size_flat): raise ValueError("#elems(state_name) != #elems(state_size): %d vs. %d" % (len(state_name_flat), len(state_size_flat))) initial_state = nest.pack_sequence_as( structure=state_size, flat_sequence=[state_saver.state(s) for s in state_name_flat]) else: initial_state = state_saver.state(state_name) (outputs, state) = static_rnn( cell, inputs, initial_state=initial_state, sequence_length=sequence_length, scope=scope) if state_is_tuple: flat_state = nest.flatten(state) state_name = nest.flatten(state_name) save_state = [ state_saver.save_state(name, substate) for name, substate in zip(state_name, flat_state) ] else: save_state = [state_saver.save_state(state_name, state)] with ops.control_dependencies(save_state): last_output = outputs[-1] flat_last_output = nest.flatten(last_output) flat_last_output = [ array_ops.identity(output) for output in flat_last_output ] outputs[-1] = nest.pack_sequence_as( structure=last_output, flat_sequence=flat_last_output) if state_is_tuple: state = nest.pack_sequence_as( structure=state, flat_sequence=[array_ops.identity(s) for s in flat_state]) else: state = array_ops.identity(state) return (outputs, state) @deprecation.deprecated(None, "Please use `keras.layers.Bidirectional(" "keras.layers.RNN(cell, unroll=True))`, which is " "equivalent to this API") @tf_export(v1=["nn.static_bidirectional_rnn"]) def static_bidirectional_rnn(cell_fw, cell_bw, inputs, initial_state_fw=None, initial_state_bw=None, dtype=None, sequence_length=None, scope=None): """Creates a bidirectional recurrent neural network. Similar to the unidirectional case above (rnn) but takes input and builds independent forward and backward RNNs with the final forward and backward outputs depth-concatenated, such that the output will have the format [time][batch][cell_fw.output_size + cell_bw.output_size]. The input_size of forward and backward cell must match. The initial state for both directions is zero by default (but can be set optionally) and no intermediate states are ever returned -- the network is fully unrolled for the given (passed in) length(s) of the sequence(s) or completely unrolled if length(s) is not given. Args: cell_fw: An instance of RNNCell, to be used for forward direction. cell_bw: An instance of RNNCell, to be used for backward direction. inputs: A length T list of inputs, each a tensor of shape [batch_size, input_size], or a nested tuple of such elements. initial_state_fw: (optional) An initial state for the forward RNN. This must be a tensor of appropriate type and shape `[batch_size, cell_fw.state_size]`. If `cell_fw.state_size` is a tuple, this should be a tuple of tensors having shapes `[batch_size, s] for s in cell_fw.state_size`. 
initial_state_bw: (optional) Same as for `initial_state_fw`, but using the corresponding properties of `cell_bw`. dtype: (optional) The data type for the initial state. Required if either of the initial states are not provided. sequence_length: (optional) An int32/int64 vector, size `[batch_size]`, containing the actual lengths for each of the sequences. scope: VariableScope for the created subgraph; defaults to "bidirectional_rnn" Returns: A tuple (outputs, output_state_fw, output_state_bw) where: outputs is a length `T` list of outputs (one for each input), which are depth-concatenated forward and backward outputs. output_state_fw is the final state of the forward rnn. output_state_bw is the final state of the backward rnn. Raises: TypeError: If `cell_fw` or `cell_bw` is not an instance of `RNNCell`. ValueError: If inputs is None or an empty list. """ rnn_cell_impl.assert_like_rnncell("cell_fw", cell_fw) rnn_cell_impl.assert_like_rnncell("cell_bw", cell_bw) if not nest.is_sequence(inputs): raise TypeError("inputs must be a sequence") if not inputs: raise ValueError("inputs must not be empty") with vs.variable_scope(scope or "bidirectional_rnn"): # Forward direction with vs.variable_scope("fw") as fw_scope: output_fw, output_state_fw = static_rnn( cell_fw, inputs, initial_state_fw, dtype, sequence_length, scope=fw_scope) # Backward direction with vs.variable_scope("bw") as bw_scope: reversed_inputs = _reverse_seq(inputs, sequence_length) tmp, output_state_bw = static_rnn( cell_bw, reversed_inputs, initial_state_bw, dtype, sequence_length, scope=bw_scope) output_bw = _reverse_seq(tmp, sequence_length) # Concat each of the forward/backward outputs flat_output_fw = nest.flatten(output_fw) flat_output_bw = nest.flatten(output_bw) flat_outputs = tuple( array_ops.concat([fw, bw], 1) for fw, bw in zip(flat_output_fw, flat_output_bw)) outputs = nest.pack_sequence_as( structure=output_fw, flat_sequence=flat_outputs) return (outputs, output_state_fw, output_state_bw)
tensorflow-master
tensorflow/python/ops/rnn.py
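The `static_bidirectional_rnn` docstring in the file above describes the depth-concatenated output format but includes no code example. Below is a minimal usage sketch, not part of the original file, assuming the TF 1.x `compat.v1` API; the sizes (`max_time`, `batch_size`, `input_depth`, `num_units`) are hypothetical placeholders.

```python
# Minimal sketch only; assumes TF 1.x graph mode via tf.compat.v1.
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

max_time, batch_size, input_depth, num_units = 5, 4, 8, 16  # hypothetical sizes

# static_* RNNs take a length-T Python list of [batch_size, input_depth] tensors.
inputs = [tf.placeholder(tf.float32, [batch_size, input_depth])
          for _ in range(max_time)]
sequence_length = tf.placeholder(tf.int32, [batch_size])

cell_fw = tf.nn.rnn_cell.LSTMCell(num_units)
cell_bw = tf.nn.rnn_cell.LSTMCell(num_units)
outputs, state_fw, state_bw = tf.nn.static_bidirectional_rnn(
    cell_fw, cell_bw, inputs, dtype=tf.float32,
    sequence_length=sequence_length)

# Each element of `outputs` has shape [batch_size, 2 * num_units]: the forward
# and backward outputs are concatenated along the depth axis, as the
# static_bidirectional_rnn docstring describes.
```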
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Gradient checker for functions. The gradient checker verifies numerically that an function properly computes the gradients """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.eager import backprop from tensorflow.python.eager import context from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util.tf_export import tf_export def _product(t): if isinstance(t, int): return t else: y = 1 for x in t: y *= x return y def _eval_indexed_slices(a): """Converts IndexedSlices to IndexedSlicesValue with numpy indices/values. When eager execution is enabled, converts IndexedSlices to IndexedSlicesValue with numpy indices/values. Args: a: any value. Returns: If a is IndexedSlices and eager execution is enabled, calls numpy() on a's fields. Otherwise returns a unchanged. """ if isinstance(a, ops.IndexedSlices) and context.executing_eagerly(): return ops.IndexedSlicesValue( indices=[x.numpy() for x in a.indices], values=[x.numpy() for x in a.values], dense_shape=a.dense_shape) return a def _to_numpy(a): """Converts Tensors, EagerTensors, and IndexedSlicesValue to numpy arrays. Args: a: any value. Returns: If a is EagerTensor or Tensor, returns the evaluation of a by calling numpy() or run(). If a is IndexedSlicesValue, constructs the corresponding dense numpy array. Otherwise returns a unchanged. """ if isinstance(a, ops.EagerTensor): return a.numpy() if isinstance(a, ops.Tensor): sess = ops.get_default_session() return sess.run(a) if isinstance(a, ops.IndexedSlicesValue): arr = np.zeros(a.dense_shape) assert len(a.values) == len(a.indices), ( "IndexedSlicesValue has %s value slices but %s indices\n%s" % (a.values, a.indices, a)) for values_slice, index in zip(a.values, a.indices): assert 0 <= index < len(arr), ( "IndexedSlicesValue has invalid index %s\n%s" % (index, a)) arr[index] += values_slice return arr return a def _prepare(f, xs_dtypes): """Return a function that executes 'f'. In TF 2.x, this is the same as `f`. In TF 1.x, returns a Python function that executes the graph defined by `f` in a Session. Args: f: the function. xs_dtypes: dtypes of f's arguments. 
Returns: a function that will be evaluated in both graph and eager mode """ if context.executing_eagerly(): def decorated_eager(*xs_data): return f(*map(ops.convert_to_tensor, xs_data)) return decorated_eager xs = [array_ops.placeholder(x_dtype) for x_dtype in xs_dtypes] y = f(*xs) sess = ops.get_default_session() def decorated_graph(*xs_data): xs_data = [_to_numpy(a) for a in xs_data] return sess.run(y, feed_dict=dict(zip(xs, xs_data))) return decorated_graph def _compute_theoretical_jacobian(f, y_shape, y_dtype, xs, param): """Computes the theoretical Jacobian for f regarding xs[param]. One can think of the relation among f, xs and y as y = f(xs). Args: f: the function. y_shape: the shape of the result. y_dtype: the dtype of the result. xs: a list of tensors. param: the index of the target parameter. Returns: A 2-d numpy array representing the Jacobian. It has "x_size" rows and "y_size" columns where "x_size" is the number of elements in xs[param] and "y_size" is the number of elements in the result. Raises: ValueError: If result is empty but the gradient is nonzero. """ x = xs[param] # Complex vectors are treated as vectors of twice as many reals. x_shape = tuple(x.shape) + (2,) if x.dtype.is_complex else x.shape y_factor = 2 if y_dtype.is_complex else 1 # To compute the jacobian, we treat x and y as one-dimensional vectors. x_size = _product(x_shape) x_val_size = _product(x_shape[1:]) # This is used for sparse gradients y_size = _product(y_shape) * y_factor # Allocate 2-D Jacobian, with x dimensions smashed into the first # dimension and y dimensions smashed into the second. jacobian = np.zeros((x_size, y_size), dtype=x.dtype.real_dtype.as_numpy_dtype) # For each of the entry of dy, we set this to be 1 and # everything else to be 0 and compute the gradients -- this will give us one # one column of the Jacobian matrix. dy_data = np.zeros(y_shape, dtype=y_dtype.as_numpy_dtype) dy_data_flat = dy_data.ravel().view(y_dtype.real_dtype.as_numpy_dtype) grad_fn_unprep = backprop.gradients_function(f, [param]) grad_fn = _prepare(lambda dy, *xs: grad_fn_unprep(*xs, dy=dy), [y_dtype] + [x.dtype for x in xs]) for col in range(y_size): dy_data_flat[col] = 1 grad = _to_numpy(grad_fn(dy_data, *xs)[0]) grad = _eval_indexed_slices(grad) dy_data_flat[col] = 0 if isinstance(grad, ops.IndexedSlicesValue): for i, v in zip(grad.indices, grad.values): r_begin = i * x_val_size r_end = r_begin + x_val_size jacobian[r_begin:r_end, col] += v.flat else: jacobian[:, col] = grad.ravel().view(jacobian.dtype) # If the output is empty, run the gradients at least once and make sure # they produce zeros. if y_size == 0: # don't use 'not y_size', because y_size may not be an int grad = _to_numpy(grad_fn(dy_data, *xs)[0]) if grad.shape != x.shape: raise ValueError("Empty gradient has wrong shape: expected %s, got %s" % (x.shape, grad.shape)) if np.any(grad): raise ValueError("Empty tensor with nonzero gradients") logging.vlog(1, "Theoretical Jacobian =\n%s", jacobian) return jacobian def _compute_numeric_jacobian(f, y_size, y_dtype, xs, param, delta): """Computes the numeric Jacobian for f regarding xs[param]. One can think of the relation among f, xs and y as y = f(xs). Args: f: the function. y_size: the number of elements of the result. y_dtype: the dtype of the result. xs: a list of tensors. param: the index of the target parameter. delta: the amount of perturbation we give to the input. Returns: A 2-d numpy array representing the Jacobian. 
It has "x_size" rows and "y_size" columns where "x_size" is the number of elements in xs[param] and "y_size" is the number of elements in the result. """ # bfloat16 doesn't have enough bits to represent high precision numbers such # as delta. Convert to float32 here. Since numeric_jacobian is expected to # be the groundtruth to compare against, it shouldn't lose any information. x_shape = xs[param].shape x_dtype = xs[param].dtype if y_dtype == dtypes.bfloat16: f = lambda *xs: math_ops.cast(f(*xs), dtypes.float32) y_dtype = dtypes.float32 # To compute the jacobian, we treat x and y as one-dimensional vectors x_size = _product(x_shape) * (2 if x_dtype.is_complex else 1) y_size = y_size * (2 if y_dtype.is_complex else 1) x_dtype = x_dtype.real_dtype.as_numpy_dtype y_dtype = y_dtype.real_dtype.as_numpy_dtype xs_dtypes = [x.dtype for x in xs] # Converts xs to numpy arrays to do in-place perturbation. # Calls asarray() to avoid copying in ravel() later. xs = [np.asarray(_to_numpy(x)) for x in xs] x = xs[param] # Make sure we have the right types scale = np.asarray(2 * delta, dtype=y_dtype)[()] jacobian = np.zeros((x_size, y_size), dtype=x_dtype) # For each of the entry of x, we slightly perturbs this by adding and # subtracting a delta and then compute difference between the outputs. This # will give us one row of the Jacobian matrix. f = _prepare(f, xs_dtypes) for row in range(x_size): original = x.ravel().view(x_dtype)[row] x.ravel().view(x_dtype)[row] += delta y_pos = _to_numpy(f(*xs)) x.ravel().view(x_dtype)[row] = original x.ravel().view(x_dtype)[row] -= delta y_neg = _to_numpy(f(*xs)) x.ravel().view(x_dtype)[row] = original diff = (y_pos - y_neg) / scale jacobian[row, :] = diff.ravel().view(y_dtype) logging.vlog(1, "Numeric Jacobian =\n%s", jacobian) return jacobian def _compute_gradient(f, y_shape, y_dtype, xs, param, delta): """Computes the theoretical and numerical jacobian.""" x = xs[param] t = x.dtype allowed_types = [dtypes.float16, dtypes.bfloat16, dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128] assert t.base_dtype in allowed_types, ("Cannot compute gradient for " "unsupported type %s of argument %s" % (t.name, param)) t2 = y_dtype assert t2.base_dtype in allowed_types, ("Cannot compute gradient for " "unsupported type %s of y" % t2.name) y_size = _product(y_shape) jacob_t = _compute_theoretical_jacobian(f, y_shape, y_dtype, xs, param) jacob_n = _compute_numeric_jacobian(f, y_size, y_dtype, xs, param, delta) return jacob_t, jacob_n def _compute_gradient_list(f, xs, delta): """Compute gradients for a list of x values.""" # convert xs to tensors so that dtype and shape have uniform types xs = list(map(ops.convert_to_tensor, xs)) # run the function to get info of the result xs_dtypes = [x.dtype for x in xs] f_temp = _prepare(f, xs_dtypes) y = f_temp(*xs) return zip(*[_compute_gradient(f, y.shape, dtypes.as_dtype(y.dtype), xs, i, delta) for i in range(len(xs))]) @tf_export("test.compute_gradient", v1=[]) def compute_gradient(f, x, delta=1e-3): """Computes the theoretical and numeric Jacobian of `f`. With y = f(x), computes the theoretical and numeric Jacobian dy/dx. Args: f: the function. x: a list arguments for the function delta: (optional) perturbation used to compute numeric Jacobian. Returns: A pair of lists, where the first is a list of 2-d numpy arrays representing the theoretical Jacobians for each argument, and the second list is the numerical ones. 
Each 2-d array has "x_size" rows and "y_size" columns where "x_size" is the number of elements in the corresponding argument and "y_size" is the number of elements in f(x). Raises: ValueError: If result is empty but the gradient is nonzero. ValueError: If x is not list, but any other type. Example: ```python @tf.function def test_func(x): return x*x theoretical, numerical = tf.test.compute_gradient(test_func, [1.0]) theoretical, numerical # ((array([[2.]], dtype=float32),), (array([[2.000004]], dtype=float32),)) ``` """ if not isinstance(x, list): raise ValueError( "`x` must be a list of Tensors (arguments to `f`), not a %s" % type(x)) return _compute_gradient_list(f, x, delta) def max_error(grad1, grad2): """Computes maximum elementwise gap. Computes the maximum elementwise gap between two lists of tensors of the same shape. Args: grad1: a lists of tensors. grad2: a lists of tensors with the same shape as grad1. Returns: The maximum elementwise gap between the two. """ error = 0 for j_t, j_n in zip(grad1, grad2): if j_t.size or j_n.size: # Handle zero size tensors correctly error = np.maximum(error, np.fabs(j_t - j_n).max()) return error
tensorflow-master
tensorflow/python/ops/gradient_checker_v2.py
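Complementing the `compute_gradient` docstring in the file above, the sketch below shows how the theoretical and numeric Jacobians are typically compared. It is illustrative only: the function `f`, the input values, and the tolerance are hypothetical, and the final reduction simply mirrors what `max_error` in this file computes.

```python
# Minimal sketch only; assumes TF 2.x eager execution.
import numpy as np
import tensorflow as tf

@tf.function
def f(x):
  return x * x  # any differentiable function of its tensor arguments

# float64 keeps the finite-difference rounding error negligible for this check.
x = tf.constant([[1.0, 2.0], [3.0, 4.0]], dtype=tf.float64)

# One (theoretical, numeric) Jacobian pair per argument of f.
theoretical, numeric = tf.test.compute_gradient(f, [x], delta=1e-3)

# Collapse both Jacobian lists into one scalar, as max_error above does.
err = max(np.fabs(t - n).max() for t, n in zip(theoretical, numeric))
assert err < 1e-6, "theoretical and numeric Jacobians disagree"
```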
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Confusion matrix related utilities.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import sparse_ops from tensorflow.python.util import deprecation from tensorflow.python.util.tf_export import tf_export def remove_squeezable_dimensions( labels, predictions, expected_rank_diff=0, name=None): """Squeeze last dim if ranks differ from expected by exactly 1. In the common case where we expect shapes to match, `expected_rank_diff` defaults to 0, and we squeeze the last dimension of the larger rank if they differ by 1. But, for example, if `labels` contains class IDs and `predictions` contains 1 probability per class, we expect `predictions` to have 1 more dimension than `labels`, so `expected_rank_diff` would be 1. In this case, we'd squeeze `labels` if `rank(predictions) - rank(labels) == 0`, and `predictions` if `rank(predictions) - rank(labels) == 2`. This will use static shape if available. Otherwise, it will add graph operations, which could result in a performance hit. Args: labels: Label values, a `Tensor` whose dimensions match `predictions`. predictions: Predicted values, a `Tensor` of arbitrary dimensions. expected_rank_diff: Expected result of `rank(predictions) - rank(labels)`. name: Name of the op. Returns: Tuple of `labels` and `predictions`, possibly with last dim squeezed. """ with ops.name_scope(name, 'remove_squeezable_dimensions', [labels, predictions]): predictions = ops.convert_to_tensor(predictions) labels = ops.convert_to_tensor(labels) predictions_shape = predictions.get_shape() predictions_rank = predictions_shape.ndims labels_shape = labels.get_shape() labels_rank = labels_shape.ndims if (labels_rank is not None) and (predictions_rank is not None): # Use static rank. rank_diff = predictions_rank - labels_rank if rank_diff == expected_rank_diff + 1: predictions = array_ops.squeeze(predictions, [-1]) elif rank_diff == expected_rank_diff - 1: labels = array_ops.squeeze(labels, [-1]) return labels, predictions # Use dynamic rank. 
rank_diff = array_ops.rank(predictions) - array_ops.rank(labels) if (predictions_rank is None) or ( predictions_shape.dims[-1].is_compatible_with(1)): predictions = control_flow_ops.cond( math_ops.equal(expected_rank_diff + 1, rank_diff), lambda: array_ops.squeeze(predictions, [-1]), lambda: predictions) if (labels_rank is None) or ( labels_shape.dims[-1].is_compatible_with(1)): labels = control_flow_ops.cond( math_ops.equal(expected_rank_diff - 1, rank_diff), lambda: array_ops.squeeze(labels, [-1]), lambda: labels) return labels, predictions @tf_export('math.confusion_matrix', v1=[]) def confusion_matrix(labels, predictions, num_classes=None, weights=None, dtype=dtypes.int32, name=None): """Computes the confusion matrix from predictions and labels. The matrix columns represent the prediction labels and the rows represent the real labels. The confusion matrix is always a 2-D array of shape `[n, n]`, where `n` is the number of valid labels for a given classification task. Both prediction and labels must be 1-D arrays of the same shape in order for this function to work. If `num_classes` is `None`, then `num_classes` will be set to one plus the maximum value in either predictions or labels. Class labels are expected to start at 0. For example, if `num_classes` is 3, then the possible labels would be `[0, 1, 2]`. If `weights` is not `None`, then each prediction contributes its corresponding weight to the total value of the confusion matrix cell. For example: ```python tf.math.confusion_matrix([1, 2, 4], [2, 2, 4]) ==> [[0 0 0 0 0] [0 0 1 0 0] [0 0 1 0 0] [0 0 0 0 0] [0 0 0 0 1]] ``` Note that the possible labels are assumed to be `[0, 1, 2, 3, 4]`, resulting in a 5x5 confusion matrix. Args: labels: 1-D `Tensor` of real labels for the classification task. predictions: 1-D `Tensor` of predictions for a given classification. num_classes: The possible number of labels the classification task can have. If this value is not provided, it will be calculated using both predictions and labels array. weights: An optional `Tensor` whose shape matches `predictions`. dtype: Data type of the confusion matrix. name: Scope name. Returns: A `Tensor` of type `dtype` with shape `[n, n]` representing the confusion matrix, where `n` is the number of possible labels in the classification task. Raises: ValueError: If both predictions and labels are not 1-D vectors and have mismatched shapes, or if `weights` is not `None` and its shape doesn't match `predictions`. """ with ops.name_scope(name, 'confusion_matrix', (predictions, labels, num_classes, weights)) as name: labels, predictions = remove_squeezable_dimensions( ops.convert_to_tensor(labels, name='labels'), ops.convert_to_tensor( predictions, name='predictions')) predictions = math_ops.cast(predictions, dtypes.int64) labels = math_ops.cast(labels, dtypes.int64) # Sanity checks - underflow or overflow can cause memory corruption. 
labels = control_flow_ops.with_dependencies( [check_ops.assert_non_negative( labels, message='`labels` contains negative values')], labels) predictions = control_flow_ops.with_dependencies( [check_ops.assert_non_negative( predictions, message='`predictions` contains negative values')], predictions) if num_classes is None: num_classes = math_ops.maximum(math_ops.reduce_max(predictions), math_ops.reduce_max(labels)) + 1 else: num_classes_int64 = math_ops.cast(num_classes, dtypes.int64) labels = control_flow_ops.with_dependencies( [check_ops.assert_less( labels, num_classes_int64, message='`labels` out of bound')], labels) predictions = control_flow_ops.with_dependencies( [check_ops.assert_less( predictions, num_classes_int64, message='`predictions` out of bound')], predictions) if weights is not None: weights = ops.convert_to_tensor(weights, name='weights') predictions.get_shape().assert_is_compatible_with(weights.get_shape()) weights = math_ops.cast(weights, dtype) shape = array_ops.stack([num_classes, num_classes]) indices = array_ops.stack([labels, predictions], axis=1) values = (array_ops.ones_like(predictions, dtype) if weights is None else weights) cm_sparse = sparse_tensor.SparseTensor( indices=indices, values=values, dense_shape=math_ops.cast(shape, dtypes.int64)) zero_matrix = array_ops.zeros(math_ops.cast(shape, dtypes.int32), dtype) return sparse_ops.sparse_add(zero_matrix, cm_sparse) @tf_export(v1=['math.confusion_matrix', 'confusion_matrix']) @deprecation.deprecated_endpoints('confusion_matrix', 'train.confusion_matrix') def confusion_matrix_v1(labels, predictions, num_classes=None, dtype=dtypes.int32, name=None, weights=None): """Computes the confusion matrix from predictions and labels. The matrix columns represent the prediction labels and the rows represent the real labels. The confusion matrix is always a 2-D array of shape `[n, n]`, where `n` is the number of valid labels for a given classification task. Both prediction and labels must be 1-D arrays of the same shape in order for this function to work. If `num_classes` is `None`, then `num_classes` will be set to one plus the maximum value in either predictions or labels. Class labels are expected to start at 0. For example, if `num_classes` is 3, then the possible labels would be `[0, 1, 2]`. If `weights` is not `None`, then each prediction contributes its corresponding weight to the total value of the confusion matrix cell. For example: ```python tf.math.confusion_matrix([1, 2, 4], [2, 2, 4]) ==> [[0 0 0 0 0] [0 0 1 0 0] [0 0 1 0 0] [0 0 0 0 0] [0 0 0 0 1]] ``` Note that the possible labels are assumed to be `[0, 1, 2, 3, 4]`, resulting in a 5x5 confusion matrix. Args: labels: 1-D `Tensor` of real labels for the classification task. predictions: 1-D `Tensor` of predictions for a given classification. num_classes: The possible number of labels the classification task can have. If this value is not provided, it will be calculated using both predictions and labels array. dtype: Data type of the confusion matrix. name: Scope name. weights: An optional `Tensor` whose shape matches `predictions`. Returns: A `Tensor` of type `dtype` with shape `[n, n]` representing the confusion matrix, where `n` is the number of possible labels in the classification task. Raises: ValueError: If both predictions and labels are not 1-D vectors and have mismatched shapes, or if `weights` is not `None` and its shape doesn't match `predictions`. """ return confusion_matrix(labels, predictions, num_classes, weights, dtype, name)
tensorflow-master
tensorflow/python/ops/confusion_matrix.py
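The confusion-matrix docstrings above describe the weighted case but do not demonstrate it. The sketch below, not part of the original file, uses hypothetical label and weight values; `dtype=tf.float32` is passed so the fractional weights are not truncated by the default int32 dtype.

```python
# Minimal sketch only; assumes TF 2.x eager execution.
import tensorflow as tf

labels = tf.constant([1, 2, 4])
predictions = tf.constant([2, 2, 4])
weights = tf.constant([0.5, 1.0, 2.0])  # one weight per (label, prediction) pair

cm = tf.math.confusion_matrix(labels, predictions, num_classes=5,
                              weights=weights, dtype=tf.float32)

# Rows are true labels, columns are predictions:
# cm[1, 2] == 0.5, cm[2, 2] == 1.0, cm[4, 4] == 2.0; all other entries are 0.
```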
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for tensorflow.ops.image_ops.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import colorsys import functools import itertools import math import os import time import numpy as np from six.moves import xrange # pylint: disable=redefined-builtin from tensorflow.python.compat import compat from tensorflow.core.protobuf import config_pb2 from tensorflow.python.client import session from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import gen_image_ops from tensorflow.python.ops import gradients from tensorflow.python.ops import image_ops from tensorflow.python.ops import image_ops_impl from tensorflow.python.ops import io_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import random_ops from tensorflow.python.ops import variables from tensorflow.python.platform import googletest from tensorflow.python.platform import test class RGBToHSVTest(test_util.TensorFlowTestCase): def testBatch(self): # Build an arbitrary RGB image np.random.seed(7) batch_size = 5 shape = (batch_size, 2, 7, 3) for nptype in [np.float32, np.float64]: inp = np.random.rand(*shape).astype(nptype) # Convert to HSV and back, as a batch and individually with self.cached_session(use_gpu=True) as sess: batch0 = constant_op.constant(inp) batch1 = image_ops.rgb_to_hsv(batch0) batch2 = image_ops.hsv_to_rgb(batch1) split0 = array_ops.unstack(batch0) split1 = list(map(image_ops.rgb_to_hsv, split0)) split2 = list(map(image_ops.hsv_to_rgb, split1)) join1 = array_ops.stack(split1) join2 = array_ops.stack(split2) batch1, batch2, join1, join2 = self.evaluate( [batch1, batch2, join1, join2]) # Verify that processing batch elements together is the same as separate self.assertAllClose(batch1, join1) self.assertAllClose(batch2, join2) self.assertAllClose(batch2, inp) def testRGBToHSVRoundTrip(self): data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1] for nptype in [np.float32, np.float64]: rgb_np = np.array(data, dtype=nptype).reshape([2, 2, 3]) / 255. 
with self.cached_session(use_gpu=True): hsv = image_ops.rgb_to_hsv(rgb_np) rgb = image_ops.hsv_to_rgb(hsv) rgb_tf = self.evaluate(rgb) self.assertAllClose(rgb_tf, rgb_np) class RGBToYIQTest(test_util.TensorFlowTestCase): def testBatch(self): # Build an arbitrary RGB image np.random.seed(7) batch_size = 5 shape = (batch_size, 2, 7, 3) for nptype in [np.float32, np.float64]: inp = np.random.rand(*shape).astype(nptype) # Convert to YIQ and back, as a batch and individually with self.cached_session(use_gpu=True) as sess: batch0 = constant_op.constant(inp) batch1 = image_ops.rgb_to_yiq(batch0) batch2 = image_ops.yiq_to_rgb(batch1) split0 = array_ops.unstack(batch0) split1 = list(map(image_ops.rgb_to_yiq, split0)) split2 = list(map(image_ops.yiq_to_rgb, split1)) join1 = array_ops.stack(split1) join2 = array_ops.stack(split2) batch1, batch2, join1, join2 = self.evaluate( [batch1, batch2, join1, join2]) # Verify that processing batch elements together is the same as separate self.assertAllClose(batch1, join1, rtol=1e-4, atol=1e-4) self.assertAllClose(batch2, join2, rtol=1e-4, atol=1e-4) self.assertAllClose(batch2, inp, rtol=1e-4, atol=1e-4) class RGBToYUVTest(test_util.TensorFlowTestCase): def testBatch(self): # Build an arbitrary RGB image np.random.seed(7) batch_size = 5 shape = (batch_size, 2, 7, 3) for nptype in [np.float32, np.float64]: inp = np.random.rand(*shape).astype(nptype) # Convert to YUV and back, as a batch and individually with self.cached_session(use_gpu=True) as sess: batch0 = constant_op.constant(inp) batch1 = image_ops.rgb_to_yuv(batch0) batch2 = image_ops.yuv_to_rgb(batch1) split0 = array_ops.unstack(batch0) split1 = list(map(image_ops.rgb_to_yuv, split0)) split2 = list(map(image_ops.yuv_to_rgb, split1)) join1 = array_ops.stack(split1) join2 = array_ops.stack(split2) batch1, batch2, join1, join2 = self.evaluate( [batch1, batch2, join1, join2]) # Verify that processing batch elements together is the same as separate self.assertAllClose(batch1, join1, rtol=1e-4, atol=1e-4) self.assertAllClose(batch2, join2, rtol=1e-4, atol=1e-4) self.assertAllClose(batch2, inp, rtol=1e-4, atol=1e-4) class GrayscaleToRGBTest(test_util.TensorFlowTestCase): def _RGBToGrayscale(self, images): is_batch = True if len(images.shape) == 3: is_batch = False images = np.expand_dims(images, axis=0) out_shape = images.shape[0:3] + (1,) out = np.zeros(shape=out_shape, dtype=np.uint8) for batch in xrange(images.shape[0]): for y in xrange(images.shape[1]): for x in xrange(images.shape[2]): red = images[batch, y, x, 0] green = images[batch, y, x, 1] blue = images[batch, y, x, 2] gray = 0.2989 * red + 0.5870 * green + 0.1140 * blue out[batch, y, x, 0] = int(gray) if not is_batch: out = np.squeeze(out, axis=0) return out def _TestRGBToGrayscale(self, x_np): y_np = self._RGBToGrayscale(x_np) with self.cached_session(use_gpu=True): x_tf = constant_op.constant(x_np, shape=x_np.shape) y = image_ops.rgb_to_grayscale(x_tf) y_tf = self.evaluate(y) self.assertAllEqual(y_tf, y_np) def testBasicRGBToGrayscale(self): # 4-D input with batch dimension. x_np = np.array( [[1, 2, 3], [4, 10, 1]], dtype=np.uint8).reshape([1, 1, 2, 3]) self._TestRGBToGrayscale(x_np) # 3-D input with no batch dimension. x_np = np.array([[1, 2, 3], [4, 10, 1]], dtype=np.uint8).reshape([1, 2, 3]) self._TestRGBToGrayscale(x_np) def testBasicGrayscaleToRGB(self): # 4-D input with batch dimension. 
x_np = np.array([[1, 2]], dtype=np.uint8).reshape([1, 1, 2, 1]) y_np = np.array( [[1, 1, 1], [2, 2, 2]], dtype=np.uint8).reshape([1, 1, 2, 3]) with self.cached_session(use_gpu=True): x_tf = constant_op.constant(x_np, shape=x_np.shape) y = image_ops.grayscale_to_rgb(x_tf) y_tf = self.evaluate(y) self.assertAllEqual(y_tf, y_np) # 3-D input with no batch dimension. x_np = np.array([[1, 2]], dtype=np.uint8).reshape([1, 2, 1]) y_np = np.array([[1, 1, 1], [2, 2, 2]], dtype=np.uint8).reshape([1, 2, 3]) with self.cached_session(use_gpu=True): x_tf = constant_op.constant(x_np, shape=x_np.shape) y = image_ops.grayscale_to_rgb(x_tf) y_tf = self.evaluate(y) self.assertAllEqual(y_tf, y_np) def testGrayscaleToRGBInputValidation(self): # tests whether the grayscale_to_rgb function raises # an exception if the input images' last dimension is # not of size 1, i.e. the images have shape # [batch size, height, width] or [height, width] # tests if an exception is raised if a three dimensional # input is used, i.e. the images have shape [batch size, height, width] with self.cached_session(use_gpu=True): # 3-D input with batch dimension. x_np = np.array([[1, 2]], dtype=np.uint8).reshape([1, 1, 2]) x_tf = constant_op.constant(x_np, shape=x_np.shape) # this is the error message we expect the function to raise err_msg = "Last dimension of a grayscale image should be size 1" with self.assertRaisesRegexp(ValueError, err_msg): image_ops.grayscale_to_rgb(x_tf) # tests if an exception is raised if a two dimensional # input is used, i.e. the images have shape [height, width] with self.cached_session(use_gpu=True): # 1-D input without batch dimension. x_np = np.array([[1, 2]], dtype=np.uint8).reshape([2]) x_tf = constant_op.constant(x_np, shape=x_np.shape) # this is the error message we expect the function to raise err_msg = "A grayscale image must be at least two-dimensional" with self.assertRaisesRegexp(ValueError, err_msg): image_ops.grayscale_to_rgb(x_tf) @test_util.run_deprecated_v1 def testShapeInference(self): # Shape inference works and produces expected output where possible rgb_shape = [7, None, 19, 3] gray_shape = rgb_shape[:-1] + [1] with self.cached_session(use_gpu=True): rgb_tf = array_ops.placeholder(dtypes.uint8, shape=rgb_shape) gray = image_ops.rgb_to_grayscale(rgb_tf) self.assertEqual(gray_shape, gray.get_shape().as_list()) with self.cached_session(use_gpu=True): gray_tf = array_ops.placeholder(dtypes.uint8, shape=gray_shape) rgb = image_ops.grayscale_to_rgb(gray_tf) self.assertEqual(rgb_shape, rgb.get_shape().as_list()) # Shape inference does not break for unknown shapes with self.cached_session(use_gpu=True): rgb_tf_unknown = array_ops.placeholder(dtypes.uint8) gray_unknown = image_ops.rgb_to_grayscale(rgb_tf_unknown) self.assertFalse(gray_unknown.get_shape()) with self.cached_session(use_gpu=True): gray_tf_unknown = array_ops.placeholder(dtypes.uint8) rgb_unknown = image_ops.grayscale_to_rgb(gray_tf_unknown) self.assertFalse(rgb_unknown.get_shape()) class AdjustGamma(test_util.TensorFlowTestCase): @test_util.run_deprecated_v1 def test_adjust_gamma_less_zero_float32(self): """White image should be returned for gamma equal to zero""" with self.cached_session(): x_data = np.random.uniform(0, 1.0, (8, 8)) x_np = np.array(x_data, dtype=np.float32) x = constant_op.constant(x_np, shape=x_np.shape) err_msg = "Gamma should be a non-negative real number" with self.assertRaisesRegexp(ValueError, err_msg): image_ops.adjust_gamma(x, gamma=-1) @test_util.run_deprecated_v1 def 
test_adjust_gamma_less_zero_uint8(self): """White image should be returned for gamma equal to zero""" with self.cached_session(): x_data = np.random.uniform(0, 255, (8, 8)) x_np = np.array(x_data, dtype=np.uint8) x = constant_op.constant(x_np, shape=x_np.shape) err_msg = "Gamma should be a non-negative real number" with self.assertRaisesRegexp(ValueError, err_msg): image_ops.adjust_gamma(x, gamma=-1) @test_util.run_deprecated_v1 def test_adjust_gamma_less_zero_tensor(self): """White image should be returned for gamma equal to zero""" with self.cached_session(): x_data = np.random.uniform(0, 1.0, (8, 8)) x_np = np.array(x_data, dtype=np.float32) x = constant_op.constant(x_np, shape=x_np.shape) y = constant_op.constant(-1.0, dtype=dtypes.float32) image = image_ops.adjust_gamma(x, gamma=y) err_msg = "Gamma should be a non-negative real number" with self.assertRaisesRegexp(errors.InvalidArgumentError, err_msg): self.evaluate(image) def _test_adjust_gamma_uint8(self, gamma): """Verifying the output with expected results for gamma correction for uint8 images """ with self.cached_session(): x_np = np.random.uniform(0, 255, (8, 8)).astype(np.uint8) x = constant_op.constant(x_np, shape=x_np.shape) y = image_ops.adjust_gamma(x, gamma=gamma) y_tf = np.trunc(y.eval()) # calculate gamma correction using numpy # firstly, transform uint8 to float representation # then perform correction y_np = np.power(x_np / 255.0, gamma) # convert correct numpy image back to uint8 type y_np = np.trunc(np.clip(y_np * 255.5, 0, 255.0)) self.assertAllClose(y_tf, y_np, 1e-6) def _test_adjust_gamma_float32(self, gamma): """Verifying the output with expected results for gamma correction for float32 images """ with self.cached_session(): x_np = np.random.uniform(0, 1.0, (8, 8)) x = constant_op.constant(x_np, shape=x_np.shape) y = image_ops.adjust_gamma(x, gamma=gamma) y_tf = y.eval() y_np = np.clip(np.power(x_np, gamma), 0, 1.0) self.assertAllClose(y_tf, y_np, 1e-6) @test_util.run_deprecated_v1 def test_adjust_gamma_one_float32(self): """Same image should be returned for gamma equal to one""" self._test_adjust_gamma_float32(1.0) @test_util.run_deprecated_v1 def test_adjust_gamma_one_uint8(self): self._test_adjust_gamma_uint8(1.0) @test_util.run_deprecated_v1 def test_adjust_gamma_zero_uint8(self): """White image should be returned for gamma equal to zero for uint8 images """ self._test_adjust_gamma_uint8(gamma=0.0) @test_util.run_deprecated_v1 def test_adjust_gamma_less_one_uint8(self): """Verifying the output with expected results for gamma correction with gamma equal to half for uint8 images """ self._test_adjust_gamma_uint8(gamma=0.5) @test_util.run_deprecated_v1 def test_adjust_gamma_greater_one_uint8(self): """Verifying the output with expected results for gamma correction for uint8 images """ self._test_adjust_gamma_uint8(gamma=1.0) @test_util.run_deprecated_v1 def test_adjust_gamma_less_one_float32(self): """Verifying the output with expected results for gamma correction with gamma equal to half for float32 images """ self._test_adjust_gamma_float32(0.5) @test_util.run_deprecated_v1 def test_adjust_gamma_greater_one_float32(self): """Verifying the output with expected results for gamma correction with gamma equal to two for float32 images """ self._test_adjust_gamma_float32(1.0) @test_util.run_deprecated_v1 def test_adjust_gamma_zero_float32(self): """White image should be returned for gamma equal to zero for float32 images """ self._test_adjust_gamma_float32(0.0) class AdjustHueTest(test_util.TensorFlowTestCase): def 
testAdjustNegativeHue(self): x_shape = [2, 2, 3] x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1] x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape) delta = -0.25 y_data = [0, 13, 1, 54, 226, 59, 8, 234, 150, 255, 39, 1] y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape) with self.cached_session(use_gpu=True): x = constant_op.constant(x_np, shape=x_shape) y = image_ops.adjust_hue(x, delta) y_tf = self.evaluate(y) self.assertAllEqual(y_tf, y_np) def testAdjustPositiveHue(self): x_shape = [2, 2, 3] x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1] x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape) delta = 0.25 y_data = [13, 0, 11, 226, 54, 221, 234, 8, 92, 1, 217, 255] y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape) with self.cached_session(use_gpu=True): x = constant_op.constant(x_np, shape=x_shape) y = image_ops.adjust_hue(x, delta) y_tf = self.evaluate(y) self.assertAllEqual(y_tf, y_np) def testBatchAdjustHue(self): x_shape = [2, 1, 2, 3] x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1] x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape) delta = 0.25 y_data = [13, 0, 11, 226, 54, 221, 234, 8, 92, 1, 217, 255] y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape) with self.cached_session(use_gpu=True): x = constant_op.constant(x_np, shape=x_shape) y = image_ops.adjust_hue(x, delta) y_tf = self.evaluate(y) self.assertAllEqual(y_tf, y_np) def _adjustHueNp(self, x_np, delta_h): self.assertEqual(x_np.shape[-1], 3) x_v = x_np.reshape([-1, 3]) y_v = np.ndarray(x_v.shape, dtype=x_v.dtype) channel_count = x_v.shape[0] for i in xrange(channel_count): r = x_v[i][0] g = x_v[i][1] b = x_v[i][2] h, s, v = colorsys.rgb_to_hsv(r, g, b) h += delta_h h = math.fmod(h + 10.0, 1.0) r, g, b = colorsys.hsv_to_rgb(h, s, v) y_v[i][0] = r y_v[i][1] = g y_v[i][2] = b return y_v.reshape(x_np.shape) def _adjustHueTf(self, x_np, delta_h): with self.cached_session(use_gpu=True): x = constant_op.constant(x_np) y = image_ops.adjust_hue(x, delta_h) y_tf = self.evaluate(y) return y_tf def testAdjustRandomHue(self): x_shapes = [ [2, 2, 3], [4, 2, 3], [2, 4, 3], [2, 5, 3], [1000, 1, 3], ] test_styles = [ "all_random", "rg_same", "rb_same", "gb_same", "rgb_same", ] for x_shape in x_shapes: for test_style in test_styles: x_np = np.random.rand(*x_shape) * 255. delta_h = np.random.rand() * 2.0 - 1.0 if test_style == "all_random": pass elif test_style == "rg_same": x_np[..., 1] = x_np[..., 0] elif test_style == "rb_same": x_np[..., 2] = x_np[..., 0] elif test_style == "gb_same": x_np[..., 2] = x_np[..., 1] elif test_style == "rgb_same": x_np[..., 1] = x_np[..., 0] x_np[..., 2] = x_np[..., 0] else: raise AssertionError("Invalid test style: %s" % (test_style)) y_np = self._adjustHueNp(x_np, delta_h) y_tf = self._adjustHueTf(x_np, delta_h) self.assertAllClose(y_tf, y_np, rtol=2e-5, atol=1e-5) def testInvalidShapes(self): fused = False if not fused: # The tests are known to pass with the fused adjust_hue. We will enable # them when the fused implementation is the default. return x_np = np.random.rand(2, 3) * 255. delta_h = np.random.rand() * 2.0 - 1.0 fused = False with self.assertRaisesRegexp(ValueError, "Shape must be at least rank 3"): self._adjustHueTf(x_np, delta_h) x_np = np.random.rand(4, 2, 4) * 255. 
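    # A (4, 2, 4) input has the required rank of 3 but four channels, so
    # adjust_hue should reject it with the "input must have 3 channels" error
    # checked below.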
delta_h = np.random.rand() * 2.0 - 1.0 with self.assertRaisesOpError("input must have 3 channels"): self._adjustHueTf(x_np, delta_h) class FlipImageBenchmark(test.Benchmark): def _benchmarkFlipLeftRight(self, device, cpu_count): image_shape = [299, 299, 3] warmup_rounds = 100 benchmark_rounds = 1000 config = config_pb2.ConfigProto() if cpu_count is not None: config.inter_op_parallelism_threads = 1 config.intra_op_parallelism_threads = cpu_count with session.Session("", graph=ops.Graph(), config=config) as sess: with ops.device(device): inputs = variables.Variable( random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255, trainable=False, dtype=dtypes.float32) run_op = image_ops.flip_left_right(inputs) self.evaluate(variables.global_variables_initializer()) for i in xrange(warmup_rounds + benchmark_rounds): if i == warmup_rounds: start = time.time() self.evaluate(run_op) end = time.time() step_time = (end - start) / benchmark_rounds tag = device + "_%s" % (cpu_count if cpu_count is not None else "_all") print("benchmarkFlipLeftRight_299_299_3_%s step_time: %.2f us" % (tag, step_time * 1e6)) self.report_benchmark( name="benchmarkFlipLeftRight_299_299_3_%s" % (tag), iters=benchmark_rounds, wall_time=step_time) def _benchmarkRandomFlipLeftRight(self, device, cpu_count): image_shape = [299, 299, 3] warmup_rounds = 100 benchmark_rounds = 1000 config = config_pb2.ConfigProto() if cpu_count is not None: config.inter_op_parallelism_threads = 1 config.intra_op_parallelism_threads = cpu_count with session.Session("", graph=ops.Graph(), config=config) as sess: with ops.device(device): inputs = variables.Variable( random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255, trainable=False, dtype=dtypes.float32) run_op = image_ops.random_flip_left_right(inputs) self.evaluate(variables.global_variables_initializer()) for i in xrange(warmup_rounds + benchmark_rounds): if i == warmup_rounds: start = time.time() self.evaluate(run_op) end = time.time() step_time = (end - start) / benchmark_rounds tag = device + "_%s" % (cpu_count if cpu_count is not None else "_all") print("benchmarkRandomFlipLeftRight_299_299_3_%s step_time: %.2f us" % (tag, step_time * 1e6)) self.report_benchmark( name="benchmarkRandomFlipLeftRight_299_299_3_%s" % (tag), iters=benchmark_rounds, wall_time=step_time) def _benchmarkBatchedRandomFlipLeftRight(self, device, cpu_count): image_shape = [16, 299, 299, 3] warmup_rounds = 100 benchmark_rounds = 1000 config = config_pb2.ConfigProto() if cpu_count is not None: config.inter_op_parallelism_threads = 1 config.intra_op_parallelism_threads = cpu_count with session.Session("", graph=ops.Graph(), config=config) as sess: with ops.device(device): inputs = variables.Variable( random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255, trainable=False, dtype=dtypes.float32) run_op = image_ops.random_flip_left_right(inputs) self.evaluate(variables.global_variables_initializer()) for i in xrange(warmup_rounds + benchmark_rounds): if i == warmup_rounds: start = time.time() self.evaluate(run_op) end = time.time() step_time = (end - start) / benchmark_rounds tag = device + "_%s" % (cpu_count if cpu_count is not None else "_all") print("benchmarkBatchedRandomFlipLeftRight_16_299_299_3_%s step_time: " "%.2f us" % (tag, step_time * 1e6)) self.report_benchmark( name="benchmarkBatchedRandomFlipLeftRight_16_299_299_3_%s" % (tag), iters=benchmark_rounds, wall_time=step_time) def benchmarkFlipLeftRightCpu1(self): self._benchmarkFlipLeftRight("/cpu:0", 1) def 
benchmarkFlipLeftRightCpuAll(self): self._benchmarkFlipLeftRight("/cpu:0", None) def benchmarkFlipLeftRightGpu(self): self._benchmarkFlipLeftRight(test.gpu_device_name(), None) def benchmarkRandomFlipLeftRightCpu1(self): self._benchmarkRandomFlipLeftRight("/cpu:0", 1) def benchmarkRandomFlipLeftRightCpuAll(self): self._benchmarkRandomFlipLeftRight("/cpu:0", None) def benchmarkRandomFlipLeftRightGpu(self): self._benchmarkRandomFlipLeftRight(test.gpu_device_name(), None) def benchmarkBatchedRandomFlipLeftRightCpu1(self): self._benchmarkBatchedRandomFlipLeftRight("/cpu:0", 1) def benchmarkBatchedRandomFlipLeftRightCpuAll(self): self._benchmarkBatchedRandomFlipLeftRight("/cpu:0", None) def benchmarkBatchedRandomFlipLeftRightGpu(self): self._benchmarkBatchedRandomFlipLeftRight(test.gpu_device_name(), None) class AdjustHueBenchmark(test.Benchmark): def _benchmarkAdjustHue(self, device, cpu_count): image_shape = [299, 299, 3] warmup_rounds = 100 benchmark_rounds = 1000 config = config_pb2.ConfigProto() if cpu_count is not None: config.inter_op_parallelism_threads = 1 config.intra_op_parallelism_threads = cpu_count with self.benchmark_session(config=config, device=device) as sess: inputs = variables.Variable( random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255, trainable=False, dtype=dtypes.float32) delta = constant_op.constant(0.1, dtype=dtypes.float32) outputs = image_ops.adjust_hue(inputs, delta) run_op = control_flow_ops.group(outputs) self.evaluate(variables.global_variables_initializer()) for i in xrange(warmup_rounds + benchmark_rounds): if i == warmup_rounds: start = time.time() self.evaluate(run_op) end = time.time() step_time = (end - start) / benchmark_rounds tag = device + "_%s" % (cpu_count if cpu_count is not None else "_all") print("benchmarkAdjustHue_299_299_3_%s step_time: %.2f us" % (tag, step_time * 1e6)) self.report_benchmark( name="benchmarkAdjustHue_299_299_3_%s" % (tag), iters=benchmark_rounds, wall_time=step_time) def benchmarkAdjustHueCpu1(self): self._benchmarkAdjustHue("/cpu:0", 1) def benchmarkAdjustHueCpuAll(self): self._benchmarkAdjustHue("/cpu:0", None) def benchmarkAdjustHueGpu(self): self._benchmarkAdjustHue(test.gpu_device_name(), None) class AdjustSaturationBenchmark(test.Benchmark): def _benchmarkAdjustSaturation(self, device, cpu_count): image_shape = [299, 299, 3] warmup_rounds = 100 benchmark_rounds = 1000 config = config_pb2.ConfigProto() if cpu_count is not None: config.inter_op_parallelism_threads = 1 config.intra_op_parallelism_threads = cpu_count with self.benchmark_session(config=config, device=device) as sess: inputs = variables.Variable( random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255, trainable=False, dtype=dtypes.float32) delta = constant_op.constant(0.1, dtype=dtypes.float32) outputs = image_ops.adjust_saturation(inputs, delta) run_op = control_flow_ops.group(outputs) self.evaluate(variables.global_variables_initializer()) for _ in xrange(warmup_rounds): self.evaluate(run_op) start = time.time() for _ in xrange(benchmark_rounds): self.evaluate(run_op) end = time.time() step_time = (end - start) / benchmark_rounds tag = device + "_%s" % (cpu_count if cpu_count is not None else "_all") print("benchmarkAdjustSaturation_299_299_3_%s step_time: %.2f us" % (tag, step_time * 1e6)) self.report_benchmark( name="benchmarkAdjustSaturation_299_299_3_%s" % (tag), iters=benchmark_rounds, wall_time=step_time) def benchmarkAdjustSaturationCpu1(self): self._benchmarkAdjustSaturation("/cpu:0", 1) def 
benchmarkAdjustSaturationCpuAll(self): self._benchmarkAdjustSaturation("/cpu:0", None) def benchmarkAdjustSaturationGpu(self): self._benchmarkAdjustSaturation(test.gpu_device_name(), None) class ResizeBilinearBenchmark(test.Benchmark): def _benchmarkResize(self, image_size, num_channels): batch_size = 1 num_ops = 1000 img = variables.Variable( random_ops.random_normal( [batch_size, image_size[0], image_size[1], num_channels]), name="img") deps = [] for _ in xrange(num_ops): with ops.control_dependencies(deps): resize_op = image_ops.resize_bilinear( img, [299, 299], align_corners=False) deps = [resize_op] benchmark_op = control_flow_ops.group(*deps) with self.benchmark_session() as sess: self.evaluate(variables.global_variables_initializer()) results = self.run_op_benchmark( sess, benchmark_op, name=("resize_bilinear_%s_%s_%s" % (image_size[0], image_size[1], num_channels))) print("%s : %.2f ms/img" % (results["name"], 1000 * results["wall_time"] / (batch_size * num_ops))) def benchmarkSimilar3Channel(self): self._benchmarkResize((183, 229), 3) def benchmarkScaleUp3Channel(self): self._benchmarkResize((141, 186), 3) def benchmarkScaleDown3Channel(self): self._benchmarkResize((749, 603), 3) def benchmarkSimilar1Channel(self): self._benchmarkResize((183, 229), 1) def benchmarkScaleUp1Channel(self): self._benchmarkResize((141, 186), 1) def benchmarkScaleDown1Channel(self): self._benchmarkResize((749, 603), 1) class ResizeBicubicBenchmark(test.Benchmark): def _benchmarkResize(self, image_size, num_channels): batch_size = 1 num_ops = 1000 img = variables.Variable( random_ops.random_normal( [batch_size, image_size[0], image_size[1], num_channels]), name="img") deps = [] for _ in xrange(num_ops): with ops.control_dependencies(deps): resize_op = image_ops.resize_bicubic( img, [299, 299], align_corners=False) deps = [resize_op] benchmark_op = control_flow_ops.group(*deps) with self.benchmark_session() as sess: self.evaluate(variables.global_variables_initializer()) results = self.run_op_benchmark( sess, benchmark_op, min_iters=20, name=("resize_bicubic_%s_%s_%s" % (image_size[0], image_size[1], num_channels))) print("%s : %.2f ms/img" % (results["name"], 1000 * results["wall_time"] / (batch_size * num_ops))) def benchmarkSimilar3Channel(self): self._benchmarkResize((183, 229), 3) def benchmarkScaleUp3Channel(self): self._benchmarkResize((141, 186), 3) def benchmarkScaleDown3Channel(self): self._benchmarkResize((749, 603), 3) def benchmarkSimilar1Channel(self): self._benchmarkResize((183, 229), 1) def benchmarkScaleUp1Channel(self): self._benchmarkResize((141, 186), 1) def benchmarkScaleDown1Channel(self): self._benchmarkResize((749, 603), 1) def benchmarkSimilar4Channel(self): self._benchmarkResize((183, 229), 4) def benchmarkScaleUp4Channel(self): self._benchmarkResize((141, 186), 4) def benchmarkScaleDown4Channel(self): self._benchmarkResize((749, 603), 4) class ResizeAreaBenchmark(test.Benchmark): def _benchmarkResize(self, image_size, num_channels): batch_size = 1 num_ops = 1000 img = variables.Variable( random_ops.random_normal( [batch_size, image_size[0], image_size[1], num_channels]), name="img") deps = [] for _ in xrange(num_ops): with ops.control_dependencies(deps): resize_op = image_ops.resize_area(img, [299, 299], align_corners=False) deps = [resize_op] benchmark_op = control_flow_ops.group(*deps) with self.benchmark_session() as sess: self.evaluate(variables.global_variables_initializer()) results = self.run_op_benchmark( sess, benchmark_op, name=("resize_area_%s_%s_%s" % (image_size[0], 
image_size[1], num_channels))) print("%s : %.2f ms/img" % (results["name"], 1000 * results["wall_time"] / (batch_size * num_ops))) def benchmarkSimilar3Channel(self): self._benchmarkResize((183, 229), 3) def benchmarkScaleUp3Channel(self): self._benchmarkResize((141, 186), 3) def benchmarkScaleDown3Channel(self): self._benchmarkResize((749, 603), 3) def benchmarkSimilar1Channel(self): self._benchmarkResize((183, 229), 1) def benchmarkScaleUp1Channel(self): self._benchmarkResize((141, 186), 1) def benchmarkScaleDown1Channel(self): self._benchmarkResize((749, 603), 1) class AdjustSaturationTest(test_util.TensorFlowTestCase): def testHalfSaturation(self): x_shape = [2, 2, 3] x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1] x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape) saturation_factor = 0.5 y_data = [6, 9, 13, 140, 180, 226, 135, 121, 234, 172, 255, 128] y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape) with self.cached_session(use_gpu=True): x = constant_op.constant(x_np, shape=x_shape) y = image_ops.adjust_saturation(x, saturation_factor) y_tf = self.evaluate(y) self.assertAllEqual(y_tf, y_np) def testTwiceSaturation(self): x_shape = [2, 2, 3] x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1] x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape) saturation_factor = 2.0 y_data = [0, 5, 13, 0, 106, 226, 30, 0, 234, 89, 255, 0] y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape) with self.cached_session(use_gpu=True): x = constant_op.constant(x_np, shape=x_shape) y = image_ops.adjust_saturation(x, saturation_factor) y_tf = self.evaluate(y) self.assertAllEqual(y_tf, y_np) def testBatchSaturation(self): x_shape = [2, 1, 2, 3] x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1] x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape) saturation_factor = 0.5 y_data = [6, 9, 13, 140, 180, 226, 135, 121, 234, 172, 255, 128] y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape) with self.cached_session(use_gpu=True): x = constant_op.constant(x_np, shape=x_shape) y = image_ops.adjust_saturation(x, saturation_factor) y_tf = self.evaluate(y) self.assertAllEqual(y_tf, y_np) def _adjustSaturationNp(self, x_np, scale): self.assertEqual(x_np.shape[-1], 3) x_v = x_np.reshape([-1, 3]) y_v = np.ndarray(x_v.shape, dtype=x_v.dtype) channel_count = x_v.shape[0] for i in xrange(channel_count): r = x_v[i][0] g = x_v[i][1] b = x_v[i][2] h, s, v = colorsys.rgb_to_hsv(r, g, b) s *= scale s = min(1.0, max(0.0, s)) r, g, b = colorsys.hsv_to_rgb(h, s, v) y_v[i][0] = r y_v[i][1] = g y_v[i][2] = b return y_v.reshape(x_np.shape) @test_util.run_deprecated_v1 def testAdjustRandomSaturation(self): x_shapes = [ [2, 2, 3], [4, 2, 3], [2, 4, 3], [2, 5, 3], [1000, 1, 3], ] test_styles = [ "all_random", "rg_same", "rb_same", "gb_same", "rgb_same", ] with self.cached_session(use_gpu=True): for x_shape in x_shapes: for test_style in test_styles: x_np = np.random.rand(*x_shape) * 255. 
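          # The "*_same" styles force pairs of channels to be equal, which
          # exercises the degenerate cases of the RGB-to-HSV conversion where
          # the fused kernel and the colorsys-based baseline must still agree.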
scale = np.random.rand() if test_style == "all_random": pass elif test_style == "rg_same": x_np[..., 1] = x_np[..., 0] elif test_style == "rb_same": x_np[..., 2] = x_np[..., 0] elif test_style == "gb_same": x_np[..., 2] = x_np[..., 1] elif test_style == "rgb_same": x_np[..., 1] = x_np[..., 0] x_np[..., 2] = x_np[..., 0] else: raise AssertionError("Invalid test style: %s" % (test_style)) y_baseline = self._adjustSaturationNp(x_np, scale) y_fused = image_ops.adjust_saturation(x_np, scale).eval() self.assertAllClose(y_fused, y_baseline, rtol=2e-5, atol=1e-5) class FlipTransposeRotateTest(test_util.TensorFlowTestCase): def testInvolutionLeftRight(self): x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1]) with self.cached_session(use_gpu=True): x_tf = constant_op.constant(x_np, shape=x_np.shape) y = image_ops.flip_left_right(image_ops.flip_left_right(x_tf)) y_tf = self.evaluate(y) self.assertAllEqual(y_tf, x_np) def testInvolutionLeftRightWithBatch(self): x_np = np.array( [[[1, 2, 3], [1, 2, 3]], [[1, 2, 3], [1, 2, 3]]], dtype=np.uint8).reshape([2, 2, 3, 1]) with self.cached_session(use_gpu=True): x_tf = constant_op.constant(x_np, shape=x_np.shape) y = image_ops.flip_left_right(image_ops.flip_left_right(x_tf)) y_tf = self.evaluate(y) self.assertAllEqual(y_tf, x_np) @test_util.run_deprecated_v1 def testLeftRight(self): x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1]) y_np = np.array([[3, 2, 1], [3, 2, 1]], dtype=np.uint8).reshape([2, 3, 1]) with self.cached_session(use_gpu=True): x_tf = constant_op.constant(x_np, shape=x_np.shape) y = image_ops.flip_left_right(x_tf) self.assertTrue(y.op.name.startswith("flip_left_right")) y_tf = self.evaluate(y) self.assertAllEqual(y_tf, y_np) def testLeftRightWithBatch(self): x_np = np.array( [[[1, 2, 3], [1, 2, 3]], [[1, 2, 3], [1, 2, 3]]], dtype=np.uint8).reshape([2, 2, 3, 1]) y_np = np.array( [[[3, 2, 1], [3, 2, 1]], [[3, 2, 1], [3, 2, 1]]], dtype=np.uint8).reshape([2, 2, 3, 1]) with self.cached_session(use_gpu=True): x_tf = constant_op.constant(x_np, shape=x_np.shape) y = image_ops.flip_left_right(x_tf) y_tf = self.evaluate(y) self.assertAllEqual(y_tf, y_np) @test_util.run_deprecated_v1 def testRandomFlipLeftRight(self): x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1]) y_np = np.array([[3, 2, 1], [3, 2, 1]], dtype=np.uint8).reshape([2, 3, 1]) seed = 42 with self.cached_session(use_gpu=True): x_tf = constant_op.constant(x_np, shape=x_np.shape) y = image_ops.random_flip_left_right(x_tf, seed=seed) self.assertTrue(y.op.name.startswith("random_flip_left_right")) count_flipped = 0 count_unflipped = 0 for _ in range(100): y_tf = self.evaluate(y) if y_tf[0][0] == 1: self.assertAllEqual(y_tf, x_np) count_unflipped += 1 else: self.assertAllEqual(y_tf, y_np) count_flipped += 1 # 100 trials # Mean: 50 # Std Dev: ~5 # Six Sigma: 50 - (5 * 6) = 20 self.assertGreaterEqual(count_flipped, 20) self.assertGreaterEqual(count_unflipped, 20) @test_util.run_deprecated_v1 def testRandomFlipLeftRightWithBatch(self): batch_size = 16 seed = 42 # create single item of test data x_np_raw = np.array( [[1, 2, 3], [1, 2, 3]], dtype=np.uint8 ).reshape([1, 2, 3, 1]) y_np_raw = np.array( [[3, 2, 1], [3, 2, 1]], dtype=np.uint8 ).reshape([1, 2, 3, 1]) # create batched test data x_np = np.vstack([x_np_raw for _ in range(batch_size)]) y_np = np.vstack([y_np_raw for _ in range(batch_size)]) with self.cached_session(use_gpu=True): x_tf = constant_op.constant(x_np, shape=x_np.shape) y = 
image_ops.random_flip_left_right(x_tf, seed=seed) self.assertTrue(y.op.name.startswith("random_flip_left_right")) count_flipped = 0 count_unflipped = 0 for _ in range(100): y_tf = self.evaluate(y) # check every element of the batch for i in range(batch_size): if y_tf[i][0][0] == 1: self.assertAllEqual(y_tf[i], x_np[i]) count_unflipped += 1 else: self.assertAllEqual(y_tf[i], y_np[i]) count_flipped += 1 # 100 trials, each containing batch_size elements # Mean: 50 * batch_size # Std Dev: ~5 * sqrt(batch_size) # Six Sigma: 50 * batch_size - (5 * 6 * sqrt(batch_size)) # = 50 * batch_size - 30 * sqrt(batch_size) = 800 - 30 * 4 = 680 six_sigma = 50 * batch_size - 30 * np.sqrt(batch_size) self.assertGreaterEqual(count_flipped, six_sigma) self.assertGreaterEqual(count_unflipped, six_sigma) def testInvolutionUpDown(self): x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1]) with self.cached_session(use_gpu=True): x_tf = constant_op.constant(x_np, shape=x_np.shape) y = image_ops.flip_up_down(image_ops.flip_up_down(x_tf)) y_tf = self.evaluate(y) self.assertAllEqual(y_tf, x_np) def testInvolutionUpDownWithBatch(self): x_np = np.array( [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]], dtype=np.uint8).reshape([2, 2, 3, 1]) with self.cached_session(use_gpu=True): x_tf = constant_op.constant(x_np, shape=x_np.shape) y = image_ops.flip_up_down(image_ops.flip_up_down(x_tf)) y_tf = self.evaluate(y) self.assertAllEqual(y_tf, x_np) @test_util.run_deprecated_v1 def testUpDown(self): x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1]) y_np = np.array([[4, 5, 6], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1]) with self.cached_session(use_gpu=True): x_tf = constant_op.constant(x_np, shape=x_np.shape) y = image_ops.flip_up_down(x_tf) self.assertTrue(y.op.name.startswith("flip_up_down")) y_tf = self.evaluate(y) self.assertAllEqual(y_tf, y_np) def testUpDownWithBatch(self): x_np = np.array( [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]], dtype=np.uint8).reshape([2, 2, 3, 1]) y_np = np.array( [[[4, 5, 6], [1, 2, 3]], [[10, 11, 12], [7, 8, 9]]], dtype=np.uint8).reshape([2, 2, 3, 1]) with self.cached_session(use_gpu=True): x_tf = constant_op.constant(x_np, shape=x_np.shape) y = image_ops.flip_up_down(x_tf) y_tf = self.evaluate(y) self.assertAllEqual(y_tf, y_np) @test_util.run_deprecated_v1 def testRandomFlipUpDown(self): x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1]) y_np = np.array([[4, 5, 6], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1]) seed = 42 with self.cached_session(use_gpu=True): x_tf = constant_op.constant(x_np, shape=x_np.shape) y = image_ops.random_flip_up_down(x_tf, seed=seed) self.assertTrue(y.op.name.startswith("random_flip_up_down")) count_flipped = 0 count_unflipped = 0 for _ in range(100): y_tf = self.evaluate(y) if y_tf[0][0] == 1: self.assertAllEqual(y_tf, x_np) count_unflipped += 1 else: self.assertAllEqual(y_tf, y_np) count_flipped += 1 # 100 trials # Mean: 50 # Std Dev: ~5 # Six Sigma: 50 - (5 * 6) = 20 self.assertGreaterEqual(count_flipped, 20) self.assertGreaterEqual(count_unflipped, 20) @test_util.run_deprecated_v1 def testRandomFlipUpDownWithBatch(self): batch_size = 16 seed = 42 # create single item of test data x_np_raw = np.array( [[1, 2, 3], [4, 5, 6]], dtype=np.uint8 ).reshape([1, 2, 3, 1]) y_np_raw = np.array( [[4, 5, 6], [1, 2, 3]], dtype=np.uint8 ).reshape([1, 2, 3, 1]) # create batched test data x_np = np.vstack([x_np_raw for _ in range(batch_size)]) y_np = np.vstack([y_np_raw for _ in range(batch_size)]) 
with self.cached_session(use_gpu=True): x_tf = constant_op.constant(x_np, shape=x_np.shape) y = image_ops.random_flip_up_down(x_tf, seed=seed) self.assertTrue(y.op.name.startswith("random_flip_up_down")) count_flipped = 0 count_unflipped = 0 for _ in range(100): y_tf = self.evaluate(y) # check every element of the batch for i in range(batch_size): if y_tf[i][0][0] == 1: self.assertAllEqual(y_tf[i], x_np[i]) count_unflipped += 1 else: self.assertAllEqual(y_tf[i], y_np[i]) count_flipped += 1 # 100 trials, each containing batch_size elements # Mean: 50 * batch_size # Std Dev: ~5 * sqrt(batch_size) # Six Sigma: 50 * batch_size - (5 * 6 * sqrt(batch_size)) # = 50 * batch_size - 30 * sqrt(batch_size) = 800 - 30 * 4 = 680 six_sigma = 50 * batch_size - 30 * np.sqrt(batch_size) self.assertGreaterEqual(count_flipped, six_sigma) self.assertGreaterEqual(count_unflipped, six_sigma) def testInvolutionTranspose(self): x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1]) with self.cached_session(use_gpu=True): x_tf = constant_op.constant(x_np, shape=x_np.shape) y = image_ops.transpose(image_ops.transpose(x_tf)) y_tf = self.evaluate(y) self.assertAllEqual(y_tf, x_np) def testInvolutionTransposeWithBatch(self): x_np = np.array( [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]], dtype=np.uint8).reshape([2, 2, 3, 1]) with self.cached_session(use_gpu=True): x_tf = constant_op.constant(x_np, shape=x_np.shape) y = image_ops.transpose(image_ops.transpose(x_tf)) y_tf = self.evaluate(y) self.assertAllEqual(y_tf, x_np) @test_util.run_deprecated_v1 def testTranspose(self): x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1]) y_np = np.array([[1, 4], [2, 5], [3, 6]], dtype=np.uint8).reshape([3, 2, 1]) with self.cached_session(use_gpu=True): x_tf = constant_op.constant(x_np, shape=x_np.shape) y = image_ops.transpose(x_tf) self.assertTrue(y.op.name.startswith("transpose")) y_tf = self.evaluate(y) self.assertAllEqual(y_tf, y_np) def testTransposeWithBatch(self): x_np = np.array( [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]], dtype=np.uint8).reshape([2, 2, 3, 1]) y_np = np.array( [[[1, 4], [2, 5], [3, 6]], [[7, 10], [8, 11], [9, 12]]], dtype=np.uint8).reshape([2, 3, 2, 1]) with self.cached_session(use_gpu=True): x_tf = constant_op.constant(x_np, shape=x_np.shape) y = image_ops.transpose(x_tf) y_tf = self.evaluate(y) self.assertAllEqual(y_tf, y_np) @test_util.run_deprecated_v1 def testPartialShapes(self): p_unknown_rank = array_ops.placeholder(dtypes.uint8) p_unknown_dims_3 = array_ops.placeholder( dtypes.uint8, shape=[None, None, None]) p_unknown_dims_4 = array_ops.placeholder( dtypes.uint8, shape=[None, None, None, None]) p_unknown_width = array_ops.placeholder(dtypes.uint8, shape=[64, None, 3]) p_unknown_batch = array_ops.placeholder( dtypes.uint8, shape=[None, 64, 64, 3]) p_wrong_rank = array_ops.placeholder(dtypes.uint8, shape=[None, None]) p_zero_dim = array_ops.placeholder(dtypes.uint8, shape=[64, 0, 3]) #Ops that support 3D input for op in [ image_ops.flip_left_right, image_ops.flip_up_down, image_ops.random_flip_left_right, image_ops.random_flip_up_down, image_ops.transpose, image_ops.rot90 ]: transformed_unknown_rank = op(p_unknown_rank) self.assertEqual(3, transformed_unknown_rank.get_shape().ndims) transformed_unknown_dims_3 = op(p_unknown_dims_3) self.assertEqual(3, transformed_unknown_dims_3.get_shape().ndims) transformed_unknown_width = op(p_unknown_width) self.assertEqual(3, transformed_unknown_width.get_shape().ndims) with 
self.assertRaisesRegexp(ValueError, "must be > 0"): op(p_zero_dim) #Ops that support 4D input for op in [ image_ops.flip_left_right, image_ops.flip_up_down, image_ops.random_flip_left_right, image_ops.random_flip_up_down, image_ops.transpose, image_ops.rot90 ]: transformed_unknown_dims_4 = op(p_unknown_dims_4) self.assertEqual(4, transformed_unknown_dims_4.get_shape().ndims) transformed_unknown_batch = op(p_unknown_batch) self.assertEqual(4, transformed_unknown_batch.get_shape().ndims) with self.assertRaisesRegexp(ValueError, "must be at least three-dimensional"): op(p_wrong_rank) def testRot90GroupOrder(self): image = np.arange(24, dtype=np.uint8).reshape([2, 4, 3]) with self.cached_session(use_gpu=True): rotated = image for _ in xrange(4): rotated = image_ops.rot90(rotated) self.assertAllEqual(image, self.evaluate(rotated)) def testRot90GroupOrderWithBatch(self): image = np.arange(48, dtype=np.uint8).reshape([2, 2, 4, 3]) with self.cached_session(use_gpu=True): rotated = image for _ in xrange(4): rotated = image_ops.rot90(rotated) self.assertAllEqual(image, self.evaluate(rotated)) @test_util.run_deprecated_v1 def testRot90NumpyEquivalence(self): image = np.arange(24, dtype=np.uint8).reshape([2, 4, 3]) with self.cached_session(use_gpu=True): k_placeholder = array_ops.placeholder(dtypes.int32, shape=[]) y_tf = image_ops.rot90(image, k_placeholder) for k in xrange(4): y_np = np.rot90(image, k=k) self.assertAllEqual(y_np, y_tf.eval({k_placeholder: k})) @test_util.run_deprecated_v1 def testRot90NumpyEquivalenceWithBatch(self): image = np.arange(48, dtype=np.uint8).reshape([2, 2, 4, 3]) with self.cached_session(use_gpu=True): k_placeholder = array_ops.placeholder(dtypes.int32, shape=[]) y_tf = image_ops.rot90(image, k_placeholder) for k in xrange(4): y_np = np.rot90(image, k=k, axes=(1, 2)) self.assertAllEqual(y_np, y_tf.eval({k_placeholder: k})) class AdjustContrastTest(test_util.TensorFlowTestCase): def _testContrast(self, x_np, y_np, contrast_factor): with self.cached_session(use_gpu=True): x = constant_op.constant(x_np, shape=x_np.shape) y = image_ops.adjust_contrast(x, contrast_factor) y_tf = self.evaluate(y) self.assertAllClose(y_tf, y_np, 1e-6) def testDoubleContrastUint8(self): x_shape = [1, 2, 2, 3] x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1] x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape) y_data = [0, 0, 0, 62, 169, 255, 28, 0, 255, 135, 255, 0] y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape) self._testContrast(x_np, y_np, contrast_factor=2.0) def testDoubleContrastFloat(self): x_shape = [1, 2, 2, 3] x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1] x_np = np.array(x_data, dtype=np.float).reshape(x_shape) / 255. y_data = [ -45.25, -90.75, -92.5, 62.75, 169.25, 333.5, 28.75, -84.75, 349.5, 134.75, 409.25, -116.5 ] y_np = np.array(y_data, dtype=np.float).reshape(x_shape) / 255. 
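    # adjust_contrast computes mean + contrast_factor * (x - mean), with the
    # mean taken per channel over height and width, and does not clip float
    # images; e.g. channel 0 has mean 45.25 / 255, so 0 maps to -45.25 / 255.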
self._testContrast(x_np, y_np, contrast_factor=2.0) def testHalfContrastUint8(self): x_shape = [1, 2, 2, 3] x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1] x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape) y_data = [22, 52, 65, 49, 118, 172, 41, 54, 176, 67, 178, 59] y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape) self._testContrast(x_np, y_np, contrast_factor=0.5) def testBatchDoubleContrast(self): x_shape = [2, 1, 2, 3] x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1] x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape) y_data = [0, 0, 0, 81, 200, 255, 10, 0, 255, 116, 255, 0] y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape) self._testContrast(x_np, y_np, contrast_factor=2.0) def _adjustContrastNp(self, x_np, contrast_factor): mean = np.mean(x_np, (1, 2), keepdims=True) y_np = mean + contrast_factor * (x_np - mean) return y_np def _adjustContrastTf(self, x_np, contrast_factor): with self.cached_session(use_gpu=True): x = constant_op.constant(x_np) y = image_ops.adjust_contrast(x, contrast_factor) y_tf = self.evaluate(y) return y_tf def testRandomContrast(self): x_shapes = [ [1, 2, 2, 3], [2, 1, 2, 3], [1, 2, 2, 3], [2, 5, 5, 3], [2, 1, 1, 3], ] for x_shape in x_shapes: x_np = np.random.rand(*x_shape) * 255. contrast_factor = np.random.rand() * 2.0 + 0.1 y_np = self._adjustContrastNp(x_np, contrast_factor) y_tf = self._adjustContrastTf(x_np, contrast_factor) self.assertAllClose(y_tf, y_np, rtol=1e-5, atol=1e-5) @test_util.run_deprecated_v1 def testContrastFactorShape(self): x_shape = [1, 2, 2, 3] x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1] x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape) with self.assertRaisesRegexp( ValueError, 'Shape must be rank 0 but is rank 1'): image_ops.adjust_contrast(x_np, [2.0]) class AdjustBrightnessTest(test_util.TensorFlowTestCase): def _testBrightness(self, x_np, y_np, delta, tol=1e-6): with self.cached_session(use_gpu=True): x = constant_op.constant(x_np, shape=x_np.shape) y = image_ops.adjust_brightness(x, delta) y_tf = self.evaluate(y) self.assertAllClose(y_tf, y_np, tol) def testPositiveDeltaUint8(self): x_shape = [2, 2, 3] x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1] x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape) y_data = [10, 15, 23, 64, 145, 236, 47, 18, 244, 100, 255, 11] y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape) self._testBrightness(x_np, y_np, delta=10. / 255.) def testPositiveDeltaFloat32(self): x_shape = [2, 2, 3] x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1] x_np = np.array(x_data, dtype=np.float32).reshape(x_shape) / 255. y_data = [10, 15, 23, 64, 145, 236, 47, 18, 244, 100, 265, 11] y_np = np.array(y_data, dtype=np.float32).reshape(x_shape) / 255. self._testBrightness(x_np, y_np, delta=10. / 255.) def testPositiveDeltaFloat16(self): x_shape = [2, 2, 3] x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1] x_np = np.array(x_data, dtype=np.float16).reshape(x_shape) / 255. y_data = [10, 15, 23, 64, 145, 236, 47, 18, 244, 100, 265, 11] y_np = np.array(y_data, dtype=np.float16).reshape(x_shape) / 255. self._testBrightness(x_np, y_np, delta=10. / 255., tol=1e-3) def testNegativeDelta(self): x_shape = [2, 2, 3] x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1] x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape) y_data = [0, 0, 3, 44, 125, 216, 27, 0, 224, 80, 245, 0] y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape) self._testBrightness(x_np, y_np, delta=-10. / 255.) 
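# per_image_standardization rescales each image to zero mean and roughly unit
# variance, dividing by max(stddev, 1.0 / sqrt(num_pixels)) so that uniform
# images do not produce NaNs; _NumpyPerImageWhitening below mirrors that
# formula.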
class PerImageWhiteningTest(test_util.TensorFlowTestCase): def _NumpyPerImageWhitening(self, x): num_pixels = np.prod(x.shape) mn = np.mean(x) std = np.std(x) stddev = max(std, 1.0 / math.sqrt(num_pixels)) y = x.astype(np.float32) y -= mn y /= stddev return y @test_util.run_deprecated_v1 def testBasic(self): x_shape = [13, 9, 3] x_np = np.arange(0, np.prod(x_shape), dtype=np.float32).reshape(x_shape) y_np = self._NumpyPerImageWhitening(x_np) with self.cached_session(use_gpu=True): x = constant_op.constant(x_np, shape=x_shape) y = image_ops.per_image_standardization(x) self.assertTrue(y.op.name.startswith("per_image_standardization")) y_tf = self.evaluate(y) self.assertAllClose(y_tf, y_np, atol=1e-4) def testUniformImage(self): im_np = np.ones([19, 19, 3]).astype(np.float32) * 249 im = constant_op.constant(im_np) whiten = image_ops.per_image_standardization(im) with self.cached_session(use_gpu=True): whiten_np = self.evaluate(whiten) self.assertFalse(np.any(np.isnan(whiten_np))) def testBatchWhitening(self): imgs_np = np.random.uniform(0., 255., [4, 24, 24, 3]) whiten_np = [self._NumpyPerImageWhitening(img) for img in imgs_np] with self.cached_session(use_gpu=True): imgs = constant_op.constant(imgs_np) whiten = image_ops.per_image_standardization(imgs) whiten_tf = self.evaluate(whiten) for w_tf, w_np in zip(whiten_tf, whiten_np): self.assertAllClose(w_tf, w_np, atol=1e-4) def testPreservesDtype(self): imgs_npu8 = np.random.uniform(0., 255., [2, 5, 5, 3]).astype(np.uint8) imgs_tfu8 = constant_op.constant(imgs_npu8) whiten_tfu8 = image_ops.per_image_standardization(imgs_tfu8) self.assertEqual(whiten_tfu8.dtype, dtypes.uint8) imgs_npf16 = np.random.uniform(0., 255., [2, 5, 5, 3]).astype(np.float16) imgs_tff16 = constant_op.constant(imgs_npf16) whiten_tff16 = image_ops.per_image_standardization(imgs_tff16) self.assertEqual(whiten_tff16.dtype, dtypes.float16) class CropToBoundingBoxTest(test_util.TensorFlowTestCase): def _CropToBoundingBox(self, x, offset_height, offset_width, target_height, target_width, use_tensor_inputs): if use_tensor_inputs: offset_height = ops.convert_to_tensor(offset_height) offset_width = ops.convert_to_tensor(offset_width) target_height = ops.convert_to_tensor(target_height) target_width = ops.convert_to_tensor(target_width) x_tensor = array_ops.placeholder(x.dtype, shape=[None] * x.ndim) feed_dict = {x_tensor: x} else: x_tensor = x feed_dict = {} y = image_ops.crop_to_bounding_box(x_tensor, offset_height, offset_width, target_height, target_width) if not use_tensor_inputs: self.assertTrue(y.get_shape().is_fully_defined()) with self.cached_session(use_gpu=True): return y.eval(feed_dict=feed_dict) def _assertReturns(self, x, x_shape, offset_height, offset_width, y, y_shape, use_tensor_inputs_options=None): use_tensor_inputs_options = use_tensor_inputs_options or [False, True] target_height, target_width, _ = y_shape x = np.array(x).reshape(x_shape) y = np.array(y).reshape(y_shape) for use_tensor_inputs in use_tensor_inputs_options: y_tf = self._CropToBoundingBox(x, offset_height, offset_width, target_height, target_width, use_tensor_inputs) self.assertAllClose(y, y_tf) def _assertRaises(self, x, x_shape, offset_height, offset_width, target_height, target_width, err_msg, use_tensor_inputs_options=None): use_tensor_inputs_options = use_tensor_inputs_options or [False, True] x = np.array(x).reshape(x_shape) for use_tensor_inputs in use_tensor_inputs_options: try: self._CropToBoundingBox(x, offset_height, offset_width, target_height, target_width, use_tensor_inputs) except 
Exception as e:
        if err_msg not in str(e):
          raise
      else:
        raise AssertionError("Exception not raised: %s" % err_msg)

  def _assertShapeInference(self, pre_shape, height, width, post_shape):
    image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
    y = image_ops.crop_to_bounding_box(image, 0, 0, height, width)
    self.assertEqual(y.get_shape().as_list(), post_shape)

  @test_util.run_deprecated_v1
  def testNoOp(self):
    x_shape = [10, 10, 10]
    x = np.random.uniform(size=x_shape)
    self._assertReturns(x, x_shape, 0, 0, x, x_shape)

  @test_util.run_deprecated_v1
  def testCrop(self):
    x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    x_shape = [3, 3, 1]

    offset_height, offset_width = [1, 0]
    y_shape = [2, 3, 1]
    y = [4, 5, 6, 7, 8, 9]
    self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)

    offset_height, offset_width = [0, 1]
    y_shape = [3, 2, 1]
    y = [2, 3, 5, 6, 8, 9]
    self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)

    offset_height, offset_width = [0, 0]
    y_shape = [2, 3, 1]
    y = [1, 2, 3, 4, 5, 6]
    self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)

    offset_height, offset_width = [0, 0]
    y_shape = [3, 2, 1]
    y = [1, 2, 4, 5, 7, 8]
    self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)

  @test_util.run_deprecated_v1
  def testShapeInference(self):
    self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3])
    self._assertShapeInference([59, 69, 3], 55, 66, [55, 66, 3])
    self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3])
    self._assertShapeInference([None, 69, 3], 55, 66, [55, 66, 3])
    self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3])
    self._assertShapeInference([59, None, 3], 55, 66, [55, 66, 3])
    self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3])
    self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None])
    self._assertShapeInference([59, 69, None], 55, 66, [55, 66, None])
    self._assertShapeInference([None, None, None], 55, 66, [55, 66, None])
    self._assertShapeInference(None, 55, 66, [55, 66, None])

  @test_util.run_deprecated_v1
  def testNon3DInput(self):
    # Input image is not 3D.
    x = [0] * 15
    offset_height, offset_width = [0, 0]
    target_height, target_width = [2, 2]

    for x_shape in ([3, 5], [1, 3, 5, 1, 1]):
      self._assertRaises(x, x_shape, offset_height, offset_width,
                         target_height, target_width,
                         "'image' must have either 3 or 4 dimensions.")

  @test_util.run_deprecated_v1
  def testZeroLengthInput(self):
    # Input image has 0-length dimension(s).
    # Each line is a test configuration:
    #   x_shape, target_height, target_width
    test_config = (([0, 2, 2], 1, 1), ([2, 0, 2], 1, 1), ([2, 2, 0], 1, 1),
                   ([0, 2, 2], 0, 1), ([2, 0, 2], 1, 0))
    offset_height, offset_width = [0, 0]
    x = []

    for x_shape, target_height, target_width in test_config:
      self._assertRaises(
          x,
          x_shape,
          offset_height,
          offset_width,
          target_height,
          target_width,
          "all dims of 'image.shape' must be > 0",
          use_tensor_inputs_options=[False])

      # Multiple assertions could fail, but the evaluation order is arbitrary.
      # Match against a generic pattern.
self._assertRaises( x, x_shape, offset_height, offset_width, target_height, target_width, "assertion failed:", use_tensor_inputs_options=[True]) @test_util.run_deprecated_v1 def testBadParams(self): x_shape = [4, 4, 1] x = np.zeros(x_shape) # Each line is a test configuration: # (offset_height, offset_width, target_height, target_width), err_msg test_config = (([-1, 0, 3, 3], "offset_height must be >= 0"), ([ 0, -1, 3, 3 ], "offset_width must be >= 0"), ([0, 0, 0, 3], "target_height must be > 0"), ([0, 0, 3, 0], "target_width must be > 0"), ([2, 0, 3, 3], "height must be >= target + offset"), ([0, 2, 3, 3], "width must be >= target + offset")) for params, err_msg in test_config: self._assertRaises(x, x_shape, *params, err_msg=err_msg) @test_util.run_deprecated_v1 def testNameScope(self): image = array_ops.placeholder(dtypes.float32, shape=[55, 66, 3]) y = image_ops.crop_to_bounding_box(image, 0, 0, 55, 66) self.assertTrue(y.name.startswith("crop_to_bounding_box")) class CentralCropTest(test_util.TensorFlowTestCase): def _assertShapeInference(self, pre_shape, fraction, post_shape): image = array_ops.placeholder(dtypes.float32, shape=pre_shape) y = image_ops.central_crop(image, fraction) if post_shape is None: self.assertEqual(y.get_shape().dims, None) else: self.assertEqual(y.get_shape().as_list(), post_shape) @test_util.run_deprecated_v1 def testNoOp(self): x_shapes = [[13, 9, 3], [5, 13, 9, 3]] for x_shape in x_shapes: x_np = np.ones(x_shape, dtype=np.float32) for use_gpu in [True, False]: with self.cached_session(use_gpu=use_gpu): x = constant_op.constant(x_np, shape=x_shape) y = image_ops.central_crop(x, 1.0) y_tf = self.evaluate(y) self.assertAllEqual(y_tf, x_np) self.assertEqual(y.op.name, x.op.name) def testCropping(self): x_shape = [4, 8, 1] x_np = np.array( [[1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8]], dtype=np.int32).reshape(x_shape) y_np = np.array([[3, 4, 5, 6], [3, 4, 5, 6]]).reshape([2, 4, 1]) for use_gpu in [True, False]: with self.cached_session(use_gpu=use_gpu): x = constant_op.constant(x_np, shape=x_shape) y = image_ops.central_crop(x, 0.5) y_tf = self.evaluate(y) self.assertAllEqual(y_tf, y_np) self.assertAllEqual(y_tf.shape, y_np.shape) x_shape = [2, 4, 8, 1] x_np = np.array( [[1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8], [8, 7, 6, 5, 4, 3, 2, 1], [8, 7, 6, 5, 4, 3, 2, 1], [8, 7, 6, 5, 4, 3, 2, 1], [8, 7, 6, 5, 4, 3, 2, 1]], dtype=np.int32).reshape(x_shape) y_np = np.array([[[3, 4, 5, 6], [3, 4, 5, 6]], [[6, 5, 4, 3], [6, 5, 4, 3]]]).reshape([2, 2, 4, 1]) with self.cached_session(use_gpu=True): x = constant_op.constant(x_np, shape=x_shape) y = image_ops.central_crop(x, 0.5) y_tf = self.evaluate(y) self.assertAllEqual(y_tf, y_np) self.assertAllEqual(y_tf.shape, y_np.shape) @test_util.run_deprecated_v1 def testCropping2(self): # Test case for 10315 x_shapes = [[240, 320, 3], [5, 240, 320, 3]] expected_y_shapes = [[80, 106, 3], [5, 80, 106, 3]] for x_shape, y_shape in zip(x_shapes, expected_y_shapes): x_np = np.zeros(x_shape, dtype=np.int32) y_np = np.zeros(y_shape, dtype=np.int32) for use_gpu in [True, False]: with self.cached_session(use_gpu=use_gpu): x = array_ops.placeholder(shape=x_shape, dtype=dtypes.int32) y = image_ops.central_crop(x, 0.33) y_tf = y.eval(feed_dict={x: x_np}) self.assertAllEqual(y_tf, y_np) self.assertAllEqual(y_tf.shape, y_np.shape) @test_util.run_deprecated_v1 def testShapeInference(self): # Test no-op fraction=1.0, with 3-D 
tensors. self._assertShapeInference([50, 60, 3], 1.0, [50, 60, 3]) self._assertShapeInference([None, 60, 3], 1.0, [None, 60, 3]) self._assertShapeInference([50, None, 3], 1.0, [50, None, 3]) self._assertShapeInference([None, None, 3], 1.0, [None, None, 3]) self._assertShapeInference([50, 60, None], 1.0, [50, 60, None]) self._assertShapeInference([None, None, None], 1.0, [None, None, None]) # Test no-op fraction=0.5, with 3-D tensors. self._assertShapeInference([50, 60, 3], 0.5, [26, 30, 3]) self._assertShapeInference([None, 60, 3], 0.5, [None, 30, 3]) self._assertShapeInference([50, None, 3], 0.5, [26, None, 3]) self._assertShapeInference([None, None, 3], 0.5, [None, None, 3]) self._assertShapeInference([50, 60, None], 0.5, [26, 30, None]) self._assertShapeInference([None, None, None], 0.5, [None, None, None]) # Test no-op fraction=1.0, with 4-D tensors. self._assertShapeInference([5, 50, 60, 3], 1.0, [5, 50, 60, 3]) self._assertShapeInference([5, None, 60, 3], 1.0, [5, None, 60, 3]) self._assertShapeInference([5, 50, None, 3], 1.0, [5, 50, None, 3]) self._assertShapeInference([5, None, None, 3], 1.0, [5, None, None, 3]) self._assertShapeInference([5, 50, 60, None], 1.0, [5, 50, 60, None]) self._assertShapeInference([5, None, None, None], 1.0, [5, None, None, None]) self._assertShapeInference([None, None, None, None], 1.0, [None, None, None, None]) # Test no-op fraction=0.5, with 4-D tensors. self._assertShapeInference([5, 50, 60, 3], 0.5, [5, 26, 30, 3]) self._assertShapeInference([5, None, 60, 3], 0.5, [5, None, 30, 3]) self._assertShapeInference([5, 50, None, 3], 0.5, [5, 26, None, 3]) self._assertShapeInference([5, None, None, 3], 0.5, [5, None, None, 3]) self._assertShapeInference([5, 50, 60, None], 0.5, [5, 26, 30, None]) self._assertShapeInference([5, None, None, None], 0.5, [5, None, None, None]) self._assertShapeInference([None, None, None, None], 0.5, [None, None, None, None]) def testErrorOnInvalidCentralCropFractionValues(self): x_shape = [13, 9, 3] x_np = np.ones(x_shape, dtype=np.float32) for use_gpu in [True, False]: with self.cached_session(use_gpu=use_gpu): x = constant_op.constant(x_np, shape=x_shape) with self.assertRaises(ValueError): _ = image_ops.central_crop(x, 0.0) with self.assertRaises(ValueError): _ = image_ops.central_crop(x, 1.01) def testErrorOnInvalidShapes(self): x_shapes = [None, [], [3], [3, 9], [3, 9, 3, 9, 3]] for x_shape in x_shapes: x_np = np.ones(x_shape, dtype=np.float32) for use_gpu in [True, False]: with self.cached_session(use_gpu=use_gpu): x = constant_op.constant(x_np, shape=x_shape) with self.assertRaises(ValueError): _ = image_ops.central_crop(x, 0.5) @test_util.run_deprecated_v1 def testNameScope(self): x_shape = [13, 9, 3] x_np = np.ones(x_shape, dtype=np.float32) for use_gpu in [True, False]: with self.cached_session(use_gpu=use_gpu): y = image_ops.central_crop(x_np, 1.0) self.assertTrue(y.op.name.startswith("central_crop")) class PadToBoundingBoxTest(test_util.TensorFlowTestCase): def _PadToBoundingBox(self, x, offset_height, offset_width, target_height, target_width, use_tensor_inputs): if use_tensor_inputs: offset_height = ops.convert_to_tensor(offset_height) offset_width = ops.convert_to_tensor(offset_width) target_height = ops.convert_to_tensor(target_height) target_width = ops.convert_to_tensor(target_width) x_tensor = array_ops.placeholder(x.dtype, shape=[None] * x.ndim) feed_dict = {x_tensor: x} else: x_tensor = x feed_dict = {} y = image_ops.pad_to_bounding_box(x_tensor, offset_height, offset_width, target_height, target_width) if 
not use_tensor_inputs: self.assertTrue(y.get_shape().is_fully_defined()) with self.cached_session(use_gpu=True): return y.eval(feed_dict=feed_dict) def _assertReturns(self, x, x_shape, offset_height, offset_width, y, y_shape, use_tensor_inputs_options=None): use_tensor_inputs_options = use_tensor_inputs_options or [False, True] target_height, target_width, _ = y_shape x = np.array(x).reshape(x_shape) y = np.array(y).reshape(y_shape) for use_tensor_inputs in use_tensor_inputs_options: y_tf = self._PadToBoundingBox(x, offset_height, offset_width, target_height, target_width, use_tensor_inputs) self.assertAllClose(y, y_tf) def _assertRaises(self, x, x_shape, offset_height, offset_width, target_height, target_width, err_msg, use_tensor_inputs_options=None): use_tensor_inputs_options = use_tensor_inputs_options or [False, True] x = np.array(x).reshape(x_shape) for use_tensor_inputs in use_tensor_inputs_options: try: self._PadToBoundingBox(x, offset_height, offset_width, target_height, target_width, use_tensor_inputs) except Exception as e: if err_msg not in str(e): raise else: raise AssertionError("Exception not raised: %s" % err_msg) def _assertShapeInference(self, pre_shape, height, width, post_shape): image = array_ops.placeholder(dtypes.float32, shape=pre_shape) y = image_ops.pad_to_bounding_box(image, 0, 0, height, width) self.assertEqual(y.get_shape().as_list(), post_shape) def testInt64(self): x = [1, 2, 3, 4, 5, 6, 7, 8, 9] x_shape = [3, 3, 1] y = [0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9] y_shape = [4, 3, 1] x = np.array(x).reshape(x_shape) y = np.array(y).reshape(y_shape) i = constant_op.constant([1, 0, 4, 3], dtype=dtypes.int64) y_tf = image_ops.pad_to_bounding_box(x, i[0], i[1], i[2], i[3]) with self.cached_session(use_gpu=True): self.assertAllClose(y, self.evaluate(y_tf)) @test_util.run_deprecated_v1 def testNoOp(self): x_shape = [10, 10, 10] x = np.random.uniform(size=x_shape) offset_height, offset_width = [0, 0] self._assertReturns(x, x_shape, offset_height, offset_width, x, x_shape) @test_util.run_deprecated_v1 def testPadding(self): x = [1, 2, 3, 4, 5, 6, 7, 8, 9] x_shape = [3, 3, 1] offset_height, offset_width = [1, 0] y = [0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9] y_shape = [4, 3, 1] self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape) offset_height, offset_width = [0, 1] y = [0, 1, 2, 3, 0, 4, 5, 6, 0, 7, 8, 9] y_shape = [3, 4, 1] self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape) offset_height, offset_width = [0, 0] y = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0] y_shape = [4, 3, 1] self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape) offset_height, offset_width = [0, 0] y = [1, 2, 3, 0, 4, 5, 6, 0, 7, 8, 9, 0] y_shape = [3, 4, 1] self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape) @test_util.run_deprecated_v1 def testShapeInference(self): self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3]) self._assertShapeInference([50, 60, 3], 55, 66, [55, 66, 3]) self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3]) self._assertShapeInference([None, 60, 3], 55, 66, [55, 66, 3]) self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3]) self._assertShapeInference([50, None, 3], 55, 66, [55, 66, 3]) self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3]) self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None]) self._assertShapeInference([50, 60, None], 55, 66, [55, 66, None]) self._assertShapeInference([None, None, None], 55, 66, [55, 66, None]) self._assertShapeInference(None, 55, 66, 
[55, 66, None]) @test_util.run_deprecated_v1 def testNon3DInput(self): # Input image is not 3D x = [0] * 15 offset_height, offset_width = [0, 0] target_height, target_width = [2, 2] for x_shape in ([3, 5], [1, 3, 5, 1, 1]): self._assertRaises(x, x_shape, offset_height, offset_width, target_height, target_width, "'image' must have either 3 or 4 dimensions.") @test_util.run_deprecated_v1 def testZeroLengthInput(self): # Input image has 0-length dimension(s). # Each line is a test configuration: # x_shape, target_height, target_width test_config = (([0, 2, 2], 2, 2), ([2, 0, 2], 2, 2), ([2, 2, 0], 2, 2)) offset_height, offset_width = [0, 0] x = [] for x_shape, target_height, target_width in test_config: self._assertRaises( x, x_shape, offset_height, offset_width, target_height, target_width, "all dims of 'image.shape' must be > 0", use_tensor_inputs_options=[False]) # The original error message does not contain back slashes. However, they # are added by either the assert op or the runtime. If this behavior # changes in the future, the match string will also needs to be changed. self._assertRaises( x, x_shape, offset_height, offset_width, target_height, target_width, "all dims of \\'image.shape\\' must be > 0", use_tensor_inputs_options=[True]) @test_util.run_deprecated_v1 def testBadParams(self): x_shape = [3, 3, 1] x = np.zeros(x_shape) # Each line is a test configuration: # offset_height, offset_width, target_height, target_width, err_msg test_config = ((-1, 0, 4, 4, "offset_height must be >= 0"), (0, -1, 4, 4, "offset_width must be >= 0"), (2, 0, 4, 4, "height must be <= target - offset"), (0, 2, 4, 4, "width must be <= target - offset")) for config_item in test_config: self._assertRaises(x, x_shape, *config_item) @test_util.run_deprecated_v1 def testNameScope(self): image = array_ops.placeholder(dtypes.float32, shape=[55, 66, 3]) y = image_ops.pad_to_bounding_box(image, 0, 0, 55, 66) self.assertTrue(y.op.name.startswith("pad_to_bounding_box")) class SelectDistortedCropBoxTest(test_util.TensorFlowTestCase): def _testSampleDistortedBoundingBox(self, image, bounding_box, min_object_covered, aspect_ratio_range, area_range): original_area = float(np.prod(image.shape)) bounding_box_area = float((bounding_box[3] - bounding_box[1]) * (bounding_box[2] - bounding_box[0])) image_size_np = np.array(image.shape, dtype=np.int32) bounding_box_np = ( np.array(bounding_box, dtype=np.float32).reshape([1, 1, 4])) aspect_ratios = [] area_ratios = [] fraction_object_covered = [] num_iter = 1000 with self.cached_session(use_gpu=True): image_tf = constant_op.constant(image, shape=image.shape) image_size_tf = constant_op.constant( image_size_np, shape=image_size_np.shape) bounding_box_tf = constant_op.constant( bounding_box_np, dtype=dtypes.float32, shape=bounding_box_np.shape) begin, size, _ = image_ops.sample_distorted_bounding_box( image_size=image_size_tf, bounding_boxes=bounding_box_tf, min_object_covered=min_object_covered, aspect_ratio_range=aspect_ratio_range, area_range=area_range) y = array_ops.strided_slice(image_tf, begin, begin + size) for _ in xrange(num_iter): y_tf = self.evaluate(y) crop_height = y_tf.shape[0] crop_width = y_tf.shape[1] aspect_ratio = float(crop_width) / float(crop_height) area = float(crop_width * crop_height) aspect_ratios.append(aspect_ratio) area_ratios.append(area / original_area) fraction_object_covered.append(float(np.sum(y_tf)) / bounding_box_area) # min_object_covered as tensor min_object_covered_placeholder = array_ops.placeholder(dtypes.float32) begin, size, _ = 
image_ops.sample_distorted_bounding_box( image_size=image_size_tf, bounding_boxes=bounding_box_tf, min_object_covered=min_object_covered_placeholder, aspect_ratio_range=aspect_ratio_range, area_range=area_range) y = array_ops.strided_slice(image_tf, begin, begin + size) for _ in xrange(num_iter): y_tf = y.eval(feed_dict={ min_object_covered_placeholder: min_object_covered }) crop_height = y_tf.shape[0] crop_width = y_tf.shape[1] aspect_ratio = float(crop_width) / float(crop_height) area = float(crop_width * crop_height) aspect_ratios.append(aspect_ratio) area_ratios.append(area / original_area) fraction_object_covered.append(float(np.sum(y_tf)) / bounding_box_area) # Ensure that each entry is observed within 3 standard deviations. # num_bins = 10 # aspect_ratio_hist, _ = np.histogram(aspect_ratios, # bins=num_bins, # range=aspect_ratio_range) # mean = np.mean(aspect_ratio_hist) # stddev = np.sqrt(mean) # TODO(wicke, shlens, dga): Restore this test so that it is no longer flaky. # TODO(irving): Since the rejection probability is not independent of the # aspect ratio, the aspect_ratio random value is not exactly uniformly # distributed in [min_aspect_ratio, max_aspect_ratio). This test should be # fixed to reflect the true statistical property, then tightened to enforce # a stricter bound. Or, ideally, the sample_distorted_bounding_box Op # be fixed to not use rejection sampling and generate correctly uniform # aspect ratios. # self.assertAllClose(aspect_ratio_hist, # [mean] * num_bins, atol=3.6 * stddev) # The resulting crop will not be uniformly distributed in area. In practice, # we find that the area skews towards the small sizes. Instead, we perform # a weaker test to ensure that the area ratios are merely within the # specified bounds. self.assertLessEqual(max(area_ratios), area_range[1]) self.assertGreaterEqual(min(area_ratios), area_range[0]) # For reference, here is what the distribution of area ratios look like. area_ratio_hist, _ = np.histogram(area_ratios, bins=10, range=area_range) print("area_ratio_hist ", area_ratio_hist) # Ensure that fraction_object_covered is satisfied. # TODO(wicke, shlens, dga): Restore this test so that it is no longer flaky. # self.assertGreaterEqual(min(fraction_object_covered), min_object_covered) @test_util.run_deprecated_v1 def testWholeImageBoundingBox(self): height = 40 width = 50 image_size = [height, width, 1] bounding_box = [0.0, 0.0, 1.0, 1.0] image = np.arange( 0, np.prod(image_size), dtype=np.int32).reshape(image_size) self._testSampleDistortedBoundingBox( image, bounding_box, min_object_covered=0.1, aspect_ratio_range=(0.75, 1.33), area_range=(0.05, 1.0)) @test_util.run_deprecated_v1 def testWithBoundingBox(self): height = 40 width = 50 x_shape = [height, width, 1] image = np.zeros(x_shape, dtype=np.int32) # Create an object with 1's in a region with area A and require that # the total pixel values >= 0.1 * A. min_object_covered = 0.1 xmin = 2 ymin = 3 xmax = 12 ymax = 13 for x in np.arange(xmin, xmax + 1, 1): for y in np.arange(ymin, ymax + 1, 1): image[x, y] = 1 # Bounding box is specified as (ymin, xmin, ymax, xmax) in # relative coordinates. 
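    # For example, with height=40, width=50 and (ymin, xmin, ymax, xmax) =
    # (3, 2, 13, 12), the relative box is (0.075, 0.04, 0.325, 0.24).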
    bounding_box = (float(ymin) / height, float(xmin) / width,
                    float(ymax) / height, float(xmax) / width)

    self._testSampleDistortedBoundingBox(
        image,
        bounding_box=bounding_box,
        min_object_covered=min_object_covered,
        aspect_ratio_range=(0.75, 1.33),
        area_range=(0.05, 1.0))

  @test_util.run_deprecated_v1
  def testSampleDistortedBoundingBoxShape(self):
    with self.cached_session(use_gpu=True):
      image_size = constant_op.constant(
          [40, 50, 1], shape=[3], dtype=dtypes.int32)
      bounding_box = constant_op.constant(
          [[[0.0, 0.0, 1.0, 1.0]]],
          shape=[1, 1, 4],
          dtype=dtypes.float32,
      )
      begin, end, bbox_for_drawing = image_ops.sample_distorted_bounding_box(
          image_size=image_size,
          bounding_boxes=bounding_box,
          min_object_covered=0.1,
          aspect_ratio_range=(0.75, 1.33),
          area_range=(0.05, 1.0))

      # Test that the shapes are correct.
      self.assertAllEqual([3], begin.get_shape().as_list())
      self.assertAllEqual([3], end.get_shape().as_list())
      self.assertAllEqual([1, 1, 4], bbox_for_drawing.get_shape().as_list())
      # Actual run to make sure shape is correct inside Compute().
      begin = self.evaluate(begin)
      end = self.evaluate(end)
      bbox_for_drawing = self.evaluate(bbox_for_drawing)

      begin, end, bbox_for_drawing = image_ops.sample_distorted_bounding_box(
          image_size=image_size,
          bounding_boxes=bounding_box,
          min_object_covered=array_ops.placeholder(dtypes.float32),
          aspect_ratio_range=(0.75, 1.33),
          area_range=(0.05, 1.0))

      # Test that the shapes are correct.
      self.assertAllEqual([3], begin.get_shape().as_list())
      self.assertAllEqual([3], end.get_shape().as_list())
      self.assertAllEqual([1, 1, 4], bbox_for_drawing.get_shape().as_list())

  def testDefaultMinObjectCovered(self):
    # By default min_object_covered=0.1 if not provided.
    with self.cached_session(use_gpu=True):
      image_size = constant_op.constant(
          [40, 50, 1], shape=[3], dtype=dtypes.int32)
      bounding_box = constant_op.constant(
          [[[0.0, 0.0, 1.0, 1.0]]],
          shape=[1, 1, 4],
          dtype=dtypes.float32,
      )
      begin, end, bbox_for_drawing = image_ops.sample_distorted_bounding_box(
          image_size=image_size,
          bounding_boxes=bounding_box,
          aspect_ratio_range=(0.75, 1.33),
          area_range=(0.05, 1.0))

      self.assertAllEqual([3], begin.get_shape().as_list())
      self.assertAllEqual([3], end.get_shape().as_list())
      self.assertAllEqual([1, 1, 4], bbox_for_drawing.get_shape().as_list())
      # Actual run to make sure shape is correct inside Compute().
      begin = self.evaluate(begin)
      end = self.evaluate(end)
      bbox_for_drawing = self.evaluate(bbox_for_drawing)


class ResizeImagesV2Test(test_util.TensorFlowTestCase):

  METHODS = [
      image_ops.ResizeMethod.BILINEAR, image_ops.ResizeMethod.NEAREST_NEIGHBOR,
      image_ops.ResizeMethod.BICUBIC, image_ops.ResizeMethod.AREA,
      image_ops.ResizeMethod.LANCZOS3, image_ops.ResizeMethod.LANCZOS5,
      image_ops.ResizeMethod.GAUSSIAN, image_ops.ResizeMethod.MITCHELLCUBIC
  ]

  # Some resize methods, such as Gaussian, are non-interpolating in that they
  # change the image even when there is no scale change, so for some tests we
  # only check the values for the value-preserving (interpolating) methods.
INTERPOLATING_METHODS = [ image_ops.ResizeMethod.BILINEAR, image_ops.ResizeMethod.NEAREST_NEIGHBOR, image_ops.ResizeMethod.BICUBIC, image_ops.ResizeMethod.AREA, image_ops.ResizeMethod.LANCZOS3, image_ops.ResizeMethod.LANCZOS5 ] TYPES = [ np.uint8, np.int8, np.uint16, np.int16, np.int32, np.int64, np.float16, np.float32, np.float64 ] def _assertShapeInference(self, pre_shape, size, post_shape): # Try single image resize single_image = array_ops.placeholder(dtypes.float32, shape=pre_shape) y = image_ops.resize_images_v2(single_image, size) self.assertEqual(y.get_shape().as_list(), post_shape) # Try batch images resize with known batch size images = array_ops.placeholder(dtypes.float32, shape=[99] + pre_shape) y = image_ops.resize_images_v2(images, size) self.assertEqual(y.get_shape().as_list(), [99] + post_shape) # Try batch images resize with unknown batch size images = array_ops.placeholder(dtypes.float32, shape=[None] + pre_shape) y = image_ops.resize_images_v2(images, size) self.assertEqual(y.get_shape().as_list(), [None] + post_shape) def shouldRunOnGPU(self, method, nptype): if (method == image_ops.ResizeMethod.NEAREST_NEIGHBOR and nptype in [np.float32, np.float64]): return True else: return False @test_util.disable_xla("align_corners=False not supported by XLA") @test_util.run_deprecated_v1 def testNoOp(self): img_shape = [1, 6, 4, 1] single_shape = [6, 4, 1] # This test is also conducted with int8, so 127 is the maximum # value that can be used. data = [ 127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127, 50, 50, 100, 100, 50, 50, 100, 100 ] target_height = 6 target_width = 4 for nptype in self.TYPES: img_np = np.array(data, dtype=nptype).reshape(img_shape) for method in self.METHODS: with self.cached_session(use_gpu=True): image = constant_op.constant(img_np, shape=img_shape) y = image_ops.resize_images_v2(image, [target_height, target_width], method) yshape = array_ops.shape(y) resized, newshape = self.evaluate([y, yshape]) self.assertAllEqual(img_shape, newshape) if method in self.INTERPOLATING_METHODS: self.assertAllClose(resized, img_np, atol=1e-5) # Resizing with a single image must leave the shape unchanged also. with self.cached_session(use_gpu=True): img_single = img_np.reshape(single_shape) image = constant_op.constant(img_single, shape=single_shape) y = image_ops.resize_images_v2(image, [target_height, target_width], self.METHODS[0]) yshape = array_ops.shape(y) newshape = self.evaluate(yshape) self.assertAllEqual(single_shape, newshape) # half_pixel_centers unsupported in ResizeBilinear @test_util.run_deprecated_v1 @test_util.disable_xla("b/127616992") def testTensorArguments(self): img_shape = [1, 6, 4, 1] single_shape = [6, 4, 1] # This test is also conducted with int8, so 127 is the maximum # value that can be used. data = [ 127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127, 50, 50, 100, 100, 50, 50, 100, 100 ] new_size = array_ops.placeholder(dtypes.int32, shape=(2)) img_np = np.array(data, dtype=np.uint8).reshape(img_shape) for method in self.METHODS: with self.cached_session(use_gpu=True) as sess: image = constant_op.constant(img_np, shape=img_shape) y = image_ops.resize_images_v2(image, new_size, method) yshape = array_ops.shape(y) resized, newshape = sess.run([y, yshape], {new_size: [6, 4]}) self.assertAllEqual(img_shape, newshape) if method in self.INTERPOLATING_METHODS: self.assertAllClose(resized, img_np, atol=1e-5) # Resizing with a single image must leave the shape unchanged also. 
with self.cached_session(use_gpu=True): img_single = img_np.reshape(single_shape) image = constant_op.constant(img_single, shape=single_shape) y = image_ops.resize_images_v2(image, new_size, self.METHODS[0]) yshape = array_ops.shape(y) resized, newshape = sess.run([y, yshape], {new_size: [6, 4]}) self.assertAllEqual(single_shape, newshape) if method in self.INTERPOLATING_METHODS: self.assertAllClose(resized, img_single, atol=1e-5) # Incorrect shape. with self.assertRaises(ValueError): new_size = constant_op.constant(4) _ = image_ops.resize_images_v2(image, new_size, image_ops.ResizeMethod.BILINEAR) with self.assertRaises(ValueError): new_size = constant_op.constant([4]) _ = image_ops.resize_images_v2(image, new_size, image_ops.ResizeMethod.BILINEAR) with self.assertRaises(ValueError): new_size = constant_op.constant([1, 2, 3]) _ = image_ops.resize_images_v2(image, new_size, image_ops.ResizeMethod.BILINEAR) # Incorrect dtypes. with self.assertRaises(ValueError): new_size = constant_op.constant([6.0, 4]) _ = image_ops.resize_images_v2(image, new_size, image_ops.ResizeMethod.BILINEAR) with self.assertRaises(ValueError): _ = image_ops.resize_images_v2(image, [6, 4.0], image_ops.ResizeMethod.BILINEAR) with self.assertRaises(ValueError): _ = image_ops.resize_images_v2(image, [None, 4], image_ops.ResizeMethod.BILINEAR) with self.assertRaises(ValueError): _ = image_ops.resize_images_v2(image, [6, None], image_ops.ResizeMethod.BILINEAR) @test_util.run_deprecated_v1 def testReturnDtype(self): target_shapes = [[6, 4], [3, 2], [ array_ops.placeholder(dtypes.int32), array_ops.placeholder(dtypes.int32) ]] for nptype in self.TYPES: image = array_ops.placeholder(nptype, shape=[1, 6, 4, 1]) for method in self.METHODS: for target_shape in target_shapes: y = image_ops.resize_images_v2(image, target_shape, method) if method == image_ops.ResizeMethod.NEAREST_NEIGHBOR: expected_dtype = image.dtype else: expected_dtype = dtypes.float32 self.assertEqual(y.dtype, expected_dtype) # half_pixel_centers not supported by XLA @test_util.disable_xla("b/127616992") def testSumTensor(self): img_shape = [1, 6, 4, 1] # This test is also conducted with int8, so 127 is the maximum # value that can be used. data = [ 127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127, 50, 50, 100, 100, 50, 50, 100, 100 ] # Test size where width is specified as a tensor which is a sum # of two tensors. width_1 = constant_op.constant(1) width_2 = constant_op.constant(3) width = math_ops.add(width_1, width_2) height = constant_op.constant(6) img_np = np.array(data, dtype=np.uint8).reshape(img_shape) for method in self.METHODS: with self.cached_session(): image = constant_op.constant(img_np, shape=img_shape) y = image_ops.resize_images_v2(image, [height, width], method) yshape = array_ops.shape(y) resized, newshape = self.evaluate([y, yshape]) self.assertAllEqual(img_shape, newshape) if method in self.INTERPOLATING_METHODS: self.assertAllClose(resized, img_np, atol=1e-5) @test_util.disable_xla("align_corners=False not supported by XLA") def testResizeDown(self): # This test is also conducted with int8, so 127 is the maximum # value that can be used. data = [ 127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127, 50, 50, 100, 100, 50, 50, 100, 100 ] expected_data = [127, 64, 64, 127, 50, 100] target_height = 3 target_width = 2 # Test out 3-D and 4-D image shapes. 
img_shapes = [[1, 6, 4, 1], [6, 4, 1]] target_shapes = [[1, target_height, target_width, 1], [target_height, target_width, 1]] for target_shape, img_shape in zip(target_shapes, img_shapes): for nptype in self.TYPES: img_np = np.array(data, dtype=nptype).reshape(img_shape) for method in self.METHODS: if test.is_gpu_available() and self.shouldRunOnGPU(method, nptype): with self.cached_session(use_gpu=True): image = constant_op.constant(img_np, shape=img_shape) y = image_ops.resize_images_v2( image, [target_height, target_width], method) expected = np.array(expected_data).reshape(target_shape) resized = self.evaluate(y) self.assertAllClose(resized, expected, atol=1e-5) @test_util.disable_xla("align_corners=False not supported by XLA") def testResizeUp(self): img_shape = [1, 3, 2, 1] data = [64, 32, 32, 64, 50, 100] target_height = 6 target_width = 4 expected_data = {} expected_data[image_ops.ResizeMethod.BILINEAR] = [ 64.0, 56.0, 40.0, 32.0, 56.0, 52.0, 44.0, 40.0, 40.0, 44.0, 52.0, 56.0, 36.5, 45.625, 63.875, 73.0, 45.5, 56.875, 79.625, 91.0, 50.0, 62.5, 87.5, 100.0 ] expected_data[image_ops.ResizeMethod.NEAREST_NEIGHBOR] = [ 64.0, 64.0, 32.0, 32.0, 64.0, 64.0, 32.0, 32.0, 32.0, 32.0, 64.0, 64.0, 32.0, 32.0, 64.0, 64.0, 50.0, 50.0, 100.0, 100.0, 50.0, 50.0, 100.0, 100.0 ] expected_data[image_ops.ResizeMethod.AREA] = [ 64.0, 64.0, 32.0, 32.0, 64.0, 64.0, 32.0, 32.0, 32.0, 32.0, 64.0, 64.0, 32.0, 32.0, 64.0, 64.0, 50.0, 50.0, 100.0, 100.0, 50.0, 50.0, 100.0, 100.0 ] expected_data[image_ops.ResizeMethod.LANCZOS3] = [ 75.8294, 59.6281, 38.4313, 22.23, 60.6851, 52.0037, 40.6454, 31.964, 35.8344, 41.0779, 47.9383, 53.1818, 24.6968, 43.0769, 67.1244, 85.5045, 35.7939, 56.4713, 83.5243, 104.2017, 44.8138, 65.1949, 91.8603, 112.2413 ] expected_data[image_ops.ResizeMethod.LANCZOS5] = [ 77.5699, 60.0223, 40.6694, 23.1219, 61.8253, 51.2369, 39.5593, 28.9709, 35.7438, 40.8875, 46.5604, 51.7041, 21.5942, 43.5299, 67.7223, 89.658, 32.1213, 56.784, 83.984, 108.6467, 44.5802, 66.183, 90.0082, 111.6109 ] expected_data[image_ops.ResizeMethod.GAUSSIAN] = [ 61.1087, 54.6926, 41.3074, 34.8913, 54.6926, 51.4168, 44.5832, 41.3074, 41.696, 45.2456, 52.6508, 56.2004, 39.4273, 47.0526, 62.9602, 70.5855, 47.3008, 57.3042, 78.173, 88.1764, 51.4771, 62.3638, 85.0752, 95.9619 ] expected_data[image_ops.ResizeMethod.BICUBIC] = [ 70.1453, 59.0252, 36.9748, 25.8547, 59.3195, 53.3386, 41.4789, 35.4981, 36.383, 41.285, 51.0051, 55.9071, 30.2232, 42.151, 65.8032, 77.731, 41.6492, 55.823, 83.9288, 98.1026, 47.0363, 62.2744, 92.4903, 107.7284 ] expected_data[image_ops.ResizeMethod.MITCHELLCUBIC] = [ 66.0382, 56.6079, 39.3921, 29.9618, 56.7255, 51.9603, 43.2611, 38.4959, 39.1828, 43.4664, 51.2864, 55.57, 34.6287, 45.1812, 64.4458, 74.9983, 43.8523, 56.8078, 80.4594, 93.4149, 48.9943, 63.026, 88.6422, 102.6739 ] for nptype in self.TYPES: for method in expected_data: with self.cached_session(use_gpu=True): img_np = np.array(data, dtype=nptype).reshape(img_shape) image = constant_op.constant(img_np, shape=img_shape) y = image_ops.resize_images_v2(image, [target_height, target_width], method) resized = self.evaluate(y) expected = np.array(expected_data[method]).reshape( [1, target_height, target_width, 1]) self.assertAllClose(resized, expected, atol=1e-04) # XLA doesn't implement half_pixel_centers @test_util.disable_xla("b/127616992") def testLegacyBicubicMethodsMatchNewMethods(self): img_shape = [1, 3, 2, 1] data = [64, 32, 32, 64, 50, 100] target_height = 6 target_width = 4 methods_to_test = ((gen_image_ops.resize_bilinear, 
"triangle"), (gen_image_ops.resize_bicubic, "keyscubic")) for legacy_method, new_method in methods_to_test: with self.cached_session(use_gpu=True): img_np = np.array(data, dtype=np.float32).reshape(img_shape) image = constant_op.constant(img_np, shape=img_shape) legacy_result = legacy_method( image, constant_op.constant([target_height, target_width], dtype=dtypes.int32), half_pixel_centers=True) scale = ( constant_op.constant([target_height, target_width], dtype=dtypes.float32) / math_ops.cast(array_ops.shape(image)[1:3], dtype=dtypes.float32)) new_result = gen_image_ops.scale_and_translate( image, constant_op.constant([target_height, target_width], dtype=dtypes.int32), scale, array_ops.zeros([2]), kernel_type=new_method, antialias=False) self.assertAllClose( self.evaluate(legacy_result), self.evaluate(new_result), atol=1e-04) def testResizeDownArea(self): img_shape = [1, 6, 6, 1] data = [ 128, 64, 32, 16, 8, 4, 4, 8, 16, 32, 64, 128, 128, 64, 32, 16, 8, 4, 5, 10, 15, 20, 25, 30, 30, 25, 20, 15, 10, 5, 5, 10, 15, 20, 25, 30 ] img_np = np.array(data, dtype=np.uint8).reshape(img_shape) target_height = 4 target_width = 4 expected_data = [ 73, 33, 23, 39, 73, 33, 23, 39, 14, 16, 19, 21, 14, 16, 19, 21 ] with self.cached_session(use_gpu=True): image = constant_op.constant(img_np, shape=img_shape) y = image_ops.resize_images_v2(image, [target_height, target_width], image_ops.ResizeMethod.AREA) expected = np.array(expected_data).reshape( [1, target_height, target_width, 1]) resized = self.evaluate(y) self.assertAllClose(resized, expected, atol=1) @test_util.disable_xla("align_corners=False not supported by XLA") def testCompareNearestNeighbor(self): if test.is_gpu_available(): input_shape = [1, 5, 6, 3] target_height = 8 target_width = 12 for nptype in [np.float32, np.float64]: img_np = np.arange( 0, np.prod(input_shape), dtype=nptype).reshape(input_shape) with self.cached_session(use_gpu=True): image = constant_op.constant(img_np, shape=input_shape) new_size = constant_op.constant([target_height, target_width]) out_op = image_ops.resize_images_v2( image, new_size, image_ops.ResizeMethod.NEAREST_NEIGHBOR) gpu_val = self.evaluate(out_op) with self.cached_session(use_gpu=False): image = constant_op.constant(img_np, shape=input_shape) new_size = constant_op.constant([target_height, target_width]) out_op = image_ops.resize_images_v2( image, new_size, image_ops.ResizeMethod.NEAREST_NEIGHBOR) cpu_val = self.evaluate(out_op) self.assertAllClose(cpu_val, gpu_val, rtol=1e-5, atol=1e-5) def testCompareBilinear(self): if test.is_gpu_available(): input_shape = [1, 5, 6, 3] target_height = 8 target_width = 12 for nptype in [np.float32, np.float64]: img_np = np.arange( 0, np.prod(input_shape), dtype=nptype).reshape(input_shape) value = {} for use_gpu in [True, False]: with self.cached_session(use_gpu=use_gpu): image = constant_op.constant(img_np, shape=input_shape) new_size = constant_op.constant([target_height, target_width]) out_op = image_ops.resize_images(image, new_size, image_ops.ResizeMethod.BILINEAR) value[use_gpu] = self.evaluate(out_op) self.assertAllClose(value[True], value[False], rtol=1e-5, atol=1e-5) @test_util.run_deprecated_v1 def testShapeInference(self): self._assertShapeInference([50, 60, 3], [55, 66], [55, 66, 3]) self._assertShapeInference([55, 66, 3], [55, 66], [55, 66, 3]) self._assertShapeInference([59, 69, 3], [55, 66], [55, 66, 3]) self._assertShapeInference([50, 69, 3], [55, 66], [55, 66, 3]) self._assertShapeInference([59, 60, 3], [55, 66], [55, 66, 3]) 
self._assertShapeInference([None, 60, 3], [55, 66], [55, 66, 3]) self._assertShapeInference([None, 66, 3], [55, 66], [55, 66, 3]) self._assertShapeInference([None, 69, 3], [55, 66], [55, 66, 3]) self._assertShapeInference([50, None, 3], [55, 66], [55, 66, 3]) self._assertShapeInference([55, None, 3], [55, 66], [55, 66, 3]) self._assertShapeInference([59, None, 3], [55, 66], [55, 66, 3]) self._assertShapeInference([None, None, 3], [55, 66], [55, 66, 3]) self._assertShapeInference([50, 60, None], [55, 66], [55, 66, None]) self._assertShapeInference([55, 66, None], [55, 66], [55, 66, None]) self._assertShapeInference([59, 69, None], [55, 66], [55, 66, None]) self._assertShapeInference([50, 69, None], [55, 66], [55, 66, None]) self._assertShapeInference([59, 60, None], [55, 66], [55, 66, None]) self._assertShapeInference([None, None, None], [55, 66], [55, 66, None]) @test_util.run_deprecated_v1 def testNameScope(self): with self.cached_session(use_gpu=True): single_image = array_ops.placeholder(dtypes.float32, shape=[50, 60, 3]) y = image_ops.resize_images(single_image, [55, 66]) self.assertTrue(y.op.name.startswith("resize")) def _ResizeImageCall(self, x, max_h, max_w, preserve_aspect_ratio, use_tensor_inputs): if use_tensor_inputs: target_max = ops.convert_to_tensor([max_h, max_w]) x_tensor = array_ops.placeholder(x.dtype, shape=[None] * x.ndim) feed_dict = {x_tensor: x} else: target_max = [max_h, max_w] x_tensor = x feed_dict = {} y = image_ops.resize_images( x_tensor, target_max, preserve_aspect_ratio=preserve_aspect_ratio) with self.cached_session(use_gpu=True): return y.eval(feed_dict=feed_dict) def _assertResizeEqual(self, x, x_shape, y, y_shape, preserve_aspect_ratio=True, use_tensor_inputs_options=None): use_tensor_inputs_options = use_tensor_inputs_options or [False, True] target_height, target_width, _ = y_shape x = np.array(x).reshape(x_shape) y = np.array(y).reshape(y_shape) for use_tensor_inputs in use_tensor_inputs_options: y_tf = self._ResizeImageCall(x, target_height, target_width, preserve_aspect_ratio, use_tensor_inputs) self.assertAllClose(y, y_tf) def _assertResizeCheckShape(self, x, x_shape, target_shape, y_shape, preserve_aspect_ratio=True, use_tensor_inputs_options=None): use_tensor_inputs_options = use_tensor_inputs_options or [False, True] target_height, target_width = target_shape x = np.array(x).reshape(x_shape) y = np.zeros(y_shape) for use_tensor_inputs in use_tensor_inputs_options: y_tf = self._ResizeImageCall(x, target_height, target_width, preserve_aspect_ratio, use_tensor_inputs) self.assertShapeEqual(y, ops.convert_to_tensor(y_tf)) @test_util.run_deprecated_v1 def testPreserveAspectRatioMultipleImages(self): x_shape = [10, 100, 100, 10] x = np.random.uniform(size=x_shape) self._assertResizeCheckShape( x, x_shape, [250, 250], [10, 250, 250, 10], preserve_aspect_ratio=False) @test_util.run_deprecated_v1 def testPreserveAspectRatioNoOp(self): x_shape = [10, 10, 10] x = np.random.uniform(size=x_shape) self._assertResizeEqual(x, x_shape, x, x_shape) @test_util.run_deprecated_v1 def testPreserveAspectRatioSmaller(self): x_shape = [100, 100, 10] x = np.random.uniform(size=x_shape) self._assertResizeCheckShape(x, x_shape, [75, 50], [50, 50, 10]) @test_util.run_deprecated_v1 def testPreserveAspectRatioSmallerMultipleImages(self): x_shape = [10, 100, 100, 10] x = np.random.uniform(size=x_shape) self._assertResizeCheckShape(x, x_shape, [75, 50], [10, 50, 50, 10]) @test_util.run_deprecated_v1 def testPreserveAspectRatioLarger(self): x_shape = [100, 100, 10] x = 
np.random.uniform(size=x_shape) self._assertResizeCheckShape(x, x_shape, [150, 200], [150, 150, 10]) @test_util.run_deprecated_v1 def testPreserveAspectRatioSameRatio(self): x_shape = [1920, 1080, 3] x = np.random.uniform(size=x_shape) self._assertResizeCheckShape(x, x_shape, [3840, 2160], [3840, 2160, 3]) @test_util.run_deprecated_v1 def testPreserveAspectRatioSquare(self): x_shape = [299, 299, 3] x = np.random.uniform(size=x_shape) self._assertResizeCheckShape(x, x_shape, [320, 320], [320, 320, 3]) class ResizeImagesTest(test_util.TensorFlowTestCase): METHODS = [ image_ops.ResizeMethodV1.BILINEAR, image_ops.ResizeMethodV1.NEAREST_NEIGHBOR, image_ops.ResizeMethodV1.BICUBIC, image_ops.ResizeMethodV1.AREA ] TYPES = [ np.uint8, np.int8, np.uint16, np.int16, np.int32, np.int64, np.float16, np.float32, np.float64 ] def _assertShapeInference(self, pre_shape, size, post_shape): # Try single image resize single_image = array_ops.placeholder(dtypes.float32, shape=pre_shape) y = image_ops.resize_images(single_image, size) self.assertEqual(y.get_shape().as_list(), post_shape) # Try batch images resize with known batch size images = array_ops.placeholder(dtypes.float32, shape=[99] + pre_shape) y = image_ops.resize_images(images, size) self.assertEqual(y.get_shape().as_list(), [99] + post_shape) # Try batch images resize with unknown batch size images = array_ops.placeholder(dtypes.float32, shape=[None] + pre_shape) y = image_ops.resize_images(images, size) self.assertEqual(y.get_shape().as_list(), [None] + post_shape) def shouldRunOnGPU(self, method, nptype): if (method == image_ops.ResizeMethodV1.NEAREST_NEIGHBOR and nptype in [np.float32, np.float64]): return True else: return False @test_util.disable_xla("align_corners=False not supported by XLA") @test_util.run_deprecated_v1 def testNoOp(self): img_shape = [1, 6, 4, 1] single_shape = [6, 4, 1] # This test is also conducted with int8, so 127 is the maximum # value that can be used. data = [ 127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127, 50, 50, 100, 100, 50, 50, 100, 100 ] target_height = 6 target_width = 4 for nptype in self.TYPES: img_np = np.array(data, dtype=nptype).reshape(img_shape) for method in self.METHODS: with self.cached_session(use_gpu=True) as sess: image = constant_op.constant(img_np, shape=img_shape) y = image_ops.resize_images(image, [target_height, target_width], method) yshape = array_ops.shape(y) resized, newshape = self.evaluate([y, yshape]) self.assertAllEqual(img_shape, newshape) self.assertAllClose(resized, img_np, atol=1e-5) # Resizing with a single image must leave the shape unchanged also. with self.cached_session(use_gpu=True): img_single = img_np.reshape(single_shape) image = constant_op.constant(img_single, shape=single_shape) y = image_ops.resize_images(image, [target_height, target_width], self.METHODS[0]) yshape = array_ops.shape(y) newshape = self.evaluate(yshape) self.assertAllEqual(single_shape, newshape) @test_util.run_deprecated_v1 def testTensorArguments(self): img_shape = [1, 6, 4, 1] single_shape = [6, 4, 1] # This test is also conducted with int8, so 127 is the maximum # value that can be used. 
data = [ 127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127, 50, 50, 100, 100, 50, 50, 100, 100 ] new_size = array_ops.placeholder(dtypes.int32, shape=(2)) img_np = np.array(data, dtype=np.uint8).reshape(img_shape) for method in self.METHODS: with self.cached_session(use_gpu=True) as sess: image = constant_op.constant(img_np, shape=img_shape) y = image_ops.resize_images(image, new_size, method) yshape = array_ops.shape(y) resized, newshape = sess.run([y, yshape], {new_size: [6, 4]}) self.assertAllEqual(img_shape, newshape) self.assertAllClose(resized, img_np, atol=1e-5) # Resizing with a single image must leave the shape unchanged also. with self.cached_session(use_gpu=True): img_single = img_np.reshape(single_shape) image = constant_op.constant(img_single, shape=single_shape) y = image_ops.resize_images(image, new_size, self.METHODS[0]) yshape = array_ops.shape(y) resized, newshape = sess.run([y, yshape], {new_size: [6, 4]}) self.assertAllEqual(single_shape, newshape) self.assertAllClose(resized, img_single, atol=1e-5) # Incorrect shape. with self.assertRaises(ValueError): new_size = constant_op.constant(4) _ = image_ops.resize_images(image, new_size, image_ops.ResizeMethodV1.BILINEAR) with self.assertRaises(ValueError): new_size = constant_op.constant([4]) _ = image_ops.resize_images(image, new_size, image_ops.ResizeMethodV1.BILINEAR) with self.assertRaises(ValueError): new_size = constant_op.constant([1, 2, 3]) _ = image_ops.resize_images(image, new_size, image_ops.ResizeMethodV1.BILINEAR) # Incorrect dtypes. with self.assertRaises(ValueError): new_size = constant_op.constant([6.0, 4]) _ = image_ops.resize_images(image, new_size, image_ops.ResizeMethodV1.BILINEAR) with self.assertRaises(ValueError): _ = image_ops.resize_images(image, [6, 4.0], image_ops.ResizeMethodV1.BILINEAR) with self.assertRaises(ValueError): _ = image_ops.resize_images(image, [None, 4], image_ops.ResizeMethodV1.BILINEAR) with self.assertRaises(ValueError): _ = image_ops.resize_images(image, [6, None], image_ops.ResizeMethodV1.BILINEAR) @test_util.run_deprecated_v1 def testReturnDtype(self): target_shapes = [[6, 4], [3, 2], [ array_ops.placeholder(dtypes.int32), array_ops.placeholder(dtypes.int32) ]] for nptype in self.TYPES: image = array_ops.placeholder(nptype, shape=[1, 6, 4, 1]) for method in self.METHODS: for target_shape in target_shapes: y = image_ops.resize_images(image, target_shape, method) if (method == image_ops.ResizeMethodV1.NEAREST_NEIGHBOR or target_shape == image.shape[1:3]): expected_dtype = image.dtype else: expected_dtype = dtypes.float32 self.assertEqual(y.dtype, expected_dtype) @test_util.disable_xla("align_corners=False not supported by XLA") def testSumTensor(self): img_shape = [1, 6, 4, 1] # This test is also conducted with int8, so 127 is the maximum # value that can be used. data = [ 127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127, 50, 50, 100, 100, 50, 50, 100, 100 ] # Test size where width is specified as a tensor which is a sum # of two tensors. 
width_1 = constant_op.constant(1) width_2 = constant_op.constant(3) width = math_ops.add(width_1, width_2) height = constant_op.constant(6) img_np = np.array(data, dtype=np.uint8).reshape(img_shape) for method in self.METHODS: with self.cached_session() as sess: image = constant_op.constant(img_np, shape=img_shape) y = image_ops.resize_images(image, [height, width], method) yshape = array_ops.shape(y) resized, newshape = self.evaluate([y, yshape]) self.assertAllEqual(img_shape, newshape) self.assertAllClose(resized, img_np, atol=1e-5) @test_util.disable_xla("align_corners=False not supported by XLA") def testResizeDown(self): # This test is also conducted with int8, so 127 is the maximum # value that can be used. data = [ 127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127, 50, 50, 100, 100, 50, 50, 100, 100 ] expected_data = [127, 64, 64, 127, 50, 100] target_height = 3 target_width = 2 # Test out 3-D and 4-D image shapes. img_shapes = [[1, 6, 4, 1], [6, 4, 1]] target_shapes = [[1, target_height, target_width, 1], [target_height, target_width, 1]] for target_shape, img_shape in zip(target_shapes, img_shapes): for nptype in self.TYPES: img_np = np.array(data, dtype=nptype).reshape(img_shape) for method in self.METHODS: if test.is_gpu_available() and self.shouldRunOnGPU(method, nptype): with self.cached_session(use_gpu=True): image = constant_op.constant(img_np, shape=img_shape) y = image_ops.resize_images(image, [target_height, target_width], method) expected = np.array(expected_data).reshape(target_shape) resized = self.evaluate(y) self.assertAllClose(resized, expected, atol=1e-5) @test_util.disable_xla("align_corners=False not supported by XLA") def testResizeUpAlignCornersFalse(self): img_shape = [1, 3, 2, 1] data = [64, 32, 32, 64, 50, 100] target_height = 6 target_width = 4 expected_data = {} expected_data[image_ops.ResizeMethodV1.BILINEAR] = [ 64.0, 48.0, 32.0, 32.0, 48.0, 48.0, 48.0, 48.0, 32.0, 48.0, 64.0, 64.0, 41.0, 61.5, 82.0, 82.0, 50.0, 75.0, 100.0, 100.0, 50.0, 75.0, 100.0, 100.0 ] expected_data[image_ops.ResizeMethodV1.NEAREST_NEIGHBOR] = [ 64.0, 64.0, 32.0, 32.0, 64.0, 64.0, 32.0, 32.0, 32.0, 32.0, 64.0, 64.0, 32.0, 32.0, 64.0, 64.0, 50.0, 50.0, 100.0, 100.0, 50.0, 50.0, 100.0, 100.0 ] expected_data[image_ops.ResizeMethodV1.AREA] = [ 64.0, 64.0, 32.0, 32.0, 64.0, 64.0, 32.0, 32.0, 32.0, 32.0, 64.0, 64.0, 32.0, 32.0, 64.0, 64.0, 50.0, 50.0, 100.0, 100.0, 50.0, 50.0, 100.0, 100.0 ] for nptype in self.TYPES: for method in [ image_ops.ResizeMethodV1.BILINEAR, image_ops.ResizeMethodV1.NEAREST_NEIGHBOR, image_ops.ResizeMethodV1.AREA ]: with self.cached_session(use_gpu=True): img_np = np.array(data, dtype=nptype).reshape(img_shape) image = constant_op.constant(img_np, shape=img_shape) y = image_ops.resize_images( image, [target_height, target_width], method, align_corners=False) resized = self.evaluate(y) expected = np.array(expected_data[method]).reshape( [1, target_height, target_width, 1]) self.assertAllClose(resized, expected, atol=1e-05) def testResizeUpAlignCornersTrue(self): img_shape = [1, 3, 2, 1] data = [6, 3, 3, 6, 6, 9] target_height = 5 target_width = 4 expected_data = {} expected_data[image_ops.ResizeMethodV1.BILINEAR] = [ 6.0, 5.0, 4.0, 3.0, 4.5, 4.5, 4.5, 4.5, 3.0, 4.0, 5.0, 6.0, 4.5, 5.5, 6.5, 7.5, 6.0, 7.0, 8.0, 9.0 ] expected_data[image_ops.ResizeMethodV1.NEAREST_NEIGHBOR] = [ 6.0, 6.0, 3.0, 3.0, 3.0, 3.0, 6.0, 6.0, 3.0, 3.0, 6.0, 6.0, 6.0, 6.0, 9.0, 9.0, 6.0, 6.0, 9.0, 9.0 ] # TODO(b/37749740): Improve alignment of ResizeMethodV1.AREA when # 
align_corners=True. expected_data[image_ops.ResizeMethodV1.AREA] = [ 6.0, 6.0, 6.0, 3.0, 6.0, 6.0, 6.0, 3.0, 3.0, 3.0, 3.0, 6.0, 3.0, 3.0, 3.0, 6.0, 6.0, 6.0, 6.0, 9.0 ] for nptype in self.TYPES: for method in [ image_ops.ResizeMethodV1.BILINEAR, image_ops.ResizeMethodV1.NEAREST_NEIGHBOR, image_ops.ResizeMethodV1.AREA ]: with self.cached_session(use_gpu=True): img_np = np.array(data, dtype=nptype).reshape(img_shape) image = constant_op.constant(img_np, shape=img_shape) y = image_ops.resize_images( image, [target_height, target_width], method, align_corners=True) resized = self.evaluate(y) expected = np.array(expected_data[method]).reshape( [1, target_height, target_width, 1]) self.assertAllClose(resized, expected, atol=1e-05) def testResizeUpBicubic(self): img_shape = [1, 6, 6, 1] data = [ 128, 128, 64, 64, 128, 128, 64, 64, 64, 64, 128, 128, 64, 64, 128, 128, 50, 50, 100, 100, 50, 50, 100, 100, 50, 50, 100, 100, 50, 50, 100, 100, 50, 50, 100, 100 ] img_np = np.array(data, dtype=np.uint8).reshape(img_shape) target_height = 8 target_width = 8 expected_data = [ 128, 135, 96, 55, 64, 114, 134, 128, 78, 81, 68, 52, 57, 118, 144, 136, 55, 49, 79, 109, 103, 89, 83, 84, 74, 70, 95, 122, 115, 69, 49, 55, 100, 105, 75, 43, 50, 89, 105, 100, 57, 54, 74, 96, 91, 65, 55, 58, 70, 69, 75, 81, 80, 72, 69, 70, 105, 112, 75, 36, 45, 92, 111, 105 ] with self.cached_session(use_gpu=True): image = constant_op.constant(img_np, shape=img_shape) y = image_ops.resize_images(image, [target_height, target_width], image_ops.ResizeMethodV1.BICUBIC) resized = self.evaluate(y) expected = np.array(expected_data).reshape( [1, target_height, target_width, 1]) self.assertAllClose(resized, expected, atol=1) def testResizeDownArea(self): img_shape = [1, 6, 6, 1] data = [ 128, 64, 32, 16, 8, 4, 4, 8, 16, 32, 64, 128, 128, 64, 32, 16, 8, 4, 5, 10, 15, 20, 25, 30, 30, 25, 20, 15, 10, 5, 5, 10, 15, 20, 25, 30 ] img_np = np.array(data, dtype=np.uint8).reshape(img_shape) target_height = 4 target_width = 4 expected_data = [ 73, 33, 23, 39, 73, 33, 23, 39, 14, 16, 19, 21, 14, 16, 19, 21 ] with self.cached_session(use_gpu=True): image = constant_op.constant(img_np, shape=img_shape) y = image_ops.resize_images(image, [target_height, target_width], image_ops.ResizeMethodV1.AREA) expected = np.array(expected_data).reshape( [1, target_height, target_width, 1]) resized = self.evaluate(y) self.assertAllClose(resized, expected, atol=1) @test_util.disable_xla("align_corners=False not supported by XLA") def testCompareNearestNeighbor(self): if test.is_gpu_available(): input_shape = [1, 5, 6, 3] target_height = 8 target_width = 12 for nptype in [np.float32, np.float64]: for align_corners in [True, False]: img_np = np.arange( 0, np.prod(input_shape), dtype=nptype).reshape(input_shape) with self.cached_session(use_gpu=True): image = constant_op.constant(img_np, shape=input_shape) new_size = constant_op.constant([target_height, target_width]) out_op = image_ops.resize_images( image, new_size, image_ops.ResizeMethodV1.NEAREST_NEIGHBOR, align_corners=align_corners) gpu_val = self.evaluate(out_op) with self.cached_session(use_gpu=False): image = constant_op.constant(img_np, shape=input_shape) new_size = constant_op.constant([target_height, target_width]) out_op = image_ops.resize_images( image, new_size, image_ops.ResizeMethodV1.NEAREST_NEIGHBOR, align_corners=align_corners) cpu_val = self.evaluate(out_op) self.assertAllClose(cpu_val, gpu_val, rtol=1e-5, atol=1e-5) def testCompareBilinear(self): if test.is_gpu_available(): input_shape = [1, 5, 6, 3] 
target_height = 8 target_width = 12 for nptype in [np.float32, np.float64]: for align_corners in [True, False]: img_np = np.arange( 0, np.prod(input_shape), dtype=nptype).reshape(input_shape) value = {} for use_gpu in [True, False]: with self.cached_session(use_gpu=use_gpu): image = constant_op.constant(img_np, shape=input_shape) new_size = constant_op.constant([target_height, target_width]) out_op = image_ops.resize_images( image, new_size, image_ops.ResizeMethodV1.BILINEAR, align_corners=align_corners) value[use_gpu] = self.evaluate(out_op) self.assertAllClose(value[True], value[False], rtol=1e-5, atol=1e-5) @test_util.run_deprecated_v1 def testShapeInference(self): self._assertShapeInference([50, 60, 3], [55, 66], [55, 66, 3]) self._assertShapeInference([55, 66, 3], [55, 66], [55, 66, 3]) self._assertShapeInference([59, 69, 3], [55, 66], [55, 66, 3]) self._assertShapeInference([50, 69, 3], [55, 66], [55, 66, 3]) self._assertShapeInference([59, 60, 3], [55, 66], [55, 66, 3]) self._assertShapeInference([None, 60, 3], [55, 66], [55, 66, 3]) self._assertShapeInference([None, 66, 3], [55, 66], [55, 66, 3]) self._assertShapeInference([None, 69, 3], [55, 66], [55, 66, 3]) self._assertShapeInference([50, None, 3], [55, 66], [55, 66, 3]) self._assertShapeInference([55, None, 3], [55, 66], [55, 66, 3]) self._assertShapeInference([59, None, 3], [55, 66], [55, 66, 3]) self._assertShapeInference([None, None, 3], [55, 66], [55, 66, 3]) self._assertShapeInference([50, 60, None], [55, 66], [55, 66, None]) self._assertShapeInference([55, 66, None], [55, 66], [55, 66, None]) self._assertShapeInference([59, 69, None], [55, 66], [55, 66, None]) self._assertShapeInference([50, 69, None], [55, 66], [55, 66, None]) self._assertShapeInference([59, 60, None], [55, 66], [55, 66, None]) self._assertShapeInference([None, None, None], [55, 66], [55, 66, None]) @test_util.run_deprecated_v1 def testNameScope(self): img_shape = [1, 3, 2, 1] with self.cached_session(use_gpu=True): single_image = array_ops.placeholder(dtypes.float32, shape=[50, 60, 3]) y = image_ops.resize_images(single_image, [55, 66]) self.assertTrue(y.op.name.startswith("resize")) def _ResizeImageCall(self, x, max_h, max_w, preserve_aspect_ratio, use_tensor_inputs): if use_tensor_inputs: target_max = ops.convert_to_tensor([max_h, max_w]) x_tensor = array_ops.placeholder(x.dtype, shape=[None] * x.ndim) feed_dict = {x_tensor: x} else: target_max = [max_h, max_w] x_tensor = x feed_dict = {} y = image_ops.resize_images(x_tensor, target_max, preserve_aspect_ratio=preserve_aspect_ratio) with self.cached_session(use_gpu=True): return y.eval(feed_dict=feed_dict) def _assertResizeEqual(self, x, x_shape, y, y_shape, preserve_aspect_ratio=True, use_tensor_inputs_options=None): use_tensor_inputs_options = use_tensor_inputs_options or [False, True] target_height, target_width, _ = y_shape x = np.array(x).reshape(x_shape) y = np.array(y).reshape(y_shape) for use_tensor_inputs in use_tensor_inputs_options: y_tf = self._ResizeImageCall(x, target_height, target_width, preserve_aspect_ratio, use_tensor_inputs) self.assertAllClose(y, y_tf) def _assertResizeCheckShape(self, x, x_shape, target_shape, y_shape, preserve_aspect_ratio=True, use_tensor_inputs_options=None): use_tensor_inputs_options = use_tensor_inputs_options or [False, True] target_height, target_width = target_shape x = np.array(x).reshape(x_shape) y = np.zeros(y_shape) for use_tensor_inputs in use_tensor_inputs_options: y_tf = self._ResizeImageCall(x, target_height, target_width, preserve_aspect_ratio, 
use_tensor_inputs) self.assertShapeEqual(y, ops.convert_to_tensor(y_tf)) @test_util.run_deprecated_v1 def testPreserveAspectRatioMultipleImages(self): x_shape = [10, 100, 100, 10] x = np.random.uniform(size=x_shape) self._assertResizeCheckShape(x, x_shape, [250, 250], [10, 250, 250, 10], preserve_aspect_ratio=False) @test_util.run_deprecated_v1 def testPreserveAspectRatioNoOp(self): x_shape = [10, 10, 10] x = np.random.uniform(size=x_shape) self._assertResizeEqual(x, x_shape, x, x_shape) @test_util.run_deprecated_v1 def testPreserveAspectRatioSmaller(self): x_shape = [100, 100, 10] x = np.random.uniform(size=x_shape) self._assertResizeCheckShape(x, x_shape, [75, 50], [50, 50, 10]) @test_util.run_deprecated_v1 def testPreserveAspectRatioSmallerMultipleImages(self): x_shape = [10, 100, 100, 10] x = np.random.uniform(size=x_shape) self._assertResizeCheckShape(x, x_shape, [75, 50], [10, 50, 50, 10]) @test_util.run_deprecated_v1 def testPreserveAspectRatioLarger(self): x_shape = [100, 100, 10] x = np.random.uniform(size=x_shape) self._assertResizeCheckShape(x, x_shape, [150, 200], [150, 150, 10]) @test_util.run_deprecated_v1 def testPreserveAspectRatioSameRatio(self): x_shape = [1920, 1080, 3] x = np.random.uniform(size=x_shape) self._assertResizeCheckShape(x, x_shape, [3840, 2160], [3840, 2160, 3]) @test_util.run_deprecated_v1 def testPreserveAspectRatioSquare(self): x_shape = [299, 299, 3] x = np.random.uniform(size=x_shape) self._assertResizeCheckShape(x, x_shape, [320, 320], [320, 320, 3]) class ResizeImageWithPadV1Test(test_util.TensorFlowTestCase): def _ResizeImageWithPad(self, x, target_height, target_width, use_tensor_inputs): if use_tensor_inputs: target_height = ops.convert_to_tensor(target_height) target_width = ops.convert_to_tensor(target_width) x_tensor = array_ops.placeholder(x.dtype, shape=[None] * x.ndim) feed_dict = {x_tensor: x} else: x_tensor = x feed_dict = {} y = image_ops.resize_image_with_pad_v1(x_tensor, target_height, target_width) if not use_tensor_inputs: self.assertTrue(y.get_shape().is_fully_defined()) with self.cached_session(use_gpu=True): return y.eval(feed_dict=feed_dict) def _assertReturns(self, x, x_shape, y, y_shape, use_tensor_inputs_options=None): use_tensor_inputs_options = use_tensor_inputs_options or [False, True] target_height, target_width, _ = y_shape x = np.array(x).reshape(x_shape) y = np.array(y).reshape(y_shape) for use_tensor_inputs in use_tensor_inputs_options: y_tf = self._ResizeImageWithPad(x, target_height, target_width, use_tensor_inputs) self.assertAllClose(y, y_tf) def _assertRaises(self, x, x_shape, target_height, target_width, err_msg, use_tensor_inputs_options=None): use_tensor_inputs_options = use_tensor_inputs_options or [False, True] x = np.array(x).reshape(x_shape) for use_tensor_inputs in use_tensor_inputs_options: try: self._ResizeImageWithPad(x, target_height, target_width, use_tensor_inputs) except Exception as e: # pylint: disable=broad-except if err_msg not in str(e): raise else: raise AssertionError("Exception not raised: %s" % err_msg) def _assertShapeInference(self, pre_shape, height, width, post_shape): image = array_ops.placeholder(dtypes.float32, shape=pre_shape) y = image_ops.resize_image_with_pad_v1(image, height, width) self.assertEqual(y.get_shape().as_list(), post_shape) @test_util.run_deprecated_v1 def testNoOp(self): x_shape = [10, 10, 10] x = np.random.uniform(size=x_shape) self._assertReturns(x, x_shape, x, x_shape) @test_util.run_deprecated_v1 def testPad(self): # Reduce vertical dimension x = [1, 2, 3, 4, 5, 
6, 7, 8] x_shape = [2, 4, 1] y = [0, 1, 3, 0] y_shape = [1, 4, 1] self._assertReturns(x, x_shape, y, y_shape) # Reduce horizontal dimension x = [1, 2, 3, 4, 5, 6, 7, 8] x_shape = [2, 4, 1] y = [1, 3, 0, 0] y_shape = [2, 2, 1] self._assertReturns(x, x_shape, y, y_shape) x = [1, 2, 3, 4, 5, 6, 7, 8] x_shape = [2, 4, 1] y = [1, 3] y_shape = [1, 2, 1] self._assertReturns(x, x_shape, y, y_shape) # half_pixel_centers not supported by XLA @test_util.for_all_test_methods(test_util.disable_xla, "b/127616992") class ResizeImageWithPadV2Test(test_util.TensorFlowTestCase): def _ResizeImageWithPad(self, x, target_height, target_width, use_tensor_inputs): if use_tensor_inputs: target_height = ops.convert_to_tensor(target_height) target_width = ops.convert_to_tensor(target_width) x_tensor = array_ops.placeholder(x.dtype, shape=[None] * x.ndim) feed_dict = {x_tensor: x} else: x_tensor = x feed_dict = {} y = image_ops.resize_image_with_pad_v2(x_tensor, target_height, target_width) if not use_tensor_inputs: self.assertTrue(y.get_shape().is_fully_defined()) with self.cached_session(use_gpu=True): return y.eval(feed_dict=feed_dict) def _assertReturns(self, x, x_shape, y, y_shape, use_tensor_inputs_options=None): use_tensor_inputs_options = use_tensor_inputs_options or [False, True] target_height, target_width, _ = y_shape x = np.array(x).reshape(x_shape) y = np.array(y).reshape(y_shape) for use_tensor_inputs in use_tensor_inputs_options: y_tf = self._ResizeImageWithPad(x, target_height, target_width, use_tensor_inputs) self.assertAllClose(y, y_tf) def _assertRaises(self, x, x_shape, target_height, target_width, err_msg, use_tensor_inputs_options=None): use_tensor_inputs_options = use_tensor_inputs_options or [False, True] x = np.array(x).reshape(x_shape) for use_tensor_inputs in use_tensor_inputs_options: try: self._ResizeImageWithPad(x, target_height, target_width, use_tensor_inputs) except Exception as e: # pylint: disable=broad-except if err_msg not in str(e): raise else: raise AssertionError("Exception not raised: %s" % err_msg) def _assertShapeInference(self, pre_shape, height, width, post_shape): image = array_ops.placeholder(dtypes.float32, shape=pre_shape) y = image_ops.resize_image_with_pad_v1(image, height, width) self.assertEqual(y.get_shape().as_list(), post_shape) @test_util.run_deprecated_v1 def testNoOp(self): x_shape = [10, 10, 10] x = np.random.uniform(size=x_shape) self._assertReturns(x, x_shape, x, x_shape) @test_util.run_deprecated_v1 def testPad(self): # Reduce vertical dimension x = [1, 2, 3, 4, 5, 6, 7, 8] x_shape = [2, 4, 1] y = [0, 3.5, 5.5, 0] y_shape = [1, 4, 1] self._assertReturns(x, x_shape, y, y_shape) # Reduce horizontal dimension x = [1, 2, 3, 4, 5, 6, 7, 8] x_shape = [2, 4, 1] y = [3.5, 5.5, 0, 0] y_shape = [2, 2, 1] self._assertReturns(x, x_shape, y, y_shape) x = [1, 2, 3, 4, 5, 6, 7, 8] x_shape = [2, 4, 1] y = [3.5, 5.5] y_shape = [1, 2, 1] self._assertReturns(x, x_shape, y, y_shape) class ResizeImageWithCropOrPadTest(test_util.TensorFlowTestCase): def _ResizeImageWithCropOrPad(self, x, target_height, target_width, use_tensor_inputs): if use_tensor_inputs: target_height = ops.convert_to_tensor(target_height) target_width = ops.convert_to_tensor(target_width) x_tensor = array_ops.placeholder(x.dtype, shape=[None] * x.ndim) feed_dict = {x_tensor: x} else: x_tensor = x feed_dict = {} y = image_ops.resize_image_with_crop_or_pad(x_tensor, target_height, target_width) if not use_tensor_inputs: self.assertTrue(y.get_shape().is_fully_defined()) with 
self.cached_session(use_gpu=True): return y.eval(feed_dict=feed_dict) def _assertReturns(self, x, x_shape, y, y_shape, use_tensor_inputs_options=None): use_tensor_inputs_options = use_tensor_inputs_options or [False, True] target_height, target_width, _ = y_shape x = np.array(x).reshape(x_shape) y = np.array(y).reshape(y_shape) for use_tensor_inputs in use_tensor_inputs_options: y_tf = self._ResizeImageWithCropOrPad(x, target_height, target_width, use_tensor_inputs) self.assertAllClose(y, y_tf) def _assertRaises(self, x, x_shape, target_height, target_width, err_msg, use_tensor_inputs_options=None): use_tensor_inputs_options = use_tensor_inputs_options or [False, True] x = np.array(x).reshape(x_shape) for use_tensor_inputs in use_tensor_inputs_options: try: self._ResizeImageWithCropOrPad(x, target_height, target_width, use_tensor_inputs) except Exception as e: if err_msg not in str(e): raise else: raise AssertionError("Exception not raised: %s" % err_msg) def _assertShapeInference(self, pre_shape, height, width, post_shape): image = array_ops.placeholder(dtypes.float32, shape=pre_shape) y = image_ops.resize_image_with_crop_or_pad(image, height, width) self.assertEqual(y.get_shape().as_list(), post_shape) @test_util.run_deprecated_v1 def testNoOp(self): x_shape = [10, 10, 10] x = np.random.uniform(size=x_shape) self._assertReturns(x, x_shape, x, x_shape) @test_util.run_deprecated_v1 def testPad(self): # Pad even along col. x = [1, 2, 3, 4, 5, 6, 7, 8] x_shape = [2, 4, 1] y = [0, 1, 2, 3, 4, 0, 0, 5, 6, 7, 8, 0] y_shape = [2, 6, 1] self._assertReturns(x, x_shape, y, y_shape) # Pad odd along col. x = [1, 2, 3, 4, 5, 6, 7, 8] x_shape = [2, 4, 1] y = [0, 1, 2, 3, 4, 0, 0, 0, 5, 6, 7, 8, 0, 0] y_shape = [2, 7, 1] self._assertReturns(x, x_shape, y, y_shape) # Pad even along row. x = [1, 2, 3, 4, 5, 6, 7, 8] x_shape = [2, 4, 1] y = [0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0] y_shape = [4, 4, 1] self._assertReturns(x, x_shape, y, y_shape) # Pad odd along row. x = [1, 2, 3, 4, 5, 6, 7, 8] x_shape = [2, 4, 1] y = [0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0, 0, 0, 0, 0] y_shape = [5, 4, 1] self._assertReturns(x, x_shape, y, y_shape) @test_util.run_deprecated_v1 def testCrop(self): # Crop even along col. x = [1, 2, 3, 4, 5, 6, 7, 8] x_shape = [2, 4, 1] y = [2, 3, 6, 7] y_shape = [2, 2, 1] self._assertReturns(x, x_shape, y, y_shape) # Crop odd along col. x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12] x_shape = [2, 6, 1] y = [2, 3, 4, 8, 9, 10] y_shape = [2, 3, 1] self._assertReturns(x, x_shape, y, y_shape) # Crop even along row. x = [1, 2, 3, 4, 5, 6, 7, 8] x_shape = [4, 2, 1] y = [3, 4, 5, 6] y_shape = [2, 2, 1] self._assertReturns(x, x_shape, y, y_shape) # Crop odd along row. x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] x_shape = [8, 2, 1] y = [3, 4, 5, 6, 7, 8, 9, 10, 11, 12] y_shape = [5, 2, 1] self._assertReturns(x, x_shape, y, y_shape) @test_util.run_deprecated_v1 def testCropAndPad(self): # Pad along row but crop along col. x = [1, 2, 3, 4, 5, 6, 7, 8] x_shape = [2, 4, 1] y = [0, 0, 2, 3, 6, 7, 0, 0] y_shape = [4, 2, 1] self._assertReturns(x, x_shape, y, y_shape) # Crop along row but pad along col. 
x = [1, 2, 3, 4, 5, 6, 7, 8] x_shape = [4, 2, 1] y = [0, 3, 4, 0, 0, 5, 6, 0] y_shape = [2, 4, 1] self._assertReturns(x, x_shape, y, y_shape) @test_util.run_deprecated_v1 def testShapeInference(self): self._assertShapeInference([50, 60, 3], 55, 66, [55, 66, 3]) self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3]) self._assertShapeInference([59, 69, 3], 55, 66, [55, 66, 3]) self._assertShapeInference([50, 69, 3], 55, 66, [55, 66, 3]) self._assertShapeInference([59, 60, 3], 55, 66, [55, 66, 3]) self._assertShapeInference([None, 60, 3], 55, 66, [55, 66, 3]) self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3]) self._assertShapeInference([None, 69, 3], 55, 66, [55, 66, 3]) self._assertShapeInference([50, None, 3], 55, 66, [55, 66, 3]) self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3]) self._assertShapeInference([59, None, 3], 55, 66, [55, 66, 3]) self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3]) self._assertShapeInference([50, 60, None], 55, 66, [55, 66, None]) self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None]) self._assertShapeInference([59, 69, None], 55, 66, [55, 66, None]) self._assertShapeInference([50, 69, None], 55, 66, [55, 66, None]) self._assertShapeInference([59, 60, None], 55, 66, [55, 66, None]) self._assertShapeInference([None, None, None], 55, 66, [55, 66, None]) self._assertShapeInference(None, 55, 66, [55, 66, None]) @test_util.run_deprecated_v1 def testNon3DInput(self): # Input image is not 3D x = [0] * 15 target_height, target_width = [4, 4] for x_shape in ([3, 5],): self._assertRaises(x, x_shape, target_height, target_width, "'image' must have either 3 or 4 dimensions.") for x_shape in ([1, 3, 5, 1, 1],): self._assertRaises(x, x_shape, target_height, target_width, "'image' must have either 3 or 4 dimensions.") @test_util.run_deprecated_v1 def testZeroLengthInput(self): # Input image has 0-length dimension(s). target_height, target_width = [1, 1] x = [] for x_shape in ([0, 2, 2], [2, 0, 2], [2, 2, 0]): self._assertRaises( x, x_shape, target_height, target_width, "all dims of 'image.shape' must be > 0", use_tensor_inputs_options=[False]) # The original error message does not contain back slashes. However, they # are added by either the assert op or the runtime. If this behavior # changes in the future, the match string will also needs to be changed. 
self._assertRaises( x, x_shape, target_height, target_width, "all dims of \\'image.shape\\' must be > 0", use_tensor_inputs_options=[True]) @test_util.run_deprecated_v1 def testBadParams(self): x_shape = [4, 4, 1] x = np.zeros(x_shape) # target_height <= 0 target_height, target_width = [0, 5] self._assertRaises(x, x_shape, target_height, target_width, "target_height must be > 0") # target_width <= 0 target_height, target_width = [5, 0] self._assertRaises(x, x_shape, target_height, target_width, "target_width must be > 0") @test_util.run_deprecated_v1 def testNameScope(self): image = array_ops.placeholder(dtypes.float32, shape=[50, 60, 3]) y = image_ops.resize_image_with_crop_or_pad(image, 55, 66) self.assertTrue(y.op.name.startswith("resize_image_with_crop_or_pad")) def _SimpleColorRamp(): """Build a simple color ramp RGB image.""" w, h = 256, 200 i = np.arange(h)[:, None] j = np.arange(w) image = np.empty((h, w, 3), dtype=np.uint8) image[:, :, 0] = i image[:, :, 1] = j image[:, :, 2] = (i + j) >> 1 return image class JpegTest(test_util.TensorFlowTestCase): # TODO(irving): Add self.assertAverageLess or similar to test_util def averageError(self, image0, image1): self.assertEqual(image0.shape, image1.shape) image0 = image0.astype(int) # Avoid overflow return np.abs(image0 - image1).sum() / np.prod(image0.shape) def testExisting(self): # Read a real jpeg and verify shape path = ("tensorflow/core/lib/jpeg/testdata/" "jpeg_merge_test1.jpg") with self.cached_session(use_gpu=True) as sess: jpeg0 = io_ops.read_file(path) image0 = image_ops.decode_jpeg(jpeg0) image1 = image_ops.decode_jpeg(image_ops.encode_jpeg(image0)) jpeg0, image0, image1 = self.evaluate([jpeg0, image0, image1]) self.assertEqual(len(jpeg0), 3771) self.assertEqual(image0.shape, (256, 128, 3)) self.assertLess(self.averageError(image0, image1), 1.4) def testCmyk(self): # Confirm that CMYK reads in as RGB base = "tensorflow/core/lib/jpeg/testdata" rgb_path = os.path.join(base, "jpeg_merge_test1.jpg") cmyk_path = os.path.join(base, "jpeg_merge_test1_cmyk.jpg") shape = 256, 128, 3 for channels in 3, 0: with self.cached_session(use_gpu=True) as sess: rgb = image_ops.decode_jpeg( io_ops.read_file(rgb_path), channels=channels) cmyk = image_ops.decode_jpeg( io_ops.read_file(cmyk_path), channels=channels) rgb, cmyk = self.evaluate([rgb, cmyk]) self.assertEqual(rgb.shape, shape) self.assertEqual(cmyk.shape, shape) error = self.averageError(rgb, cmyk) self.assertLess(error, 4) def testCropAndDecodeJpeg(self): with self.cached_session() as sess: # Encode it, then decode it, then encode it base = "tensorflow/core/lib/jpeg/testdata" jpeg0 = io_ops.read_file(os.path.join(base, "jpeg_merge_test1.jpg")) h, w, _ = 256, 128, 3 crop_windows = [[0, 0, 5, 5], [0, 0, 5, w], [0, 0, h, 5], [h - 6, w - 5, 6, 5], [6, 5, 15, 10], [0, 0, h, w]] for crop_window in crop_windows: # Explicit two stages: decode + crop. image1 = image_ops.decode_jpeg(jpeg0) y, x, h, w = crop_window image1_crop = image_ops.crop_to_bounding_box(image1, y, x, h, w) # Combined decode+crop. image2 = image_ops.decode_and_crop_jpeg(jpeg0, crop_window) # Combined decode+crop should have the same shape inference self.assertAllEqual(image1_crop.get_shape().as_list(), image2.get_shape().as_list()) # CropAndDecode should be equal to DecodeJpeg+Crop. 
image1_crop, image2 = self.evaluate([image1_crop, image2]) self.assertAllEqual(image1_crop, image2) @test_util.run_deprecated_v1 def testCropAndDecodeJpegWithInvalidCropWindow(self): with self.cached_session() as sess: # Encode it, then decode it, then encode it base = "tensorflow/core/lib/jpeg/testdata" jpeg0 = io_ops.read_file(os.path.join(base, "jpeg_merge_test1.jpg")) h, w, _ = 256, 128, 3 # Invalid crop windows. crop_windows = [[-1, 11, 11, 11], [11, -1, 11, 11], [11, 11, -1, 11], [11, 11, 11, -1], [11, 11, 0, 11], [11, 11, 11, 0], [0, 0, h + 1, w], [0, 0, h, w + 1]] for crop_window in crop_windows: result = image_ops.decode_and_crop_jpeg(jpeg0, crop_window) with self.assertRaisesWithPredicateMatch( errors.InvalidArgumentError, lambda e: "Invalid JPEG data or crop window" in str(e)): self.evaluate(result) def testSynthetic(self): with self.cached_session(use_gpu=True) as sess: # Encode it, then decode it, then encode it image0 = constant_op.constant(_SimpleColorRamp()) jpeg0 = image_ops.encode_jpeg(image0) image1 = image_ops.decode_jpeg(jpeg0, dct_method="INTEGER_ACCURATE") image2 = image_ops.decode_jpeg( image_ops.encode_jpeg(image1), dct_method="INTEGER_ACCURATE") jpeg0, image0, image1, image2 = self.evaluate( [jpeg0, image0, image1, image2]) # The decoded-encoded image should be similar to the input self.assertLess(self.averageError(image0, image1), 0.6) # We should be very close to a fixpoint self.assertLess(self.averageError(image1, image2), 0.02) # Smooth ramps compress well (input size is 153600) self.assertGreaterEqual(len(jpeg0), 5000) self.assertLessEqual(len(jpeg0), 6000) def testSyntheticFasterAlgorithm(self): with self.cached_session(use_gpu=True) as sess: # Encode it, then decode it, then encode it image0 = constant_op.constant(_SimpleColorRamp()) jpeg0 = image_ops.encode_jpeg(image0) image1 = image_ops.decode_jpeg(jpeg0, dct_method="INTEGER_FAST") image2 = image_ops.decode_jpeg( image_ops.encode_jpeg(image1), dct_method="INTEGER_FAST") jpeg0, image0, image1, image2 = self.evaluate( [jpeg0, image0, image1, image2]) # The decoded-encoded image should be similar to the input, but # note this is worse than the slower algorithm because it is # less accurate. self.assertLess(self.averageError(image0, image1), 0.95) # Repeated compression / decompression will have a higher error # with a lossier algorithm. self.assertLess(self.averageError(image1, image2), 1.05) # Smooth ramps compress well (input size is 153600) self.assertGreaterEqual(len(jpeg0), 5000) self.assertLessEqual(len(jpeg0), 6000) def testDefaultDCTMethodIsIntegerFast(self): with self.cached_session(use_gpu=True) as sess: # Compare decoding with both dct_option=INTEGER_FAST and # default. They should be the same. image0 = constant_op.constant(_SimpleColorRamp()) jpeg0 = image_ops.encode_jpeg(image0) image1 = image_ops.decode_jpeg(jpeg0, dct_method="INTEGER_FAST") image2 = image_ops.decode_jpeg(jpeg0) image1, image2 = self.evaluate([image1, image2]) # The images should be the same. self.assertAllClose(image1, image2) @test_util.run_deprecated_v1 def testShape(self): with self.cached_session(use_gpu=True) as sess: jpeg = constant_op.constant("nonsense") for channels in 0, 1, 3: image = image_ops.decode_jpeg(jpeg, channels=channels) self.assertEqual(image.get_shape().as_list(), [None, None, channels or None]) @test_util.run_deprecated_v1 def testExtractJpegShape(self): # Read a real jpeg and verify shape. 
    path = ("tensorflow/core/lib/jpeg/testdata/"
            "jpeg_merge_test1.jpg")
    with self.cached_session(use_gpu=True) as sess:
      jpeg = io_ops.read_file(path)
      # Extract shape without decoding.
      [image_shape] = sess.run([image_ops.extract_jpeg_shape(jpeg)])
      self.assertEqual(image_shape.tolist(), [256, 128, 3])

  @test_util.run_deprecated_v1
  def testExtractJpegShapeforCmyk(self):
    # Read a cmyk jpeg image, and verify its shape.
    path = ("tensorflow/core/lib/jpeg/testdata/"
            "jpeg_merge_test1_cmyk.jpg")
    with self.cached_session(use_gpu=True) as sess:
      jpeg = io_ops.read_file(path)
      [image_shape] = sess.run([image_ops.extract_jpeg_shape(jpeg)])
      # Cmyk jpeg image has 4 channels.
      self.assertEqual(image_shape.tolist(), [256, 128, 4])

  def testRandomJpegQuality(self):
    # A previous implementation of random_jpeg_quality had a bug.
    # This unit test tests the fixed version, but due to forward compatibility
    # this test can only be run when the fixed version is used.
    if compat.forward_compatible(2019, 4, 4):
      # Test jpeg quality dynamic randomization.
      with ops.Graph().as_default(), self.test_session():
        np.random.seed(7)
        path = ("tensorflow/core/lib/jpeg/testdata/medium.jpg")
        jpeg = io_ops.read_file(path)
        image = image_ops.decode_jpeg(jpeg)
        random_jpeg_image = image_ops.random_jpeg_quality(image, 40, 100)
        with self.cached_session(use_gpu=True) as sess:
          # Test randomization.
          random_jpeg_images = [sess.run(random_jpeg_image) for _ in range(5)]
          are_images_equal = []
          for i in range(1, len(random_jpeg_images)):
            # Most of them should be different if randomization is occurring
            # correctly.
            are_images_equal.append(
                np.array_equal(random_jpeg_images[0], random_jpeg_images[i]))
          self.assertFalse(all(are_images_equal))

  def testAdjustJpegQuality(self):
    # Test that image_ops.adjust_jpeg_quality works when the jpeg quality
    # is an int (not a tensor), for backward compatibility.
with ops.Graph().as_default(), self.test_session(): np.random.seed(7) jpeg_quality = np.random.randint(40, 100) path = ("tensorflow/core/lib/jpeg/testdata/medium.jpg") jpeg = io_ops.read_file(path) image = image_ops.decode_jpeg(jpeg) adjust_jpeg_quality_image = image_ops.adjust_jpeg_quality( image, jpeg_quality) with self.cached_session(use_gpu=True) as sess: sess.run(adjust_jpeg_quality_image) class PngTest(test_util.TensorFlowTestCase): def testExisting(self): # Read some real PNGs, converting to different channel numbers prefix = "tensorflow/core/lib/png/testdata/" inputs = ((1, "lena_gray.png"), (4, "lena_rgba.png"), (3, "lena_palette.png"), (4, "lena_palette_trns.png")) for channels_in, filename in inputs: for channels in 0, 1, 3, 4: with self.cached_session(use_gpu=True) as sess: png0 = io_ops.read_file(prefix + filename) image0 = image_ops.decode_png(png0, channels=channels) png0, image0 = self.evaluate([png0, image0]) self.assertEqual(image0.shape, (26, 51, channels or channels_in)) if channels == channels_in: image1 = image_ops.decode_png(image_ops.encode_png(image0)) self.assertAllEqual(image0, self.evaluate(image1)) def testSynthetic(self): with self.cached_session(use_gpu=True) as sess: # Encode it, then decode it image0 = constant_op.constant(_SimpleColorRamp()) png0 = image_ops.encode_png(image0, compression=7) image1 = image_ops.decode_png(png0) png0, image0, image1 = self.evaluate([png0, image0, image1]) # PNG is lossless self.assertAllEqual(image0, image1) # Smooth ramps compress well, but not too well self.assertGreaterEqual(len(png0), 400) self.assertLessEqual(len(png0), 750) def testSyntheticUint16(self): with self.cached_session(use_gpu=True) as sess: # Encode it, then decode it image0 = constant_op.constant(_SimpleColorRamp(), dtype=dtypes.uint16) png0 = image_ops.encode_png(image0, compression=7) image1 = image_ops.decode_png(png0, dtype=dtypes.uint16) png0, image0, image1 = self.evaluate([png0, image0, image1]) # PNG is lossless self.assertAllEqual(image0, image1) # Smooth ramps compress well, but not too well self.assertGreaterEqual(len(png0), 800) self.assertLessEqual(len(png0), 1500) def testSyntheticTwoChannel(self): with self.cached_session(use_gpu=True) as sess: # Strip the b channel from an rgb image to get a two-channel image. gray_alpha = _SimpleColorRamp()[:, :, 0:2] image0 = constant_op.constant(gray_alpha) png0 = image_ops.encode_png(image0, compression=7) image1 = image_ops.decode_png(png0) png0, image0, image1 = self.evaluate([png0, image0, image1]) self.assertEqual(2, image0.shape[-1]) self.assertAllEqual(image0, image1) def testSyntheticTwoChannelUint16(self): with self.cached_session(use_gpu=True) as sess: # Strip the b channel from an rgb image to get a two-channel image. 
gray_alpha = _SimpleColorRamp()[:, :, 0:2] image0 = constant_op.constant(gray_alpha, dtype=dtypes.uint16) png0 = image_ops.encode_png(image0, compression=7) image1 = image_ops.decode_png(png0, dtype=dtypes.uint16) png0, image0, image1 = self.evaluate([png0, image0, image1]) self.assertEqual(2, image0.shape[-1]) self.assertAllEqual(image0, image1) @test_util.run_deprecated_v1 def testShape(self): with self.cached_session(use_gpu=True): png = constant_op.constant("nonsense") for channels in 0, 1, 3: image = image_ops.decode_png(png, channels=channels) self.assertEqual(image.get_shape().as_list(), [None, None, channels or None]) class GifTest(test_util.TensorFlowTestCase): def _testValid(self, filename): # Read some real GIFs prefix = "tensorflow/core/lib/gif/testdata/" WIDTH = 20 HEIGHT = 40 STRIDE = 5 shape = (12, HEIGHT, WIDTH, 3) with self.cached_session(use_gpu=True) as sess: gif0 = io_ops.read_file(prefix + filename) image0 = image_ops.decode_gif(gif0) gif0, image0 = self.evaluate([gif0, image0]) self.assertEqual(image0.shape, shape) for frame_idx, frame in enumerate(image0): gt = np.zeros(shape[1:], dtype=np.uint8) start = frame_idx * STRIDE end = (frame_idx + 1) * STRIDE print(frame_idx) if end <= WIDTH: gt[:, start:end, :] = 255 else: start -= WIDTH end -= WIDTH gt[start:end, :, :] = 255 self.assertAllClose(frame, gt) def testValid(self): self._testValid("scan.gif") self._testValid("optimized.gif") @test_util.run_deprecated_v1 def testShape(self): with self.cached_session(use_gpu=True) as sess: gif = constant_op.constant("nonsense") image = image_ops.decode_gif(gif) self.assertEqual(image.get_shape().as_list(), [None, None, None, 3]) class ConvertImageTest(test_util.TensorFlowTestCase): def _convert(self, original, original_dtype, output_dtype, expected): x_np = np.array(original, dtype=original_dtype.as_numpy_dtype()) y_np = np.array(expected, dtype=output_dtype.as_numpy_dtype()) with self.cached_session(use_gpu=True): image = constant_op.constant(x_np) y = image_ops.convert_image_dtype(image, output_dtype) self.assertTrue(y.dtype == output_dtype) self.assertAllClose(y.eval(), y_np, atol=1e-5) if output_dtype in [ dtypes.float32, dtypes.float64, dtypes.int32, dtypes.int64 ]: y_saturate = image_ops.convert_image_dtype( image, output_dtype, saturate=True) self.assertTrue(y_saturate.dtype == output_dtype) self.assertAllClose(y_saturate.eval(), y_np, atol=1e-5) @test_util.run_deprecated_v1 def testNoConvert(self): # Make sure converting to the same data type creates only an identity op with self.cached_session(use_gpu=True): image = constant_op.constant([1], dtype=dtypes.uint8) image_ops.convert_image_dtype(image, dtypes.uint8) y = image_ops.convert_image_dtype(image, dtypes.uint8) self.assertEquals(y.op.type, "Identity") self.assertEquals(y.op.inputs[0], image) @test_util.run_deprecated_v1 def testConvertBetweenInteger(self): # Make sure converting to between integer types scales appropriately with self.cached_session(use_gpu=True): self._convert([0, 255], dtypes.uint8, dtypes.int16, [0, 255 * 128]) self._convert([0, 32767], dtypes.int16, dtypes.uint8, [0, 255]) self._convert([0, 2**32], dtypes.int64, dtypes.int32, [0, 1]) self._convert([0, 1], dtypes.int32, dtypes.int64, [0, 2**32]) @test_util.run_deprecated_v1 def testConvertBetweenFloat(self): # Make sure converting to between float types does nothing interesting with self.cached_session(use_gpu=True): self._convert([-1.0, 0, 1.0, 200000], dtypes.float32, dtypes.float64, [-1.0, 0, 1.0, 200000]) self._convert([-1.0, 0, 1.0, 200000], 
dtypes.float64, dtypes.float32, [-1.0, 0, 1.0, 200000]) @test_util.run_deprecated_v1 def testConvertBetweenIntegerAndFloat(self): # Make sure converting from and to a float type scales appropriately with self.cached_session(use_gpu=True): self._convert([0, 1, 255], dtypes.uint8, dtypes.float32, [0, 1.0 / 255.0, 1]) self._convert([0, 1.1 / 255.0, 1], dtypes.float32, dtypes.uint8, [0, 1, 255]) @test_util.run_deprecated_v1 def testConvertBetweenInt16AndInt8(self): with self.cached_session(use_gpu=True): # uint8, uint16 self._convert([0, 255 * 256], dtypes.uint16, dtypes.uint8, [0, 255]) self._convert([0, 255], dtypes.uint8, dtypes.uint16, [0, 255 * 256]) # int8, uint16 self._convert([0, 127 * 2 * 256], dtypes.uint16, dtypes.int8, [0, 127]) self._convert([0, 127], dtypes.int8, dtypes.uint16, [0, 127 * 2 * 256]) # int16, uint16 self._convert([0, 255 * 256], dtypes.uint16, dtypes.int16, [0, 255 * 128]) self._convert([0, 255 * 128], dtypes.int16, dtypes.uint16, [0, 255 * 256]) class TotalVariationTest(test_util.TensorFlowTestCase): """Tests the function total_variation() in image_ops. We test a few small handmade examples, as well as some larger examples using an equivalent numpy implementation of the total_variation() function. We do NOT test for overflows and invalid / edge-case arguments. """ def _test(self, x_np, y_np): """Test that the TensorFlow implementation of total_variation(x_np) calculates the values in y_np. Note that these may be float-numbers so we only test for approximate equality within some narrow error-bound. """ # Create a TensorFlow session. with self.cached_session(use_gpu=True): # Add a constant to the TensorFlow graph that holds the input. x_tf = constant_op.constant(x_np, shape=x_np.shape) # Add ops for calculating the total variation using TensorFlow. y = image_ops.total_variation(images=x_tf) # Run the TensorFlow session to calculate the result. y_tf = self.evaluate(y) # Assert that the results are as expected within # some small error-bound in case they are float-values. self.assertAllClose(y_tf, y_np) def _total_variation_np(self, x_np): """Calculate the total variation of x_np using numpy. This implements the same function as TensorFlow but using numpy instead. Args: x_np: Numpy array with 3 or 4 dimensions. """ dim = len(x_np.shape) if dim == 3: # Calculate differences for neighboring pixel-values using slices. dif1 = x_np[1:, :, :] - x_np[:-1, :, :] dif2 = x_np[:, 1:, :] - x_np[:, :-1, :] # Sum for all axis. sum_axis = None elif dim == 4: # Calculate differences for neighboring pixel-values using slices. dif1 = x_np[:, 1:, :, :] - x_np[:, :-1, :, :] dif2 = x_np[:, :, 1:, :] - x_np[:, :, :-1, :] # Only sum for the last 3 axis. sum_axis = (1, 2, 3) else: # This should not occur in this test-code. pass tot_var = np.sum(np.abs(dif1), axis=sum_axis) + \ np.sum(np.abs(dif2), axis=sum_axis) return tot_var def _test_tensorflow_vs_numpy(self, x_np): """Test the TensorFlow implementation against a numpy implementation. Args: x_np: Numpy array with 3 or 4 dimensions. """ # Calculate the y-values using the numpy implementation. y_np = self._total_variation_np(x_np) self._test(x_np, y_np) def _generateArray(self, shape): """Generate an array of the given shape for use in testing. The numbers are calculated as the cumulative sum, which causes the difference between neighboring numbers to vary.""" # Flattened length of the array. 
flat_len = np.prod(shape) a = np.array(range(flat_len), dtype=int) a = np.cumsum(a) a = a.reshape(shape) return a # TODO(b/133851381): re-enable this test. def disabledtestTotalVariationNumpy(self): """Test the TensorFlow implementation against a numpy implementation. The two implementations are very similar so it is possible that both have the same bug, which would not be detected by this test. It is therefore necessary to test with manually crafted data as well.""" # Generate a test-array. # This is an 'image' with 100x80 pixels and 3 color channels. a = self._generateArray(shape=(100, 80, 3)) # Test the TensorFlow implementation vs. numpy implementation. # We use a numpy implementation to check the results that are # calculated using TensorFlow are correct. self._test_tensorflow_vs_numpy(a) self._test_tensorflow_vs_numpy(a + 1) self._test_tensorflow_vs_numpy(-a) self._test_tensorflow_vs_numpy(1.1 * a) # Expand to a 4-dim array. b = a[np.newaxis, :] # Combine several variations of the image into a single 4-dim array. multi = np.vstack((b, b + 1, -b, 1.1 * b)) # Test that the TensorFlow function can also handle 4-dim arrays. self._test_tensorflow_vs_numpy(multi) def testTotalVariationHandmade(self): """Test the total variation for a few handmade examples.""" # We create an image that is 2x2 pixels with 3 color channels. # The image is very small so we can check the result by hand. # Red color channel. # The following are the sum of absolute differences between the pixels. # sum row dif = (4-1) + (7-2) = 3 + 5 = 8 # sum col dif = (2-1) + (7-4) = 1 + 3 = 4 r = [[1, 2], [4, 7]] # Blue color channel. # sum row dif = 18 + 29 = 47 # sum col dif = 7 + 18 = 25 g = [[11, 18], [29, 47]] # Green color channel. # sum row dif = 120 + 193 = 313 # sum col dif = 47 + 120 = 167 b = [[73, 120], [193, 313]] # Combine the 3 color channels into a single 3-dim array. # The shape is (2, 2, 3) corresponding to (height, width and color). a = np.dstack((r, g, b)) # Total variation for this image. # Sum of all pixel differences = 8 + 4 + 47 + 25 + 313 + 167 = 564 tot_var = 564 # Calculate the total variation using TensorFlow and assert it is correct. self._test(a, tot_var) # If we add 1 to all pixel-values then the total variation is unchanged. self._test(a + 1, tot_var) # If we negate all pixel-values then the total variation is unchanged. self._test(-a, tot_var) # Scale the pixel-values by a float. This scales the total variation as # well. b = 1.1 * a self._test(b, 1.1 * tot_var) # Scale by another float. c = 1.2 * a self._test(c, 1.2 * tot_var) # Combine these 3 images into a single array of shape (3, 2, 2, 3) # where the first dimension is for the image-number. multi = np.vstack((a[np.newaxis, :], b[np.newaxis, :], c[np.newaxis, :])) # Check that TensorFlow correctly calculates the total variation # for each image individually and returns the correct array. 
self._test(multi, tot_var * np.array([1.0, 1.1, 1.2])) class FormatTest(test_util.TensorFlowTestCase): @test_util.run_deprecated_v1 def testFormats(self): prefix = "tensorflow/core/lib" paths = ("png/testdata/lena_gray.png", "jpeg/testdata/jpeg_merge_test1.jpg", "gif/testdata/lena.gif") decoders = { "jpeg": functools.partial(image_ops.decode_jpeg, channels=3), "png": functools.partial(image_ops.decode_png, channels=3), "gif": lambda s: array_ops.squeeze(image_ops.decode_gif(s), axis=0), } with self.cached_session(): for path in paths: contents = io_ops.read_file(os.path.join(prefix, path)).eval() images = {} for name, decode in decoders.items(): image = decode(contents).eval() self.assertEqual(image.ndim, 3) for prev_name, prev in images.items(): print("path %s, names %s %s, shapes %s %s" % (path, name, prev_name, image.shape, prev.shape)) self.assertAllEqual(image, prev) images[name] = image def testError(self): path = "tensorflow/core/lib/gif/testdata/scan.gif" with self.cached_session(): for decode in image_ops.decode_jpeg, image_ops.decode_png: with self.assertRaisesOpError(r"Got 12 frames"): decode(io_ops.read_file(path)).eval() class NonMaxSuppressionTest(test_util.TensorFlowTestCase): @test_util.run_deprecated_v1 def NonMaxSuppressionTest(self): boxes_np = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9], [0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]] scores_np = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3] max_output_size_np = 3 iou_threshold_np = 0.5 with self.cached_session(): boxes = constant_op.constant(boxes_np) scores = constant_op.constant(scores_np) max_output_size = constant_op.constant(max_output_size_np) iou_threshold = constant_op.constant(iou_threshold_np) selected_indices = image_ops.non_max_suppression( boxes, scores, max_output_size, iou_threshold) self.assertAllClose(selected_indices.eval(), [3, 0, 5]) @test_util.run_deprecated_v1 def testInvalidShape(self): # The boxes should be 2D of shape [num_boxes, 4]. with self.assertRaisesRegexp(ValueError, "Shape must be rank 2 but is rank 1"): boxes = constant_op.constant([0.0, 0.0, 1.0, 1.0]) scores = constant_op.constant([0.9]) image_ops.non_max_suppression(boxes, scores, 3, 0.5) with self.assertRaisesRegexp(ValueError, "Dimension must be 4 but is 3"): boxes = constant_op.constant([[0.0, 0.0, 1.0]]) scores = constant_op.constant([0.9]) image_ops.non_max_suppression(boxes, scores, 3, 0.5) # The boxes is of shape [num_boxes, 4], and the scores is # of shape [num_boxes]. So an error will be thrown. with self.assertRaisesRegexp(ValueError, "Dimensions must be equal, but are 1 and 2"): boxes = constant_op.constant([[0.0, 0.0, 1.0, 1.0]]) scores = constant_op.constant([0.9, 0.75]) image_ops.non_max_suppression(boxes, scores, 3, 0.5) # The scores should be 1D of shape [num_boxes]. with self.assertRaisesRegexp(ValueError, "Shape must be rank 1 but is rank 2"): boxes = constant_op.constant([[0.0, 0.0, 1.0, 1.0]]) scores = constant_op.constant([[0.9]]) image_ops.non_max_suppression(boxes, scores, 3, 0.5) # The max_output_size should be a scalar (0-D). with self.assertRaisesRegexp(ValueError, "Shape must be rank 0 but is rank 1"): boxes = constant_op.constant([[0.0, 0.0, 1.0, 1.0]]) scores = constant_op.constant([0.9]) image_ops.non_max_suppression(boxes, scores, [3], 0.5) # The iou_threshold should be a scalar (0-D). 
with self.assertRaisesRegexp(ValueError, "Shape must be rank 0 but is rank 2"): boxes = constant_op.constant([[0.0, 0.0, 1.0, 1.0]]) scores = constant_op.constant([0.9]) image_ops.non_max_suppression(boxes, scores, 3, [[0.5]]) @test_util.run_deprecated_v1 def testDataTypes(self): # Test case for GitHub issue 20199. boxes_np = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9], [0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]] scores_np = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3] max_output_size_np = 3 iou_threshold_np = 0.5 score_threshold_np = float("-inf") # Note: There are multiple versions of non_max_suppression v2, v3, v4. # gen_image_ops.non_max_suppression_v2: for dtype in [np.float16, np.float32]: with self.cached_session(): boxes = constant_op.constant(boxes_np, dtype=dtype) scores = constant_op.constant(scores_np, dtype=dtype) max_output_size = constant_op.constant(max_output_size_np) iou_threshold = constant_op.constant(iou_threshold_np, dtype=dtype) selected_indices = gen_image_ops.non_max_suppression_v2( boxes, scores, max_output_size, iou_threshold).eval() self.assertAllClose(selected_indices, [3, 0, 5]) # gen_image_ops.non_max_suppression_v3 for dtype in [np.float16, np.float32]: with self.cached_session(): boxes = constant_op.constant(boxes_np, dtype=dtype) scores = constant_op.constant(scores_np, dtype=dtype) max_output_size = constant_op.constant(max_output_size_np) iou_threshold = constant_op.constant(iou_threshold_np, dtype=dtype) score_threshold = constant_op.constant(score_threshold_np, dtype=dtype) selected_indices = gen_image_ops.non_max_suppression_v3( boxes, scores, max_output_size, iou_threshold, score_threshold) selected_indices = self.evaluate(selected_indices) self.assertAllClose(selected_indices, [3, 0, 5]) # gen_image_ops.non_max_suppression_v4. for dtype in [np.float16, np.float32]: with self.cached_session(): boxes = constant_op.constant(boxes_np, dtype=dtype) scores = constant_op.constant(scores_np, dtype=dtype) max_output_size = constant_op.constant(max_output_size_np) iou_threshold = constant_op.constant(iou_threshold_np, dtype=dtype) score_threshold = constant_op.constant(score_threshold_np, dtype=dtype) selected_indices, _ = gen_image_ops.non_max_suppression_v4( boxes, scores, max_output_size, iou_threshold, score_threshold) selected_indices = self.evaluate(selected_indices) self.assertAllClose(selected_indices, [3, 0, 5]) # gen_image_ops.non_max_suppression_v5. 
soft_nms_sigma_np = float(0.0) for dtype in [np.float16, np.float32]: with self.cached_session(): boxes = constant_op.constant(boxes_np, dtype=dtype) scores = constant_op.constant(scores_np, dtype=dtype) max_output_size = constant_op.constant(max_output_size_np) iou_threshold = constant_op.constant(iou_threshold_np, dtype=dtype) score_threshold = constant_op.constant(score_threshold_np, dtype=dtype) soft_nms_sigma = constant_op.constant(soft_nms_sigma_np, dtype=dtype) selected_indices, _, _ = gen_image_ops.non_max_suppression_v5( boxes, scores, max_output_size, iou_threshold, score_threshold, soft_nms_sigma) selected_indices = self.evaluate(selected_indices) self.assertAllClose(selected_indices, [3, 0, 5]) class NonMaxSuppressionWithScoresTest(test_util.TensorFlowTestCase): @test_util.run_deprecated_v1 def testSelectFromThreeClustersWithSoftNMS(self): boxes_np = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9], [0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]] scores_np = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3] max_output_size_np = 6 iou_threshold_np = 1.0 score_threshold_np = 0.0 soft_nms_sigma_np = 0.5 boxes = constant_op.constant(boxes_np) scores = constant_op.constant(scores_np) max_output_size = constant_op.constant(max_output_size_np) iou_threshold = constant_op.constant(iou_threshold_np) score_threshold = constant_op.constant(score_threshold_np) soft_nms_sigma = constant_op.constant(soft_nms_sigma_np) selected_indices, selected_scores = \ image_ops.non_max_suppression_with_scores( boxes, scores, max_output_size, iou_threshold, score_threshold, soft_nms_sigma) selected_indices, selected_scores = self.evaluate( [selected_indices, selected_scores]) self.assertAllClose(selected_indices, [3, 0, 1, 5, 4, 2]) self.assertAllClose(selected_scores, [0.95, 0.9, 0.384, 0.3, 0.256, 0.197], rtol=1e-2, atol=1e-2) class NonMaxSuppressionPaddedTest(test_util.TensorFlowTestCase): @test_util.run_deprecated_v1 def testSelectFromThreeClusters(self): boxes_np = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9], [0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]] scores_np = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3] max_output_size_np = 5 iou_threshold_np = 0.5 boxes = constant_op.constant(boxes_np) scores = constant_op.constant(scores_np) max_output_size = constant_op.constant(max_output_size_np) iou_threshold = constant_op.constant(iou_threshold_np) selected_indices_padded, num_valid_padded = \ image_ops.non_max_suppression_padded( boxes, scores, max_output_size, iou_threshold, pad_to_max_output_size=True) selected_indices, num_valid = image_ops.non_max_suppression_padded( boxes, scores, max_output_size, iou_threshold, pad_to_max_output_size=False) # The output shape of the padded operation must be fully defined. 
self.assertEqual(selected_indices_padded.shape.is_fully_defined(), True) self.assertEqual(selected_indices.shape.is_fully_defined(), False) with self.cached_session(): self.assertAllClose(selected_indices_padded.eval(), [3, 0, 5, 0, 0]) self.assertEqual(num_valid_padded.eval(), 3) self.assertAllClose(selected_indices.eval(), [3, 0, 5]) self.assertEqual(num_valid.eval(), 3) @test_util.run_deprecated_v1 def testSelectFromContinuousOverLap(self): boxes_np = [[0, 0, 1, 1], [0, 0.2, 1, 1.2], [0, 0.4, 1, 1.4], [0, 0.6, 1, 1.6], [0, 0.8, 1, 1.8], [0, 2, 1, 2]] scores_np = [0.9, 0.75, 0.6, 0.5, 0.4, 0.3] max_output_size_np = 3 iou_threshold_np = 0.5 score_threshold_np = 0.1 boxes = constant_op.constant(boxes_np) scores = constant_op.constant(scores_np) max_output_size = constant_op.constant(max_output_size_np) iou_threshold = constant_op.constant(iou_threshold_np) score_threshold = constant_op.constant(score_threshold_np) selected_indices, num_valid = image_ops.non_max_suppression_padded( boxes, scores, max_output_size, iou_threshold, score_threshold) # The output shape of the padded operation must be fully defined. self.assertEqual(selected_indices.shape.is_fully_defined(), False) with self.cached_session(): self.assertAllClose(selected_indices.eval(), [0, 2, 4]) self.assertEqual(num_valid.eval(), 3) class NonMaxSuppressionWithOverlapsTest(test_util.TensorFlowTestCase): @test_util.run_deprecated_v1 def testSelectOneFromThree(self): overlaps_np = [ [1.0, 0.7, 0.2], [0.7, 1.0, 0.0], [0.2, 0.0, 1.0], ] scores_np = [0.7, 0.9, 0.1] max_output_size_np = 3 overlaps = constant_op.constant(overlaps_np) scores = constant_op.constant(scores_np) max_output_size = constant_op.constant(max_output_size_np) overlap_threshold = 0.6 score_threshold = 0.4 selected_indices = image_ops.non_max_suppression_with_overlaps( overlaps, scores, max_output_size, overlap_threshold, score_threshold) with self.cached_session(): self.assertAllClose(selected_indices.eval(), [1]) class VerifyCompatibleImageShapesTest(test_util.TensorFlowTestCase): """Tests utility function used by ssim() and psnr().""" @test_util.run_deprecated_v1 def testWrongDims(self): img = array_ops.placeholder(dtype=dtypes.float32) img_np = np.array((2, 2)) with self.cached_session(use_gpu=True) as sess: _, _, checks = image_ops_impl._verify_compatible_image_shapes(img, img) with self.assertRaises(errors.InvalidArgumentError): sess.run(checks, {img: img_np}) @test_util.run_deprecated_v1 def testShapeMismatch(self): img1 = array_ops.placeholder(dtype=dtypes.float32) img2 = array_ops.placeholder(dtype=dtypes.float32) img1_np = np.array([1, 2, 2, 1]) img2_np = np.array([1, 3, 3, 1]) with self.cached_session(use_gpu=True) as sess: _, _, checks = image_ops_impl._verify_compatible_image_shapes(img1, img2) with self.assertRaises(errors.InvalidArgumentError): sess.run(checks, {img1: img1_np, img2: img2_np}) class PSNRTest(test_util.TensorFlowTestCase): """Tests for PSNR.""" def _LoadTestImage(self, sess, filename): content = io_ops.read_file(os.path.join( "tensorflow/core/lib/psnr/testdata", filename)) im = image_ops.decode_jpeg(content, dct_method="INTEGER_ACCURATE") im = image_ops.convert_image_dtype(im, dtypes.float32) im, = self.evaluate([im]) return np.expand_dims(im, axis=0) def _LoadTestImages(self): with self.cached_session(use_gpu=True) as sess: q20 = self._LoadTestImage(sess, "cat_q20.jpg") q72 = self._LoadTestImage(sess, "cat_q72.jpg") q95 = self._LoadTestImage(sess, "cat_q95.jpg") return q20, q72, q95 def _PSNR_NumPy(self, orig, target, max_value): 
"""Numpy implementation of PSNR.""" mse = ((orig - target) ** 2).mean(axis=(-3, -2, -1)) return 20 * np.log10(max_value) - 10 * np.log10(mse) def _RandomImage(self, shape, max_val): """Returns an image or image batch with given shape.""" return np.random.rand(*shape).astype(np.float32) * max_val @test_util.run_deprecated_v1 def testPSNRSingleImage(self): image1 = self._RandomImage((8, 8, 1), 1) image2 = self._RandomImage((8, 8, 1), 1) psnr = self._PSNR_NumPy(image1, image2, 1) with self.cached_session(use_gpu=True): tf_image1 = constant_op.constant(image1, shape=image1.shape, dtype=dtypes.float32) tf_image2 = constant_op.constant(image2, shape=image2.shape, dtype=dtypes.float32) tf_psnr = image_ops.psnr(tf_image1, tf_image2, 1.0, "psnr").eval() self.assertAllClose(psnr, tf_psnr, atol=0.001) @test_util.run_deprecated_v1 def testPSNRMultiImage(self): image1 = self._RandomImage((10, 8, 8, 1), 1) image2 = self._RandomImage((10, 8, 8, 1), 1) psnr = self._PSNR_NumPy(image1, image2, 1) with self.cached_session(use_gpu=True): tf_image1 = constant_op.constant(image1, shape=image1.shape, dtype=dtypes.float32) tf_image2 = constant_op.constant(image2, shape=image2.shape, dtype=dtypes.float32) tf_psnr = image_ops.psnr(tf_image1, tf_image2, 1, "psnr").eval() self.assertAllClose(psnr, tf_psnr, atol=0.001) @test_util.run_deprecated_v1 def testGoldenPSNR(self): q20, q72, q95 = self._LoadTestImages() # Verify NumPy implementation first. # Golden values are generated using GNU Octave's psnr() function. psnr1 = self._PSNR_NumPy(q20, q72, 1) self.assertNear(30.321, psnr1, 0.001, msg="q20.dtype=" + str(q20.dtype)) psnr2 = self._PSNR_NumPy(q20, q95, 1) self.assertNear(29.994, psnr2, 0.001) psnr3 = self._PSNR_NumPy(q72, q95, 1) self.assertNear(35.302, psnr3, 0.001) # Test TensorFlow implementation. 
with self.cached_session(use_gpu=True): tf_q20 = constant_op.constant(q20, shape=q20.shape, dtype=dtypes.float32) tf_q72 = constant_op.constant(q72, shape=q72.shape, dtype=dtypes.float32) tf_q95 = constant_op.constant(q95, shape=q95.shape, dtype=dtypes.float32) tf_psnr1 = image_ops.psnr(tf_q20, tf_q72, 1, "psnr1").eval() tf_psnr2 = image_ops.psnr(tf_q20, tf_q95, 1, "psnr2").eval() tf_psnr3 = image_ops.psnr(tf_q72, tf_q95, 1, "psnr3").eval() self.assertAllClose(psnr1, tf_psnr1, atol=0.001) self.assertAllClose(psnr2, tf_psnr2, atol=0.001) self.assertAllClose(psnr3, tf_psnr3, atol=0.001) @test_util.run_deprecated_v1 def testInfinity(self): q20, _, _ = self._LoadTestImages() psnr = self._PSNR_NumPy(q20, q20, 1) with self.cached_session(use_gpu=True): tf_q20 = constant_op.constant(q20, shape=q20.shape, dtype=dtypes.float32) tf_psnr = image_ops.psnr(tf_q20, tf_q20, 1, "psnr").eval() self.assertAllClose(psnr, tf_psnr, atol=0.001) @test_util.run_deprecated_v1 def testInt(self): img1 = self._RandomImage((10, 8, 8, 1), 255) img2 = self._RandomImage((10, 8, 8, 1), 255) img1 = constant_op.constant(img1, dtypes.uint8) img2 = constant_op.constant(img2, dtypes.uint8) psnr_uint8 = image_ops.psnr(img1, img2, 255) img1 = image_ops.convert_image_dtype(img1, dtypes.float32) img2 = image_ops.convert_image_dtype(img2, dtypes.float32) psnr_float32 = image_ops.psnr(img1, img2, 1.0) with self.cached_session(use_gpu=True): self.assertAllClose( psnr_uint8.eval(), self.evaluate(psnr_float32), atol=0.001) class SSIMTest(test_util.TensorFlowTestCase): """Tests for SSIM.""" _filenames = ["checkerboard1.png", "checkerboard2.png", "checkerboard3.png",] _ssim = np.asarray([[1.000000, 0.230880, 0.231153], [0.230880, 1.000000, 0.996828], [0.231153, 0.996828, 1.000000]]) def _LoadTestImage(self, sess, filename): content = io_ops.read_file(os.path.join( "tensorflow/core/lib/ssim/testdata", filename)) im = image_ops.decode_png(content) im = image_ops.convert_image_dtype(im, dtypes.float32) im, = self.evaluate([im]) return np.expand_dims(im, axis=0) def _LoadTestImages(self): with self.cached_session(use_gpu=True) as sess: return [self._LoadTestImage(sess, f) for f in self._filenames] def _RandomImage(self, shape, max_val): """Returns an image or image batch with given shape.""" return np.random.rand(*shape).astype(np.float32) * max_val @test_util.run_deprecated_v1 def testAgainstMatlab(self): """Tests against values produced by Matlab.""" img = self._LoadTestImages() expected = self._ssim[np.triu_indices(3)] ph = [array_ops.placeholder(dtype=dtypes.float32) for _ in range(2)] ssim = image_ops.ssim( *ph, max_val=1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03) with self.cached_session(use_gpu=True): scores = [ssim.eval(dict(zip(ph, t))) for t in itertools.combinations_with_replacement(img, 2)] self.assertAllClose(expected, np.squeeze(scores), atol=1e-4) def testBatch(self): img = self._LoadTestImages() expected = self._ssim[np.triu_indices(3, k=1)] img1, img2 = zip(*itertools.combinations(img, 2)) img1 = np.concatenate(img1) img2 = np.concatenate(img2) ssim = image_ops.ssim( constant_op.constant(img1), constant_op.constant(img2), 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03) with self.cached_session(use_gpu=True): self.assertAllClose(expected, self.evaluate(ssim), atol=1e-4) def testBroadcast(self): img = self._LoadTestImages()[:2] expected = self._ssim[:2, :2] img = constant_op.constant(np.concatenate(img)) img1 = array_ops.expand_dims(img, axis=0) # batch dims: 1, 2. 
img2 = array_ops.expand_dims(img, axis=1) # batch dims: 2, 1. ssim = image_ops.ssim( img1, img2, 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03) with self.cached_session(use_gpu=True): self.assertAllClose(expected, self.evaluate(ssim), atol=1e-4) @test_util.run_deprecated_v1 def testNegative(self): """Tests against negative SSIM index.""" step = np.expand_dims(np.arange(0, 256, 16, dtype=np.uint8), axis=0) img1 = np.tile(step, (16, 1)) img2 = np.fliplr(img1) img1 = img1.reshape((1, 16, 16, 1)) img2 = img2.reshape((1, 16, 16, 1)) ssim = image_ops.ssim( constant_op.constant(img1), constant_op.constant(img2), 255, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03) with self.cached_session(use_gpu=True): self.assertLess(ssim.eval(), 0) @test_util.run_deprecated_v1 def testInt(self): img1 = self._RandomImage((1, 16, 16, 3), 255) img2 = self._RandomImage((1, 16, 16, 3), 255) img1 = constant_op.constant(img1, dtypes.uint8) img2 = constant_op.constant(img2, dtypes.uint8) ssim_uint8 = image_ops.ssim( img1, img2, 255, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03) img1 = image_ops.convert_image_dtype(img1, dtypes.float32) img2 = image_ops.convert_image_dtype(img2, dtypes.float32) ssim_float32 = image_ops.ssim( img1, img2, 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03) with self.cached_session(use_gpu=True): self.assertAllClose( ssim_uint8.eval(), self.evaluate(ssim_float32), atol=0.001) class MultiscaleSSIMTest(test_util.TensorFlowTestCase): """Tests for MS-SSIM.""" _filenames = ["checkerboard1.png", "checkerboard2.png", "checkerboard3.png",] _msssim = np.asarray([[1.000000, 0.091016, 0.091025], [0.091016, 1.000000, 0.999567], [0.091025, 0.999567, 1.000000]]) def _LoadTestImage(self, sess, filename): content = io_ops.read_file(os.path.join( "tensorflow/core/lib/ssim/testdata", filename)) im = image_ops.decode_png(content) im = image_ops.convert_image_dtype(im, dtypes.float32) im, = self.evaluate([im]) return np.expand_dims(im, axis=0) def _LoadTestImages(self): with self.cached_session(use_gpu=True) as sess: return [self._LoadTestImage(sess, f) for f in self._filenames] def _RandomImage(self, shape, max_val): """Returns an image or image batch with given shape.""" return np.random.rand(*shape).astype(np.float32) * max_val @test_util.run_deprecated_v1 def testAgainstMatlab(self): """Tests against MS-SSIM computed with Matlab implementation. For color images, MS-SSIM scores are averaged over color channels. 
""" img = self._LoadTestImages() expected = self._msssim[np.triu_indices(3)] ph = [array_ops.placeholder(dtype=dtypes.float32) for _ in range(2)] msssim = image_ops.ssim_multiscale( *ph, max_val=1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03) with self.cached_session(use_gpu=True): scores = [msssim.eval(dict(zip(ph, t))) for t in itertools.combinations_with_replacement(img, 2)] self.assertAllClose(expected, np.squeeze(scores), atol=1e-4) @test_util.run_deprecated_v1 def testUnweightedIsDifferentiable(self): img = self._LoadTestImages() ph = [array_ops.placeholder(dtype=dtypes.float32) for _ in range(2)] scalar = constant_op.constant(1.0, dtype=dtypes.float32) scaled_ph = [x * scalar for x in ph] msssim = image_ops.ssim_multiscale( *scaled_ph, max_val=1.0, power_factors=(1, 1, 1, 1, 1), filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03) grads = gradients.gradients(msssim, scalar) with self.cached_session(use_gpu=True) as sess: np_grads = sess.run(grads, feed_dict={ph[0]: img[0], ph[1]: img[1]}) self.assertTrue(np.isfinite(np_grads).all()) def testBatch(self): """Tests MS-SSIM computed in batch.""" img = self._LoadTestImages() expected = self._msssim[np.triu_indices(3, k=1)] img1, img2 = zip(*itertools.combinations(img, 2)) img1 = np.concatenate(img1) img2 = np.concatenate(img2) msssim = image_ops.ssim_multiscale( constant_op.constant(img1), constant_op.constant(img2), 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03) with self.cached_session(use_gpu=True): self.assertAllClose(expected, self.evaluate(msssim), 1e-4) def testBroadcast(self): """Tests MS-SSIM broadcasting.""" img = self._LoadTestImages()[:2] expected = self._msssim[:2, :2] img = constant_op.constant(np.concatenate(img)) img1 = array_ops.expand_dims(img, axis=0) # batch dims: 1, 2. img2 = array_ops.expand_dims(img, axis=1) # batch dims: 2, 1. score_tensor = image_ops.ssim_multiscale( img1, img2, 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03) with self.cached_session(use_gpu=True): self.assertAllClose(expected, self.evaluate(score_tensor), 1e-4) def testRange(self): """Tests against low MS-SSIM score. MS-SSIM is a geometric mean of SSIM and CS scores of various scales. If any of the value is negative so that the geometric mean is not well-defined, then treat the MS-SSIM score as zero. 
""" with self.cached_session(use_gpu=True) as sess: img1 = self._LoadTestImage(sess, "checkerboard1.png") img2 = self._LoadTestImage(sess, "checkerboard3.png") images = [img1, img2, np.zeros_like(img1), np.full_like(img1, fill_value=255)] images = [ops.convert_to_tensor(x, dtype=dtypes.float32) for x in images] msssim_ops = [ image_ops.ssim_multiscale( x, y, 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03) for x, y in itertools.combinations(images, 2) ] msssim = self.evaluate(msssim_ops) msssim = np.squeeze(msssim) self.assertTrue(np.all(msssim >= 0.0)) self.assertTrue(np.all(msssim <= 1.0)) @test_util.run_deprecated_v1 def testInt(self): img1 = self._RandomImage((1, 180, 240, 3), 255) img2 = self._RandomImage((1, 180, 240, 3), 255) img1 = constant_op.constant(img1, dtypes.uint8) img2 = constant_op.constant(img2, dtypes.uint8) ssim_uint8 = image_ops.ssim_multiscale( img1, img2, 255, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03) img1 = image_ops.convert_image_dtype(img1, dtypes.float32) img2 = image_ops.convert_image_dtype(img2, dtypes.float32) ssim_float32 = image_ops.ssim_multiscale( img1, img2, 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03) with self.cached_session(use_gpu=True): self.assertAllClose( ssim_uint8.eval(), self.evaluate(ssim_float32), atol=0.001) def testNumpyInput(self): """Test case for GitHub issue 28241.""" image = np.random.random([512, 512, 1]) score_tensor = image_ops.ssim_multiscale(image, image, max_val=1.0) with self.cached_session(use_gpu=True): _ = self.evaluate(score_tensor) class ImageGradientsTest(test_util.TensorFlowTestCase): def testImageGradients(self): shape = [1, 2, 4, 1] img = constant_op.constant([[1, 3, 4, 2], [8, 7, 5, 6]]) img = array_ops.reshape(img, shape) expected_dy = np.reshape([[7, 4, 1, 4], [0, 0, 0, 0]], shape) expected_dx = np.reshape([[2, 1, -2, 0], [-1, -2, 1, 0]], shape) dy, dx = image_ops.image_gradients(img) with self.cached_session(): actual_dy = self.evaluate(dy) actual_dx = self.evaluate(dx) self.assertAllClose(expected_dy, actual_dy) self.assertAllClose(expected_dx, actual_dx) def testImageGradientsMultiChannelBatch(self): batch = [[[[1, 2], [2, 5], [3, 3]], [[8, 4], [5, 1], [9, 8]]], [[[5, 3], [7, 9], [1, 6]], [[1, 2], [6, 3], [6, 3]]]] expected_dy = [[[[7, 2], [3, -4], [6, 5]], [[0, 0], [0, 0], [0, 0]]], [[[-4, -1], [-1, -6], [5, -3]], [[0, 0], [0, 0], [0, 0]]]] expected_dx = [[[[1, 3], [1, -2], [0, 0]], [[-3, -3], [4, 7], [0, 0]]], [[[2, 6], [-6, -3], [0, 0]], [[5, 1], [0, 0], [0, 0]]]] batch = constant_op.constant(batch) assert batch.get_shape().as_list() == [2, 2, 3, 2] dy, dx = image_ops.image_gradients(batch) with self.cached_session(use_gpu=True): actual_dy = self.evaluate(dy) actual_dx = self.evaluate(dx) self.assertAllClose(expected_dy, actual_dy) self.assertAllClose(expected_dx, actual_dx) def testImageGradientsBadShape(self): # [2 x 4] image but missing batch and depth dimensions. 
img = constant_op.constant([[1, 3, 4, 2], [8, 7, 5, 6]]) with self.assertRaises(ValueError): image_ops.image_gradients(img) class SobelEdgesTest(test_util.TensorFlowTestCase): def testSobelEdges1x2x3x1(self): img = constant_op.constant([[1, 3, 6], [4, 1, 5]], dtype=dtypes.float32, shape=[1, 2, 3, 1]) expected = np.reshape([[[0, 0], [0, 12], [0, 0]], [[0, 0], [0, 12], [0, 0]]], [1, 2, 3, 1, 2]) sobel = image_ops.sobel_edges(img) with self.cached_session(use_gpu=True): actual_sobel = self.evaluate(sobel) self.assertAllClose(expected, actual_sobel) def testSobelEdges5x3x4x2(self): batch_size = 5 plane = np.reshape([[1, 3, 6, 2], [4, 1, 5, 7], [2, 5, 1, 4]], [1, 3, 4, 1]) two_channel = np.concatenate([plane, plane], axis=3) batch = np.concatenate([two_channel] * batch_size, axis=0) img = constant_op.constant(batch, dtype=dtypes.float32, shape=[batch_size, 3, 4, 2]) expected_plane = np.reshape([[[0, 0], [0, 12], [0, 10], [0, 0]], [[6, 0], [0, 6], [-6, 10], [-6, 0]], [[0, 0], [0, 0], [0, 10], [0, 0]]], [1, 3, 4, 1, 2]) expected_two_channel = np.concatenate( [expected_plane, expected_plane], axis=3) expected_batch = np.concatenate([expected_two_channel] * batch_size, axis=0) sobel = image_ops.sobel_edges(img) with self.cached_session(use_gpu=True): actual_sobel = self.evaluate(sobel) self.assertAllClose(expected_batch, actual_sobel) class DecodeImageTest(test_util.TensorFlowTestCase): def testJpegUint16(self): with self.cached_session(use_gpu=True) as sess: base = "tensorflow/core/lib/jpeg/testdata" jpeg0 = io_ops.read_file(os.path.join(base, "jpeg_merge_test1.jpg")) image0 = image_ops.decode_image(jpeg0, dtype=dtypes.uint16) image1 = image_ops.convert_image_dtype(image_ops.decode_jpeg(jpeg0), dtypes.uint16) image0, image1 = self.evaluate([image0, image1]) self.assertAllEqual(image0, image1) def testPngUint16(self): with self.cached_session(use_gpu=True) as sess: base = "tensorflow/core/lib/png/testdata" png0 = io_ops.read_file(os.path.join(base, "lena_rgba.png")) image0 = image_ops.decode_image(png0, dtype=dtypes.uint16) image1 = image_ops.convert_image_dtype( image_ops.decode_png(png0, dtype=dtypes.uint16), dtypes.uint16) image0, image1 = self.evaluate([image0, image1]) self.assertAllEqual(image0, image1) def testGifUint16(self): with self.cached_session(use_gpu=True) as sess: base = "tensorflow/core/lib/gif/testdata" gif0 = io_ops.read_file(os.path.join(base, "scan.gif")) image0 = image_ops.decode_image(gif0, dtype=dtypes.uint16) image1 = image_ops.convert_image_dtype(image_ops.decode_gif(gif0), dtypes.uint16) image0, image1 = self.evaluate([image0, image1]) self.assertAllEqual(image0, image1) def testBmpUint16(self): with self.cached_session(use_gpu=True) as sess: base = "tensorflow/core/lib/bmp/testdata" bmp0 = io_ops.read_file(os.path.join(base, "lena.bmp")) image0 = image_ops.decode_image(bmp0, dtype=dtypes.uint16) image1 = image_ops.convert_image_dtype(image_ops.decode_bmp(bmp0), dtypes.uint16) image0, image1 = self.evaluate([image0, image1]) self.assertAllEqual(image0, image1) def testJpegFloat32(self): with self.cached_session(use_gpu=True) as sess: base = "tensorflow/core/lib/jpeg/testdata" jpeg0 = io_ops.read_file(os.path.join(base, "jpeg_merge_test1.jpg")) image0 = image_ops.decode_image(jpeg0, dtype=dtypes.float32) image1 = image_ops.convert_image_dtype(image_ops.decode_jpeg(jpeg0), dtypes.float32) image0, image1 = self.evaluate([image0, image1]) self.assertAllEqual(image0, image1) def testPngFloat32(self): with self.cached_session(use_gpu=True) as sess: base = 
"tensorflow/core/lib/png/testdata" png0 = io_ops.read_file(os.path.join(base, "lena_rgba.png")) image0 = image_ops.decode_image(png0, dtype=dtypes.float32) image1 = image_ops.convert_image_dtype( image_ops.decode_png(png0, dtype=dtypes.uint16), dtypes.float32) image0, image1 = self.evaluate([image0, image1]) self.assertAllEqual(image0, image1) def testGifFloat32(self): with self.cached_session(use_gpu=True) as sess: base = "tensorflow/core/lib/gif/testdata" gif0 = io_ops.read_file(os.path.join(base, "scan.gif")) image0 = image_ops.decode_image(gif0, dtype=dtypes.float32) image1 = image_ops.convert_image_dtype(image_ops.decode_gif(gif0), dtypes.float32) image0, image1 = self.evaluate([image0, image1]) self.assertAllEqual(image0, image1) def testBmpFloat32(self): with self.cached_session(use_gpu=True) as sess: base = "tensorflow/core/lib/bmp/testdata" bmp0 = io_ops.read_file(os.path.join(base, "lena.bmp")) image0 = image_ops.decode_image(bmp0, dtype=dtypes.float32) image1 = image_ops.convert_image_dtype(image_ops.decode_bmp(bmp0), dtypes.float32) image0, image1 = self.evaluate([image0, image1]) self.assertAllEqual(image0, image1) def testExpandAnimations(self): with self.cached_session(use_gpu=True) as sess: base = "tensorflow/core/lib/gif/testdata" gif0 = io_ops.read_file(os.path.join(base, "scan.gif")) image0 = image_ops.decode_image( gif0, dtype=dtypes.float32, expand_animations=False) # image_ops.decode_png() handles GIFs and returns 3D tensors animation = image_ops.decode_gif(gif0) first_frame = array_ops.gather(animation, 0) image1 = image_ops.convert_image_dtype(first_frame, dtypes.float32) image0, image1 = self.evaluate([image0, image1]) self.assertEqual(len(image0.shape), 3) self.assertAllEqual(list(image0.shape), [40, 20, 3]) self.assertAllEqual(image0, image1) if __name__ == "__main__": googletest.main()
tensorflow-master
tensorflow/python/ops/image_ops_test.py
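The handmade example in TotalVariationTest above (image_ops_test.py) can be checked independently of TensorFlow. The following is a minimal NumPy sketch, not part of the test file, that reproduces the 564 total-variation figure worked out in the test's comments; the variable names mirror the test but are otherwise arbitrary.

import numpy as np

# The 2x2 image with 3 channels from testTotalVariationHandmade.
r = [[1, 2], [4, 7]]
g = [[11, 18], [29, 47]]
b = [[73, 120], [193, 313]]
a = np.dstack((r, g, b)).astype(np.float64)  # shape (2, 2, 3)

# Sum of absolute differences between vertically and horizontally
# adjacent pixels, over all channels.
row_dif = np.abs(a[1:, :, :] - a[:-1, :, :])
col_dif = np.abs(a[:, 1:, :] - a[:, :-1, :])
tot_var = row_dif.sum() + col_dif.sum()

assert tot_var == 564  # 8 + 4 + 47 + 25 + 313 + 167, as in the test comments.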
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for tensorflow.ops.histogram_ops.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.framework import dtypes from tensorflow.python.framework import test_util from tensorflow.python.framework import constant_op from tensorflow.python.ops import array_ops from tensorflow.python.ops import histogram_ops from tensorflow.python.platform import test class BinValuesFixedWidth(test.TestCase): def test_empty_input_gives_all_zero_counts(self): # Bins will be: # (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf) value_range = [0.0, 5.0] values = [] expected_bins = [] with self.cached_session(): bins = histogram_ops.histogram_fixed_width_bins( values, value_range, nbins=5) self.assertEqual(dtypes.int32, bins.dtype) self.assertAllClose(expected_bins, self.evaluate(bins)) def test_1d_values_int32_output(self): # Bins will be: # (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf) value_range = [0.0, 5.0] values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15] expected_bins = [0, 0, 1, 2, 4, 4] with self.cached_session(): bins = histogram_ops.histogram_fixed_width_bins( values, value_range, nbins=5, dtype=dtypes.int64) self.assertEqual(dtypes.int32, bins.dtype) self.assertAllClose(expected_bins, self.evaluate(bins)) def test_1d_float64_values_int32_output(self): # Bins will be: # (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf) value_range = np.float64([0.0, 5.0]) values = np.float64([-1.0, 0.0, 1.5, 2.0, 5.0, 15]) expected_bins = [0, 0, 1, 2, 4, 4] with self.cached_session(): bins = histogram_ops.histogram_fixed_width_bins( values, value_range, nbins=5) self.assertEqual(dtypes.int32, bins.dtype) self.assertAllClose(expected_bins, self.evaluate(bins)) def test_2d_values(self): # Bins will be: # (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf) value_range = [0.0, 5.0] values = constant_op.constant( [[-1.0, 0.0, 1.5], [2.0, 5.0, 15]], shape=(2, 3)) expected_bins = [[0, 0, 1], [2, 4, 4]] with self.cached_session(): bins = histogram_ops.histogram_fixed_width_bins( values, value_range, nbins=5) self.assertEqual(dtypes.int32, bins.dtype) self.assertAllClose(expected_bins, self.evaluate(bins)) class HistogramFixedWidthTest(test.TestCase): def setUp(self): self.rng = np.random.RandomState(0) @test_util.run_deprecated_v1 def test_with_invalid_value_range(self): values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15] with self.assertRaisesRegexp( ValueError, "Shape must be rank 1 but is rank 0"): histogram_ops.histogram_fixed_width(values, 1.0) with self.assertRaisesRegexp(ValueError, "Dimension must be 2 but is 3"): histogram_ops.histogram_fixed_width(values, [1.0, 2.0, 3.0]) @test_util.run_deprecated_v1 def test_with_invalid_nbins(self): values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15] with self.assertRaisesRegexp( ValueError, "Shape must be rank 0 but is rank 1"): 
histogram_ops.histogram_fixed_width(values, [1.0, 5.0], nbins=[1, 2]) with self.assertRaisesRegexp( ValueError, "Requires nbins > 0"): histogram_ops.histogram_fixed_width(values, [1.0, 5.0], nbins=-5) def test_empty_input_gives_all_zero_counts(self): # Bins will be: # (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf) value_range = [0.0, 5.0] values = [] expected_bin_counts = [0, 0, 0, 0, 0] with self.session(use_gpu=True): hist = histogram_ops.histogram_fixed_width(values, value_range, nbins=5) self.assertEqual(dtypes.int32, hist.dtype) self.assertAllClose(expected_bin_counts, self.evaluate(hist)) def test_1d_values_int64_output(self): # Bins will be: # (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf) value_range = [0.0, 5.0] values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15] expected_bin_counts = [2, 1, 1, 0, 2] with self.session(use_gpu=True): hist = histogram_ops.histogram_fixed_width( values, value_range, nbins=5, dtype=dtypes.int64) self.assertEqual(dtypes.int64, hist.dtype) self.assertAllClose(expected_bin_counts, self.evaluate(hist)) def test_1d_float64_values(self): # Bins will be: # (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf) value_range = np.float64([0.0, 5.0]) values = np.float64([-1.0, 0.0, 1.5, 2.0, 5.0, 15]) expected_bin_counts = [2, 1, 1, 0, 2] with self.session(use_gpu=True): hist = histogram_ops.histogram_fixed_width(values, value_range, nbins=5) self.assertEqual(dtypes.int32, hist.dtype) self.assertAllClose(expected_bin_counts, self.evaluate(hist)) def test_2d_values(self): # Bins will be: # (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf) value_range = [0.0, 5.0] values = [[-1.0, 0.0, 1.5], [2.0, 5.0, 15]] expected_bin_counts = [2, 1, 1, 0, 2] with self.session(use_gpu=True): hist = histogram_ops.histogram_fixed_width(values, value_range, nbins=5) self.assertEqual(dtypes.int32, hist.dtype) self.assertAllClose(expected_bin_counts, self.evaluate(hist)) @test_util.run_deprecated_v1 def test_shape_inference(self): value_range = [0.0, 5.0] values = [[-1.0, 0.0, 1.5], [2.0, 5.0, 15]] expected_bin_counts = [2, 1, 1, 0, 2] placeholder = array_ops.placeholder(dtypes.int32) with self.session(use_gpu=True): hist = histogram_ops.histogram_fixed_width(values, value_range, nbins=5) self.assertAllEqual(hist.shape.as_list(), (5,)) self.assertEqual(dtypes.int32, hist.dtype) self.assertAllClose(expected_bin_counts, self.evaluate(hist)) hist = histogram_ops.histogram_fixed_width( values, value_range, nbins=placeholder) self.assertEquals(hist.shape.ndims, 1) self.assertIs(hist.shape.dims[0].value, None) self.assertEqual(dtypes.int32, hist.dtype) self.assertAllClose(expected_bin_counts, hist.eval({placeholder: 5})) if __name__ == '__main__': test.main()
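As a cross-check of the fixed-width binning that the tests above expect, the same bin indices and counts can be reproduced with plain NumPy. This is only an illustrative sketch of the binning rule; it does not cover the dtype and shape-inference behaviour exercised by the tests.

import numpy as np

value_range = (0.0, 5.0)
values = np.array([-1.0, 0.0, 1.5, 2.0, 5.0, 15.0])
nbins = 5

# Scale each value to a bin index and clamp: values below the range land in
# the first bin, values at or above the upper bound land in the last bin.
width = (value_range[1] - value_range[0]) / nbins
indices = np.clip(np.floor((values - value_range[0]) / width), 0, nbins - 1)
indices = indices.astype(np.int32)

assert indices.tolist() == [0, 0, 1, 2, 4, 4]
assert np.bincount(indices, minlength=nbins).tolist() == [2, 1, 1, 0, 2]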
tensorflow-master
tensorflow/python/ops/histogram_ops_test.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Benchmark for split and grad of split.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import itertools import random import time from tensorflow.core.protobuf import config_pb2 from tensorflow.python.client import session as session_lib from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import gradients_impl from tensorflow.python.ops import variables from tensorflow.python.platform import test def build_graph(device, input_shape, variable, num_inputs, axis, grad): """Build a graph containing a sequence of concat operations. Args: device: string, the device to run on. input_shape: shape of the input tensors. variable: whether or not to randomize the input shape num_inputs: the number of inputs to concat axis: axis to be concat'ed grad: if True compute the gradient Returns: An array of tensors to run() """ with ops.device("/%s:0" % device): if not variable: inputs = [array_ops.zeros(input_shape) for _ in range(num_inputs)] else: if axis == 1: inputs = [ array_ops.zeros([ input_shape[0], random.randint(max(1, input_shape[1] - 5), input_shape[1] + 5) ]) for _ in range(num_inputs) ] else: inputs = [ array_ops.zeros([ random.randint(max(1, input_shape[0] - 5), input_shape[0] + 5), input_shape[1] ]) for _ in range(num_inputs) ] outputs = [array_ops.concat(inputs, axis) for _ in range(100)] if grad: return control_flow_ops.group(*list( itertools.chain.from_iterable([ gradients_impl.gradients(output, inputs) for output in outputs ]))) else: return control_flow_ops.group(*outputs) class ConcatBenchmark(test.Benchmark): """Benchmark concat.""" def _run_graph(self, device, input_shape, variable, num_inputs, axis, grad, num_iters): """Run the graph and print its execution time. Args: device: string, the device to run on. input_shape: shape of the input tensors. variable: whether or not the input shape should be fixed num_inputs: the number of inputs to concat axis: axis to be concat'ed grad: if True compute the gradient num_iters: number of steps to run. Returns: The duration of the run in seconds. """ graph = ops.Graph() with graph.as_default(): outputs = build_graph(device, input_shape, variable, num_inputs, axis, grad) config = config_pb2.ConfigProto(graph_options=config_pb2.GraphOptions( optimizer_options=config_pb2.OptimizerOptions( opt_level=config_pb2.OptimizerOptions.L0))) with session_lib.Session(graph=graph, config=config) as session: variables.global_variables_initializer().run() _ = session.run(outputs) # warm up. 
start_time = time.time() for _ in range(num_iters): _ = session.run(outputs) duration = time.time() - start_time print("%s shape:%d/%d var: %r #inputs:%d axis:%d grad:%r - %f secs - %f " "GB/sec" % (device, input_shape[0], input_shape[1], variable, num_inputs, axis, grad, duration / num_iters, num_inputs * input_shape[0] * input_shape[1] * 4 * 2 * 100 / (duration / num_iters) / 1e9)) name_template = ( "concat_bench_{device}_input_shape_{shape}_variable_{variable}" "_num_inputs_{num_inputs}_axis_{axis}_grad_{grad}") self.report_benchmark(name=name_template.format( device=device, num_inputs=num_inputs, variable=variable, grad=grad, shape=str(input_shape).replace(" ", ""), axis=str(axis), iters=num_iters)) return duration def benchmark_concat(self): print("Forward vs backward concat") shapes = [[2000, 8], [8, 2000], [100, 18], [1000, 18], [100, 97], [1000, 97], [10000, 1], [1, 10000]] axis_ = [0, 1] num_inputs = 20 num_iters = [10] * len(shapes) variable = [False, True] # fixed input size or not for shape, iters in zip(shapes, num_iters): for axis in axis_: for v in variable: self._run_graph("cpu", shape, v, num_inputs, axis, True, iters) if __name__ == "__main__": test.main()
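The GB/sec figure printed by _run_graph above folds in several constants: 4 bytes per float32 element, a factor of 2 for reading and writing each element, and the 100 concat ops that build_graph creates. A small stand-alone helper, written here only to make that arithmetic explicit (concat_gbytes_per_sec is not part of the benchmark), could look like this:

def concat_gbytes_per_sec(input_shape, num_inputs, secs_per_iter,
                          bytes_per_elem=4, num_concats=100):
  """Approximate memory throughput implied by one benchmark iteration."""
  elems = num_inputs * input_shape[0] * input_shape[1]
  # Each element is read once and written once by every concat op.
  return elems * bytes_per_elem * 2 * num_concats / secs_per_iter / 1e9

# Example: 20 inputs of shape [2000, 8] at 50 ms per iteration -> ~5.1 GB/sec.
print(concat_gbytes_per_sec([2000, 8], 20, 0.05))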
tensorflow-master
tensorflow/python/ops/concat_benchmark.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Operations for embeddings.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from six.moves import xrange # pylint: disable=redefined-builtin from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import tensor_shape from tensorflow.python.ops import array_ops from tensorflow.python.ops import clip_ops # Imports gradient definitions. from tensorflow.python.ops import data_flow_grad # pylint: disable=unused-import from tensorflow.python.ops import data_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import resource_variable_ops from tensorflow.python.ops import sparse_ops from tensorflow.python.ops import variables from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util.tf_export import tf_export def _clip(params, ids, max_norm): """Helper function for _embedding_lookup_and_transform. This function optionally clips embeddings to an l2-norm of max_norm. Args: params: A `Tensor` of embeddings retrieved by `gather`. ids: The `ids` argument that was passed to `gather`. max_norm: If not `None`, each embedding is clipped if its l2-norm is larger than this value. Returns: A `Tensor` with the same type as `params`. """ def _rank(x): """Helper function to retrieve the rank of a tensor. Args: x: Something convertible to `Tensor`. Returns: Either a pair `(rank, True)` where `rank` is an integer or a pair `(rank, False)` where `rank` is an integer `Tensor`. In either case, `rank` is the rank of `x`. """ rank = ops.convert_to_tensor(x).get_shape().ndims if rank: return rank, True else: return array_ops.rank(x), False if max_norm is None: return params ids_rank, ids_static = _rank(ids) params_rank, params_static = _rank(params) return clip_ops.clip_by_norm( params, max_norm, axes=(list(range(ids_rank, params_rank)) if ids_static and params_static else math_ops.range(ids_rank, params_rank))) def _embedding_lookup_and_transform(params, ids, partition_strategy="mod", name=None, max_norm=None, transform_fn=None): """Helper function for embedding_lookup and _compute_sampled_logits. This function is a generalization of embedding_lookup that optionally applies a caller-specified transformation to each embedding. This is done through the `transform_fn` argument. If provided, the function is applied to each partitioned tensor of retrieved embeddings, colocated with the embeddings. This function will be called with a single `Tensor` argument of the same type as the `params` tensor and should return a `Tensor`. The shape of the argument will be the same as `params` except for the size of the first dimension. 
The first dimension of the result's shape must be the same size as the argument's. Args: params: See embedding_lookup. ids: See embedding_lookup. partition_strategy: See embedding_lookup. name: See embedding_lookup. max_norm: See embedding_lookup. transform_fn: An optional function to apply to each retrieved embedding. If max_norm is provided, transform_fn is applied to the norm-limited embeddings. Returns: See embedding_lookup for details. Raises: ValueError: If `params` is empty. """ if params is None or params in ((), []): raise ValueError("Need at least one param") if isinstance(params, variables.PartitionedVariable): params = list(params) # Iterate to get the underlying Variables. if not isinstance(params, list): params = [params] with ops.name_scope(name, "embedding_lookup", params + [ids]) as name: np = len(params) # Number of partitions # Preserve the resource variable status to avoid accidental dense reads. if not any( isinstance(p, resource_variable_ops.ResourceVariable) for p in params): params = ops.convert_n_to_tensor_or_indexed_slices(params, name="params") ids = ops.convert_to_tensor(ids, name="ids") if np == 1 and (not transform_fn or ids.get_shape().ndims == 1): with ops.colocate_with(params[0]): result = _clip( array_ops.gather(params[0], ids, name=name), ids, max_norm) if transform_fn: result = transform_fn(result) # Make sure the final result does not have colocation contraints on the # params. Similar to the case np > 1 where parallel_dynamic_stitch is # outside the scioe of all with ops.colocate_with(params[p]). return array_ops.identity(result) else: # Flatten the ids. There are two cases where we need to do this. # - There is more than one params tensor. # - There is a transform_fn and ids is not statically known to be 1-D. # We must flatten in this case because transform_fn expects a flat # tensor of embeddings. flat_ids = array_ops.reshape(ids, [-1]) original_indices = math_ops.range(array_ops.size(flat_ids)) # Create p_assignments and set new_ids depending on the strategy. if partition_strategy == "mod": p_assignments = flat_ids % np new_ids = flat_ids // np elif partition_strategy == "div": # Compute num_total_ids as the sum of dim-0 of params, then assign to # partitions based on a constant number of ids per partition. Optimize # if we already know the full shape statically. dim_0_size = tensor_shape.Dimension( tensor_shape.dimension_value(params[0].get_shape()[0])) for p in xrange(1, np): dim_0_size += tensor_shape.Dimension( tensor_shape.dimension_value(params[p].get_shape()[0])) if dim_0_size.value: num_total_ids = constant_op.constant(dim_0_size.value, flat_ids.dtype) else: dim_0_sizes = [] for p in xrange(np): param_p_dim = tensor_shape.dimension_value(params[p].get_shape()[0]) if param_p_dim is not None: dim_0_sizes.append(param_p_dim) else: with ops.colocate_with(params[p]): dim_0_sizes.append(array_ops.shape(params[p])[0]) num_total_ids = math_ops.reduce_sum( math_ops.cast(array_ops.stack(dim_0_sizes), flat_ids.dtype)) ids_per_partition = num_total_ids // np extras = num_total_ids % np p_assignments = math_ops.maximum(flat_ids // (ids_per_partition + 1), (flat_ids - extras) // ids_per_partition) # Emulate a conditional using a boolean indicator tensor new_ids = array_ops.where(p_assignments < extras, flat_ids % (ids_per_partition + 1), (flat_ids - extras) % ids_per_partition) else: raise ValueError("Unrecognized partition strategy: " + partition_strategy) # Cast partition assignments to int32 for use in dynamic_partition. 
# There really should not be more than 2^32 partitions. p_assignments = math_ops.cast(p_assignments, dtypes.int32) # Partition list of ids based on assignments into np separate lists gather_ids = data_flow_ops.dynamic_partition(new_ids, p_assignments, np) # Similarly, partition the original indices. pindices = data_flow_ops.dynamic_partition(original_indices, p_assignments, np) # Do np separate lookups, finding embeddings for plist[p] in params[p] partitioned_result = [] for p in xrange(np): pids = gather_ids[p] with ops.colocate_with(params[p]): result = array_ops.gather(params[p], pids) if transform_fn: # If transform_fn is provided, the clip_by_norm precedes # the transform and hence must be co-located. See below # for the counterpart if transform_fn is not proveded. result = transform_fn(_clip(result, pids, max_norm)) partitioned_result.append(result) # Stitch these back together ret = data_flow_ops.parallel_dynamic_stitch( pindices, partitioned_result, name=name) # Determine the static element shape. if transform_fn is None: element_shape_s = params[0].get_shape()[1:] for p in params[1:]: element_shape_s = element_shape_s.merge_with(p.get_shape()[1:]) else: element_shape_s = ret.get_shape()[1:] # Compute the dynamic element shape. if element_shape_s.is_fully_defined(): element_shape_d = element_shape_s elif transform_fn is None: # It's important that we compute params[0].shape on the right device # to avoid data motion. with ops.colocate_with(params[0]): params_shape = array_ops.shape(params[0]) element_shape_d = params_shape[1:] else: element_shape_d = array_ops.shape(ret)[1:] # Reshape to reverse the flattening of ids. ret = array_ops.reshape( ret, array_ops.concat([array_ops.shape(ids), element_shape_d], 0)) # Normally the reshape is sufficient, but setting shape explicitly # teaches shape inference that params[1:].get_shape() matters # (in the case that transform_fn is None). ret.set_shape(ids.get_shape().concatenate(element_shape_s)) if not transform_fn: # If transform_fn was provided, the clip_by_norm was done above. ret = _clip(ret, ids, max_norm) return ret @tf_export(v1=["nn.embedding_lookup"]) def embedding_lookup( params, ids, partition_strategy="mod", name=None, validate_indices=True, # pylint: disable=unused-argument max_norm=None): """Looks up `ids` in a list of embedding tensors. This function is used to perform parallel lookups on the list of tensors in `params`. It is a generalization of `tf.gather`, where `params` is interpreted as a partitioning of a large embedding tensor. `params` may be a `PartitionedVariable` as returned by using `tf.compat.v1.get_variable()` with a partitioner. If `len(params) > 1`, each element `id` of `ids` is partitioned between the elements of `params` according to the `partition_strategy`. In all strategies, if the id space does not evenly divide the number of partitions, each of the first `(max_id + 1) % len(params)` partitions will be assigned one more id. If `partition_strategy` is `"mod"`, we assign each id to partition `p = id % len(params)`. For instance, 13 ids are split across 5 partitions as: `[[0, 5, 10], [1, 6, 11], [2, 7, 12], [3, 8], [4, 9]]` If `partition_strategy` is `"div"`, we assign ids to partitions in a contiguous manner. In this case, 13 ids are split across 5 partitions as: `[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10], [11, 12]]` The results of the lookup are concatenated into a dense tensor. The returned tensor has shape `shape(ids) + shape(params)[1:]`. 
Args: params: A single tensor representing the complete embedding tensor, or a list of P tensors all of same shape except for the first dimension, representing sharded embedding tensors. Alternatively, a `PartitionedVariable`, created by partitioning along dimension 0. Each element must be appropriately sized for the given `partition_strategy`. ids: A `Tensor` with type `int32` or `int64` containing the ids to be looked up in `params`. partition_strategy: A string specifying the partitioning strategy, relevant if `len(params) > 1`. Currently `"div"` and `"mod"` are supported. Default is `"mod"`. name: A name for the operation (optional). validate_indices: DEPRECATED. If this operation is assigned to CPU, values in `indices` are always validated to be within range. If assigned to GPU, out-of-bound indices result in safe but unspecified behavior, which may include raising an error. max_norm: If not `None`, each embedding is clipped if its l2-norm is larger than this value. Returns: A `Tensor` with the same type as the tensors in `params`. Raises: ValueError: If `params` is empty. """ return _embedding_lookup_and_transform( params=params, ids=ids, partition_strategy=partition_strategy, name=name, max_norm=max_norm, transform_fn=None) @tf_export("nn.embedding_lookup", v1=[]) def embedding_lookup_v2(params, ids, max_norm=None, name=None): """Looks up `ids` in a list of embedding tensors. This function is used to perform parallel lookups on the list of tensors in `params`. It is a generalization of `tf.gather`, where `params` is interpreted as a partitioning of a large embedding tensor. `params` may be a `PartitionedVariable` as returned by using `tf.compat.v1.get_variable()` with a partitioner. If `len(params) > 1`, each element `id` of `ids` is partitioned between the elements of `params` according to the `partition_strategy`. In all strategies, if the id space does not evenly divide the number of partitions, each of the first `(max_id + 1) % len(params)` partitions will be assigned one more id. The `partition_strategy` is always `"div"` currently. This means that we assign ids to partitions in a contiguous manner. For instance, 13 ids are split across 5 partitions as: `[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10], [11, 12]]` The results of the lookup are concatenated into a dense tensor. The returned tensor has shape `shape(ids) + shape(params)[1:]`. Args: params: A single tensor representing the complete embedding tensor, or a list of P tensors all of same shape except for the first dimension, representing sharded embedding tensors. Alternatively, a `PartitionedVariable`, created by partitioning along dimension 0. Each element must be appropriately sized for the 'div' `partition_strategy`. ids: A `Tensor` with type `int32` or `int64` containing the ids to be looked up in `params`. max_norm: If not `None`, each embedding is clipped if its l2-norm is larger than this value. name: A name for the operation (optional). Returns: A `Tensor` with the same type as the tensors in `params`. Raises: ValueError: If `params` is empty. """ return embedding_lookup(params, ids, "div", name, max_norm=max_norm) @tf_export(v1=["nn.embedding_lookup_sparse"]) def embedding_lookup_sparse(params, sp_ids, sp_weights, partition_strategy="mod", name=None, combiner=None, max_norm=None): """Computes embeddings for the given ids and weights. This op assumes that there is at least one id for each row in the dense tensor represented by sp_ids (i.e. 
there are no rows with empty features), and that all the indices of sp_ids are in canonical row-major order. It also assumes that all id values lie in the range [0, p0), where p0 is the sum of the size of params along dimension 0. Args: params: A single tensor representing the complete embedding tensor, or a list of P tensors all of same shape except for the first dimension, representing sharded embedding tensors. Alternatively, a `PartitionedVariable`, created by partitioning along dimension 0. Each element must be appropriately sized for the given `partition_strategy`. sp_ids: N x M `SparseTensor` of int64 ids where N is typically batch size and M is arbitrary. sp_weights: either a `SparseTensor` of float / double weights, or `None` to indicate all weights should be taken to be 1. If specified, `sp_weights` must have exactly the same shape and indices as `sp_ids`. partition_strategy: A string specifying the partitioning strategy, relevant if `len(params) > 1`. Currently `"div"` and `"mod"` are supported. Default is `"mod"`. See `tf.nn.embedding_lookup` for more details. name: Optional name for the op. combiner: A string specifying the reduction op. Currently "mean", "sqrtn" and "sum" are supported. "sum" computes the weighted sum of the embedding results for each row. "mean" is the weighted sum divided by the total weight. "sqrtn" is the weighted sum divided by the square root of the sum of the squares of the weights. max_norm: If not `None`, each embedding is clipped if its l2-norm is larger than this value, before combining. Returns: A dense tensor representing the combined embeddings for the sparse ids. For each row in the dense tensor represented by `sp_ids`, the op looks up the embeddings for all ids in that row, multiplies them by the corresponding weight, and combines these embeddings as specified. In other words, if `shape(combined params) = [p0, p1, ..., pm]` and `shape(sp_ids) = shape(sp_weights) = [d0, d1, ..., dn]` then `shape(output) = [d0, d1, ..., dn-1, p1, ..., pm]`. For instance, if params is a 10x20 matrix, and sp_ids / sp_weights are ```python [0, 0]: id 1, weight 2.0 [0, 1]: id 3, weight 0.5 [1, 0]: id 0, weight 1.0 [2, 3]: id 1, weight 3.0 ``` with `combiner`="mean", then the output will be a 3x20 matrix where ```python output[0, :] = (params[1, :] * 2.0 + params[3, :] * 0.5) / (2.0 + 0.5) output[1, :] = (params[0, :] * 1.0) / 1.0 output[2, :] = (params[1, :] * 3.0) / 3.0 ``` Raises: TypeError: If `sp_ids` is not a `SparseTensor`, or if `sp_weights` is neither `None` nor `SparseTensor`. ValueError: If `combiner` is not one of {"mean", "sqrtn", "sum"}. """ if combiner is None: logging.warn("The default value of combiner will change from \"mean\" " "to \"sqrtn\" after 2016/11/01.") combiner = "mean" if combiner not in ("mean", "sqrtn", "sum"): raise ValueError("combiner must be one of 'mean', 'sqrtn' or 'sum'") if isinstance(params, variables.PartitionedVariable): params = list(params) # Iterate to get the underlying Variables. 
if not isinstance(params, list): params = [params] if not isinstance(sp_ids, sparse_tensor.SparseTensor): raise TypeError("sp_ids must be SparseTensor") ignore_weights = sp_weights is None if not ignore_weights: if not isinstance(sp_weights, sparse_tensor.SparseTensor): raise TypeError("sp_weights must be either None or SparseTensor") sp_ids.values.get_shape().assert_is_compatible_with( sp_weights.values.get_shape()) sp_ids.indices.get_shape().assert_is_compatible_with( sp_weights.indices.get_shape()) sp_ids.dense_shape.get_shape().assert_is_compatible_with( sp_weights.dense_shape.get_shape()) # TODO(yleon): Add enhanced node assertions to verify that sp_ids and # sp_weights have equal indices and shapes. with ops.name_scope(name, "embedding_lookup_sparse", params + [sp_ids]) as name: segment_ids = sp_ids.indices[:, 0] if segment_ids.dtype != dtypes.int32: segment_ids = math_ops.cast(segment_ids, dtypes.int32) ids = sp_ids.values ids, idx = array_ops.unique(ids) embeddings = embedding_lookup( params, ids, partition_strategy=partition_strategy, max_norm=max_norm) if embeddings.dtype in (dtypes.float16, dtypes.bfloat16): embeddings = math_ops.cast(embeddings, dtypes.float32) if not ignore_weights: weights = sp_weights.values if weights.dtype != embeddings.dtype: weights = math_ops.cast(weights, embeddings.dtype) embeddings = array_ops.gather(embeddings, idx) # Reshape weights to allow broadcast ones = array_ops.fill( array_ops.expand_dims(array_ops.rank(embeddings) - 1, 0), 1) bcast_weights_shape = array_ops.concat([array_ops.shape(weights), ones], 0) orig_weights_shape = weights.get_shape() weights = array_ops.reshape(weights, bcast_weights_shape) # Set the weight shape, since after reshaping to bcast_weights_shape, # the shape becomes None. if embeddings.get_shape().ndims is not None: weights.set_shape( orig_weights_shape.concatenate( [1 for _ in range(embeddings.get_shape().ndims - 1)])) embeddings *= weights if combiner == "sum": embeddings = math_ops.segment_sum(embeddings, segment_ids, name=name) elif combiner == "mean": embeddings = math_ops.segment_sum(embeddings, segment_ids) weight_sum = math_ops.segment_sum(weights, segment_ids) embeddings = math_ops.div(embeddings, weight_sum, name=name) elif combiner == "sqrtn": embeddings = math_ops.segment_sum(embeddings, segment_ids) weights_squared = math_ops.pow(weights, 2) weight_sum = math_ops.segment_sum(weights_squared, segment_ids) weight_sum_sqrt = math_ops.sqrt(weight_sum) embeddings = math_ops.div(embeddings, weight_sum_sqrt, name=name) else: assert False, "Unrecognized combiner" else: assert idx is not None if combiner == "sum": embeddings = math_ops.sparse_segment_sum( embeddings, idx, segment_ids, name=name) elif combiner == "mean": embeddings = math_ops.sparse_segment_mean( embeddings, idx, segment_ids, name=name) elif combiner == "sqrtn": embeddings = math_ops.sparse_segment_sqrt_n( embeddings, idx, segment_ids, name=name) else: assert False, "Unrecognized combiner" return embeddings @tf_export("nn.embedding_lookup_sparse", v1=[]) def embedding_lookup_sparse_v2(params, sp_ids, sp_weights, combiner=None, max_norm=None, name=None): """Computes embeddings for the given ids and weights. This op assumes that there is at least one id for each row in the dense tensor represented by sp_ids (i.e. there are no rows with empty features), and that all the indices of sp_ids are in canonical row-major order. It also assumes that all id values lie in the range [0, p0), where p0 is the sum of the size of params along dimension 0. 
Args: params: A single tensor representing the complete embedding tensor, or a list of P tensors all of same shape except for the first dimension, representing sharded embedding tensors. Alternatively, a `PartitionedVariable`, created by partitioning along dimension 0. Each element must be appropriately sized for ``"div"`` `partition_strategy`. sp_ids: N x M `SparseTensor` of int64 ids where N is typically batch size and M is arbitrary. sp_weights: either a `SparseTensor` of float / double weights, or `None` to indicate all weights should be taken to be 1. If specified, `sp_weights` must have exactly the same shape and indices as `sp_ids`. combiner: A string specifying the reduction op. Currently "mean", "sqrtn" and "sum" are supported. "sum" computes the weighted sum of the embedding results for each row. "mean" is the weighted sum divided by the total weight. "sqrtn" is the weighted sum divided by the square root of the sum of the squares of the weights. max_norm: If not `None`, each embedding is clipped if its l2-norm is larger than this value, before combining. name: Optional name for the op. Returns: A dense tensor representing the combined embeddings for the sparse ids. For each row in the dense tensor represented by `sp_ids`, the op looks up the embeddings for all ids in that row, multiplies them by the corresponding weight, and combines these embeddings as specified. In other words, if `shape(combined params) = [p0, p1, ..., pm]` and `shape(sp_ids) = shape(sp_weights) = [d0, d1, ..., dn]` then `shape(output) = [d0, d1, ..., dn-1, p1, ..., pm]`. For instance, if params is a 10x20 matrix, and sp_ids / sp_weights are ```python [0, 0]: id 1, weight 2.0 [0, 1]: id 3, weight 0.5 [1, 0]: id 0, weight 1.0 [2, 3]: id 1, weight 3.0 ``` with `combiner`="mean", then the output will be a 3x20 matrix where ```python output[0, :] = (params[1, :] * 2.0 + params[3, :] * 0.5) / (2.0 + 0.5) output[1, :] = (params[0, :] * 1.0) / 1.0 output[2, :] = (params[1, :] * 3.0) / 3.0 ``` Raises: TypeError: If `sp_ids` is not a `SparseTensor`, or if `sp_weights` is neither `None` nor `SparseTensor`. ValueError: If `combiner` is not one of {"mean", "sqrtn", "sum"}. """ return embedding_lookup_sparse(params, sp_ids, sp_weights, "div", name, combiner, max_norm) @tf_export("nn.safe_embedding_lookup_sparse", v1=[]) def safe_embedding_lookup_sparse_v2(embedding_weights, sparse_ids, sparse_weights=None, combiner="mean", default_id=None, max_norm=None, name=None): """Lookup embedding results, accounting for invalid IDs and empty features. The partitioned embedding in `embedding_weights` must all be the same shape except for the first dimension. The first dimension is allowed to vary as the vocabulary size is not necessarily a multiple of `P`. `embedding_weights` may be a `PartitionedVariable` as returned by using `tf.compat.v1.get_variable()` with a partitioner. Invalid IDs (< 0) are pruned from input IDs and weights, as well as any IDs with non-positive weight. For an entry with no features, the embedding vector for `default_id` is returned, or the 0-vector if `default_id` is not supplied. The ids and weights may be multi-dimensional. Embeddings are always aggregated along the last dimension. Note: when doing embedding lookup on `embedding_weights`, "div" partition strategy will be used. Support for other partition strategy will be added later. Args: embedding_weights: A list of `P` float `Tensor`s or values representing partitioned embedding `Tensor`s. 
Alternatively, a `PartitionedVariable` created by partitioning along dimension 0. The total unpartitioned shape should be `[e_0, e_1, ..., e_m]`, where `e_0` represents the vocab size and `e_1, ..., e_m` are the embedding dimensions. sparse_ids: `SparseTensor` of shape `[d_0, d_1, ..., d_n]` containing the ids. `d_0` is typically batch size. sparse_weights: `SparseTensor` of same shape as `sparse_ids`, containing float weights corresponding to `sparse_ids`, or `None` if all weights are be assumed to be 1.0. combiner: A string specifying how to combine embedding results for each entry. Currently "mean", "sqrtn" and "sum" are supported, with "mean" the default. default_id: The id to use for an entry with no features. max_norm: If not `None`, all embeddings are l2-normalized to max_norm before combining. name: A name for this operation (optional). Returns: Dense `Tensor` of shape `[d_0, d_1, ..., d_{n-1}, e_1, ..., e_m]`. Raises: ValueError: if `embedding_weights` is empty. """ return safe_embedding_lookup_sparse( embedding_weights, sparse_ids, sparse_weights=sparse_weights, combiner=combiner, default_id=default_id, name=name, partition_strategy="div", max_norm=max_norm) @tf_export(v1=["nn.safe_embedding_lookup_sparse"]) def safe_embedding_lookup_sparse(embedding_weights, sparse_ids, sparse_weights=None, combiner="mean", default_id=None, name=None, partition_strategy="div", max_norm=None): """Lookup embedding results, accounting for invalid IDs and empty features. The partitioned embedding in `embedding_weights` must all be the same shape except for the first dimension. The first dimension is allowed to vary as the vocabulary size is not necessarily a multiple of `P`. `embedding_weights` may be a `PartitionedVariable` as returned by using `tf.compat.v1.get_variable()` with a partitioner. Invalid IDs (< 0) are pruned from input IDs and weights, as well as any IDs with non-positive weight. For an entry with no features, the embedding vector for `default_id` is returned, or the 0-vector if `default_id` is not supplied. The ids and weights may be multi-dimensional. Embeddings are always aggregated along the last dimension. Args: embedding_weights: A list of `P` float `Tensor`s or values representing partitioned embedding `Tensor`s. Alternatively, a `PartitionedVariable` created by partitioning along dimension 0. The total unpartitioned shape should be `[e_0, e_1, ..., e_m]`, where `e_0` represents the vocab size and `e_1, ..., e_m` are the embedding dimensions. sparse_ids: `SparseTensor` of shape `[d_0, d_1, ..., d_n]` containing the ids. `d_0` is typically batch size. sparse_weights: `SparseTensor` of same shape as `sparse_ids`, containing float weights corresponding to `sparse_ids`, or `None` if all weights are be assumed to be 1.0. combiner: A string specifying how to combine embedding results for each entry. Currently "mean", "sqrtn" and "sum" are supported, with "mean" the default. default_id: The id to use for an entry with no features. name: A name for this operation (optional). partition_strategy: A string specifying the partitioning strategy. Currently `"div"` and `"mod"` are supported. Default is `"div"`. max_norm: If not `None`, all embeddings are l2-normalized to max_norm before combining. Returns: Dense `Tensor` of shape `[d_0, d_1, ..., d_{n-1}, e_1, ..., e_m]`. Raises: ValueError: if `embedding_weights` is empty. """ if embedding_weights is None: raise ValueError("Missing embedding_weights %s." 
% embedding_weights) if isinstance(embedding_weights, variables.PartitionedVariable): embedding_weights = list(embedding_weights) # get underlying Variables. if not isinstance(embedding_weights, list): embedding_weights = [embedding_weights] if len(embedding_weights) < 1: raise ValueError("Missing embedding_weights %s." % embedding_weights) dtype = sparse_weights.dtype if sparse_weights is not None else None embedding_weights = [ w if (isinstance(w, resource_variable_ops.ResourceVariable) and dtype in (None, w.dtype)) else ops.convert_to_tensor(w, dtype=dtype) for w in embedding_weights ] with ops.name_scope(name, "embedding_lookup", embedding_weights + [sparse_ids, sparse_weights]) as scope: # Reshape higher-rank sparse ids and weights to linear segment ids. original_shape = sparse_ids.dense_shape original_rank_dim = tensor_shape.dimension_value( sparse_ids.dense_shape.get_shape()[0]) original_rank = ( array_ops.size(original_shape) if original_rank_dim is None else original_rank_dim) sparse_ids = sparse_ops.sparse_reshape(sparse_ids, [ math_ops.reduce_prod( array_ops.slice(original_shape, [0], [original_rank - 1])), array_ops.gather(original_shape, original_rank - 1) ]) if sparse_weights is not None: sparse_weights = sparse_tensor.SparseTensor(sparse_ids.indices, sparse_weights.values, sparse_ids.dense_shape) # Prune invalid ids and weights. sparse_ids, sparse_weights = _prune_invalid_ids(sparse_ids, sparse_weights) if combiner != "sum": sparse_ids, sparse_weights = _prune_invalid_weights( sparse_ids, sparse_weights) # Fill in dummy values for empty features, if necessary. sparse_ids, is_row_empty = sparse_ops.sparse_fill_empty_rows( sparse_ids, default_id or 0) if sparse_weights is not None: sparse_weights, _ = sparse_ops.sparse_fill_empty_rows(sparse_weights, 1.0) result = embedding_lookup_sparse( embedding_weights, sparse_ids, sparse_weights, combiner=combiner, partition_strategy=partition_strategy, name=None if default_id is None else scope, max_norm=max_norm) if default_id is None: # Broadcast is_row_empty to the same shape as embedding_lookup_result, # for use in Select. is_row_empty = array_ops.tile( array_ops.reshape(is_row_empty, [-1, 1]), array_ops.stack([1, array_ops.shape(result)[1]])) result = array_ops.where( is_row_empty, array_ops.zeros_like(result), result, name=scope) # Reshape back from linear ids back into higher-dimensional dense result. 
final_result = array_ops.reshape( result, array_ops.concat([ array_ops.slice( math_ops.cast(original_shape, dtypes.int32), [0], [original_rank - 1]), array_ops.slice(array_ops.shape(result), [1], [-1]) ], 0)) final_result.set_shape( tensor_shape.unknown_shape( (tensor_shape.Dimension(original_rank_dim) - 1).value).concatenate( result.get_shape()[1:])) return final_result def _prune_invalid_ids(sparse_ids, sparse_weights): """Prune invalid IDs (< 0) from the input ids and weights.""" is_id_valid = math_ops.greater_equal(sparse_ids.values, 0) if sparse_weights is not None: is_id_valid = math_ops.logical_and( is_id_valid, array_ops.ones_like(sparse_weights.values, dtype=dtypes.bool)) sparse_ids = sparse_ops.sparse_retain(sparse_ids, is_id_valid) if sparse_weights is not None: sparse_weights = sparse_ops.sparse_retain(sparse_weights, is_id_valid) return sparse_ids, sparse_weights def _prune_invalid_weights(sparse_ids, sparse_weights): """Prune invalid weights (< 0) from the input ids and weights.""" if sparse_weights is not None: is_weights_valid = math_ops.greater(sparse_weights.values, 0) sparse_ids = sparse_ops.sparse_retain(sparse_ids, is_weights_valid) sparse_weights = sparse_ops.sparse_retain(sparse_weights, is_weights_valid) return sparse_ids, sparse_weights
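# Illustrative sketch (not part of embedding_ops.py): a minimal NumPy re-derivation of the
# partition-assignment arithmetic used by _embedding_lookup_and_transform above for the "mod"
# and "div" strategies. It reproduces the 13-ids-across-5-partitions example from the
# embedding_lookup docstring. Names here are illustrative; `num_partitions` stands in for the
# variable `np` used in the TF code (renamed to avoid clashing with the NumPy module).
import numpy as np

flat_ids = np.arange(13)
num_partitions = 5

# "mod" strategy: id -> partition id % P; the value gathered from that shard is id // P.
mod_assignments = flat_ids % num_partitions
mod_new_ids = flat_ids // num_partitions  # position of each id within its shard
# Expected grouping: [[0, 5, 10], [1, 6, 11], [2, 7, 12], [3, 8], [4, 9]]
print([flat_ids[mod_assignments == p].tolist() for p in range(num_partitions)])

# "div" strategy: contiguous id ranges; the first `extras` partitions hold one extra id,
# mirroring the maximum()/where() expressions in the code above.
num_total_ids = len(flat_ids)
ids_per_partition = num_total_ids // num_partitions   # 2
extras = num_total_ids % num_partitions                # 3
div_assignments = np.maximum(flat_ids // (ids_per_partition + 1),
                             (flat_ids - extras) // ids_per_partition)
div_new_ids = np.where(div_assignments < extras,
                       flat_ids % (ids_per_partition + 1),
                       (flat_ids - extras) % ids_per_partition)
# Expected grouping: [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10], [11, 12]]
print([flat_ids[div_assignments == p].tolist() for p in range(num_partitions)])
# Positions within each shard: [[0, 1, 2], [0, 1, 2], [0, 1, 2], [0, 1], [0, 1]]
print([div_new_ids[div_assignments == p].tolist() for p in range(num_partitions)])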
tensorflow-master
tensorflow/python/ops/embedding_ops.py
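# Illustrative sketch (not part of the file listed above): a small NumPy re-derivation of the
# combiner arithmetic documented in embedding_lookup_sparse in embedding_ops.py. It reproduces
# the docstring's worked example (10x20 params, four sparse entries) under the assumption of a
# single unsharded params tensor; all names below are illustrative.
import numpy as np

params = np.random.rand(10, 20)            # a single 10x20 embedding table
rows = [0, 0, 1, 2]                        # sp_ids.indices[:, 0]
ids = [1, 3, 0, 1]                         # sp_ids.values
weights = np.array([2.0, 0.5, 1.0, 3.0])   # sp_weights.values

def combine(combiner):
  """Combines weighted embeddings per output row, as documented above."""
  out = np.zeros((3, 20))
  for r in range(3):
    sel = [i for i, row in enumerate(rows) if row == r]
    w = weights[sel]
    emb = params[[ids[i] for i in sel]] * w[:, None]
    if combiner == "sum":
      out[r] = emb.sum(axis=0)
    elif combiner == "mean":
      out[r] = emb.sum(axis=0) / w.sum()
    elif combiner == "sqrtn":
      out[r] = emb.sum(axis=0) / np.sqrt((w ** 2).sum())
  return out

mean_out = combine("mean")
# Matches the docstring example:
#   output[0, :] = (params[1, :] * 2.0 + params[3, :] * 0.5) / (2.0 + 0.5)
assert np.allclose(mean_out[0], (params[1] * 2.0 + params[3] * 0.5) / 2.5)
assert np.allclose(mean_out[1], params[0])   # (params[0] * 1.0) / 1.0
assert np.allclose(mean_out[2], params[1])   # (params[1] * 3.0) / 3.0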
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for stateful_random_ops.py.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import re from absl.testing import parameterized import numpy as np from tensorflow.python.distribute import values as dist_values from tensorflow.python.distribute.mirrored_strategy import MirroredStrategy from tensorflow.python.eager import def_function from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.kernel_tests.random import util as \ random_test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import gen_random_ops from tensorflow.python.ops import gen_stateful_random_ops from tensorflow.python.ops import logging_ops from tensorflow.python.ops import stateful_random_ops as \ random from tensorflow.python.ops import variables from tensorflow.python.platform import test g_seeded = None g_unseeded = None GPU_FLOATS = [dtypes.float16, dtypes.float32, dtypes.float64] CPU_FLOATS = GPU_FLOATS + [dtypes.bfloat16] FLOATS = GPU_FLOATS INTS = [dtypes.int32, dtypes.int64] class StatefulRandomOpsTest(test.TestCase, parameterized.TestCase): def testCreateRNGStateIntSeed(self): """Tests `create_rng_state` when `seed` is int.""" # using leading 'F' to test overflow tolerance state = random.create_rng_state(0xFFFF222233334444FFAA666677778888, random.RNG_ALG_PHILOX) self.assertAllEqual( list(map(random._uint_to_int, [0xFFAA666677778888, 0xFFFF222233334444] + [0] * (random.PHILOX_STATE_SIZE - 2))), state) def assertAllDifferent(self, tensors): """Checks that there are no duplicate elements anywhere among the tensors. Args: tensors: a list of tensors. They can have different shapes. """ tensors = [array_ops.reshape(t, shape=[-1]) for t in tensors] ls = array_ops.concat(tensors, axis=0).numpy().tolist() self.assertAllEqual(len(ls), len(set(ls))) @test_util.run_v2_only def testNonDeterministicInts(self): """Tests that non_deterministic_ints returns different results every time. This test is flaky, but with very low probability of failing. """ shape = [2, 3] dtype = dtypes.int64 a = random.non_deterministic_ints(shape=shape, dtype=dtype) self.assertAllEqual(shape, a.shape) self.assertEqual(dtype, a.dtype) b = random.non_deterministic_ints(shape, dtype=dtype) self.assertAllDifferent([a, b]) @test_util.run_v2_only def testBatchSeeds(self): """Test for batch seeds. 
""" shape = [2, 3] count = 6 gen = random.Generator.from_seed(1234) keys1 = gen._make_int64_keys(shape=shape) keys2 = gen._make_int64_keys(shape=shape) self.assertAllDifferent([keys1, keys2]) seeds1 = gen.make_seeds(count=count) seeds2 = gen.make_seeds(count=count) self.assertAllDifferent([seeds1[0, :], seeds2[0, :]]) gens = gen.split(count=count) self.assertAllEqual(count, len(gens)) randoms = [g.uniform_full_int(shape=shape, dtype=dtypes.int32) for g in gens] self.assertAllDifferent(randoms) # Tests graph mode. @def_function.function def f(): return gen.make_seeds(count=count) for _ in range(3): f() def assertRegex(self, pattern, text): self.assertTrue( re.search(pattern, text), "Can't find pattern '%s' in text '%s'" % (pattern, text)) @test_util.run_v2_only @test_util.run_cuda_only def testCrossDeviceSplit(self): """Tests that a CPU RNG can split into RNGs on GPU. """ with ops.device("/device:CPU:0"): gen = random.Generator.from_seed(1234) # gen is on CPU self.assertRegex("CPU", gen.state.device) with ops.device(test_util.gpu_device_name()): gens = gen.split(count=10) # gens are on GPU self.assertRegex("GPU", gens[0].state.device) @test_util.run_v2_only def testReset(self): shape = [2, 3] gen = random.Generator.from_seed(0) for resetter in [ lambda g: g.reset(state=[1, 2, 3]), lambda g: g.reset_from_seed(1234), lambda g: g.reset_from_key_counter(key=1, counter=[2, 3]), ]: resetter(gen) expected_normal = gen.normal(shape) @def_function.function def f(resetter): resetter(gen) return gen.normal(shape) def check_results(expected_normal, v): self.assertAllEqual(expected_normal, v) check_results(expected_normal, f(resetter)) check_results(expected_normal, f(resetter)) @test_util.run_v2_only def testGeneratorCreation(self): """Tests generator creation, in both eager and tf.function. The interaction between Generator creation and defun should be the same as tf.Variable. 
""" shape = [2, 3] alg = random.RNG_ALG_PHILOX for constructor in [ lambda: random.Generator(state=[1, 2, 3], alg=alg), lambda: random.Generator.from_seed(1234), lambda: random.Generator.from_key_counter( # pylint: disable=g-long-lambda key=1, counter=[2, 3], alg=alg), ]: gen = constructor() # Tests tf.function expected_normal1 = gen.normal(shape) expected_normal2 = gen.normal(shape) global g_seeded g_seeded = None @def_function.function def f(constructor): global g_seeded # defun'ed function should only create variables once if g_seeded is None: g_seeded = constructor() return g_seeded.normal(shape) def check_results(expected_normal, v): self.assertAllEqual(expected_normal, v) check_results(expected_normal1, f(constructor)) check_results(expected_normal2, f(constructor)) @test_util.run_v2_only def testGeneratorCreationUnseeded(self): """Tests generator creation, the unseeded case.""" shape = [2, 3] global g_unseeded g_unseeded = None @def_function.function def f(): global g_unseeded # defun'ed function should only create variables once if g_unseeded is None: g_unseeded = random.Generator.from_non_deterministic_state() return g_unseeded.normal(shape) self.assertAllEqual(shape, f().shape) @test_util.run_v2_only def testGeneratorCopy(self): """Tests copying a generator.""" g = random.Generator.from_seed(0) g_copy = random.Generator(g) self.assertAllEqual(g.algorithm, g_copy.algorithm) self.assertAllEqual(g.state.read_value(), g_copy.state.read_value()) # Tests tf.function global g_seeded g_seeded = None # Do the same in tf.function @def_function.function def f(): global g_seeded # defun'ed function should only create variables once if g_seeded is None: g_seeded = random.Generator(g) self.assertAllEqual(g.algorithm, g_seeded.algorithm) self.assertAllEqual(g.state.read_value(), g_seeded.state.read_value()) f() @test_util.run_v1_only( ("This test is specifically for checking TF1 compatibility. " "It cannot run under TF2.")) def testTF1(self): seed = 1234 shape = [2, 3] expected_normal1 = constant_op.constant( [[0.9356609, 1.0854305, -0.93788373], [-0.50615472, 1.31697023, 0.71375787]], dtype=dtypes.float32) expected_normal2 = constant_op.constant( [[-0.3964749, 0.8369565, -0.30946946], [1.1206646, 1.00852597, -0.10185789]], dtype=dtypes.float32) with self.cached_session() as sess: gen1 = random.Generator.from_seed(seed) gen2 = random.Generator.from_non_deterministic_state() sess.run((gen1._state_var.initializer, gen2._state_var.initializer)) r1 = gen1.normal(shape, dtype=dtypes.float32) r2 = gen2.normal(shape, dtype=dtypes.float32) def f(): return sess.run((r1, r2)) def check_results(expected_normal, v1, v2): self.assertAllClose(expected_normal, v1, rtol=1e-5, atol=1e-5) self.assertAllEqual(shape, v2.shape) check_results(expected_normal1, *f()) check_results(expected_normal2, *f()) @test_util.run_v2_only @test_util.also_run_as_tf_function def testEagerAndDefun(self): """A simple test to make sure the op works in eager and defunned mode.""" random.get_global_generator().normal((3,)) @test_util.run_v2_only def testOpSeedSelectionAfterSetSeed(self): """Tests that op-seed selection is reset after reseting global generator. 
Fixing GitHub issue 9171: https://github.com/tensorflow/tensorflow/issues/9171 """ shape = (3,) random.get_global_generator().reset_from_seed(1) a = random.get_global_generator().normal(shape) random.get_global_generator().reset_from_seed(1) b = random.get_global_generator().normal(shape) self.assertAllEqual(a, b) # Now do the above again using accelerated ('defun'ed) computation @def_function.function def f(): return random.get_global_generator().normal(shape) random.get_global_generator().reset_from_seed(1) c = f() random.get_global_generator().reset_from_seed(1) d = f() self.assertAllEqual(c, d) self.assertAllEqual(a, c) @test_util.run_v2_only def testOpSeedSelectionNotSensitive(self): """Test that op-seed selection is not sensitive to trivial changes. Test that op-seed selection is not sensitive to trivial computation (i.e. graph) changes. Fixing b/32087099 """ def f(include_print): shape = constant_op.constant([5]) if include_print: shape = logging_ops.Print(shape, [shape]) return random.get_global_generator().normal(shape) def compare(fst_includes_print, snd_includes_print): random.get_global_generator().reset_from_seed(50) fst = f(fst_includes_print) random.get_global_generator().reset_from_seed(50) snd = f(snd_includes_print) self.assertAllEqual(fst, snd) # Now do the above again using accelerated (defunned) 'f'. # Running 'f' with two different Boolean arguments should cause # two different graphs to be generated, hence demonstrating the # insensitivity to graph changes. f_acc = def_function.function(f) random.get_global_generator().reset_from_seed(50) fst = f_acc(fst_includes_print) random.get_global_generator().reset_from_seed(50) snd = f_acc(snd_includes_print) self.assertAllEqual(fst, snd) compare(False, False) compare(True, True) compare(True, False) @test_util.run_v2_only def testKey(self): key = 1234 gen = random.Generator(state=[0, 0, key], alg=random.RNG_ALG_PHILOX) got = gen.key self.assertAllEqual(key, got) @def_function.function def f(): return gen.key got = f() self.assertAllEqual(key, got) @test_util.run_v2_only def testSkip(self): key = 1234 counter = 5678 gen = random.Generator(state=[counter, 0, key], alg=random.RNG_ALG_PHILOX) delta = 432 gen.skip(delta) new_counter = gen._state_var[0] self.assertAllEqual(counter + delta * 256, new_counter) def _sameAsOldRandomOps(self, device, floats): def compare(dtype, old, new): seed1, seed2 = 79, 25 # note how the two seeds for the old op correspond to the seed for the new # op with ops.device(device): gen = random.Generator(state=[0, seed2, seed1], alg=random.RNG_ALG_PHILOX) # create a graph for the old op in order to call it many times @def_function.function def run_old(): with ops.device(device): return old(dtype, seed1, seed2) def run_new(): with ops.device(device): return new(dtype, gen) for _ in range(100): self.assertAllEqual(run_old(), run_new()) shape = constant_op.constant([4, 7]) minval = 128 maxval = 256 # passing `dtype` around to compress go/gpylint-faq#cell-var-from-loop and # go/gpylint-faq#undefined-loop-variable def old_normal(dtype, seed1, seed2): return gen_random_ops.random_standard_normal( shape, dtype=dtype, seed=seed1, seed2=seed2) def new_normal(dtype, gen): return gen._standard_normal(shape, dtype=dtype) def old_truncated_normal(dtype, seed1, seed2): return gen_random_ops.truncated_normal( shape, dtype=dtype, seed=seed1, seed2=seed2) def new_truncated_normal(dtype, gen): return gen._truncated_normal(shape, dtype=dtype) def old_uniform_int(dtype, seed1, seed2): minval2 = constant_op.constant(minval, 
dtype=dtype) maxval2 = constant_op.constant(maxval, dtype=dtype) return gen_random_ops.random_uniform_int( shape, minval=minval2, maxval=maxval2, seed=seed1, seed2=seed2) def new_uniform_int(dtype, gen): return gen.uniform(shape, minval=minval, maxval=maxval, dtype=dtype) def old_uniform(dtype, seed1, seed2): return gen_random_ops.random_uniform( shape, dtype=dtype, seed=seed1, seed2=seed2) def new_uniform(dtype, gen): return gen._uniform(shape, dtype=dtype) for dtype in floats: compare(dtype, old_normal, new_normal) compare(dtype, old_truncated_normal, new_truncated_normal) compare(dtype, old_uniform, new_uniform) for dtype in INTS: compare(dtype, old_uniform_int, new_uniform_int) @test_util.run_v2_only def testSameAsOldRandomOpsCPU(self): """Tests that the generated numbers are the same as the old random_ops.py. The CPU version. """ self._sameAsOldRandomOps("/device:CPU:0", CPU_FLOATS) @test_util.run_v2_only @test_util.run_cuda_only def testSameAsOldRandomOpsGPU(self): """Tests that the generated numbers are the same as the old random_ops.py. The GPU version. """ self._sameAsOldRandomOps(test_util.gpu_device_name(), GPU_FLOATS) @parameterized.parameters(INTS + [dtypes.uint32, dtypes.uint64]) @test_util.run_v2_only @test_util.run_cuda_only def testGPUEqualsCPU(self, dtype): """Tests that GPU and CPU generate the same integer outputs.""" seed = 1234 shape = [315, 49] with ops.device("/device:CPU:0"): cpu = random.Generator.from_seed(seed).uniform_full_int( shape=shape, dtype=dtype) with ops.device(test_util.gpu_device_name()): gpu = random.Generator.from_seed(seed).uniform_full_int( shape=shape, dtype=dtype) self.assertAllEqual(cpu, gpu) @parameterized.parameters(FLOATS + INTS) @test_util.run_v2_only def testUniformIsInRange(self, dtype): minval = 2 maxval = 33 size = 1000 gen = random.Generator.from_seed(1234) x = gen.uniform( shape=[size], dtype=dtype, minval=minval, maxval=maxval).numpy() self.assertTrue(np.all(x >= minval)) self.assertTrue(np.all(x < maxval)) @parameterized.parameters(FLOATS) @test_util.run_v2_only def testNormalIsFinite(self, dtype): gen = random.Generator.from_seed(1234) x = gen.normal(shape=[10000], dtype=dtype).numpy() self.assertTrue(np.all(np.isfinite(x))) @parameterized.parameters(FLOATS + INTS) @test_util.run_v2_only def testDistributionOfUniform(self, dtype): """Use Pearson's Chi-squared test to test for uniformity.""" n = 1000 seed = 12 gen = random.Generator.from_seed(seed) maxval = 1 if dtype.is_integer: maxval = 100 x = gen.uniform(shape=[n], maxval=maxval, dtype=dtype).numpy() if maxval > 1: # Normalize y to range [0, 1). x = x.astype(float) / maxval # Tests that the values are distributed amongst 10 bins with equal # probability. 16.92 is the Chi^2 value for 9 degrees of freedom with # p=0.05. This test is probabilistic and would be flaky if the random # seed were not fixed. val = random_test_util.chi_squared(x, 10) self.assertLess(val, 16.92) @parameterized.parameters(FLOATS) @test_util.run_v2_only def testDistributionOfNormal(self, dtype): """Use Anderson-Darling test to test distribution appears normal.""" n = 1000 gen = random.Generator.from_seed(1234) x = gen.normal(shape=[n], dtype=dtype).numpy() # The constant 2.492 is the 5% critical value for the Anderson-Darling # test where the mean and variance are known. This test is probabilistic # so to avoid flakiness the seed is fixed. self.assertLess( random_test_util.anderson_darling(x.astype(float)), 2.492) @test_util.run_v2_only def testErrors(self): """Tests that proper errors are raised. 
""" shape = [2, 3] gen = random.Generator.from_seed(1234) with self.assertRaisesWithPredicateMatch( errors.InvalidArgumentError, r"must have shape \[\], not"): gen_stateful_random_ops.stateful_standard_normal_v2( gen.state.handle, [0, 0], shape) with self.assertRaisesWithPredicateMatch( errors.InvalidArgumentError, r"must have shape \[\], not"): gen_stateful_random_ops.rng_skip( gen.state.handle, gen.algorithm, [0, 0]) with self.assertRaisesWithPredicateMatch( TypeError, "Requested dtype: int64"): gen_stateful_random_ops.stateful_standard_normal_v2( gen.state.handle, 1.1, shape) with self.assertRaisesWithPredicateMatch( errors.InvalidArgumentError, "Unsupported algorithm id"): gen_stateful_random_ops.stateful_standard_normal_v2( gen.state.handle, 123, shape) var = variables.Variable([0, 0], dtype=dtypes.int32) with self.assertRaisesWithPredicateMatch( errors.InvalidArgumentError, "dtype of RNG state variable must be int64, not"): gen_stateful_random_ops.stateful_standard_normal_v2( var.handle, random.RNG_ALG_PHILOX, shape) var = variables.Variable([[0]], dtype=dtypes.int64) with self.assertRaisesWithPredicateMatch( errors.InvalidArgumentError, "RNG state must have one and only one dimension, not"): gen_stateful_random_ops.stateful_standard_normal_v2( var.handle, random.RNG_ALG_PHILOX, shape) var = variables.Variable([0], dtype=dtypes.int64) with self.assertRaisesWithPredicateMatch( errors.InvalidArgumentError, "For the Philox algorithm, the size of state must be at least"): gen_stateful_random_ops.stateful_standard_normal_v2( var.handle, random.RNG_ALG_PHILOX, shape) @test_util.run_v2_only def testSetGlobalGeneratorBadWithDefun(self): """Demonstrates that set_global_generator don't work properly with defun. """ shape = (3,) @def_function.function def f(): return random.get_global_generator().normal(shape) random.set_global_generator(random.Generator.from_seed(50)) with self.assertRaisesWithPredicateMatch( errors.NotFoundError, "Resource .+ does not exist"): _ = f() random.set_global_generator(random.Generator.from_seed(50)) _ = f() @test_util.run_v2_only @test_util.run_cuda_only def testMirroredStratSeq(self): """Tests RNG/MirrorStrategy interaction #1. If an RNG is created outside strategy.scope(), all replicas will access the same RNG object, and accesses are serialized. """ shape = [3, 4] dtype = dtypes.int32 gen = random.Generator.from_seed(1234) strat = MirroredStrategy(devices=["/cpu:0", test_util.gpu_device_name()]) with strat.scope(): def f(): t1 = gen.uniform_full_int(shape=shape, dtype=dtype) t2 = gen.uniform_full_int(shape=shape, dtype=dtype) t = array_ops.stack([t1, t2]) return t results = strat.extended.call_for_each_replica( fn=f) values = results.values self.assertAllEqual(2, len(values)) self.assertAllDifferent(values) @test_util.run_v2_only @test_util.run_cuda_only def testMirroredStratParaSync(self): """Tests RNG/MirrorStrategy interaction #2. If an RNG is created inside strategy.scope(), each replica gets an mirror of this RNG. If they access their RNGs in the same manner, their random-number streams are the same. 
""" shape = [3, 4] dtype = dtypes.int32 strat = MirroredStrategy(devices=["/cpu:0", test_util.gpu_device_name()]) with strat.scope(): gen = random.Generator.from_seed(1234) def f(): t1 = gen.uniform_full_int(shape=shape, dtype=dtype) t2 = gen.uniform_full_int(shape=shape, dtype=dtype) t = array_ops.stack([t1, t2]) return t results = strat.extended.call_for_each_replica(fn=f) values = results.values self.assertAllEqual(2, len(values)) self.assertAllEqual(values[0], values[1]) @test_util.run_v2_only @test_util.run_cuda_only def testMirroredStratParaSyncWithinFun(self): """Tests RNG/MirrorStrategy interaction #2b. If the RNG creation is within `f` in situation #2, the replicas' random-number streams are still the same. Note that whether the RNG creation is within strategy.scope() or not doesn't affect the result in this case (putting in inside strategy.scope() will cause unnecessary mirror creation and waste memory though). """ shape = [3, 4] dtype = dtypes.int32 strat = MirroredStrategy(devices=["/cpu:0", test_util.gpu_device_name()]) def f(): gen = random.Generator.from_seed(1234) t1 = gen.uniform_full_int(shape=shape, dtype=dtype) t2 = gen.uniform_full_int(shape=shape, dtype=dtype) t = array_ops.stack([t1, t2]) return t results = strat.extended.call_for_each_replica(fn=f) values = results.values self.assertAllEqual(2, len(values)) self.assertAllEqual(values[0], values[1]) @test_util.run_v2_only @test_util.run_cuda_only def testMirroredStratUnseedSync(self): """Tests RNG/MirrorStrategy interaction #2c. If the RNG created in situation #2 is unseeded, the replicas' random-number streams are still the same. If the RNG created in situation #2b is unseeded, the replicas' random-number streams will be different. We can't test this for now because the op 'NonDeterministicInts' is not implemented on GPU yet. """ shape = [3, 4] dtype = dtypes.int32 strat = MirroredStrategy(devices=["/cpu:0", test_util.gpu_device_name()]) # TODO(wangpeng): support calling `random.Generator()` inside `f` (i.e. # inside `call_for_each_replica` so that each replica can get a # different random-number stream. The only obstacle is that op # 'NonDeterministicInts' is not implemented on GPU.) with strat.scope(): gen = random.Generator.from_non_deterministic_state() def f(): t1 = gen.uniform_full_int(shape=shape, dtype=dtype) t2 = gen.uniform_full_int(shape=shape, dtype=dtype) t = array_ops.stack([t1, t2]) return t results = strat.extended.call_for_each_replica(fn=f) values = results.values self.assertAllEqual(2, len(values)) self.assertAllEqual(values[0], values[1]) @test_util.run_v2_only @test_util.run_cuda_only def testMirroredStratParaAsync(self): """Tests RNG/MirrorStrategy interaction #3. The user can create n independent RNGs outside strategy.scope(), where n is the number of replicas, and give one to each replica. The replicas can thus get different random-number streams. 
""" shape = [3, 4] dtype = dtypes.int32 gens = random.get_global_generator().split(count=2) devices = ["/cpu:0", test_util.gpu_device_name()] strat = MirroredStrategy(devices=devices) # Use `PerReplica` to specify which `gen` is sent to which replica gens = dist_values.PerReplica( device_map=dist_values.ReplicaDeviceMap(devices), values=[[g] for g in gens]) with strat.scope(): def f(gen): t1 = gen.uniform_full_int(shape=shape, dtype=dtype) t2 = gen.uniform_full_int(shape=shape, dtype=dtype) t = array_ops.stack([t1, t2]) return t results = strat.extended.call_for_each_replica( fn=f, args=gens) values = results.values self.assertAllEqual(2, len(values)) self.assertAllDifferent(values) if __name__ == "__main__": test.main()
tensorflow-master
tensorflow/python/ops/stateful_random_ops_test.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Operations often used for initializing tensors. All variable initializers returned by functions in this file should have the following signature: def _initializer(shape, dtype=dtypes.float32): Args: shape: List of `int` representing the shape of the output `Tensor`. Some initializers may also be able to accept a `Tensor`. dtype: (Optional) Type of the output `Tensor`. Returns: A `Tensor` of type `dtype` and `shape`. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import math import numpy as np from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.ops import array_ops from tensorflow.python.ops import gen_linalg_ops from tensorflow.python.ops import linalg_ops_impl from tensorflow.python.ops import math_ops from tensorflow.python.ops import random_ops from tensorflow.python.ops import stateless_random_ops from tensorflow.python.util.tf_export import tf_export class Initializer(object): """Initializer base class: all initializers inherit from this class. """ def __call__(self, shape, dtype=None): """Returns a tensor object initialized as specified by the initializer. Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. If not provided will return tensor of `tf.float32`. """ raise NotImplementedError def get_config(self): """Returns the configuration of the initializer as a JSON-serializable dict. Returns: A JSON-serializable Python dict. """ return {} @classmethod def from_config(cls, config): """Instantiates an initializer from a configuration dictionary. Example: ```python initializer = RandomUniform(-1, 1) config = initializer.get_config() initializer = RandomUniform.from_config(config) ``` Args: config: A Python dictionary. It will typically be the output of `get_config`. Returns: An Initializer instance. """ config.pop("dtype", None) return cls(**config) @tf_export("zeros_initializer", v1=[]) class Zeros(Initializer): """Initializer that generates tensors initialized to 0.""" def __call__(self, shape, dtype=dtypes.float32): dtype = dtypes.as_dtype(dtype) return array_ops.zeros(shape, dtype) @tf_export("ones_initializer", v1=[]) class Ones(Initializer): """Initializer that generates tensors initialized to 1.""" def __call__(self, shape, dtype=dtypes.float32): """Returns a tensor object initialized as specified by the initializer. Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. Only numeric or boolean dtypes are supported. Raises: ValuesError: If the dtype is not numeric or boolean. """ dtype = dtypes.as_dtype(dtype) if not dtype.is_numpy_compatible or dtype == dtypes.string: raise ValueError("Expected numeric or boolean dtype, got %s." 
% dtype) return array_ops.ones(shape, dtype) @tf_export("constant_initializer", v1=[]) class Constant(Initializer): """Initializer that generates tensors with constant values. The resulting tensor is populated with values of type `dtype`, as specified by arguments `value` following the desired `shape` of the new tensor (see examples below). The argument `value` can be a constant value, or a list of values of type `dtype`. If `value` is a list, then the length of the list must be less than or equal to the number of elements implied by the desired shape of the tensor. In the case where the total number of elements in `value` is less than the number of elements required by the tensor shape, the last element in `value` will be used to fill the remaining entries. If the total number of elements in `value` is greater than the number of elements required by the tensor shape, the initializer will raise a `ValueError`. Args: value: A Python scalar, list or tuple of values, or a N-dimensional numpy array. All elements of the initialized variable will be set to the corresponding value in the `value` argument. Raises: TypeError: If the input `value` is not one of the expected types. Examples: The following example can be rewritten using a numpy.ndarray instead of the `value` list, even reshaped, as shown in the two commented lines below the `value` list initialization. ```python >>> import numpy as np >>> import tensorflow as tf >>> value = [0, 1, 2, 3, 4, 5, 6, 7] >>> # value = np.array(value) >>> # value = value.reshape([2, 4]) >>> init = tf.compat.v1.constant_initializer(value) >>> print('fitting shape:') >>> with tf.compat.v1.Session(): >>> x = tf.compat.v1.get_variable('x', shape=[2, 4], initializer=init) >>> x.initializer.run() >>> print(x.eval()) fitting shape: [[ 0. 1. 2. 3.] [ 4. 5. 6. 7.]] >>> print('larger shape:') >>> with tf.compat.v1.Session(): >>> x = tf.compat.v1.get_variable('x', shape=[3, 4], initializer=init) >>> x.initializer.run() >>> print(x.eval()) larger shape: [[ 0. 1. 2. 3.] [ 4. 5. 6. 7.] [ 7. 7. 7. 7.]] >>> print('smaller shape:') >>> with tf.compat.v1.Session(): >>> x = tf.compat.v1.get_variable('x', shape=[2, 3], initializer=init) ValueError: Too many elements provided. Needed at most 6, but received 8 ``` """ def __init__(self, value=0): if not (np.isscalar(value) or isinstance(value, (list, tuple, np.ndarray))): raise TypeError( "Invalid type for initial value: %s (expected Python scalar, list or " "tuple of values, or numpy.ndarray)." % type(value)) self.value = value def __call__(self, shape, dtype=None): """Returns a tensor object initialized as specified by the initializer. Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. If not provided the dtype of the tensor created will be the type of the inital value. Raises: TypeError: If the initializer cannot create a tensor of the requested dtype. """ if dtype is not None: dtype = dtypes.as_dtype(dtype) return constant_op.constant( self.value, dtype=dtype, shape=shape) def get_config(self): return {"value": self.value} @tf_export("random_uniform_initializer", v1=[]) class RandomUniform(Initializer): """Initializer that generates tensors with a uniform distribution. Args: minval: A python scalar or a scalar tensor. Lower bound of the range of random values to generate. maxval: A python scalar or a scalar tensor. Upper bound of the range of random values to generate. Defaults to 1 for float types. seed: A Python integer. Used to create random seeds. See `tf.compat.v1.set_random_seed` for behavior. 
""" def __init__(self, minval=-0.05, maxval=0.05, seed=None): self.minval = minval self.maxval = maxval self.seed = seed self._random_generator = _RandomGenerator(seed) def __call__(self, shape, dtype=dtypes.float32): """Returns a tensor object initialized as specified by the initializer. Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. Only floating point and integer types are supported. Raises: ValueError: If the dtype is not numeric. """ dtype = dtypes.as_dtype(dtype) if not dtype.is_floating and not dtype.is_integer: raise ValueError("Expected float or integer dtype, got %s." % dtype) return self._random_generator.random_uniform(shape, self.minval, self.maxval, dtype) def get_config(self): return { "minval": self.minval, "maxval": self.maxval, "seed": self.seed } @tf_export("random_normal_initializer", v1=[]) class RandomNormal(Initializer): """Initializer that generates tensors with a normal distribution. Args: mean: a python scalar or a scalar tensor. Mean of the random values to generate. stddev: a python scalar or a scalar tensor. Standard deviation of the random values to generate. seed: A Python integer. Used to create random seeds. See `tf.compat.v1.set_random_seed` for behavior. """ def __init__(self, mean=0.0, stddev=0.05, seed=None): self.mean = mean self.stddev = stddev self.seed = seed self._random_generator = _RandomGenerator(seed) def __call__(self, shape, dtype=dtypes.float32): """Returns a tensor object initialized as specified by the initializer. Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. Only floating point types are supported. Raises: ValueError: If the dtype is not floating point """ dtype = _assert_float_dtype(dtype) return self._random_generator.random_normal(shape, self.mean, self.stddev, dtype) def get_config(self): return { "mean": self.mean, "stddev": self.stddev, "seed": self.seed } class TruncatedNormal(Initializer): """Initializer that generates a truncated normal distribution. These values are similar to values from a `random_normal_initializer` except that values more than two standard deviations from the mean are discarded and re-drawn. This is the recommended initializer for neural network weights and filters. Args: mean: a python scalar or a scalar tensor. Mean of the random values to generate. stddev: a python scalar or a scalar tensor. Standard deviation of the random values to generate. seed: A Python integer. Used to create random seeds. See `tf.compat.v1.set_random_seed` for behavior. """ def __init__(self, mean=0.0, stddev=0.05, seed=None): self.mean = mean self.stddev = stddev self.seed = seed self._random_generator = _RandomGenerator(seed) def __call__(self, shape, dtype=dtypes.float32): """Returns a tensor object initialized as specified by the initializer. Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. Only floating point types are supported. Raises: ValueError: If the dtype is not floating point """ dtype = _assert_float_dtype(dtype) return self._random_generator.truncated_normal(shape, self.mean, self.stddev, dtype) def get_config(self): return { "mean": self.mean, "stddev": self.stddev, "seed": self.seed } class VarianceScaling(Initializer): """Initializer capable of adapting its scale to the shape of weights tensors. 
With `distribution="truncated_normal" or "untruncated_normal"`, samples are drawn from a truncated/untruncated normal distribution with a mean of zero and a standard deviation (after truncation, if used) `stddev = sqrt(scale / n)` where n is: - number of input units in the weight tensor, if mode = "fan_in" - number of output units, if mode = "fan_out" - average of the numbers of input and output units, if mode = "fan_avg" With `distribution="uniform"`, samples are drawn from a uniform distribution within [-limit, limit], with `limit = sqrt(3 * scale / n)`. Args: scale: Scaling factor (positive float). mode: One of "fan_in", "fan_out", "fan_avg". distribution: Random distribution to use. One of "truncated_normal", "untruncated_normal" and "uniform". seed: A Python integer. Used to create random seeds. See `tf.compat.v1.set_random_seed` for behavior. Raises: ValueError: In case of an invalid value for the "scale", mode" or "distribution" arguments. """ def __init__(self, scale=1.0, mode="fan_in", distribution="truncated_normal", seed=None): if scale <= 0.: raise ValueError("`scale` must be positive float.") if mode not in {"fan_in", "fan_out", "fan_avg"}: raise ValueError("Invalid `mode` argument:", mode) distribution = distribution.lower() # Compatibility with keras-team/keras. if distribution == "normal": distribution = "truncated_normal" if distribution not in {"uniform", "truncated_normal", "untruncated_normal"}: raise ValueError("Invalid `distribution` argument:", distribution) self.scale = scale self.mode = mode self.distribution = distribution self.seed = seed self._random_generator = _RandomGenerator(seed) def __call__(self, shape, dtype=dtypes.float32): """Returns a tensor object initialized as specified by the initializer. Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. Only floating point types are supported. Raises: ValueError: If the dtype is not floating point """ partition_info = None # Keeps logic so can be readded later if necessary dtype = _assert_float_dtype(dtype) scale = self.scale scale_shape = shape if partition_info is not None: scale_shape = partition_info.full_shape fan_in, fan_out = _compute_fans(scale_shape) if self.mode == "fan_in": scale /= max(1., fan_in) elif self.mode == "fan_out": scale /= max(1., fan_out) else: scale /= max(1., (fan_in + fan_out) / 2.) if self.distribution == "truncated_normal": # constant from scipy.stats.truncnorm.std(a=-2, b=2, loc=0., scale=1.) stddev = math.sqrt(scale) / .87962566103423978 return self._random_generator.truncated_normal(shape, 0.0, stddev, dtype) elif self.distribution == "untruncated_normal": stddev = math.sqrt(scale) return self._random_generator.random_normal(shape, 0.0, stddev, dtype) else: limit = math.sqrt(3.0 * scale) return self._random_generator.random_uniform(shape, -limit, limit, dtype) def get_config(self): return { "scale": self.scale, "mode": self.mode, "distribution": self.distribution, "seed": self.seed } class Orthogonal(Initializer): """Initializer that generates an orthogonal matrix. If the shape of the tensor to initialize is two-dimensional, it is initialized with an orthogonal matrix obtained from the QR decomposition of a matrix of random numbers drawn from a normal distribution. If the matrix has fewer rows than columns then the output will have orthogonal rows. Otherwise, the output will have orthogonal columns. If the shape of the tensor to initialize is more than two-dimensional, a matrix of shape `(shape[0] * ... 
* shape[n - 2], shape[n - 1])` is initialized, where `n` is the length of the shape vector. The matrix is subsequently reshaped to give a tensor of the desired shape. Args: gain: multiplicative factor to apply to the orthogonal matrix seed: A Python integer. Used to create random seeds. See `tf.compat.v1.set_random_seed` for behavior. References: [Saxe et al., 2014](https://openreview.net/forum?id=_wzZwKpTDF_9C) ([pdf](https://arxiv.org/pdf/1312.6120.pdf)) """ def __init__(self, gain=1.0, seed=None): self.gain = gain self.seed = seed self._random_generator = _RandomGenerator(seed) def __call__(self, shape, dtype=dtypes.float32): """Returns a tensor object initialized as specified by the initializer. Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. Only floating point types are supported. Raises: ValueError: If the dtype is not floating point or the input shape is not valid. """ dtype = _assert_float_dtype(dtype) # Check the shape if len(shape) < 2: raise ValueError("The tensor to initialize must be " "at least two-dimensional") # Flatten the input shape with the last dimension remaining # its original shape so it works for conv2d num_rows = 1 for dim in shape[:-1]: num_rows *= dim num_cols = shape[-1] flat_shape = (max(num_cols, num_rows), min(num_cols, num_rows)) # Generate a random matrix a = self._random_generator.random_normal(flat_shape, dtype=dtype) # Compute the qr factorization q, r = gen_linalg_ops.qr(a, full_matrices=False) # Make Q uniform d = array_ops.diag_part(r) q *= math_ops.sign(d) if num_rows < num_cols: q = array_ops.matrix_transpose(q) return self.gain * array_ops.reshape(q, shape) def get_config(self): return {"gain": self.gain, "seed": self.seed} class Identity(Initializer): """Initializer that generates the identity matrix. Only use for 2D matrices. Args: gain: Multiplicative factor to apply to the identity matrix. """ def __init__(self, gain=1.0): self.gain = gain def __call__(self, shape, dtype=dtypes.float32): """Returns a tensor object initialized as specified by the initializer. Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. Only floating point types are supported. Raises: ValueError: If the dtype is not floating point """ partition_info = None # Keeps logic so can be readded later if necessary dtype = _assert_float_dtype(dtype) full_shape = shape if partition_info is None else partition_info.full_shape if len(full_shape) != 2: raise ValueError( "Identity matrix initializer can only be used for 2D matrices.") initializer = linalg_ops_impl.eye(*full_shape, dtype=dtype) if partition_info is not None: initializer = array_ops.slice(initializer, partition_info.var_offset, shape) return self.gain * initializer def get_config(self): return {"gain": self.gain} class GlorotUniform(VarianceScaling): """The Glorot uniform initializer, also called Xavier uniform initializer. It draws samples from a uniform distribution within [-limit, limit] where `limit` is `sqrt(6 / (fan_in + fan_out))` where `fan_in` is the number of input units in the weight tensor and `fan_out` is the number of output units in the weight tensor. Args: seed: A Python integer. Used to create random seeds. See `tf.compat.v1.set_random_seed` for behavior. 
References: [Glorot et al., 2010](http://proceedings.mlr.press/v9/glorot10a.html) ([pdf](http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf)) """ def __init__(self, seed=None): super(GlorotUniform, self).__init__( scale=1.0, mode="fan_avg", distribution="uniform", seed=seed) def get_config(self): return {"seed": self.seed} class GlorotNormal(VarianceScaling): """The Glorot normal initializer, also called Xavier normal initializer. It draws samples from a truncated normal distribution centered on 0 with `stddev = sqrt(2 / (fan_in + fan_out))` where `fan_in` is the number of input units in the weight tensor and `fan_out` is the number of output units in the weight tensor. Args: seed: A Python integer. Used to create random seeds. See `tf.compat.v1.set_random_seed` for behavior. References: [Glorot et al., 2010](http://proceedings.mlr.press/v9/glorot10a.html) ([pdf](http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf)) """ def __init__(self, seed=None): super(GlorotNormal, self).__init__( scale=1.0, mode="fan_avg", distribution="truncated_normal", seed=seed) def get_config(self): return {"seed": self.seed} # Aliases. # pylint: disable=invalid-name zeros_initializer = Zeros ones_initializer = Ones constant_initializer = Constant random_uniform_initializer = RandomUniform random_normal_initializer = RandomNormal truncated_normal_initializer = TruncatedNormal variance_scaling_initializer = VarianceScaling glorot_uniform_initializer = GlorotUniform glorot_normal_initializer = GlorotNormal orthogonal_initializer = Orthogonal identity_initializer = Identity # pylint: enable=invalid-name def lecun_normal(seed=None): """LeCun normal initializer. It draws samples from a truncated normal distribution centered on 0 with `stddev = sqrt(1 / fan_in)` where `fan_in` is the number of input units in the weight tensor. Arguments: seed: A Python integer. Used to seed the random generator. Returns: An initializer. References: - Self-Normalizing Neural Networks, [Klambauer et al., 2017] (https://papers.nips.cc/paper/6698-self-normalizing-neural-networks) ([pdf] (https://papers.nips.cc/paper/6698-self-normalizing-neural-networks.pdf)) - Efficient Backprop, [Lecun et al., 1998](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf) """ return VarianceScaling( scale=1., mode="fan_in", distribution="truncated_normal", seed=seed) def lecun_uniform(seed=None): """LeCun uniform initializer. It draws samples from a uniform distribution within [-limit, limit] where `limit` is `sqrt(3 / fan_in)` where `fan_in` is the number of input units in the weight tensor. Arguments: seed: A Python integer. Used to seed the random generator. Returns: An initializer. References: - Self-Normalizing Neural Networks, [Klambauer et al., 2017](https://papers.nips.cc/paper/6698-self-normalizing-neural-networks) # pylint: disable=line-too-long ([pdf](https://papers.nips.cc/paper/6698-self-normalizing-neural-networks.pdf)) - Efficient Backprop, [Lecun et al., 1998](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf) """ return VarianceScaling( scale=1., mode="fan_in", distribution="uniform", seed=seed) def he_normal(seed=None): """He normal initializer. It draws samples from a truncated normal distribution centered on 0 with `stddev = sqrt(2 / fan_in)` where `fan_in` is the number of input units in the weight tensor. Arguments: seed: A Python integer. Used to seed the random generator. Returns: An initializer. 
References: [He et al., 2015](https://www.cv-foundation.org/openaccess/content_iccv_2015/html/He_Delving_Deep_into_ICCV_2015_paper.html) # pylint: disable=line-too-long ([pdf](https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/He_Delving_Deep_into_ICCV_2015_paper.pdf)) """ return VarianceScaling( scale=2., mode="fan_in", distribution="truncated_normal", seed=seed) def he_uniform(seed=None): """He uniform variance scaling initializer. It draws samples from a uniform distribution within [-limit, limit] where `limit` is `sqrt(6 / fan_in)` where `fan_in` is the number of input units in the weight tensor. Arguments: seed: A Python integer. Used to seed the random generator. Returns: An initializer. References: [He et al., 2015](https://www.cv-foundation.org/openaccess/content_iccv_2015/html/He_Delving_Deep_into_ICCV_2015_paper.html) # pylint: disable=line-too-long ([pdf](https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/He_Delving_Deep_into_ICCV_2015_paper.pdf)) """ return VarianceScaling( scale=2., mode="fan_in", distribution="uniform", seed=seed) # Utility functions. def _compute_fans(shape): """Computes the number of input and output units for a weight shape. Args: shape: Integer shape tuple or TF tensor shape. Returns: A tuple of scalars (fan_in, fan_out). """ if len(shape) < 1: # Just to avoid errors for constants. fan_in = fan_out = 1 elif len(shape) == 1: fan_in = fan_out = shape[0] elif len(shape) == 2: fan_in = shape[0] fan_out = shape[1] else: # Assuming convolution kernels (2D, 3D, or more). # kernel shape: (..., input_depth, depth) receptive_field_size = 1. for dim in shape[:-2]: receptive_field_size *= dim fan_in = shape[-2] * receptive_field_size fan_out = shape[-1] * receptive_field_size return fan_in, fan_out def _assert_float_dtype(dtype): """Validate and return floating point type based on `dtype`. `dtype` must be a floating point type. Args: dtype: The data type to validate. Returns: Validated type. Raises: ValueError: if `dtype` is not a floating point type. """ dtype = dtypes.as_dtype(dtype) if not dtype.is_floating: raise ValueError("Expected floating point type, got %s." % dtype) return dtype class _RandomGenerator(object): """Random generator that selects appropriate random ops.""" def __init__(self, seed=None): super(_RandomGenerator, self).__init__() if seed is not None: # Stateless random ops requires 2-int seed. 
self.seed = [seed, 0] else: self.seed = None def random_normal(self, shape, mean=0.0, stddev=1, dtype=dtypes.float32): """A deterministic random normal if seed is passed.""" if self.seed: op = stateless_random_ops.stateless_random_normal else: op = random_ops.random_normal return op( shape=shape, mean=mean, stddev=stddev, dtype=dtype, seed=self.seed) def random_uniform(self, shape, minval, maxval, dtype): """A deterministic random uniform if seed is passed.""" if self.seed: op = stateless_random_ops.stateless_random_uniform else: op = random_ops.random_uniform return op( shape=shape, minval=minval, maxval=maxval, dtype=dtype, seed=self.seed) def truncated_normal(self, shape, mean, stddev, dtype): """A deterministic truncated normal if seed is passed.""" if self.seed: op = stateless_random_ops.stateless_truncated_normal else: op = random_ops.truncated_normal return op( shape=shape, mean=mean, stddev=stddev, dtype=dtype, seed=self.seed) # Compatibility aliases # pylint: disable=invalid-name zero = zeros = Zeros one = ones = Ones constant = Constant uniform = random_uniform = RandomUniform normal = random_normal = RandomNormal truncated_normal = TruncatedNormal identity = Identity orthogonal = Orthogonal glorot_normal = GlorotNormal glorot_uniform = GlorotUniform
tensorflow-master
tensorflow/python/ops/init_ops_v2.py
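# A minimal usage sketch for the v2 initializer classes defined in the module
# above, assuming a TF 2.x runtime where they are exported as
# tf.zeros_initializer, tf.constant_initializer, tf.random_uniform_initializer,
# etc. An initializer instance maps a `shape` (and optional `dtype`) to a tensor.
import tensorflow as tf

zeros = tf.zeros_initializer()
print(zeros(shape=[2, 3], dtype=tf.float32))      # 2x3 tensor of 0.0

const = tf.constant_initializer([1, 2, 3, 4])
print(const(shape=[2, 2], dtype=tf.float32))      # [[1., 2.], [3., 4.]]

# Seeded initializers route through the stateless random ops via
# _RandomGenerator, so repeated calls with the same shape are deterministic.
uniform = tf.random_uniform_initializer(minval=-1.0, maxval=1.0, seed=7)
a = uniform(shape=[3], dtype=tf.float32)
b = uniform(shape=[3], dtype=tf.float32)
# a and b hold identical values because the seed fixes the stateless op.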
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# pylint: disable=wildcard-import,unused-import
"""Protocol Buffer encoding and decoding from tensors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.framework import ops
from tensorflow.python.ops.gen_decode_proto_ops import decode_proto_v2 as decode_proto
from tensorflow.python.ops.gen_encode_proto_ops import encode_proto
from tensorflow.python.util.tf_export import tf_export

tf_export("io.decode_proto")(decode_proto)
tf_export("io.encode_proto")(encode_proto)

ops.NotDifferentiable("DecodeProtoV2")
ops.NotDifferentiable("EncodeProto")
tensorflow-master
tensorflow/python/ops/proto_ops.py
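# A hedged sketch of the ops exported above, assuming the descriptor for
# `tensorflow.Example` is linked into the binary (the default
# descriptor_source='local://'). Submessage fields are requested as tf.string
# and come back as serialized protos.
import tensorflow as tf

example = tf.train.Example(
    features=tf.train.Features(
        feature={"x": tf.train.Feature(int64_list=tf.train.Int64List(value=[7]))}))
serialized = tf.constant([example.SerializeToString()])

sizes, values = tf.io.decode_proto(
    serialized,
    message_type="tensorflow.Example",
    field_names=["features"],
    output_types=[tf.string])
# `sizes` holds the per-field value counts for each input element;
# `values[0]` holds the serialized `Features` submessage.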
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Connects all half, float and double tensors to CheckNumericsOp.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.eager import context from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.util import deprecation from tensorflow.python.util.tf_export import tf_export @tf_export(v1=["debugging.assert_all_finite", "verify_tensor_all_finite"]) @deprecation.deprecated_endpoints("verify_tensor_all_finite") def verify_tensor_all_finite(t=None, msg=None, name=None, x=None, message=None): """Assert that the tensor does not contain any NaN's or Inf's. Args: t: Tensor to check. msg: Message to log on failure. name: A name for this operation (optional). x: Alias for t. message: Alias for msg. Returns: Same tensor as `t`. """ x = deprecation.deprecated_argument_lookup("x", x, "t", t) message = deprecation.deprecated_argument_lookup( "message", message, "msg", msg) return verify_tensor_all_finite_v2(x, message, name) @tf_export("debugging.assert_all_finite", v1=[]) def verify_tensor_all_finite_v2(x, message, name=None): """Assert that the tensor does not contain any NaN's or Inf's. Args: x: Tensor to check. message: Message to log on failure. name: A name for this operation (optional). Returns: Same tensor as `x`. """ with ops.name_scope(name, "VerifyFinite", [x]) as name: x = ops.convert_to_tensor(x, name="x") with ops.colocate_with(x): verify_input = array_ops.check_numerics(x, message=message) out = control_flow_ops.with_dependencies([verify_input], x) return out @tf_export(v1=["add_check_numerics_ops"]) def add_check_numerics_ops(): """Connect a `tf.debugging.check_numerics` to every floating point tensor. `check_numerics` operations themselves are added for each `half`, `float`, or `double` tensor in the current default graph. For all ops in the graph, the `check_numerics` op for all of its (`half`, `float`, or `double`) inputs is guaranteed to run before the `check_numerics` op on any of its outputs. Note: This API is not compatible with the use of `tf.cond` or `tf.while_loop`, and will raise a `ValueError` if you attempt to call it in such a graph. Returns: A `group` op depending on all `check_numerics` ops added. Raises: ValueError: If the graph contains any numeric operations in a control flow structure. RuntimeError: If called with eager execution enabled. @compatibility(eager) Not compatible with eager execution. To check for `Inf`s and `NaN`s under eager execution, call `tfe.seterr(inf_or_nan='raise')` once before executing the checked operations. 
@end_compatibility """ if context.executing_eagerly(): raise RuntimeError( "add_check_numerics_ops() is not compatible with eager execution. " "To check for Inf's and NaN's under eager execution, call " "tfe.seterr(inf_or_nan='raise') once before executing the " "checked operations.") check_op = [] # This code relies on the ordering of ops in get_operations(). # The producer of a tensor always comes before that tensor's consumer in # this list. This is true because get_operations() returns ops in the order # added, and an op can only be added after its inputs are added. for op in ops.get_default_graph().get_operations(): for output in op.outputs: if output.dtype in [dtypes.float16, dtypes.float32, dtypes.float64]: if op._get_control_flow_context() is not None: # pylint: disable=protected-access raise ValueError("`tf.add_check_numerics_ops() is not compatible " "with TensorFlow control flow operations such as " "`tf.cond()` or `tf.while_loop()`.") message = op.name + ":" + str(output.value_index) with ops.control_dependencies(check_op): check_op = [array_ops.check_numerics(output, message=message)] return control_flow_ops.group(*check_op)
tensorflow-master
tensorflow/python/ops/numerics.py
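# A small sketch of the V2 entry point above, assuming TF 2.x where it is
# exported as tf.debugging.assert_all_finite: the function returns its input
# (gated on a CheckNumerics op) and fails at run time on NaN or Inf.
import tensorflow as tf

x = tf.constant([1.0, 2.0, 3.0])
x = tf.debugging.assert_all_finite(x, message="x contains NaN or Inf")  # passes

# Uncommenting the next line would raise tf.errors.InvalidArgumentError:
# tf.debugging.assert_all_finite(tf.constant([float("nan")]), message="bad value")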
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Benchmark for Matmul operator.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import itertools import time import numpy as np from tensorflow.python.client import session as session_lib from tensorflow.python.framework import ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import random_ops from tensorflow.python.ops import variables from tensorflow.python.platform import test def build_graph(device, n, m, k, transpose_a, transpose_b, dtype): """Build a graph containing a sequence of matmul operations. Args: device: String, the device to run on. n: tensor A's first dimension size. m: tensor A's second dimension size. k: tensor B's second dimension size. transpose_a: boolean value to show if tensor A is transposed. transpose_b: boolean value to show if tensor B is transposed. dtype: numpy data type of the input tensor. Returns: A matmul operation to run() """ with ops.device('%s' % device): if not transpose_a: x = variables.VariableV1(random_ops.random_uniform([n, m], dtype=dtype), use_resource=False) else: x = variables.VariableV1(random_ops.random_uniform([m, n], dtype=dtype), use_resource=False) if not transpose_b: y = variables.VariableV1(random_ops.random_uniform([m, k], dtype=dtype), use_resource=False) else: y = variables.VariableV1(random_ops.random_uniform([k, m], dtype=dtype), use_resource=False) z = math_ops.matmul(x, y, transpose_a=transpose_a, transpose_b=transpose_b) return control_flow_ops.group(z) class MatmulBenchmark(test.Benchmark): """Benchmark matmul!""" def run_graph(self, device, n, m, k, transpose_a, transpose_b, num_iters, dtype): """Run the graph and print its execution time. Args: device: String, the device to run on. n: tensor A's first dimension size. m: tensor A's second dimension size. k: tensor B's second dimension size. transpose_a: boolean value to show if tensor A is transposed. transpose_b: boolean value to show if tensor B is transposed. num_iters: number of iterations to run the benchmark. dtype: numpy data type of the input tensor. Returns: The duration of the run in seconds. """ graph = ops.Graph() with graph.as_default(): output = build_graph(device, n, m, k, transpose_a, transpose_b, dtype) with session_lib.Session(graph=graph) as session: variables.global_variables_initializer().run() for _ in range(500): session.run(output) start_time = time.time() for _ in range(num_iters): session.run(output) duration = (time.time() - start_time) num_items = n * m * k * 2 throughput = num_items * num_iters / duration / 1e9 print('%s %s input_info:%s %d %.4fsec, %.4fGitems/s.' 
% (device, str(dtype), str(n) + 'x' + str(m) + 'x' + str(k) + ',ta:' + str(transpose_a) + '.tb:' + str(transpose_b), num_iters, duration, throughput)) name_template = ('matmul_{device}_{dtype}_input_info_{inputinfo}') self.report_benchmark( name=name_template.format( device=device, dtype=str(dtype).replace(' ', ''), inputinfo=str(n) + 'x' + str(m) + 'x' + str(k) + ',ta:' + str(transpose_a) + ',tb:' + str(transpose_b)).replace(' ', ''), iters=num_iters, wall_time=duration) return duration def run_test_gpu(self, n, m, k, transpose_a, transpose_b, dtype, num_iters): self.run_graph(test.gpu_device_name(), n, m, k, transpose_a, transpose_b, num_iters, dtype) def test_round(self, num_iters): dtypes = [np.float32, np.float64] for dtype in dtypes: for n, m, (transpose_a, transpose_b) in itertools.product( [512, 1024], [1, 8, 16, 128], [(False, False), (True, False), (False, True)]): k = n self.run_test_gpu(n, m, k, transpose_a, transpose_b, dtype, num_iters) for n, m, k, (transpose_a, transpose_b) in itertools.product( [200], [1, 8, 20], [10000], [(False, False), (True, False), (False, True)]): self.run_test_gpu(n, m, k, transpose_a, transpose_b, dtype, num_iters) for (n, m, k), (transpose_a, transpose_b) in itertools.product( [(200, 20, 20000), (1, 10000, 200)], [(False, False), (True, False), (False, True)]): self.run_test_gpu(n, m, k, transpose_a, transpose_b, dtype, num_iters) def benchmark_matmul(self): self.test_round(num_iters=200) if __name__ == '__main__': test.main()
tensorflow-master
tensorflow/python/ops/matmul_benchmark.py
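# The throughput printed by run_graph above counts an (n x m) @ (m x k) product
# as n*m*k multiply-add pairs, i.e. 2*n*m*k "items", reported in units of 1e9
# items per second. A small helper mirroring that arithmetic:
def matmul_gitems_per_sec(n, m, k, num_iters, duration_sec):
  """Giga-items/sec as computed in MatmulBenchmark.run_graph."""
  num_items = n * m * k * 2
  return num_items * num_iters / duration_sec / 1e9

# Example: a 1024x128x1024 matmul repeated 200 times in 0.5 s gives
# matmul_gitems_per_sec(1024, 128, 1024, 200, 0.5) ~= 107.4 Gitems/s.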
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for manipulating the binary representations of integers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.framework import ops

# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_bitwise_ops import *
# pylint: enable=wildcard-import

ops.NotDifferentiable("BitwiseAnd")
ops.NotDifferentiable("BitwiseOr")
ops.NotDifferentiable("BitwiseXor")
ops.NotDifferentiable("Invert")
ops.NotDifferentiable("PopulationCount")
ops.NotDifferentiable("LeftShift")
ops.NotDifferentiable("RightShift")
tensorflow-master
tensorflow/python/ops/bitwise_ops.py
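# A brief usage sketch of the wildcard-imported kernels above; in TF 2.x they
# surface under tf.bitwise and, as registered above, have no gradients. They
# operate elementwise on integer tensors with normal broadcasting rules.
import tensorflow as tf

a = tf.constant([0b1100, 0b1010], dtype=tf.int32)
b = tf.constant([0b1010, 0b0110], dtype=tf.int32)

tf.bitwise.bitwise_and(a, b)                         # [0b1000, 0b0010] == [8, 2]
tf.bitwise.bitwise_xor(a, b)                         # [0b0110, 0b1100] == [6, 12]
tf.bitwise.left_shift(a, tf.constant(1, tf.int32))   # [24, 20]
tf.bitwise.invert(a)                                 # two's-complement NOT: [-13, -11]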
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The TF2 version of the enum keras.losses.Reduction."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function


class ReductionV2(object):
  """Types of loss reduction.

  Contains the following values:

  * `AUTO`: Indicates that the reduction option will be determined by the usage
    context. For almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When
    used with `tf.distribute.Strategy`, outside of built-in training loops such
    as `tf.keras` `compile` and `fit`, we expect the reduction value to be
    `SUM` or `NONE`. Using `AUTO` in that case will raise an error.
  * `NONE`: Un-reduced weighted losses with the same shape as input. When this
    reduction type is used with built-in Keras training loops like
    `fit`/`evaluate`, the unreduced vector loss is passed to the optimizer but
    the reported loss will be a scalar value.
  * `SUM`: Scalar sum of weighted losses.
  * `SUM_OVER_BATCH_SIZE`: Scalar `SUM` divided by number of elements in
    losses. This reduction type is not supported when used with
    `tf.distribute.Strategy` outside of built-in training loops like `tf.keras`
    `compile`/`fit`.

    You can implement 'SUM_OVER_BATCH_SIZE' using the global batch size like:

    ```
    with strategy.scope():
      loss_obj = tf.keras.losses.CategoricalCrossentropy(
          reduction=tf.keras.losses.Reduction.NONE)
      ....
      loss = tf.reduce_sum(loss_obj(labels, predictions)) * (
          1. / global_batch_size)
    ```

  Please see
  https://www.tensorflow.org/alpha/tutorials/distribute/training_loops for more
  details on this.
  """

  AUTO = 'auto'
  NONE = 'none'
  SUM = 'sum'
  SUM_OVER_BATCH_SIZE = 'sum_over_batch_size'

  @classmethod
  def all(cls):
    return (cls.AUTO, cls.NONE, cls.SUM, cls.SUM_OVER_BATCH_SIZE)

  @classmethod
  def validate(cls, key):
    if key not in cls.all():
      raise ValueError('Invalid Reduction Key %s.' % key)
tensorflow-master
tensorflow/python/ops/losses/loss_reduction.py
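# A short sketch of how ReductionV2 is consumed, assuming the TF 2.x surface
# where it is exposed as tf.keras.losses.Reduction. NONE keeps the per-example
# loss vector, which custom tf.distribute training loops typically want.
import tensorflow as tf

loss_obj = tf.keras.losses.CategoricalCrossentropy(
    reduction=tf.keras.losses.Reduction.NONE)
labels = tf.constant([[0., 1.], [1., 0.]])
preds = tf.constant([[0.1, 0.9], [0.8, 0.2]])
per_example = loss_obj(labels, preds)            # shape [2], one loss per example
global_batch_size = 2
scalar = tf.reduce_sum(per_example) * (1. / global_batch_size)  # manual SUM_OVER_BATCH_SIZE

tf.keras.losses.Reduction.validate('sum')        # passes silently
# tf.keras.losses.Reduction.validate('bogus')    # would raise ValueError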
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilities for manipulating the loss collections.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.eager import context from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import confusion_matrix from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import weights_broadcast_ops from tensorflow.python.util import tf_contextlib from tensorflow.python.util.tf_export import tf_export def squeeze_or_expand_dimensions(y_pred, y_true=None, sample_weight=None): """Squeeze or expand last dimension if needed. 1. Squeezes last dim of `y_pred` or `y_true` if their rank differs by 1 (using `confusion_matrix.remove_squeezable_dimensions`). 2. Squeezes or expands last dim of `sample_weight` if its rank differs by 1 from the new rank of `y_pred`. If `sample_weight` is scalar, it is kept scalar. This will use static shape if available. Otherwise, it will add graph operations, which could result in a performance hit. Args: y_pred: Predicted values, a `Tensor` of arbitrary dimensions. y_true: Optional label `Tensor` whose dimensions match `y_pred`. sample_weight: Optional weight scalar or `Tensor` whose dimensions match `y_pred`. Returns: Tuple of `y_pred`, `y_true` and `sample_weight`. Each of them possibly has the last dimension squeezed, `sample_weight` could be extended by one dimension. If `sample_weight` is None, (y_pred, y_true) is returned. """ y_pred_shape = y_pred.shape y_pred_rank = y_pred_shape.ndims if y_true is not None: # If sparse matrix is provided as `y_true`, the last dimension in `y_pred` # may be > 1. Eg: y_true = [0, 1, 2] (shape=(3,)), # y_pred = [[.9, .05, .05], [.5, .89, .6], [.05, .01, .94]] (shape=(3, 3)) # In this case, we should not try to remove squeezable dimension. y_true_shape = y_true.shape y_true_rank = y_true_shape.ndims if (y_true_rank is not None) and (y_pred_rank is not None): # Use static rank for `y_true` and `y_pred`. if (y_pred_rank - y_true_rank != 1) or y_pred_shape[-1] == 1: y_true, y_pred = confusion_matrix.remove_squeezable_dimensions( y_true, y_pred) else: # Use dynamic rank. 
rank_diff = array_ops.rank(y_pred) - array_ops.rank(y_true) squeeze_dims = lambda: confusion_matrix.remove_squeezable_dimensions( # pylint: disable=g-long-lambda y_true, y_pred) is_last_dim_1 = math_ops.equal(1, array_ops.shape(y_pred)[-1]) maybe_squeeze_dims = lambda: control_flow_ops.cond( # pylint: disable=g-long-lambda is_last_dim_1, squeeze_dims, lambda: (y_true, y_pred)) y_true, y_pred = control_flow_ops.cond( math_ops.equal(1, rank_diff), maybe_squeeze_dims, squeeze_dims) if sample_weight is None: return y_pred, y_true sample_weight = ops.convert_to_tensor(sample_weight) weights_shape = sample_weight.shape weights_rank = weights_shape.ndims if weights_rank == 0: # If weights is scalar, do nothing. return y_pred, y_true, sample_weight if (y_pred_rank is not None) and (weights_rank is not None): # Use static rank. if weights_rank - y_pred_rank == 1: sample_weight = array_ops.squeeze(sample_weight, [-1]) elif y_pred_rank - weights_rank == 1: sample_weight = array_ops.expand_dims(sample_weight, [-1]) return y_pred, y_true, sample_weight # Use dynamic rank. weights_rank_tensor = array_ops.rank(sample_weight) rank_diff = weights_rank_tensor - array_ops.rank(y_pred) maybe_squeeze_weights = lambda: array_ops.squeeze(sample_weight, [-1]) def _maybe_expand_weights(): expand_weights = lambda: array_ops.expand_dims(sample_weight, [-1]) return control_flow_ops.cond( math_ops.equal(rank_diff, -1), expand_weights, lambda: sample_weight) def _maybe_adjust_weights(): return control_flow_ops.cond( math_ops.equal(rank_diff, 1), maybe_squeeze_weights, _maybe_expand_weights) # squeeze or expand last dim of `sample_weight` if its rank differs by 1 # from the new rank of `y_pred`. sample_weight = control_flow_ops.cond( math_ops.equal(weights_rank_tensor, 0), lambda: sample_weight, _maybe_adjust_weights) return y_pred, y_true, sample_weight def scale_losses_by_sample_weight(losses, sample_weight): """Scales loss values by the given sample weights. `sample_weight` dimensions are updated to match with the dimension of `losses` if possible by using squeeze/expand/broadcast. Args: losses: Loss tensor. sample_weight: Sample weights tensor. Returns: `losses` scaled by `sample_weight` with dtype float32. """ # TODO(psv): Handle the casting here in a better way, eg. if losses is float64 # we do not want to lose precision. losses = math_ops.cast(losses, dtypes.float32) sample_weight = math_ops.cast(sample_weight, dtypes.float32) # Update dimensions of `sample_weight` to match with `losses` if possible. losses, _, sample_weight = squeeze_or_expand_dimensions( losses, None, sample_weight) # Broadcast weights if possible. sample_weight = weights_broadcast_ops.broadcast_weights(sample_weight, losses) return math_ops.multiply(losses, sample_weight) @tf_contextlib.contextmanager def check_per_example_loss_rank(per_example_loss): """Context manager that checks that the rank of per_example_loss is atleast 1. Args: per_example_loss: Per example loss tensor. Yields: A context manager. """ loss_rank = per_example_loss.shape.rank if loss_rank is not None: # Handle static rank. if loss_rank == 0: raise ValueError( "Invalid value passed for `per_example_loss`. Expected a tensor with " "at least rank 1, received: {}".format(per_example_loss)) yield else: # Handle dynamic rank. with ops.control_dependencies([ check_ops.assert_greater_equal( array_ops.rank(per_example_loss), math_ops.cast(1, dtype=dtypes.int32), message="Invalid value passed for `per_example_loss`. 
Expected a " "tensor with at least rank 1.") ]): yield @tf_export(v1=["losses.add_loss"]) def add_loss(loss, loss_collection=ops.GraphKeys.LOSSES): """Adds a externally defined loss to the collection of losses. Args: loss: A loss `Tensor`. loss_collection: Optional collection to add the loss to. """ # Since we have no way of figuring out when a training iteration starts or # ends, holding on to a loss when executing eagerly is indistingishable from # leaking memory. We instead leave the collection empty. if loss_collection and not context.executing_eagerly(): ops.add_to_collection(loss_collection, loss) @tf_export(v1=["losses.get_losses"]) def get_losses(scope=None, loss_collection=ops.GraphKeys.LOSSES): """Gets the list of losses from the loss_collection. Args: scope: An optional scope name for filtering the losses to return. loss_collection: Optional losses collection. Returns: a list of loss tensors. """ return ops.get_collection(loss_collection, scope) @tf_export(v1=["losses.get_regularization_losses"]) def get_regularization_losses(scope=None): """Gets the list of regularization losses. Args: scope: An optional scope name for filtering the losses to return. Returns: A list of regularization losses as Tensors. """ return ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES, scope) @tf_export(v1=["losses.get_regularization_loss"]) def get_regularization_loss(scope=None, name="total_regularization_loss"): """Gets the total regularization loss. Args: scope: An optional scope name for filtering the losses to return. name: The name of the returned tensor. Returns: A scalar regularization loss. """ losses = get_regularization_losses(scope) if losses: return math_ops.add_n(losses, name=name) else: return constant_op.constant(0.0) @tf_export(v1=["losses.get_total_loss"]) def get_total_loss(add_regularization_losses=True, name="total_loss", scope=None): """Returns a tensor whose value represents the total loss. In particular, this adds any losses you have added with `tf.add_loss()` to any regularization losses that have been added by regularization parameters on layers constructors e.g. `tf.layers`. Be very sure to use this if you are constructing a loss_op manually. Otherwise regularization arguments on `tf.layers` methods will not function. Args: add_regularization_losses: A boolean indicating whether or not to use the regularization losses in the sum. name: The name of the returned tensor. scope: An optional scope name for filtering the losses to return. Note that this filters the losses added with `tf.add_loss()` as well as the regularization losses to that scope. Returns: A `Tensor` whose value represents the total loss. Raises: ValueError: if `losses` is not iterable. """ losses = get_losses(scope=scope) if add_regularization_losses: losses += get_regularization_losses(scope=scope) return math_ops.add_n(losses, name=name)
tensorflow-master
tensorflow/python/ops/losses/util.py
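# A minimal sketch of squeeze_or_expand_dimensions above, imported via the
# module's internal path (TF-internal API, so the path is only meaningful
# inside the TensorFlow package itself).
import tensorflow as tf
from tensorflow.python.ops.losses import util as losses_util

y_pred = tf.zeros([4, 1])   # e.g. a sigmoid output with a trailing unit axis
y_true = tf.zeros([4])
y_pred, y_true = losses_util.squeeze_or_expand_dimensions(y_pred, y_true)
# y_pred is squeezed to shape [4] so it lines up with y_true; with a
# sample_weight argument, the weights would likewise be squeezed or expanded
# to match y_pred's rank.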
tensorflow-master
tensorflow/python/ops/losses/__init__.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Implementation of Loss operations for use in neural networks.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.eager import context from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import confusion_matrix from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn from tensorflow.python.ops import nn_ops from tensorflow.python.ops import weights_broadcast_ops from tensorflow.python.ops.losses import util from tensorflow.python.util.deprecation import deprecated_args from tensorflow.python.util.deprecation import deprecated_argument_lookup from tensorflow.python.util.tf_export import tf_export @tf_export(v1=["losses.Reduction"]) class Reduction(object): """Types of loss reduction. Contains the following values: * `NONE`: Un-reduced weighted losses with the same shape as input. * `SUM`: Scalar sum of weighted losses. * `MEAN`: Scalar `SUM` divided by sum of weights. DEPRECATED. * `SUM_OVER_BATCH_SIZE`: Scalar `SUM` divided by number of elements in losses. * `SUM_OVER_NONZERO_WEIGHTS`: Scalar `SUM` divided by number of non-zero weights. DEPRECATED. * `SUM_BY_NONZERO_WEIGHTS`: Same as `SUM_OVER_NONZERO_WEIGHTS`. DEPRECATED. """ NONE = "none" SUM = "weighted_sum" SUM_OVER_BATCH_SIZE = "weighted_sum_over_batch_size" MEAN = "weighted_mean" SUM_BY_NONZERO_WEIGHTS = "weighted_sum_by_nonzero_weights" SUM_OVER_NONZERO_WEIGHTS = SUM_BY_NONZERO_WEIGHTS @classmethod def all(cls): return ( cls.NONE, cls.SUM, cls.MEAN, cls.SUM_OVER_BATCH_SIZE, cls.SUM_OVER_NONZERO_WEIGHTS, cls.SUM_BY_NONZERO_WEIGHTS) @classmethod def validate(cls, key): if key not in cls.all(): raise ValueError("Invalid Reduction Key %s." % key) def _safe_mean(losses, num_present): """Computes a safe mean of the losses. Args: losses: `Tensor` whose elements contain individual loss measurements. num_present: The number of measurable elements in `losses`. Returns: A scalar representing the mean of `losses`. If `num_present` is zero, then zero is returned. """ total_loss = math_ops.reduce_sum(losses) return math_ops.div_no_nan(total_loss, num_present, name="value") def _num_present(losses, weights, per_batch=False): """Computes the number of elements in the loss function induced by `weights`. A given weights tensor induces different numbers of usable elements in the `losses` tensor. The `weights` tensor is broadcast across `losses` for all possible dimensions. For example, if `losses` is a tensor of dimension `[4, 5, 6, 3]` and `weights` is a tensor of shape `[4, 5]`, then `weights` is, in effect, tiled to match the shape of `losses`. 
Following this effective tile, the total number of present elements is the number of non-zero weights. Args: losses: `Tensor` of shape `[batch_size, d1, ... dN]`. weights: `Tensor` of shape `[]`, `[batch_size]` or `[batch_size, d1, ... dK]`, where K < N. per_batch: Whether to return the number of elements per batch or as a sum total. Returns: The number of present (non-zero) elements in the losses tensor. If `per_batch` is `True`, the value is returned as a tensor of size `[batch_size]`. Otherwise, a single scalar tensor is returned. """ if ((isinstance(weights, float) and weights != 0.0) or (context.executing_eagerly() and weights._rank() == 0 # pylint: disable=protected-access and not math_ops.equal(weights, 0.0))): return _num_elements(losses) with ops.name_scope(None, "num_present", (losses, weights)) as scope: weights = math_ops.cast(weights, dtype=dtypes.float32) present = array_ops.where( math_ops.equal(weights, 0.0), array_ops.zeros_like(weights), array_ops.ones_like(weights)) present = weights_broadcast_ops.broadcast_weights(present, losses) if per_batch: return math_ops.reduce_sum( present, axis=math_ops.range(1, array_ops.rank(present)), keepdims=True, name=scope) return math_ops.reduce_sum(present, name=scope) def _num_elements(losses): """Computes the number of elements in `losses` tensor.""" with ops.name_scope(None, "num_elements", values=[losses]) as scope: return math_ops.cast(array_ops.size(losses, name=scope), dtype=losses.dtype) @tf_export(v1=["losses.compute_weighted_loss"]) def compute_weighted_loss( losses, weights=1.0, scope=None, loss_collection=ops.GraphKeys.LOSSES, reduction=Reduction.SUM_BY_NONZERO_WEIGHTS): """Computes the weighted loss. Args: losses: `Tensor` of shape `[batch_size, d1, ... dN]`. weights: Optional `Tensor` whose rank is either 0, or the same rank as `losses`, and must be broadcastable to `losses` (i.e., all dimensions must be either `1`, or the same as the corresponding `losses` dimension). scope: the scope for the operations performed in computing the loss. loss_collection: the loss will be added to these collections. reduction: Type of reduction to apply to loss. Returns: Weighted loss `Tensor` of the same type as `losses`. If `reduction` is `NONE`, this has the same shape as `losses`; otherwise, it is scalar. Raises: ValueError: If `weights` is `None` or the shape is not compatible with `losses`, or if the number of dimensions (rank) of either `losses` or `weights` is missing. Note: When calculating the gradient of a weighted loss contributions from both `losses` and `weights` are considered. If your `weights` depend on some model parameters but you do not want this to affect the loss gradient, you need to apply `tf.stop_gradient` to `weights` before passing them to `compute_weighted_loss`. @compatibility(eager) The `loss_collection` argument is ignored when executing eagerly. Consider holding on to the return value or collecting losses via a `tf.keras.Model`. @end_compatibility """ Reduction.validate(reduction) with ops.name_scope(scope, "weighted_loss", (losses, weights)): # Save the `reduction` argument for loss normalization when distributing # to multiple replicas. Used only for estimator + v1 optimizer flow. 
ops.get_default_graph()._last_loss_reduction = reduction # pylint: disable=protected-access with ops.control_dependencies(( weights_broadcast_ops.assert_broadcastable(weights, losses),)): losses = ops.convert_to_tensor(losses) input_dtype = losses.dtype losses = math_ops.cast(losses, dtype=dtypes.float32) weights = math_ops.cast(weights, dtype=dtypes.float32) weighted_losses = math_ops.multiply(losses, weights) if reduction == Reduction.NONE: loss = weighted_losses else: loss = math_ops.reduce_sum(weighted_losses) if reduction == Reduction.MEAN: loss = _safe_mean( loss, math_ops.reduce_sum(array_ops.ones_like(losses) * weights)) elif (reduction == Reduction.SUM_BY_NONZERO_WEIGHTS or reduction == Reduction.SUM_OVER_NONZERO_WEIGHTS): loss = _safe_mean(loss, _num_present(losses, weights)) elif reduction == Reduction.SUM_OVER_BATCH_SIZE: loss = _safe_mean(loss, _num_elements(losses)) # Convert the result back to the input type. loss = math_ops.cast(loss, input_dtype) util.add_loss(loss, loss_collection) return loss @tf_export(v1=["losses.absolute_difference"]) def absolute_difference( labels, predictions, weights=1.0, scope=None, loss_collection=ops.GraphKeys.LOSSES, reduction=Reduction.SUM_BY_NONZERO_WEIGHTS): """Adds an Absolute Difference loss to the training procedure. `weights` acts as a coefficient for the loss. If a scalar is provided, then the loss is simply scaled by the given value. If `weights` is a `Tensor` of shape `[batch_size]`, then the total loss for each sample of the batch is rescaled by the corresponding element in the `weights` vector. If the shape of `weights` matches the shape of `predictions`, then the loss of each measurable element of `predictions` is scaled by the corresponding value of `weights`. Args: labels: The ground truth output tensor, same dimensions as 'predictions'. predictions: The predicted outputs. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `losses` dimension). scope: The scope for the operations performed in computing the loss. loss_collection: collection to which this loss will be added. reduction: Type of reduction to apply to loss. Returns: Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same shape as `labels`; otherwise, it is scalar. Raises: ValueError: If the shape of `predictions` doesn't match that of `labels` or if the shape of `weights` is invalid or if `labels` or `predictions` is None. @compatibility(eager) The `loss_collection` argument is ignored when executing eagerly. Consider holding on to the return value or collecting losses via a `tf.keras.Model`. 
@end_compatibility """ if labels is None: raise ValueError("labels must not be None.") if predictions is None: raise ValueError("predictions must not be None.") with ops.name_scope(scope, "absolute_difference", (predictions, labels, weights)) as scope: predictions = math_ops.cast(predictions, dtype=dtypes.float32) labels = math_ops.cast(labels, dtype=dtypes.float32) predictions.get_shape().assert_is_compatible_with(labels.get_shape()) losses = math_ops.abs(math_ops.subtract(predictions, labels)) return compute_weighted_loss( losses, weights, scope, loss_collection, reduction=reduction) @tf_export(v1=["losses.cosine_distance"]) @deprecated_args(None, "dim is deprecated, use axis instead", "dim") def cosine_distance( labels, predictions, axis=None, weights=1.0, scope=None, loss_collection=ops.GraphKeys.LOSSES, reduction=Reduction.SUM_BY_NONZERO_WEIGHTS, dim=None): """Adds a cosine-distance loss to the training procedure. Note that the function assumes that `predictions` and `labels` are already unit-normalized. Args: labels: `Tensor` whose shape matches 'predictions' predictions: An arbitrary matrix. axis: The dimension along which the cosine distance is computed. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `losses` dimension). scope: The scope for the operations performed in computing the loss. loss_collection: collection to which this loss will be added. reduction: Type of reduction to apply to loss. dim: The old (deprecated) name for `axis`. Returns: Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same shape as `labels`; otherwise, it is scalar. Raises: ValueError: If `predictions` shape doesn't match `labels` shape, or `axis`, `labels`, `predictions` or `weights` is `None`. @compatibility(eager) The `loss_collection` argument is ignored when executing eagerly. Consider holding on to the return value or collecting losses via a `tf.keras.Model`. @end_compatibility """ axis = deprecated_argument_lookup("axis", axis, "dim", dim) if axis is None: raise ValueError("You must specify 'axis'.") if labels is None: raise ValueError("labels must not be None.") if predictions is None: raise ValueError("predictions must not be None.") with ops.name_scope(scope, "cosine_distance_loss", (predictions, labels, weights)) as scope: predictions = math_ops.cast(predictions, dtype=dtypes.float32) labels = math_ops.cast(labels, dtype=dtypes.float32) predictions.get_shape().assert_is_compatible_with(labels.get_shape()) radial_diffs = math_ops.multiply(predictions, labels) losses = 1 - math_ops.reduce_sum(radial_diffs, axis=(axis,), keepdims=True) return compute_weighted_loss( losses, weights, scope, loss_collection, reduction=reduction) @tf_export(v1=["losses.hinge_loss"]) def hinge_loss(labels, logits, weights=1.0, scope=None, loss_collection=ops.GraphKeys.LOSSES, reduction=Reduction.SUM_BY_NONZERO_WEIGHTS): """Adds a hinge loss to the training procedure. Args: labels: The ground truth output tensor. Its shape should match the shape of logits. The values of the tensor are expected to be 0.0 or 1.0. Internally the {0,1} labels are converted to {-1,1} when calculating the hinge loss. logits: The logits, a float tensor. Note that logits are assumed to be unbounded and 0-centered. A value > 0 (resp. < 0) is considered a positive (resp. negative) binary prediction. 
weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `losses` dimension). scope: The scope for the operations performed in computing the loss. loss_collection: collection to which the loss will be added. reduction: Type of reduction to apply to loss. Returns: Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same shape as `labels`; otherwise, it is scalar. Raises: ValueError: If the shapes of `logits` and `labels` don't match or if `labels` or `logits` is None. @compatibility(eager) The `loss_collection` argument is ignored when executing eagerly. Consider holding on to the return value or collecting losses via a `tf.keras.Model`. @end_compatibility """ if labels is None: raise ValueError("labels must not be None.") if logits is None: raise ValueError("logits must not be None.") with ops.name_scope(scope, "hinge_loss", (logits, labels, weights)) as scope: logits = math_ops.cast(logits, dtype=dtypes.float32) labels = math_ops.cast(labels, dtype=dtypes.float32) logits.get_shape().assert_is_compatible_with(labels.get_shape()) # We first need to convert binary labels to -1/1 labels (as floats). all_ones = array_ops.ones_like(labels) labels = math_ops.subtract(2 * labels, all_ones) losses = nn_ops.relu( math_ops.subtract(all_ones, math_ops.multiply(labels, logits))) return compute_weighted_loss( losses, weights, scope, loss_collection, reduction=reduction) @tf_export(v1=["losses.huber_loss"]) def huber_loss(labels, predictions, weights=1.0, delta=1.0, scope=None, loss_collection=ops.GraphKeys.LOSSES, reduction=Reduction.SUM_BY_NONZERO_WEIGHTS): """Adds a Huber Loss term to the training procedure. For each value x in `error=labels-predictions`, the following is calculated: ``` 0.5 * x^2 if |x| <= d 0.5 * d^2 + d * (|x| - d) if |x| > d ``` where d is `delta`. See: https://en.wikipedia.org/wiki/Huber_loss `weights` acts as a coefficient for the loss. If a scalar is provided, then the loss is simply scaled by the given value. If `weights` is a tensor of size `[batch_size]`, then the total loss for each sample of the batch is rescaled by the corresponding element in the `weights` vector. If the shape of `weights` matches the shape of `predictions`, then the loss of each measurable element of `predictions` is scaled by the corresponding value of `weights`. Args: labels: The ground truth output tensor, same dimensions as 'predictions'. predictions: The predicted outputs. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `losses` dimension). delta: `float`, the point where the huber loss function changes from a quadratic to linear. scope: The scope for the operations performed in computing the loss. loss_collection: collection to which the loss will be added. reduction: Type of reduction to apply to loss. Returns: Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same shape as `labels`; otherwise, it is scalar. Raises: ValueError: If the shape of `predictions` doesn't match that of `labels` or if the shape of `weights` is invalid. Also if `labels` or `predictions` is None. @compatibility(eager) The `loss_collection` argument is ignored when executing eagerly. Consider holding on to the return value or collecting losses via a `tf.keras.Model`. 
@end_compatibility """ if labels is None: raise ValueError("labels must not be None.") if predictions is None: raise ValueError("predictions must not be None.") with ops.name_scope(scope, "huber_loss", (predictions, labels, weights)) as scope: predictions = math_ops.cast(predictions, dtype=dtypes.float32) labels = math_ops.cast(labels, dtype=dtypes.float32) predictions.get_shape().assert_is_compatible_with(labels.get_shape()) error = math_ops.subtract(predictions, labels) abs_error = math_ops.abs(error) quadratic = math_ops.minimum(abs_error, delta) # The following expression is the same in value as # tf.maximum(abs_error - delta, 0), but importantly the gradient for the # expression when abs_error == delta is 0 (for tf.maximum it would be 1). # This is necessary to avoid doubling the gradient, since there is already a # nonzero contribution to the gradient from the quadratic term. linear = math_ops.subtract(abs_error, quadratic) losses = math_ops.add( math_ops.multiply( ops.convert_to_tensor(0.5, dtype=quadratic.dtype), math_ops.multiply(quadratic, quadratic)), math_ops.multiply(delta, linear)) return compute_weighted_loss( losses, weights, scope, loss_collection, reduction=reduction) @tf_export(v1=["losses.log_loss"]) def log_loss(labels, predictions, weights=1.0, epsilon=1e-7, scope=None, loss_collection=ops.GraphKeys.LOSSES, reduction=Reduction.SUM_BY_NONZERO_WEIGHTS): """Adds a Log Loss term to the training procedure. `weights` acts as a coefficient for the loss. If a scalar is provided, then the loss is simply scaled by the given value. If `weights` is a tensor of size `[batch_size]`, then the total loss for each sample of the batch is rescaled by the corresponding element in the `weights` vector. If the shape of `weights` matches the shape of `predictions`, then the loss of each measurable element of `predictions` is scaled by the corresponding value of `weights`. Args: labels: The ground truth output tensor, same dimensions as 'predictions'. predictions: The predicted outputs. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `losses` dimension). epsilon: A small increment to add to avoid taking a log of zero. scope: The scope for the operations performed in computing the loss. loss_collection: collection to which the loss will be added. reduction: Type of reduction to apply to loss. Returns: Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same shape as `labels`; otherwise, it is scalar. Raises: ValueError: If the shape of `predictions` doesn't match that of `labels` or if the shape of `weights` is invalid. Also if `labels` or `predictions` is None. @compatibility(eager) The `loss_collection` argument is ignored when executing eagerly. Consider holding on to the return value or collecting losses via a `tf.keras.Model`. 
@end_compatibility """ if labels is None: raise ValueError("labels must not be None.") if predictions is None: raise ValueError("predictions must not be None.") with ops.name_scope(scope, "log_loss", (predictions, labels, weights)) as scope: predictions = math_ops.cast(predictions, dtype=dtypes.float32) labels = math_ops.cast(labels, dtype=dtypes.float32) predictions.get_shape().assert_is_compatible_with(labels.get_shape()) losses = -math_ops.multiply( labels, math_ops.log(predictions + epsilon)) - math_ops.multiply( (1 - labels), math_ops.log(1 - predictions + epsilon)) return compute_weighted_loss( losses, weights, scope, loss_collection, reduction=reduction) # TODO(b/37208492): Add reduction arg. @tf_export(v1=["losses.mean_pairwise_squared_error"]) def mean_pairwise_squared_error( labels, predictions, weights=1.0, scope=None, loss_collection=ops.GraphKeys.LOSSES): """Adds a pairwise-errors-squared loss to the training procedure. Unlike `mean_squared_error`, which is a measure of the differences between corresponding elements of `predictions` and `labels`, `mean_pairwise_squared_error` is a measure of the differences between pairs of corresponding elements of `predictions` and `labels`. For example, if `labels`=[a, b, c] and `predictions`=[x, y, z], there are three pairs of differences are summed to compute the loss: loss = [ ((a-b) - (x-y)).^2 + ((a-c) - (x-z)).^2 + ((b-c) - (y-z)).^2 ] / 3 Note that since the inputs are of shape `[batch_size, d0, ... dN]`, the corresponding pairs are computed within each batch sample but not across samples within a batch. For example, if `predictions` represents a batch of 16 grayscale images of dimension [batch_size, 100, 200], then the set of pairs is drawn from each image, but not across images. `weights` acts as a coefficient for the loss. If a scalar is provided, then the loss is simply scaled by the given value. If `weights` is a tensor of size `[batch_size]`, then the total loss for each sample of the batch is rescaled by the corresponding element in the `weights` vector. Args: labels: The ground truth output tensor, whose shape must match the shape of `predictions`. predictions: The predicted outputs, a tensor of size `[batch_size, d0, .. dN]` where N+1 is the total number of dimensions in `predictions`. weights: Coefficients for the loss a scalar, a tensor of shape `[batch_size]` or a tensor whose shape matches `predictions`. scope: The scope for the operations performed in computing the loss. loss_collection: collection to which the loss will be added. Returns: A scalar `Tensor` that returns the weighted loss. Raises: ValueError: If the shape of `predictions` doesn't match that of `labels` or if the shape of `weights` is invalid. Also if `labels` or `predictions` is None. @compatibility(eager) The `loss_collection` argument is ignored when executing eagerly. Consider holding on to the return value or collecting losses via a `tf.keras.Model`. 
@end_compatibility """ if labels is None: raise ValueError("labels must not be None.") if predictions is None: raise ValueError("predictions must not be None.") with ops.name_scope(scope, "mean_pairwise_squared_error", (predictions, labels, weights)) as scope: weights = math_ops.cast(weights, dtype=dtypes.float32) labels = math_ops.cast(labels, dtype=dtypes.float32) with ops.control_dependencies(( weights_broadcast_ops.assert_broadcastable(weights, labels),)): predictions = math_ops.cast(predictions, dtype=dtypes.float32) predictions.get_shape().assert_is_compatible_with(labels.get_shape()) diffs = math_ops.subtract(predictions, labels) axis = math_ops.range(1, array_ops.rank(diffs)) sum_squares_diff_per_batch = math_ops.reduce_sum( math_ops.square(diffs), axis=axis, keepdims=True) num_present_per_batch = _num_present(diffs, weights, per_batch=True) term1 = 2.0 * math_ops.div_no_nan( sum_squares_diff_per_batch, math_ops.maximum(num_present_per_batch - 1, 0), name="value") sum_diff = math_ops.reduce_sum(diffs, axis=axis, keepdims=True) term2 = 2.0 * math_ops.div_no_nan( math_ops.square(sum_diff), math_ops.maximum( math_ops.multiply(num_present_per_batch, num_present_per_batch - 1), 0), name="value") weighted_losses = math_ops.multiply(term1 - term2, weights) loss = math_ops.reduce_sum(weighted_losses) mean_loss = array_ops.where( math_ops.reduce_sum(num_present_per_batch) > 0, loss, array_ops.zeros_like(loss), name="value") util.add_loss(mean_loss, loss_collection) return mean_loss @tf_export(v1=["losses.mean_squared_error"]) def mean_squared_error( labels, predictions, weights=1.0, scope=None, loss_collection=ops.GraphKeys.LOSSES, reduction=Reduction.SUM_BY_NONZERO_WEIGHTS): """Adds a Sum-of-Squares loss to the training procedure. `weights` acts as a coefficient for the loss. If a scalar is provided, then the loss is simply scaled by the given value. If `weights` is a tensor of size `[batch_size]`, then the total loss for each sample of the batch is rescaled by the corresponding element in the `weights` vector. If the shape of `weights` matches the shape of `predictions`, then the loss of each measurable element of `predictions` is scaled by the corresponding value of `weights`. Args: labels: The ground truth output tensor, same dimensions as 'predictions'. predictions: The predicted outputs. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `losses` dimension). scope: The scope for the operations performed in computing the loss. loss_collection: collection to which the loss will be added. reduction: Type of reduction to apply to loss. Returns: Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same shape as `labels`; otherwise, it is scalar. Raises: ValueError: If the shape of `predictions` doesn't match that of `labels` or if the shape of `weights` is invalid. Also if `labels` or `predictions` is None. @compatibility(eager) The `loss_collection` argument is ignored when executing eagerly. Consider holding on to the return value or collecting losses via a `tf.keras.Model`. 
@end_compatibility """ if labels is None: raise ValueError("labels must not be None.") if predictions is None: raise ValueError("predictions must not be None.") with ops.name_scope(scope, "mean_squared_error", (predictions, labels, weights)) as scope: predictions = math_ops.cast(predictions, dtype=dtypes.float32) labels = math_ops.cast(labels, dtype=dtypes.float32) predictions.get_shape().assert_is_compatible_with(labels.get_shape()) losses = math_ops.squared_difference(predictions, labels) return compute_weighted_loss( losses, weights, scope, loss_collection, reduction=reduction) @tf_export(v1=["losses.sigmoid_cross_entropy"]) def sigmoid_cross_entropy( multi_class_labels, logits, weights=1.0, label_smoothing=0, scope=None, loss_collection=ops.GraphKeys.LOSSES, reduction=Reduction.SUM_BY_NONZERO_WEIGHTS): """Creates a cross-entropy loss using tf.nn.sigmoid_cross_entropy_with_logits. `weights` acts as a coefficient for the loss. If a scalar is provided, then the loss is simply scaled by the given value. If `weights` is a tensor of shape `[batch_size]`, then the loss weights apply to each corresponding sample. If `label_smoothing` is nonzero, smooth the labels towards 1/2: new_multiclass_labels = multiclass_labels * (1 - label_smoothing) + 0.5 * label_smoothing Args: multi_class_labels: `[batch_size, num_classes]` target integer labels in `{0, 1}`. logits: Float `[batch_size, num_classes]` logits outputs of the network. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `losses` dimension). label_smoothing: If greater than `0` then smooth the labels. scope: The scope for the operations performed in computing the loss. loss_collection: collection to which the loss will be added. reduction: Type of reduction to apply to loss. Returns: Weighted loss `Tensor` of the same type as `logits`. If `reduction` is `NONE`, this has the same shape as `logits`; otherwise, it is scalar. Raises: ValueError: If the shape of `logits` doesn't match that of `multi_class_labels` or if the shape of `weights` is invalid, or if `weights` is None. Also if `multi_class_labels` or `logits` is None. @compatibility(eager) The `loss_collection` argument is ignored when executing eagerly. Consider holding on to the return value or collecting losses via a `tf.keras.Model`. @end_compatibility """ if multi_class_labels is None: raise ValueError("multi_class_labels must not be None.") if logits is None: raise ValueError("logits must not be None.") with ops.name_scope(scope, "sigmoid_cross_entropy_loss", (logits, multi_class_labels, weights)) as scope: logits = ops.convert_to_tensor(logits) multi_class_labels = math_ops.cast(multi_class_labels, logits.dtype) logits.get_shape().assert_is_compatible_with(multi_class_labels.get_shape()) if label_smoothing > 0: multi_class_labels = (multi_class_labels * (1 - label_smoothing) + 0.5 * label_smoothing) losses = nn.sigmoid_cross_entropy_with_logits(labels=multi_class_labels, logits=logits, name="xentropy") return compute_weighted_loss( losses, weights, scope, loss_collection, reduction=reduction) @tf_export(v1=["losses.softmax_cross_entropy"]) def softmax_cross_entropy( onehot_labels, logits, weights=1.0, label_smoothing=0, scope=None, loss_collection=ops.GraphKeys.LOSSES, reduction=Reduction.SUM_BY_NONZERO_WEIGHTS): """Creates a cross-entropy loss using tf.nn.softmax_cross_entropy_with_logits_v2. `weights` acts as a coefficient for the loss. 
If a scalar is provided, then the loss is simply scaled by the given value. If `weights` is a tensor of shape `[batch_size]`, then the loss weights apply to each corresponding sample. If `label_smoothing` is nonzero, smooth the labels towards 1/num_classes: new_onehot_labels = onehot_labels * (1 - label_smoothing) + label_smoothing / num_classes Note that `onehot_labels` and `logits` must have the same shape, e.g. `[batch_size, num_classes]`. The shape of `weights` must be broadcastable to loss, whose shape is decided by the shape of `logits`. In case the shape of `logits` is `[batch_size, num_classes]`, loss is a `Tensor` of shape `[batch_size]`. Args: onehot_labels: One-hot-encoded labels. logits: Logits outputs of the network. weights: Optional `Tensor` that is broadcastable to loss. label_smoothing: If greater than 0 then smooth the labels. scope: the scope for the operations performed in computing the loss. loss_collection: collection to which the loss will be added. reduction: Type of reduction to apply to loss. Returns: Weighted loss `Tensor` of the same type as `logits`. If `reduction` is `NONE`, this has shape `[batch_size]`; otherwise, it is scalar. Raises: ValueError: If the shape of `logits` doesn't match that of `onehot_labels` or if the shape of `weights` is invalid or if `weights` is None. Also if `onehot_labels` or `logits` is None. @compatibility(eager) The `loss_collection` argument is ignored when executing eagerly. Consider holding on to the return value or collecting losses via a `tf.keras.Model`. @end_compatibility """ if onehot_labels is None: raise ValueError("onehot_labels must not be None.") if logits is None: raise ValueError("logits must not be None.") with ops.name_scope(scope, "softmax_cross_entropy_loss", (logits, onehot_labels, weights)) as scope: logits = ops.convert_to_tensor(logits) onehot_labels = math_ops.cast(onehot_labels, logits.dtype) logits.get_shape().assert_is_compatible_with(onehot_labels.get_shape()) if label_smoothing > 0: num_classes = math_ops.cast( array_ops.shape(onehot_labels)[-1], logits.dtype) smooth_positives = 1.0 - label_smoothing smooth_negatives = label_smoothing / num_classes onehot_labels = onehot_labels * smooth_positives + smooth_negatives onehot_labels = array_ops.stop_gradient( onehot_labels, name="labels_stop_gradient") losses = nn.softmax_cross_entropy_with_logits_v2( labels=onehot_labels, logits=logits, name="xentropy") return compute_weighted_loss( losses, weights, scope, loss_collection, reduction=reduction) # TODO(ptucker): Merge this with similar method in metrics_impl. def _remove_squeezable_dimensions( labels, predictions, weights=None, expected_rank_diff=0): """Internal version of _remove_squeezable_dimensions which handles weights. Squeezes `predictions` and `labels` if their ranks differ from expected by exactly 1. Squeezes `weights` if its rank is 1 more than the new rank of `predictions` This will use static shape if available. Otherwise, it will add graph operations, which could result in a performance hit. Args: labels: Label values, a `Tensor` whose dimensions match `predictions`. predictions: Predicted values, a `Tensor` of arbitrary dimensions. weights: Optional weight `Tensor`. It will be squeezed if it's not scalar, and its rank is 1 more than the new rank of `labels`. expected_rank_diff: Expected result of `rank(predictions) - rank(labels)`. Returns: Tuple of `predictions`, `labels` and `weights`, possibly with the last dimension squeezed. 
""" labels, predictions = confusion_matrix.remove_squeezable_dimensions( labels, predictions, expected_rank_diff=expected_rank_diff) if weights is not None: weights = ops.convert_to_tensor(weights) labels_rank = labels.get_shape().ndims weights_shape = weights.get_shape() weights_rank = weights_shape.ndims if (labels_rank is not None) and (weights_rank is not None): # Use static rank. rank_diff = weights_rank - labels_rank if rank_diff == 1: weights = array_ops.squeeze(weights, [-1]) return labels, predictions, weights # Use dynamic rank. rank_diff = array_ops.rank(weights) - array_ops.rank(labels) if (weights_rank is None) or ( weights_rank > 0 and weights_shape.dims[-1].is_compatible_with(1)): weights = control_flow_ops.cond( math_ops.equal(1, rank_diff), lambda: array_ops.squeeze(weights, [-1]), lambda: weights) return labels, predictions, weights @tf_export(v1=["losses.sparse_softmax_cross_entropy"]) def sparse_softmax_cross_entropy( labels, logits, weights=1.0, scope=None, loss_collection=ops.GraphKeys.LOSSES, reduction=Reduction.SUM_BY_NONZERO_WEIGHTS): """Cross-entropy loss using `tf.nn.sparse_softmax_cross_entropy_with_logits`. `weights` acts as a coefficient for the loss. If a scalar is provided, then the loss is simply scaled by the given value. If `weights` is a tensor of shape `[batch_size]`, then the loss weights apply to each corresponding sample. Args: labels: `Tensor` of shape `[d_0, d_1, ..., d_{r-1}]` (where `r` is rank of `labels` and result) and dtype `int32` or `int64`. Each entry in `labels` must be an index in `[0, num_classes)`. Other values will raise an exception when this op is run on CPU, and return `NaN` for corresponding loss and gradient rows on GPU. logits: Unscaled log probabilities of shape `[d_0, d_1, ..., d_{r-1}, num_classes]` and dtype `float16`, `float32` or `float64`. weights: Coefficients for the loss. This must be scalar or broadcastable to `labels` (i.e. same rank and each dimension is either 1 or the same). scope: the scope for the operations performed in computing the loss. loss_collection: collection to which the loss will be added. reduction: Type of reduction to apply to loss. Returns: Weighted loss `Tensor` of the same type as `logits`. If `reduction` is `NONE`, this has the same shape as `labels`; otherwise, it is scalar. Raises: ValueError: If the shapes of `logits`, `labels`, and `weights` are incompatible, or if any of them are None. @compatibility(eager) The `loss_collection` argument is ignored when executing eagerly. Consider holding on to the return value or collecting losses via a `tf.keras.Model`. @end_compatibility """ if labels is None: raise ValueError("labels must not be None.") if logits is None: raise ValueError("logits must not be None.") with ops.name_scope(scope, "sparse_softmax_cross_entropy_loss", (logits, labels, weights)) as scope: # As documented above in Args, labels contain class IDs and logits contains # 1 probability per class ID, so we expect rank(logits) - rank(labels) == 1; # therefore, expected_rank_diff=1. labels, logits, weights = _remove_squeezable_dimensions( labels, logits, weights, expected_rank_diff=1) losses = nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits, name="xentropy") return compute_weighted_loss( losses, weights, scope, loss_collection, reduction=reduction)
tensorflow-master
tensorflow/python/ops/losses/losses_impl.py
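# Usage sketch for the cross-entropy losses defined in losses_impl.py above.
# Logits, labels and the smoothing value are illustrative only; the API
# endpoints correspond to the v1 exports in that file.
import tensorflow as tf

example_logits = tf.constant([[2.0, -1.0, 0.5], [0.1, 1.5, -0.3]])  # [batch, classes]

# sparse_softmax_cross_entropy takes one class id per example.
class_ids = tf.constant([0, 1])
sparse_xent = tf.compat.v1.losses.sparse_softmax_cross_entropy(
    labels=class_ids, logits=example_logits)

# softmax_cross_entropy takes one-hot labels; label_smoothing moves them
# towards 1/num_classes as described in its docstring.
onehot_labels = tf.one_hot(class_ids, depth=3)
smoothed_xent = tf.compat.v1.losses.softmax_cross_entropy(
    onehot_labels=onehot_labels, logits=example_logits, label_smoothing=0.1)

# sigmoid_cross_entropy treats every column as an independent binary problem.
multi_labels = tf.constant([[1.0, 0.0, 1.0], [0.0, 1.0, 0.0]])
sigmoid_xent = tf.compat.v1.losses.sigmoid_cross_entropy(
    multi_class_labels=multi_labels, logits=example_logits)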
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for losses util.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import constant_op from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.ops.losses import util from tensorflow.python.platform import test class LossesUtilTest(test.TestCase): @test_util.run_deprecated_v1 def testGetRegularizationLoss(self): # Empty regularization collection should evaluate to 0.0. with self.cached_session(): self.assertEqual(0.0, util.get_regularization_loss().eval()) # Loss should sum. ops.add_to_collection( ops.GraphKeys.REGULARIZATION_LOSSES, constant_op.constant(2.0)) ops.add_to_collection( ops.GraphKeys.REGULARIZATION_LOSSES, constant_op.constant(3.0)) with self.cached_session(): self.assertEqual(5.0, util.get_regularization_loss().eval()) # Check scope capture mechanism. with ops.name_scope('scope1'): ops.add_to_collection( ops.GraphKeys.REGULARIZATION_LOSSES, constant_op.constant(-1.0)) with self.cached_session(): self.assertEqual(-1.0, util.get_regularization_loss('scope1').eval()) if __name__ == '__main__': test.main()
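# Sketch of the pattern exercised by the test above, written against the
# public v1 API in graph mode. The scope name and constant values are
# hypothetical.
import tensorflow as tf

graph = tf.Graph()
with graph.as_default():
  with tf.compat.v1.name_scope('tower0'):
    tf.compat.v1.add_to_collection(
        tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES, tf.constant(2.0))
  tf.compat.v1.add_to_collection(
      tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES, tf.constant(3.0))

  total_reg = tf.compat.v1.losses.get_regularization_loss()           # 2 + 3
  scoped_reg = tf.compat.v1.losses.get_regularization_loss('tower0')  # 2 only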
tensorflow-master
tensorflow/python/ops/losses/util_test.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Loss operations for use in neural networks. Note: All the losses are added to the `GraphKeys.LOSSES` collection by default. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function # pylint: disable=wildcard-import from tensorflow.python.ops.losses.losses_impl import * from tensorflow.python.ops.losses.util import * # pylint: enable=wildcard-import
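# Minimal sketch: the wildcard imports above re-export the implementation
# symbols, so they are typically reached through the `tf.compat.v1.losses`
# namespace. The tensors below are illustrative.
import tensorflow as tf

example_labels = tf.constant([1.0, 0.0, 1.0])
example_predictions = tf.constant([0.8, 0.2, 0.6])

abs_diff = tf.compat.v1.losses.absolute_difference(
    example_labels, example_predictions)
# Sums whatever is in the REGULARIZATION_LOSSES collection (0.0 if empty).
total_reg = tf.compat.v1.losses.get_regularization_loss()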
tensorflow-master
tensorflow/python/ops/losses/losses.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """`LinearOperator` acting like a Householder transformation.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops.linalg import linalg_impl as linalg from tensorflow.python.ops.linalg import linear_operator from tensorflow.python.util.tf_export import tf_export __all__ = ["LinearOperatorHouseholder",] @tf_export("linalg.LinearOperatorHouseholder") class LinearOperatorHouseholder(linear_operator.LinearOperator): """`LinearOperator` acting like a [batch] of Householder transformations. This operator acts like a [batch] of householder reflections with shape `[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is an `N x N` matrix. This matrix `A` is not materialized, but for purposes of broadcasting this shape will be relevant. `LinearOperatorHouseholder` is initialized with a (batch) vector. A Householder reflection, defined via a vector `v`, which reflects points in `R^n` about the hyperplane orthogonal to `v` and through the origin. ```python # Create a 2 x 2 householder transform. vec = [1 / np.sqrt(2), 1. / np.sqrt(2)] operator = LinearOperatorHouseholder(vec) operator.to_dense() ==> [[0., -1.] [-1., -0.]] operator.shape ==> [2, 2] operator.log_abs_determinant() ==> scalar Tensor x = ... Shape [2, 4] Tensor operator.matmul(x) ==> Shape [2, 4] Tensor #### Shape compatibility This operator acts on [batch] matrix with compatible shape. `x` is a batch matrix with compatible shape for `matmul` and `solve` if ``` operator.shape = [B1,...,Bb] + [N, N], with b >= 0 x.shape = [C1,...,Cc] + [N, R], and [C1,...,Cc] broadcasts with [B1,...,Bb] to [D1,...,Dd] ``` #### Matrix property hints This `LinearOperator` is initialized with boolean flags of the form `is_X`, for `X = non_singular, self_adjoint, positive_definite, square`. These have the following meaning: * If `is_X == True`, callers should expect the operator to have the property `X`. This is a promise that should be fulfilled, but is *not* a runtime assert. For example, finite floating point precision may result in these promises being violated. * If `is_X == False`, callers should expect the operator to not have `X`. * If `is_X == None` (the default), callers should have no expectation either way. """ def __init__(self, reflection_axis, is_non_singular=None, is_self_adjoint=None, is_positive_definite=None, is_square=None, name="LinearOperatorHouseholder"): r"""Initialize a `LinearOperatorHouseholder`. 
Args: reflection_axis: Shape `[B1,...,Bb, N]` `Tensor` with `b >= 0` `N >= 0`. The vector defining the hyperplane to reflect about. Allowed dtypes: `float16`, `float32`, `float64`, `complex64`, `complex128`. is_non_singular: Expect that this operator is non-singular. is_self_adjoint: Expect that this operator is equal to its hermitian transpose. This is autoset to true is_positive_definite: Expect that this operator is positive definite, meaning the quadratic form `x^H A x` has positive real part for all nonzero `x`. Note that we do not require the operator to be self-adjoint to be positive-definite. See: https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices This is autoset to false. is_square: Expect that this operator acts like square [batch] matrices. This is autoset to true. name: A name for this `LinearOperator`. Raises: ValueError: `is_self_adjoint` is not `True`, `is_positive_definite` is not `False` or `is_square` is not `True`. """ with ops.name_scope(name, values=[reflection_axis]): self._reflection_axis = ops.convert_to_tensor( reflection_axis, name="reflection_axis") self._check_reflection_axis(self._reflection_axis) # Check and auto-set hints. if is_self_adjoint is False: # pylint:disable=g-bool-id-comparison raise ValueError("A Householder operator is always self adjoint.") else: is_self_adjoint = True if is_positive_definite is True: # pylint:disable=g-bool-id-comparison raise ValueError( "A Householder operator is always non-positive definite.") else: is_positive_definite = False if is_square is False: # pylint:disable=g-bool-id-comparison raise ValueError("A Householder operator is always square.") is_square = True super(LinearOperatorHouseholder, self).__init__( dtype=self._reflection_axis.dtype, graph_parents=[self._reflection_axis], is_non_singular=is_non_singular, is_self_adjoint=is_self_adjoint, is_positive_definite=is_positive_definite, is_square=is_square, name=name) def _check_reflection_axis(self, reflection_axis): """Static check of reflection_axis.""" if (reflection_axis.get_shape().ndims is not None and reflection_axis.get_shape().ndims < 1): raise ValueError( "Argument reflection_axis must have at least 1 dimension. " "Found: %s" % reflection_axis) def _shape(self): # If d_shape = [5, 3], we return [5, 3, 3]. d_shape = self._reflection_axis.get_shape() return d_shape.concatenate(d_shape[-1:]) def _shape_tensor(self): d_shape = array_ops.shape(self._reflection_axis) k = d_shape[-1] return array_ops.concat((d_shape, [k]), 0) def _assert_non_singular(self): return control_flow_ops.no_op("assert_non_singular") def _assert_positive_definite(self): raise errors.InvalidArgumentError( node_def=None, op=None, message="Householder operators are always " "non-positive definite.") def _assert_self_adjoint(self): return control_flow_ops.no_op("assert_self_adjoint") def _matmul(self, x, adjoint=False, adjoint_arg=False): # Given a vector `v`, we would like to reflect `x` about the hyperplane # orthogonal to `v` going through the origin. We first project `x` to `v` # to get v * dot(v, x) / dot(v, v). After we project, we can reflect the # projection about the hyperplane by flipping sign to get # -v * dot(v, x) / dot(v, v). Finally, we can add back the component # that is orthogonal to v. This is invariant under reflection, since the # whole hyperplane is invariant. This component is equal to x - v * dot(v, # x) / dot(v, v), giving the formula x - 2 * v * dot(v, x) / dot(v, v) # for the reflection. 
# Note that because this is a reflection, it lies in O(n) (for real vector # spaces) or U(n) (for complex vector spaces), and thus is its own adjoint. x = linalg.adjoint(x) if adjoint_arg else x normalized_axis = self.reflection_axis / linalg.norm( self.reflection_axis, axis=-1, keepdims=True) mat = normalized_axis[..., array_ops.newaxis] x_dot_normalized_v = math_ops.matmul(mat, x, adjoint_a=True) return x - 2 * mat * x_dot_normalized_v def _trace(self): # We have (n - 1) +1 eigenvalues and a single -1 eigenvalue. return math_ops.cast( self.domain_dimension_tensor() - 2, self.dtype) * array_ops.ones( shape=self.batch_shape_tensor(), dtype=self.dtype) def _determinant(self): # For householder transformations, the determinant is -1. return -array_ops.ones(shape=self.batch_shape_tensor(), dtype=self.dtype) def _log_abs_determinant(self): # Orthogonal matrix -> log|Q| = 0. return array_ops.zeros(shape=self.batch_shape_tensor(), dtype=self.dtype) def _solve(self, rhs, adjoint=False, adjoint_arg=False): # A householder reflection is a reflection, hence is idempotent. Thus we # can just apply a matmul. return self._matmul(rhs, adjoint, adjoint_arg) def _to_dense(self): normalized_axis = self.reflection_axis / linalg.norm( self.reflection_axis, axis=-1, keepdims=True) mat = normalized_axis[..., array_ops.newaxis] matrix = -2 * math_ops.matmul(mat, mat, adjoint_b=True) return array_ops.matrix_set_diag( matrix, 1. + array_ops.matrix_diag_part(matrix)) def _diag_part(self): normalized_axis = self.reflection_axis / linalg.norm( self.reflection_axis, axis=-1, keepdims=True) return 1. - 2 * normalized_axis * math_ops.conj(normalized_axis) @property def reflection_axis(self): return self._reflection_axis
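# Usage sketch for the Householder operator defined above; values are
# illustrative. The axis is normalized inside _matmul/_to_dense, so it does
# not need to be a unit vector here.
import tensorflow as tf

reflection_axis = tf.constant([1.0, 1.0])
householder = tf.linalg.LinearOperatorHouseholder(reflection_axis)

x = tf.constant([[1.0, 2.0], [3.0, 4.0]])
reflected = householder.matmul(x)
# A reflection is its own inverse, so solve() reuses matmul(), as in _solve.
recovered = householder.solve(reflected)   # approximately equal to x
det = householder.determinant()            # -1, as returned by _determinant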
tensorflow-master
tensorflow/python/ops/linalg/linear_operator_householder.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """`LinearOperator` acting like a diagonal matrix.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops.linalg import linalg_impl as linalg from tensorflow.python.ops.linalg import linear_operator from tensorflow.python.ops.linalg import linear_operator_util from tensorflow.python.util.tf_export import tf_export __all__ = ["LinearOperatorDiag",] @tf_export("linalg.LinearOperatorDiag") class LinearOperatorDiag(linear_operator.LinearOperator): """`LinearOperator` acting like a [batch] square diagonal matrix. This operator acts like a [batch] diagonal matrix `A` with shape `[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is an `N x N` matrix. This matrix `A` is not materialized, but for purposes of broadcasting this shape will be relevant. `LinearOperatorDiag` is initialized with a (batch) vector. ```python # Create a 2 x 2 diagonal linear operator. diag = [1., -1.] operator = LinearOperatorDiag(diag) operator.to_dense() ==> [[1., 0.] [0., -1.]] operator.shape ==> [2, 2] operator.log_abs_determinant() ==> scalar Tensor x = ... Shape [2, 4] Tensor operator.matmul(x) ==> Shape [2, 4] Tensor # Create a [2, 3] batch of 4 x 4 linear operators. diag = tf.random.normal(shape=[2, 3, 4]) operator = LinearOperatorDiag(diag) # Create a shape [2, 1, 4, 2] vector. Note that this shape is compatible # since the batch dimensions, [2, 1], are broadcast to # operator.batch_shape = [2, 3]. y = tf.random.normal(shape=[2, 1, 4, 2]) x = operator.solve(y) ==> operator.matmul(x) = y ``` #### Shape compatibility This operator acts on [batch] matrix with compatible shape. `x` is a batch matrix with compatible shape for `matmul` and `solve` if ``` operator.shape = [B1,...,Bb] + [N, N], with b >= 0 x.shape = [C1,...,Cc] + [N, R], and [C1,...,Cc] broadcasts with [B1,...,Bb] to [D1,...,Dd] ``` #### Performance Suppose `operator` is a `LinearOperatorDiag` of shape `[N, N]`, and `x.shape = [N, R]`. Then * `operator.matmul(x)` involves `N * R` multiplications. * `operator.solve(x)` involves `N` divisions and `N * R` multiplications. * `operator.determinant()` involves a size `N` `reduce_prod`. If instead `operator` and `x` have shape `[B1,...,Bb, N, N]` and `[B1,...,Bb, N, R]`, every operation increases in complexity by `B1*...*Bb`. #### Matrix property hints This `LinearOperator` is initialized with boolean flags of the form `is_X`, for `X = non_singular, self_adjoint, positive_definite, square`. 
These have the following meaning: * If `is_X == True`, callers should expect the operator to have the property `X`. This is a promise that should be fulfilled, but is *not* a runtime assert. For example, finite floating point precision may result in these promises being violated. * If `is_X == False`, callers should expect the operator to not have `X`. * If `is_X == None` (the default), callers should have no expectation either way. """ def __init__(self, diag, is_non_singular=None, is_self_adjoint=None, is_positive_definite=None, is_square=None, name="LinearOperatorDiag"): r"""Initialize a `LinearOperatorDiag`. Args: diag: Shape `[B1,...,Bb, N]` `Tensor` with `b >= 0` `N >= 0`. The diagonal of the operator. Allowed dtypes: `float16`, `float32`, `float64`, `complex64`, `complex128`. is_non_singular: Expect that this operator is non-singular. is_self_adjoint: Expect that this operator is equal to its hermitian transpose. If `diag.dtype` is real, this is auto-set to `True`. is_positive_definite: Expect that this operator is positive definite, meaning the quadratic form `x^H A x` has positive real part for all nonzero `x`. Note that we do not require the operator to be self-adjoint to be positive-definite. See: https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices is_square: Expect that this operator acts like square [batch] matrices. name: A name for this `LinearOperator`. Raises: TypeError: If `diag.dtype` is not an allowed type. ValueError: If `diag.dtype` is real, and `is_self_adjoint` is not `True`. """ with ops.name_scope(name, values=[diag]): self._diag = ops.convert_to_tensor(diag, name="diag") self._check_diag(self._diag) # Check and auto-set hints. if not self._diag.dtype.is_complex: if is_self_adjoint is False: raise ValueError("A real diagonal operator is always self adjoint.") else: is_self_adjoint = True if is_square is False: raise ValueError("Only square diagonal operators currently supported.") is_square = True super(LinearOperatorDiag, self).__init__( dtype=self._diag.dtype, graph_parents=[self._diag], is_non_singular=is_non_singular, is_self_adjoint=is_self_adjoint, is_positive_definite=is_positive_definite, is_square=is_square, name=name) def _check_diag(self, diag): """Static check of diag.""" allowed_dtypes = [ dtypes.float16, dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128, ] dtype = diag.dtype if dtype not in allowed_dtypes: raise TypeError( "Argument diag must have dtype in %s. Found: %s" % (allowed_dtypes, dtype)) if diag.get_shape().ndims is not None and diag.get_shape().ndims < 1: raise ValueError("Argument diag must have at least 1 dimension. " "Found: %s" % diag) def _shape(self): # If d_shape = [5, 3], we return [5, 3, 3]. 
d_shape = self._diag.get_shape() return d_shape.concatenate(d_shape[-1:]) def _shape_tensor(self): d_shape = array_ops.shape(self._diag) k = d_shape[-1] return array_ops.concat((d_shape, [k]), 0) def _assert_non_singular(self): return linear_operator_util.assert_no_entries_with_modulus_zero( self._diag, message="Singular operator: Diagonal contained zero values.") def _assert_positive_definite(self): if self.dtype.is_complex: message = ( "Diagonal operator had diagonal entries with non-positive real part, " "thus was not positive definite.") else: message = ( "Real diagonal operator had non-positive diagonal entries, " "thus was not positive definite.") return check_ops.assert_positive( math_ops.real(self._diag), message=message) def _assert_self_adjoint(self): return linear_operator_util.assert_zero_imag_part( self._diag, message=( "This diagonal operator contained non-zero imaginary values. " " Thus it was not self-adjoint.")) def _matmul(self, x, adjoint=False, adjoint_arg=False): diag_term = math_ops.conj(self._diag) if adjoint else self._diag x = linalg.adjoint(x) if adjoint_arg else x diag_mat = array_ops.expand_dims(diag_term, -1) return diag_mat * x def _matvec(self, x, adjoint=False): diag_term = math_ops.conj(self._diag) if adjoint else self._diag return diag_term * x def _determinant(self): return math_ops.reduce_prod(self._diag, axis=[-1]) def _log_abs_determinant(self): log_det = math_ops.reduce_sum( math_ops.log(math_ops.abs(self._diag)), axis=[-1]) if self.dtype.is_complex: log_det = math_ops.cast(log_det, dtype=self.dtype) return log_det def _solve(self, rhs, adjoint=False, adjoint_arg=False): diag_term = math_ops.conj(self._diag) if adjoint else self._diag rhs = linalg.adjoint(rhs) if adjoint_arg else rhs inv_diag_mat = array_ops.expand_dims(1. / diag_term, -1) return rhs * inv_diag_mat def _to_dense(self): return array_ops.matrix_diag(self._diag) def _diag_part(self): return self.diag def _add_to_tensor(self, x): x_diag = array_ops.matrix_diag_part(x) new_diag = self._diag + x_diag return array_ops.matrix_set_diag(x, new_diag) @property def diag(self): return self._diag
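# Usage sketch for the diagonal operator defined above; values are
# illustrative. matmul scales each row by the matching diagonal entry and
# solve divides by it, which is what the performance notes in the class
# docstring describe.
import tensorflow as tf

diag = tf.constant([2.0, -1.0, 4.0])
operator = tf.linalg.LinearOperatorDiag(diag, is_non_singular=True)

x = tf.random.normal(shape=[3, 5])
y = operator.matmul(x)                     # rows of x scaled by diag
x_back = operator.solve(y)                 # rows of y divided by diag
log_det = operator.log_abs_determinant()   # log(2) + log(1) + log(4)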
tensorflow-master
tensorflow/python/ops/linalg/linear_operator_diag.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Inverts a non-singular `LinearOperator`.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import ops from tensorflow.python.ops.linalg import linear_operator from tensorflow.python.util.tf_export import tf_export __all__ = [] @tf_export("linalg.LinearOperatorInversion") class LinearOperatorInversion(linear_operator.LinearOperator): """`LinearOperator` representing the inverse of another operator. This operator represents the inverse of another operator. ```python # Create a 2 x 2 linear operator. operator = LinearOperatorFullMatrix([[1., 0.], [0., 2.]]) operator_inv = LinearOperatorInversion(operator) operator_inv.to_dense() ==> [[1., 0.] [0., 0.5]] operator_inv.shape ==> [2, 2] operator_inv.log_abs_determinant() ==> - log(2) x = ... Shape [2, 4] Tensor operator_inv.matmul(x) ==> Shape [2, 4] Tensor, equal to operator.solve(x) ``` #### Performance The performance of `LinearOperatorInversion` depends on the underlying operators performance: `solve` and `matmul` are swapped, and determinant is inverted. #### Matrix property hints This `LinearOperator` is initialized with boolean flags of the form `is_X`, for `X = non_singular, self_adjoint, positive_definite, square`. These have the following meaning: * If `is_X == True`, callers should expect the operator to have the property `X`. This is a promise that should be fulfilled, but is *not* a runtime assert. For example, finite floating point precision may result in these promises being violated. * If `is_X == False`, callers should expect the operator to not have `X`. * If `is_X == None` (the default), callers should have no expectation either way. """ def __init__(self, operator, is_non_singular=None, is_self_adjoint=None, is_positive_definite=None, is_square=None, name=None): r"""Initialize a `LinearOperatorInversion`. `LinearOperatorInversion` is initialized with an operator `A`. The `solve` and `matmul` methods are effectively swapped. E.g. ``` A = MyLinearOperator(...) B = LinearOperatorInversion(A) x = [....] # a vector assert A.matvec(x) == B.solvevec(x) ``` Args: operator: `LinearOperator` object. If `operator.is_non_singular == False`, an exception is raised. We do allow `operator.is_non_singular == None`, in which case this operator will have `is_non_singular == None`. Similarly for `is_self_adjoint` and `is_positive_definite`. is_non_singular: Expect that this operator is non-singular. is_self_adjoint: Expect that this operator is equal to its hermitian transpose. is_positive_definite: Expect that this operator is positive definite, meaning the quadratic form `x^H A x` has positive real part for all nonzero `x`. Note that we do not require the operator to be self-adjoint to be positive-definite. 
See: https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices is_square: Expect that this operator acts like square [batch] matrices. name: A name for this `LinearOperator`. Default is `operator.name + "_inv"`. Raises: ValueError: If `operator.is_non_singular` is False. """ self._operator = operator # Auto-set and check hints. if operator.is_non_singular is False or is_non_singular is False: raise ValueError( "operator and supplied hints must have `is_non_singular` equal to " "`True` or `None`. Found %s, %s" % (operator.is_non_singular, is_non_singular)) if operator.is_square is False or is_square is False: raise ValueError( "operator and supplied hints must have `is_square` equal to " "`True` or `None`. Found %s, %s" % (operator.is_square, is_square)) # The congruency of is_non_singular and is_self_adjoint was checked in the # base operator. Other hints are, in this special case of inversion, ones # that must be the same for base/derived operator. def _combined_hint(hint_str, provided_hint_value, message): """Get combined hint in the case where operator.hint should equal hint.""" op_hint = getattr(operator, hint_str) if op_hint is False and provided_hint_value: raise ValueError(message) if op_hint and provided_hint_value is False: raise ValueError(message) return (op_hint or provided_hint_value) or None is_square = _combined_hint( "is_square", is_square, "An operator is square if and only if its inverse is square.") is_non_singular = _combined_hint( "is_non_singular", is_non_singular, "An operator is non-singular if and only if its inverse is " "non-singular.") is_self_adjoint = _combined_hint( "is_self_adjoint", is_self_adjoint, "An operator is self-adjoint if and only if its inverse is " "self-adjoint.") is_positive_definite = _combined_hint( "is_positive_definite", is_positive_definite, "An operator is positive-definite if and only if its inverse is " "positive-definite.") is_square = _combined_hint( "is_square", is_square, "An operator is square if and only if its inverse is square.") # Initialization. if name is None: name = operator.name + "_inv" with ops.name_scope(name, values=operator.graph_parents): super(LinearOperatorInversion, self).__init__( dtype=operator.dtype, graph_parents=operator.graph_parents, is_non_singular=is_non_singular, is_self_adjoint=is_self_adjoint, is_positive_definite=is_positive_definite, is_square=is_square, name=name) @property def operator(self): """The operator before inversion.""" return self._operator def _assert_non_singular(self): return self.operator.assert_non_singular() def _assert_positive_definite(self): return self.operator.assert_positive_definite() def _assert_self_adjoint(self): return self.operator.assert_self_adjoint() def _shape(self): return self.operator.shape def _shape_tensor(self): return self.operator.shape_tensor() def _matmul(self, x, adjoint=False, adjoint_arg=False): return self.operator.solve(x, adjoint=adjoint, adjoint_arg=adjoint_arg) def _determinant(self): return 1. / self.operator.determinant() def _log_abs_determinant(self): return -1. * self.operator.log_abs_determinant() def _solve(self, rhs, adjoint=False, adjoint_arg=False): return self.operator.matmul(rhs, adjoint=adjoint, adjoint_arg=adjoint_arg)
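# Usage sketch: wrapping a diagonal operator in LinearOperatorInversion.
# Values are illustrative. matmul on the inverse dispatches to solve on the
# wrapped operator, and the determinant is reciprocated, matching _matmul and
# _determinant above.
import tensorflow as tf

base = tf.linalg.LinearOperatorDiag([2.0, 4.0], is_non_singular=True)
inverse = tf.linalg.LinearOperatorInversion(base)

x = tf.constant([[1.0], [1.0]])
y = inverse.matmul(x)          # same as base.solve(x) -> [[0.5], [0.25]]
det = inverse.determinant()    # 1 / (2 * 4) = 0.125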
tensorflow-master
tensorflow/python/ops/linalg/linear_operator_inversion.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """`LinearOperator` acting like a lower triangular matrix.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops.linalg import linalg_impl as linalg from tensorflow.python.ops.linalg import linear_operator from tensorflow.python.ops.linalg import linear_operator_util from tensorflow.python.util.tf_export import tf_export __all__ = [ "LinearOperatorLowerTriangular", ] @tf_export("linalg.LinearOperatorLowerTriangular") class LinearOperatorLowerTriangular(linear_operator.LinearOperator): """`LinearOperator` acting like a [batch] square lower triangular matrix. This operator acts like a [batch] lower triangular matrix `A` with shape `[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is an `N x N` matrix. `LinearOperatorLowerTriangular` is initialized with a `Tensor` having dimensions `[B1,...,Bb, N, N]`. The upper triangle of the last two dimensions is ignored. ```python # Create a 2 x 2 lower-triangular linear operator. tril = [[1., 2.], [3., 4.]] operator = LinearOperatorLowerTriangular(tril) # The upper triangle is ignored. operator.to_dense() ==> [[1., 0.] [3., 4.]] operator.shape ==> [2, 2] operator.log_abs_determinant() ==> scalar Tensor x = ... Shape [2, 4] Tensor operator.matmul(x) ==> Shape [2, 4] Tensor # Create a [2, 3] batch of 4 x 4 linear operators. tril = tf.random.normal(shape=[2, 3, 4, 4]) operator = LinearOperatorLowerTriangular(tril) ``` #### Shape compatibility This operator acts on [batch] matrix with compatible shape. `x` is a batch matrix with compatible shape for `matmul` and `solve` if ``` operator.shape = [B1,...,Bb] + [N, N], with b >= 0 x.shape = [B1,...,Bb] + [N, R], with R >= 0. ``` #### Performance Suppose `operator` is a `LinearOperatorLowerTriangular` of shape `[N, N]`, and `x.shape = [N, R]`. Then * `operator.matmul(x)` involves `N^2 * R` multiplications. * `operator.solve(x)` involves `N * R` size `N` back-substitutions. * `operator.determinant()` involves a size `N` `reduce_prod`. If instead `operator` and `x` have shape `[B1,...,Bb, N, N]` and `[B1,...,Bb, N, R]`, every operation increases in complexity by `B1*...*Bb`. #### Matrix property hints This `LinearOperator` is initialized with boolean flags of the form `is_X`, for `X = non_singular, self_adjoint, positive_definite, square`. These have the following meaning: * If `is_X == True`, callers should expect the operator to have the property `X`. This is a promise that should be fulfilled, but is *not* a runtime assert. 
For example, finite floating point precision may result in these promises being violated. * If `is_X == False`, callers should expect the operator to not have `X`. * If `is_X == None` (the default), callers should have no expectation either way. """ def __init__(self, tril, is_non_singular=None, is_self_adjoint=None, is_positive_definite=None, is_square=None, name="LinearOperatorLowerTriangular"): r"""Initialize a `LinearOperatorLowerTriangular`. Args: tril: Shape `[B1,...,Bb, N, N]` with `b >= 0`, `N >= 0`. The lower triangular part of `tril` defines this operator. The strictly upper triangle is ignored. is_non_singular: Expect that this operator is non-singular. This operator is non-singular if and only if its diagonal elements are all non-zero. is_self_adjoint: Expect that this operator is equal to its hermitian transpose. This operator is self-adjoint only if it is diagonal with real-valued diagonal entries. In this case it is advised to use `LinearOperatorDiag`. is_positive_definite: Expect that this operator is positive definite, meaning the quadratic form `x^H A x` has positive real part for all nonzero `x`. Note that we do not require the operator to be self-adjoint to be positive-definite. See: https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices is_square: Expect that this operator acts like square [batch] matrices. name: A name for this `LinearOperator`. Raises: ValueError: If `is_square` is `False`. """ if is_square is False: raise ValueError( "Only square lower triangular operators supported at this time.") is_square = True with ops.name_scope(name, values=[tril]): self._tril = ops.convert_to_tensor(tril, name="tril") self._check_tril(self._tril) self._tril = array_ops.matrix_band_part(tril, -1, 0) self._diag = array_ops.matrix_diag_part(self._tril) super(LinearOperatorLowerTriangular, self).__init__( dtype=self._tril.dtype, graph_parents=[self._tril], is_non_singular=is_non_singular, is_self_adjoint=is_self_adjoint, is_positive_definite=is_positive_definite, is_square=is_square, name=name) def _check_tril(self, tril): """Static check of the `tril` argument.""" allowed_dtypes = [ dtypes.float16, dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128, ] dtype = tril.dtype if dtype not in allowed_dtypes: raise TypeError( "Argument tril must have dtype in %s. Found: %s" % (allowed_dtypes, dtype)) if tril.get_shape().ndims is not None and tril.get_shape().ndims < 2: raise ValueError( "Argument tril must have at least 2 dimensions. Found: %s" % tril) def _shape(self): return self._tril.get_shape() def _shape_tensor(self): return array_ops.shape(self._tril) def _assert_non_singular(self): return linear_operator_util.assert_no_entries_with_modulus_zero( self._diag, message="Singular operator: Diagonal contained zero values.") def _matmul(self, x, adjoint=False, adjoint_arg=False): return math_ops.matmul( self._tril, x, adjoint_a=adjoint, adjoint_b=adjoint_arg) def _determinant(self): return math_ops.reduce_prod(self._diag, axis=[-1]) def _log_abs_determinant(self): return math_ops.reduce_sum( math_ops.log(math_ops.abs(self._diag)), axis=[-1]) def _solve(self, rhs, adjoint=False, adjoint_arg=False): rhs = linalg.adjoint(rhs) if adjoint_arg else rhs return linear_operator_util.matrix_triangular_solve_with_broadcast( self._tril, rhs, lower=True, adjoint=adjoint) def _to_dense(self): return self._tril def _add_to_tensor(self, x): return self._tril + x
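# Usage sketch for the lower-triangular operator defined above; values are
# illustrative. solve() performs a triangular solve instead of forming an
# inverse, and the strictly upper part of `tril` is ignored.
import tensorflow as tf

tril = tf.constant([[2.0, 0.0], [1.0, 3.0]])
operator = tf.linalg.LinearOperatorLowerTriangular(tril)

rhs = tf.constant([[4.0], [7.0]])
solution = operator.solve(rhs)             # [[2.0], [5.0 / 3.0]]
dense = operator.to_dense()                # lower triangle of `tril`
log_det = operator.log_abs_determinant()   # log(2) + log(3)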
tensorflow-master
tensorflow/python/ops/linalg/linear_operator_lower_triangular.py