Dataset columns: python_code (string, 0 to 1.02M characters), repo_name (string, 9 to 48 characters), file_path (string, 5 to 114 characters).
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras built-in datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.keras.datasets import boston_housing
from tensorflow.python.keras.datasets import cifar10
from tensorflow.python.keras.datasets import cifar100
from tensorflow.python.keras.datasets import fashion_mnist
from tensorflow.python.keras.datasets import imdb
from tensorflow.python.keras.datasets import mnist
from tensorflow.python.keras.datasets import reuters

del absolute_import
del division
del print_function
tensorflow-master
tensorflow/python/keras/datasets/__init__.py
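The `__init__.py` above only re-exports the per-dataset modules. As a rough usage sketch (assuming the standard public `tf.keras.datasets` API of this TensorFlow version, not anything defined in this file), a loader can then be called as:

```python
import tensorflow as tf

# Downloads to ~/.keras/datasets on the first call, then loads from the cache.
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
print(x_train.shape, y_train.shape)  # (60000, 28, 28) (60000,)
```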
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""IMDB sentiment classification dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import json

import numpy as np

from tensorflow.python.keras.preprocessing.sequence import _remove_long_seq
from tensorflow.python.keras.utils.data_utils import get_file
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import keras_export


@keras_export('keras.datasets.imdb.load_data')
def load_data(path='imdb.npz',
              num_words=None,
              skip_top=0,
              maxlen=None,
              seed=113,
              start_char=1,
              oov_char=2,
              index_from=3,
              **kwargs):
  """Loads the IMDB dataset.

  Arguments:
      path: where to cache the data (relative to `~/.keras/dataset`).
      num_words: max number of words to include. Words are ranked
          by how often they occur (in the training set) and only
          the most frequent words are kept.
      skip_top: skip the top N most frequently occurring words
          (which may not be informative).
      maxlen: sequences longer than this will be filtered out.
      seed: random seed for sample shuffling.
      start_char: The start of a sequence will be marked with this character.
          Set to 1 because 0 is usually the padding character.
      oov_char: words that were cut out because of the `num_words`
          or `skip_top` limit will be replaced with this character.
      index_from: index actual words with this index and higher.
      **kwargs: Used for backwards compatibility.

  Returns:
      Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.

  Raises:
      ValueError: in case `maxlen` is so low
          that no input sequence could be kept.

  Note that the 'out of vocabulary' character is only used for
  words that were present in the training set but are not included
  because they're not making the `num_words` cut here.
  Words that were not seen in the training set but are in the test set
  have simply been skipped.
  """
  # Legacy support
  if 'nb_words' in kwargs:
    logging.warning('The `nb_words` argument in `load_data` '
                    'has been renamed `num_words`.')
    num_words = kwargs.pop('nb_words')
  if kwargs:
    raise TypeError('Unrecognized keyword arguments: ' + str(kwargs))

  origin_folder = 'https://storage.googleapis.com/tensorflow/tf-keras-datasets/'
  path = get_file(
      path,
      origin=origin_folder + 'imdb.npz',
      file_hash='599dadb1135973df5b59232a0e9a887c')
  with np.load(path, allow_pickle=True) as f:
    x_train, labels_train = f['x_train'], f['y_train']
    x_test, labels_test = f['x_test'], f['y_test']

  np.random.seed(seed)
  indices = np.arange(len(x_train))
  np.random.shuffle(indices)
  x_train = x_train[indices]
  labels_train = labels_train[indices]

  indices = np.arange(len(x_test))
  np.random.shuffle(indices)
  x_test = x_test[indices]
  labels_test = labels_test[indices]

  xs = np.concatenate([x_train, x_test])
  labels = np.concatenate([labels_train, labels_test])

  if start_char is not None:
    xs = [[start_char] + [w + index_from for w in x] for x in xs]
  elif index_from:
    xs = [[w + index_from for w in x] for x in xs]

  if maxlen:
    xs, labels = _remove_long_seq(maxlen, xs, labels)
    if not xs:
      raise ValueError('After filtering for sequences shorter than maxlen=' +
                       str(maxlen) + ', no sequence was kept. '
                       'Increase maxlen.')
  if not num_words:
    num_words = max([max(x) for x in xs])

  # by convention, use 2 as OOV word
  # reserve 'index_from' (=3 by default) characters:
  # 0 (padding), 1 (start), 2 (OOV)
  if oov_char is not None:
    xs = [
        [w if (skip_top <= w < num_words) else oov_char for w in x] for x in xs
    ]
  else:
    xs = [[w for w in x if skip_top <= w < num_words] for x in xs]

  idx = len(x_train)
  x_train, y_train = np.array(xs[:idx]), np.array(labels[:idx])
  x_test, y_test = np.array(xs[idx:]), np.array(labels[idx:])
  return (x_train, y_train), (x_test, y_test)


@keras_export('keras.datasets.imdb.get_word_index')
def get_word_index(path='imdb_word_index.json'):
  """Retrieves the dictionary mapping word indices back to words.

  Arguments:
      path: where to cache the data (relative to `~/.keras/dataset`).

  Returns:
      The word index dictionary.
  """
  origin_folder = 'https://storage.googleapis.com/tensorflow/tf-keras-datasets/'
  path = get_file(
      path,
      origin=origin_folder + 'imdb_word_index.json',
      file_hash='bfafd718b763782e994055a2d397834f')
  with open(path) as f:
    return json.load(f)
tensorflow-master
tensorflow/python/keras/datasets/imdb.py
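A hedged usage sketch for `imdb.load_data` and `get_word_index` above; the decoding step only illustrates the `index_from`/`start_char`/`oov_char` offsets documented in the docstring:

```python
from tensorflow.python.keras.datasets import imdb

(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=10000)

# Sequence values are word ranks shifted by index_from (3 by default), with
# 0 = padding, 1 = start_char and 2 = oov_char reserved.
word_index = imdb.get_word_index()
inverted = {rank + 3: word for word, rank in word_index.items()}
inverted.update({0: '<pad>', 1: '<start>', 2: '<oov>'})
print(' '.join(inverted.get(i, '<?>') for i in x_train[0][:20]))
```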
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Boston housing price regression dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np

from tensorflow.python.keras.utils.data_utils import get_file
from tensorflow.python.util.tf_export import keras_export


@keras_export('keras.datasets.boston_housing.load_data')
def load_data(path='boston_housing.npz', test_split=0.2, seed=113):
  """Loads the Boston Housing dataset.

  Arguments:
      path: path where to cache the dataset locally
          (relative to ~/.keras/datasets).
      test_split: fraction of the data to reserve as test set.
      seed: Random seed for shuffling the data
          before computing the test split.

  Returns:
      Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
  """
  assert 0 <= test_split < 1
  origin_folder = 'https://storage.googleapis.com/tensorflow/tf-keras-datasets/'
  path = get_file(
      path,
      origin=origin_folder + 'boston_housing.npz',
      file_hash=
      'f553886a1f8d56431e820c5b82552d9d95cfcb96d1e678153f8839538947dff5')
  with np.load(path) as f:
    x = f['x']
    y = f['y']

  np.random.seed(seed)
  indices = np.arange(len(x))
  np.random.shuffle(indices)
  x = x[indices]
  y = y[indices]

  x_train = np.array(x[:int(len(x) * (1 - test_split))])
  y_train = np.array(y[:int(len(x) * (1 - test_split))])
  x_test = np.array(x[int(len(x) * (1 - test_split)):])
  y_test = np.array(y[int(len(x) * (1 - test_split)):])
  return (x_train, y_train), (x_test, y_test)
tensorflow-master
tensorflow/python/keras/datasets/boston_housing.py
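A short usage sketch for the Boston Housing loader above; the quoted shapes assume the standard 506-sample dataset with the default `test_split=0.2`:

```python
from tensorflow.python.keras.datasets import boston_housing

(x_train, y_train), (x_test, y_test) = boston_housing.load_data()
print(x_train.shape, x_test.shape)  # (404, 13) (102, 13)
print(y_train[:3])                  # median house prices in k$
```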
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Reuters topic classification dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import json

import numpy as np

from tensorflow.python.keras.preprocessing.sequence import _remove_long_seq
from tensorflow.python.keras.utils.data_utils import get_file
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import keras_export


@keras_export('keras.datasets.reuters.load_data')
def load_data(path='reuters.npz',
              num_words=None,
              skip_top=0,
              maxlen=None,
              test_split=0.2,
              seed=113,
              start_char=1,
              oov_char=2,
              index_from=3,
              **kwargs):
  """Loads the Reuters newswire classification dataset.

  Arguments:
      path: where to cache the data (relative to `~/.keras/dataset`).
      num_words: max number of words to include. Words are ranked
          by how often they occur (in the training set) and only
          the most frequent words are kept.
      skip_top: skip the top N most frequently occurring words
          (which may not be informative).
      maxlen: truncate sequences after this length.
      test_split: Fraction of the dataset to be used as test data.
      seed: random seed for sample shuffling.
      start_char: The start of a sequence will be marked with this character.
          Set to 1 because 0 is usually the padding character.
      oov_char: words that were cut out because of the `num_words`
          or `skip_top` limit will be replaced with this character.
      index_from: index actual words with this index and higher.
      **kwargs: Used for backwards compatibility.

  Returns:
      Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.

  Note that the 'out of vocabulary' character is only used for
  words that were present in the training set but are not included
  because they're not making the `num_words` cut here.
  Words that were not seen in the training set but are in the test set
  have simply been skipped.
  """
  # Legacy support
  if 'nb_words' in kwargs:
    logging.warning('The `nb_words` argument in `load_data` '
                    'has been renamed `num_words`.')
    num_words = kwargs.pop('nb_words')
  if kwargs:
    raise TypeError('Unrecognized keyword arguments: ' + str(kwargs))

  origin_folder = 'https://storage.googleapis.com/tensorflow/tf-keras-datasets/'
  path = get_file(
      path,
      origin=origin_folder + 'reuters.npz',
      file_hash='87aedbeb0cb229e378797a632c1997b6')
  with np.load(path) as f:
    xs, labels = f['x'], f['y']

  np.random.seed(seed)
  indices = np.arange(len(xs))
  np.random.shuffle(indices)
  xs = xs[indices]
  labels = labels[indices]

  if start_char is not None:
    xs = [[start_char] + [w + index_from for w in x] for x in xs]
  elif index_from:
    xs = [[w + index_from for w in x] for x in xs]

  if maxlen:
    xs, labels = _remove_long_seq(maxlen, xs, labels)

  if not num_words:
    num_words = max([max(x) for x in xs])

  # by convention, use 2 as OOV word
  # reserve 'index_from' (=3 by default) characters:
  # 0 (padding), 1 (start), 2 (OOV)
  if oov_char is not None:
    xs = [[w if skip_top <= w < num_words else oov_char for w in x]
          for x in xs]
  else:
    xs = [[w for w in x if skip_top <= w < num_words] for x in xs]

  idx = int(len(xs) * (1 - test_split))
  x_train, y_train = np.array(xs[:idx]), np.array(labels[:idx])
  x_test, y_test = np.array(xs[idx:]), np.array(labels[idx:])

  return (x_train, y_train), (x_test, y_test)


@keras_export('keras.datasets.reuters.get_word_index')
def get_word_index(path='reuters_word_index.json'):
  """Retrieves the dictionary mapping word indices back to words.

  Arguments:
      path: where to cache the data (relative to `~/.keras/dataset`).

  Returns:
      The word index dictionary.
  """
  origin_folder = 'https://storage.googleapis.com/tensorflow/tf-keras-datasets/'
  path = get_file(
      path,
      origin=origin_folder + 'reuters_word_index.json',
      file_hash='4d44cc38712099c9e383dc6e5f11a921')
  with open(path) as f:
    return json.load(f)
tensorflow-master
tensorflow/python/keras/datasets/reuters.py
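A hedged usage sketch for the Reuters loader above; the train/test sizes quoted in the comments assume the default `test_split=0.2`:

```python
from tensorflow.python.keras.datasets import reuters

(x_train, y_train), (x_test, y_test) = reuters.load_data(num_words=10000)
print(len(x_train), len(x_test))  # 8982 2246
print(int(max(y_train)) + 1)      # 46 newswire topic classes
```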
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities common to CIFAR10 and CIFAR100 datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import sys

from six.moves import cPickle


def load_batch(fpath, label_key='labels'):
  """Internal utility for parsing CIFAR data.

  Arguments:
      fpath: path to the file to parse.
      label_key: key for label data in the retrieved dictionary.

  Returns:
      A tuple `(data, labels)`.
  """
  with open(fpath, 'rb') as f:
    if sys.version_info < (3,):
      d = cPickle.load(f)
    else:
      d = cPickle.load(f, encoding='bytes')
      # decode utf8
      d_decoded = {}
      for k, v in d.items():
        d_decoded[k.decode('utf8')] = v
      d = d_decoded
  data = d['data']
  labels = d[label_key]

  data = data.reshape(data.shape[0], 3, 32, 32)
  return data, labels
tensorflow-master
tensorflow/python/keras/datasets/cifar.py
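`load_batch` is an internal helper; below is a hedged sketch of how it is consumed (the batch directory path is hypothetical, and `cifar10.py` in the next entry does the real work of downloading and iterating the batch files):

```python
import os

from tensorflow.python.keras.datasets.cifar import load_batch

batch_dir = '/tmp/cifar-10-batches-py'  # hypothetical path to an extracted archive
data, labels = load_batch(os.path.join(batch_dir, 'data_batch_1'))
print(data.shape)   # (10000, 3, 32, 32), channels-first uint8
print(len(labels))  # 10000
```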
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""CIFAR10 small images classification dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os

import numpy as np

from tensorflow.python.keras import backend as K
from tensorflow.python.keras.datasets.cifar import load_batch
from tensorflow.python.keras.utils.data_utils import get_file
from tensorflow.python.util.tf_export import keras_export


@keras_export('keras.datasets.cifar10.load_data')
def load_data():
  """Loads CIFAR10 dataset.

  Returns:
      Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
  """
  dirname = 'cifar-10-batches-py'
  origin = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'
  path = get_file(dirname, origin=origin, untar=True)

  num_train_samples = 50000

  x_train = np.empty((num_train_samples, 3, 32, 32), dtype='uint8')
  y_train = np.empty((num_train_samples,), dtype='uint8')

  for i in range(1, 6):
    fpath = os.path.join(path, 'data_batch_' + str(i))
    (x_train[(i - 1) * 10000:i * 10000, :, :, :],
     y_train[(i - 1) * 10000:i * 10000]) = load_batch(fpath)

  fpath = os.path.join(path, 'test_batch')
  x_test, y_test = load_batch(fpath)

  y_train = np.reshape(y_train, (len(y_train), 1))
  y_test = np.reshape(y_test, (len(y_test), 1))

  if K.image_data_format() == 'channels_last':
    x_train = x_train.transpose(0, 2, 3, 1)
    x_test = x_test.transpose(0, 2, 3, 1)

  return (x_train, y_train), (x_test, y_test)
tensorflow-master
tensorflow/python/keras/datasets/cifar10.py
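A hedged usage sketch for `cifar10.load_data` above; the shapes in the comments assume the default `image_data_format()` of `'channels_last'`:

```python
from tensorflow.python.keras.datasets import cifar10

(x_train, y_train), (x_test, y_test) = cifar10.load_data()
print(x_train.shape, x_test.shape)  # (50000, 32, 32, 3) (10000, 32, 32, 3)
print(y_train.shape)                # (50000, 1), integer labels in [0, 9]
```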
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""MNIST handwritten digits dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np

from tensorflow.python.keras.utils.data_utils import get_file
from tensorflow.python.util.tf_export import keras_export


@keras_export('keras.datasets.mnist.load_data')
def load_data(path='mnist.npz'):
  """Loads the MNIST dataset.

  Arguments:
      path: path where to cache the dataset locally
          (relative to ~/.keras/datasets).

  Returns:
      Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.

  License:
      Yann LeCun and Corinna Cortes hold the copyright of MNIST dataset,
      which is a derivative work from original NIST datasets.
      MNIST dataset is made available under the terms of the
      [Creative Commons Attribution-Share Alike 3.0 license.](
      https://creativecommons.org/licenses/by-sa/3.0/)
  """
  origin_folder = 'https://storage.googleapis.com/tensorflow/tf-keras-datasets/'
  path = get_file(
      path,
      origin=origin_folder + 'mnist.npz',
      file_hash='8a61469f7ea1b51cbae51d4f78837e45')
  with np.load(path) as f:
    x_train, y_train = f['x_train'], f['y_train']
    x_test, y_test = f['x_test'], f['y_test']

    return (x_train, y_train), (x_test, y_test)
tensorflow-master
tensorflow/python/keras/datasets/mnist.py
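A short usage sketch for the MNIST loader above:

```python
from tensorflow.python.keras.datasets import mnist

(x_train, y_train), (x_test, y_test) = mnist.load_data()
print(x_train.shape, x_train.dtype)  # (60000, 28, 28) uint8
print(y_test.shape)                  # (10000,), digit labels 0-9
```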
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests Policies.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.keras.mixed_precision.experimental import policy as mp_policy from tensorflow.python.keras.optimizer_v2 import gradient_descent from tensorflow.python.platform import test from tensorflow.python.training.experimental import mixed_precision @test_util.run_all_in_graph_and_eager_modes class PolicyTest(test.TestCase): """Tests Policies.""" def test_infer(self): policy = mp_policy.Policy('infer') self.assertEqual(policy.name, 'infer') self.assertEqual(policy.default_variable_dtype, None) def test_infer_float32_vars(self): policy = mp_policy.Policy('infer_float32_vars') self.assertEqual(policy.name, 'infer_float32_vars') self.assertEqual(policy.default_variable_dtype, 'float32') def test_global_policy(self): self.assertEqual(mp_policy.global_policy().name, 'infer') default_policy = mp_policy.global_policy() try: mp_policy.set_policy('infer_float32_vars') self.assertEqual(mp_policy.global_policy().name, 'infer_float32_vars') self.assertEqual(mp_policy.global_policy().default_variable_dtype, 'float32') with ops.Graph().as_default(): # Policies are not associated with a graph self.assertEqual(mp_policy.global_policy().name, 'infer_float32_vars') mp_policy.set_policy('infer') self.assertEqual(mp_policy.global_policy().name, 'infer') self.assertEqual(mp_policy.global_policy().default_variable_dtype, None) policy = mp_policy.Policy('infer_float32_vars') mp_policy.set_policy(policy) self.assertIs(mp_policy.global_policy(), policy) finally: mp_policy.set_policy(default_policy) def test_policy_scope(self): with mp_policy.policy_scope('infer_float32_vars'): self.assertEqual(mp_policy.global_policy().name, 'infer_float32_vars') with mp_policy.policy_scope('infer'): self.assertEqual(mp_policy.global_policy().name, 'infer') self.assertEqual(mp_policy.global_policy().name, 'infer_float32_vars') self.assertEqual(mp_policy.global_policy().name, 'infer') def test_error_if_graph_rewrite_enabled(self): try: mixed_precision.enable_mixed_precision_graph_rewrite( gradient_descent.SGD(1.)) with self.assertRaisesRegexp( ValueError, 'the mixed precision graph rewrite has already been ' 'enabled'): mp_policy.set_policy('infer_float32_vars') finally: mixed_precision.disable_mixed_precision_graph_rewrite() if __name__ == '__main__': test.main()
tensorflow-master
tensorflow/python/keras/mixed_precision/experimental/policy_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Contains the Policy class for mixed precision training.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import contextlib from tensorflow.python.training.experimental import mixed_precision_global_state from tensorflow.python.util.tf_export import keras_export @keras_export('keras.mixed_precision.experimental.Policy') class Policy(object): """A mixed precision policy for a Keras layer. A mixed precision policy determines the floating-point dtype that Keras layers should create variables in. For non-default policies, if the variable dtype does not match the input dtype, variables will automatically be casted to the input dtype to avoid type errors. Policies can be passed to the 'dtype' argument of layer constructors, or a global policy can be set with 'set_policy'. In the near future, policies will also determine the computation dtype of layers, as well as the loss scaling algorithm. Policies are intended to enable mixed precision training, which require using float32 variables and [b]float16 computations for most layers. The term "mixed precision" refers to the use of both float16 (or bfloat16) and float32 in a model. See https://arxiv.org/abs/1710.03740 for more information on mixed precision training. Policies are constructed by passing a string to the `name` constructor argument. `name` determines the behavior of the policy. Currently, `name` can be one of the following values. * 'infer': Infer the variable and computation dtypes from the input dtype. This is the default behavior. * 'infer_float32_vars': Infer the computation dtypes from the input dtype, but create variables in float32. Variables will be casted to the computation dtype. This is intended to enable mixed precision. Users can cast tensors to float16 before passing them to a layer, which causes the layer to run it's computation in float16 while keeping variables in float32. To use mixed precision in a model, the 'infer_float32_vars' policy can be used alongside float16 input tensors, which results in float16 computations and float32 variables. 
For example: ```python tf.keras.mixed_precision.experimental.set_policy('infer_float32_vars') model = tf.keras.models.Sequential( tf.keras.layers.Input((100,), dtype='float16'), tf.keras.layers.Dense(10), tf.keras.layers.Dense(10), tf.keras.layers.Lambda(lambda x: tf.cast(x, 'float32')), tf.keras.layers.Activation('Softmax') ) ``` Alternatively, the policy can be passed to individual layers instead of setting the global policy with `set_policy`: ```python policy = tf.keras.mixed_precision.experimental.Policy('infer_float32_vars') model = tf.keras.models.Sequential( tf.keras.layers.Input((100,), dtype='float16'), tf.keras.layers.Dense(10, dtype=policy), tf.keras.layers.Dense(10, dtype=policy), tf.keras.layers.Lambda(lambda x: tf.cast(x, 'float32')), tf.keras.layers.Activation('Softmax') ) ``` Note that a LossScaleOptimizer should also be used for mixed precision models to avoid numerical underflow. See `LossScaleOptimizer`. """ def __init__(self, name): self._name = name if name == 'infer': self._default_variable_dtype = None elif name == 'infer_float32_vars': self._default_variable_dtype = 'float32' else: raise ValueError('"name" argument to Policy constructor must be "infer" ' 'or "infer_float32_vars", but got: %s' % name) @property def name(self): """Returns the name of the policy: "infer" or "infer_float32_vars.""" return self._name @property def default_variable_dtype(self): """Returns the default variable dtype of this policy. This is the dtype layers will create their variables in, unless a layer explicit chooses a different dtype. Layers will cast variables to the appropriate dtype to avoid type errors. Returns: The default variable dtype of this policy, or None if the default variable dtype should be derived from the inputs. """ return self._default_variable_dtype @property def should_cast_variables(self): """Returns true if variables should be casted.""" return self.default_variable_dtype is not None # TODO(reedwm): Implement get_config/from_config. # The policy in effect when TensorFlow starts. This is constant and never # changes. _default_policy = Policy('infer') # The current global policy in effect. This starts as the default policy, but # can be changed with `set_policy`. # TODO(reedwm): Make this thread local? _global_policy = _default_policy @keras_export('keras.mixed_precision.experimental.global_policy') def global_policy(): """Returns the global Policy. The global policy is the default policy used for layers, if no policy is passed to the layer constructor. When TensorFlow starts, the global policy is set to an "infer" policy, and can be changed with `set_policy`. Returns: The global Policy. """ return _global_policy def _check_if_mixed_precision_graph_rewrite_is_enabled(): # TODO(reedwm): Update this comment once the Keras API is complete. if mixed_precision_global_state.mixed_precision_graph_rewrite_is_enabled: raise ValueError( 'The mixed precision policy cannot be set, because the mixed ' 'precision graph rewrite has already been enabled.\n' 'At most, one of the following functions can be called:\n\n' ' 1. tf.train.experimental.enable_mixed_precision_graph_rewrite() ' '(You called this first)\n' ' 2. tf.keras.mixed_precision.experimental.set_policy() (You called ' 'this second)\n\n' 'You called both functions, which is an error, because both functions ' 'enable you to use mixed precision. The first function enables mixed ' 'precision in the graph with a graph rewrite. However it is currently ' 'not very customizable, and does not support eager. 
The second ' 'function is for Keras layers, but is not yet fully complete.') @keras_export('keras.mixed_precision.experimental.set_policy') def set_policy(policy): """Sets the global Policy.""" global _global_policy _check_if_mixed_precision_graph_rewrite_is_enabled() if not isinstance(policy, Policy): policy = Policy(policy) _global_policy = policy mixed_precision_global_state.using_default_mixed_precision_policy = ( _global_policy is _default_policy) # TODO(reedwm): Make this thread local @contextlib.contextmanager def policy_scope(policy): old_policy = _global_policy try: set_policy(policy) yield finally: set_policy(old_policy)
tensorflow-master
tensorflow/python/keras/mixed_precision/experimental/policy.py
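A hedged sketch of the global-policy helpers defined in `policy.py` above (an experimental API that was later replaced by `tf.keras.mixed_precision.Policy('mixed_float16')`); it mirrors the behaviour exercised by `policy_test.py`:

```python
from tensorflow.python.keras.mixed_precision.experimental import policy

print(policy.global_policy().name)  # 'infer' by default

with policy.policy_scope('infer_float32_vars'):
  p = policy.global_policy()
  print(p.name, p.default_variable_dtype)  # infer_float32_vars float32

print(policy.global_policy().name)  # back to 'infer' outside the scope
```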
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Mixed precision API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.keras.mixed_precision.experimental.loss_scale_optimizer import LossScaleOptimizer
from tensorflow.python.keras.mixed_precision.experimental.policy import global_policy
from tensorflow.python.keras.mixed_precision.experimental.policy import Policy
from tensorflow.python.keras.mixed_precision.experimental.policy import set_policy
tensorflow-master
tensorflow/python/keras/mixed_precision/experimental/__init__.py
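A hedged sketch of the names this `__init__.py` re-exports, as they surface under `tf.keras.mixed_precision.experimental` in this TensorFlow version:

```python
import tensorflow as tf

mp = tf.keras.mixed_precision.experimental
opt = tf.keras.optimizers.SGD(0.01)
opt = mp.LossScaleOptimizer(opt, 128.)  # wrap the optimizer with a fixed loss scale
mp.set_policy('infer_float32_vars')     # or pass an mp.Policy to individual layers
print(mp.global_policy().name)
```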
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests mixed precision works correctly with Keras layers and models.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os from absl.testing import parameterized import numpy as np from tensorflow.python.data.ops import dataset_ops from tensorflow.python.distribute import distribution_strategy_context from tensorflow.python.distribute import mirrored_strategy from tensorflow.python.eager import backprop from tensorflow.python.eager import context from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import test_util from tensorflow.python.keras import backend from tensorflow.python.keras import keras_parameterized from tensorflow.python.keras import layers from tensorflow.python.keras import models from tensorflow.python.keras import regularizers from tensorflow.python.keras import testing_utils from tensorflow.python.keras.engine import base_layer from tensorflow.python.keras.layers import core from tensorflow.python.keras.mixed_precision.experimental import loss_scale_optimizer from tensorflow.python.keras.mixed_precision.experimental import policy from tensorflow.python.keras.mixed_precision.experimental import test_util as mp_test_util from tensorflow.python.keras.optimizer_v2 import gradient_descent from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import variables from tensorflow.python.platform import test from tensorflow.python.training.experimental import loss_scale as loss_scale_module from tensorflow.python.training.tracking import util as trackable_utils from tensorflow.python.util import nest class AssertTypeLayer(base_layer.Layer): """A layer which asserts it's inputs are a certain type.""" def __init__(self, assert_type=None, **kwargs): self._assert_type = assert_type super(AssertTypeLayer, self).__init__(**kwargs) def assert_input_types(self, inputs): """Asserts `inputs` are of the correct type. Should be called in call().""" if self._assert_type: inputs_flattened = nest.flatten(inputs) for inp in inputs_flattened: assert inp.dtype.base_dtype == self._assert_type, ( 'Input tensor has type %s which does not match assert type %s' % (inp.dtype.name, self._assert_type.name)) class AddLayer(AssertTypeLayer): """A layer which adds it's input to a scalar variable.""" def __init__(self, regularizer=None, use_operator=False, var_name='v', **kwargs): """Initializes the AddLayer. Args: regularizer: The regularizer on the scalar variable. use_operator: If True, add using the + operator. If False, add using tf.add. var_name: The name of the variable. It can be useful to pass a name other than 'v', to test having the attribute name (self.v) being different from the variable name. 
**kwargs: Passed to AssertTypeLayer constructor. """ self._regularizer = regularizer self._use_operator = use_operator self._var_name = var_name super(AddLayer, self).__init__(**kwargs) def build(self, _): self.v = self.add_weight(self._var_name, (), initializer='ones', regularizer=self._regularizer) self.built = True def call(self, inputs): self.assert_input_types(inputs) assert inputs.dtype == self.v.dtype return self._add(inputs, self.v) def _add(self, x, y): if self._use_operator: return x + y else: return math_ops.add(x, y) class AddLayerWithoutAutoCast(AddLayer): """Same as AddLayer, but does not use AutoCastVariables.""" def build(self, _): dtype = self.dtype if dtype in ('float16', 'bfloat16'): dtype = 'float32' self.v = self.add_weight('v', (), initializer='ones', dtype=dtype, experimental_autocast=False, regularizer=self._regularizer) self.built = True def call(self, inputs): self.assert_input_types(inputs) assert self.v.dtype in (dtypes.float32, dtypes.float64) return self._add(inputs, math_ops.cast(self.v, inputs.dtype)) class IdentityRegularizer(regularizers.Regularizer): def __call__(self, x): assert x.dtype == dtypes.float32 return array_ops.identity(x) # If called outside any strategy.scope() calls, this will return the default # strategy. default_strategy_fn = distribution_strategy_context.get_strategy def create_mirrored_strategy(): if context.num_gpus() >= 1: return mirrored_strategy.MirroredStrategy(['cpu:0', 'gpu:0']) else: return mirrored_strategy.MirroredStrategy(['cpu:0']) TESTCASES = ({ 'testcase_name': 'base', 'strategy_fn': default_strategy_fn }, { 'testcase_name': 'distribute', 'strategy_fn': create_mirrored_strategy }) class KerasLayerTest(keras_parameterized.TestCase): """Test mixed precision with Keras layers.""" @parameterized.named_parameters(*TESTCASES) @test_util.run_in_graph_and_eager_modes def test_variables_in_float32(self, strategy_fn): x = constant_op.constant([1.], dtype=dtypes.float16) with strategy_fn().scope(): with policy.policy_scope('infer_float32_vars'): layer = AddLayer(assert_type=dtypes.float16) y = layer(x) self.assertEqual(layer.v.dtype, dtypes.float32) self.assertEqual(y.dtype, dtypes.float16) self.evaluate(variables.global_variables_initializer()) self.assertEqual(self.evaluate(y), 2.) @parameterized.named_parameters(*TESTCASES) @test_util.run_in_graph_and_eager_modes def test_layer_with_non_autocast_variable(self, strategy_fn): x = constant_op.constant([1.], dtype=dtypes.float16) with strategy_fn().scope(): with policy.policy_scope('infer_float32_vars'): layer = AddLayerWithoutAutoCast(assert_type=dtypes.float16) y = layer(x) self.assertEqual(layer.v.dtype, dtypes.float32) self.assertEqual(y.dtype, dtypes.float16) self.evaluate(variables.global_variables_initializer()) self.assertEqual(self.evaluate(y), 2.) @parameterized.named_parameters(*TESTCASES) @test_util.run_in_graph_and_eager_modes def test_layer_regularizer_runs_in_float32(self, strategy_fn): x = constant_op.constant([1.], dtype=dtypes.float16) with strategy_fn().scope(): with policy.policy_scope('infer_float32_vars'): # Test on AddLayer layer = AddLayer(assert_type=dtypes.float16, regularizer=IdentityRegularizer()) layer(x) (regularizer_loss,) = layer.losses self.assertEqual(regularizer_loss.dtype, dtypes.float32) self.evaluate(variables.global_variables_initializer()) self.assertEqual(self.evaluate(regularizer_loss), 1.) 
# Test on AddLayerWithoutAutoCast layer = AddLayerWithoutAutoCast(assert_type=dtypes.float16, regularizer=IdentityRegularizer()) layer(x) (regularizer_loss,) = layer.losses self.assertEqual(regularizer_loss.dtype, dtypes.float32) self.evaluate(variables.global_variables_initializer()) self.assertEqual(self.evaluate(regularizer_loss), 1.) @parameterized.named_parameters(*TESTCASES) @test_util.run_in_graph_and_eager_modes def test_passing_policy_to_layer(self, strategy_fn): x = constant_op.constant([1.], dtype=dtypes.float16) with strategy_fn().scope(): # Passing a Policy to 'dtype' sets the policy for that layer. layer = AddLayer(assert_type=dtypes.float16, dtype=policy.Policy('infer_float32_vars')) # layer.dtype refers to the variable dtype self.assertEqual(layer.dtype, dtypes.float32) layer(x) self.assertEqual(layer.v.dtype, dtypes.float32) with policy.policy_scope('infer_float32_vars'): # Passing a Policy to dtype overrides the global Policy layer = AddLayer(assert_type=dtypes.float16, dtype=policy.Policy('infer')) # layer dtype is not yet known self.assertEqual(layer.dtype, None) layer(x) self.assertEqual(layer.v.dtype, dtypes.float16) self.assertEqual(layer.dtype, dtypes.float16) @parameterized.named_parameters(*TESTCASES) @test_util.run_in_graph_and_eager_modes def test_gradient(self, strategy_fn): x = constant_op.constant([1.], dtype=dtypes.float16) with strategy_fn().scope() as strategy: with policy.policy_scope('infer_float32_vars'): layer = AddLayer(assert_type=dtypes.float16) def run_fn(): with backprop.GradientTape() as tape: y = layer(x) # Divide by num_replicas_in_sync, as the effective total loss is the # sum of each of the replica's losses. y /= strategy.num_replicas_in_sync # Learning rate is small enough that if applied to a float16 variable, # the variable will not change. So this tests the learning rate is not # applied to a float16 value, but instead the float32 variable. opt = gradient_descent.SGD(2 ** -14) grad = tape.gradient(y, layer.v) return opt.apply_gradients([(grad, layer.v)]) op = strategy.experimental_run(run_fn) if not context.executing_eagerly(): self.evaluate(variables.global_variables_initializer()) self.evaluate(op) # The gradient with respective to the variable is 1. Since the # variable is initialized with 1 and the learning rate is 2**-14, the # new variable value should be: init_val - gradient * learning_rate, # which is 1 - 1 * 2**-14 self.assertEqual(self.evaluate(layer.v), 1 - 2 ** -14) @parameterized.named_parameters(*TESTCASES) @test_util.run_in_graph_and_eager_modes def test_checkpointing_layer_weights(self, strategy_fn): x = constant_op.constant([1.], dtype=dtypes.float16) with strategy_fn().scope(): with policy.policy_scope('infer_float32_vars'): layer = AddLayer(assert_type=dtypes.float16) layer.build(()) layer.set_weights([np.array(100.)]) self.assertEqual(self.evaluate(layer(x)), 101.) checkpoint = trackable_utils.Checkpoint(layer=layer) prefix = os.path.join(self.get_temp_dir(), 'ckpt') save_path = checkpoint.save(prefix) layer.set_weights([np.array(200.)]) self.assertEqual(self.evaluate(layer(x)), 201.) checkpoint.restore(save_path).assert_consumed().run_restore_ops() self.assertEqual(layer.get_weights(), [100.]) self.assertEqual(self.evaluate(layer(x)), 101.) # TODO(reedwm): Allow layers to be saved without using mixed precision, and # restored with mixed precision? Or vice versa? 
class KerasModelTest(keras_parameterized.TestCase): """Test mixed precision with Keras models.""" def _is_strategy_supported(self, strategy_fn): if (strategy_fn != default_strategy_fn and testing_utils.should_run_eagerly()): # Distribution strategies do not support running with `run_eagerly=True` # in Keras Models. return False else: return True @keras_parameterized.run_all_keras_modes @parameterized.named_parameters({ 'testcase_name': 'base', 'strategy_fn': default_strategy_fn }, { 'testcase_name': 'distribute', 'strategy_fn': create_mirrored_strategy, }, { 'testcase_name': 'operator', 'strategy_fn': create_mirrored_strategy, 'use_operator': True }, { 'testcase_name': 'regularizer', 'strategy_fn': create_mirrored_strategy, 'use_regularizer': True }, { 'testcase_name': 'nocloning', 'strategy_fn': create_mirrored_strategy, 'cloning': False }) def test_model(self, strategy_fn, use_operator=False, use_regularizer=False, cloning=True): if not self._is_strategy_supported(strategy_fn): return regularizer = IdentityRegularizer() if use_regularizer else None with strategy_fn().scope(): with policy.policy_scope('infer_float32_vars'): x = layers.Input(shape=(1,), batch_size=2, dtype=dtypes.float16) layer = AddLayer(assert_type=dtypes.float16, use_operator=use_operator, regularizer=regularizer) y = layer(x) y = math_ops.cast(y, dtypes.float32) model = models.Model(inputs=x, outputs=y) def loss_fn(y_true, y_pred): del y_true return math_ops.reduce_mean(y_pred) # Learning rate is small enough that if applied to a float16 variable, # the variable will not change. So this tests the learning rate not # applied to a float16 value, but instead the float32 variable. opt = gradient_descent.SGD(2 ** -14) model.compile(opt, loss=loss_fn, cloning=cloning, run_eagerly=testing_utils.should_run_eagerly()) self.assertEqual(backend.eval(layer.v), 1) x = np.ones((2, 1)) y = np.ones((2, 1)) dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(2) model.fit(dataset) # Variable starts at 1, and should have gradient of 2 ** -14 subtracted # from it. expected = 1 - 2 ** -14 if use_regularizer: # Regularizer adds another 2 ** -14 to the gradient. expected -= 2 ** -14 self.assertEqual(backend.eval(layer.v), expected) @keras_parameterized.run_all_keras_modes @parameterized.named_parameters({ 'testcase_name': 'base', 'strategy_fn': default_strategy_fn }, { 'testcase_name': 'distribute', 'strategy_fn': create_mirrored_strategy, }, { 'testcase_name': 'nocloning', 'strategy_fn': create_mirrored_strategy, 'cloning': False, }) def test_fixed_loss_scaling(self, strategy_fn, cloning=True): # Note: We do not test mixed precision in this method, only loss scaling. if not self._is_strategy_supported(strategy_fn): return loss_scale = 8. batch_size = 4 with strategy_fn().scope(): x = layers.Input(shape=(1,), batch_size=batch_size) layer = AddLayer() y = layer(x) # The gradient of 'y' at this point is 1. With loss scaling, the gradient # is 'loss_scale'. We divide by the batch size since the loss is averaged # across batch elements. expected_gradient = loss_scale / batch_size identity_with_grad_check_fn = ( mp_test_util.create_identity_with_grad_check_fn([expected_gradient])) y = core.Lambda(identity_with_grad_check_fn)(y) model = models.Model(inputs=x, outputs=y) def loss_fn(y_true, y_pred): del y_true return math_ops.reduce_mean(y_pred) opt = gradient_descent.SGD(1.) 
opt = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale) model.compile(opt, loss=loss_fn, cloning=cloning, run_eagerly=testing_utils.should_run_eagerly()) self.assertEqual(backend.eval(layer.v), 1) x = np.ones((batch_size, 1)) y = np.ones((batch_size, 1)) dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(batch_size) model.fit(dataset) # Variable starts at 1, and should have gradient of 1 subtracted from it. expected = 0 self.assertEqual(backend.eval(layer.v), expected) @keras_parameterized.run_all_keras_modes @parameterized.named_parameters({ 'testcase_name': 'base', 'strategy_fn': default_strategy_fn }, { 'testcase_name': 'distribute', 'strategy_fn': create_mirrored_strategy, }, { 'testcase_name': 'loss_scaling', 'strategy_fn': create_mirrored_strategy, 'use_loss_scaling': True }) def test_advanced_model(self, strategy_fn, use_loss_scaling=False): # The advanced model tests mixed-precision-related features that would occur # in a resnet50 model. It tests a model that has: # * Multiple layers, some which use auto-cast variables and some which do # not # * Regularization on some variables and not others. # * A fixed loss scale (if use_loss_scaling is True) if not self._is_strategy_supported(strategy_fn): return strategy = strategy_fn() if use_loss_scaling: loss_scale = 8. learning_rate = 2 ** -14 with strategy.scope(): with policy.policy_scope(policy.Policy('infer_float32_vars')): x = layers.Input(shape=(1,), batch_size=2, dtype=dtypes.float16) layer1 = AddLayer(assert_type=dtypes.float16, regularizer=IdentityRegularizer(), use_operator=True) layer2 = AddLayerWithoutAutoCast(assert_type=dtypes.float16, use_operator=True) layer3 = AddLayer(assert_type=dtypes.float16, use_operator=False) layer4 = AddLayerWithoutAutoCast(assert_type=dtypes.float16, regularizer=IdentityRegularizer(), use_operator=False) y = layer1(x) y = layer2(y) y = layer3(y) y = layer4(y) if use_loss_scaling: # The gradient of 'y' at this point is 1. With loss scaling, the # gradient is 'loss_scale'. We divide by the batch size of 2 since the # loss is averaged across batch elements. 
expected_gradient = loss_scale / 2 identity_with_grad_check_fn = ( mp_test_util.create_identity_with_grad_check_fn( expected_dtype=dtypes.float16, expected_gradient=[expected_gradient])) y = core.Lambda(identity_with_grad_check_fn)(y) y = math_ops.cast(y, dtypes.float32) model = models.Model(inputs=x, outputs=y) def loss_fn(y_true, y_pred): self.assertEqual(y_true.dtype, dtypes.float32) self.assertEqual(y_pred.dtype, dtypes.float32) return math_ops.reduce_mean(y_pred) opt = gradient_descent.SGD(learning_rate) if use_loss_scaling: opt = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale) model.compile(opt, loss=loss_fn, run_eagerly=testing_utils.should_run_eagerly()) x = np.ones((2, 1)) y = np.ones((2, 1)) dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(2) model.fit(dataset) for layer in (layer1, layer2, layer3, layer4): if layer.losses: # Layer has weight regularizer self.assertEqual(backend.eval(layer.v), 1 - 2 * learning_rate) else: # Layer does not have weight regularizer self.assertEqual(backend.eval(layer.v), 1 - learning_rate) @keras_parameterized.run_all_keras_modes @parameterized.named_parameters({ 'testcase_name': 'base', 'strategy_fn': default_strategy_fn }, { 'testcase_name': 'distribute', 'strategy_fn': create_mirrored_strategy, }, { 'testcase_name': 'nocloning', 'strategy_fn': create_mirrored_strategy, 'cloning': False, }) def test_dynamic_loss_scaling(self, strategy_fn, cloning=True): if not self._is_strategy_supported(strategy_fn): return strategy = strategy_fn() initial_loss_scale = 2. batch_size = 4 expected_gradient = backend.variable([initial_loss_scale / batch_size], dtype=dtypes.float16) # If this variable is set to True, the model below will have NaN gradients have_nan_gradients = backend.variable(False, dtype=dtypes.bool) with strategy.scope(): with policy.policy_scope(policy.Policy('infer_float32_vars')): x = layers.Input(shape=(1,), batch_size=batch_size, dtype=dtypes.float16) layer = AddLayer(assert_type=dtypes.float16) y = layer(x) identity_with_nan_grads = ( mp_test_util.create_identity_with_nan_gradients_fn( have_nan_gradients)) y = core.Lambda(identity_with_nan_grads)(y) identity_with_grad_check_fn = ( mp_test_util.create_identity_with_grad_check_fn( expected_dtype=dtypes.float16, expected_gradient=expected_gradient)) y = core.Lambda(identity_with_grad_check_fn)(y) y = math_ops.cast(y, dtypes.float32) model = models.Model(inputs=x, outputs=y) def loss_fn(y_true, y_pred): del y_true return math_ops.reduce_mean(y_pred) opt = gradient_descent.SGD(1.) loss_scale = loss_scale_module.DynamicLossScale( initial_loss_scale=initial_loss_scale, increment_period=2) opt = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale) model.compile(opt, loss=loss_fn, cloning=cloning, run_eagerly=testing_utils.should_run_eagerly()) self.assertEqual(backend.eval(layer.v), 1) x = np.ones((batch_size, 1)) y = np.ones((batch_size, 1)) dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(batch_size) model.fit(dataset) # The variables starts with 1 and has a gradient of 1, so will go down by 1 # each step. self.assertEqual(backend.eval(layer.v), 0) model.fit(dataset) self.assertEqual(backend.eval(layer.v), -1) # There have been two steps without NaNs, so the loss scale will double backend.set_value(expected_gradient, backend.get_value(expected_gradient * 2)) model.fit(dataset) self.assertEqual(backend.eval(layer.v), -2) # Next test with NaN gradients. 
backend.set_value(have_nan_gradients, True) model.fit(dataset) # Variable should not be updated self.assertEqual(backend.eval(layer.v), -2) # Test with finite gradients again backend.set_value(have_nan_gradients, False) # The loss scale will be halved due to the NaNs, so the gradient will also # be halved backend.set_value(expected_gradient, backend.get_value(expected_gradient / 2)) model.fit(dataset) self.assertEqual(backend.eval(layer.v), -3) @parameterized.named_parameters({ 'testcase_name': 'base', 'strategy_fn': default_strategy_fn, }, { 'testcase_name': 'distribute', 'strategy_fn': create_mirrored_strategy, }, { 'testcase_name': 'base_h5', 'strategy_fn': default_strategy_fn, 'h5': True, }, { 'testcase_name': 'distribute_h5', 'strategy_fn': create_mirrored_strategy, 'h5': True, }) @test_util.run_in_graph_and_eager_modes def test_save_weights_with_autocast_vars(self, strategy_fn, h5=False): with strategy_fn().scope(): with policy.policy_scope('infer_float32_vars'): x = layers.Input(shape=(1,), batch_size=2, dtype=dtypes.float16) layer = AddLayer(assert_type=dtypes.float16) y = layer(x) y = math_ops.cast(y, dtypes.float32) model = models.Model(inputs=x, outputs=y) model.set_weights([np.array(100.)]) x = np.ones((2, 1), dtype=np.float16) self.assertAllClose(backend.get_value(model(x)), x + 100.) suffix = '.h5' if h5 else '' weights_file = os.path.join(self.get_temp_dir(), 'weights' + suffix) model.save_weights(weights_file) model.set_weights([np.array(200.)]) self.assertAllClose(backend.get_value(model(x)), x + 200.) model.load_weights(weights_file) self.assertAllClose(backend.get_value(model(x)), x + 100.) self.assertEqual(model.get_weights(), [np.array(100.)]) @keras_parameterized.run_all_keras_modes @parameterized.named_parameters({ 'testcase_name': 'base', 'strategy_fn': default_strategy_fn, }, { 'testcase_name': 'distribute', 'strategy_fn': create_mirrored_strategy, }, { 'testcase_name': 'different_var_name', 'strategy_fn': default_strategy_fn, 'var_name': 'w' }, { 'testcase_name': 'different_var_name_distribute', 'strategy_fn': create_mirrored_strategy, 'var_name': 'w' }) def test_save_slot_variables_with_autocast_vars(self, strategy_fn, var_name='v'): if not self._is_strategy_supported(strategy_fn): return with strategy_fn().scope(), policy.policy_scope('infer_float32_vars'): x = layers.Input(shape=(2,), batch_size=2, dtype=dtypes.float16) # Having a var_name other than 'v' tests that a fixed bug (b/134713714) # does not reoccur. The bug was that a crash would occur when saving a # checkpoint where an AutoCastVariable with a slot variable would have a # different name than the layer attribute's name (layer.v in this case). layer = AddLayer(assert_type=dtypes.float16, var_name=var_name) y = layer(x) y = math_ops.cast(y, dtypes.float32) model = models.Model(inputs=x, outputs=y) opt = gradient_descent.SGD(1., 1.) 
model.compile(optimizer=opt, loss='mse', run_eagerly=testing_utils.should_run_eagerly()) model.fit(np.zeros((2, 2)), np.zeros((2, 2)), batch_size=2) weights_file = os.path.join(self.get_temp_dir(), 'weights') model.save_weights(weights_file) saved_slot = backend.get_value(opt.get_slot(layer.v, 'momentum')) model.fit(np.zeros((2, 2)), np.zeros((2, 2)), batch_size=2) new_slot = backend.get_value(opt.get_slot(layer.v, 'momentum')) self.assertNotEqual(new_slot, saved_slot) model.load_weights(weights_file) restored_slot = backend.get_value(opt.get_slot(layer.v, 'momentum')) self.assertEqual(restored_slot, saved_slot) @keras_parameterized.run_all_keras_modes @parameterized.named_parameters(*TESTCASES) def test_save_weights_with_dynamic_loss_scaling(self, strategy_fn): if not self._is_strategy_supported(strategy_fn): return strategy = strategy_fn() if (isinstance(strategy, mirrored_strategy.MirroredStrategy) and not context.executing_eagerly()): # TODO(b/121381184): Enable running the test in this case. return # Create and run model. with strategy.scope(): x = layers.Input(shape=(2,), batch_size=2, dtype=dtypes.float32) y = AddLayer(assert_type=dtypes.float32)(x) model = models.Model(inputs=x, outputs=y) loss_scale = loss_scale_module.DynamicLossScale( initial_loss_scale=1., increment_period=2., multiplier=2.) opt = gradient_descent.SGD(1.) opt = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale) model.compile(optimizer=opt, loss='mse', run_eagerly=testing_utils.should_run_eagerly()) # Run for 3 steps (6 examples with a batch size of 2) model.fit(np.zeros((6, 2)), np.zeros((6, 2)), batch_size=2) self.assertEqual(backend.get_value(loss_scale()), 2) self.assertEqual(backend.get_value(loss_scale._num_good_steps), 1) # Save model weights. save_prefix = os.path.join(self.get_temp_dir(), 'ckpt') model.save_weights(save_prefix) # Run model again for 1 step (2 examples with a batch size of 2) model.fit(np.zeros((2, 2)), np.zeros((2, 2)), batch_size=2) self.assertEqual(backend.get_value(loss_scale()), 4) self.assertEqual(backend.get_value(loss_scale._num_good_steps), 0) # Load model weights and ensure loss scale weights are restored. model.load_weights(save_prefix) self.assertEqual(backend.get_value(loss_scale()), 2) self.assertEqual(backend.get_value(loss_scale._num_good_steps), 1) if __name__ == '__main__': test.main()
tensorflow-master
tensorflow/python/keras/mixed_precision/experimental/keras_test.py
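A hedged distillation of the end-to-end pattern `keras_test.py` exercises (float16 inputs under the `'infer_float32_vars'` policy, a cast back to float32 before the loss, and a loss-scaled optimizer); the layer sizes and learning rate here are arbitrary, not taken from the tests:

```python
import tensorflow as tf
from tensorflow.python.keras.mixed_precision.experimental import loss_scale_optimizer
from tensorflow.python.keras.mixed_precision.experimental import policy

with policy.policy_scope('infer_float32_vars'):
  x = tf.keras.layers.Input(shape=(16,), dtype='float16')
  y = tf.keras.layers.Dense(8)(x)  # float32 variables, float16 computation
  y = tf.keras.layers.Lambda(lambda t: tf.cast(t, 'float32'))(y)
  model = tf.keras.models.Model(x, y)

opt = loss_scale_optimizer.LossScaleOptimizer(
    tf.keras.optimizers.SGD(2 ** -14), 'dynamic')
model.compile(opt, loss='mse')
```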
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Contains testing utilities related to mixed precision.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import constant_op from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import custom_gradient def create_identity_with_grad_check_fn(expected_gradient, expected_dtype=None): """Returns a function that asserts it's gradient has a certain value. This serves as a hook to assert intermediate gradients have a certain value. This returns an identity function. The identity's gradient function is also the identity function, except it asserts that the gradient equals `expected_gradient` and has dtype `expected_dtype`. Args: expected_gradient: The gradient function asserts that the gradient is this value. expected_dtype: The gradient function asserts the gradient has this dtype. Returns: An identity function whose gradient function asserts the gradient has a certain value. """ @custom_gradient.custom_gradient def identity_with_grad_check(x): """Function that asserts it's gradient has a certain value.""" x = array_ops.identity(x) def grad(dx): if expected_dtype: assert dx.dtype == expected_dtype, ( 'dx.dtype should be %s but is: %s' % (expected_dtype, dx.dtype)) expected_tensor = ops.convert_to_tensor(expected_gradient, dtype=dx.dtype, name='expected_gradient') assert_op = check_ops.assert_equal(dx, expected_tensor) with ops.control_dependencies([assert_op]): dx = array_ops.identity(dx) return dx return x, grad return identity_with_grad_check def create_identity_with_nan_gradients_fn(have_nan_gradients): """Returns a function that optionally has NaN gradients. This serves as a hook to introduce NaN gradients to a model. This returns an identity function. The identity's gradient function will check if the boolean tensor `have_nan_gradients` is True. If so, the gradient will be NaN. Otherwise, the gradient will also be the identity. Args: have_nan_gradients: A scalar boolean tensor. If True, gradients will be NaN. Otherwise, the gradient function is the identity function. Returns: An identity function whose gradient function will return NaNs, if `have_nan_gradients` is True. """ @custom_gradient.custom_gradient def identity_with_nan_gradients(x): """Function whose gradient is NaN iff `have_nan_gradients` is True.""" x = array_ops.identity(x) def grad(dx): # We need this control dependency, because otherwise the NaN could be # produced before `dx`. This in turn could cause the final gradient to be # produced because `dx`, causing the loss scale to be updated before `dx`, # which can cause `tf.assert_equal`s to fail. 
with ops.control_dependencies([dx]): nan_scalar = constant_op.constant(float('NaN'), dtype=dx.dtype) return control_flow_ops.cond( have_nan_gradients, lambda: array_ops.fill(array_ops.shape(dx), nan_scalar), lambda: dx ) return x, grad return identity_with_nan_gradients
tensorflow-master
tensorflow/python/keras/mixed_precision/experimental/test_util.py
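A minimal usage sketch for the helpers in test_util.py above, assuming an eager TensorFlow runtime where `tf.GradientTape` differentiates through `tf.custom_gradient` functions; the variable and the expected gradient value of 3.0 are illustrative:

import tensorflow as tf
from tensorflow.python.keras.mixed_precision.experimental import test_util as mp_test_util

# The custom gradient asserts that the incoming gradient equals 3.0.
grad_check_fn = mp_test_util.create_identity_with_grad_check_fn(3.0)
x = tf.Variable(2.0)
with tf.GradientTape() as tape:
  loss = 3.0 * grad_check_fn(x)   # gradient flowing into the identity is 3.0
grad = tape.gradient(loss, x)     # the assert_equal inside grad() runs here
print(float(grad))                # 3.0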
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for LossScaleOptimizer.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os from absl.testing import parameterized from tensorflow.python.distribute import distribution_strategy_context from tensorflow.python.distribute import mirrored_strategy from tensorflow.python.eager import context from tensorflow.python.framework import test_util from tensorflow.python.keras.mixed_precision.experimental import loss_scale_optimizer from tensorflow.python.keras.mixed_precision.experimental import test_util as mp_test_util from tensorflow.python.keras.optimizer_v2 import adam from tensorflow.python.keras.optimizer_v2 import gradient_descent from tensorflow.python.ops import resource_variable_ops from tensorflow.python.ops import variables from tensorflow.python.platform import test from tensorflow.python.training.experimental import loss_scale as loss_scale_module from tensorflow.python.training.tracking import util as trackable_utils # If called outside any strategy.scope() calls, this will return the default # strategy. default_strategy_fn = distribution_strategy_context.get_strategy def create_mirrored_strategy(): if context.num_gpus() >= 1: return mirrored_strategy.MirroredStrategy(['cpu:0', 'gpu:0']) else: return mirrored_strategy.MirroredStrategy(['cpu:0']) TESTCASES = ({ 'testcase_name': 'Base', 'strategy_fn': default_strategy_fn }, { 'testcase_name': 'Distribute', 'strategy_fn': create_mirrored_strategy }) class LossScaleOptimizerTest(test.TestCase, parameterized.TestCase): def _run_if_in_graph_mode(self, val): # Running only in graph mode is useful, because optimizers sometimes return # a value that, in Graph mode, is runnable with self.evaluate. But in Eager # mode, the optimizer already does the computations and the return value # cannot be run. if not context.executing_eagerly(): self.evaluate(val) def _run_fn_with_grad_check(self, strategy, var, opt, expected_grad): grad_check_fn = mp_test_util.create_identity_with_grad_check_fn( expected_grad) loss = lambda: grad_check_fn(var) / strategy.num_replicas_in_sync return lambda: opt.minimize(loss, var_list=[var]) @parameterized.named_parameters(*TESTCASES) @test_util.run_in_graph_and_eager_modes def testFixedLossScaleAppliedToLossWithMinimize(self, strategy_fn): with strategy_fn().scope() as strategy: var = variables.Variable([5.0]) opt = gradient_descent.SGD(2.0) loss_scale = 10. opt = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale) # We need num_replicas_in_sync to divide loss_scale, otherwise loss_scale # / strategy.num_replicas_in_sync will not be exact, which could lead to # assertion failures due to rounding issues. 
self.assertEqual(loss_scale % strategy.num_replicas_in_sync, 0) run_fn = self._run_fn_with_grad_check( strategy, var, opt, loss_scale / strategy.num_replicas_in_sync) run_op = strategy.experimental_run(run_fn) self.evaluate(variables.global_variables_initializer()) self._run_if_in_graph_mode(run_op) # The loss is the identity of the variable. Therefore the gradient is 1, # and so the variable will be init_val - grad * lr == 5 - 1 * 2 == 3 self.assertAllClose([3.], self.evaluate(var)) @test_util.deprecated_graph_mode_only def testFixedLossScaleAppliedToLossWithGetGradients(self): var = variables.Variable([2.0]) opt = gradient_descent.SGD(1.0) loss_scale = 10. opt = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale) grad_check_fn = mp_test_util.create_identity_with_grad_check_fn(loss_scale) loss = grad_check_fn(var) run_op = opt.get_gradients(loss, [var]) self.evaluate(variables.global_variables_initializer()) # This will cause an assertion to run, as # mp_test_util.create_identity_with_grad_check_fn added an assertion op. self.evaluate(run_op) @test_util.run_in_graph_and_eager_modes def testGetScaledLoss(self): opt = gradient_descent.SGD(2.0) opt = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale=2.) self.assertEqual(10., self.evaluate(opt.get_scaled_loss(5.))) @test_util.run_in_graph_and_eager_modes def testGetUnscaledGradients(self): opt = gradient_descent.SGD(2.0) opt = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale=2) grads = opt.get_unscaled_gradients([3., None, -4.]) grads = [self.evaluate(g) if g is not None else g for g in grads] self.assertEqual([1.5, None, -2.], grads) @parameterized.named_parameters(*TESTCASES) @test_util.run_in_graph_and_eager_modes def testDynamicLossScale(self, strategy_fn): strategy = strategy_fn() learning_rate = 2. expected_gradient = resource_variable_ops.ResourceVariable( learning_rate / strategy.num_replicas_in_sync) with strategy.scope(): var = variables.Variable([5.0]) opt = gradient_descent.SGD(learning_rate) loss_scale = loss_scale_module.DynamicLossScale( initial_loss_scale=2, increment_period=1, multiplier=2) opt = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale) self.assertEqual( loss_scale.initial_loss_scale % strategy.num_replicas_in_sync, 0) run_fn = self._run_fn_with_grad_check(strategy, var, opt, expected_gradient) run_op = strategy.experimental_run(run_fn) self.evaluate(variables.global_variables_initializer()) self._run_if_in_graph_mode(run_op) # The loss is the identity of the variable. Therefore the gradient is 1, # and so the variable will be init_val - grad * lr == 5 - 1 * 2 == 3 self.assertAllClose([3.], self.evaluate(var)) # Loss scale will be doubled, so the expected gradient is also doubled. self.evaluate(expected_gradient.assign( 2 * learning_rate / strategy.num_replicas_in_sync)) run_op = strategy.experimental_run(run_fn) self._run_if_in_graph_mode(run_op) # As before, the 2 is subtracted from the variable, making its new value # 1.
self.assertAllClose([1.], self.evaluate(var)) @parameterized.named_parameters(*TESTCASES) @test_util.run_in_graph_and_eager_modes def testDynamicUpdate(self, strategy_fn): with strategy_fn().scope() as strategy: var = variables.Variable([1.0, 2.0]) opt = gradient_descent.SGD(1.0) loss_scale = loss_scale_module.DynamicLossScale( initial_loss_scale=2, increment_period=1, multiplier=2) opt = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale) # Test optimizer with finite gradients loss = lambda: var * 2.0 / strategy.num_replicas_in_sync run_fn = lambda: opt.minimize(loss, var_list=[var]) run_op = strategy.experimental_run(run_fn) self.evaluate(variables.global_variables_initializer()) self._run_if_in_graph_mode(run_op) # Gradient is 2, so variable will have 2 subtracted from it self.assertAllClose([-1.0, 0.0], self.evaluate(var)) # Loss scale has doubled from 2 to 4 self.assertEqual(4., self.evaluate(opt.loss_scale())) # Test optimizer with NaN gradients loss = lambda: var * float('NaN') run_fn = lambda: opt.minimize(loss, var_list=[var]) run_op = strategy.experimental_run(run_fn) self._run_if_in_graph_mode(run_op) # Variable should not change from before, due to NaN gradients. self.assertAllClose(self.evaluate(var), [-1.0, 0.0]) # Loss scale should half due to NaN gradients. self.assertEqual(2., self.evaluate(opt.loss_scale())) @parameterized.named_parameters(*TESTCASES) @test_util.run_in_graph_and_eager_modes def testDynamicLossScaleWithSlots(self, strategy_fn): with strategy_fn().scope() as strategy: var = variables.Variable([1.0, 2.0]) # An SGD optimizer with momentum has slot variables. opt = gradient_descent.SGD(1.0, momentum=1.) initial_loss_scale = 2. loss_scale = loss_scale_module.DynamicLossScale( initial_loss_scale=initial_loss_scale, increment_period=1, multiplier=4) opt = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale) loss = lambda: var / strategy.num_replicas_in_sync run_fn = lambda: opt.minimize(loss, var_list=[var]) run_op = strategy.experimental_run(run_fn) self.evaluate(variables.global_variables_initializer()) self._run_if_in_graph_mode(run_op) # The momentum accumulator starts at 0 and the gradient is 1. The # accumulator is incremented by the gradient, so it is now 1. Then the # variable is subtracted by the accumulator, so the variable is subtracted # by 1. self.assertAllClose([0.0, 1.0], self.evaluate(var)) self.assertEqual(self.evaluate(opt.loss_scale()), initial_loss_scale * 4) run_op = strategy.experimental_run(run_fn) self._run_if_in_graph_mode(run_op) # The momentum accumulator was 1 before this step and the gradient is 1. # The accumulator is incremented by the gradient, so it is now 2. Then the # variable is subtracted by the accumulator, so the variable is subtracted # by 2. self.assertAllClose([-2., -1.], self.evaluate(var)) self.assertEqual(self.evaluate(opt.loss_scale()), initial_loss_scale * 16) @test_util.run_in_graph_and_eager_modes def testIterations(self): opt = gradient_descent.SGD(2.0) lso = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale=10.) 
lso.iterations = 7 self.assertEqual(lso.iterations, 7) self.assertEqual(opt.iterations, 7) @parameterized.named_parameters(*TESTCASES) @test_util.run_in_graph_and_eager_modes def testGettingAndSettingLearningRate(self, strategy_fn): with strategy_fn().scope() as strategy: var = variables.Variable([5.0]) opt = adam.Adam(learning_rate=1.0) loss = lambda: var * 2.0 run_fn = lambda: opt.minimize(loss, [var]) run_op = strategy.experimental_run(run_fn) self.evaluate(variables.global_variables_initializer()) self._run_if_in_graph_mode(run_op) lr = self.evaluate(opt.lr) self.assertEqual(1.0, lr) opt.lr = 2.0 lr = self.evaluate(opt.lr) self.assertEqual(2.0, lr) self.evaluate(opt.lr.assign(3.0)) lr = self.evaluate(opt.lr) self.assertEqual(3.0, lr) with self.assertRaises(AttributeError): opt.not_an_attr += 3 @test_util.run_in_graph_and_eager_modes def testArbitraryAttributesNotExposed(self): opt = adam.Adam(learning_rate=1.0) # Test that Adam has attributes 'epsilon' and 'beta1' opt.epsilon # pylint: disable=pointless-statement opt.beta_1 # pylint: disable=pointless-statement opt = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale=10.) # Test that attributes defined by OptimizerV2 subclasses are not exposed in # LossScaleOptimizer. with self.assertRaises(AttributeError): opt.epsilon # pylint: disable=pointless-statement with self.assertRaises(AttributeError): opt.beta_1 # pylint: disable=pointless-statement @parameterized.named_parameters(*TESTCASES) @test_util.run_in_graph_and_eager_modes def testCheckpoint(self, strategy_fn): strategy = strategy_fn() if (isinstance(strategy, mirrored_strategy.MirroredStrategy) and not context.executing_eagerly()): # TODO(b/121381184): Enable running the test in this case. return with self.test_session(), strategy.scope(): # Build and run a simple model. var = variables.Variable([2.0]) loss_scale = loss_scale_module.DynamicLossScale( initial_loss_scale=1., increment_period=2., multiplier=2.) opt = gradient_descent.SGD(1., momentum=1.) opt = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale) run_fn = lambda: opt.minimize(lambda: var + 1., var_list=[var]) opt_op = strategy.experimental_run(run_fn) self.evaluate(variables.global_variables_initializer()) self.evaluate(opt_op) self.assertEqual(self.evaluate(loss_scale()), 1.) self.assertEqual(self.evaluate(loss_scale._num_good_steps), 1) slot_var = opt._optimizer.get_slot(var, 'momentum') slot_value = self.evaluate(slot_var).item() # Save a checkpoint. checkpoint = trackable_utils.Checkpoint(optimizer=opt, var=var) prefix = os.path.join(self.get_temp_dir(), 'ckpt') save_path = checkpoint.save(prefix) # Run model again. self.evaluate(strategy.experimental_run(run_fn)) self.assertEqual(self.evaluate(loss_scale()), 2.) self.assertEqual(self.evaluate(loss_scale._num_good_steps), 0) self.assertNotAlmostEqual(self.evaluate(slot_var).item(), slot_value) # Load checkpoint and ensure loss scale is back to it's original value. status = checkpoint.restore(save_path) status.assert_consumed() status.run_restore_ops() self.assertEqual(self.evaluate(loss_scale()), 1.) self.assertEqual(self.evaluate(loss_scale._num_good_steps), 1) self.assertAlmostEqual(self.evaluate(slot_var).item(), slot_value) if __name__ == '__main__': test.main()
tensorflow-master
tensorflow/python/keras/mixed_precision/experimental/loss_scale_optimizer_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for AutoCastVariable.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os from absl.testing import parameterized import numpy as np from tensorflow.python.distribute import mirrored_strategy from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.keras.mixed_precision.experimental import autocast_variable from tensorflow.python.ops import array_ops from tensorflow.python.ops import variables from tensorflow.python.platform import test from tensorflow.python.training.tracking import util as trackable_utils TESTCASES = ({ 'testcase_name': 'base', 'distribute': False }, { 'testcase_name': 'distribute', 'distribute': True }) def get_distribute_scope(distribute): class DummyContextManager(object): def __enter__(self): pass def __exit__(self, *args): pass if distribute: return mirrored_strategy.MirroredStrategy(['cpu:0']).scope() else: return DummyContextManager() def get_autocast_var(var, distribute): if distribute: return autocast_variable.AutoCastDistributedVariable(var) else: return autocast_variable.AutoCastVariable(var) def get_var(val, dtype): return variables.VariableV1(val, use_resource=True, dtype=dtype) @test_util.run_all_in_graph_and_eager_modes class AutoCastVariableTest(test.TestCase, parameterized.TestCase): @parameterized.named_parameters(*TESTCASES) def test_read(self, distribute): with get_distribute_scope(distribute): x = get_var(1., dtypes.float32) x = get_autocast_var(x, distribute) self.evaluate(x.initializer) # outside of auto cast scope. 
self.assertEqual(x.dtype, dtypes.float32) self.assertEqual(x.value().dtype, dtypes.float32) self.assertEqual(x.read_value().dtype, dtypes.float32) self.assertEqual(array_ops.identity(x).dtype, dtypes.float32) # within auto cast scope of different dtype with ops.get_default_graph()._enable_auto_casting_variables( dtypes.float16): self.assertEqual(x.dtype, dtypes.float16) self.assertEqual(x.value().dtype, dtypes.float16) self.assertEqual(x.read_value().dtype, dtypes.float16) self.assertEqual(array_ops.identity(x).dtype, dtypes.float16) # within auto cast scope of same dtype with ops.get_default_graph()._enable_auto_casting_variables( dtypes.float32): self.assertEqual(x.dtype, dtypes.float32) self.assertEqual(x.value().dtype, dtypes.float32) self.assertEqual(x.read_value().dtype, dtypes.float32) self.assertEqual(array_ops.identity(x).dtype, dtypes.float32) @parameterized.named_parameters(*TESTCASES) def test_read_nested_scopes(self, distribute): with get_distribute_scope(distribute): x = get_var(1., dtypes.float32) x = get_autocast_var(x, distribute) self.evaluate(x.initializer) with ops.get_default_graph()._enable_auto_casting_variables( dtypes.float16): self.assertEqual(x.dtype, dtypes.float16) self.assertEqual(x.read_value().dtype, dtypes.float16) with ops.get_default_graph()._enable_auto_casting_variables( dtypes.float32): self.assertEqual(x.dtype, dtypes.float32) self.assertEqual(x.read_value().dtype, dtypes.float32) self.assertEqual(x.dtype, dtypes.float16) self.assertEqual(x.read_value().dtype, dtypes.float16) @parameterized.named_parameters(*TESTCASES) def test_operator_overloads(self, distribute): with get_distribute_scope(distribute): x = get_var(1., dtypes.float32) x = get_autocast_var(x, distribute) self.evaluate(x.initializer) v1 = constant_op.constant(2., dtype=dtypes.float32) v2 = constant_op.constant(2., dtype=dtypes.float16) # Because autocast variables do not yet define operator overloads, the # operator is defined by the non-variable tensor # Test variable as the LHS. Currently, this is not supported with # distributed autocast variables if not distribute: self.assertEqual(self.evaluate(x + v1), 3.) with ops.get_default_graph()._enable_auto_casting_variables( dtypes.float16): self.assertEqual(self.evaluate(x + v2), 3.) # Test variable as the RHS self.assertEqual(self.evaluate(v1 + x), 3.) with ops.get_default_graph()._enable_auto_casting_variables( dtypes.float16): self.assertEqual(self.evaluate(v2 + x), 3.) @parameterized.named_parameters(*TESTCASES) def test_assign(self, distribute): with get_distribute_scope(distribute): x = get_var(0., dtypes.float32) x = get_autocast_var(x, distribute) self.evaluate(x.initializer) # outside of auto cast scope. 
v1 = constant_op.constant(3.14, dtype=dtypes.float32) v2 = constant_op.constant(3.14, dtype=dtypes.float16) def run_and_check(): # Assign float32 values self.assertAllClose(3.14, self.evaluate(x.assign(v1))) self.assertAllClose(3.14 * 2, self.evaluate(x.assign_add(v1))) self.assertAllClose(3.14, self.evaluate(x.assign_sub(v1))) # Attempt to assign float16 values with self.assertRaisesRegexp( ValueError, 'conversion requested dtype float32 for Tensor with dtype float16'): self.evaluate(x.assign(v2)) with self.assertRaisesRegexp( ValueError, 'conversion requested dtype float32 for Tensor with dtype float16'): self.evaluate(x.assign_add(v2)) with self.assertRaisesRegexp( ValueError, 'conversion requested dtype float32 for Tensor with dtype float16'): self.evaluate(x.assign_sub(v2)) # Assign Python floats self.assertAllClose(3.14, self.evaluate(x.assign(3.14))) self.assertAllClose(3.14 * 2, self.evaluate(x.assign_add(3.14))) self.assertAllClose(3.14, self.evaluate(x.assign_sub(3.14))) run_and_check() # reset x self.evaluate(x.assign(0.)) # within auto cast scope. with ops.get_default_graph()._enable_auto_casting_variables( dtypes.float16): # assign still expect float32 value even if in float16 scope run_and_check() @parameterized.named_parameters(*TESTCASES) def test_assign_stays_in_true_dtype(self, distribute): with get_distribute_scope(distribute): x = get_var(1., dtypes.float32) x = get_autocast_var(x, distribute) self.evaluate(x.initializer) # small_val is a value such that 1.0 + small_val == 1.0 in fp16, but not # in fp32 small_val = np.finfo('float16').eps / 2 small_tensor = constant_op.constant(small_val, dtype=dtypes.float32) with ops.get_default_graph()._enable_auto_casting_variables( dtypes.float16): # Variable should be increased, despite it appearing to be the same # float16 value. self.assertEqual(1. + small_val, self.evaluate(x.assign(1. + small_tensor))) self.assertEqual(1., self.evaluate(x.value())) self.assertEqual(1. + small_val, self.evaluate(x.value())) self.evaluate(x.assign(1.)) with ops.get_default_graph()._enable_auto_casting_variables( dtypes.float16): self.assertEqual(1. + small_val, self.evaluate(x.assign_add(small_tensor))) self.assertEqual(1., self.evaluate(x.value())) self.assertEqual(1. + small_val, self.evaluate(x.value())) @parameterized.named_parameters(*TESTCASES) def test_checkpoint(self, distribute): with get_distribute_scope(distribute): x = get_var(1., dtypes.float32) x = get_autocast_var(x, distribute) self.evaluate(x.initializer) self.evaluate(x.assign(123.)) checkpoint = trackable_utils.Checkpoint(x=x) prefix = os.path.join(self.get_temp_dir(), 'ckpt') save_path = checkpoint.save(prefix) self.evaluate(x.assign(234.)) checkpoint.restore(save_path).assert_consumed().run_restore_ops() self.assertEqual(self.evaluate(x), 123.) @parameterized.named_parameters(*TESTCASES) def test_invalid_wrapped_variable(self, distribute): with get_distribute_scope(distribute): # Wrap a non-variable with self.assertRaisesRegexp(ValueError, 'variable must be of type'): x = constant_op.constant([1.], dtype=dtypes.float32) get_autocast_var(x, distribute) # Wrap a non-floating point variable with self.assertRaisesRegexp(ValueError, 'variable must be a floating point'): x = get_var(1, dtypes.int32) get_autocast_var(x, distribute) if distribute: # Wrap a non-distributed variable with AutoCastDistributedVariable with self.assertRaisesRegexp(ValueError, 'variable must be of type'): x = get_var(1., dtypes.float32) get_autocast_var(x, distribute) if __name__ == '__main__': test.main()
tensorflow-master
tensorflow/python/keras/mixed_precision/experimental/autocast_variable_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Contains AutoCastVariable, a variable which automatically casts itself.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.distribute import values as distribute_values from tensorflow.python.framework import ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import resource_variable_ops from tensorflow.python.training.tracking import base as trackable # TODO(reedwm) Make this subclass AutoCastVariable. class AutoCastVariable(trackable.Trackable): """Variable that will cast itself to a different dtype in applicable contexts. This class wraps a floating-point tf.Variable. It emulates the variable interface and delegates to the wrapped variable, but it additionally will cast the wrapped variable under a `Graph._enable_variable_auto_cast(dtype)` context manager. For example: ``` v = tf.Variable(1.0, dtype=tf.float32) v = AutoCastVariable(v) print(tf.identity(v).dtype) # tf.float32 with ops.get_default_graph()._enable_variable_auto_cast(tf.float16): print(tf.identity(v).dtype) # tf.float16, as v will cast itself to float16 print(v.dtype) # tf.float16, as v.dtype also changes under the ctx manager. ``` The purpose of this class is to allow Keras layers to create variables in float32, and automatically cast them to float16 or bfloat16 when the layer is called. """ def __init__(self, variable): """Creates an AutoCastVariable instance. Args: variable: A floating-point resource variable to wrap. Raises: ValueError: If `variable` is not a floating-point resource variable """ if not resource_variable_ops.is_resource_variable(variable): raise ValueError('variable must be of type tf.ResourceVariable, but got: ' '%s' % variable) if not variable.dtype.is_floating: raise ValueError('variable must be a floating point variable but has ' 'type: %s' % variable.dtype.name) self._variable = variable # Delegate to the underlying variable for checkpointing. 
self._gather_saveables_for_checkpoint = ( self._variable._gather_saveables_for_checkpoint) # pylint: disable=protected-access @property def name(self): return self._variable.name def _should_cast(self): """Returns True if this variable should be casted when accessed.""" g = ops.get_default_graph() # pylint:disable=protected-access return (g._auto_cast_variable_read_dtype is not None and self.true_dtype != g._auto_cast_variable_read_dtype) # pylint:enable=protected-access @property def dtype(self): """The dtype this variable will be casted to when read.""" if self._should_cast(): return ops.get_default_graph()._auto_cast_variable_read_dtype # pylint:disable=protected-access else: return self._variable.dtype @property def true_dtype(self): """The dtype of the underlying variable, before any casts are done.""" return self._variable.dtype def value(self): val = self._variable.value() if not self._should_cast(): return val # We colocate_with(None) to ignore the existing device constraints, so that # the cast is always done on the variable's device with ops.colocate_with(None, ignore_existing=True): with ops.device(val.device): return math_ops.cast(val, self.dtype) def read_value(self): val = self._variable.read_value() if not self._should_cast(): return val return math_ops.cast(val, self.dtype) def sparse_read(self, indices, name=None): """Reads the value of this variable sparsely, using `gather`.""" val = self._variable.sparse_read(indices, name=name) if not self._should_cast(): return val return math_ops.cast(val, self.dtype) def assign(self, value, use_locking=None, name=None, read_value=True): return self._variable.assign( value, use_locking=use_locking, name=name, read_value=read_value) def assign_add(self, delta, use_locking=None, name=None, read_value=True): return self._variable.assign_add( delta, use_locking=use_locking, name=name, read_value=read_value) def assign_sub(self, delta, use_locking=None, name=None, read_value=True): return self._variable.assign_sub( delta, use_locking=use_locking, name=name, read_value=read_value) # TODO(reedwm): Support assigning variables with tf.compat.v1.assign(), # var.scatter_add, etc. def __getattr__(self, name): return getattr(self._variable, name) def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False): """Converts this variable to a tensor.""" if not self._should_cast(): return ops.internal_convert_to_tensor(self._variable, dtype, name, as_ref) # TODO(reedwm): Support as_ref? assert not as_ref if dtype is not None and not dtype.is_compatible_with(self.dtype): raise ValueError( 'Incompatible type conversion requested to type {!r} for variable ' 'of type {!r}'.format(dtype.name, self.dtype.name)) val = ops.internal_convert_to_tensor(self._variable, self._variable.dtype, name, as_ref=False) with ops.colocate_with(None, ignore_existing=True): with ops.device(val.device): return math_ops.cast(val, self.dtype) def _should_act_as_resource_variable(self): """Pass resource_variable_ops.is_resource_variable check.""" pass # TODO(reedwm): Define operator overloads. ops.register_tensor_conversion_function( AutoCastVariable, AutoCastVariable._dense_var_to_tensor) # pylint:disable=protected-access ops.register_dense_tensor_like_type(AutoCastVariable) # We have DistributedVariable subclass to pass # isinstance(..., DistributedVariable) checks when wrapping a # DistributedVariable. # TODO(reedwm): We should not wrap DistributedVariable, but instead have # DistributedVariable wrap AutoCastVariable. 
Subclassing DistributedVariable is # messy, because we do not fully implement the interface of DistributedVariable. class AutoCastDistributedVariable(AutoCastVariable, distribute_values.DistributedVariable): """Version of AutoCastVariable that subclasses DistributedVariable.""" def __init__(self, variable): if not isinstance(variable, distribute_values.DistributedValues): raise ValueError('variable must be of type DistributedValues, ' 'but got: %s' % variable) super(AutoCastDistributedVariable, self).__init__(variable)
tensorflow-master
tensorflow/python/keras/mixed_precision/experimental/autocast_variable.py
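A small sketch of AutoCastVariable in use, mirroring the tests above; it relies on the private `Graph._enable_auto_casting_variables` API used by that test file, so it is an illustration tied to this experimental snapshot rather than a stable recipe:

from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.keras.mixed_precision.experimental import autocast_variable
from tensorflow.python.ops import variables

v = variables.VariableV1(1.0, use_resource=True, dtype=dtypes.float32)
v = autocast_variable.AutoCastVariable(v)

with ops.get_default_graph()._enable_auto_casting_variables(dtypes.float16):
  assert v.dtype == dtypes.float16              # reads are cast inside the scope
  assert v.read_value().dtype == dtypes.float16
assert v.true_dtype == dtypes.float32           # underlying storage is unchanged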
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Contains the loss scaling optimizer class.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.distribute import distribution_strategy_context from tensorflow.python.framework import smart_cond from tensorflow.python.keras import backend from tensorflow.python.keras.optimizer_v2 import optimizer_v2 from tensorflow.python.ops import control_flow_ops from tensorflow.python.training.experimental import loss_scale as loss_scale_module from tensorflow.python.util.tf_export import keras_export class _UnwrapPreventer(object): """Wrapper that DistributionStrategy will not unwrap. Typically, DistributionStrategy will unwrap values when going from a cross- replica context to a replica context via `call_for_each_replica`. This class is a wrapper that DistributionStrategy will not unwrap, so it can be used to prevent it from unwrapping a value. TODO(reedwm): Find/implement a better way of preventing values from being unwrapped by DistributionStrategy """ def __init__(self, value): self.value = value @keras_export('keras.mixed_precision.experimental.LossScaleOptimizer') class LossScaleOptimizer(optimizer_v2.OptimizerV2): """An optimizer that applies loss scaling. Loss scaling is a process that multiplies the loss by a multiplier called the loss scale, and divides each gradient by the same multiplier. The pseudocode for this process is: ``` loss = ... loss *= loss_scale grads = gradients(loss, vars) grads /= loss_scale ``` Mathematically, loss scaling has no effect, but can help avoid numerical underflow in intermediate gradients when float16 tensors are used. By multiplying the loss, each intermediate gradient will have the same multiplier applied. The loss scale can either be a fixed constant, chosen by the user, or be dynamically determined. Dynamically determining the loss scale is convenient as a loss scale does not have to be explicitly chosen. However it reduces performance. This optimizer wraps another optimizer and applies loss scaling to it via a `LossScale`. Loss scaling is applied whenever gradients are computed, either through `minimize()` or `get_gradients()`. The loss scale is updated via `LossScale.update()` whenever gradients are applied, either through `minimize()` or `apply_gradients()`. For example: ```python opt = tf.keras.optimizers.SGD(0.1) opt = tf.keras.mixed_precision.experimental.LossScaleOptimizer(opt, "dynamic") # 'minimize' applies loss scaling to the loss and updates the loss sale. opt.minimize(loss_fn) ``` If a `tf.GradientTape` is used to compute gradients instead of `LossScaleOptimizer.minimize` or `LossScaleOptimizer.get_gradients`, the loss and gradients must be scaled manually. 
This can be done by calling `LossScaleOptimizer.get_scaled_loss` before passing the loss to `tf.GradientTape`, and `LossScaleOptimizer.get_unscaled_gradients` after computing the gradients with `tf.GradientTape`. For example: ```python opt = tf.keras.mixed_precision.experimental.LossScaleOptimizer(...) vars = ... with tf.GradientTape() as tape: loss = ... scaled_loss = opt.get_scaled_loss(loss) scaled_grads = tape.gradient(scaled_loss, vars) grads = opt.get_unscaled_gradients(scaled_grads) opt.apply_gradients(zip(grads, vars)) # Loss scale will be updated here ``` """ def __init__(self, opt, loss_scale): """Initializes this loss scale optimizer. Args: opt: The Optimizer instance to wrap. loss_scale: The loss scale to scale the loss and gradients. This can either be an int/float to use a fixed loss scale, the string "dynamic" to use dynamic loss scaling, or an instance of a LossScale. The string "dynamic" equivalent to passing `DynamicLossScale()`, and passing an int/float is equivalent to passing a FixedLossScale with the given loss scale. """ if not isinstance(opt, optimizer_v2.OptimizerV2): raise ValueError('"opt" must be an instance of OptimizerV2, but got: %s' % opt) if hasattr(opt, 'clipnorm'): raise ValueError('LossScaleOptimizer does not support wrapping ' 'optimizers with a clipnorm. Optimizer %s has clipnorm ' '%s' % (opt, opt.clipnorm)) if hasattr(opt, 'clipvalue'): raise ValueError('LossScaleOptimizer does not support wrapping ' 'optimizers with a clipvalue. Optimizer %s has ' 'clipvalue %s' % (opt, opt.clipvalue)) self._optimizer = opt self._loss_scale = loss_scale_module.get(loss_scale) for weight in loss_scale_module.get_loss_scale_weights(self._loss_scale): # We cannot call `track_variable` in the LossScale class itself, because a # file outside of Keras cannot depend on a Keras file. Calling it here # instead is OK, because a variable only needs to be tracked if used with # a Keras class, and the only way to use LossScale with a Keras class is # through the LossScaleOptimizer. backend.track_variable(weight) self._track_trackable(self._optimizer, 'base_optimizer') self._track_trackable(self._loss_scale, 'loss_scale') @property def loss_scale(self): """The `LossScale` instance associated with this optimizer.""" return self._loss_scale def get_scaled_loss(self, loss): """Scales the loss by the loss scale. This method is only needed if you compute gradients manually, e.g. with `tf.GradientTape`. In that case, call this method to scale the loss before passing the loss to `tf.GradientTape`. If you use `LossScaleOptimizer.minimize` or `LossScaleOptimizer.get_gradients`, loss scaling is automatically applied and this method is unneeded. If this method is called, `get_unscaled_gradients` should also be called. See the `tf.keras.mixed_precision.experimental.LossScaleOptimizer` doc for an example. Args: loss: The loss, which will be multiplied by the loss scale. Can either be a tensor or a callable returning a tensor. Returns: `loss` multiplied by `LossScaleOptimizer.loss_scale()`. """ loss_scale = self._loss_scale() if callable(loss): return lambda: loss() * loss_scale else: return loss * loss_scale def get_unscaled_gradients(self, grads): """Unscales the gradients by the loss scale. This method is only needed if you compute gradients manually, e.g. with `tf.GradientTape`. In that case, call this method to unscale the gradients after computing them with `tf.GradientTape`. 
If you use `LossScaleOptimizer.minimize` or `LossScaleOptimizer.get_gradients`, loss scaling is automatically applied and this method is unneeded. If this method is called, `get_scaled_loss` should also be called. See the `tf.keras.mixed_precision.experimental.LossScaleOptimizer` doc for an example. Args: grads: A list of tensors, each which will be divided by the loss scale. Can have None values, which are ignored. Returns: A new list the same size as `grads`, where every non-None value in `grads` is divided by `LossScaleOptimizer.loss_scale()`. """ loss_scale = self._loss_scale() loss_scale_reciprocal = 1. / loss_scale return [g * loss_scale_reciprocal if g is not None else None for g in grads] def _compute_gradients(self, loss, var_list, grad_loss=None): loss = self.get_scaled_loss(loss) grads_and_vars = self._optimizer._compute_gradients(loss, var_list, # pylint: disable=protected-access grad_loss) grads = [g for g, _ in grads_and_vars] variables = [v for _, v in grads_and_vars] unscaled_grads = self.get_unscaled_gradients(grads) return list(zip(unscaled_grads, variables)) def get_gradients(self, loss, params): loss = self.get_scaled_loss(loss) grads = self._optimizer.get_gradients(loss, params) return self.get_unscaled_gradients(grads) def apply_gradients(self, grads_and_vars, name=None): if distribution_strategy_context.in_cross_replica_context(): raise ValueError('apply_gradients() must be called in a replica context.') grads_and_vars = tuple(grads_and_vars) return distribution_strategy_context.get_replica_context().merge_call( self._apply_gradients_cross_replica, args=(grads_and_vars, name)) def _apply_gradients_cross_replica(self, distribution, grads_and_vars, name): grads = [g for g, _ in grads_and_vars] loss_scale_update_op, should_apply_grads = self._loss_scale.update(grads) def apply_fn(): # We do not want DistributionStrategy to unwrap any MirroredVariables in # grads_and_vars, because even in a replica context, the wrapped optimizer # expects mirrored variables. So we wrap grads_and_vars with an # _UnwrapPreventer, preventing DistributionStrategy from unwrapping the # MirroredVariables. wrapped_grads_and_vars = _UnwrapPreventer(grads_and_vars) return distribution.extended.call_for_each_replica( self._apply_gradients, args=(wrapped_grads_and_vars, name)) # Note: We must call this cond() in a cross-replica context. # DistributionStrategy does not support having a cond in a replica context # with a branch that calls `merge_call`, and self._optimizer.apply_gradients # calls `merge_call`. maybe_apply_op = smart_cond.smart_cond(should_apply_grads, apply_fn, control_flow_ops.no_op) return control_flow_ops.group(maybe_apply_op, loss_scale_update_op) def _apply_gradients(self, wrapped_grads_and_vars, name): grads_and_vars = wrapped_grads_and_vars.value return self._optimizer.apply_gradients(grads_and_vars, name) @property def iterations(self): return self._optimizer.iterations @iterations.setter def iterations(self, variable): self._optimizer.iterations = variable # For the most part, we only expose methods in the base OptimizerV2, not # individual subclasses like Adam. However, although "learning_rate" and "lr" # properties are not part of the base OptimizerV2 class, they are part of most # subclasses, so we expose them here for convenience. 
@property def learning_rate(self): return self._optimizer.learning_rate @learning_rate.setter def learning_rate(self, lr): self._optimizer.learning_rate = lr @property def lr(self): return self._optimizer.lr @lr.setter def lr(self, lr): self._optimizer.lr = lr def get_slot_names(self): """A list of names for this optimizer's slots.""" return self._optimizer.get_slot_names() # TODO(reedwm): Maybe merge this class's functionality into OptimizerV2. # TODO(reedwm): Maybe throw an error if mixed precision is used without this # optimizer being used. # TODO(reedwm): Implement get_config and from_config. This will first require # implementing deserialization support for OptimizerV2. def get_config(self): raise NotImplementedError('get_config() is not yet implemented for ' 'LossScaleOptimizers') @classmethod def from_config(cls, config, custom_objects=None): raise NotImplementedError('from_config() is not yet implemented for ' 'LossScaleOptimizers')
tensorflow-master
tensorflow/python/keras/mixed_precision/experimental/loss_scale_optimizer.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # pylint: disable=protected-access """Utilities related to loss functions.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.distribute import distribution_strategy_context from tensorflow.python.framework import ops from tensorflow.python.keras import backend as K from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops.losses import loss_reduction from tensorflow.python.ops.losses import util as tf_losses_utils from tensorflow.python.util.tf_export import keras_export # TODO(joshl/psv): Update references to ReductionV2 to point to its # new location. ReductionV2 = keras_export( # pylint: disable=invalid-name 'keras.losses.Reduction', v1=[])(loss_reduction.ReductionV2) def _safe_mean(losses, num_present): """Computes a safe mean of the losses. Args: losses: `Tensor` whose elements contain individual loss measurements. num_present: The number of measurable elements in `losses`. Returns: A scalar representing the mean of `losses`. If `num_present` is zero, then zero is returned. """ total_loss = math_ops.reduce_sum(losses) return math_ops.div_no_nan(total_loss, num_present, name='value') def _num_elements(losses): """Computes the number of elements in `losses` tensor.""" with K.name_scope('num_elements') as scope: return math_ops.cast(array_ops.size(losses, name=scope), dtype=losses.dtype) def reduce_weighted_loss(weighted_losses, reduction=ReductionV2.SUM_OVER_BATCH_SIZE): """Reduces the individual weighted loss measurements.""" if reduction == ReductionV2.NONE: loss = weighted_losses else: loss = math_ops.reduce_sum(weighted_losses) if reduction == ReductionV2.SUM_OVER_BATCH_SIZE: loss = _safe_mean(loss, _num_elements(weighted_losses)) return loss def compute_weighted_loss(losses, sample_weight=None, reduction=ReductionV2.SUM_OVER_BATCH_SIZE, name=None): """Computes the weighted loss. Args: losses: `Tensor` of shape `[batch_size, d1, ... dN]`. sample_weight: Optional `Tensor` whose rank is either 0, or the same rank as `losses`, or be broadcastable to `losses`. reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to loss. Default value is `SUM_OVER_BATCH_SIZE`. name: Optional name for the op. Raises: ValueError: If the shape of `sample_weight` is not compatible with `losses`. Returns: Weighted loss `Tensor` of the same type as `losses`. If `reduction` is `NONE`, this has the same shape as `losses`; otherwise, it is scalar. """ ReductionV2.validate(reduction) # If this function is called directly, then we just default 'AUTO' to # 'SUM_OVER_BATCH_SIZE'. Eg. Canned estimator use cases. 
if reduction == ReductionV2.AUTO: reduction = ReductionV2.SUM_OVER_BATCH_SIZE if sample_weight is None: sample_weight = 1.0 with K.name_scope(name or 'weighted_loss'): # Save the `reduction` argument for loss normalization when distributing # to multiple replicas. Used only for estimator + v1 optimizer flow. ops.get_default_graph()._last_loss_reduction = reduction # pylint: disable=protected-access losses = ops.convert_to_tensor(losses) input_dtype = losses.dtype weighted_losses = tf_losses_utils.scale_losses_by_sample_weight( losses, sample_weight) # Apply reduction function to the individual weighted losses. loss = reduce_weighted_loss(weighted_losses, reduction) # Convert the result back to the input type. loss = math_ops.cast(loss, input_dtype) return loss def scale_loss_for_distribution(loss_value): """Scales and returns the given loss value by the number of replicas.""" num_replicas = ( distribution_strategy_context.get_strategy().num_replicas_in_sync) if num_replicas > 1: loss_value *= (1. / num_replicas) return loss_value
tensorflow-master
tensorflow/python/keras/utils/losses_utils.py
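A brief worked example of `compute_weighted_loss` from losses_utils.py above (illustrative values; assumes eager execution): with the default `SUM_OVER_BATCH_SIZE` reduction, the weighted losses are summed and divided by the number of elements, not by the sum of the weights.

import numpy as np
from tensorflow.python.keras.utils import losses_utils

losses = np.array([1.0, 2.0, 3.0], dtype='float32')
weights = np.array([1.0, 0.5, 0.0], dtype='float32')

# Weighted losses are [1.0, 1.0, 0.0]; the reduction divides their sum by the
# element count: (1.0 + 1.0 + 0.0) / 3 ~= 0.667.
loss = losses_utils.compute_weighted_loss(losses, sample_weight=weights)
print(float(loss))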
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Keras model mode constants.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function # pylint: disable=unused-import from tensorflow.python.saved_model.model_utils.mode_keys import KerasModeKeys as ModeKeys # pylint: enable=unused-import
tensorflow-master
tensorflow/python/keras/utils/mode_keys.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # pylint: disable=protected-access """Utilities related to layer/model functionality. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.keras import backend as K from tensorflow.python.keras.utils.conv_utils import convert_kernel from tensorflow.python.util import nest from tensorflow.python.util.tf_export import keras_export @keras_export('keras.utils.get_source_inputs') def get_source_inputs(tensor, layer=None, node_index=None): """Returns the list of input tensors necessary to compute `tensor`. Output will always be a list of tensors (potentially with 1 element). Arguments: tensor: The tensor to start from. layer: Origin layer of the tensor. Will be determined via tensor._keras_history if not provided. node_index: Origin node index of the tensor. Returns: List of input tensors. """ if not hasattr(tensor, '_keras_history'): return tensor if layer is None or node_index: layer, node_index, _ = tensor._keras_history if not layer._inbound_nodes: return [tensor] else: node = layer._inbound_nodes[node_index] if not node.inbound_layers: # Reached an Input layer, stop recursion. return nest.flatten(node.input_tensors) else: source_tensors = [] for layer, node_index, _, tensor in node.iterate_inbound(): previous_sources = get_source_inputs(tensor, layer, node_index) # Avoid input redundancy. for x in previous_sources: if x not in source_tensors: source_tensors.append(x) return source_tensors def count_params(weights): """Count the total number of scalars composing the weights. Arguments: weights: An iterable containing the weights on which to compute params Returns: The total number of scalars composing the weights """ return int(sum(np.prod(p.shape.as_list()) for p in set(weights))) def print_summary(model, line_length=None, positions=None, print_fn=None): """Prints a summary of a model. Arguments: model: Keras model instance. line_length: Total length of printed lines (e.g. set this to adapt the display to different terminal window sizes). positions: Relative or absolute positions of log elements in each line. If not provided, defaults to `[.33, .55, .67, 1.]`. print_fn: Print function to use. It will be called on each line of the summary. You can set it to a custom function in order to capture the string summary. It defaults to `print` (prints to stdout). """ if print_fn is None: print_fn = print if model.__class__.__name__ == 'Sequential': sequential_like = True elif not model._is_graph_network: # We treat subclassed models as a simple sequence of layers, for logging # purposes. 
sequential_like = True else: sequential_like = True nodes_by_depth = model._nodes_by_depth.values() nodes = [] for v in nodes_by_depth: if (len(v) > 1) or (len(v) == 1 and len(nest.flatten(v[0].inbound_layers)) > 1): # if the model has multiple nodes # or if the nodes have multiple inbound_layers # the model is no longer sequential sequential_like = False break nodes += v if sequential_like: # search for shared layers for layer in model.layers: flag = False for node in layer._inbound_nodes: if node in nodes: if flag: sequential_like = False break else: flag = True if not sequential_like: break if sequential_like: line_length = line_length or 65 positions = positions or [.45, .85, 1.] if positions[-1] <= 1: positions = [int(line_length * p) for p in positions] # header names for the different log elements to_display = ['Layer (type)', 'Output Shape', 'Param #'] else: line_length = line_length or 98 positions = positions or [.33, .55, .67, 1.] if positions[-1] <= 1: positions = [int(line_length * p) for p in positions] # header names for the different log elements to_display = ['Layer (type)', 'Output Shape', 'Param #', 'Connected to'] relevant_nodes = [] for v in model._nodes_by_depth.values(): relevant_nodes += v def print_row(fields, positions): line = '' for i in range(len(fields)): if i > 0: line = line[:-1] + ' ' line += str(fields[i]) line = line[:positions[i]] line += ' ' * (positions[i] - len(line)) print_fn(line) print_fn('Model: "{}"'.format(model.name)) print_fn('_' * line_length) print_row(to_display, positions) print_fn('=' * line_length) def print_layer_summary(layer): """Prints a summary for a single layer. Arguments: layer: target layer. """ try: output_shape = layer.output_shape except AttributeError: output_shape = 'multiple' except RuntimeError: # output_shape unknown in Eager mode. output_shape = '?' name = layer.name cls_name = layer.__class__.__name__ fields = [name + ' (' + cls_name + ')', output_shape, layer.count_params()] print_row(fields, positions) def print_layer_summary_with_connections(layer): """Prints a summary for a single layer (including topological connections). Arguments: layer: target layer. 
""" try: output_shape = layer.output_shape except AttributeError: output_shape = 'multiple' connections = [] for node in layer._inbound_nodes: if relevant_nodes and node not in relevant_nodes: # node is not part of the current network continue for inbound_layer, node_index, tensor_index, _ in node.iterate_inbound(): connections.append('{}[{}][{}]'.format(inbound_layer.name, node_index, tensor_index)) name = layer.name cls_name = layer.__class__.__name__ if not connections: first_connection = '' else: first_connection = connections[0] fields = [ name + ' (' + cls_name + ')', output_shape, layer.count_params(), first_connection ] print_row(fields, positions) if len(connections) > 1: for i in range(1, len(connections)): fields = ['', '', '', connections[i]] print_row(fields, positions) layers = model.layers for i in range(len(layers)): if sequential_like: print_layer_summary(layers[i]) else: print_layer_summary_with_connections(layers[i]) if i == len(layers) - 1: print_fn('=' * line_length) else: print_fn('_' * line_length) model._check_trainable_weights_consistency() if hasattr(model, '_collected_trainable_weights'): trainable_count = count_params(model._collected_trainable_weights) else: trainable_count = count_params(model.trainable_weights) non_trainable_count = count_params(model.non_trainable_weights) print_fn('Total params: {:,}'.format(trainable_count + non_trainable_count)) print_fn('Trainable params: {:,}'.format(trainable_count)) print_fn('Non-trainable params: {:,}'.format(non_trainable_count)) print_fn('_' * line_length) def gather_trainable_weights(trainable, sub_layers, extra_variables): """Lists the trainable weights for an object with sub-layers. Args: trainable: Whether the object collecting the variables is trainable. sub_layers: A flat list of Layer objects owned by this object, to collect variables from. extra_variables: Any extra variables to include. Their `.trainable` property is used to categorize them. Returns: A list of collected trainable weights/variables. """ if not trainable: return [] weights = [] for layer in sub_layers: weights += layer.trainable_weights trainable_extra_variables = [ v for v in extra_variables if v.trainable] return weights + trainable_extra_variables def gather_non_trainable_weights(trainable, sub_layers, extra_variables): """Lists the non-trainable weights for an object with sub-layers. Args: trainable: Whether the object collecting the variables is trainable. sub_layers: A flat list of Layer objects owned by this object, to collect variables from. extra_variables: Any extra variables to include. Their `.trainable` property is used to categorize them. Returns: A list of collected non-trainable weights/variables. """ trainable_extra_variables = [] non_trainable_extra_variables = [] for v in extra_variables: if v.trainable: trainable_extra_variables.append(v) else: non_trainable_extra_variables.append(v) weights = [] for layer in sub_layers: weights += layer.non_trainable_weights if not trainable: trainable_weights = [] for layer in sub_layers: trainable_weights += layer.trainable_weights return (trainable_weights + trainable_extra_variables + weights + non_trainable_extra_variables) return weights + non_trainable_extra_variables @keras_export('keras.utils.convert_all_kernels_in_model') def convert_all_kernels_in_model(model): """Converts all convolution kernels in a model from Theano to TensorFlow. Also works from TensorFlow to Theano. Arguments: model: target model for the conversion. 
""" # Note: SeparableConvolution not included # since only supported by TF. conv_classes = { 'Conv1D', 'Conv2D', 'Conv3D', 'Conv2DTranspose', } to_assign = [] for layer in model.layers: if layer.__class__.__name__ in conv_classes: original_kernel = K.get_value(layer.kernel) converted_kernel = convert_kernel(original_kernel) to_assign.append((layer.kernel, converted_kernel)) K.batch_set_value(to_assign) def convert_dense_weights_data_format(dense, previous_feature_map_shape, target_data_format='channels_first'): """Utility useful when changing a convnet's `data_format`. When porting the weights of a convnet from one data format to the other, if the convnet includes a `Flatten` layer (applied to the last convolutional feature map) followed by a `Dense` layer, the weights of that `Dense` layer should be updated to reflect the new dimension ordering. Arguments: dense: The target `Dense` layer. previous_feature_map_shape: A shape tuple of 3 integers, e.g. `(512, 7, 7)`. The shape of the convolutional feature map right before the `Flatten` layer that came before the target `Dense` layer. target_data_format: One of "channels_last", "channels_first". Set it "channels_last" if converting a "channels_first" model to "channels_last", or reciprocally. """ assert target_data_format in {'channels_last', 'channels_first'} kernel, bias = dense.get_weights() for i in range(kernel.shape[1]): if target_data_format == 'channels_first': c, h, w = previous_feature_map_shape original_fm_shape = (h, w, c) ki = kernel[:, i].reshape(original_fm_shape) ki = np.transpose(ki, (2, 0, 1)) # last -> first else: h, w, c = previous_feature_map_shape original_fm_shape = (c, h, w) ki = kernel[:, i].reshape(original_fm_shape) ki = np.transpose(ki, (1, 2, 0)) # first -> last kernel[:, i] = np.reshape(ki, (np.prod(previous_feature_map_shape),)) dense.set_weights([kernel, bias]) def is_builtin_layer(layer): if not getattr(layer, '_keras_api_names', None): return False # Subclasses of `Layer` that are not exported inherit the export name # of the base layer class. return (layer._keras_api_names != ('keras.layers.Layer',) and layer._keras_api_names_v1 != ('keras.layers.Layer',))
tensorflow-master
tensorflow/python/keras/utils/layer_utils.py
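An illustrative sketch for `count_params` in layer_utils.py above; the layer sizes are arbitrary, and the model-building calls follow the same style as the io_utils test elsewhere in this collection:

from tensorflow.python import keras
from tensorflow.python.keras.utils import layer_utils

model = keras.models.Sequential()
model.add(keras.layers.Dense(4, input_shape=(10,)))
# 10 * 4 kernel weights + 4 biases = 44 trainable scalars.
print(layer_utils.count_params(model.trainable_weights))  # 44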
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for io_utils.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import shutil import numpy as np import six from tensorflow.python import keras from tensorflow.python.keras.utils import io_utils from tensorflow.python.platform import test try: import h5py # pylint:disable=g-import-not-at-top except ImportError: h5py = None def create_dataset(h5_path='test.h5'): x = np.random.randn(200, 10).astype('float32') y = np.random.randint(0, 2, size=(200, 1)) f = h5py.File(h5_path, 'w') # Creating dataset to store features x_dset = f.create_dataset('my_data', (200, 10), dtype='f') x_dset[:] = x # Creating dataset to store labels y_dset = f.create_dataset('my_labels', (200, 1), dtype='i') y_dset[:] = y f.close() class TestIOUtils(test.TestCase): def test_HDF5Matrix(self): if h5py is None: return temp_dir = self.get_temp_dir() self.addCleanup(shutil.rmtree, temp_dir) h5_path = os.path.join(temp_dir, 'test.h5') create_dataset(h5_path) # Instantiating HDF5Matrix for the training set, # which is a slice of the first 150 elements x_train = io_utils.HDF5Matrix(h5_path, 'my_data', start=0, end=150) y_train = io_utils.HDF5Matrix(h5_path, 'my_labels', start=0, end=150) # Likewise for the test set x_test = io_utils.HDF5Matrix(h5_path, 'my_data', start=150, end=200) y_test = io_utils.HDF5Matrix(h5_path, 'my_labels', start=150, end=200) # HDF5Matrix behave more or less like Numpy matrices # with regard to indexing self.assertEqual(y_train.shape, (150, 1)) # But they do not support negative indices, so don't try print(x_train[-1]) self.assertEqual(y_train.dtype, np.dtype('i')) self.assertEqual(y_train.ndim, 2) self.assertEqual(y_train.size, 150) model = keras.models.Sequential() model.add(keras.layers.Dense(64, input_shape=(10,), activation='relu')) model.add(keras.layers.Dense(1, activation='sigmoid')) model.compile(loss='binary_crossentropy', optimizer='sgd') # Note: you have to use shuffle='batch' or False with HDF5Matrix model.fit(x_train, y_train, batch_size=32, shuffle='batch', verbose=False) # test that evalutation and prediction # don't crash and return reasonable results out_pred = model.predict(x_test, batch_size=32, verbose=False) out_eval = model.evaluate(x_test, y_test, batch_size=32, verbose=False) self.assertEqual(out_pred.shape, (50, 1)) self.assertEqual(out_eval.shape, ()) self.assertGreater(out_eval, 0) # test slicing for shortened array self.assertEqual(len(x_train[0:]), len(x_train)) # test __getitem__ invalid use cases with self.assertRaises(IndexError): _ = x_train[1000] with self.assertRaises(IndexError): _ = x_train[1000: 1001] with self.assertRaises(IndexError): _ = x_train[[1000, 1001]] with self.assertRaises(IndexError): _ = x_train[six.moves.range(1000, 1001)] with self.assertRaises(IndexError): _ = x_train[np.array([1000])] with 
self.assertRaises(TypeError): _ = x_train[None] # test normalizer normalizer = lambda x: x + 1 normalized_x_train = io_utils.HDF5Matrix( h5_path, 'my_data', start=0, end=150, normalizer=normalizer) self.assertAllClose(normalized_x_train[0][0], x_train[0][0] + 1) def test_ask_to_proceed_with_overwrite(self): with test.mock.patch.object(six.moves, 'input') as mock_log: mock_log.return_value = 'y' self.assertTrue(io_utils.ask_to_proceed_with_overwrite('/tmp/not_exists')) mock_log.return_value = 'n' self.assertFalse( io_utils.ask_to_proceed_with_overwrite('/tmp/not_exists')) mock_log.side_effect = ['m', 'y'] self.assertTrue(io_utils.ask_to_proceed_with_overwrite('/tmp/not_exists')) mock_log.side_effect = ['m', 'n'] self.assertFalse( io_utils.ask_to_proceed_with_overwrite('/tmp/not_exists')) if __name__ == '__main__': test.main()
tensorflow-master
tensorflow/python/keras/utils/io_utils_test.py
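The test record above exercises `io_utils.HDF5Matrix`, which wraps an HDF5 dataset so Keras reads training data lazily from disk. The following is a minimal standalone sketch of that pattern, not part of the dataset row; it assumes `h5py` and a TensorFlow build matching this repository snapshot, and the file and dataset names are illustrative.

```python
import h5py
import numpy as np
from tensorflow.python import keras
from tensorflow.python.keras.utils import io_utils

# Write a small feature/label pair to disk once.
with h5py.File('demo.h5', 'w') as f:
  f.create_dataset('features', data=np.random.randn(100, 10).astype('float32'))
  f.create_dataset('labels', data=np.random.randint(0, 2, size=(100, 1)))

# Wrap slices of the on-disk datasets. Indexing is NumPy-like, but negative
# indices are unsupported and fit() requires shuffle='batch' or shuffle=False.
x_train = io_utils.HDF5Matrix('demo.h5', 'features', start=0, end=80)
y_train = io_utils.HDF5Matrix('demo.h5', 'labels', start=0, end=80)

model = keras.models.Sequential([
    keras.layers.Dense(8, input_shape=(10,), activation='relu'),
    keras.layers.Dense(1, activation='sigmoid'),
])
model.compile(loss='binary_crossentropy', optimizer='sgd')
model.fit(x_train, y_train, batch_size=16, shuffle='batch', verbose=0)
```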
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Python utilities required by Keras.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import binascii import codecs import marshal import os import re import sys import time import types as python_types import numpy as np import six from tensorflow.python.util import nest from tensorflow.python.util import tf_decorator from tensorflow.python.util import tf_inspect from tensorflow.python.util.tf_export import keras_export _GLOBAL_CUSTOM_OBJECTS = {} @keras_export('keras.utils.CustomObjectScope') class CustomObjectScope(object): """Provides a scope that changes to `_GLOBAL_CUSTOM_OBJECTS` cannot escape. Code within a `with` statement will be able to access custom objects by name. Changes to global custom objects persist within the enclosing `with` statement. At end of the `with` statement, global custom objects are reverted to state at beginning of the `with` statement. Example: Consider a custom object `MyObject` (e.g. a class): ```python with CustomObjectScope({'MyObject':MyObject}): layer = Dense(..., kernel_regularizer='MyObject') # save, load, etc. will recognize custom object by name ``` """ def __init__(self, *args): self.custom_objects = args self.backup = None def __enter__(self): self.backup = _GLOBAL_CUSTOM_OBJECTS.copy() for objects in self.custom_objects: _GLOBAL_CUSTOM_OBJECTS.update(objects) return self def __exit__(self, *args, **kwargs): _GLOBAL_CUSTOM_OBJECTS.clear() _GLOBAL_CUSTOM_OBJECTS.update(self.backup) @keras_export('keras.utils.custom_object_scope') def custom_object_scope(*args): """Provides a scope that changes to `_GLOBAL_CUSTOM_OBJECTS` cannot escape. Convenience wrapper for `CustomObjectScope`. Code within a `with` statement will be able to access custom objects by name. Changes to global custom objects persist within the enclosing `with` statement. At end of the `with` statement, global custom objects are reverted to state at beginning of the `with` statement. Example: Consider a custom object `MyObject` ```python with custom_object_scope({'MyObject':MyObject}): layer = Dense(..., kernel_regularizer='MyObject') # save, load, etc. will recognize custom object by name ``` Arguments: *args: Variable length list of dictionaries of name, class pairs to add to custom objects. Returns: Object of type `CustomObjectScope`. """ return CustomObjectScope(*args) @keras_export('keras.utils.get_custom_objects') def get_custom_objects(): """Retrieves a live reference to the global dictionary of custom objects. Updating and clearing custom objects using `custom_object_scope` is preferred, but `get_custom_objects` can be used to directly access `_GLOBAL_CUSTOM_OBJECTS`. Example: ```python get_custom_objects().clear() get_custom_objects()['MyObject'] = MyObject ``` Returns: Global dictionary of names to classes (`_GLOBAL_CUSTOM_OBJECTS`). 
""" return _GLOBAL_CUSTOM_OBJECTS def serialize_keras_class_and_config(cls_name, cls_config): """Returns the serialization of the class with the given config.""" return {'class_name': cls_name, 'config': cls_config} @keras_export('keras.utils.serialize_keras_object') def serialize_keras_object(instance): _, instance = tf_decorator.unwrap(instance) if instance is None: return None if hasattr(instance, 'get_config'): return serialize_keras_class_and_config(instance.__class__.__name__, instance.get_config()) if hasattr(instance, '__name__'): return instance.__name__ raise ValueError('Cannot serialize', instance) def class_and_config_for_serialized_keras_object( config, module_objects=None, custom_objects=None, printable_module_name='object'): """Returns the class name and config for a serialized keras object.""" if (not isinstance(config, dict) or 'class_name' not in config or 'config' not in config): raise ValueError('Improper config format: ' + str(config)) class_name = config['class_name'] if custom_objects and class_name in custom_objects: cls = custom_objects[class_name] elif class_name in _GLOBAL_CUSTOM_OBJECTS: cls = _GLOBAL_CUSTOM_OBJECTS[class_name] else: module_objects = module_objects or {} cls = module_objects.get(class_name) if cls is None: raise ValueError('Unknown ' + printable_module_name + ': ' + class_name) return (cls, config['config']) @keras_export('keras.utils.deserialize_keras_object') def deserialize_keras_object(identifier, module_objects=None, custom_objects=None, printable_module_name='object'): if identifier is None: return None if isinstance(identifier, dict): # In this case we are dealing with a Keras config dictionary. config = identifier (cls, cls_config) = class_and_config_for_serialized_keras_object( config, module_objects, custom_objects, printable_module_name) if hasattr(cls, 'from_config'): arg_spec = tf_inspect.getfullargspec(cls.from_config) custom_objects = custom_objects or {} if 'custom_objects' in arg_spec.args: return cls.from_config( cls_config, custom_objects=dict( list(_GLOBAL_CUSTOM_OBJECTS.items()) + list(custom_objects.items()))) with CustomObjectScope(custom_objects): return cls.from_config(cls_config) else: # Then `cls` may be a function returning a class. # in this case by convention `config` holds # the kwargs of the function. custom_objects = custom_objects or {} with CustomObjectScope(custom_objects): return cls(**cls_config) elif isinstance(identifier, six.string_types): object_name = identifier if custom_objects and object_name in custom_objects: obj = custom_objects.get(object_name) elif object_name in _GLOBAL_CUSTOM_OBJECTS: obj = _GLOBAL_CUSTOM_OBJECTS[object_name] else: obj = module_objects.get(object_name) if obj is None: raise ValueError('Unknown ' + printable_module_name + ':' + object_name) # Classes passed by name are instantiated with no args, functions are # returned as-is. if tf_inspect.isclass(obj): return obj() return obj else: raise ValueError('Could not interpret serialized ' + printable_module_name + ': ' + identifier) def func_dump(func): """Serializes a user defined function. Arguments: func: the function to serialize. Returns: A tuple `(code, defaults, closure)`. 
""" if os.name == 'nt': raw_code = marshal.dumps(func.__code__).replace(b'\\', b'/') code = codecs.encode(raw_code, 'base64').decode('ascii') else: raw_code = marshal.dumps(func.__code__) code = codecs.encode(raw_code, 'base64').decode('ascii') defaults = func.__defaults__ if func.__closure__: closure = tuple(c.cell_contents for c in func.__closure__) else: closure = None return code, defaults, closure def func_load(code, defaults=None, closure=None, globs=None): """Deserializes a user defined function. Arguments: code: bytecode of the function. defaults: defaults of the function. closure: closure of the function. globs: dictionary of global objects. Returns: A function object. """ if isinstance(code, (tuple, list)): # unpack previous dump code, defaults, closure = code if isinstance(defaults, list): defaults = tuple(defaults) def ensure_value_to_cell(value): """Ensures that a value is converted to a python cell object. Arguments: value: Any value that needs to be casted to the cell type Returns: A value wrapped as a cell object (see function "func_load") """ def dummy_fn(): # pylint: disable=pointless-statement value # just access it so it gets captured in .__closure__ cell_value = dummy_fn.__closure__[0] if not isinstance(value, type(cell_value)): return cell_value return value if closure is not None: closure = tuple(ensure_value_to_cell(_) for _ in closure) try: raw_code = codecs.decode(code.encode('ascii'), 'base64') except (UnicodeEncodeError, binascii.Error): raw_code = code.encode('raw_unicode_escape') code = marshal.loads(raw_code) if globs is None: globs = globals() return python_types.FunctionType( code, globs, name=code.co_name, argdefs=defaults, closure=closure) def has_arg(fn, name, accept_all=False): """Checks if a callable accepts a given keyword argument. Arguments: fn: Callable to inspect. name: Check if `fn` can be called with `name` as a keyword argument. accept_all: What to return if there is no parameter called `name` but the function accepts a `**kwargs` argument. Returns: bool, whether `fn` accepts a `name` keyword argument. """ arg_spec = tf_inspect.getfullargspec(fn) if accept_all and arg_spec.varkw is not None: return True return name in arg_spec.args @keras_export('keras.utils.Progbar') class Progbar(object): """Displays a progress bar. Arguments: target: Total number of steps expected, None if unknown. width: Progress bar width on screen. verbose: Verbosity mode, 0 (silent), 1 (verbose), 2 (semi-verbose) stateful_metrics: Iterable of string names of metrics that should *not* be averaged over time. Metrics in this list will be displayed as-is. All others will be averaged by the progbar before display. interval: Minimum visual progress update interval (in seconds). unit_name: Display name for step counts (usually "step" or "sample"). 
""" def __init__(self, target, width=30, verbose=1, interval=0.05, stateful_metrics=None, unit_name='step'): self.target = target self.width = width self.verbose = verbose self.interval = interval self.unit_name = unit_name if stateful_metrics: self.stateful_metrics = set(stateful_metrics) else: self.stateful_metrics = set() self._dynamic_display = ((hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()) or 'ipykernel' in sys.modules or 'posix' in sys.modules) self._total_width = 0 self._seen_so_far = 0 # We use a dict + list to avoid garbage collection # issues found in OrderedDict self._values = {} self._values_order = [] self._start = time.time() self._last_update = 0 def update(self, current, values=None): """Updates the progress bar. Arguments: current: Index of current step. values: List of tuples: `(name, value_for_last_step)`. If `name` is in `stateful_metrics`, `value_for_last_step` will be displayed as-is. Else, an average of the metric over time will be displayed. """ values = values or [] for k, v in values: if k not in self._values_order: self._values_order.append(k) if k not in self.stateful_metrics: if k not in self._values: self._values[k] = [v * (current - self._seen_so_far), current - self._seen_so_far] else: self._values[k][0] += v * (current - self._seen_so_far) self._values[k][1] += (current - self._seen_so_far) else: # Stateful metrics output a numeric value. This representation # means "take an average from a single value" but keeps the # numeric formatting. self._values[k] = [v, 1] self._seen_so_far = current now = time.time() info = ' - %.0fs' % (now - self._start) if self.verbose == 1: if (now - self._last_update < self.interval and self.target is not None and current < self.target): return prev_total_width = self._total_width if self._dynamic_display: sys.stdout.write('\b' * prev_total_width) sys.stdout.write('\r') else: sys.stdout.write('\n') if self.target is not None: numdigits = int(np.log10(self.target)) + 1 bar = ('%' + str(numdigits) + 'd/%d [') % (current, self.target) prog = float(current) / self.target prog_width = int(self.width * prog) if prog_width > 0: bar += ('=' * (prog_width - 1)) if current < self.target: bar += '>' else: bar += '=' bar += ('.' 
* (self.width - prog_width)) bar += ']' else: bar = '%7d/Unknown' % current self._total_width = len(bar) sys.stdout.write(bar) if current: time_per_unit = (now - self._start) / current else: time_per_unit = 0 if self.target is not None and current < self.target: eta = time_per_unit * (self.target - current) if eta > 3600: eta_format = '%d:%02d:%02d' % (eta // 3600, (eta % 3600) // 60, eta % 60) elif eta > 60: eta_format = '%d:%02d' % (eta // 60, eta % 60) else: eta_format = '%ds' % eta info = ' - ETA: %s' % eta_format else: if time_per_unit >= 1 or time_per_unit == 0: info += ' %.0fs/%s' % (time_per_unit, self.unit_name) elif time_per_unit >= 1e-3: info += ' %.0fms/%s' % (time_per_unit * 1e3, self.unit_name) else: info += ' %.0fus/%s' % (time_per_unit * 1e6, self.unit_name) for k in self._values_order: info += ' - %s:' % k if isinstance(self._values[k], list): avg = np.mean(self._values[k][0] / max(1, self._values[k][1])) if abs(avg) > 1e-3: info += ' %.4f' % avg else: info += ' %.4e' % avg else: info += ' %s' % self._values[k] self._total_width += len(info) if prev_total_width > self._total_width: info += (' ' * (prev_total_width - self._total_width)) if self.target is not None and current >= self.target: info += '\n' sys.stdout.write(info) sys.stdout.flush() elif self.verbose == 2: if self.target is not None and current >= self.target: numdigits = int(np.log10(self.target)) + 1 count = ('%' + str(numdigits) + 'd/%d') % (current, self.target) info = count + info for k in self._values_order: info += ' - %s:' % k avg = np.mean(self._values[k][0] / max(1, self._values[k][1])) if avg > 1e-3: info += ' %.4f' % avg else: info += ' %.4e' % avg info += '\n' sys.stdout.write(info) sys.stdout.flush() self._last_update = now def add(self, n, values=None): self.update(self._seen_so_far + n, values) def make_batches(size, batch_size): """Returns a list of batch indices (tuples of indices). Arguments: size: Integer, total size of the data to slice into batches. batch_size: Integer, batch size. Returns: A list of tuples of array indices. """ num_batches = int(np.ceil(size / float(batch_size))) return [(i * batch_size, min(size, (i + 1) * batch_size)) for i in range(0, num_batches)] def slice_arrays(arrays, start=None, stop=None): """Slice an array or list of arrays. This takes an array-like, or a list of array-likes, and outputs: - arrays[start:stop] if `arrays` is an array-like - [x[start:stop] for x in arrays] if `arrays` is a list Can also work on list/array of indices: `slice_arrays(x, indices)` Arguments: arrays: Single array or list of arrays. start: can be an integer index (start index) or a list/array of indices stop: integer (stop index); should be None if `start` was a list. Returns: A slice of the array(s). Raises: ValueError: If the value of start is a list and stop is not None. 
""" if arrays is None: return [None] if isinstance(start, list) and stop is not None: raise ValueError('The stop argument has to be None if the value of start ' 'is a list.') elif isinstance(arrays, list): if hasattr(start, '__len__'): # hdf5 datasets only support list objects as indices if hasattr(start, 'shape'): start = start.tolist() return [None if x is None else x[start] for x in arrays] return [ None if x is None else None if not hasattr(x, '__getitem__') else x[start:stop] for x in arrays ] else: if hasattr(start, '__len__'): if hasattr(start, 'shape'): start = start.tolist() return arrays[start] if hasattr(start, '__getitem__'): return arrays[start:stop] return [None] def to_list(x): """Normalizes a list/tensor into a list. If a tensor is passed, we return a list of size 1 containing the tensor. Arguments: x: target object to be normalized. Returns: A list. """ if isinstance(x, list): return x return [x] def object_list_uid(object_list): """Creates a single string from object ids.""" object_list = nest.flatten(object_list) return ', '.join([str(abs(id(x))) for x in object_list]) def to_snake_case(name): intermediate = re.sub('(.)([A-Z][a-z0-9]+)', r'\1_\2', name) insecure = re.sub('([a-z])([A-Z])', r'\1_\2', intermediate).lower() # If the class is private the name starts with "_" which is not secure # for creating scopes. We prefix the name with "private" in this case. if insecure[0] != '_': return insecure return 'private' + insecure def is_all_none(structure): iterable = nest.flatten(structure) # We cannot use Python's `any` because the iterable may return Tensors. for element in iterable: if element is not None: return False return True def check_for_unexpected_keys(name, input_dict, expected_values): unknown = set(input_dict.keys()).difference(expected_values) if unknown: raise ValueError('Unknown entries in {} dictionary: {}. Only expected ' 'following keys: {}'.format(name, list(unknown), expected_values)) def validate_kwargs(kwargs, allowed_kwargs, error_message='Keyword argument not understood:'): """Checks that all keyword arguments are in the set of allowed keys.""" for kwarg in kwargs: if kwarg not in allowed_kwargs: raise TypeError(error_message, kwarg)
tensorflow-master
tensorflow/python/keras/utils/generic_utils.py
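As a quick illustration of the (de)serialization helpers defined in the record above, the sketch below round-trips a user-defined class through `serialize_keras_object` and `deserialize_keras_object`. `MyRegularizer` is a made-up example class, not part of the module.

```python
from tensorflow.python.keras.utils import generic_utils

class MyRegularizer(object):
  """Toy object implementing the get_config/from_config protocol."""

  def __init__(self, rate=0.1):
    self.rate = rate

  def get_config(self):
    return {'rate': self.rate}

  @classmethod
  def from_config(cls, config):
    return cls(**config)

# serialize_keras_object emits the standard {'class_name', 'config'} dict.
config = generic_utils.serialize_keras_object(MyRegularizer(rate=0.5))
assert config == {'class_name': 'MyRegularizer', 'config': {'rate': 0.5}}

# deserialize_keras_object resolves the class via custom_objects (or the
# global custom-object registry) and rebuilds the instance from its config.
restored = generic_utils.deserialize_keras_object(
    config, custom_objects={'MyRegularizer': MyRegularizer})
assert isinstance(restored, MyRegularizer) and restored.rate == 0.5
```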
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Keras generic Python utils.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python import keras from tensorflow.python.platform import test class HasArgTest(test.TestCase): def test_has_arg(self): def f_x(x): return x def f_x_args(x, *args): _ = args return x def f_x_kwargs(x, **kwargs): _ = kwargs return x self.assertTrue(keras.utils.generic_utils.has_arg( f_x, 'x', accept_all=False)) self.assertFalse(keras.utils.generic_utils.has_arg( f_x, 'y', accept_all=False)) self.assertTrue(keras.utils.generic_utils.has_arg( f_x_args, 'x', accept_all=False)) self.assertFalse(keras.utils.generic_utils.has_arg( f_x_args, 'y', accept_all=False)) self.assertTrue(keras.utils.generic_utils.has_arg( f_x_kwargs, 'x', accept_all=False)) self.assertFalse(keras.utils.generic_utils.has_arg( f_x_kwargs, 'y', accept_all=False)) self.assertTrue(keras.utils.generic_utils.has_arg( f_x_kwargs, 'y', accept_all=True)) class TestCustomObjectScope(test.TestCase): def test_custom_object_scope(self): def custom_fn(): pass class CustomClass(object): pass with keras.utils.generic_utils.custom_object_scope( {'CustomClass': CustomClass, 'custom_fn': custom_fn}): act = keras.activations.get('custom_fn') self.assertEqual(act, custom_fn) cl = keras.regularizers.get('CustomClass') self.assertEqual(cl.__class__, CustomClass) class SerializeKerasObjectTest(test.TestCase): def test_serialize_none(self): serialized = keras.utils.generic_utils.serialize_keras_object(None) self.assertEqual(serialized, None) deserialized = keras.utils.generic_utils.deserialize_keras_object( serialized) self.assertEqual(deserialized, None) class SliceArraysTest(test.TestCase): def test_slice_arrays(self): input_a = list([1, 2, 3]) self.assertEqual( keras.utils.generic_utils.slice_arrays(input_a, start=0), [None, None, None]) self.assertEqual( keras.utils.generic_utils.slice_arrays(input_a, stop=3), [None, None, None]) self.assertEqual( keras.utils.generic_utils.slice_arrays(input_a, start=0, stop=1), [None, None, None]) if __name__ == '__main__': test.main()
tensorflow-master
tensorflow/python/keras/utils/generic_utils_test.py
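A small self-contained sketch of the `has_arg` behavior checked by the test above; the callables here are made-up examples.

```python
from tensorflow.python.keras.utils import generic_utils

def loss_fn(y_true, y_pred, sample_weight=None):
  return (y_true - y_pred) * (1 if sample_weight is None else sample_weight)

# has_arg reports whether a callable accepts the named keyword argument.
assert generic_utils.has_arg(loss_fn, 'sample_weight')
assert not generic_utils.has_arg(loss_fn, 'mask')

def catch_all(**kwargs):
  return kwargs

# A **kwargs catch-all only counts when accept_all=True is passed.
assert not generic_utils.has_arg(catch_all, 'mask')
assert generic_utils.has_arg(catch_all, 'mask', accept_all=True)
```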
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for np_utils.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python import keras from tensorflow.python.platform import test class TestNPUtils(test.TestCase): def test_to_categorical(self): num_classes = 5 shapes = [(1,), (3,), (4, 3), (5, 4, 3), (3, 1), (3, 2, 1)] expected_shapes = [(1, num_classes), (3, num_classes), (4, 3, num_classes), (5, 4, 3, num_classes), (3, num_classes), (3, 2, num_classes)] labels = [np.random.randint(0, num_classes, shape) for shape in shapes] one_hots = [ keras.utils.to_categorical(label, num_classes) for label in labels] for label, one_hot, expected_shape in zip(labels, one_hots, expected_shapes): # Check shape self.assertEqual(one_hot.shape, expected_shape) # Make sure there is only one 1 in a row self.assertTrue(np.all(one_hot.sum(axis=-1) == 1)) # Get original labels back from one hots self.assertTrue(np.all( np.argmax(one_hot, -1).reshape(label.shape) == label)) if __name__ == '__main__': test.main()
tensorflow-master
tensorflow/python/keras/utils/np_utils_test.py
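For reference, `to_categorical` (the utility under test above) one-hot encodes integer class labels. A minimal sketch, with illustrative data:

```python
import numpy as np
from tensorflow.python import keras

labels = np.array([0, 2, 1, 2])
one_hot = keras.utils.to_categorical(labels, num_classes=3)

# Shape is (4, 3); each row contains a single 1 at the label's index.
assert one_hot.shape == (4, 3)
assert np.array_equal(np.argmax(one_hot, axis=-1), labels)
```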
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for data_utils.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from itertools import cycle import os import tarfile import threading import unittest import zipfile import numpy as np from six.moves.urllib.parse import urljoin from six.moves.urllib.request import pathname2url from tensorflow.python import keras from tensorflow.python.platform import test class TestGetFileAndValidateIt(test.TestCase): def test_get_file_and_validate_it(self): """Tests get_file from a url, plus extraction and validation. """ dest_dir = self.get_temp_dir() orig_dir = self.get_temp_dir() text_file_path = os.path.join(orig_dir, 'test.txt') zip_file_path = os.path.join(orig_dir, 'test.zip') tar_file_path = os.path.join(orig_dir, 'test.tar.gz') with open(text_file_path, 'w') as text_file: text_file.write('Float like a butterfly, sting like a bee.') with tarfile.open(tar_file_path, 'w:gz') as tar_file: tar_file.add(text_file_path) with zipfile.ZipFile(zip_file_path, 'w') as zip_file: zip_file.write(text_file_path) origin = urljoin('file://', pathname2url(os.path.abspath(tar_file_path))) path = keras.utils.data_utils.get_file('test.txt', origin, untar=True, cache_subdir=dest_dir) filepath = path + '.tar.gz' hashval_sha256 = keras.utils.data_utils._hash_file(filepath) hashval_md5 = keras.utils.data_utils._hash_file(filepath, algorithm='md5') path = keras.utils.data_utils.get_file( 'test.txt', origin, md5_hash=hashval_md5, untar=True, cache_subdir=dest_dir) path = keras.utils.data_utils.get_file( filepath, origin, file_hash=hashval_sha256, extract=True, cache_subdir=dest_dir) self.assertTrue(os.path.exists(filepath)) self.assertTrue(keras.utils.data_utils.validate_file(filepath, hashval_sha256)) self.assertTrue(keras.utils.data_utils.validate_file(filepath, hashval_md5)) os.remove(filepath) origin = urljoin('file://', pathname2url(os.path.abspath(zip_file_path))) hashval_sha256 = keras.utils.data_utils._hash_file(zip_file_path) hashval_md5 = keras.utils.data_utils._hash_file(zip_file_path, algorithm='md5') path = keras.utils.data_utils.get_file( 'test', origin, md5_hash=hashval_md5, extract=True, cache_subdir=dest_dir) path = keras.utils.data_utils.get_file( 'test', origin, file_hash=hashval_sha256, extract=True, cache_subdir=dest_dir) self.assertTrue(os.path.exists(path)) self.assertTrue(keras.utils.data_utils.validate_file(path, hashval_sha256)) self.assertTrue(keras.utils.data_utils.validate_file(path, hashval_md5)) class ThreadsafeIter(object): def __init__(self, it): self.it = it self.lock = threading.Lock() # After a generator throws an exception all subsequent next() calls raise a # StopIteration Exception. 
This, however, presents an issue when mixing # generators and threading because it means the order of retrieval need not # match the order in which the generator was called. This can make it appear # that a generator exited normally when in fact the terminating exception is # just in a different thread. In order to provide thread safety, once # self.it has thrown an exception we continue to throw the same exception. self._exception = None def __iter__(self): return self def __next__(self): return self.next() def next(self): with self.lock: if self._exception: raise self._exception # pylint: disable=raising-bad-type try: return next(self.it) except Exception as e: self._exception = e raise def threadsafe_generator(f): def g(*a, **kw): return ThreadsafeIter(f(*a, **kw)) return g class TestSequence(keras.utils.data_utils.Sequence): def __init__(self, shape, value=1.): self.shape = shape self.inner = value def __getitem__(self, item): return np.ones(self.shape, dtype=np.uint32) * item * self.inner def __len__(self): return 100 def on_epoch_end(self): self.inner *= 5.0 class FaultSequence(keras.utils.data_utils.Sequence): def __getitem__(self, item): raise IndexError(item, 'item is not present') def __len__(self): return 100 @threadsafe_generator def create_generator_from_sequence_threads(ds): for i in cycle(range(len(ds))): yield ds[i] def create_generator_from_sequence_pcs(ds): for i in cycle(range(len(ds))): yield ds[i] class TestEnqueuers(test.TestCase): def test_generator_enqueuer_threads(self): enqueuer = keras.utils.data_utils.GeneratorEnqueuer( create_generator_from_sequence_threads(TestSequence([3, 200, 200, 3])), use_multiprocessing=False) enqueuer.start(3, 10) gen_output = enqueuer.get() acc = [] for _ in range(100): acc.append(int(next(gen_output)[0, 0, 0, 0])) self.assertEqual(len(set(acc) - set(range(100))), 0) enqueuer.stop() @unittest.skipIf( os.name == 'nt', 'use_multiprocessing=True does not work on windows properly.') def test_generator_enqueuer_processes(self): enqueuer = keras.utils.data_utils.GeneratorEnqueuer( create_generator_from_sequence_pcs(TestSequence([3, 200, 200, 3])), use_multiprocessing=True) enqueuer.start(3, 10) gen_output = enqueuer.get() acc = [] for _ in range(100): acc.append(int(next(gen_output)[0, 0, 0, 0])) self.assertNotEqual(acc, list(range(100))) enqueuer.stop() def test_generator_enqueuer_fail_threads(self): enqueuer = keras.utils.data_utils.GeneratorEnqueuer( create_generator_from_sequence_threads(FaultSequence()), use_multiprocessing=False) enqueuer.start(3, 10) gen_output = enqueuer.get() with self.assertRaises(IndexError): next(gen_output) @unittest.skipIf( os.name == 'nt', 'use_multiprocessing=True does not work on windows properly.') def test_generator_enqueuer_fail_processes(self): enqueuer = keras.utils.data_utils.GeneratorEnqueuer( create_generator_from_sequence_pcs(FaultSequence()), use_multiprocessing=True) enqueuer.start(3, 10) gen_output = enqueuer.get() with self.assertRaises(IndexError): next(gen_output) def test_ordered_enqueuer_threads(self): enqueuer = keras.utils.data_utils.OrderedEnqueuer( TestSequence([3, 200, 200, 3]), use_multiprocessing=False) enqueuer.start(3, 10) gen_output = enqueuer.get() acc = [] for _ in range(100): acc.append(next(gen_output)[0, 0, 0, 0]) self.assertEqual(acc, list(range(100))) enqueuer.stop() def test_ordered_enqueuer_processes(self): enqueuer = keras.utils.data_utils.OrderedEnqueuer( TestSequence([3, 200, 200, 3]), use_multiprocessing=True) enqueuer.start(3, 10) gen_output = enqueuer.get() acc = [] 
for _ in range(100): acc.append(next(gen_output)[0, 0, 0, 0]) self.assertEqual(acc, list(range(100))) enqueuer.stop() def test_ordered_enqueuer_fail_threads(self): enqueuer = keras.utils.data_utils.OrderedEnqueuer( FaultSequence(), use_multiprocessing=False) enqueuer.start(3, 10) gen_output = enqueuer.get() with self.assertRaises(IndexError): next(gen_output) def test_ordered_enqueuer_fail_processes(self): enqueuer = keras.utils.data_utils.OrderedEnqueuer( FaultSequence(), use_multiprocessing=True) enqueuer.start(3, 10) gen_output = enqueuer.get() with self.assertRaises(IndexError): next(gen_output) def test_on_epoch_end_processes(self): enqueuer = keras.utils.data_utils.OrderedEnqueuer( TestSequence([3, 200, 200, 3]), use_multiprocessing=True) enqueuer.start(3, 10) gen_output = enqueuer.get() acc = [] for _ in range(200): acc.append(next(gen_output)[0, 0, 0, 0]) # Check that order was kept in GeneratorEnqueuer with processes self.assertEqual(acc[100:], list([k * 5 for k in range(100)])) enqueuer.stop() def test_context_switch(self): enqueuer = keras.utils.data_utils.OrderedEnqueuer( TestSequence([3, 200, 200, 3]), use_multiprocessing=True) enqueuer2 = keras.utils.data_utils.OrderedEnqueuer( TestSequence([3, 200, 200, 3], value=15), use_multiprocessing=True) enqueuer.start(3, 10) enqueuer2.start(3, 10) gen_output = enqueuer.get() gen_output2 = enqueuer2.get() acc = [] for _ in range(100): acc.append(next(gen_output)[0, 0, 0, 0]) self.assertEqual(acc[-1], 99) # One epoch is completed so enqueuer will switch the Sequence acc = [] for _ in range(100): acc.append(next(gen_output2)[0, 0, 0, 0]) self.assertEqual(acc[-1], 99 * 15) # One epoch has been completed so enqueuer2 will switch # Be sure that both Sequences were updated self.assertEqual(next(gen_output)[0, 0, 0, 0], 0) self.assertEqual(next(gen_output)[0, 0, 0, 0], 5) self.assertEqual(next(gen_output2)[0, 0, 0, 0], 0) self.assertEqual(next(gen_output2)[0, 0, 0, 0], 15 * 5) # Tear down everything enqueuer.stop() enqueuer2.stop() def test_on_epoch_end_threads(self): enqueuer = keras.utils.data_utils.OrderedEnqueuer( TestSequence([3, 200, 200, 3]), use_multiprocessing=False) enqueuer.start(3, 10) gen_output = enqueuer.get() acc = [] for _ in range(100): acc.append(next(gen_output)[0, 0, 0, 0]) acc = [] for _ in range(100): acc.append(next(gen_output)[0, 0, 0, 0]) # Check that order was kept in GeneratorEnqueuer with threads self.assertEqual(acc, list([k * 5 for k in range(100)])) enqueuer.stop() if __name__ == '__main__': # Bazel sets these environment variables to very long paths. # Tempfile uses them to create long paths, and in turn multiprocessing # library tries to create sockets named after paths. Delete whatever bazel # writes to these to avoid tests failing due to socket addresses being too # long. for var in ('TMPDIR', 'TMP', 'TEMP'): if var in os.environ: del os.environ[var] test.main()
tensorflow-master
tensorflow/python/keras/utils/data_utils_test.py
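The enqueuer tests above revolve around the `Sequence` protocol (`__len__` plus index-based `__getitem__`). The sketch below shows the same pattern outside the test harness; `SquaresSequence` is an illustrative class, not part of the module.

```python
import numpy as np
from tensorflow.python import keras

class SquaresSequence(keras.utils.data_utils.Sequence):
  """Yields one deterministic batch per index."""

  def __init__(self, num_batches=10, batch_size=4):
    self.num_batches = num_batches
    self.batch_size = batch_size

  def __len__(self):
    return self.num_batches

  def __getitem__(self, index):
    x = np.full((self.batch_size, 1), index, dtype=np.float32)
    return x, x ** 2

# OrderedEnqueuer preserves batch order even with several worker threads.
enqueuer = keras.utils.data_utils.OrderedEnqueuer(
    SquaresSequence(), use_multiprocessing=False)
enqueuer.start(workers=2, max_queue_size=4)
batches = enqueuer.get()
first_x, first_y = next(batches)  # batch 0: all zeros
enqueuer.stop()
```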
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Keras composite tensor support.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import scipy.sparse from tensorflow.python import keras from tensorflow.python.data.ops import dataset_ops from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.keras import keras_parameterized from tensorflow.python.keras import testing_utils from tensorflow.python.keras.engine import input_layer from tensorflow.python.keras.layers import core from tensorflow.python.keras.layers import Layer from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import sparse_ops from tensorflow.python.ops.ragged import ragged_tensor from tensorflow.python.ops.ragged import ragged_test_util from tensorflow.python.platform import test # Define test-only Layer classes to validate passing Sparse and Ragged tensors # between layers. class ToDense(Layer): """Create a dense (standard) tensor from the given input tensor.""" def __init__(self, default_value, **kwargs): super(ToDense, self).__init__(**kwargs) self._default_value = default_value def call(self, inputs): if isinstance(inputs, ragged_tensor.RaggedTensor): return inputs.to_tensor(default_value=self._default_value) elif isinstance(inputs, sparse_tensor.SparseTensor): return sparse_ops.sparse_tensor_to_dense( inputs, default_value=self._default_value) elif isinstance(inputs, ops.Tensor): return inputs else: raise TypeError("Unexpected tensor type %s" % type(inputs).__name__) class ToRagged(Layer): """Create a ragged tensor based on a given dense tensor.""" def __init__(self, padding, ragged_rank=1, **kwargs): super(ToRagged, self).__init__(**kwargs) self._padding = padding self._ragged_rank = ragged_rank def call(self, inputs): return ragged_tensor.RaggedTensor.from_tensor( inputs, padding=self._padding, ragged_rank=self._ragged_rank) class ToSparse(Layer): """Create a sparse tensor based on a given dense tensor.""" def call(self, inputs): indices = array_ops.where(math_ops.not_equal(inputs, 0)) values = array_ops.gather_nd(inputs, indices) shape = array_ops.shape(inputs, out_type=dtypes.int64) return sparse_tensor.SparseTensor(indices, values, dense_shape=shape) class _SubclassModel(keras.Model): """A Keras subclass model.""" def __init__(self, layers, i_layer=None): super(_SubclassModel, self).__init__() # Note that clone and build doesn't support lists of layers in subclassed # models. Adding each layer directly here. 
for i, layer in enumerate(layers): setattr(self, self._layer_name_for_i(i), layer) self.num_layers = len(layers) if i_layer: self._set_inputs(i_layer) def _layer_name_for_i(self, i): return "layer{}".format(i) def call(self, inputs, **kwargs): x = inputs for i in range(self.num_layers): layer = getattr(self, self._layer_name_for_i(i)) x = layer(x) return x def get_model_from_layers_with_input(layers, input_shape=None, input_dtype=None, model_input=None): """Builds a model from a sequence of layers.""" if model_input is not None and input_shape is not None: raise ValueError("Cannot specify a model_input and an input shape.") model_type = testing_utils.get_model_type() if model_type == "subclass": return _SubclassModel(layers, model_input) if model_type == "sequential": model = keras.models.Sequential() if model_input is not None: model.add(model_input) elif input_shape is not None: model.add(keras.Input(shape=input_shape, dtype=input_dtype)) for layer in layers: model.add(layer) return model if model_type == "functional": if model_input: inputs = model_input else: if not input_shape: raise ValueError("Cannot create a functional model from layers with no " "input shape.") inputs = keras.Input(shape=input_shape, dtype=input_dtype) outputs = inputs for layer in layers: outputs = layer(outputs) return keras.Model(inputs, outputs) raise ValueError("Unknown model type {}".format(model_type)) @keras_parameterized.run_with_all_model_types @keras_parameterized.run_all_keras_modes class CompositeTensorInternalTest(keras_parameterized.TestCase, ragged_test_util.RaggedTensorTestCase): def test_internal_ragged_tensors(self): # Create a model that accepts an input, converts it to Ragged, and # converts the ragged tensor back to a dense tensor. layers = [ToRagged(padding=0), ToDense(default_value=-1)] model = testing_utils.get_model_from_layers(layers, input_shape=(None,)) # Define some input data with additional padding. input_data = np.array([[1, 0, 0], [2, 3, 0]]) expected_output = np.array([[1, -1], [2, 3]]) output = model.predict(input_data) self.assertAllEqual(expected_output, output) def test_internal_sparse_tensors(self): # Create a model that accepts an input, converts it to Sparse, and # converts the sparse tensor back to a dense tensor. layers = [ToSparse(), ToDense(default_value=-1)] model = testing_utils.get_model_from_layers(layers, input_shape=(None,)) # Define some input data with additional padding. input_data = np.array([[1, 0, 0], [2, 3, 0]]) expected_output = np.array([[1, -1, -1], [2, 3, -1]]) output = model.predict(input_data) self.assertAllEqual(expected_output, output) def test_training_internal_ragged_tensors(self): # Create a model that implements y=Mx. This is easy to learn and will # demonstrate appropriate gradient passing. (We have to use RaggedTensors # for this test, as ToSparse() doesn't support gradient propagation through # the layer.) TODO(b/124796939): Investigate this. layers = [core.Dense(2), ToRagged(padding=0), ToDense(default_value=-1)] model = testing_utils.get_model_from_layers(layers, input_shape=(1,)) input_data = np.random.rand(1024, 1) expected_data = np.concatenate((input_data * 3, input_data * .5), axis=-1) model.compile( loss="mse", optimizer="adam", run_eagerly=testing_utils.should_run_eagerly()) history = model.fit(input_data, expected_data, epochs=10, verbose=0) # If the model trained, the loss stored at history[0] should be different # than the one stored at history[-1]. 
self.assertNotEqual(history.history["loss"][-1], history.history["loss"][0]) @keras_parameterized.run_with_all_model_types @keras_parameterized.run_all_keras_modes class CompositeTensorOutputTest(keras_parameterized.TestCase, ragged_test_util.RaggedTensorTestCase): def test_ragged_tensor_outputs(self): # Create a model that accepts an input, converts it to Ragged, and # converts the ragged tensor back to a dense tensor. layers = [ToRagged(padding=0)] model = testing_utils.get_model_from_layers(layers, input_shape=(None,)) # Define some input data with additional padding. input_data = np.array([[1, 0, 0], [2, 3, 0]]) output = model.predict(input_data) expected_values = [[1], [2, 3]] self.assertRaggedEqual(expected_values, output) def test_ragged_tensor_rebatched_outputs(self): # Create a model that accepts an input, converts it to Ragged, and # converts the ragged tensor back to a dense tensor. layers = [ToRagged(padding=0)] model = testing_utils.get_model_from_layers(layers, input_shape=(None,)) # Define some input data with additional padding. input_data = np.array([[1, 0, 0], [2, 3, 0], [4, 0, 0], [5, 6, 0]]) output = model.predict(input_data, batch_size=2) expected_values = [[1], [2, 3], [4], [5, 6]] self.assertRaggedEqual(expected_values, output) def test_sparse_tensor_outputs(self): # Create a model that accepts an input, converts it to Ragged, and # converts the ragged tensor back to a dense tensor. layers = [ToSparse()] model = testing_utils.get_model_from_layers(layers, input_shape=(None,)) # Define some input data with additional padding. input_data = np.array([[1, 0, 0], [2, 3, 0]]) output = model.predict(input_data) expected_indices = np.array([[0, 0], [1, 0], [1, 1]]) expected_values = np.array([1, 2, 3]) expected_dense_shape = np.array([2, 3]) self.assertAllEqual(output.indices, expected_indices) self.assertAllEqual(output.values, expected_values) self.assertAllEqual(output.dense_shape, expected_dense_shape) def test_sparse_tensor_rebatched_outputs(self): # Create a model that accepts an input, converts it to Ragged, and # converts the ragged tensor back to a dense tensor. layers = [ToSparse()] model = testing_utils.get_model_from_layers(layers, input_shape=(None,)) # Define some input data with additional padding. input_data = np.array([[1, 0, 0], [2, 3, 0], [4, 0, 0], [5, 6, 0]]) output = model.predict(input_data, batch_size=2) expected_indices = np.array([[0, 0], [1, 0], [1, 1], [2, 0], [3, 0], [3, 1]]) expected_values = np.array([1, 2, 3, 4, 5, 6]) expected_dense_shape = np.array([4, 3]) self.assertAllEqual(output.indices, expected_indices) self.assertAllEqual(output.values, expected_values) self.assertAllEqual(output.dense_shape, expected_dense_shape) @keras_parameterized.run_with_all_model_types @keras_parameterized.run_all_keras_modes class SparseTensorInputTest(keras_parameterized.TestCase, ragged_test_util.RaggedTensorTestCase): def test_sparse_scipy_predict_inputs_via_input_layer_args(self): # Create a model that accepts a sparse input and converts the sparse tensor # back to a dense tensor. Scipy sparse matrices are limited to 2D, so use # a one-dimensional shape. 
model_input = input_layer.Input(shape=(3,), sparse=True) layers = [ToDense(default_value=-1)] model = get_model_from_layers_with_input(layers, model_input=model_input) input_data = scipy.sparse.coo_matrix(([1, 2, 3], ([0, 1, 1], [0, 0, 1])), shape=[2, 3]) expected_output = np.array([[1, -1, -1], [2, 3, -1]]) output = model.predict(input_data, steps=1) self.assertAllEqual(expected_output, output) input_data_2 = scipy.sparse.coo_matrix( ([5, 6, 7, 8], ([0, 1, 1, 2], [0, 0, 1, 1])), shape=[3, 3]) expected_output_2 = np.array([[5, -1, -1], [6, 7, -1], [-1, 8, -1]]) output_2 = model.predict(input_data_2, steps=1) self.assertAllEqual(expected_output_2, output_2) def test_sparse_scipy_eval_inputs(self): # Create a model that accepts a sparse input and converts the sparse tensor # back to a dense tensor. Scipy sparse matrices are limited to 2D, so use # a one-dimensional shape. model_input = input_layer.Input(shape=(3,), sparse=True) layers = [ToDense(default_value=-1)] model = get_model_from_layers_with_input(layers, model_input=model_input) model.compile(optimizer="sgd", loss="mse", metrics=["accuracy"]) input_data = scipy.sparse.coo_matrix(([1, 2, 3], ([0, 1, 1], [0, 0, 1])), shape=[2, 3]) expected_output = np.array([[1, -1, -1], [2, 3, -1]]) output = model.evaluate(input_data, expected_output, steps=1) self.assertAllEqual(1.0, output[-1]) input_data_2 = scipy.sparse.coo_matrix( ([5, 6, 7, 8], ([0, 1, 1, 2], [0, 0, 1, 1])), shape=[3, 3]) expected_output_2 = np.array([[5, -1, -1], [6, 7, -1], [-1, 8, -1]]) output_2 = model.evaluate(input_data_2, expected_output_2, steps=1) self.assertAllEqual(1.0, output_2[-1]) def test_sparse_scipy_predict_input_dicts_via_input_layer_args(self): # Create a model that accepts a sparse input and converts the sparse tensor # back to a dense tensor. Scipy sparse matrices are limited to 2D, so use # a one-dimensional shape. if testing_utils.get_model_type() == "subclass": input_name = "input_1" # Subclass models don"t support input names. else: input_name = "test_input_name" model_input = input_layer.Input(shape=(3,), sparse=True, name=input_name) layers = [ToDense(default_value=-1)] model = get_model_from_layers_with_input(layers, model_input=model_input) input_data = { input_name: scipy.sparse.coo_matrix(([1, 2, 3], ([0, 1, 1], [0, 0, 1])), shape=[2, 3]) } expected_output = np.array([[1, -1, -1], [2, 3, -1]]) output = model.predict(input_data, steps=1) self.assertAllEqual(expected_output, output) input_data_2 = { input_name: scipy.sparse.coo_matrix( ([5, 6, 7, 8], ([0, 1, 1, 2], [0, 0, 1, 1])), shape=[3, 3]) } expected_output_2 = np.array([[5, -1, -1], [6, 7, -1], [-1, 8, -1]]) output_2 = model.predict(input_data_2, steps=1) self.assertAllEqual(expected_output_2, output_2) def test_sparse_scipy_eval_input_dicts(self): # Create a model that accepts a sparse input and converts the sparse tensor # back to a dense tensor. Scipy sparse matrices are limited to 2D, so use # a one-dimensional shape. if testing_utils.get_model_type() == "subclass": input_name = "input_1" # Subclass models don"t support input names. 
else: input_name = "test_input_name" model_input = input_layer.Input(shape=(3,), sparse=True, name=input_name) layers = [ToDense(default_value=-1)] model = get_model_from_layers_with_input(layers, model_input=model_input) model.compile(optimizer="sgd", loss="mse", metrics=["accuracy"]) input_data = { input_name: scipy.sparse.coo_matrix(([1, 2, 3], ([0, 1, 1], [0, 0, 1])), shape=[2, 3]) } expected_output = np.array([[1, -1, -1], [2, 3, -1]]) output = model.evaluate(input_data, expected_output, steps=1) self.assertAllEqual(1.0, output[-1]) input_data_2 = { input_name: scipy.sparse.coo_matrix( ([5, 6, 7, 8], ([0, 1, 1, 2], [0, 0, 1, 1])), shape=[3, 3]) } expected_output_2 = np.array([[5, -1, -1], [6, 7, -1], [-1, 8, -1]]) output_2 = model.evaluate(input_data_2, expected_output_2, steps=1) self.assertAllEqual(1.0, output_2[-1]) def test_sparse_tensor_eval_inputs(self): # Create a model that accepts a sparse input and converts the sparse tensor # back to a dense tensor. model_input = input_layer.Input(shape=(1, None), sparse=True) layers = [ToDense(default_value=-1)] model = get_model_from_layers_with_input(layers, model_input=model_input) model.compile(optimizer="sgd", loss="mse", metrics=["accuracy"]) # Define some input data. input_data = sparse_tensor.SparseTensor([[0, 0, 0], [1, 0, 0], [1, 0, 1]], [1, 2, 3], [2, 1, 3]) expected_output = np.array([[[1, -1, -1]], [[2, 3, -1]]]) output = model.evaluate(input_data, expected_output, steps=1) self.assertAllEqual(1.0, output[-1]) input_data_2 = sparse_tensor.SparseTensor( [[0, 0, 0], [1, 0, 0], [1, 0, 1], [2, 0, 1]], [5, 6, 7, 8], [3, 1, 4]) expected_output_2 = np.array([[[5, -1, -1, -1]], [[6, 7, -1, -1]], [[-1, 8, -1, -1]]]) output_2 = model.evaluate(input_data_2, expected_output_2, steps=1) self.assertAllEqual(1.0, output_2[-1]) def test_sparse_tensor_predict_inputs_via_input_layer_args(self): # Create a model that accepts a sparse input and converts the sparse tensor # back to a dense tensor. model_input = input_layer.Input(shape=(1, None), sparse=True) layers = [ToDense(default_value=-1)] model = get_model_from_layers_with_input(layers, model_input=model_input) # Define some input data. input_data = sparse_tensor.SparseTensor([[0, 0, 0], [1, 0, 0], [1, 0, 1]], [1, 2, 3], [2, 1, 3]) expected_output = np.array([[[1, -1, -1]], [[2, 3, -1]]]) output = model.predict(input_data, steps=1) self.assertAllEqual(expected_output, output) input_data_2 = sparse_tensor.SparseTensor( [[0, 0, 0], [1, 0, 0], [1, 0, 1], [2, 0, 1]], [5, 6, 7, 8], [3, 1, 4]) expected_output_2 = np.array([[[5, -1, -1, -1]], [[6, 7, -1, -1]], [[-1, 8, -1, -1]]]) output_2 = model.predict(input_data_2, steps=1) self.assertAllEqual(expected_output_2, output_2) def test_sparse_tensor_predict_input_dicts_via_input_layer_args(self): # Create a model that accepts a sparse input and converts the sparse tensor # back to a dense tensor. if testing_utils.get_model_type() == "subclass": input_name = "input_1" # Subclass models don"t support input names. else: input_name = "test_input_name" model_input = input_layer.Input( shape=(1, None), sparse=True, name=input_name) layers = [ToDense(default_value=-1)] model = get_model_from_layers_with_input(layers, model_input=model_input) # Define some input data. 
input_data = { input_name: sparse_tensor.SparseTensor([[0, 0, 0], [1, 0, 0], [1, 0, 1]], [1, 2, 3], [2, 1, 3]) } expected_output = np.array([[[1, -1, -1]], [[2, 3, -1]]]) output = model.predict(input_data, steps=1) self.assertAllEqual(expected_output, output) input_data_2 = { input_name: sparse_tensor.SparseTensor( [[0, 0, 0], [1, 0, 0], [1, 0, 1], [2, 0, 1]], [5, 6, 7, 8], [3, 1, 4]) } expected_output_2 = np.array([[[5, -1, -1, -1]], [[6, 7, -1, -1]], [[-1, 8, -1, -1]]]) output_2 = model.predict(input_data_2, steps=1) self.assertAllEqual(expected_output_2, output_2) def test_sparse_tensor_eval_input_dicts_via_input_layer_args(self): # Create a model that accepts a sparse input and converts the sparse tensor # back to a dense tensor. if testing_utils.get_model_type() == "subclass": input_name = "input_1" # Subclass models don"t support input names. else: input_name = "test_input_name" model_input = input_layer.Input( shape=(1, None), sparse=True, name=input_name) layers = [ToDense(default_value=-1)] model = get_model_from_layers_with_input(layers, model_input=model_input) model.compile(optimizer="sgd", loss="mse", metrics=["accuracy"]) # Define some input data. input_data = { input_name: sparse_tensor.SparseTensor([[0, 0, 0], [1, 0, 0], [1, 0, 1]], [1, 2, 3], [2, 1, 3]) } expected_output = np.array([[[1, -1, -1]], [[2, 3, -1]]]) output = model.evaluate(input_data, expected_output, steps=1) self.assertAllEqual(1.0, output[-1]) input_data_2 = { input_name: sparse_tensor.SparseTensor( [[0, 0, 0], [1, 0, 0], [1, 0, 1], [2, 0, 1]], [5, 6, 7, 8], [3, 1, 4]) } expected_output_2 = np.array([[[5, -1, -1, -1]], [[6, 7, -1, -1]], [[-1, 8, -1, -1]]]) output_2 = model.evaluate(input_data_2, expected_output_2, steps=1) self.assertAllEqual(1.0, output_2[-1]) def test_sparse_tensor_dataset_predict_inputs_via_input_layer_args(self): # Create a model that accepts a sparse input and converts the sparse tensor # back to a dense tensor. model_input = input_layer.Input(shape=(1, None), sparse=True) layers = [ToDense(default_value=-1)] model = get_model_from_layers_with_input(layers, model_input=model_input) # Define some input data. input_data = dataset_ops.Dataset.from_tensors( sparse_tensor.SparseTensor([[0, 0, 0], [1, 0, 0], [1, 0, 1]], [1, 2, 3], [2, 1, 3])) expected_output = np.array([[[1, -1, -1]], [[2, 3, -1]]]) output = model.predict(input_data) self.assertAllEqual(expected_output, output) input_data_2 = dataset_ops.Dataset.from_tensors( sparse_tensor.SparseTensor([[0, 0, 0], [1, 0, 0], [1, 0, 1], [2, 0, 1]], [5, 6, 7, 8], [3, 1, 4])) expected_output_2 = np.array([[[5, -1, -1, -1]], [[6, 7, -1, -1]], [[-1, 8, -1, -1]]]) output_2 = model.predict(input_data_2) self.assertAllEqual(expected_output_2, output_2) def test_sparse_tensor_dataset_eval_inputs_via_input_layer_args(self): # Create a model that accepts a sparse input and converts the sparse tensor # back to a dense tensor. model_input = input_layer.Input(shape=(1, None), sparse=True) layers = [ToDense(default_value=-1)] model = get_model_from_layers_with_input(layers, model_input=model_input) model.compile(optimizer="sgd", loss="mse", metrics=["accuracy"]) # Define some input data. 
input_tensor = sparse_tensor.SparseTensor([[0, 0, 0], [1, 0, 0], [1, 0, 1]], [1, 2, 3], [2, 1, 3]) expected_output = np.array([[[1, -1, -1]], [[2, 3, -1]]]) input_data = dataset_ops.Dataset.from_tensors( (input_tensor, expected_output)) output = model.evaluate(input_data) self.assertAllEqual(1.0, output[-1]) input_tensor_2 = sparse_tensor.SparseTensor( [[0, 0, 0], [1, 0, 0], [1, 0, 1], [2, 0, 1]], [5, 6, 7, 8], [3, 1, 4]) expected_output_2 = np.array([[[5, -1, -1, -1]], [[6, 7, -1, -1]], [[-1, 8, -1, -1]]]) input_data_2 = dataset_ops.Dataset.from_tensors( (input_tensor_2, expected_output_2)) output_2 = model.evaluate(input_data_2) self.assertAllEqual(1.0, output_2[-1]) def test_sparse_tensor_dataset_dict_predict_inputs_via_input_layer_args(self): # Create a model that accepts a sparse input and converts the sparse tensor # back to a dense tensor. if testing_utils.get_model_type() == "subclass": input_name = "input_1" # Subclass models don"t support custom input names else: input_name = "test_input_name" model_input = input_layer.Input( shape=(1, None), sparse=True, name=input_name) layers = [ToDense(default_value=-1)] model = get_model_from_layers_with_input(layers, model_input=model_input) # Define some input data. input_data = dataset_ops.Dataset.from_tensors({ input_name: sparse_tensor.SparseTensor([[0, 0, 0], [1, 0, 0], [1, 0, 1]], [1, 2, 3], [2, 1, 3]) }) expected_output = np.array([[[1, -1, -1]], [[2, 3, -1]]]) output = model.predict(input_data) self.assertAllEqual(expected_output, output) input_data_2 = dataset_ops.Dataset.from_tensors({ input_name: sparse_tensor.SparseTensor( [[0, 0, 0], [1, 0, 0], [1, 0, 1], [2, 0, 1]], [5, 6, 7, 8], [3, 1, 4]) }) expected_output_2 = np.array([[[5, -1, -1, -1]], [[6, 7, -1, -1]], [[-1, 8, -1, -1]]]) output_2 = model.predict(input_data_2) self.assertAllEqual(expected_output_2, output_2) def test_sparse_tensor_dataset_dict_eval_inputs_via_input_layer_args(self): # Create a model that accepts a sparse input and converts the sparse tensor # back to a dense tensor. if testing_utils.get_model_type() == "subclass": input_name = "input_1" # Subclass models don"t support custom input names else: input_name = "test_input_name" model_input = input_layer.Input( shape=(1, None), sparse=True, name=input_name) layers = [ToDense(default_value=-1)] model = get_model_from_layers_with_input(layers, model_input=model_input) model.compile(optimizer="sgd", loss="mse", metrics=["accuracy"]) # Define some input data. input_tensor = { input_name: sparse_tensor.SparseTensor([[0, 0, 0], [1, 0, 0], [1, 0, 1]], [1, 2, 3], [2, 1, 3]) } expected_output = np.array([[[1, -1, -1]], [[2, 3, -1]]]) input_data = dataset_ops.Dataset.from_tensors( (input_tensor, expected_output)) output = model.evaluate(input_data) self.assertAllEqual(1.0, output[-1]) input_tensor_2 = { input_name: sparse_tensor.SparseTensor( [[0, 0, 0], [1, 0, 0], [1, 0, 1], [2, 0, 1]], [5, 6, 7, 8], [3, 1, 4]) } expected_output_2 = np.array([[[5, -1, -1, -1]], [[6, 7, -1, -1]], [[-1, 8, -1, -1]]]) input_data_2 = dataset_ops.Dataset.from_tensors( (input_tensor_2, expected_output_2)) output_2 = model.evaluate(input_data_2) self.assertAllEqual(1.0, output_2[-1]) # CompositeTensor shape validation only happens in non-eager modes and in non- # subclassed models, so we run a separate parameterized test for them. 
@keras_parameterized.run_with_all_model_types(exclude_models=["subclass"]) @keras_parameterized.run_all_keras_modes(always_skip_eager=True) class SparseTensorInputValidationTest(keras_parameterized.TestCase, ragged_test_util.RaggedTensorTestCase): def test_sparse_scipy_input_checks_shape(self): model_input = input_layer.Input(shape=(3,), sparse=True) layers = [ToDense(default_value=-1)] model = get_model_from_layers_with_input(layers, model_input=model_input) input_data = scipy.sparse.coo_matrix(([1, 2, 3], ([0, 1, 1], [0, 0, 1])), shape=[2, 4]) with self.assertRaisesRegex(ValueError, ".*got array with shape.*"): _ = model.predict(input_data, steps=1) def test_sparse_tensor_input_checks_shapes(self): # Create a model that accepts a sparse input and converts the sparse tensor # back to a dense tensor. model_input = input_layer.Input(shape=(2, None), sparse=True) layers = [ToDense(default_value=-1)] model = get_model_from_layers_with_input(layers, model_input=model_input) # Define some input data. input_data = sparse_tensor.SparseTensor([[0, 0, 0], [1, 0, 0], [1, 0, 1]], [1, 2, 3], [2, 1, 3]) with self.assertRaisesRegex(ValueError, ".*got array with shape.*"): _ = model.predict(input_data, steps=1) @keras_parameterized.run_with_all_model_types( exclude_models=["functional"]) @keras_parameterized.run_all_keras_modes class UndefinedCompositeTensorInputsTest(keras_parameterized.TestCase, ragged_test_util.RaggedTensorTestCase): def test_subclass_implicit_sparse_inputs_fails(self): # Create a model that accepts a sparse input and converts the sparse tensor # back to a dense tensor. layers = [ToDense(default_value=-1)] model = testing_utils.get_model_from_layers(layers) # Define some input data. input_data = sparse_tensor.SparseTensor([[0, 0], [1, 0], [1, 1]], [1, 2, 3], [2, 3]) with self.assertRaisesRegex( ValueError, ".*All SparseTensor and RaggedTensor inputs .*"): _ = model.predict(input_data, steps=1) def test_subclass_implicit_sparse_scipy_inputs_fails(self): # Create a model that accepts a sparse input and converts the sparse tensor # back to a dense tensor. layers = [ToDense(default_value=-1)] model = testing_utils.get_model_from_layers(layers) # Define some input data. input_data = scipy.sparse.coo_matrix(([1, 2, 3], ([0, 1, 1], [0, 0, 1])), shape=[2, 3]) with self.assertRaisesRegex(ValueError, ".*either a single array.*"): _ = model.predict(input_data, steps=1) if __name__ == "__main__": test.main()
tensorflow-master
tensorflow/python/keras/utils/composite_tensor_support_test.py
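For reference, the dense outputs these sparse-input tests expect can be reproduced with the public sparse-to-dense conversion. This is a minimal sketch using tf.sparse.to_dense in place of the ToDense test layer, assuming a TF 2.x-style eager session for the commented result; it mirrors the first predict/evaluate case above.

import tensorflow as tf

# Same sparse input as in the tests above: dense shape [2, 1, 3].
st = tf.sparse.SparseTensor(
    indices=[[0, 0, 0], [1, 0, 0], [1, 0, 1]],
    values=[1, 2, 3],
    dense_shape=[2, 1, 3])

# Missing positions are filled with -1, mirroring ToDense(default_value=-1).
dense = tf.sparse.to_dense(st, default_value=-1)
# dense  ==  [[[1, -1, -1]], [[2, 3, -1]]]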
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for kernelized_utils.py.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools from absl.testing import parameterized from tensorflow.python.framework import constant_op from tensorflow.python.keras.utils import kernelized_utils from tensorflow.python.platform import test def _exact_gaussian(stddev): return functools.partial( kernelized_utils.exact_gaussian_kernel, stddev=stddev) def _exact_laplacian(stddev): return functools.partial( kernelized_utils.exact_laplacian_kernel, stddev=stddev) class KernelizedUtilsTest(test.TestCase, parameterized.TestCase): @parameterized.named_parameters( ('gaussian', _exact_gaussian(stddev=10.0), [[1.0]]), ('laplacian', _exact_laplacian(stddev=50.0), [[1.0]])) def test_equal_vectors(self, exact_kernel_fn, expected_values): """Identical vectors give exactly the identity kernel value.""" x = constant_op.constant([0.5, -0.5, -0.5, 0.5]) y = constant_op.constant([0.5, -0.5, -0.5, 0.5]) exact_kernel = exact_kernel_fn(x, y) shape = exact_kernel.shape.as_list() self.assertLen(shape, 2) # x and y are identical and therefore K(x, y) will be precisely equal to # the identity value of the kernel. self.assertAllClose(expected_values, exact_kernel, atol=1e-6) @parameterized.named_parameters( ('gaussian', _exact_gaussian(stddev=10.0), [[1.0]]), ('laplacian', _exact_laplacian(stddev=50.0), [[1.0]])) def test_almost_identical_vectors(self, exact_kernel_fn, expected_values): """Almost identical vectors give the identity kernel value.""" x = constant_op.constant([1.0, 0.4, -2.1, -1.1]) y = constant_op.constant([1.01, 0.39, -2.099, -1.101]) exact_kernel = exact_kernel_fn(x, y) shape = exact_kernel.shape.as_list() self.assertLen(shape, 2) # x and y are almost identical and therefore K(x, y) will be almost equal to # the identity value of the kernel. self.assertAllClose(expected_values, exact_kernel, atol=1e-3) @parameterized.named_parameters( ('gaussian', _exact_gaussian(stddev=1.0), [[0.99], [0.977]]), ('laplacian', _exact_laplacian(stddev=5.0), [[0.96], [0.94]])) def test_similar_matrices(self, exact_kernel_fn, expected_values): """Pairwise "close" vectors give high kernel values (similarity scores).""" x = constant_op.constant([1.0, 3.4, -2.1, 0.9, 3.3, -2.0], shape=[2, 3]) y = constant_op.constant([1.1, 3.35, -2.05]) exact_kernel = exact_kernel_fn(x, y) shape = exact_kernel.shape.as_list() self.assertLen(shape, 2) # The 2 rows of x are close to y. The pairwise kernel values (similarity # scores) are somewhat close to the identity value of the kernel. 
self.assertAllClose(expected_values, exact_kernel, atol=1e-2) @parameterized.named_parameters( ('gaussian', _exact_gaussian(stddev=2.0), [[.997, .279], [.251, 1.], [.164, 0.019]]), ('laplacian', _exact_laplacian(stddev=2.0), [[.904, .128], [.116, 1.], [.07, 0.027]])) def test_matrices_varying_similarity(self, exact_kernel_fn, expected_values): """Test matrices with row vectors of varying pairwise similarity.""" x = constant_op.constant([1.0, 2., -2., 0.9, 3.3, -1.0], shape=[3, 2]) y = constant_op.constant([1.1, 2.1, -2., 0.9], shape=[2, 2]) exact_kernel = exact_kernel_fn(x, y) shape = exact_kernel.shape.as_list() self.assertLen(shape, 2) self.assertAllClose(expected_values, exact_kernel, atol=1e-2) @parameterized.named_parameters( ('gaussian', _exact_gaussian(stddev=1.0), [[0.0]]), ('laplacian', _exact_laplacian(stddev=1.0), [[0.0]])) def test_completely_dissimilar_vectors(self, exact_kernel_fn, expected_values): """Very dissimilar vectors give very low similarity scores.""" x = constant_op.constant([1.0, 3.4, -2.1, -5.1]) y = constant_op.constant([0.5, 2.1, 1.0, 3.0]) exact_kernel = exact_kernel_fn(x, y) shape = exact_kernel.shape.as_list() self.assertLen(shape, 2) # x and y are very "far" from each other and so the corresponding kernel # value will be very low. self.assertAllClose(expected_values, exact_kernel, atol=1e-2) if __name__ == '__main__': test.main()
tensorflow-master
tensorflow/python/keras/utils/kernelized_utils_test.py
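As a sanity check on the expected values used in these tests, the identity value of both kernels can be verified with plain NumPy: for identical inputs K(u, u) = exp(0) = 1 regardless of stddev. A small self-contained sketch (not using the TensorFlow implementation under test):

import numpy as np

def gaussian_kernel(u, v, stddev):
  # exp(-||u - v||_2^2 / (2 * stddev**2))
  return np.exp(-np.sum((u - v) ** 2) / (2.0 * stddev ** 2))

def laplacian_kernel(u, v, stddev):
  # exp(-||u - v||_1 / stddev)
  return np.exp(-np.sum(np.abs(u - v)) / stddev)

x = np.array([0.5, -0.5, -0.5, 0.5])
assert np.isclose(gaussian_kernel(x, x, stddev=10.0), 1.0)
assert np.isclose(laplacian_kernel(x, x, stddev=50.0), 1.0)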
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.keras.utils.data_utils import GeneratorEnqueuer
from tensorflow.python.keras.utils.data_utils import get_file
from tensorflow.python.keras.utils.data_utils import OrderedEnqueuer
from tensorflow.python.keras.utils.data_utils import Sequence
from tensorflow.python.keras.utils.data_utils import SequenceEnqueuer
from tensorflow.python.keras.utils.generic_utils import class_and_config_for_serialized_keras_object
from tensorflow.python.keras.utils.generic_utils import custom_object_scope
from tensorflow.python.keras.utils.generic_utils import CustomObjectScope
from tensorflow.python.keras.utils.generic_utils import deserialize_keras_object
from tensorflow.python.keras.utils.generic_utils import get_custom_objects
from tensorflow.python.keras.utils.generic_utils import Progbar
from tensorflow.python.keras.utils.generic_utils import serialize_keras_class_and_config
from tensorflow.python.keras.utils.generic_utils import serialize_keras_object
from tensorflow.python.keras.utils.io_utils import HDF5Matrix
from tensorflow.python.keras.utils.layer_utils import convert_all_kernels_in_model
from tensorflow.python.keras.utils.layer_utils import get_source_inputs
from tensorflow.python.keras.utils.layer_utils import print_summary
from tensorflow.python.keras.utils.multi_gpu_utils import multi_gpu_model
from tensorflow.python.keras.utils.np_utils import normalize
from tensorflow.python.keras.utils.np_utils import to_categorical
from tensorflow.python.keras.utils.vis_utils import model_to_dot
from tensorflow.python.keras.utils.vis_utils import plot_model

del absolute_import
del division
del print_function
tensorflow-master
tensorflow/python/keras/utils/__init__.py
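Two of the helpers re-exported above, np_utils.to_categorical and np_utils.normalize, are commonly reached through the public tf.keras.utils namespace. A brief usage sketch (the numeric results in comments assume the standard behavior of these functions):

import numpy as np
import tensorflow as tf

labels = np.array([0, 2, 1, 2])
one_hot = tf.keras.utils.to_categorical(labels, num_classes=3)
# one_hot.shape == (4, 3); row i has a 1 in column labels[i].

vectors = np.array([[3.0, 4.0]])
unit = tf.keras.utils.normalize(vectors, axis=-1, order=2)
# unit == [[0.6, 0.8]]  (rows are l2-normalized)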
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utility methods related to kernelized layers.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops def _to_matrix(u): """If input tensor is a vector (i.e., has rank 1), converts it to matrix.""" u_rank = len(u.shape) if u_rank not in [1, 2]: raise ValueError('The input tensor should have rank 1 or 2. Given rank: {}' .format(u_rank)) if u_rank == 1: return array_ops.expand_dims(u, 0) return u def _align_matrices(x, y): """Aligns x and y tensors to allow computations over pairs of their rows.""" x_matrix = _to_matrix(x) y_matrix = _to_matrix(y) x_shape = x_matrix.shape y_shape = y_matrix.shape if y_shape[1] != x_shape[1]: # dimensions do not match. raise ValueError( 'The outermost dimensions of the input tensors should match. Given: {} ' 'vs {}.'.format(y_shape[1], x_shape[1])) x_tile = array_ops.tile( array_ops.expand_dims(x_matrix, 1), [1, y_shape[0], 1]) y_tile = array_ops.tile( array_ops.expand_dims(y_matrix, 0), [x_shape[0], 1, 1]) return x_tile, y_tile def inner_product(u, v): u = _to_matrix(u) v = _to_matrix(v) return math_ops.matmul(u, v, transpose_b=True) def exact_gaussian_kernel(x, y, stddev): r"""Computes exact Gaussian kernel value(s) for tensors x and y and stddev. The Gaussian kernel for vectors u, v is defined as follows: K(u, v) = exp(-||u-v||^2 / (2* stddev^2)) where the norm is the l2-norm. x, y can be either vectors or matrices. If they are vectors, they must have the same dimension. If they are matrices, they must have the same number of columns. In the latter case, the method returns (as a matrix) K(u, v) values for all pairs (u, v) where u is a row from x and v is a row from y. Args: x: a tensor of rank 1 or 2. It's shape should be either [dim] or [m, dim]. y: a tensor of rank 1 or 2. It's shape should be either [dim] or [n, dim]. stddev: The width of the Gaussian kernel. Returns: A single value (scalar) with shape (1, 1) (if x, y are vectors) or a matrix of shape (m, n) with entries K(u, v) (where K is the Gaussian kernel) for all (u,v) pairs where u, v are rows from x and y respectively. Raises: ValueError: if the shapes of x, y are not compatible. """ x_aligned, y_aligned = _align_matrices(x, y) diff_squared_l2_norm = math_ops.reduce_sum( math_ops.squared_difference(x_aligned, y_aligned), 2) return math_ops.exp(-diff_squared_l2_norm / (2 * stddev * stddev)) def exact_laplacian_kernel(x, y, stddev): r"""Computes exact Laplacian kernel value(s) for tensors x and y using stddev. The Laplacian kernel for vectors u, v is defined as follows: K(u, v) = exp(-||u-v|| / stddev) where the norm is the l1-norm. x, y can be either vectors or matrices. If they are vectors, they must have the same dimension. 
If they are matrices, they must have the same number of columns. In the latter case, the method returns (as a matrix) K(u, v) values for all pairs (u, v) where u is a row from x and v is a row from y. Args: x: a tensor of rank 1 or 2. Its shape should be either [dim] or [m, dim]. y: a tensor of rank 1 or 2. Its shape should be either [dim] or [n, dim]. stddev: The width of the Laplacian kernel. Returns: A single value (scalar) with shape (1, 1) if x, y are vectors or a matrix of shape (m, n) with entries K(u, v) (where K is the Laplacian kernel) for all (u,v) pairs where u, v are rows from x and y respectively. Raises: ValueError: if the shapes of x, y are not compatible. """ x_aligned, y_aligned = _align_matrices(x, y) diff_l1_norm = math_ops.reduce_sum( math_ops.abs(math_ops.subtract(x_aligned, y_aligned)), 2) return math_ops.exp(-diff_l1_norm / stddev)
tensorflow-master
tensorflow/python/keras/utils/kernelized_utils.py
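The docstrings above describe the pairwise behavior for matrix inputs. A short sketch of that shape contract, calling exact_gaussian_kernel on an [m, dim] x and an [n, dim] y; the import path is the internal module shown above, so treat it as illustrative rather than a stable public API:

import numpy as np
from tensorflow.python.keras.utils import kernelized_utils

x = np.array([[1.0, 0.0], [0.0, 1.0], [1.0, 1.0]], dtype=np.float32)  # m = 3 rows
y = np.array([[1.0, 0.0], [2.0, 2.0]], dtype=np.float32)              # n = 2 rows

k = kernelized_utils.exact_gaussian_kernel(x, y, stddev=1.0)
# k has shape (3, 2); k[i, j] == exp(-||x[i] - y[j]||^2 / 2).
# For example, k[0, 0] == 1.0 because x[0] == y[0].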
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for metrics_utils.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized from tensorflow.python.framework import constant_op from tensorflow.python.framework import test_util from tensorflow.python.keras.utils import metrics_utils from tensorflow.python.ops.ragged import ragged_factory_ops from tensorflow.python.ops.ragged import ragged_tensor from tensorflow.python.ops.ragged import ragged_test_util from tensorflow.python.platform import googletest @test_util.run_all_in_graph_and_eager_modes class RaggedSizeOpTest(ragged_test_util.RaggedTensorTestCase, parameterized.TestCase): @parameterized.parameters([ { 'x_list': [1], 'y_list': [2] }, { 'x_list': [1, 2], 'y_list': [2, 3] }, { 'x_list': [1, 2, 4], 'y_list': [2, 3, 5] }, { 'x_list': [[1, 2], [3, 4]], 'y_list': [[2, 3], [5, 6]] }, ]) def test_passing_dense_tensors(self, x_list, y_list): x = constant_op.constant(x_list) y = constant_op.constant(y_list) [x, y], _ = metrics_utils.ragged_assert_compatible_and_get_flat_values([x, y]) x.shape.assert_is_compatible_with(y.shape) @parameterized.parameters([ { 'x_list': [1], }, { 'x_list': [1, 2], }, { 'x_list': [1, 2, 4], }, { 'x_list': [[1, 2], [3, 4]], }, ]) def test_passing_one_dense_tensor(self, x_list): x = constant_op.constant(x_list) [x], _ = metrics_utils.ragged_assert_compatible_and_get_flat_values([x]) @parameterized.parameters([ { 'x_list': [1], 'y_list': [2] }, { 'x_list': [1, 2], 'y_list': [2, 3] }, { 'x_list': [1, 2, 4], 'y_list': [2, 3, 5] }, { 'x_list': [[1, 2], [3, 4]], 'y_list': [[2, 3], [5, 6]] }, { 'x_list': [[1, 2], [3, 4], [1]], 'y_list': [[2, 3], [5, 6], [3]] }, { 'x_list': [[1, 2], [], [1]], 'y_list': [[2, 3], [], [3]] }, ]) def test_passing_both_ragged(self, x_list, y_list): x = ragged_factory_ops.constant(x_list) y = ragged_factory_ops.constant(y_list) [x, y], _ = metrics_utils.ragged_assert_compatible_and_get_flat_values([x, y]) x.shape.assert_is_compatible_with(y.shape) @parameterized.parameters([ { 'x_list': [1], }, { 'x_list': [1, 2], }, { 'x_list': [1, 2, 4], }, { 'x_list': [[1, 2], [3, 4]], }, { 'x_list': [[1, 2], [3, 4], [1]], }, { 'x_list': [[1, 2], [], [1]], }, ]) def test_passing_one_ragged(self, x_list): x = ragged_factory_ops.constant(x_list) [x], _ = metrics_utils.ragged_assert_compatible_and_get_flat_values([x]) @parameterized.parameters([ { 'x_list': [1], 'y_list': [2], 'mask_list': [0] }, { 'x_list': [1, 2], 'y_list': [2, 3], 'mask_list': [0, 1] }, { 'x_list': [1, 2, 4], 'y_list': [2, 3, 5], 'mask_list': [1, 1, 1] }, { 'x_list': [[1, 2], [3, 4]], 'y_list': [[2, 3], [5, 6]], 'mask_list': [[1, 1], [0, 1]] }, { 'x_list': [[1, 2], [3, 4], [1]], 'y_list': [[2, 3], [5, 6], [3]], 'mask_list': [[1, 1], [0, 0], [1]] }, { 'x_list': [[1, 2], [], [1]], 'y_list': [[2, 3], [], [3]], 
'mask_list': [[1, 1], [], [0]] }, ]) def test_passing_both_ragged_with_mask(self, x_list, y_list, mask_list): x = ragged_factory_ops.constant(x_list) y = ragged_factory_ops.constant(y_list) mask = ragged_factory_ops.constant(mask_list) [x, y], mask = \ metrics_utils.ragged_assert_compatible_and_get_flat_values([x, y], mask) x.shape.assert_is_compatible_with(y.shape) y.shape.assert_is_compatible_with(mask.shape) @parameterized.parameters([ { 'x_list': [1], 'mask_list': [0] }, { 'x_list': [1, 2], 'mask_list': [0, 1] }, { 'x_list': [1, 2, 4], 'mask_list': [1, 1, 1] }, { 'x_list': [[1, 2], [3, 4]], 'mask_list': [[1, 1], [0, 1]] }, { 'x_list': [[1, 2], [3, 4], [1]], 'mask_list': [[1, 1], [0, 0], [1]] }, { 'x_list': [[1, 2], [], [1]], 'mask_list': [[1, 1], [], [0]] }, ]) def test_passing_one_ragged_with_mask(self, x_list, mask_list): x = ragged_factory_ops.constant(x_list) mask = ragged_factory_ops.constant(mask_list) [x], mask = \ metrics_utils.ragged_assert_compatible_and_get_flat_values([x], mask) x.shape.assert_is_compatible_with(mask.shape) @parameterized.parameters([ { 'x_list': [[[1, 3]]], 'y_list': [[2, 3]] }, ]) def test_failing_different_ragged_and_dense_ranks(self, x_list, y_list): x = ragged_factory_ops.constant(x_list) y = ragged_factory_ops.constant(y_list) with self.assertRaises(ValueError): # pylint: disable=g-error-prone-assert-raises [x, y ], _ = metrics_utils.ragged_assert_compatible_and_get_flat_values([x, y]) @parameterized.parameters([ { 'x_list': [[[1, 3]]], 'y_list': [[[2, 3]]], 'mask_list': [[0, 1]] }, ]) def test_failing_different_mask_ranks(self, x_list, y_list, mask_list): x = ragged_factory_ops.constant(x_list) y = ragged_factory_ops.constant(y_list) mask = ragged_factory_ops.constant(mask_list) with self.assertRaises(ValueError): # pylint: disable=g-error-prone-assert-raises [x, y ], _ = metrics_utils.ragged_assert_compatible_and_get_flat_values([x, y], mask) # we do not support such cases that ragged_ranks are different but overall # dimension shapes and sizes are identical due to adding too much performance # overheads to the overall use cases. def test_failing_different_ragged_ranks(self): dt = constant_op.constant([[[1, 2]]]) # adding a ragged dimension x = ragged_tensor.RaggedTensor.from_row_splits(dt, row_splits=[0, 1]) y = ragged_factory_ops.constant([[[[1, 2]]]]) with self.assertRaises(ValueError): # pylint: disable=g-error-prone-assert-raises [x, y], _ = \ metrics_utils.ragged_assert_compatible_and_get_flat_values([x, y]) if __name__ == '__main__': googletest.main()
tensorflow-master
tensorflow/python/keras/utils/metrics_utils_test.py
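The helper exercised above returns the flat values of compatible ragged inputs. A rough, hand-rolled equivalent of what the passing-both-ragged cases rely on, using only public ragged APIs (this re-implements the flattening by hand, not the helper itself; the commented values assume TF 2.x eager execution):

import tensorflow as tf

x = tf.ragged.constant([[1, 2], [3, 4], [1]])
y = tf.ragged.constant([[2, 3], [5, 6], [3]])

# x and y share the same row lengths, so their flat values line up elementwise.
x_flat, y_flat = x.flat_values, y.flat_values
# x_flat == [1, 2, 3, 4, 1]  and  y_flat == [2, 3, 5, 6, 3]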
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilities used by convolution layers. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import itertools import numpy as np from six.moves import range # pylint: disable=redefined-builtin from tensorflow.python.keras import backend def convert_data_format(data_format, ndim): if data_format == 'channels_last': if ndim == 3: return 'NWC' elif ndim == 4: return 'NHWC' elif ndim == 5: return 'NDHWC' else: raise ValueError('Input rank not supported:', ndim) elif data_format == 'channels_first': if ndim == 3: return 'NCW' elif ndim == 4: return 'NCHW' elif ndim == 5: return 'NCDHW' else: raise ValueError('Input rank not supported:', ndim) else: raise ValueError('Invalid data_format:', data_format) def normalize_tuple(value, n, name): """Transforms a single integer or iterable of integers into an integer tuple. Arguments: value: The value to validate and convert. Could an int, or any iterable of ints. n: The size of the tuple to be returned. name: The name of the argument being validated, e.g. "strides" or "kernel_size". This is only used to format error messages. Returns: A tuple of n integers. Raises: ValueError: If something else than an int/long or iterable thereof was passed. """ if isinstance(value, int): return (value,) * n else: try: value_tuple = tuple(value) except TypeError: raise ValueError('The `' + name + '` argument must be a tuple of ' + str(n) + ' integers. Received: ' + str(value)) if len(value_tuple) != n: raise ValueError('The `' + name + '` argument must be a tuple of ' + str(n) + ' integers. Received: ' + str(value)) for single_value in value_tuple: try: int(single_value) except (ValueError, TypeError): raise ValueError('The `' + name + '` argument must be a tuple of ' + str(n) + ' integers. Received: ' + str(value) + ' ' 'including element ' + str(single_value) + ' of type' + ' ' + str(type(single_value))) return value_tuple def conv_output_length(input_length, filter_size, padding, stride, dilation=1): """Determines output length of a convolution given input length. Arguments: input_length: integer. filter_size: integer. padding: one of "same", "valid", "full", "causal" stride: integer. dilation: dilation rate, integer. Returns: The output length (integer). """ if input_length is None: return None assert padding in {'same', 'valid', 'full', 'causal'} dilated_filter_size = filter_size + (filter_size - 1) * (dilation - 1) if padding in ['same', 'causal']: output_length = input_length elif padding == 'valid': output_length = input_length - dilated_filter_size + 1 elif padding == 'full': output_length = input_length + dilated_filter_size - 1 return (output_length + stride - 1) // stride def conv_input_length(output_length, filter_size, padding, stride): """Determines input length of a convolution given output length. 
Arguments: output_length: integer. filter_size: integer. padding: one of "same", "valid", "full". stride: integer. Returns: The input length (integer). """ if output_length is None: return None assert padding in {'same', 'valid', 'full'} if padding == 'same': pad = filter_size // 2 elif padding == 'valid': pad = 0 elif padding == 'full': pad = filter_size - 1 return (output_length - 1) * stride - 2 * pad + filter_size def deconv_output_length(input_length, filter_size, padding, output_padding=None, stride=0, dilation=1): """Determines output length of a transposed convolution given input length. Arguments: input_length: Integer. filter_size: Integer. padding: one of `"same"`, `"valid"`, `"full"`. output_padding: Integer, amount of padding along the output dimension. Can be set to `None` in which case the output length is inferred. stride: Integer. dilation: Integer. Returns: The output length (integer). """ assert padding in {'same', 'valid', 'full'} if input_length is None: return None # Get the dilated kernel size filter_size = filter_size + (filter_size - 1) * (dilation - 1) # Infer length if output padding is None, else compute the exact length if output_padding is None: if padding == 'valid': length = input_length * stride + max(filter_size - stride, 0) elif padding == 'full': length = input_length * stride - (stride + filter_size - 2) elif padding == 'same': length = input_length * stride else: if padding == 'same': pad = filter_size // 2 elif padding == 'valid': pad = 0 elif padding == 'full': pad = filter_size - 1 length = ((input_length - 1) * stride + filter_size - 2 * pad + output_padding) return length def normalize_data_format(value): if value is None: value = backend.image_data_format() data_format = value.lower() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('The `data_format` argument must be one of ' '"channels_first", "channels_last". Received: ' + str(value)) return data_format def normalize_padding(value): if isinstance(value, (list, tuple)): return value padding = value.lower() if padding not in {'valid', 'same', 'causal'}: raise ValueError('The `padding` argument must be a list/tuple or one of ' '"valid", "same" (or "causal", only for `Conv1D). ' 'Received: ' + str(padding)) return padding def convert_kernel(kernel): """Converts a Numpy kernel matrix from Theano format to TensorFlow format. Also works reciprocally, since the transformation is its own inverse. Arguments: kernel: Numpy array (3D, 4D or 5D). Returns: The converted kernel. Raises: ValueError: in case of invalid kernel shape or invalid data_format. """ kernel = np.asarray(kernel) if not 3 <= kernel.ndim <= 5: raise ValueError('Invalid kernel shape:', kernel.shape) slices = [slice(None, None, -1) for _ in range(kernel.ndim)] no_flip = (slice(None, None), slice(None, None)) slices[-2:] = no_flip return np.copy(kernel[slices]) def conv_kernel_mask(input_shape, kernel_shape, strides, padding): """Compute a mask representing the connectivity of a convolution operation. Assume a convolution with given parameters is applied to an input having N spatial dimensions with `input_shape = (d_in1, ..., d_inN)` to produce an output with shape `(d_out1, ..., d_outN)`. This method returns a boolean array of shape `(d_in1, ..., d_inN, d_out1, ..., d_outN)` with `True` entries indicating pairs of input and output locations that are connected by a weight. 
Example: ```python >>> input_shape = (4,) >>> kernel_shape = (2,) >>> strides = (1,) >>> padding = "valid" >>> conv_kernel_mask(input_shape, kernel_shape, strides, padding) array([[ True, False, False], [ True, True, False], [False, True, True], [False, False, True]], dtype=bool) ``` where rows and columns correspond to inputs and outputs respectively. Args: input_shape: tuple of size N: `(d_in1, ..., d_inN)`, spatial shape of the input. kernel_shape: tuple of size N, spatial shape of the convolutional kernel / receptive field. strides: tuple of size N, strides along each spatial dimension. padding: type of padding, string `"same"` or `"valid"`. Returns: A boolean 2N-D `np.ndarray` of shape `(d_in1, ..., d_inN, d_out1, ..., d_outN)`, where `(d_out1, ..., d_outN)` is the spatial shape of the output. `True` entries in the mask represent pairs of input-output locations that are connected by a weight. Raises: ValueError: if `input_shape`, `kernel_shape` and `strides` don't have the same number of dimensions. NotImplementedError: if `padding` is not in {`"same"`, `"valid"`}. """ if padding not in {'same', 'valid'}: raise NotImplementedError('Padding type %s not supported. ' 'Only "valid" and "same" ' 'are implemented.' % padding) in_dims = len(input_shape) if isinstance(kernel_shape, int): kernel_shape = (kernel_shape,) * in_dims if isinstance(strides, int): strides = (strides,) * in_dims kernel_dims = len(kernel_shape) stride_dims = len(strides) if kernel_dims != in_dims or stride_dims != in_dims: raise ValueError('Number of strides, input and kernel dimensions must all ' 'match. Received: %d, %d, %d.' % (stride_dims, in_dims, kernel_dims)) output_shape = conv_output_shape(input_shape, kernel_shape, strides, padding) mask_shape = input_shape + output_shape mask = np.zeros(mask_shape, np.bool) output_axes_ticks = [range(dim) for dim in output_shape] for output_position in itertools.product(*output_axes_ticks): input_axes_ticks = conv_connected_inputs(input_shape, kernel_shape, output_position, strides, padding) for input_position in itertools.product(*input_axes_ticks): mask[input_position + output_position] = True return mask def conv_connected_inputs(input_shape, kernel_shape, output_position, strides, padding): """Return locations of the input connected to an output position. Assume a convolution with given parameters is applied to an input having N spatial dimensions with `input_shape = (d_in1, ..., d_inN)`. This method returns N ranges specifying the input region that was convolved with the kernel to produce the output at position `output_position = (p_out1, ..., p_outN)`. Example: ```python >>> input_shape = (4, 4) >>> kernel_shape = (2, 1) >>> output_position = (1, 1) >>> strides = (1, 1) >>> padding = "valid" >>> conv_connected_inputs(input_shape, kernel_shape, output_position, >>> strides, padding) [xrange(1, 3), xrange(1, 2)] ``` Args: input_shape: tuple of size N: `(d_in1, ..., d_inN)`, spatial shape of the input. kernel_shape: tuple of size N, spatial shape of the convolutional kernel / receptive field. output_position: tuple of size N: `(p_out1, ..., p_outN)`, a single position in the output of the convolution. strides: tuple of size N, strides along each spatial dimension. padding: type of padding, string `"same"` or `"valid"`. Returns: N ranges `[[p_in_left1, ..., p_in_right1], ..., [p_in_leftN, ..., p_in_rightN]]` specifying the region in the input connected to output_position. 
""" ranges = [] ndims = len(input_shape) for d in range(ndims): left_shift = int(kernel_shape[d] / 2) right_shift = kernel_shape[d] - left_shift center = output_position[d] * strides[d] if padding == 'valid': center += left_shift start = max(0, center - left_shift) end = min(input_shape[d], center + right_shift) ranges.append(range(start, end)) return ranges def conv_output_shape(input_shape, kernel_shape, strides, padding): """Return the output shape of an N-D convolution. Forces dimensions where input is empty (size 0) to remain empty. Args: input_shape: tuple of size N: `(d_in1, ..., d_inN)`, spatial shape of the input. kernel_shape: tuple of size N, spatial shape of the convolutional kernel / receptive field. strides: tuple of size N, strides along each spatial dimension. padding: type of padding, string `"same"` or `"valid"`. Returns: tuple of size N: `(d_out1, ..., d_outN)`, spatial shape of the output. """ dims = range(len(kernel_shape)) output_shape = [conv_output_length(input_shape[d], kernel_shape[d], padding, strides[d]) for d in dims] output_shape = tuple([0 if input_shape[d] == 0 else output_shape[d] for d in dims]) return output_shape
tensorflow-master
tensorflow/python/keras/utils/conv_utils.py
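A worked check of conv_output_length for the common cases, following the formula the function implements (dilated filter size k' = k + (k - 1)(d - 1), then ceiling division by the stride); the import path is the internal module above:

from tensorflow.python.keras.utils import conv_utils

# 'valid': output = ceil((input - dilated_filter + 1) / stride)
assert conv_utils.conv_output_length(
    input_length=10, filter_size=3, padding='valid', stride=2) == 4

# 'same': output = ceil(input / stride), independent of filter size
assert conv_utils.conv_output_length(
    input_length=10, filter_size=3, padding='same', stride=2) == 5

# dilation=2 gives an effective filter of size 5 under 'valid' padding
assert conv_utils.conv_output_length(
    input_length=10, filter_size=3, padding='valid', stride=1, dilation=2) == 6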
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # pylint: disable=g-import-not-at-top """Utilities for file download and caching.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from abc import abstractmethod from contextlib import closing import gc import hashlib import multiprocessing from multiprocessing.pool import ThreadPool import os import random import shutil import signal import sys import tarfile import threading import time import weakref import zipfile import numpy as np import six from six.moves.urllib.error import HTTPError from six.moves.urllib.error import URLError from six.moves.urllib.request import urlopen from tensorflow.python.keras.utils.generic_utils import Progbar from tensorflow.python.util import tf_inspect from tensorflow.python.util.tf_export import keras_export try: import queue except ImportError: import Queue as queue if sys.version_info[0] == 2: def urlretrieve(url, filename, reporthook=None, data=None): """Replacement for `urlretrive` for Python 2. Under Python 2, `urlretrieve` relies on `FancyURLopener` from legacy `urllib` module, known to have issues with proxy management. Arguments: url: url to retrieve. filename: where to store the retrieved data locally. reporthook: a hook function that will be called once on establishment of the network connection and once after each block read thereafter. The hook will be passed three arguments; a count of blocks transferred so far, a block size in bytes, and the total size of the file. data: `data` argument passed to `urlopen`. """ def chunk_read(response, chunk_size=8192, reporthook=None): content_type = response.info().get('Content-Length') total_size = -1 if content_type is not None: total_size = int(content_type.strip()) count = 0 while True: chunk = response.read(chunk_size) count += 1 if reporthook is not None: reporthook(count, chunk_size, total_size) if chunk: yield chunk else: break response = urlopen(url, data) with open(filename, 'wb') as fd: for chunk in chunk_read(response, reporthook=reporthook): fd.write(chunk) else: from six.moves.urllib.request import urlretrieve def is_generator_or_sequence(x): """Check if `x` is a Keras generator type.""" return tf_inspect.isgenerator(x) or isinstance(x, Sequence) def _extract_archive(file_path, path='.', archive_format='auto'): """Extracts an archive if it matches tar, tar.gz, tar.bz, or zip formats. Arguments: file_path: path to the archive file path: path to extract the archive file archive_format: Archive format to try for extracting the file. Options are 'auto', 'tar', 'zip', and None. 'tar' includes tar, tar.gz, and tar.bz files. The default 'auto' is ['tar', 'zip']. None or an empty list will return no matches found. Returns: True if a match was found and an archive extraction was completed, False otherwise. 
""" if archive_format is None: return False if archive_format == 'auto': archive_format = ['tar', 'zip'] if isinstance(archive_format, six.string_types): archive_format = [archive_format] for archive_type in archive_format: if archive_type == 'tar': open_fn = tarfile.open is_match_fn = tarfile.is_tarfile if archive_type == 'zip': open_fn = zipfile.ZipFile is_match_fn = zipfile.is_zipfile if is_match_fn(file_path): with open_fn(file_path) as archive: try: archive.extractall(path) except (tarfile.TarError, RuntimeError, KeyboardInterrupt): if os.path.exists(path): if os.path.isfile(path): os.remove(path) else: shutil.rmtree(path) raise return True return False @keras_export('keras.utils.get_file') def get_file(fname, origin, untar=False, md5_hash=None, file_hash=None, cache_subdir='datasets', hash_algorithm='auto', extract=False, archive_format='auto', cache_dir=None): """Downloads a file from a URL if it not already in the cache. By default the file at the url `origin` is downloaded to the cache_dir `~/.keras`, placed in the cache_subdir `datasets`, and given the filename `fname`. The final location of a file `example.txt` would therefore be `~/.keras/datasets/example.txt`. Files in tar, tar.gz, tar.bz, and zip formats can also be extracted. Passing a hash will verify the file after download. The command line programs `shasum` and `sha256sum` can compute the hash. Arguments: fname: Name of the file. If an absolute path `/path/to/file.txt` is specified the file will be saved at that location. origin: Original URL of the file. untar: Deprecated in favor of 'extract'. boolean, whether the file should be decompressed md5_hash: Deprecated in favor of 'file_hash'. md5 hash of the file for verification file_hash: The expected hash string of the file after download. The sha256 and md5 hash algorithms are both supported. cache_subdir: Subdirectory under the Keras cache dir where the file is saved. If an absolute path `/path/to/folder` is specified the file will be saved at that location. hash_algorithm: Select the hash algorithm to verify the file. options are 'md5', 'sha256', and 'auto'. The default 'auto' detects the hash algorithm in use. extract: True tries extracting the file as an Archive, like tar or zip. archive_format: Archive format to try for extracting the file. Options are 'auto', 'tar', 'zip', and None. 'tar' includes tar, tar.gz, and tar.bz files. The default 'auto' is ['tar', 'zip']. None or an empty list will return no matches found. cache_dir: Location to store cached files, when None it defaults to the [Keras Directory](/faq/#where-is-the-keras-configuration-filed-stored). Returns: Path to the downloaded file """ if cache_dir is None: cache_dir = os.path.join(os.path.expanduser('~'), '.keras') if md5_hash is not None and file_hash is None: file_hash = md5_hash hash_algorithm = 'md5' datadir_base = os.path.expanduser(cache_dir) if not os.access(datadir_base, os.W_OK): datadir_base = os.path.join('/tmp', '.keras') datadir = os.path.join(datadir_base, cache_subdir) if not os.path.exists(datadir): os.makedirs(datadir) if untar: untar_fpath = os.path.join(datadir, fname) fpath = untar_fpath + '.tar.gz' else: fpath = os.path.join(datadir, fname) download = False if os.path.exists(fpath): # File found; verify integrity if a hash was provided. 
if file_hash is not None: if not validate_file(fpath, file_hash, algorithm=hash_algorithm): print('A local file was found, but it seems to be ' 'incomplete or outdated because the ' + hash_algorithm + ' file hash does not match the original value of ' + file_hash + ' so we will re-download the data.') download = True else: download = True if download: print('Downloading data from', origin) class ProgressTracker(object): # Maintain progbar for the lifetime of download. # This design was chosen for Python 2.7 compatibility. progbar = None def dl_progress(count, block_size, total_size): if ProgressTracker.progbar is None: if total_size == -1: total_size = None ProgressTracker.progbar = Progbar(total_size) else: ProgressTracker.progbar.update(count * block_size) error_msg = 'URL fetch failure on {}: {} -- {}' try: try: urlretrieve(origin, fpath, dl_progress) except HTTPError as e: raise Exception(error_msg.format(origin, e.code, e.msg)) except URLError as e: raise Exception(error_msg.format(origin, e.errno, e.reason)) except (Exception, KeyboardInterrupt) as e: if os.path.exists(fpath): os.remove(fpath) raise ProgressTracker.progbar = None if untar: if not os.path.exists(untar_fpath): _extract_archive(fpath, datadir, archive_format='tar') return untar_fpath if extract: _extract_archive(fpath, datadir, archive_format) return fpath def _hash_file(fpath, algorithm='sha256', chunk_size=65535): """Calculates a file sha256 or md5 hash. Example: ```python >>> from keras.data_utils import _hash_file >>> _hash_file('/path/to/file.zip') 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' ``` Arguments: fpath: path to the file being validated algorithm: hash algorithm, one of 'auto', 'sha256', or 'md5'. The default 'auto' detects the hash algorithm in use. chunk_size: Bytes to read at a time, important for large files. Returns: The file hash """ if (algorithm == 'sha256') or (algorithm == 'auto' and len(hash) == 64): hasher = hashlib.sha256() else: hasher = hashlib.md5() with open(fpath, 'rb') as fpath_file: for chunk in iter(lambda: fpath_file.read(chunk_size), b''): hasher.update(chunk) return hasher.hexdigest() def validate_file(fpath, file_hash, algorithm='auto', chunk_size=65535): """Validates a file against a sha256 or md5 hash. Arguments: fpath: path to the file being validated file_hash: The expected hash string of the file. The sha256 and md5 hash algorithms are both supported. algorithm: Hash algorithm, one of 'auto', 'sha256', or 'md5'. The default 'auto' detects the hash algorithm in use. chunk_size: Bytes to read at a time, important for large files. Returns: Whether the file is valid """ if (algorithm == 'sha256') or (algorithm == 'auto' and len(file_hash) == 64): hasher = 'sha256' else: hasher = 'md5' if str(_hash_file(fpath, hasher, chunk_size)) == str(file_hash): return True else: return False @keras_export('keras.utils.Sequence') class Sequence(object): """Base object for fitting to a sequence of data, such as a dataset. Every `Sequence` must implement the `__getitem__` and the `__len__` methods. If you want to modify your dataset between epochs you may implement `on_epoch_end`. The method `__getitem__` should return a complete batch. Notes: `Sequence` are a safer way to do multiprocessing. This structure guarantees that the network will only train once on each sample per epoch which is not the case with generators. 
Examples: ```python from skimage.io import imread from skimage.transform import resize import numpy as np import math # Here, `x_set` is list of path to the images # and `y_set` are the associated classes. class CIFAR10Sequence(Sequence): def __init__(self, x_set, y_set, batch_size): self.x, self.y = x_set, y_set self.batch_size = batch_size def __len__(self): return math.ceil(len(self.x) / self.batch_size) def __getitem__(self, idx): batch_x = self.x[idx * self.batch_size:(idx + 1) * self.batch_size] batch_y = self.y[idx * self.batch_size:(idx + 1) * self.batch_size] return np.array([ resize(imread(file_name), (200, 200)) for file_name in batch_x]), np.array(batch_y) ``` """ @abstractmethod def __getitem__(self, index): """Gets batch at position `index`. Arguments: index: position of the batch in the Sequence. Returns: A batch """ raise NotImplementedError @abstractmethod def __len__(self): """Number of batch in the Sequence. Returns: The number of batches in the Sequence. """ raise NotImplementedError def on_epoch_end(self): """Method called at the end of every epoch. """ pass def __iter__(self): """Create a generator that iterate over the Sequence.""" for item in (self[i] for i in range(len(self))): yield item def iter_sequence_infinite(seq): """Iterates indefinitely over a Sequence. Arguments: seq: Sequence instance. Yields: Batches of data from the Sequence. """ while True: for item in seq: yield item # Global variables to be shared across processes _SHARED_SEQUENCES = {} # We use a Value to provide unique id to different processes. _SEQUENCE_COUNTER = None # Because multiprocessing pools are inherently unsafe, starting from a clean # state can be essential to avoiding deadlocks. In order to accomplish this, we # need to be able to check on the status of Pools that we create. _DATA_POOLS = weakref.WeakSet() _WORKER_ID_QUEUE = None # Only created if needed. _WORKER_IDS = set() def get_worker_id_queue(): """Lazily create the queue to track worker ids.""" global _WORKER_ID_QUEUE if _WORKER_ID_QUEUE is None: _WORKER_ID_QUEUE = multiprocessing.Queue() return _WORKER_ID_QUEUE def init_pool(seqs): global _SHARED_SEQUENCES _SHARED_SEQUENCES = seqs @keras_export('keras.experimental.terminate_keras_multiprocessing_pools') def terminate_keras_multiprocessing_pools(grace_period=0.1, use_sigkill=False): """Destroy Keras' multiprocessing pools to prevent deadlocks. In general multiprocessing.Pool can interact quite badly with other, seemingly unrelated, parts of a codebase due to Pool's reliance on fork. This method cleans up all pools which are known to belong to Keras (and thus can be safely terminated). Args: grace_period: Time (in seconds) to wait for process cleanup to propagate. use_sigkill: Boolean of whether or not to perform a cleanup pass using SIGKILL. Returns: A list of human readable strings describing all issues encountered. It is up to the caller to decide whether to treat this as an error condition. """ errors = [] # First cleanup the pools spawned by Keras. If we start killing workers and # a parent pool is still alive it will just spawn replacements which we don't # want. gc.collect() for pool in _DATA_POOLS: pool.close() pool.terminate() # We do not join the pool, because that would wait forever if a worker # refused to exit. # Finally, delete our reference to the pool so that we do not block garbage # collection. del pool # If there were any pools, sleep for a small grace period to allow everything # to finalize. 
if _DATA_POOLS: time.sleep(grace_period) # Now we kill any workers which are still alive. However we must compare # the worker identifier to the set of identifiers which are known to have been # spawned by pools belonging to Keras to avoid deleting unrelated workers. # First we call the .terminate() method of a worker, and then if it still # persists we directly send a signal to the process. Certain worker tasks may # be able to gracefully handle shutdown, so we send a SIGTERM and then # optionally follow up with a SIGKILL. visited_workers = set() cleanup_passes = ['.terminate', 'SIGTERM'] if use_sigkill: cleanup_passes.append('SIGKILL') cleanup_passes.append('log') for cleanup_pass in cleanup_passes: while True: # In rare cases, queue.qsize() overestimates the number of elements. This # loop is designed to be more robust. try: _WORKER_IDS.add(get_worker_id_queue().get_nowait()) except queue.Empty: break gc.collect() workers_terminated_this_pass = False for worker in multiprocessing.active_children(): ident = worker.ident if ident in _WORKER_IDS and worker.is_alive(): try: if cleanup_pass == '.terminate': # First we ask nicely. worker.terminate() worker.join(timeout=grace_period) visited_workers.add(ident) workers_terminated_this_pass = True elif cleanup_pass in ('SIGTERM', 'SIGKILL'): # Then we ask increasingly tersely. os.kill(worker.pid, signal.SIGKILL if cleanup_pass == 'SIGKILL' else signal.SIGTERM) workers_terminated_this_pass = True elif cleanup_pass == 'log': # And finally we give up and log the failure. errors.append('worker still alive: {}, pid={}, hash={}' .format(worker.name, worker.pid, hash(worker))) except OSError: # Worker exited since the start of this loop. pass if workers_terminated_this_pass: # There can be a small propagation delay between worker destruction and # workers reporting False for is_alive and no longer appearing in the # list of active children. Once again, we sleep for a small grace period. # This prevents false positives from workers which are simply still in the # process of spinning down. time.sleep(grace_period) # Finally we remove the visited worker ids to handle the edge case that a # pid is reused. _WORKER_IDS.difference_update(visited_workers) gc.collect() for pool in _DATA_POOLS: errors.append('pool still exists: {}, hash={}'.format(pool, hash(pool))) return errors def get_index(uid, i): """Get the value from the Sequence `uid` at index `i`. To allow multiple Sequences to be used at the same time, we use `uid` to get a specific one. A single Sequence would cause the validation to overwrite the training Sequence. Arguments: uid: int, Sequence identifier i: index Returns: The value at index `i`. """ return _SHARED_SEQUENCES[uid][i] @keras_export('keras.utils.SequenceEnqueuer') class SequenceEnqueuer(object): """Base class to enqueue inputs. The task of an Enqueuer is to use parallelism to speed up preprocessing. This is done with processes or threads. Example: ```python enqueuer = SequenceEnqueuer(...) enqueuer.start() datas = enqueuer.get() for data in datas: # Use the inputs; training, evaluating, predicting. # ... stop sometime. enqueuer.close() ``` The `enqueuer.get()` should be an infinite stream of datas. """ def __init__(self, sequence, use_multiprocessing=False): self.sequence = sequence self.use_multiprocessing = use_multiprocessing global _SEQUENCE_COUNTER if _SEQUENCE_COUNTER is None: try: _SEQUENCE_COUNTER = multiprocessing.Value('i', 0) except OSError: # In this case the OS does not allow us to use # multiprocessing. 
We resort to an int # for enqueuer indexing. _SEQUENCE_COUNTER = 0 if isinstance(_SEQUENCE_COUNTER, int): self.uid = _SEQUENCE_COUNTER _SEQUENCE_COUNTER += 1 else: # Doing Multiprocessing.Value += x is not process-safe. with _SEQUENCE_COUNTER.get_lock(): self.uid = _SEQUENCE_COUNTER.value _SEQUENCE_COUNTER.value += 1 self.workers = 0 self.executor_fn = None self.queue = None self.run_thread = None self.stop_signal = None def is_running(self): return self.stop_signal is not None and not self.stop_signal.is_set() def start(self, workers=1, max_queue_size=10): """Starts the handler's workers. Arguments: workers: Number of workers. max_queue_size: queue size (when full, workers could block on `put()`) """ if self.use_multiprocessing: self.executor_fn = self._get_executor_init(workers) else: # We do not need the init since it's threads. self.executor_fn = lambda _: ThreadPool(workers) self.workers = workers self.queue = queue.Queue(max_queue_size) self.stop_signal = threading.Event() self.run_thread = threading.Thread(target=self._run) self.run_thread.daemon = True self.run_thread.start() def _send_sequence(self): """Sends current Iterable to all workers.""" # For new processes that may spawn _SHARED_SEQUENCES[self.uid] = self.sequence def stop(self, timeout=None): """Stops running threads and wait for them to exit, if necessary. Should be called by the same thread which called `start()`. Arguments: timeout: maximum time to wait on `thread.join()` """ self.stop_signal.set() with self.queue.mutex: self.queue.queue.clear() self.queue.unfinished_tasks = 0 self.queue.not_full.notify() self.run_thread.join(timeout) _SHARED_SEQUENCES[self.uid] = None @abstractmethod def _run(self): """Submits request to the executor and queue the `Future` objects.""" raise NotImplementedError @abstractmethod def _get_executor_init(self, workers): """Gets the Pool initializer for multiprocessing. Arguments: workers: Number of workers. Returns: Function, a Function to initialize the pool """ raise NotImplementedError @abstractmethod def get(self): """Creates a generator to extract data from the queue. Skip the data if it is `None`. # Returns Generator yielding tuples `(inputs, targets)` or `(inputs, targets, sample_weights)`. """ raise NotImplementedError @keras_export('keras.utils.OrderedEnqueuer') class OrderedEnqueuer(SequenceEnqueuer): """Builds a Enqueuer from a Sequence. Used in `fit_generator`, `evaluate_generator`, `predict_generator`. Arguments: sequence: A `tf.keras.utils.data_utils.Sequence` object. use_multiprocessing: use multiprocessing if True, otherwise threading shuffle: whether to shuffle the data at the beginning of each epoch """ def __init__(self, sequence, use_multiprocessing=False, shuffle=False): super(OrderedEnqueuer, self).__init__(sequence, use_multiprocessing) self.shuffle = shuffle def _get_executor_init(self, workers): """Gets the Pool initializer for multiprocessing. Arguments: workers: Number of workers. 
Returns: Function, a Function to initialize the pool """ def pool_fn(seqs): pool = multiprocessing.Pool( workers, initializer=init_pool_generator, initargs=(seqs, None, get_worker_id_queue())) _DATA_POOLS.add(pool) return pool return pool_fn def _wait_queue(self): """Wait for the queue to be empty.""" while True: time.sleep(0.1) if self.queue.unfinished_tasks == 0 or self.stop_signal.is_set(): return def _run(self): """Submits request to the executor and queue the `Future` objects.""" sequence = list(range(len(self.sequence))) self._send_sequence() # Share the initial sequence while True: if self.shuffle: random.shuffle(sequence) with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor: for i in sequence: if self.stop_signal.is_set(): return self.queue.put( executor.apply_async(get_index, (self.uid, i)), block=True) # Done with the current epoch, waiting for the final batches self._wait_queue() if self.stop_signal.is_set(): # We're done return # Call the internal on epoch end. self.sequence.on_epoch_end() self._send_sequence() # Update the pool def get(self): """Creates a generator to extract data from the queue. Skip the data if it is `None`. Yields: The next element in the queue, i.e. a tuple `(inputs, targets)` or `(inputs, targets, sample_weights)`. """ try: while self.is_running(): inputs = self.queue.get(block=True).get() self.queue.task_done() if inputs is not None: yield inputs except Exception: # pylint: disable=broad-except self.stop() six.reraise(*sys.exc_info()) def init_pool_generator(gens, random_seed=None, id_queue=None): """Initializer function for pool workers. Args: gens: State which should be made available to worker processes. random_seed: An optional value with which to seed child processes. id_queue: A multiprocessing Queue of worker ids. This is used to indicate that a worker process was created by Keras and can be terminated using the cleanup_all_keras_forkpools utility. """ global _SHARED_SEQUENCES _SHARED_SEQUENCES = gens worker_proc = multiprocessing.current_process() # name isn't used for anything, but setting a more descriptive name is helpful # when diagnosing orphaned processes. worker_proc.name = 'Keras_worker_{}'.format(worker_proc.name) if random_seed is not None: np.random.seed(random_seed + worker_proc.ident) if id_queue is not None: # If a worker dies during init, the pool will just create a replacement. id_queue.put(worker_proc.ident, block=True, timeout=0.1) def next_sample(uid): """Gets the next value from the generator `uid`. To allow multiple generators to be used at the same time, we use `uid` to get a specific one. A single generator would cause the validation to overwrite the training generator. Arguments: uid: int, generator identifier Returns: The next value of generator `uid`. """ return six.next(_SHARED_SEQUENCES[uid]) @keras_export('keras.utils.GeneratorEnqueuer') class GeneratorEnqueuer(SequenceEnqueuer): """Builds a queue out of a data generator. The provided generator can be finite in which case the class will throw a `StopIteration` exception. Used in `fit_generator`, `evaluate_generator`, `predict_generator`. Arguments: generator: a generator function which yields data use_multiprocessing: use multiprocessing if True, otherwise threading wait_time: time to sleep in-between calls to `put()` random_seed: Initial seed for workers, will be incremented by one for each worker. 
""" def __init__(self, sequence, use_multiprocessing=False, random_seed=None): super(GeneratorEnqueuer, self).__init__(sequence, use_multiprocessing) self.random_seed = random_seed def _get_executor_init(self, workers): """Gets the Pool initializer for multiprocessing. Arguments: workers: Number of works. Returns: A Function to initialize the pool """ def pool_fn(seqs): pool = multiprocessing.Pool( workers, initializer=init_pool_generator, initargs=(seqs, self.random_seed, get_worker_id_queue())) _DATA_POOLS.add(pool) return pool return pool_fn def _run(self): """Submits request to the executor and queue the `Future` objects.""" self._send_sequence() # Share the initial generator with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor: while True: if self.stop_signal.is_set(): return self.queue.put( executor.apply_async(next_sample, (self.uid,)), block=True) def get(self): """Creates a generator to extract data from the queue. Skip the data if it is `None`. Yields: The next element in the queue, i.e. a tuple `(inputs, targets)` or `(inputs, targets, sample_weights)`. """ try: while self.is_running(): inputs = self.queue.get(block=True).get() self.queue.task_done() if inputs is not None: yield inputs except StopIteration: # Special case for finite generators last_ones = [] while self.queue.qsize() > 0: last_ones.append(self.queue.get(block=True)) # Wait for them to complete for f in last_ones: f.wait() # Keep the good ones last_ones = [future.get() for future in last_ones if future.successful()] for inputs in last_ones: if inputs is not None: yield inputs except Exception as e: # pylint: disable=broad-except self.stop() if 'generator already executing' in str(e): raise RuntimeError( 'Your generator is NOT thread-safe. ' 'Keras requires a thread-safe generator when ' '`use_multiprocessing=False, workers > 1`. ') six.reraise(*sys.exc_info())
tensorflow-master
tensorflow/python/keras/utils/data_utils.py
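The hash-verification path in get_file reduces to comparing a freshly computed sha256/md5 digest of the local file against the expected string. A minimal sketch of validate_file on a throwaway temp file; the file name and contents are purely illustrative:

import hashlib
import tempfile

from tensorflow.python.keras.utils import data_utils

with tempfile.NamedTemporaryFile(suffix='.txt', delete=False) as f:
  f.write(b'hello keras')
  fpath = f.name

expected = hashlib.sha256(b'hello keras').hexdigest()
assert data_utils.validate_file(fpath, expected, algorithm='sha256')
# A mismatching hash (here an obviously wrong md5 string) fails validation.
assert not data_utils.validate_file(fpath, 'deadbeef', algorithm='md5')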
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Keras Layer utils.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python import keras from tensorflow.python.keras.utils import vis_utils from tensorflow.python.lib.io import file_io from tensorflow.python.platform import test class ModelToDotFormatTest(test.TestCase): def test_plot_model_cnn(self): model = keras.Sequential() model.add( keras.layers.Conv2D( filters=2, kernel_size=(2, 3), input_shape=(3, 5, 5), name='conv')) model.add(keras.layers.Flatten(name='flat')) model.add(keras.layers.Dense(5, name='dense')) dot_img_file = 'model_1.png' try: vis_utils.plot_model(model, to_file=dot_img_file, show_shapes=True) self.assertTrue(file_io.file_exists(dot_img_file)) file_io.delete_file(dot_img_file) except ImportError: pass def test_plot_model_with_wrapped_layers_and_models(self): inputs = keras.Input(shape=(None, 3)) lstm = keras.layers.LSTM(6, return_sequences=True, name='lstm') x = lstm(inputs) # Add layer inside a Wrapper bilstm = keras.layers.Bidirectional( keras.layers.LSTM(16, return_sequences=True, name='bilstm')) x = bilstm(x) # Add model inside a Wrapper submodel = keras.Sequential( [keras.layers.Dense(32, name='dense', input_shape=(None, 32))] ) wrapped_dense = keras.layers.TimeDistributed(submodel) x = wrapped_dense(x) # Add shared submodel outputs = submodel(x) model = keras.Model(inputs, outputs) dot_img_file = 'model_2.png' try: vis_utils.plot_model( model, to_file=dot_img_file, show_shapes=True, expand_nested=True) self.assertTrue(file_io.file_exists(dot_img_file)) file_io.delete_file(dot_img_file) except ImportError: pass if __name__ == '__main__': test.main()
tensorflow-master
tensorflow/python/keras/utils/vis_utils_test.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # pylint: disable=protected-access """Utils related to keras metrics. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools import weakref from enum import Enum from tensorflow.python.distribute import distribution_strategy_context from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.keras.utils import tf_utils from tensorflow.python.keras.utils.generic_utils import to_list from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn_ops from tensorflow.python.ops import weights_broadcast_ops from tensorflow.python.ops.losses import util as tf_losses_utils from tensorflow.python.ops.ragged import ragged_tensor from tensorflow.python.ops.ragged import ragged_util from tensorflow.python.util import tf_decorator NEG_INF = -1e10 class Reduction(Enum): """Types of metrics reduction. Contains the following values: * `SUM`: Scalar sum of weighted values. * `SUM_OVER_BATCH_SIZE`: Scalar sum of weighted values divided by number of elements. * `WEIGHTED_MEAN`: Scalar sum of weighted values divided by sum of weights. """ SUM = 'sum' SUM_OVER_BATCH_SIZE = 'sum_over_batch_size' WEIGHTED_MEAN = 'weighted_mean' def update_state_wrapper(update_state_fn): """Decorator to wrap metric `update_state()` with `add_update()`. Args: update_state_fn: function that accumulates metric statistics. Returns: Decorated function that wraps `update_state_fn()` with `add_update()`. """ def decorated(metric_obj, *args, **kwargs): """Decorated function with `add_update()`.""" with tf_utils.graph_context_for_symbolic_tensors(*args, **kwargs): update_op = update_state_fn(*args, **kwargs) if update_op is not None: # update_op will be None in eager execution. metric_obj.add_update(update_op) return update_op return tf_decorator.make_decorator(update_state_fn, decorated) def result_wrapper(result_fn): """Decorator to wrap metric `result()` function in `merge_call()`. Result computation is an idempotent operation that simply calculates the metric value using the state variables. If metric state variables are distributed across replicas/devices and `result()` is requested from the context of one device - This function wraps `result()` in a distribution strategy `merge_call()`. With this, the metric state variables will be aggregated across devices. Args: result_fn: function that computes the metric result. Returns: Decorated function that wraps `result_fn()` in distribution strategy `merge_call()`. 
""" def decorated(_, *args): """Decorated function with merge_call.""" replica_context = distribution_strategy_context.get_replica_context() if replica_context is None: # if in cross replica context already result_t = array_ops.identity(result_fn(*args)) else: # TODO(psv): Test distribution of metrics using different distribution # strategies. # Creating a wrapper for merge_fn. merge_call invokes the given merge_fn # with distribution object as the first parameter. We create a wrapper # here so that the result function need not have that parameter. def merge_fn_wrapper(distribution, merge_fn, *args): # We will get `PerReplica` merge function. Taking the first one as all # are identical copies of the function that we had passed below. merged_result_fn = ( distribution.experimental_local_results(merge_fn)[0](*args)) # Wrapping result in identity so that control dependency between # update_op from `update_state` and result works in case result returns # a tensor. return array_ops.identity(merged_result_fn) # Wrapping result in merge_call. merge_call is used when we want to leave # replica mode and compute a value in cross replica mode. result_t = replica_context.merge_call( merge_fn_wrapper, args=(result_fn,) + args) return result_t return tf_decorator.make_decorator(result_fn, decorated) def weakmethod(method): """Creates a weak reference to the bound method.""" cls = method.im_class func = method.im_func instance_ref = weakref.ref(method.im_self) @functools.wraps(method) def inner(*args, **kwargs): return func.__get__(instance_ref(), cls)(*args, **kwargs) del method return inner def assert_thresholds_range(thresholds): if thresholds is not None: invalid_thresholds = [t for t in thresholds if t is None or t < 0 or t > 1] if invalid_thresholds: raise ValueError( 'Threshold values must be in [0, 1]. Invalid values: {}'.format( invalid_thresholds)) def parse_init_thresholds(thresholds, default_threshold=0.5): if thresholds is not None: assert_thresholds_range(to_list(thresholds)) thresholds = to_list(default_threshold if thresholds is None else thresholds) return thresholds class ConfusionMatrix(Enum): TRUE_POSITIVES = 'tp' FALSE_POSITIVES = 'fp' TRUE_NEGATIVES = 'tn' FALSE_NEGATIVES = 'fn' class AUCCurve(Enum): """Type of AUC Curve (ROC or PR).""" ROC = 'ROC' PR = 'PR' @staticmethod def from_str(key): if key in ('pr', 'PR'): return AUCCurve.PR elif key in ('roc', 'ROC'): return AUCCurve.ROC else: raise ValueError('Invalid AUC curve value "%s".' % key) class AUCSummationMethod(Enum): """Type of AUC summation method. https://en.wikipedia.org/wiki/Riemann_sum) Contains the following values: * 'interpolation': Applies mid-point summation scheme for `ROC` curve. For `PR` curve, interpolates (true/false) positives but not the ratio that is precision (see Davis & Goadrich 2006 for details). * 'minoring': Applies left summation for increasing intervals and right summation for decreasing intervals. * 'majoring': Applies right summation for increasing intervals and left summation for decreasing intervals. """ INTERPOLATION = 'interpolation' MAJORING = 'majoring' MINORING = 'minoring' @staticmethod def from_str(key): if key in ('interpolation', 'Interpolation'): return AUCSummationMethod.INTERPOLATION elif key in ('majoring', 'Majoring'): return AUCSummationMethod.MAJORING elif key in ('minoring', 'Minoring'): return AUCSummationMethod.MINORING else: raise ValueError('Invalid AUC summation method value "%s".' 
% key) def update_confusion_matrix_variables(variables_to_update, y_true, y_pred, thresholds, top_k=None, class_id=None, sample_weight=None): """Returns op to update the given confusion matrix variables. For every pair of values in y_true and y_pred: true_positive: y_true == True and y_pred > thresholds false_negatives: y_true == True and y_pred <= thresholds true_negatives: y_true == False and y_pred <= thresholds false_positive: y_true == False and y_pred > thresholds The results will be weighted and added together. When multiple thresholds are provided, we will repeat the same for every threshold. For estimation of these metrics over a stream of data, the function creates an `update_op` operation that updates the given variables. If `sample_weight` is `None`, weights default to 1. Use weights of 0 to mask values. Args: variables_to_update: Dictionary with 'tp', 'fn', 'tn', 'fp' as valid keys and corresponding variables to update as values. y_true: A `Tensor` whose shape matches `y_pred`. Will be cast to `bool`. y_pred: A floating point `Tensor` of arbitrary shape and whose values are in the range `[0, 1]`. thresholds: A float value or a python list or tuple of float thresholds in `[0, 1]`, or NEG_INF (used when top_k is set). top_k: Optional int, indicates that the positive labels should be limited to the top k predictions. class_id: Optional int, limits the prediction and labels to the class specified by this argument. sample_weight: Optional `Tensor` whose rank is either 0, or the same rank as `y_true`, and must be broadcastable to `y_true` (i.e., all dimensions must be either `1`, or the same as the corresponding `y_true` dimension). Returns: Update op. Raises: ValueError: If `y_pred` and `y_true` have mismatched shapes, or if `sample_weight` is not `None` and its shape doesn't match `y_pred`, or if `variables_to_update` contains invalid keys. """ if variables_to_update is None: return y_true = math_ops.cast(y_true, dtype=dtypes.float32) y_pred = math_ops.cast(y_pred, dtype=dtypes.float32) [y_pred, y_true], _ = ragged_assert_compatible_and_get_flat_values([y_pred, y_true], sample_weight) y_pred.shape.assert_is_compatible_with(y_true.shape) if not any( key for key in variables_to_update if key in list(ConfusionMatrix)): raise ValueError( 'Please provide at least one valid confusion matrix ' 'variable to update. Valid variable key options are: "{}". ' 'Received: "{}"'.format( list(ConfusionMatrix), variables_to_update.keys())) invalid_keys = [ key for key in variables_to_update if key not in list(ConfusionMatrix) ] if invalid_keys: raise ValueError( 'Invalid keys: {}. Valid variable key options are: "{}"'.format( invalid_keys, list(ConfusionMatrix))) with ops.control_dependencies([ check_ops.assert_greater_equal( y_pred, math_ops.cast(0.0, dtype=y_pred.dtype), message='predictions must be >= 0'), check_ops.assert_less_equal( y_pred, math_ops.cast(1.0, dtype=y_pred.dtype), message='predictions must be <= 1') ]): if sample_weight is None: y_pred, y_true = tf_losses_utils.squeeze_or_expand_dimensions( y_pred, y_true) else: y_pred, y_true, sample_weight = ( tf_losses_utils.squeeze_or_expand_dimensions( y_pred, y_true, sample_weight=sample_weight)) if top_k is not None: y_pred = _filter_top_k(y_pred, top_k) if class_id is not None: y_true = y_true[..., class_id] y_pred = y_pred[..., class_id] thresholds = to_list(thresholds) num_thresholds = len(thresholds) num_predictions = array_ops.size(y_pred) # Reshape predictions and labels. 
predictions_2d = array_ops.reshape(y_pred, [1, -1]) labels_2d = array_ops.reshape( math_ops.cast(y_true, dtype=dtypes.bool), [1, -1]) # Tile the thresholds for every prediction. thresh_tiled = array_ops.tile( array_ops.expand_dims(array_ops.constant(thresholds), 1), array_ops.stack([1, num_predictions])) # Tile the predictions for every threshold. preds_tiled = array_ops.tile(predictions_2d, [num_thresholds, 1]) # Compare predictions and threshold. pred_is_pos = math_ops.greater(preds_tiled, thresh_tiled) # Tile labels by number of thresholds label_is_pos = array_ops.tile(labels_2d, [num_thresholds, 1]) if sample_weight is not None: weights = weights_broadcast_ops.broadcast_weights( math_ops.cast(sample_weight, dtype=dtypes.float32), y_pred) weights_tiled = array_ops.tile( array_ops.reshape(weights, [1, -1]), [num_thresholds, 1]) else: weights_tiled = None update_ops = [] def weighted_assign_add(label, pred, weights, var): label_and_pred = math_ops.cast( math_ops.logical_and(label, pred), dtype=dtypes.float32) if weights is not None: label_and_pred *= weights return var.assign_add(math_ops.reduce_sum(label_and_pred, 1)) loop_vars = { ConfusionMatrix.TRUE_POSITIVES: (label_is_pos, pred_is_pos), } update_tn = ConfusionMatrix.TRUE_NEGATIVES in variables_to_update update_fp = ConfusionMatrix.FALSE_POSITIVES in variables_to_update update_fn = ConfusionMatrix.FALSE_NEGATIVES in variables_to_update if update_fn or update_tn: pred_is_neg = math_ops.logical_not(pred_is_pos) loop_vars[ConfusionMatrix.FALSE_NEGATIVES] = (label_is_pos, pred_is_neg) if update_fp or update_tn: label_is_neg = math_ops.logical_not(label_is_pos) loop_vars[ConfusionMatrix.FALSE_POSITIVES] = (label_is_neg, pred_is_pos) if update_tn: loop_vars[ConfusionMatrix.TRUE_NEGATIVES] = (label_is_neg, pred_is_neg) for matrix_cond, (label, pred) in loop_vars.items(): if matrix_cond in variables_to_update: update_ops.append( weighted_assign_add(label, pred, weights_tiled, variables_to_update[matrix_cond])) return control_flow_ops.group(update_ops) def _filter_top_k(x, k): """Filters top-k values in the last dim of x and set the rest to NEG_INF. Used for computing top-k prediction values in dense labels (which has the same shape as predictions) for recall and precision top-k metrics. Args: x: tensor with any dimensions. k: the number of values to keep. Returns: tensor with same shape and dtype as x. """ _, top_k_idx = nn_ops.top_k(x, k, sorted=False) top_k_mask = math_ops.reduce_sum( array_ops.one_hot(top_k_idx, x.shape[-1], axis=-1), axis=-2) return x * top_k_mask + NEG_INF * (1 - top_k_mask) def ragged_assert_compatible_and_get_flat_values(values, mask=None): """If ragged, it checks the compatibility and then returns the flat_values. Note: If two tensors are dense, it does not check their compatibility. Note: Although two ragged tensors with different ragged ranks could have identical overall rank and dimension sizes and hence be compatible, we do not support those cases. Args: values: A list of potentially ragged tensor of the same ragged_rank. mask: A potentially ragged tensor of the same ragged_rank as elements in Values. Returns: A tuple in which the first element is the list of tensors and the second is the mask tensor. ([Values], mask). Mask and the element in Values are equal to the flat_values of the input arguments (if they were ragged). 
""" if isinstance(values, list): is_all_ragged = \ all(isinstance(rt, ragged_tensor.RaggedTensor) for rt in values) is_any_ragged = \ any(isinstance(rt, ragged_tensor.RaggedTensor) for rt in values) else: is_all_ragged = isinstance(values, ragged_tensor.RaggedTensor) is_any_ragged = is_all_ragged if (is_all_ragged and ((mask is None) or isinstance(mask, ragged_tensor.RaggedTensor))): to_be_stripped = False if not isinstance(values, list): values = [values] to_be_stripped = True # NOTE: we leave the flat_values compatiblity to # tf.TensorShape `assert_is_compatible_with` # check if both dynamic dimensions are equal and then use the flat_values. nested_row_split_list = [rt.nested_row_splits for rt in values] assertion_list = ragged_util.assert_splits_match(nested_row_split_list) # if both are ragged sample_weights also should be ragged with same dims. if isinstance(mask, ragged_tensor.RaggedTensor): assertion_list_for_mask = ragged_util.assert_splits_match( [nested_row_split_list[0], mask.nested_row_splits]) tmp = control_flow_ops.with_dependencies(assertion_list_for_mask, mask.flat_values) mask = array_ops.expand_dims(tmp, -1) # values has at least 1 element. flat_values = [] for value in values: tmp = control_flow_ops.with_dependencies(assertion_list, value.flat_values) flat_values.append(array_ops.expand_dims(tmp, -1)) values = flat_values[0] if to_be_stripped else flat_values elif is_any_ragged: raise TypeError('One of the inputs does not have acceptable types.') # values are empty or value are not ragged and mask is ragged. elif isinstance(mask, ragged_tensor.RaggedTensor): raise TypeError('Ragged mask is not allowed with non-ragged inputs.') return values, mask
tensorflow-master
tensorflow/python/keras/utils/metrics_utils.py
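As a sketch of how `update_confusion_matrix_variables` ties the pieces above together, the snippet below accumulates true and false positives for two thresholds. Creating the accumulators with `tf.Variable` and running eagerly are assumptions made for illustration; the expected counts follow from the thresholding rules stated in the docstring above.

```python
import tensorflow as tf
from tensorflow.python.keras.utils import metrics_utils

# One accumulator slot per threshold.
tp = tf.Variable(tf.zeros([2]))
fp = tf.Variable(tf.zeros([2]))

metrics_utils.update_confusion_matrix_variables(
    {metrics_utils.ConfusionMatrix.TRUE_POSITIVES: tp,
     metrics_utils.ConfusionMatrix.FALSE_POSITIVES: fp},
    y_true=tf.constant([0., 1., 1., 0.]),
    y_pred=tf.constant([0.2, 0.8, 0.4, 0.9]),
    thresholds=[0.3, 0.5])

print(tp.numpy())  # [2., 1.]: positives predicted above 0.3 and above 0.5
print(fp.numpy())  # [1., 1.]: negatives predicted above 0.3 and above 0.5
```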
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # pylint: disable=g-import-not-at-top """Utilities related to disk I/O.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import numpy as np import six from tensorflow.python.util.tf_export import keras_export try: import h5py except ImportError: h5py = None @keras_export('keras.utils.HDF5Matrix') class HDF5Matrix(object): """Representation of HDF5 dataset to be used instead of a Numpy array. Example: ```python x_data = HDF5Matrix('input/file.hdf5', 'data') model.predict(x_data) ``` Providing `start` and `end` allows use of a slice of the dataset. Optionally, a normalizer function (or lambda) can be given. This will be called on every slice of data retrieved. Arguments: datapath: string, path to a HDF5 file dataset: string, name of the HDF5 dataset in the file specified in datapath start: int, start of desired slice of the specified dataset end: int, end of desired slice of the specified dataset normalizer: function to be called on data when retrieved Returns: An array-like HDF5 dataset. """ refs = collections.defaultdict(int) def __init__(self, datapath, dataset, start=0, end=None, normalizer=None): if h5py is None: raise ImportError('The use of HDF5Matrix requires ' 'HDF5 and h5py installed.') if datapath not in list(self.refs.keys()): f = h5py.File(datapath) self.refs[datapath] = f else: f = self.refs[datapath] self.data = f[dataset] self.start = start if end is None: self.end = self.data.shape[0] else: self.end = end self.normalizer = normalizer def __len__(self): return self.end - self.start def __getitem__(self, key): if isinstance(key, slice): start, stop = key.start, key.stop if start is None: start = 0 if stop is None: stop = self.shape[0] if stop + self.start <= self.end: idx = slice(start + self.start, stop + self.start) else: raise IndexError elif isinstance(key, (int, np.integer)): if key + self.start < self.end: idx = key + self.start else: raise IndexError elif isinstance(key, np.ndarray): if np.max(key) + self.start < self.end: idx = (self.start + key).tolist() else: raise IndexError else: # Assume list/iterable if max(key) + self.start < self.end: idx = [x + self.start for x in key] else: raise IndexError if self.normalizer is not None: return self.normalizer(self.data[idx]) else: return self.data[idx] @property def shape(self): """Gets a numpy-style shape tuple giving the dataset dimensions. Returns: A numpy-style shape tuple. """ return (self.end - self.start,) + self.data.shape[1:] @property def dtype(self): """Gets the datatype of the dataset. Returns: A numpy dtype string. """ return self.data.dtype @property def ndim(self): """Gets the number of dimensions (rank) of the dataset. Returns: An integer denoting the number of dimensions (rank) of the dataset. 
""" return self.data.ndim @property def size(self): """Gets the total dataset size (number of elements). Returns: An integer denoting the number of elements in the dataset. """ return np.prod(self.shape) def ask_to_proceed_with_overwrite(filepath): """Produces a prompt asking about overwriting a file. Arguments: filepath: the path to the file to be overwritten. Returns: True if we can proceed with overwrite, False otherwise. """ overwrite = six.moves.input('[WARNING] %s already exists - overwrite? ' '[y/n]' % (filepath)).strip().lower() while overwrite not in ('y', 'n'): overwrite = six.moves.input('Enter "y" (overwrite) or "n" ' '(cancel).').strip().lower() if overwrite == 'n': return False print('[TIP] Next time specify overwrite=True!') return True
tensorflow-master
tensorflow/python/keras/utils/io_utils.py
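A short usage sketch for `HDF5Matrix` that exercises the `start`/`end`/`normalizer` arguments described above. The file name `'data.h5'` and dataset name `'features'` are placeholders; the example assumes such a file already exists with at least 1000 rows.

```python
from tensorflow.python.keras.utils.io_utils import HDF5Matrix

# Wrap a slice of an existing HDF5 dataset and rescale it on access.
x_train = HDF5Matrix('data.h5', 'features', start=0, end=1000,
                     normalizer=lambda batch: batch / 255.0)

print(len(x_train), x_train.shape, x_train.dtype)
first_ten = x_train[0:10]  # reads (and normalizes) only the requested slice
```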
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Keras TF utils.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python import keras from tensorflow.python.eager import context from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import test_util from tensorflow.python.keras.utils import tf_utils from tensorflow.python.ops import variables from tensorflow.python.platform import test @test_util.run_all_in_graph_and_eager_modes class TestIsSymbolicTensor(test.TestCase): def test_default_behavior(self): if context.executing_eagerly(): self.assertFalse(tf_utils.is_symbolic_tensor( variables.Variable(name='blah', initial_value=0.))) self.assertFalse(tf_utils.is_symbolic_tensor( ops.convert_to_tensor(0.))) self.assertFalse(tf_utils.is_symbolic_tensor( sparse_tensor.SparseTensor( indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4]))) else: self.assertTrue(tf_utils.is_symbolic_tensor( variables.Variable(name='blah', initial_value=0.))) self.assertTrue(tf_utils.is_symbolic_tensor( ops.convert_to_tensor(0.))) self.assertTrue(tf_utils.is_symbolic_tensor( sparse_tensor.SparseTensor( indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4]))) def test_works_with_registered(self): class CustomClass(object): def value(self): return ops.convert_to_tensor(42.) ops.register_tensor_conversion_function( CustomClass, lambda value, **_: value.value()) tf_utils.register_symbolic_tensor_type(CustomClass) if context.executing_eagerly(): self.assertFalse(tf_utils.is_symbolic_tensor( variables.Variable(name='blah', initial_value=0.))) self.assertFalse(tf_utils.is_symbolic_tensor( ops.convert_to_tensor(0.))) self.assertFalse(tf_utils.is_symbolic_tensor( sparse_tensor.SparseTensor( indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4]))) self.assertFalse(tf_utils.is_symbolic_tensor(CustomClass())) else: self.assertTrue(tf_utils.is_symbolic_tensor( variables.Variable(name='blah', initial_value=0.))) self.assertTrue(tf_utils.is_symbolic_tensor( ops.convert_to_tensor(0.))) self.assertTrue(tf_utils.is_symbolic_tensor( sparse_tensor.SparseTensor( indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4]))) self.assertTrue(tf_utils.is_symbolic_tensor(CustomClass())) def test_enables_nontensor_plumbing(self): # Setup. class Foo(object): def __init__(self, input_): self._input = input_ self.value = ops.convert_to_tensor(42.) 
@property def dtype(self): return self.value.dtype ops.register_tensor_conversion_function( Foo, lambda x, *args, **kwargs: x.value) tf_utils.register_symbolic_tensor_type(Foo) class PlumbingLayer(keras.layers.Lambda): def __init__(self, fn, **kwargs): def _fn(*fargs, **fkwargs): d = fn(*fargs, **fkwargs) x = ops.convert_to_tensor(d) d.shape = x.shape d.get_shape = x.get_shape return d, x super(PlumbingLayer, self).__init__(_fn, **kwargs) self._enter_dunder_call = False def __call__(self, inputs, *args, **kwargs): self._enter_dunder_call = True d, _ = super(PlumbingLayer, self).__call__(inputs, *args, **kwargs) self._enter_dunder_call = False return d def call(self, inputs, *args, **kwargs): d, v = super(PlumbingLayer, self).call(inputs, *args, **kwargs) if self._enter_dunder_call: return d, v return d # User-land. model = keras.Sequential([ keras.layers.InputLayer([]), PlumbingLayer(Foo), # Makes a `Foo` object. ]) # Let's ensure Keras graph history is preserved by composing the models. model = keras.Model(model.inputs, model(model.outputs)) # Now we instantiate the model and verify we have a `Foo` object, not a # `Tensor`. y = model(ops.convert_to_tensor(7.)) self.assertIsInstance(y, Foo) # Confirm that (custom) loss sees `Foo` instance, not Tensor. obtained_prediction_box = [None] def custom_loss(y_obs, y_pred): del y_obs obtained_prediction_box[0] = y_pred return y_pred # Apparently `compile` calls the loss function enough to trigger the # side-effect. model.compile('SGD', loss=custom_loss) self.assertIsInstance(obtained_prediction_box[0], Foo) class ConvertInnerNodeDataTest(test.TestCase): def test_convert_inner_node_data(self): data = tf_utils.convert_inner_node_data((tf_utils.ListWrapper(['l', 2, 3]), tf_utils.ListWrapper(['l', 5, 6]))) self.assertEqual(data, (['l', 2, 3], ['l', 5, 6])) data = tf_utils.convert_inner_node_data(((['l', 2, 3], ['l', 5, 6])), wrap=True) self.assertTrue(all(isinstance(ele, tf_utils.ListWrapper) for ele in data)) if __name__ == '__main__': test.main()
tensorflow-master
tensorflow/python/keras/utils/tf_utils_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilities for multi-gpu training.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import ops from tensorflow.python.keras import backend as K from tensorflow.python.keras.engine.training import Model from tensorflow.python.ops import array_ops from tensorflow.python.util.tf_export import keras_export def _get_available_devices(): return [x.name for x in K.get_session().list_devices()] def _normalize_device_name(name): name = '/' + name.lower().split('device:')[1] return name @keras_export('keras.utils.multi_gpu_model') def multi_gpu_model(model, gpus, cpu_merge=True, cpu_relocation=False): """Replicates a model on different GPUs. Specifically, this function implements single-machine multi-GPU data parallelism. It works in the following way: - Divide the model's input(s) into multiple sub-batches. - Apply a model copy on each sub-batch. Every model copy is executed on a dedicated GPU. - Concatenate the results (on CPU) into one big batch. E.g. if your `batch_size` is 64 and you use `gpus=2`, then we will divide the input into 2 sub-batches of 32 samples, process each sub-batch on one GPU, then return the full batch of 64 processed samples. This induces quasi-linear speedup on up to 8 GPUs. This function is only available with the TensorFlow backend for the time being. Arguments: model: A Keras model instance. To avoid OOM errors, this model could have been built on CPU, for instance (see usage example below). gpus: Integer >= 2, number of on GPUs on which to create model replicas. cpu_merge: A boolean value to identify whether to force merging model weights under the scope of the CPU or not. cpu_relocation: A boolean value to identify whether to create the model's weights under the scope of the CPU. If the model is not defined under any preceding device scope, you can still rescue it by activating this option. Returns: A Keras `Model` instance which can be used just like the initial `model` argument, but which distributes its workload on multiple GPUs. Example 1: Training models with weights merge on CPU ```python import tensorflow as tf from keras.applications import Xception from keras.utils import multi_gpu_model import numpy as np num_samples = 1000 height = 224 width = 224 num_classes = 1000 # Instantiate the base model (or "template" model). # We recommend doing this with under a CPU device scope, # so that the model's weights are hosted on CPU memory. # Otherwise they may end up hosted on a GPU, which would # complicate weight sharing. with tf.device('/cpu:0'): model = Xception(weights=None, input_shape=(height, width, 3), classes=num_classes) # Replicates the model on 8 GPUs. # This assumes that your machine has 8 available GPUs. 
parallel_model = multi_gpu_model(model, gpus=8) parallel_model.compile(loss='categorical_crossentropy', optimizer='rmsprop') # Generate dummy data. x = np.random.random((num_samples, height, width, 3)) y = np.random.random((num_samples, num_classes)) # This `fit` call will be distributed on 8 GPUs. # Since the batch size is 256, each GPU will process 32 samples. parallel_model.fit(x, y, epochs=20, batch_size=256) # Save model via the template model (which shares the same weights): model.save('my_model.h5') ``` Example 2: Training models with weights merge on CPU using cpu_relocation ```python .. # Not needed to change the device scope for model definition: model = Xception(weights=None, ..) try: model = multi_gpu_model(model, cpu_relocation=True) print("Training using multiple GPUs..") except: print("Training using single GPU or CPU..") model.compile(..) .. ``` Example 3: Training models with weights merge on GPU (recommended for NV-link) ```python .. # Not needed to change the device scope for model definition: model = Xception(weights=None, ..) try: model = multi_gpu_model(model, cpu_merge=False) print("Training using multiple GPUs..") except: print("Training using single GPU or CPU..") model.compile(..) .. ``` Raises: ValueError: if the `gpus` argument does not match available devices. """ # pylint: disable=g-import-not-at-top from tensorflow.python.keras.layers.core import Lambda from tensorflow.python.keras.layers.merge import concatenate if isinstance(gpus, (list, tuple)): if len(gpus) <= 1: raise ValueError('For multi-gpu usage to be effective, ' 'call `multi_gpu_model` with `len(gpus) >= 2`. ' 'Received: `gpus=%s`' % gpus) num_gpus = len(gpus) target_gpu_ids = gpus else: if gpus <= 1: raise ValueError('For multi-gpu usage to be effective, ' 'call `multi_gpu_model` with `gpus >= 2`. ' 'Received: `gpus=%s`' % gpus) num_gpus = gpus target_gpu_ids = range(num_gpus) target_devices = ['/cpu:0'] + ['/gpu:%d' % i for i in target_gpu_ids] available_devices = _get_available_devices() available_devices = [ _normalize_device_name(name) for name in available_devices ] for device in target_devices: if device not in available_devices: raise ValueError('To call `multi_gpu_model` with `gpus=%s`, ' 'we expect the following devices to be available: %s. ' 'However this machine only has: %s. ' 'Try reducing `gpus`.' % (gpus, target_devices, available_devices)) def get_slice(data, i, parts): """Slice an array into `parts` slices and return slice `i`. Arguments: data: array to slice. i: index of slice to return. parts: number of slices to make. Returns: Slice `i` of `data`. """ shape = array_ops.shape(data) batch_size = shape[:1] input_shape = shape[1:] step = batch_size // parts if i == parts - 1: size = batch_size - step * i else: size = step size = array_ops.concat([size, input_shape], axis=0) stride = array_ops.concat([step, input_shape * 0], axis=0) start = stride * i return array_ops.slice(data, start, size) # Relocate the model definition under CPU device scope if needed if cpu_relocation: from tensorflow.python.keras.models import clone_model # pylint: disable=g-import-not-at-top with ops.device('/cpu:0'): model = clone_model(model) all_outputs = [] for i in range(len(model.outputs)): all_outputs.append([]) # Place a copy of the model on each GPU, # each getting a slice of the inputs. for i, gpu_id in enumerate(target_gpu_ids): with ops.device('/gpu:%d' % gpu_id): with K.name_scope('replica_%d' % gpu_id): inputs = [] # Retrieve a slice of the input. 
for x in model.inputs: input_shape = tuple(x.shape.as_list())[1:] slice_i = Lambda( get_slice, output_shape=input_shape, arguments={ 'i': i, 'parts': num_gpus })( x) inputs.append(slice_i) # Apply model on slice # (creating a model replica on the target device). outputs = model(inputs) if not isinstance(outputs, list): outputs = [outputs] # Save the outputs for merging back together later. for o in range(len(outputs)): all_outputs[o].append(outputs[o]) # Deduplicate output names to handle Siamese networks. occurrences = {} for n in model.output_names: if n not in occurrences: occurrences[n] = 1 else: occurrences[n] += 1 conflict_counter = {n: 0 for n, count in occurrences.items() if count > 1} output_names = [] for n in model.output_names: if n in conflict_counter: conflict_counter[n] += 1 n += '_%d' % conflict_counter[n] output_names.append(n) # Merge outputs under expected scope. with ops.device('/cpu:0' if cpu_merge else '/gpu:%d' % target_gpu_ids[0]): merged = [] for name, outputs in zip(output_names, all_outputs): merged.append(concatenate(outputs, axis=0, name=name)) return Model(model.inputs, merged)
tensorflow-master
tensorflow/python/keras/utils/multi_gpu_utils.py
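The `get_slice` closure above carves each input batch into one contiguous sub-batch per replica, with the last replica absorbing any remainder. The standalone illustration below mirrors that arithmetic with made-up tensor values; it is not part of the file above.

```python
import tensorflow as tf

data = tf.reshape(tf.range(14.0), [7, 2])  # batch of 7 samples, 2 features
parts = 2
batch_size = tf.shape(data)[0]
step = batch_size // parts                 # 3 samples per replica
for i in range(parts):
  # Last replica takes whatever is left over.
  size = batch_size - step * i if i == parts - 1 else step
  start = step * i
  sub_batch = tf.slice(data, [start, 0], [size, 2])
  print(i, sub_batch.shape)                # replica 0 gets 3 rows, replica 1 gets 4
```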
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for conv_utils.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import itertools from absl.testing import parameterized import numpy as np from tensorflow.python.keras.utils import conv_utils from tensorflow.python.platform import test def _get_const_output_shape(input_shape, dim): return tuple([min(d, dim) for d in input_shape]) input_shapes = [ (0,), (0, 0), (1,), (2,), (3,), (1, 0), (0, 3), (1, 1), (1, 2), (3, 1), (2, 2), (3, 3), (1, 0, 1), (5, 2, 3), (3, 5, 6, 7, 0), (3, 2, 2, 4, 4), (1, 2, 3, 4, 7, 2), ] class TestBasicConvUtilsTest(test.TestCase): def test_convert_data_format(self): self.assertEqual('NCDHW', conv_utils.convert_data_format( 'channels_first', 5)) self.assertEqual('NCHW', conv_utils.convert_data_format( 'channels_first', 4)) self.assertEqual('NCW', conv_utils.convert_data_format('channels_first', 3)) self.assertEqual('NHWC', conv_utils.convert_data_format('channels_last', 4)) self.assertEqual('NWC', conv_utils.convert_data_format('channels_last', 3)) self.assertEqual('NDHWC', conv_utils.convert_data_format( 'channels_last', 5)) with self.assertRaises(ValueError): conv_utils.convert_data_format('invalid', 2) def test_normalize_tuple(self): self.assertEqual((2, 2, 2), conv_utils.normalize_tuple(2, n=3, name='strides')) self.assertEqual((2, 1, 2), conv_utils.normalize_tuple((2, 1, 2), n=3, name='strides')) with self.assertRaises(ValueError): conv_utils.normalize_tuple((2, 1), n=3, name='strides') with self.assertRaises(ValueError): conv_utils.normalize_tuple(None, n=3, name='strides') def test_normalize_data_format(self): self.assertEqual('channels_last', conv_utils.normalize_data_format('Channels_Last')) self.assertEqual('channels_first', conv_utils.normalize_data_format('CHANNELS_FIRST')) with self.assertRaises(ValueError): conv_utils.normalize_data_format('invalid') def test_normalize_padding(self): self.assertEqual('same', conv_utils.normalize_padding('SAME')) self.assertEqual('valid', conv_utils.normalize_padding('VALID')) with self.assertRaises(ValueError): conv_utils.normalize_padding('invalid') def test_conv_output_length(self): self.assertEqual(4, conv_utils.conv_output_length(4, 2, 'same', 1, 1)) self.assertEqual(2, conv_utils.conv_output_length(4, 2, 'same', 2, 1)) self.assertEqual(3, conv_utils.conv_output_length(4, 2, 'valid', 1, 1)) self.assertEqual(2, conv_utils.conv_output_length(4, 2, 'valid', 2, 1)) self.assertEqual(5, conv_utils.conv_output_length(4, 2, 'full', 1, 1)) self.assertEqual(3, conv_utils.conv_output_length(4, 2, 'full', 2, 1)) self.assertEqual(2, conv_utils.conv_output_length(5, 2, 'valid', 2, 2)) def test_conv_input_length(self): self.assertEqual(3, conv_utils.conv_input_length(4, 2, 'same', 1)) self.assertEqual(2, conv_utils.conv_input_length(2, 2, 'same', 2)) self.assertEqual(4, 
conv_utils.conv_input_length(3, 2, 'valid', 1)) self.assertEqual(4, conv_utils.conv_input_length(2, 2, 'valid', 2)) self.assertEqual(3, conv_utils.conv_input_length(4, 2, 'full', 1)) self.assertEqual(4, conv_utils.conv_input_length(3, 2, 'full', 2)) def test_deconv_output_length(self): self.assertEqual(4, conv_utils.deconv_output_length(4, 2, 'same', stride=1)) self.assertEqual(8, conv_utils.deconv_output_length(4, 2, 'same', stride=2)) self.assertEqual(5, conv_utils.deconv_output_length( 4, 2, 'valid', stride=1)) self.assertEqual(8, conv_utils.deconv_output_length( 4, 2, 'valid', stride=2)) self.assertEqual(3, conv_utils.deconv_output_length(4, 2, 'full', stride=1)) self.assertEqual(6, conv_utils.deconv_output_length(4, 2, 'full', stride=2)) self.assertEqual( 5, conv_utils.deconv_output_length( 4, 2, 'same', output_padding=2, stride=1)) self.assertEqual( 7, conv_utils.deconv_output_length( 4, 2, 'same', output_padding=1, stride=2)) self.assertEqual( 7, conv_utils.deconv_output_length( 4, 2, 'valid', output_padding=2, stride=1)) self.assertEqual( 9, conv_utils.deconv_output_length( 4, 2, 'valid', output_padding=1, stride=2)) self.assertEqual( 5, conv_utils.deconv_output_length( 4, 2, 'full', output_padding=2, stride=1)) self.assertEqual( 7, conv_utils.deconv_output_length( 4, 2, 'full', output_padding=1, stride=2)) self.assertEqual( 5, conv_utils.deconv_output_length( 4, 2, 'same', output_padding=1, stride=1, dilation=2)) self.assertEqual( 12, conv_utils.deconv_output_length( 4, 2, 'valid', output_padding=2, stride=2, dilation=3)) self.assertEqual( 6, conv_utils.deconv_output_length( 4, 2, 'full', output_padding=2, stride=2, dilation=3)) @parameterized.parameters(input_shapes) class TestConvUtils(test.TestCase, parameterized.TestCase): def test_conv_kernel_mask_fc(self, *input_shape): padding = 'valid' kernel_shape = input_shape ndims = len(input_shape) strides = (1,) * ndims output_shape = _get_const_output_shape(input_shape, dim=1) mask = np.ones(input_shape + output_shape, np.bool) self.assertAllEqual( mask, conv_utils.conv_kernel_mask( input_shape, kernel_shape, strides, padding ) ) def test_conv_kernel_mask_diag(self, *input_shape): ndims = len(input_shape) kernel_shape = (1,) * ndims strides = (1,) * ndims for padding in ['valid', 'same']: mask = np.identity(int(np.prod(input_shape)), np.bool) mask = np.reshape(mask, input_shape * 2) self.assertAllEqual( mask, conv_utils.conv_kernel_mask( input_shape, kernel_shape, strides, padding ) ) def test_conv_kernel_mask_full_stride(self, *input_shape): padding = 'valid' ndims = len(input_shape) kernel_shape = (1,) * ndims strides = tuple([max(d, 1) for d in input_shape]) output_shape = _get_const_output_shape(input_shape, dim=1) mask = np.zeros(input_shape + output_shape, np.bool) if all(d > 0 for d in mask.shape): mask[(0,) * len(output_shape)] = True self.assertAllEqual( mask, conv_utils.conv_kernel_mask( input_shape, kernel_shape, strides, padding ) ) def test_conv_kernel_mask_almost_full_stride(self, *input_shape): padding = 'valid' ndims = len(input_shape) kernel_shape = (1,) * ndims strides = tuple([max(d - 1, 1) for d in input_shape]) output_shape = _get_const_output_shape(input_shape, dim=2) mask = np.zeros(input_shape + output_shape, np.bool) if all(d > 0 for d in mask.shape): for in_position in itertools.product(*[[0, d - 1] for d in input_shape]): out_position = tuple([min(p, 1) for p in in_position]) mask[in_position + out_position] = True self.assertAllEqual( mask, conv_utils.conv_kernel_mask( input_shape, kernel_shape, strides, 
padding ) ) def test_conv_kernel_mask_rect_kernel(self, *input_shape): padding = 'valid' ndims = len(input_shape) strides = (1,) * ndims for d in range(ndims): kernel_shape = [1] * ndims kernel_shape[d] = input_shape[d] output_shape = list(input_shape) output_shape[d] = min(1, input_shape[d]) mask = np.identity(int(np.prod(input_shape)), np.bool) mask = np.reshape(mask, input_shape * 2) for p in itertools.product(*[range(input_shape[dim]) for dim in range(ndims)]): p = list(p) p[d] = slice(None) mask[p * 2] = True mask = np.take(mask, range(0, min(1, input_shape[d])), ndims + d) self.assertAllEqual( mask, conv_utils.conv_kernel_mask( input_shape, kernel_shape, strides, padding ) ) def test_conv_kernel_mask_wrong_padding(self, *input_shape): ndims = len(input_shape) kernel_shape = (1,) * ndims strides = (1,) * ndims conv_utils.conv_kernel_mask( input_shape, kernel_shape, strides, 'valid' ) conv_utils.conv_kernel_mask( input_shape, kernel_shape, strides, 'same' ) self.assertRaises(NotImplementedError, conv_utils.conv_kernel_mask, input_shape, kernel_shape, strides, 'full') def test_conv_kernel_mask_wrong_dims(self, *input_shape): kernel_shape = 1 strides = 1 conv_utils.conv_kernel_mask( input_shape, kernel_shape, strides, 'valid' ) ndims = len(input_shape) kernel_shape = (2,) * (ndims + 1) self.assertRaises(ValueError, conv_utils.conv_kernel_mask, input_shape, kernel_shape, strides, 'same') strides = (1,) * ndims self.assertRaises(ValueError, conv_utils.conv_kernel_mask, input_shape, kernel_shape, strides, 'valid') kernel_shape = (1,) * ndims strides = (2,) * (ndims - 1) self.assertRaises(ValueError, conv_utils.conv_kernel_mask, input_shape, kernel_shape, strides, 'valid') strides = (2,) * ndims conv_utils.conv_kernel_mask( input_shape, kernel_shape, strides, 'valid' ) if __name__ == '__main__': test.main()
tensorflow-master
tensorflow/python/keras/utils/conv_utils_test.py
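For reference alongside the `conv_output_length` assertions above, a tiny worked example of the output-length arithmetic (positional arguments match the calls in the test file; the input length 10 and kernel size 3 are arbitrary choices):

```python
from tensorflow.python.keras.utils import conv_utils

# 'valid' drops (kernel - 1) positions before applying the stride:
print(conv_utils.conv_output_length(10, 3, 'valid', 2))  # (10 - 3 + 1 + 1) // 2 = 4
# 'same' keeps the input length, then strides:
print(conv_utils.conv_output_length(10, 3, 'same', 2))   # (10 + 1) // 2 = 5
```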
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # pylint: disable=protected-access # pylint: disable=g-import-not-at-top """Utilities related to model visualization.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import sys from tensorflow.python.util import nest from tensorflow.python.util.tf_export import keras_export try: # pydot-ng is a fork of pydot that is better maintained. import pydot_ng as pydot except ImportError: # pydotplus is an improved version of pydot try: import pydotplus as pydot except ImportError: # Fall back on pydot if necessary. try: import pydot except ImportError: pydot = None def check_pydot(): """Returns True if PyDot and Graphviz are available.""" if pydot is None: return False try: # Attempt to create an image of a blank graph # to check the pydot/graphviz installation. pydot.Dot.create(pydot.Dot()) return True except OSError: return False def is_wrapped_model(layer): from tensorflow.python.keras.engine import network from tensorflow.python.keras.layers import wrappers return (isinstance(layer, wrappers.Wrapper) and isinstance(layer.layer, network.Network)) def add_edge(dot, src, dst): if not dot.get_edge(src, dst): dot.add_edge(pydot.Edge(src, dst)) @keras_export('keras.utils.model_to_dot') def model_to_dot(model, show_shapes=False, show_layer_names=True, rankdir='TB', expand_nested=False, dpi=96, subgraph=False): """Convert a Keras model to dot format. Arguments: model: A Keras model instance. show_shapes: whether to display shape information. show_layer_names: whether to display layer names. rankdir: `rankdir` argument passed to PyDot, a string specifying the format of the plot: 'TB' creates a vertical plot; 'LR' creates a horizontal plot. expand_nested: whether to expand nested models into clusters. dpi: Dots per inch. subgraph: whether to return a `pydot.Cluster` instance. Returns: A `pydot.Dot` instance representing the Keras model or a `pydot.Cluster` instance representing nested model if `subgraph=True`. Raises: ImportError: if graphviz or pydot are not available. """ from tensorflow.python.keras.layers import wrappers from tensorflow.python.keras.engine import sequential from tensorflow.python.keras.engine import network if not check_pydot(): if 'IPython.core.magics.namespace' in sys.modules: # We don't raise an exception here in order to avoid crashing notebook # tests where graphviz is not available. print('Failed to import pydot. You must install pydot' ' and graphviz for `pydotprint` to work.') return else: raise ImportError('Failed to import pydot. 
You must install pydot' ' and graphviz for `pydotprint` to work.') if subgraph: dot = pydot.Cluster(style='dashed', graph_name=model.name) dot.set('label', model.name) dot.set('labeljust', 'l') else: dot = pydot.Dot() dot.set('rankdir', rankdir) dot.set('concentrate', True) dot.set('dpi', dpi) dot.set_node_defaults(shape='record') sub_n_first_node = {} sub_n_last_node = {} sub_w_first_node = {} sub_w_last_node = {} if not model._is_graph_network: node = pydot.Node(str(id(model)), label=model.name) dot.add_node(node) return dot elif isinstance(model, sequential.Sequential): if not model.built: model.build() layers = model._layers # Create graph nodes. for i, layer in enumerate(layers): layer_id = str(id(layer)) # Append a wrapped layer's label to node's label, if it exists. layer_name = layer.name class_name = layer.__class__.__name__ if isinstance(layer, wrappers.Wrapper): if expand_nested and isinstance(layer.layer, network.Network): submodel_wrapper = model_to_dot(layer.layer, show_shapes, show_layer_names, rankdir, expand_nested, subgraph=True) # sub_w : submodel_wrapper sub_w_nodes = submodel_wrapper.get_nodes() sub_w_first_node[layer.layer.name] = sub_w_nodes[0] sub_w_last_node[layer.layer.name] = sub_w_nodes[-1] dot.add_subgraph(submodel_wrapper) else: layer_name = '{}({})'.format(layer_name, layer.layer.name) child_class_name = layer.layer.__class__.__name__ class_name = '{}({})'.format(class_name, child_class_name) if expand_nested and isinstance(layer, network.Network): submodel_not_wrapper = model_to_dot(layer, show_shapes, show_layer_names, rankdir, expand_nested, subgraph=True) # sub_n : submodel_not_wrapper sub_n_nodes = submodel_not_wrapper.get_nodes() sub_n_first_node[layer.name] = sub_n_nodes[0] sub_n_last_node[layer.name] = sub_n_nodes[-1] dot.add_subgraph(submodel_not_wrapper) # Create node's label. if show_layer_names: label = '{}: {}'.format(layer_name, class_name) else: label = class_name # Rebuild the label as a table including input/output shapes. if show_shapes: def format_shape(shape): return str(shape).replace(str(None), '?') try: outputlabels = format_shape(layer.output_shape) except AttributeError: outputlabels = '?' if hasattr(layer, 'input_shape'): inputlabels = format_shape(layer.input_shape) elif hasattr(layer, 'input_shapes'): inputlabels = ', '.join( [format_shape(ishape) for ishape in layer.input_shapes]) else: inputlabels = '?' label = '%s\n|{input:|output:}|{{%s}|{%s}}' % (label, inputlabels, outputlabels) if not expand_nested or not isinstance(layer, network.Network): node = pydot.Node(layer_id, label=label) dot.add_node(node) # Connect nodes with edges. 
for layer in layers: layer_id = str(id(layer)) for i, node in enumerate(layer._inbound_nodes): node_key = layer.name + '_ib-' + str(i) if node_key in model._network_nodes: for inbound_layer in nest.flatten(node.inbound_layers): inbound_layer_id = str(id(inbound_layer)) if not expand_nested: assert dot.get_node(inbound_layer_id) assert dot.get_node(layer_id) add_edge(dot, inbound_layer_id, layer_id) else: # if inbound_layer is not Model or wrapped Model if (not isinstance(inbound_layer, network.Network) and not is_wrapped_model(inbound_layer)): # if current layer is not Model or wrapped Model if (not isinstance(layer, network.Network) and not is_wrapped_model(layer)): assert dot.get_node(inbound_layer_id) assert dot.get_node(layer_id) add_edge(dot, inbound_layer_id, layer_id) # if current layer is Model elif isinstance(layer, network.Network): add_edge(dot, inbound_layer_id, sub_n_first_node[layer.name].get_name()) # if current layer is wrapped Model elif is_wrapped_model(layer): add_edge(dot, inbound_layer_id, layer_id) name = sub_w_first_node[layer.layer.name].get_name() add_edge(dot, layer_id, name) # if inbound_layer is Model elif isinstance(inbound_layer, network.Network): name = sub_n_last_node[inbound_layer.name].get_name() if isinstance(layer, network.Network): output_name = sub_n_first_node[layer.name].get_name() add_edge(dot, name, output_name) else: add_edge(dot, name, layer_id) # if inbound_layer is wrapped Model elif is_wrapped_model(inbound_layer): inbound_layer_name = inbound_layer.layer.name add_edge(dot, sub_w_last_node[inbound_layer_name].get_name(), layer_id) return dot @keras_export('keras.utils.plot_model') def plot_model(model, to_file='model.png', show_shapes=False, show_layer_names=True, rankdir='TB', expand_nested=False, dpi=96): """Converts a Keras model to dot format and save to a file. Arguments: model: A Keras model instance to_file: File name of the plot image. show_shapes: whether to display shape information. show_layer_names: whether to display layer names. rankdir: `rankdir` argument passed to PyDot, a string specifying the format of the plot: 'TB' creates a vertical plot; 'LR' creates a horizontal plot. expand_nested: Whether to expand nested models into clusters. dpi: Dots per inch. Returns: A Jupyter notebook Image object if Jupyter is installed. This enables in-line display of the model plots in notebooks. """ dot = model_to_dot(model, show_shapes=show_shapes, show_layer_names=show_layer_names, rankdir=rankdir, expand_nested=expand_nested, dpi=dpi) if dot is None: return _, extension = os.path.splitext(to_file) if not extension: extension = 'png' else: extension = extension[1:] # Save image to disk. dot.write(to_file, format=extension) # Return the image as a Jupyter Image object, to be displayed in-line. # Note that we cannot easily detect whether the code is running in a # notebook, and thus we always return the Image if Jupyter is available. try: from IPython import display return display.Image(filename=to_file) except ImportError: pass
tensorflow-master
tensorflow/python/keras/utils/vis_utils.py
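A minimal sketch of calling `plot_model` on a small model. It assumes pydot and graphviz are installed; otherwise `model_to_dot` reports the import failure as shown above. The layer names and file name are arbitrary.

```python
from tensorflow.python import keras
from tensorflow.python.keras.utils import vis_utils

model = keras.Sequential([
    keras.layers.Dense(4, input_shape=(8,), name='hidden'),
    keras.layers.Dense(1, name='out'),
])
# Writes small_model.png and, under Jupyter, returns an inline Image.
vis_utils.plot_model(model, to_file='small_model.png',
                     show_shapes=True, rankdir='LR')
```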
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for multi-gpu training utilities.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python import data from tensorflow.python import keras from tensorflow.python.platform import test def check_if_compatible_devices(gpus=2): available_devices = [ keras.utils.multi_gpu_utils._normalize_device_name(name) for name in keras.utils.multi_gpu_utils._get_available_devices() ] if '/gpu:%d' % (gpus - 1) not in available_devices: return False return True class TestMultiGPUModel(test.TestCase): def test_multi_gpu_test_simple_model(self): gpus = 2 num_samples = 1000 input_dim = 10 output_dim = 1 hidden_dim = 10 epochs = 2 target_gpu_id = [0, 1] if not check_if_compatible_devices(gpus=gpus): return with self.cached_session(): model = keras.models.Sequential() model.add(keras.layers.Dense(hidden_dim, input_shape=(input_dim,))) model.add(keras.layers.Dense(output_dim)) x = np.random.random((num_samples, input_dim)) y = np.random.random((num_samples, output_dim)) parallel_model = keras.utils.multi_gpu_model(model, gpus=gpus) parallel_model.compile(loss='mse', optimizer='rmsprop') parallel_model.fit(x, y, epochs=epochs) parallel_model = keras.utils.multi_gpu_model(model, gpus=target_gpu_id) parallel_model.compile(loss='mse', optimizer='rmsprop') parallel_model.fit(x, y, epochs=epochs) def test_multi_gpu_test_multi_io_model(self): gpus = 2 num_samples = 1000 input_dim_a = 10 input_dim_b = 5 output_dim_a = 1 output_dim_b = 2 hidden_dim = 10 epochs = 2 target_gpu_id = [0, 1] if not check_if_compatible_devices(gpus=gpus): return with self.cached_session(): input_a = keras.Input((input_dim_a,)) input_b = keras.Input((input_dim_b,)) a = keras.layers.Dense(hidden_dim)(input_a) b = keras.layers.Dense(hidden_dim)(input_b) c = keras.layers.concatenate([a, b]) output_a = keras.layers.Dense(output_dim_a)(c) output_b = keras.layers.Dense(output_dim_b)(c) model = keras.models.Model([input_a, input_b], [output_a, output_b]) a_x = np.random.random((num_samples, input_dim_a)) b_x = np.random.random((num_samples, input_dim_b)) a_y = np.random.random((num_samples, output_dim_a)) b_y = np.random.random((num_samples, output_dim_b)) parallel_model = keras.utils.multi_gpu_model(model, gpus=gpus) parallel_model.compile(loss='mse', optimizer='rmsprop') parallel_model.fit([a_x, b_x], [a_y, b_y], epochs=epochs) parallel_model = keras.utils.multi_gpu_model(model, gpus=target_gpu_id) parallel_model.compile(loss='mse', optimizer='rmsprop') parallel_model.fit([a_x, b_x], [a_y, b_y], epochs=epochs) def test_multi_gpu_test_invalid_devices(self): if not check_if_compatible_devices(gpus=2): return with self.cached_session(): input_shape = (1000, 10) model = keras.models.Sequential() model.add(keras.layers.Dense(10, activation='relu', input_shape=input_shape[1:])) 
model.add(keras.layers.Dense(1, activation='sigmoid')) model.compile(loss='mse', optimizer='rmsprop') x = np.random.random(input_shape) y = np.random.random((input_shape[0], 1)) with self.assertRaises(ValueError): parallel_model = keras.utils.multi_gpu_model( model, gpus=len(keras.backend._get_available_gpus()) + 1) parallel_model.fit(x, y, epochs=2) with self.assertRaises(ValueError): parallel_model = keras.utils.multi_gpu_model( model, gpus=[0, 2, 4, 6, 8]) parallel_model.fit(x, y, epochs=2) with self.assertRaises(ValueError): parallel_model = keras.utils.multi_gpu_model(model, gpus=1) parallel_model.fit(x, y, epochs=2) with self.assertRaises(ValueError): parallel_model = keras.utils.multi_gpu_model(model, gpus=[0]) parallel_model.fit(x, y, epochs=2) def test_nested_model_with_tensor_input(self): gpus = 2 input_dim = 10 shape = (input_dim,) num_samples = 16 num_classes = 10 if not check_if_compatible_devices(gpus=gpus): return with self.cached_session(): input_shape = (num_samples,) + shape x_train = np.random.randint(0, 255, input_shape) y_train = np.random.randint(0, num_classes, (input_shape[0],)) y_train = keras.utils.to_categorical(y_train, num_classes) x_train = x_train.astype('float32') y_train = y_train.astype('float32') dataset = data.Dataset.from_tensor_slices((x_train, y_train)) dataset = dataset.repeat() dataset = dataset.batch(4) iterator = data.make_one_shot_iterator(dataset) inputs, targets = iterator.get_next() input_tensor = keras.layers.Input(tensor=inputs) model = keras.models.Sequential() model.add(keras.layers.Dense(3, input_shape=(input_dim,))) model.add(keras.layers.Dense(num_classes)) output = model(input_tensor) outer_model = keras.Model(input_tensor, output) parallel_model = keras.utils.multi_gpu_model(outer_model, gpus=gpus) parallel_model.compile( loss='categorical_crossentropy', optimizer=keras.optimizers.RMSprop(lr=0.0001, decay=1e-6), metrics=['accuracy'], target_tensors=[targets]) parallel_model.fit(epochs=1, steps_per_epoch=3) def test_multi_gpu_with_multi_input_layers(self): gpus = 2 if not check_if_compatible_devices(gpus=gpus): return with self.cached_session(): inputs = keras.Input((4, 3)) init_state = keras.Input((3,)) outputs = keras.layers.SimpleRNN( 3, return_sequences=True)(inputs, initial_state=init_state) x = [np.random.randn(2, 4, 3), np.random.randn(2, 3)] y = np.random.randn(2, 4, 3) model = keras.Model([inputs, init_state], outputs) parallel_model = keras.utils.multi_gpu_model(model, gpus=gpus) parallel_model.compile(loss='mean_squared_error', optimizer='adam') parallel_model.train_on_batch(x, y) def test_multi_gpu_with_siamese_network(self): gpus = 2 if not check_if_compatible_devices(gpus=gpus): return with self.cached_session(): input_shape = (3,) nested_model = keras.models.Sequential([ keras.layers.Dense(32, input_shape=input_shape), keras.layers.Dense(1) ], name='nested') input1 = keras.Input(input_shape) input2 = keras.Input(input_shape) score1 = nested_model(input1) score2 = nested_model(input2) score_sum = keras.layers.Add(name='add')([score1, score2]) siamese = keras.models.Model(inputs=[input1, input2], outputs=[score_sum, score1, score2], name='siamese') parallel_siamese = keras.utils.multi_gpu_model(siamese, gpus) self.assertEqual(parallel_siamese.output_names, ['add', 'nested', 'nested_1']) if __name__ == '__main__': test.main()
tensorflow-master
tensorflow/python/keras/utils/multi_gpu_utils_test.py
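For orientation, a minimal usage sketch of the utility this test exercises; it assumes a machine with at least two visible GPUs and the TF 1.x-era `keras.utils.multi_gpu_model` API, and the helper name below is illustrative rather than part of the test file:

import numpy as np
from tensorflow.python import keras

# A minimal mirror of test_multi_gpu_test_simple_model: build a template
# model on one device, replicate it across 2 GPUs, and train on random data.
def train_on_two_gpus(num_samples=1000, input_dim=10, hidden_dim=10):
  model = keras.models.Sequential([
      keras.layers.Dense(hidden_dim, input_shape=(input_dim,)),
      keras.layers.Dense(1),
  ])
  # Each incoming batch is split across the GPU replicas and the sub-batch
  # results are merged back on the CPU.
  parallel_model = keras.utils.multi_gpu_model(model, gpus=2)
  parallel_model.compile(loss='mse', optimizer='rmsprop')
  x = np.random.random((num_samples, input_dim))
  y = np.random.random((num_samples, 1))
  parallel_model.fit(x, y, epochs=2, batch_size=64)
  return model  # The template model owns the shared weights.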
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Numpy-related utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np

from tensorflow.python.util.tf_export import keras_export


@keras_export('keras.utils.to_categorical')
def to_categorical(y, num_classes=None, dtype='float32'):
  """Converts a class vector (integers) to binary class matrix.

  E.g. for use with categorical_crossentropy.

  Arguments:
      y: class vector to be converted into a matrix
          (integers from 0 to num_classes).
      num_classes: total number of classes.
      dtype: The data type expected by the input. Default: `'float32'`.

  Returns:
      A binary matrix representation of the input. The classes axis is placed
      last.
  """
  y = np.array(y, dtype='int')
  input_shape = y.shape
  if input_shape and input_shape[-1] == 1 and len(input_shape) > 1:
    input_shape = tuple(input_shape[:-1])
  y = y.ravel()
  if not num_classes:
    num_classes = np.max(y) + 1
  n = y.shape[0]
  categorical = np.zeros((n, num_classes), dtype=dtype)
  categorical[np.arange(n), y] = 1
  output_shape = input_shape + (num_classes,)
  categorical = np.reshape(categorical, output_shape)
  return categorical


@keras_export('keras.utils.normalize')
def normalize(x, axis=-1, order=2):
  """Normalizes a Numpy array.

  Arguments:
      x: Numpy array to normalize.
      axis: axis along which to normalize.
      order: Normalization order (e.g. 2 for L2 norm).

  Returns:
      A normalized copy of the array.
  """
  l2 = np.atleast_1d(np.linalg.norm(x, order, axis))
  l2[l2 == 0] = 1
  return x / np.expand_dims(l2, axis)
tensorflow-master
tensorflow/python/keras/utils/np_utils.py
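A short illustration of the two utilities above, with the expected outputs worked out by hand:

import numpy as np
from tensorflow.python import keras

# to_categorical: integer class labels -> one-hot rows, classes axis last.
labels = np.array([0, 2, 1, 2])
one_hot = keras.utils.to_categorical(labels, num_classes=3)
# one_hot == [[1., 0., 0.],
#             [0., 0., 1.],
#             [0., 1., 0.],
#             [0., 0., 1.]]

# normalize: L2-normalizes along the last axis by default (order=2, axis=-1).
x = np.array([[3., 4.], [0., 5.]])
x_unit = keras.utils.normalize(x)
# x_unit == [[0.6, 0.8],
#            [0. , 1. ]]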
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """TensorFlow-related utilities.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import six from tensorflow.python.eager import context from tensorflow.python.framework import composite_tensor from tensorflow.python.framework import ops from tensorflow.python.framework import smart_cond as smart_module from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import tensor_util from tensorflow.python.keras import backend as K from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import variables from tensorflow.python.util import nest from tensorflow.python.util import tf_contextlib def smart_cond(pred, true_fn=None, false_fn=None, name=None): """Return either `true_fn()` if predicate `pred` is true else `false_fn()`. If `pred` is a bool or has a constant value, we return either `true_fn()` or `false_fn()`, otherwise we use `tf.cond` to dynamically route to both. Arguments: pred: A scalar determining whether to return the result of `true_fn` or `false_fn`. true_fn: The callable to be performed if pred is true. false_fn: The callable to be performed if pred is false. name: Optional name prefix when using `tf.cond`. Returns: Tensors returned by the call to either `true_fn` or `false_fn`. Raises: TypeError: If `true_fn` or `false_fn` is not callable. """ if isinstance(pred, variables.Variable): return control_flow_ops.cond( pred, true_fn=true_fn, false_fn=false_fn, name=name) return smart_module.smart_cond( pred, true_fn=true_fn, false_fn=false_fn, name=name) def constant_value(pred): """Return the bool value for `pred`, or None if `pred` had a dynamic value. Arguments: pred: A scalar, either a Python bool or a TensorFlow boolean variable or tensor, or the Python integer 1 or 0. Returns: True or False if `pred` has a constant boolean value, None otherwise. Raises: TypeError: If `pred` is not a Variable, Tensor or bool, or Python integer 1 or 0. """ # Allow integer booleans. if isinstance(pred, int): if pred == 1: pred = True elif pred == 0: pred = False if isinstance(pred, variables.Variable): return None return smart_module.smart_constant_value(pred) def is_tensor_or_tensor_list(v): v = nest.flatten(v) if v and isinstance(v[0], ops.Tensor): return True else: return False def get_reachable_from_inputs(inputs, targets=None): """Returns the set of tensors/ops reachable from `inputs`. Stops if all targets have been found (target is optional). Only valid in Symbolic mode, not Eager mode. Args: inputs: List of tensors. targets: List of tensors. Returns: A set of tensors reachable from the inputs (includes the inputs themselves). 
""" inputs = nest.flatten(inputs) reachable = set(inputs) if targets and not isinstance(targets, set): targets = nest.flatten(targets) targets = set(targets) queue = inputs[:] while queue: x = queue.pop() if isinstance(x, tuple(_user_convertible_tensor_types)): # Can't find consumers of user-specific types. continue if isinstance(x, ops.Operation): outputs = x.outputs[:] or [] outputs += x._control_outputs # pylint: disable=protected-access elif isinstance(x, variables.Variable): try: outputs = [x.op] except AttributeError: # Variables can be created in an Eager context. outputs = [] elif tensor_util.is_tensor(x): outputs = x.consumers() else: raise TypeError('Expected Operation, Variable, or Tensor, got ' + str(x)) for y in outputs: if y not in reachable: reachable.add(y) queue.insert(0, y) if targets and targets.issubset(reachable): return reachable return reachable # This function needs access to private functions of `nest`. # pylint: disable=protected-access def map_structure_with_atomic(is_atomic_fn, map_fn, nested): """Maps the atomic elements of a nested structure. Arguments: is_atomic_fn: A function that determines if an element of `nested` is atomic. map_fn: The function to apply to atomic elements of `nested`. nested: A nested structure. Returns: The nested structure, with atomic elements mapped according to `map_fn`. Raises: ValueError: If an element that is neither atomic nor a sequence is encountered. """ if is_atomic_fn(nested): return map_fn(nested) # Recursively convert. if not nest.is_sequence(nested): raise ValueError( 'Received non-atomic and non-sequence element: {}'.format(nested)) if nest._is_mapping(nested): values = [nested[k] for k in nest._sorted(nested)] else: values = nested mapped_values = [ map_structure_with_atomic(is_atomic_fn, map_fn, ele) for ele in values ] return nest._sequence_like(nested, mapped_values) # pylint: enable=protected-access def convert_shapes(input_shape, to_tuples=True): """Converts nested shape representations to desired format. Performs: TensorShapes -> tuples if `to_tuples=True`. tuples of int or None -> TensorShapes if `to_tuples=False`. Valid objects to be converted are: - TensorShapes - tuples with elements of type int or None. - ints - None Arguments: input_shape: A nested structure of objects to be converted to TensorShapes. to_tuples: If `True`, converts all TensorShape to tuples. Otherwise converts all tuples representing shapes to TensorShapes. Returns: Nested structure of shapes in desired format. """ def _is_shape_component(value): return value is None or isinstance(value, (int, tensor_shape.Dimension)) def _is_atomic_shape(input_shape): # Ex: TensorShape or (None, 10, 32) or 5 or `None` if _is_shape_component(input_shape): return True if isinstance(input_shape, tensor_shape.TensorShape): return True if (isinstance(input_shape, (tuple, list)) and all(_is_shape_component(ele) for ele in input_shape)): return True return False def _convert_shape(input_shape): input_shape = tensor_shape.TensorShape(input_shape) if to_tuples: input_shape = tuple(input_shape.as_list()) return input_shape return map_structure_with_atomic(_is_atomic_shape, _convert_shape, input_shape) class ListWrapper(object): """A wrapper for lists to be treated as elements for `nest`.""" def __init__(self, list_to_wrap): self._list = list_to_wrap def as_list(self): return self._list def convert_inner_node_data(nested, wrap=False): """Either wraps or unwraps innermost node data lists in `ListWrapper` objects. Arguments: nested: A nested data structure. 
wrap: If `True`, wrap innermost lists in `ListWrapper` objects. If `False`, unwraps `ListWrapper` objects into lists. Returns: Structure of same type as nested, with lists wrapped/unwrapped. """ def _is_atomic_nested(nested): """Returns `True` if `nested` is a list representing node data.""" if isinstance(nested, ListWrapper): return True # Node data can be of form `[layer_name, node_id, tensor_id]` or # `[layer_name, node_id, tensor_id, kwargs]`. if (isinstance(nested, list) and (len(nested) in [3, 4]) and isinstance(nested[0], six.string_types)): return True return False def _convert_object_or_list(nested): """Convert b/t `ListWrapper` object and list representations.""" if wrap: if isinstance(nested, ListWrapper): return nested return ListWrapper(nested) else: if isinstance(nested, ListWrapper): return nested.as_list() return nested return map_structure_with_atomic(_is_atomic_nested, _convert_object_or_list, nested) def shape_type_conversion(fn): """Decorator that handles tuple/TensorShape conversion. Used in `compute_output_shape` and `build`. Arguments: fn: function to wrap. Returns: Wrapped function. """ def wrapper(instance, input_shape): # Pass shapes as tuples to `fn` # This preserves compatibility with external Keras. if input_shape is not None: input_shape = convert_shapes(input_shape, to_tuples=True) output_shape = fn(instance, input_shape) # Return shapes from `fn` as TensorShapes. if output_shape is not None: output_shape = convert_shapes(output_shape, to_tuples=False) return output_shape return wrapper def are_all_symbolic_tensors(tensors): return all(is_symbolic_tensor(tensor) for tensor in tensors) _user_convertible_tensor_types = set() def is_symbolic_tensor(tensor): """Returns whether a tensor is symbolic (from a TF graph) or an eager tensor. A Variable can be seen as either: it is considered symbolic when we are in a graph scope, and eager when we are in an eager scope. Arguments: tensor: A tensor instance to test. Returns: True for symbolic tensors, False for eager tensors. """ if isinstance(tensor, tuple(_user_convertible_tensor_types)): tensor = ops.convert_to_tensor_or_composite(tensor) if isinstance(tensor, variables.Variable): # Variables that are output of a Keras Layer in Functional API mode # should be considered symbolic. # TODO(omalleyt): We need a better way to check this in order to # enable `run_eagerly=True` for Models containing Layers that # return Variables as outputs. return (getattr(tensor, '_keras_history', False) or not context.executing_eagerly()) if isinstance(tensor, composite_tensor.CompositeTensor): return tensor._is_graph_tensor # pylint: disable=protected-access if isinstance(tensor, ops.Tensor): return hasattr(tensor, 'graph') return False def register_symbolic_tensor_type(cls): """Allows users to specify types regarded as symbolic `Tensor`s. Used in conjunction with `tf.register_tensor_conversion_function`, calling `tf.keras.utils.register_symbolic_tensor_type(cls)` allows non-`Tensor` objects to be plumbed through Keras layers. Example: ```python # One-time setup. class Foo(object): def __init__(self, input_): self._input = input_ def value(self): return tf.constant(42.) tf.register_tensor_conversion_function( Foo, lambda x, *args, **kwargs: x.value()) tf.keras.utils.register_symbolic_tensor_type(Foo) # User-land. layer = tf.keras.layers.Lambda(lambda input_: Foo(input_)) ``` Arguments: cls: A `class` type which shall be regarded as a symbolic `Tensor`. 
""" global _user_convertible_tensor_types _user_convertible_tensor_types.add(cls) def is_tensor_or_variable(x): return tensor_util.is_tensor(x) or isinstance(x, variables.Variable) def assert_no_legacy_layers(layers): """Prevent tf.layers.Layers from being used with Keras. Certain legacy layers inherit from their keras analogs; however they are not supported with keras and can lead to subtle and hard to diagnose bugs. Args: layers: A list of layers to check Raises: TypeError: If any elements of layers are tf.layers.Layers """ # isinstance check for tf.layers.Layer introduces a circular dependency. legacy_layers = [l for l in layers if getattr(l, '_is_legacy_layer', None)] if legacy_layers: layer_str = '\n'.join([' ' + str(l) for l in legacy_layers]) raise TypeError( 'The following are legacy tf.layers.Layers:\n{}\nTo use keras as a ' 'framework (for instance using the Network, Model, or Sequential ' 'classes), please use the tf.keras.layers implementation instead. ' '(Or, if writing custom layers, subclass from tf.keras.layers rather ' 'than tf.layers)'.format(layer_str)) @tf_contextlib.contextmanager def maybe_init_scope(layer): """Open an `init_scope` if in V2 mode and using the keras graph. Arguments: layer: The Layer/Model that is currently active. Yields: None """ # Don't open an init_scope in V1 mode or when using legacy tf.layers. if (ops.executing_eagerly_outside_functions() and getattr(layer, '_keras_style', True)): with ops.init_scope(): yield else: yield @tf_contextlib.contextmanager def graph_context_for_symbolic_tensors(*args, **kwargs): """Returns graph context manager if any of the inputs is a symbolic tensor.""" if any(is_symbolic_tensor(v) for v in list(args) + list(kwargs.values())): with K.get_graph().as_default(): yield else: yield
tensorflow-master
tensorflow/python/keras/utils/tf_utils.py
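A small sketch of how the shape and conditional helpers above behave; it imports the internal `tf_utils` module directly, which is an implementation detail rather than a public API, and the expected values shown in comments follow from the code above:

from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras.utils import tf_utils

# TensorShape -> tuple when to_tuples=True.
as_tuple = tf_utils.convert_shapes(
    tensor_shape.TensorShape([None, 10, 32]), to_tuples=True)
# as_tuple == (None, 10, 32)

# Nested structures round-trip: tuples -> TensorShapes when to_tuples=False.
as_shapes = tf_utils.convert_shapes(
    {'a': (None, 4), 'b': [(2,), (3,)]}, to_tuples=False)
# as_shapes == {'a': TensorShape([None, 4]),
#               'b': [TensorShape([2]), TensorShape([3])]}

# smart_cond short-circuits on a static Python bool instead of building tf.cond.
value = tf_utils.smart_cond(
    True,
    true_fn=lambda: 'static true branch',
    false_fn=lambda: 'static false branch')
# value == 'static true branch'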
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests of `multi_worker_training_state.py` utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import sys

from absl.testing import parameterized

from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import multi_worker_test_base as test_base
from tensorflow.python.keras import callbacks
from tensorflow.python.keras.distribute import multi_worker_testing_utils
from tensorflow.python.keras.distribute import multi_worker_training_state as training_state
from tensorflow.python.platform import test


class MultiWorkerTrainingStateTest(test_base.IndependentWorkerTestBase,
                                   parameterized.TestCase):

  @combinations.generate(
      combinations.combine(
          mode=['graph'],
          required_gpus=[0, 1],
          file_format=['h5', 'tf'],
          save_weights_only=[True, False]))
  def testCheckpointExists(self, file_format, save_weights_only):
    train_ds, _ = multi_worker_testing_utils.mnist_synthetic_dataset(64, 2)
    model = multi_worker_testing_utils.get_mnist_model((28, 28, 1))
    saving_dir = self.get_temp_dir()
    saving_filepath = os.path.join(saving_dir, 'checkpoint.' + file_format)
    callbacks_list = [
        callbacks.ModelCheckpoint(
            filepath=saving_filepath, save_weights_only=save_weights_only)
    ]
    self.assertFalse(training_state.checkpoint_exists(saving_filepath))
    model.fit(
        x=train_ds, epochs=2, steps_per_epoch=2, callbacks=callbacks_list)
    self.assertTrue(training_state.checkpoint_exists(saving_filepath))
    self.assertTrue(
        training_state.remove_checkpoint_if_exists(saving_dir,
                                                   saving_filepath))
    self.assertFalse(training_state.checkpoint_exists(saving_filepath))


if __name__ == '__main__':
  with test.mock.patch.object(sys, 'exit', os._exit):
    test.main()
tensorflow-master
tensorflow/python/keras/distribute/multi_worker_training_state_test.py
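The same save-then-verify flow as the test above, sketched without the multi-worker harness; the tiny Dense model is a stand-in for `get_mnist_model`, and the internal `training_state` helpers are used exactly as in the test:

import os
import tempfile

import numpy as np
from tensorflow.python import keras
from tensorflow.python.keras.distribute import multi_worker_training_state as training_state

saving_dir = tempfile.mkdtemp()
saving_filepath = os.path.join(saving_dir, 'checkpoint.h5')  # or 'checkpoint.tf'

model = keras.Sequential([keras.layers.Dense(1, input_shape=(4,))])
model.compile(loss='mse', optimizer='rmsprop')
assert not training_state.checkpoint_exists(saving_filepath)

x = np.random.random((8, 4))
y = np.random.random((8, 1))
model.fit(x, y, epochs=1, verbose=0,
          callbacks=[keras.callbacks.ModelCheckpoint(
              filepath=saving_filepath, save_weights_only=True)])

# For '.h5' paths the weights file itself is checked; for TF-format paths the
# helper looks for the '<prefix>.index' file written by save_weights.
assert training_state.checkpoint_exists(saving_filepath)
assert training_state.remove_checkpoint_if_exists(saving_dir, saving_filepath)
assert not training_state.checkpoint_exists(saving_filepath)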
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Test multi-worker Keras.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import copy import functools import os import sys import threading from absl.testing import parameterized # pylint: disable=g-direct-tensorflow-import from tensorflow.python import keras from tensorflow.python.distribute import collective_all_reduce_strategy as collective_strategy from tensorflow.python.distribute import combinations from tensorflow.python.distribute import distribute_coordinator as dc from tensorflow.python.distribute import distribute_lib from tensorflow.python.distribute import multi_worker_test_base as test_base from tensorflow.python.distribute import multi_worker_util from tensorflow.python.distribute import parameter_server_strategy from tensorflow.python.distribute.cluster_resolver import TFConfigClusterResolver from tensorflow.python.framework import ops from tensorflow.python.keras import backend from tensorflow.python.keras import callbacks from tensorflow.python.keras import metrics as metrics_module from tensorflow.python.keras import models from tensorflow.python.keras import optimizers from tensorflow.python.keras.distribute import multi_worker_testing_utils from tensorflow.python.platform import test from tensorflow.python.util import nest # TODO(b/130375202): remove this class which is a temporary solution before we # get rid of configure method. class ParameterServerStrategy(distribute_lib.Strategy): """Temporarily mock the original strategy to bypass cluster_spec check.""" def __init__(self, cluster_resolver=None): """Initializes this strategy.""" # The `cluster_resolver` must be set so that # `ParameterServerStrategyExtended` will keep num_gpus for `configure` # method. if cluster_resolver is None: cluster_resolver = TFConfigClusterResolver() extended = parameter_server_strategy.ParameterServerStrategyExtended( self, cluster_resolver=cluster_resolver) super(ParameterServerStrategy, self).__init__(extended) def _clone_and_build_model(model, strategy): # The new "original" model in worker 0. with strategy.scope(): cloned_model = models.clone_model(model) # Compile and build model. if isinstance(model.optimizer, optimizers.TFOptimizer): optimizer = model.optimizer # TODO(yuefengz): figure out why the optimizer here is still a # TFOptimizer. 
while isinstance(optimizer, optimizers.TFOptimizer): optimizer = optimizer.optimizer optimizer = copy.deepcopy(optimizer) else: optimizer_config = model.optimizer.get_config() optimizer = type(model.optimizer).from_config(optimizer_config) cloned_model.compile( optimizer, model.loss, metrics=metrics_module.clone_metrics(model._compile_metrics), loss_weights=model.loss_weights, sample_weight_mode=model.sample_weight_mode, weighted_metrics=metrics_module.clone_metrics( model._compile_weighted_metrics)) return cloned_model # TODO(b/123918215): Possibly merge this Callback with keras_test.Counter. class MultiWorkerVerificationCallback(callbacks.Callback): """MultiWorkerVerificationCallback verifies the callbacks in multi-worker scheme. This Callback is intended to be used for verifying the callback is indeed called the correct number of times in various task types. Attributes: _task_dict: A nested dictionary storing the number of times a callback has been called in specific task type, task index, and method name. Look up structure is task_name -> task_id -> tracking_method_name -> invoke_count For example, a _task_dict of { 'ps': { 0: { 'on_epoch_begin': 2 }, 1: { 'on_epoch_begin': 2 } }, 'worker': { 0: { 'on_epoch_begin': 2 }, 1: { 'on_epoch_begin': 2 } } } indicates the ps task has 'on_epoch_begin' called twice on each of the two indices, and likewise for worker task. """ # TODO(rchao): Add other method calls to verify. METHODS_TO_VERIFY = ['on_epoch_begin'] def __init__(self, num_epoch, num_worker): """Initialize a MultiWorkerVerificationCallback. Args: num_epoch: Number of epochs this Callback is expected to be called for. num_worker: Number of workers this Callback is expected to be called from. """ super(MultiWorkerVerificationCallback, self).__init__() self._num_epoch = num_epoch self._num_worker = num_worker self._task_dict = { key: collections.defaultdict(lambda: collections.defaultdict(int)) for key in ['ps', 'worker'] } self._lock = threading.Lock() self._is_between_graph = None self.wrap_methods(self.METHODS_TO_VERIFY) @property def is_between_graph(self): return self._is_between_graph @is_between_graph.setter def is_between_graph(self, is_between_graph): self._is_between_graph = is_between_graph def wrap_methods(self, method_names): """Wrap methods so that the counts of calls are tracked. Args: method_names: A list of names of methods to track calls. """ for method_name in method_names: method = getattr(self, method_name) def wrapped_method(method_to_wrap, name, *arg, **kwargs): # Use lock to ensure += operation is thread-safe. with self._lock: self._task_dict[test_base.get_task_type()][ test_base.get_task_index()][name] += 1 method_to_wrap(*arg, **kwargs) setattr(self, method_name, functools.partial(wrapped_method, method, method_name)) def verify(self, test_case): method_count_dict = { method_name: self._num_epoch for method_name in self.METHODS_TO_VERIFY } assert self._is_between_graph is not None if self._is_between_graph: # TODO(b/124171024): In between-graph replication, by default only the # chief calls callback. Fix this test to cover that, as well as the rare # cases where all workers call. worker_call_count = { i: method_count_dict for i in range(0, self._num_worker) } else: # If in-graph, only the first worker calls callback methods. worker_call_count = {0: method_count_dict} test_case.assertDictEqual( self._task_dict, { # PS' callback is not supposed to be called. 'ps': {}, # Each of the Worker should be called num_epoch of times. 
'worker': worker_call_count }) # TODO(yuefengz): right now, fit or evaluate has to be called under distribution # strategy's scope. def _run_standalone_client(test_obj, strategy, cluster_spec): input_shape = (28, 28, 1) with strategy.scope(): orig_model = multi_worker_testing_utils.get_mnist_model(input_shape) def worker_fn(strategy): with ops.Graph().as_default(): batch_size = 64 steps = 2 with strategy.scope(): train_ds, _ = multi_worker_testing_utils.mnist_synthetic_dataset( batch_size, steps) model = _clone_and_build_model(orig_model, strategy) orig_loss, orig_acc = model.evaluate(train_ds, steps=steps) # Workaround for the metrics issue (b/122928955) in async training. This # can only be used in standalone client mode. multi_worker_util.wait_for_other_workers() model.fit(x=train_ds, epochs=2, steps_per_epoch=steps) multi_worker_util.wait_for_other_workers() trained_loss, trained_acc = model.evaluate(train_ds, steps=steps) test_obj.assertLessEqual(trained_loss, orig_loss) test_obj.assertGreaterEqual(trained_acc, orig_acc) dc.run_distribute_coordinator( worker_fn, strategy, mode=dc.CoordinatorMode.STANDALONE_CLIENT, cluster_spec=cluster_spec) class KerasMultiWorkerTestStandaloneClient(test.TestCase, parameterized.TestCase): @classmethod def setUpClass(cls): """Create a local cluster with 2 workers.""" super(KerasMultiWorkerTestStandaloneClient, cls).setUpClass() cls._cluster_spec = test_base.create_in_process_cluster( num_workers=2, num_ps=1, has_eval=False) @combinations.generate( combinations.combine( mode=['graph'], strategy_cls=[ ParameterServerStrategy, collective_strategy.CollectiveAllReduceStrategy, ], required_gpus=[0, 1])) def testSimpleModelStandaloneClient(self, strategy_cls): # With standalone client, training_utils.should_run_multi_worker returns # False which means the distribute coordinator won't be called again in # `fit`. This is still correct and intended since session is still # configured under distribute coordinator's worker context and distribution # strategy object is already configured by distribute coordinator for # multi-worker training. # The logic should be much clearer once standalone client is merged into # core Keras as well. strategy = strategy_cls() _run_standalone_client(self, strategy, self._cluster_spec) class KerasMultiWorkerTestIndependentWorker(test_base.IndependentWorkerTestBase, parameterized.TestCase): @combinations.generate( combinations.combine( mode=['graph'], strategy_cls=[ collective_strategy.CollectiveAllReduceStrategy, ], required_gpus=[0, 1])) def testSimpleModelIndependentWorkerSync(self, strategy_cls): num_workers = 2 num_epoch = 2 cluster_spec = test_base.create_cluster_spec( num_workers=num_workers, test_obj=self) self._barrier = dc._Barrier(2) # The verification callback will be shared by multiple threads. 
verification_callback = MultiWorkerVerificationCallback( num_epoch=num_epoch, num_worker=num_workers) def _independent_worker_fn(*args, **kwargs): # pylint: disable=unused-argument """Simulates an Independent Worker inside of a thread.""" with test.mock.patch.object(dc, '_run_std_server', self._make_mock_run_std_server()): strategy = strategy_cls() verification_callback.is_between_graph = \ strategy.extended.experimental_between_graph batch_size = 64 steps = 2 train_ds, _ = multi_worker_testing_utils.mnist_synthetic_dataset( batch_size, steps) with strategy.scope(): model = multi_worker_testing_utils.get_mnist_model((28, 28, 1)) orig_loss, _ = model.evaluate(train_ds, steps=steps) callbacks_for_fit = nest.flatten( kwargs.get('verification_callback', [])) history = model.fit( x=train_ds, epochs=num_epoch, steps_per_epoch=steps, callbacks=callbacks_for_fit) self.assertIsInstance(history, keras.callbacks.History) trained_loss, _ = model.evaluate(train_ds, steps=steps) self.assertLess(trained_loss, orig_loss) threads = self.run_multiple_tasks_in_threads( _independent_worker_fn, cluster_spec, verification_callback=verification_callback) threads_to_join = [] strategy = strategy_cls() if strategy.extended.experimental_between_graph: for ts in threads.values(): threads_to_join.extend(ts) else: threads_to_join = [threads['worker'][0]] self.join_independent_workers(threads_to_join) verification_callback.verify(self) @combinations.generate( combinations.combine( mode=['graph'], strategy_cls=[ParameterServerStrategy], required_gpus=[0, 1])) def testSimpleModelIndependentWorkerAsync(self, strategy_cls): num_workers = 2 num_epoch = 2 cluster_spec = test_base.create_cluster_spec( num_workers=num_workers, num_ps=2, test_obj=self) self._barrier = dc._Barrier(4) # The verification callback will be shared by multiple threads. verification_callback = MultiWorkerVerificationCallback( num_epoch=num_epoch, num_worker=num_workers) def _independent_worker_fn(*args, **kwargs): # pylint: disable=unused-argument """Simulates an Independent Worker inside of a thread.""" # TODO(rchao/yuefengz): The following is run by both worker and ps # threads. The distribute coordinator should run std server immediately # without configuring the session (or building the graph) on PS. with test.mock.patch.object(dc, '_run_std_server', self._make_mock_run_std_server()): batch_size = 64 steps = 2 strategy = strategy_cls() verification_callback.is_between_graph = \ strategy.extended.experimental_between_graph train_ds, _ = multi_worker_testing_utils.mnist_synthetic_dataset( batch_size, steps) val_ds, _ = multi_worker_testing_utils.mnist_synthetic_dataset( batch_size, steps) with strategy.scope(): model = multi_worker_testing_utils.get_mnist_model((28, 28, 1)) # TODO(b/123868066): Verify callback for model.evaluate(). callbacks_for_fit = nest.flatten( kwargs.get('verification_callback', [])) history = model.fit( x=train_ds, epochs=num_epoch, steps_per_epoch=steps, validation_data=val_ds, validation_steps=steps, callbacks=callbacks_for_fit) self.assertIsInstance(history, keras.callbacks.History) threads = self.run_multiple_tasks_in_threads( _independent_worker_fn, cluster_spec, verification_callback=verification_callback) threads_to_join = [] for task_type, ts in threads.items(): # This test can finish once the worker threads complete, and thus # the ps threads don't need to be joined. 
if task_type == 'ps': continue threads_to_join.extend(ts) self.join_independent_workers(threads_to_join) verification_callback.verify(self) if __name__ == '__main__': # Enable manual variable initialization to make sure variables are initialized # by `init_restore_or_wait_for_variables`. backend.manual_variable_initialization(True) with test.mock.patch.object(sys, 'exit', os._exit): test.main()
tensorflow-master
tensorflow/python/keras/distribute/multi_worker_test.py
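Outside the in-process test harness used above, a real two-worker job is typically configured through the `TF_CONFIG` environment variable. A hedged sketch of that setup for the collective strategy exercised by the test; the addresses and ports are placeholders, and each worker process would run this same program with its own task index:

import json
import os

from tensorflow.python.distribute import collective_all_reduce_strategy as collective_strategy
from tensorflow.python.keras.distribute import multi_worker_testing_utils

# One process per worker; each process sets its own 'index'.
os.environ['TF_CONFIG'] = json.dumps({
    'cluster': {
        'worker': ['localhost:12345', 'localhost:12346'],  # placeholder addresses
    },
    'task': {'type': 'worker', 'index': 0},  # 1 on the second worker
})

# TF_CONFIG must be set before the strategy is constructed; the collective
# strategy resolves the cluster from it.
strategy = collective_strategy.CollectiveAllReduceStrategy()

train_ds, _ = multi_worker_testing_utils.mnist_synthetic_dataset(64, 2)
with strategy.scope():
  model = multi_worker_testing_utils.get_mnist_model((28, 28, 1))
model.fit(x=train_ds, epochs=2, steps_per_epoch=2)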
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Training state management in multi-worker distributed training.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import contextlib import os import tempfile from tensorflow.python.distribute import multi_worker_util from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.keras import backend as K from tensorflow.python.keras.utils import mode_keys from tensorflow.python.lib.io import file_io from tensorflow.python.ops import variables from tensorflow.python.training.tracking import tracking # Constant for `tf.keras.Model` attribute to store the epoch at which the most # recently saved checkpoint was saved. CKPT_SAVED_EPOCH = '_ckpt_saved_epoch' CKPT_SAVED_EPOCH_UNUSED_VALUE = -1 def checkpoint_exists(filepath): """Returns whether the checkpoint `filepath` refers to exists.""" if filepath.endswith('.h5'): return file_io.file_exists(filepath) tf_saved_model_exists = file_io.file_exists(filepath) tf_weights_only_checkpoint_exists = file_io.file_exists(filepath + '.index') return tf_saved_model_exists or tf_weights_only_checkpoint_exists def remove_checkpoint_if_exists(ckpt_dir, filepath): """Removes the checkpoint if it exists and returns whether it has removed.""" if checkpoint_exists(filepath): _remove_dir(ckpt_dir) return True return False def _remove_dir(dir_to_remove): file_io.delete_recursively(dir_to_remove) class MultiWorkerTrainingState(object): """Training state management class in multi-worker distributed training. In multi-worker training, model weights and epoch information are saved periodically for fault-tolerance, also known as preemption-recovery purpose. This class provides apis for backing up and restoring the training state. """ def __init__(self, model, original_filepath): self._model = model # The directory and filepath that store the training state backup file. self._backup_dir, self._backup_filepath = self._get_backup_filepath( original_filepath) # For those who should not checkpoint (e.g. non-chief worker in sync # training), create a temporary directory to write to (that will be # removed later). if not multi_worker_util.should_save_checkpoint(): self._temp_dir, self._temp_filepath = self._get_temp_filepath() # The epoch at which the checkpoint is saved. Used for fault-tolerance. # GPU device only has int64 dtype registered VarHandleOp. self._ckpt_saved_epoch = variables.Variable( initial_value=constant_op.constant( CKPT_SAVED_EPOCH_UNUSED_VALUE, dtype=dtypes.int64), name='ckpt_saved_epoch') # Variable initialization. K.set_value(self._ckpt_saved_epoch, CKPT_SAVED_EPOCH_UNUSED_VALUE) # Calling `AutoTrackable.__setattr__` to avoid getting added as a weight of # model (which is done in `Layer.__setattr__`), which breaks saving/loading # in hdf5 format. 
Once becomes an attr of `model`, _ckpt_saved_epoch gets # tracked and will be included in the checkpoint file when backing up. tracking.AutoTrackable.__setattr__(self._model, CKPT_SAVED_EPOCH, self._ckpt_saved_epoch) def back_up(self, epoch): """Back up the current state of training into a checkpoint file. Arguments: epoch: The current epoch information to be saved. """ # pylint: disable=protected-access self._assert_in_multi_worker_mode() # Update `_ckpt_saved_epoch`. K.set_value(self._ckpt_saved_epoch, epoch) # If this is multi-worker training, and this worker should not # save checkpoint, we replace the filepath with a dummy filepath so # it writes to a file that will be removed at the end of _save_model() # call. This is because the SyncOnReadVariable needs to be synced across # all the workers in order to be read, and all workers need to initiate # that. if multi_worker_util.should_save_checkpoint(): save_filepath = self._backup_filepath else: save_filepath = self._temp_filepath # Save the weights plus CKPT_SAVED_EPOCH variable. self._model.save_weights(save_filepath, overwrite=True) if not multi_worker_util.should_save_checkpoint(): # Remove the file in multi-worker training where this worker should # not checkpoint. It is a dummy file previously saved for sync distributed # training. _remove_dir(self._temp_dir) def restore(self): """Restore the training state from the backed up checkpoint file. Returns: True if the training state is successfully restored. False if the training state doesn't need to be restored, or error occurred so it can't. """ self._assert_in_multi_worker_mode() if not multi_worker_util.should_load_checkpoint(): # For multi-worker training, it should not restore a model in certain # worker setting (e.g. non-chief worker in ParameterServerStrategy). return False if file_io.file_exists(self._backup_dir): try: # Load the weights plus CKPT_SAVED_EPOCH variable. self._model.load_weights(self._backup_filepath) return True except (IOError, ValueError) as e: raise ValueError('Error loading file from {}. Reason: {}'.format( self._backup_filepath, e)) return False def delete_backup(self): """Delete the backup directories. Delete the backup directories which should not exist after `fit()` successfully finishes. """ self._assert_in_multi_worker_mode() tracking.AutoTrackable.__delattr__(self._model, CKPT_SAVED_EPOCH) if multi_worker_util.should_save_checkpoint(): _remove_dir(self._backup_dir) else: assert not file_io.file_exists(self._temp_dir) def maybe_load_initial_epoch_from_ckpt(self, initial_epoch, mode): """Maybe load initial epoch from ckpt considering possible worker recovery. When `_ckpt_saved_epoch` attribute exists and is not `CKPT_SAVED_EPOCH_UNUSED_VALUE`, this is under multi-worker training setting and indicates the worker is recovering from previous failure. In this case, infer `initial_epoch` from `self._ckpt_saved_epoch` to continue previous unfinished training from certain epoch. Arguments: initial_epoch: The original initial_epoch user passes in in `fit()`. mode: The mode for running `model.fit()`. Returns: If the training is recovering from previous failure under multi-worker training setting, return the epoch the training is supposed to continue at. Otherwise, return the `initial_epoch` the user passes in. """ self._assert_in_multi_worker_mode() # TODO(rchao): Add recovery for validation case # (when mode == ModeKeys.TEST). 
epoch = K.eval(self._ckpt_saved_epoch) if mode == mode_keys.ModeKeys.TRAIN and epoch >= 0: # The most recently saved epoch is one epoch prior to the epoch it # failed at, so return the value of 'self._ckpt_saved_epoch' plus one. return epoch + 1 return initial_epoch @contextlib.contextmanager def untrack_vars(self): """Provides a scope within which training state variables are untracked. Regular checkpoint file saved by `ModelCheckpoint` callback that the user requests should not contain training state variables such as `CKPT_SAVED_EPOCH`, or the epoch the checkpoint is most recently saved at. Yields: None. """ tracking.AutoTrackable.__delattr__(self._model, CKPT_SAVED_EPOCH) yield tracking.AutoTrackable.__setattr__(self._model, CKPT_SAVED_EPOCH, self._ckpt_saved_epoch) def _get_backup_filepath(self, original_filepath): backup_dir = os.path.join(os.path.dirname(original_filepath), 'backup') return backup_dir, os.path.join(backup_dir, 'training_state') def _get_temp_filepath(self): temp_dir = tempfile.mkdtemp() return temp_dir, os.path.join(temp_dir, 'temp_training_state') def _assert_in_multi_worker_mode(self): if not multi_worker_util.in_multi_worker_mode(): raise ValueError('MultiWorkerTrainingState is only supposed to be used ' 'in multi-worker training. This indicates some error ' 'that needs to be fixed. Please submit a bug issue to ' 'tf.keras team.')
tensorflow-master
tensorflow/python/keras/distribute/multi_worker_training_state.py
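A condensed sketch of the call pattern the class above supports; in Keras this is driven by internal callbacks rather than user code, the `fit_with_fault_tolerance` helper name and the `steps_per_epoch` value are illustrative, and every method shown asserts that it runs inside a multi-worker job:

from tensorflow.python.keras.distribute import multi_worker_training_state as training_state
from tensorflow.python.keras.utils import mode_keys


def fit_with_fault_tolerance(model, dataset, epochs, checkpoint_filepath):
  # All of these methods assert in_multi_worker_mode(), so this only makes
  # sense as part of a multi-worker training job.
  state = training_state.MultiWorkerTrainingState(model, checkpoint_filepath)
  state.restore()  # Returns False when there is nothing to recover from.
  initial_epoch = state.maybe_load_initial_epoch_from_ckpt(
      0, mode_keys.ModeKeys.TRAIN)
  for epoch in range(initial_epoch, epochs):
    model.fit(dataset, initial_epoch=epoch, epochs=epoch + 1,
              steps_per_epoch=2)
    state.back_up(epoch)  # Persists weights plus the _ckpt_saved_epoch variable.
  state.delete_backup()  # The backup must not outlive a successful fit().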
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Correctness tests for tf.keras DNN model using DistributionStrategy.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python import keras from tensorflow.python.data.ops import dataset_ops from tensorflow.python.distribute import combinations from tensorflow.python.distribute import distribution_strategy_context from tensorflow.python.eager import context from tensorflow.python.eager import test from tensorflow.python.keras import backend as K from tensorflow.python.keras.distribute import keras_correctness_test_base from tensorflow.python.keras.optimizer_v2 import gradient_descent as gradient_descent_keras from tensorflow.python.training import gradient_descent def all_strategy_combinations_with_eager_and_graph_modes(): return (combinations.combine( distribution=keras_correctness_test_base.all_strategies, mode=['graph', 'eager'], cloning=[True, False])) def all_strategy_combinations_with_graph_mode(): return (combinations.combine( distribution=keras_correctness_test_base.all_strategies, mode=['graph'], cloning=[True, False])) def is_default_strategy(strategy): with strategy.scope(): return not distribution_strategy_context.has_strategy() class TestDistributionStrategyDnnCorrectness( keras_correctness_test_base.TestDistributionStrategyCorrectnessBase): def get_model(self, cloning, initial_weights=None, distribution=None, input_shapes=None): with keras_correctness_test_base.MaybeDistributionScope(distribution): # We add few non-linear layers to make it non-trivial. 
model = keras.Sequential() model.add(keras.layers.Dense(10, activation='relu', input_shape=(1,))) model.add(keras.layers.Dense( 10, activation='relu', kernel_regularizer=keras.regularizers.l2(1e-4))) model.add(keras.layers.Dense(10, activation='relu')) model.add(keras.layers.Dense(1)) if initial_weights: model.set_weights(initial_weights) model.compile( loss=keras.losses.mean_squared_error, optimizer=gradient_descent_keras.SGD(0.05), metrics=['mse'], cloning=cloning) return model def get_data(self): x_train = np.random.rand(9984, 1).astype('float32') y_train = 3 * x_train x_predict = np.array([[1.], [2.], [3.], [4.]], dtype=np.float32) return x_train, y_train, x_predict def get_data_with_partial_last_batch(self): x_train = np.random.rand(10000, 1).astype('float32') y_train = 3 * x_train x_eval = np.random.rand(10000, 1).astype('float32') y_eval = 3 * x_eval x_predict = np.array([[1.], [2.], [3.], [4.]], dtype=np.float32) return x_train, y_train, x_eval, y_eval, x_predict def get_data_with_partial_last_batch_eval(self): x_train = np.random.rand(9984, 1).astype('float32') y_train = 3 * x_train x_eval = np.random.rand(10000, 1).astype('float32') y_eval = 3 * x_eval x_predict = np.array([[1.], [2.], [3.], [4.]], dtype=np.float32) return x_train, y_train, x_eval, y_eval, x_predict @combinations.generate(keras_correctness_test_base. all_strategy_and_input_config_combinations()) def test_dnn_correctness(self, distribution, use_numpy, use_validation_data, cloning): self.run_correctness_test(distribution, use_numpy, use_validation_data, cloning) @combinations.generate( keras_correctness_test_base.test_combinations_with_tpu_strategies()) def test_dnn_correctness_with_partial_last_batch_eval(self, distribution, use_numpy, use_validation_data): self.run_correctness_test( distribution, use_numpy, use_validation_data, partial_last_batch='eval') @combinations.generate( keras_correctness_test_base .strategy_minus_tpu_and_input_config_combinations_eager()) def test_dnn_correctness_with_partial_last_batch( self, distribution, use_numpy, use_validation_data): distribution.extended.experimental_enable_get_next_as_optional = True self.run_correctness_test( distribution, use_numpy, use_validation_data, partial_last_batch='train_and_eval', training_epochs=1) @combinations.generate(all_strategy_combinations_with_graph_mode()) def test_dnn_with_dynamic_learning_rate(self, distribution, cloning): self.run_dynamic_lr_test(distribution, cloning) class TestDistributionStrategyDnnMetricCorrectness( keras_correctness_test_base.TestDistributionStrategyCorrectnessBase): def get_model(self, cloning, distribution=None, input_shapes=None): with distribution.scope(): model = keras.Sequential() model.add(keras.layers.Dense(1, input_shape=(1,), kernel_initializer='ones')) model.compile( loss=keras.losses.mean_squared_error, # TODO(b/130808953): Switch back to the V1 optimizer after # global_step is made mirrored. optimizer=gradient_descent_keras.SGD(0.05), metrics=[keras.metrics.BinaryAccuracy()], cloning=cloning) return model def run_metric_correctness_test(self, distribution, cloning): with self.cached_session(): self.set_up_test_config() self.skip_unsupported_test_configuration(distribution, cloning) x_train, y_train, _ = self.get_data() model = self.get_model(cloning, distribution=distribution) batch_size = 64 batch_size = (keras_correctness_test_base. get_batch_size(batch_size, distribution)) train_dataset = dataset_ops.Dataset.from_tensor_slices((x_train, y_train)) train_dataset = (keras_correctness_test_base. 
batch_wrapper(train_dataset, batch_size)) history = model.fit(x=train_dataset, epochs=2, steps_per_epoch=10) self.assertEqual(history.history['binary_accuracy'], [1.0, 1.0]) @combinations.generate(all_strategy_combinations_with_eager_and_graph_modes()) def test_simple_dnn_metric_correctness(self, distribution, cloning): self.run_metric_correctness_test(distribution, cloning) class TestDistributionStrategyDnnMetricEvalCorrectness( keras_correctness_test_base.TestDistributionStrategyCorrectnessBase): def get_model(self, cloning, distribution=None, input_shapes=None): with distribution.scope(): model = keras.Sequential() model.add( keras.layers.Dense( 3, activation='relu', input_dim=4, kernel_initializer='ones')) model.add( keras.layers.Dense( 1, activation='sigmoid', kernel_initializer='ones')) model.compile( loss='mae', metrics=['accuracy', keras.metrics.BinaryAccuracy()], optimizer=gradient_descent.GradientDescentOptimizer(0.001), cloning=cloning) return model def run_eval_metrics_correctness_test(self, distribution, cloning): with self.cached_session(): self.set_up_test_config() self.skip_unsupported_test_configuration(distribution, cloning) model = self.get_model(cloning, distribution=distribution) # verify correctness of stateful and stateless metrics. x = np.ones((100, 4)).astype('float32') y = np.ones((100, 1)).astype('float32') dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).repeat() dataset = keras_correctness_test_base.batch_wrapper(dataset, 4) outs = model.evaluate(dataset, steps=10) self.assertEqual(outs[1], 1.) self.assertEqual(outs[2], 1.) y = np.zeros((100, 1)).astype('float32') dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).repeat() dataset = keras_correctness_test_base.batch_wrapper(dataset, 4) outs = model.evaluate(dataset, steps=10) self.assertEqual(outs[1], 0.) self.assertEqual(outs[2], 0.) @combinations.generate(all_strategy_combinations_with_eager_and_graph_modes()) def test_identity_model_metric_eval_correctness(self, distribution, cloning): self.run_eval_metrics_correctness_test(distribution, cloning) class SubclassedModel(keras.Model): def __init__(self, initial_weights, input_shapes): super(SubclassedModel, self).__init__() self.dense1 = keras.layers.Dense(10, activation='relu', input_shape=(1,)) self.dense2 = keras.layers.Dense( 10, activation='relu', kernel_regularizer=keras.regularizers.l2(1e-4)) self.dense3 = keras.layers.Dense(10, activation='relu') self.dense4 = keras.layers.Dense(1) if input_shapes: self.build(input_shapes) else: # This covers cases when the input is DatasetV1Adapter. 
self.build((None, 1)) if initial_weights: self.set_weights(initial_weights) def call(self, inputs): x = self.dense1(inputs) x = self.dense2(x) x = self.dense3(x) return self.dense4(x) class TestDistributionStrategyDnnCorrectnessWithSubclassedModel( TestDistributionStrategyDnnCorrectness): def get_model(self, cloning, initial_weights=None, distribution=None, input_shapes=None): with keras_correctness_test_base.MaybeDistributionScope(distribution): model = SubclassedModel(initial_weights, input_shapes) model.compile( loss=keras.losses.mean_squared_error, optimizer=gradient_descent_keras.SGD(0.05), metrics=['mse'], cloning=cloning) return model @combinations.generate( keras_correctness_test_base.all_strategy_and_input_config_combinations()) def test_dnn_correctness(self, distribution, use_numpy, use_validation_data, cloning): if ((not cloning and context.executing_eagerly()) or is_default_strategy(distribution)): self.run_correctness_test(distribution, use_numpy, use_validation_data, cloning) elif K.is_tpu_strategy(distribution) and not context.executing_eagerly(): with self.assertRaisesRegexp( ValueError, 'Expected `model` argument to be a functional `Model` instance, ' 'but got a subclass model instead.'): self.run_correctness_test(distribution, use_numpy, use_validation_data, cloning) else: with self.assertRaisesRegexp( ValueError, 'We currently do not support distribution strategy with a ' '`Sequential` model that is created without `input_shape`/' '`input_dim` set in its first layer or a subclassed model.'): self.run_correctness_test(distribution, use_numpy, use_validation_data, cloning) @combinations.generate(all_strategy_combinations_with_graph_mode()) def test_dnn_with_dynamic_learning_rate(self, distribution, cloning): if ((not cloning and context.executing_eagerly() and not K.is_tpu_strategy(distribution)) or is_default_strategy(distribution)): self.run_dynamic_lr_test(distribution, cloning) elif K.is_tpu_strategy(distribution): with self.assertRaisesRegexp( ValueError, 'Expected `model` argument to be a functional `Model` instance, ' 'but got a subclass model instead.'): self.run_dynamic_lr_test(distribution, cloning) else: with self.assertRaisesRegexp( ValueError, 'We currently do not support distribution strategy with a ' '`Sequential` model that is created without `input_shape`/' '`input_dim` set in its first layer or a subclassed model.'): self.run_dynamic_lr_test(distribution, cloning) @combinations.generate( keras_correctness_test_base.test_combinations_with_tpu_strategies()) def test_dnn_correctness_with_partial_last_batch_eval(self, distribution, use_numpy, use_validation_data): with self.assertRaisesRegexp( ValueError, 'Expected `model` argument to be a functional `Model` instance, ' 'but got a subclass model instead.'): self.run_correctness_test( distribution, use_numpy, use_validation_data, partial_last_batch='eval') if __name__ == '__main__': test.main()
tensorflow-master
tensorflow/python/keras/distribute/keras_dnn_correctness_test.py
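The correctness tests above all reduce to one recipe: build identical models from shared initial weights, train once with and once without a distribution strategy, and compare the results. A stripped-down version of that recipe, outside the test-base machinery, with an illustrative tolerance of 1e-4:

import numpy as np

from tensorflow.python import keras
from tensorflow.python.distribute import mirrored_strategy
from tensorflow.python.keras.optimizer_v2 import gradient_descent as gradient_descent_keras


def build_model(initial_weights=None):
  model = keras.Sequential([
      keras.layers.Dense(10, activation='relu', input_shape=(1,)),
      keras.layers.Dense(1),
  ])
  if initial_weights:
    model.set_weights(initial_weights)
  model.compile(loss='mse', optimizer=gradient_descent_keras.SGD(0.05))
  return model


x = np.random.rand(1024, 1).astype('float32')
y = 3 * x

reference = build_model()
initial_weights = reference.get_weights()
history_no_ds = reference.fit(x, y, batch_size=64, epochs=2, shuffle=False).history

distribution = mirrored_strategy.MirroredStrategy()
with distribution.scope():
  distributed = build_model(initial_weights)
history_ds = distributed.fit(x, y, batch_size=64, epochs=2, shuffle=False).history

# With identical initial weights, data order, and global batch size, the two
# loss curves should agree to within a small tolerance (the tests use ~1e-5,
# relaxed when partial batches are involved).
np.testing.assert_allclose(history_no_ds['loss'], history_ds['loss'], atol=1e-4)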
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for stateful tf.keras LSTM models using DistributionStrategy.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python import keras from tensorflow.python.distribute import combinations from tensorflow.python.distribute import strategy_combinations from tensorflow.python.eager import test from tensorflow.python.keras.distribute import keras_correctness_test_base from tensorflow.python.keras.optimizer_v2 import gradient_descent as gradient_descent_keras from tensorflow.python.training import gradient_descent def strategies_for_stateful_embedding_model(): """Returns TPUStrategy with single core device assignment.""" return [ strategy_combinations.tpu_strategy_one_core, strategy_combinations.tpu_strategy_one_step_one_core ] def test_combinations_for_stateful_embedding_model(): return ( combinations.combine( distribution=strategies_for_stateful_embedding_model(), mode='graph', use_numpy=False, use_validation_data=False, cloning=[True, False] )) class DistributionStrategyStatefulLstmModelCorrectnessTest( keras_correctness_test_base. TestDistributionStrategyEmbeddingModelCorrectnessBase): def get_model(self, max_words=10, initial_weights=None, distribution=None, cloning=None, input_shapes=None): del input_shapes batch_size = keras_correctness_test_base._GLOBAL_BATCH_SIZE with keras_correctness_test_base.MaybeDistributionScope(distribution): word_ids = keras.layers.Input( shape=(max_words,), batch_size=batch_size, dtype=np.int32, name='words') word_embed = keras.layers.Embedding(input_dim=20, output_dim=10)(word_ids) lstm_embed = keras.layers.LSTM(units=4, return_sequences=False, stateful=True)(word_embed) preds = keras.layers.Dense(2, activation='softmax')(lstm_embed) model = keras.Model(inputs=[word_ids], outputs=[preds]) if initial_weights: model.set_weights(initial_weights) # TODO(b/130808953): Re-enable the V1 optimizer after iterations # is mirrored. 
optimizer_fn = ( gradient_descent.GradientDescentOptimizer if cloning else gradient_descent_keras.SGD) model.compile( optimizer=optimizer_fn(learning_rate=0.1), loss='sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy']) return model @combinations.generate(test_combinations_for_stateful_embedding_model()) def test_stateful_lstm_model_correctness(self, distribution, use_numpy, use_validation_data, cloning): self.run_correctness_test(distribution, use_numpy, use_validation_data, is_stateful_model=True, cloning=cloning) @combinations.generate( combinations.times( keras_correctness_test_base.test_combinations_with_tpu_strategies(), combinations.combine(cloning=[True, False]))) def test_incorrectly_use_multiple_cores_for_stateful_lstm_model( self, distribution, use_numpy, use_validation_data, cloning): with self.assertRaisesRegexp( ValueError, 'Single core must be used for computation on stateful models. Consider ' 'adding `device_assignment` parameter to TPUStrategy'): self.run_correctness_test( distribution, use_numpy, use_validation_data, is_stateful_model=True, cloning=cloning) if __name__ == '__main__': test.main()
tensorflow-master
tensorflow/python/keras/distribute/keras_stateful_lstm_model_correctness_test.py
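For reference, the stateful-LSTM constraint this test relies on, shown without any distribution strategy: a stateful RNN layer needs a fixed batch size declared on its input, and its state carries over between calls until `reset_states()` is invoked. A minimal sketch mirroring `get_model` above:

import numpy as np
from tensorflow.python import keras

batch_size, max_words = 64, 10

# Stateful RNN layers require a fixed batch size on their Input.
word_ids = keras.layers.Input(
    shape=(max_words,), batch_size=batch_size, dtype='int32', name='words')
word_embed = keras.layers.Embedding(input_dim=20, output_dim=10)(word_ids)
lstm_embed = keras.layers.LSTM(units=4, stateful=True)(word_embed)
preds = keras.layers.Dense(2, activation='softmax')(lstm_embed)
model = keras.Model(inputs=[word_ids], outputs=[preds])
model.compile(optimizer='sgd', loss='sparse_categorical_crossentropy',
              metrics=['sparse_categorical_accuracy'])

x = np.random.randint(0, 20, size=(batch_size, max_words))
first = model.predict(x, batch_size=batch_size)
second = model.predict(x, batch_size=batch_size)  # differs: LSTM state carried over
model.reset_states()  # clear the carried state between independent sequences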
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Correctness tests for tf.keras using DistributionStrategy.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools from absl.testing import parameterized import numpy as np import six from tensorflow.python import keras from tensorflow.python.data.ops import dataset_ops from tensorflow.python.distribute import combinations from tensorflow.python.distribute import distribute_lib from tensorflow.python.distribute import mirrored_strategy from tensorflow.python.distribute import strategy_combinations from tensorflow.python.distribute import tpu_strategy from tensorflow.python.eager import context from tensorflow.python.eager import test from tensorflow.python.framework import random_seed from tensorflow.python.keras.distribute import distributed_training_utils from tensorflow.python.util import nest _RANDOM_SEED = 1337 _EVAL_STEPS = 20 _GLOBAL_BATCH_SIZE = 64 # Note: Please make sure the tests in this file are also covered in # keras_backward_compat_test for features that are supported with both APIs. all_strategies = [ strategy_combinations.default_strategy, strategy_combinations.one_device_strategy, strategy_combinations.mirrored_strategy_with_gpu_and_cpu, strategy_combinations.mirrored_strategy_with_two_gpus, strategy_combinations.tpu_strategy, # steps_per_run=2 strategy_combinations.tpu_strategy_one_step, ] def eager_mode_test_configuration(): return combinations.combine( mode='eager', use_numpy=[True, False], use_validation_data=[True, False]) def graph_mode_test_configuration(): return combinations.combine(mode='graph', use_numpy=[True, False], use_validation_data=[True, False]) def all_strategy_and_input_config_combinations(): return (combinations.times( combinations.combine( distribution=all_strategies, cloning=[True, False]), eager_mode_test_configuration() + graph_mode_test_configuration())) def strategy_minus_tpu_and_input_config_combinations_eager(): return (combinations.times( combinations.combine( distribution=strategy_combinations.strategies_minus_tpu), eager_mode_test_configuration())) def strategies_for_embedding_models(): """Returns distribution strategies to test for embedding models. Since embedding models take longer to train, we disregard DefaultStrategy in order to prevent testing timeouts. 
""" return [ s for s in all_strategies if s.required_tpu or s.required_gpus or s is strategy_combinations.one_device_strategy ] def test_combinations_for_embedding_model(): # TODO(sourabhbajaj): Enable tests for eager mode eager_mode_strategies = [s for s in strategies_for_embedding_models() if not s.required_tpu] return (combinations.times( combinations.combine( distribution=strategies_for_embedding_models(), cloning=[True, False]), (graph_mode_test_configuration())) + combinations.times( combinations.combine( distribution=eager_mode_strategies, cloning=[False]), (eager_mode_test_configuration()))) def test_combinations_with_tpu_strategies(): tpu_strategies = [ strategy_combinations.tpu_strategy, strategy_combinations.tpu_strategy_one_step ] return ( combinations.times( combinations.combine(distribution=tpu_strategies), graph_mode_test_configuration())) class MaybeDistributionScope(object): """Provides a context allowing no distribution strategy.""" def __init__(self, distribution): self._distribution = distribution self._scope = None def __enter__(self): if self._distribution: self._scope = self._distribution.scope() self._scope.__enter__() def __exit__(self, exc_type, value, traceback): if self._distribution: self._scope.__exit__(exc_type, value, traceback) self._scope = None def batch_wrapper(dataset, batch_size, repeat=None): if repeat: dataset = dataset.repeat(repeat) return dataset.batch(batch_size) def get_batch_size(global_batch_size, distribution): batch_size = global_batch_size # TODO(b/118776054): Use global batch size for Keras/DS support. use_per_core_batch_size = ( distribution and not distributed_training_utils.global_batch_size_supported(distribution)) if use_per_core_batch_size: batch_size //= distribution.num_replicas_in_sync return batch_size def get_data_size(data): """Gets the size of data in list, tuple, dict, or a numpy array.""" assert isinstance(data, (np.ndarray, list, dict, tuple)) if isinstance(data, np.ndarray): return len(data) if isinstance(data, (list, tuple)): return len(data[0]) return len(six.next(six.itervalues(data))) def get_shapes(data): shapes = None if all(hasattr(x, 'shape') for x in nest.flatten(data)): shapes = nest.map_structure(lambda x: x.shape, data) return shapes def get_correctness_test_inputs(use_numpy, use_validation_data, with_distribution, x_train, y_train, x_eval, y_eval, x_predict, training_epochs): """Generates the inputs for correctness check when enable Keras with DS.""" global_batch_size = _GLOBAL_BATCH_SIZE batch_size = get_batch_size(global_batch_size, with_distribution) if use_numpy: training_inputs = { 'batch_size': batch_size, 'x': x_train, 'y': y_train, 'epochs': training_epochs, 'shuffle': False, } if use_validation_data: eval_inputs = None training_inputs['validation_data'] = (x_eval, y_eval) else: eval_inputs = { 'batch_size': batch_size, 'x': x_eval, 'y': y_eval, } predict_inputs = { 'x': x_predict } else: training_data_size = get_data_size(x_train) # For dataset inputs, we do not pass batch_size to # keras.fit/evaluate/predict. The batch size is part of the dataset. 
train_dataset = dataset_ops.Dataset.from_tensor_slices((x_train, y_train)) x = batch_wrapper(train_dataset, batch_size, repeat=training_epochs) steps_per_epoch = int(np.ceil(1.0 * training_data_size / global_batch_size)) training_inputs = { 'batch_size': None, 'x': x, 'y': None, 'epochs': training_epochs, 'shuffle': False, 'steps_per_epoch': steps_per_epoch } if use_validation_data: eval_inputs = None # Remove the eval_inputs eval_dataset = dataset_ops.Dataset.from_tensor_slices((x_eval, y_eval)) x = batch_wrapper(eval_dataset, batch_size) training_inputs['validation_data'] = x training_inputs['validation_steps'] = 5 else: eval_dataset = dataset_ops.Dataset.from_tensor_slices((x_eval, y_eval)) x = batch_wrapper(eval_dataset, batch_size) eval_steps = int(np.ceil(1.0 * get_data_size(x_eval) / global_batch_size)) eval_inputs = { 'batch_size': None, 'x': x, 'y': None, 'steps': eval_steps, } predict_batch_size = get_batch_size(get_data_size(x_predict), with_distribution) predict_dataset = dataset_ops.Dataset.from_tensor_slices(x_predict) predict_dataset = batch_wrapper(predict_dataset, predict_batch_size) predict_inputs = { 'steps': 1, 'x': predict_dataset, } return training_inputs, eval_inputs, predict_inputs def fit_eval_and_predict(initial_weights, input_fn, model_fn, cloning=None, distribution=None, is_stateful_model=False): """Generates results for fit/predict/evaluate for given model.""" training_inputs, eval_inputs, predict_inputs = input_fn() model = model_fn( cloning=cloning, initial_weights=initial_weights, distribution=distribution, input_shapes=get_shapes(training_inputs['x'])) result = {} result['training_history_1'] = model.fit(**training_inputs).history if eval_inputs is not None: result['eval_result_1'] = model.evaluate(**eval_inputs) result['weights_1'] = model.get_weights() if predict_inputs is not None: # Check correctness of the result of predict() invoked # multiple times -- as for stateful models, result of # predict may differ for each batch. predict_length = 1 if is_stateful_model: predict_length = 3 for i in range(predict_length): result_key = 'predict_result_{}'.format(i) result[result_key] = model.predict(**predict_inputs) # Train and eval again to mimic user's flow. result['training_history_2'] = model.fit(**training_inputs).history if eval_inputs is not None: result['eval_result_2'] = model.evaluate(**eval_inputs) result['weights_2'] = model.get_weights() return result def compare_results(results_with_ds, results_without_ds, distribution, testcase, partial_last_batch=None): """Compares results of model compiled with/without distribution strategy.""" if partial_last_batch == 'train_and_eval': # We relax the tolerence a lot in the partial last batch case as # 1. the examples in uneven batches may have different weights when # applying the gradients in the distributed case. # 2. TF Keras and TF Keras DS have different ways to handle the case when # training with epochs > 1 with numpy inputs. In TF Keras, every epoch # may have a partial batch. While in TF Keras DS, as we convert # numpy inputs into dataset, it will do a repeat() first and calculate # steps_per_epoch, so it will at most have one partial batch. This # makes the 1-CPU result even different. default_tolerance = 1e-3 relaxed_tolerance = 1e-3 else: default_tolerance = 1e-5 relaxed_tolerance = 1e-4 def _get_compare_result_tolerance(key): """Returns tolerance to compare results.""" # TODO(b/119257215): For MirroredStrategy, weights are not exactly the same, # so use larger tolerance for now. 
Predict should be related to weights. if (isinstance(distribution, ( mirrored_strategy.MirroredStrategy, distribute_lib._DefaultDistributionStrategy)) and # pylint: disable=protected-access key.startswith(('weights_1', 'weights_2', 'predict_result'))): return relaxed_tolerance return default_tolerance for key in sorted(results_with_ds.keys()): if (key.startswith('training_history') and isinstance(distribution, (tpu_strategy.TPUStrategy, tpu_strategy.TPUStrategyV1)) and distribution.extended.steps_per_run > 1): # TODO(b/119894254): Enable this test for all cases once the # underlying bug is fixed. continue tolerance = _get_compare_result_tolerance(key) # We don't compare the loss as loss is currently not computed as metric # in Keras, the loss value is inaccurate for last partial batch due to # more weights for the last batch samples. if partial_last_batch is not None: if key.startswith('eval_result'): results_with_ds[key] = results_with_ds[key][1:] results_without_ds[key] = results_without_ds[key][1:] if key.startswith('training_history'): results_with_ds[key]['val_loss'] = 0 results_without_ds[key]['val_loss'] = 0 testcase.assertAllClose( results_with_ds[key], results_without_ds[key], atol=tolerance, rtol=tolerance, msg='Fail to assert {}.'.format(key)) def should_skip_tpu_with_eager(distribution): return (context.executing_eagerly() and isinstance(distribution, (tpu_strategy.TPUStrategy, tpu_strategy.TPUStrategyV1))) class LearningRateBatchScheduler(keras.callbacks.Callback): """Scheduler that dynamically sets the learning rate of model.""" def __init__(self, update_freq=None): self._update_freq = update_freq def on_batch_begin(self, batch, logs=None): if self._update_freq and batch % self._update_freq != 0: return # To avoid divergence, limit the value range. lr = 0.001 * (batch % 10) keras.backend.set_value(self.model.optimizer.lr, lr) class TestDistributionStrategyCorrectnessBase(test.TestCase, parameterized.TestCase): """Model agnostic testing infra to test correctness of Keras models.""" def set_up_test_config(self, use_numpy=False, use_validation_data=False, with_batch_norm=False): self.use_numpy = use_numpy self.use_validation_data = use_validation_data self.with_batch_norm = with_batch_norm keras.backend.set_image_data_format('channels_last') np.random.seed(_RANDOM_SEED) random_seed.set_random_seed(_RANDOM_SEED) def get_data(self): num_samples = 10000 x_train = np.random.randint(0, 2, num_samples) x_train = np.reshape(x_train, (num_samples, 1)) y_train = x_train return (x_train.astype('float32'), y_train.astype('float32'), None) def get_data_with_partial_last_batch(self): raise NotImplementedError def get_data_with_partial_last_batch_eval(self): raise NotImplementedError def get_input_for_correctness_test(self, **kwargs): """Generates inputs that are dictionaries. We only provide a default implementation of this method here. If you need more customized way of providing input to your model, overwrite this method. 
Arguments: **kwargs: key word arguments about how to create the input dictionaries Returns: Three dictionaries representing the input for fit(), evalutate() and predict() """ return get_correctness_test_inputs(**kwargs) def get_model(self, distribution=None, cloning=None, input_shapes=None): raise NotImplementedError def skip_unsupported_test_configuration(self, distribution, cloning): if should_skip_tpu_with_eager(distribution) and cloning: self.skipTest('TPUStrategy does not support eager mode with cloning.') return def run_correctness_test(self, distribution, use_numpy, use_validation_data, cloning=None, with_batch_norm=False, is_stateful_model=False, partial_last_batch=None, training_epochs=2): with self.cached_session(): self.set_up_test_config(use_numpy, use_validation_data, with_batch_norm) self.skip_unsupported_test_configuration(distribution, cloning) if partial_last_batch == 'eval': x_train, y_train, x_eval, y_eval, x_predict = ( self.get_data_with_partial_last_batch_eval()) elif partial_last_batch == 'train_and_eval': x_train, y_train, x_eval, y_eval, x_predict = ( self.get_data_with_partial_last_batch()) else: x_train, y_train, x_predict = self.get_data() x_eval = x_train y_eval = y_train # The model is built once and the initial weights are saved. # This is used to initialize the model for both the distribution and # non-distribution run. model = self.get_model(cloning=cloning, input_shapes=get_shapes(x_train)) initial_weights = model.get_weights() ds_input_fn = functools.partial( self.get_input_for_correctness_test, use_numpy=use_numpy, use_validation_data=use_validation_data, with_distribution=distribution, x_train=x_train, y_train=y_train, x_eval=x_eval, y_eval=y_eval, x_predict=x_predict, training_epochs=training_epochs) nods_input_fn = functools.partial( self.get_input_for_correctness_test, use_numpy=use_numpy, use_validation_data=use_validation_data, with_distribution=None, x_train=x_train, y_train=y_train, x_eval=x_eval, y_eval=y_eval, x_predict=x_predict, training_epochs=training_epochs) results_with_ds = fit_eval_and_predict( initial_weights, input_fn=ds_input_fn, model_fn=self.get_model, cloning=cloning, distribution=distribution, is_stateful_model=is_stateful_model) results_without_ds = fit_eval_and_predict( initial_weights, input_fn=nods_input_fn, model_fn=self.get_model, cloning=cloning, distribution=None, is_stateful_model=is_stateful_model) # First, special case, for multi-replica distributed training, batch # norm is not aggregated globally. So it is expected to have different # weights. if (self.with_batch_norm and distribution.num_replicas_in_sync > 1): with self.assertRaises(AssertionError): compare_results( results_with_ds, results_without_ds, distribution, testcase=self, partial_last_batch=partial_last_batch) else: compare_results( results_with_ds, results_without_ds, distribution, testcase=self, partial_last_batch=partial_last_batch) def get_input_for_dynamic_lr_test(self, **kwargs): """Generates inputs that are dictionaries. We only provide a default implementation of this method here. If you need more customized way of providing input to your model, overwrite this method. 
Arguments: **kwargs: key word arguments about how to create the input dictionaries Returns: Three dictionaries representing the input for fit(), evalutate() and predict() """ training_input = kwargs return training_input, None, None def run_dynamic_lr_test(self, distribution, cloning=None): with self.cached_session(): self.set_up_test_config() self.skip_unsupported_test_configuration(distribution, cloning) x_train, y_train, _ = self.get_data() model = self.get_model(cloning=cloning, input_shapes=get_shapes(x_train)) initial_weights = model.get_weights() update_freq = None if (isinstance(distribution, tpu_strategy.TPUStrategyV1) and distribution.extended.steps_per_run > 1): # For TPUStrategy with steps_per_run > 1, the callback is not invoked # every step. So, to compare the CPU/TPU, we let the CPU to behave the # same as TPU. update_freq = distribution.extended.steps_per_run training_epochs = 2 global_batch_size = 64 ds_batch_size = get_batch_size(global_batch_size, distribution) nods_batch_size = get_batch_size(global_batch_size, None) ds_input_fn = functools.partial( self.get_input_for_dynamic_lr_test, x=x_train, y=y_train, batch_size=ds_batch_size, shuffle=False, epochs=training_epochs, callbacks=[LearningRateBatchScheduler(update_freq)], validation_data=(x_train, y_train)) nods_input_fn = functools.partial( self.get_input_for_dynamic_lr_test, x=x_train, y=y_train, batch_size=nods_batch_size, shuffle=False, epochs=training_epochs, callbacks=[LearningRateBatchScheduler(update_freq)], validation_data=(x_train, y_train)) results_with_ds = fit_eval_and_predict( initial_weights, input_fn=ds_input_fn, model_fn=self.get_model, cloning=cloning, distribution=distribution) results_without_ds = fit_eval_and_predict( initial_weights, input_fn=nods_input_fn, model_fn=self.get_model, cloning=cloning, distribution=None) compare_results(results_with_ds, results_without_ds, distribution, testcase=self) class TestDistributionStrategyEmbeddingModelCorrectnessBase( TestDistributionStrategyCorrectnessBase): """Base class to test correctness of Keras models with embedding layers.""" def get_data(self, count=(_GLOBAL_BATCH_SIZE * _EVAL_STEPS), min_words=5, max_words=10, max_word_id=19, num_classes=2): distribution = [] for _ in range(num_classes): dist = np.abs(np.random.randn(max_word_id)) dist /= np.sum(dist) distribution.append(dist) features = [] labels = [] for _ in range(count): label = np.random.randint(0, num_classes, size=1)[0] num_words = np.random.randint(min_words, max_words, size=1)[0] word_ids = np.random.choice( max_word_id, size=num_words, replace=True, p=distribution[label]) word_ids = word_ids labels.append(label) features.append(word_ids) features = keras.preprocessing.sequence.pad_sequences( features, maxlen=max_words) x_train = np.asarray(features, dtype=np.float32) y_train = np.asarray(labels, dtype=np.int32).reshape((count, 1)) x_predict = x_train[:_GLOBAL_BATCH_SIZE] return x_train, y_train, x_predict if __name__ == '__main__': test.main()
tensorflow-master
tensorflow/python/keras/distribute/keras_correctness_test_base.py
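A minimal sketch of the batch-size bookkeeping done by the base class above: when a strategy does not support a global batch size, the per-replica batch size is the global size divided by the number of replicas in sync, and steps_per_epoch for dataset inputs is the ceiling of samples over the global batch size. The helper names below are illustrative, not the base class's own.

import numpy as np

def per_replica_batch_size(global_batch_size, num_replicas, global_supported):
  # Mirrors get_batch_size(): divide only when per-core batch sizes are needed.
  return global_batch_size if global_supported else global_batch_size // num_replicas

def steps_per_epoch(num_samples, global_batch_size):
  # Mirrors the ceil-division used for dataset inputs in the correctness tests.
  return int(np.ceil(1.0 * num_samples / global_batch_size))

# Example: 10000 samples, global batch 64, 2 replicas without global batch support.
assert per_replica_batch_size(64, 2, global_supported=False) == 32
assert steps_per_epoch(10000, 64) == 157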
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Correctness tests for tf.keras CNN models using DistributionStrategy.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python import keras from tensorflow.python.distribute import combinations from tensorflow.python.eager import test from tensorflow.python.keras.distribute import keras_correctness_test_base from tensorflow.python.keras.optimizer_v2 import gradient_descent class DistributionStrategyCnnCorrectnessTest( keras_correctness_test_base.TestDistributionStrategyCorrectnessBase): def get_model(self, initial_weights=None, distribution=None, cloning=None, input_shapes=None): del input_shapes with keras_correctness_test_base.MaybeDistributionScope(distribution): image = keras.layers.Input(shape=(28, 28, 3), name='image') c1 = keras.layers.Conv2D( name='conv1', filters=16, kernel_size=(3, 3), strides=(4, 4), kernel_regularizer=keras.regularizers.l2(1e-4))( image) if self.with_batch_norm: c1 = keras.layers.BatchNormalization(name='bn1')(c1) c1 = keras.layers.MaxPooling2D(pool_size=(2, 2))(c1) logits = keras.layers.Dense( 10, activation='softmax', name='pred')( keras.layers.Flatten()(c1)) model = keras.Model(inputs=[image], outputs=[logits]) if initial_weights: model.set_weights(initial_weights) model.compile( optimizer=gradient_descent.SGD( learning_rate=0.1), loss='sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'], cloning=cloning) return model def _get_data(self, count, shape=(28, 28, 3), num_classes=10): centers = np.random.randn(num_classes, *shape) features = [] labels = [] for _ in range(count): label = np.random.randint(0, num_classes, size=1)[0] offset = np.random.normal(loc=0, scale=0.1, size=np.prod(shape)) offset = offset.reshape(shape) labels.append(label) features.append(centers[label] + offset) x = np.asarray(features, dtype=np.float32) y = np.asarray(labels, dtype=np.float32).reshape((count, 1)) return x, y def get_data(self): x_train, y_train = self._get_data( count=keras_correctness_test_base._GLOBAL_BATCH_SIZE * keras_correctness_test_base._EVAL_STEPS) x_predict = x_train return x_train, y_train, x_predict def get_data_with_partial_last_batch_eval(self): x_train, y_train = self._get_data(count=1280) x_eval, y_eval = self._get_data(count=1000) return x_train, y_train, x_eval, y_eval, x_eval @combinations.generate(keras_correctness_test_base. all_strategy_and_input_config_combinations()) def test_cnn_correctness(self, distribution, use_numpy, use_validation_data, cloning): self.run_correctness_test(distribution, use_numpy, use_validation_data, cloning) @combinations.generate(keras_correctness_test_base. 
all_strategy_and_input_config_combinations()) def test_cnn_with_batch_norm_correctness(self, distribution, use_numpy, use_validation_data, cloning): self.skipTest('Flakily times out, b/134670856') self.run_correctness_test(distribution, use_numpy, use_validation_data, with_batch_norm=True, cloning=cloning) @combinations.generate( keras_correctness_test_base.test_combinations_with_tpu_strategies() + keras_correctness_test_base .strategy_minus_tpu_and_input_config_combinations_eager()) def test_cnn_correctness_with_partial_last_batch_eval(self, distribution, use_numpy, use_validation_data): self.run_correctness_test( distribution, use_numpy, use_validation_data, partial_last_batch=True, training_epochs=1) @combinations.generate( keras_correctness_test_base.test_combinations_with_tpu_strategies() + keras_correctness_test_base .strategy_minus_tpu_and_input_config_combinations_eager()) def test_cnn_with_batch_norm_correctness_and_partial_last_batch_eval( self, distribution, use_numpy, use_validation_data): self.run_correctness_test( distribution, use_numpy, use_validation_data, with_batch_norm=True, partial_last_batch=True) if __name__ == '__main__': test.main()
tensorflow-master
tensorflow/python/keras/distribute/keras_image_model_correctness_test.py
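A minimal sketch of how the CNN correctness test above builds its synthetic image data: each class gets a random "center" image, and every sample is that center plus small Gaussian noise, which keeps the classes easy to separate during a short training run. The function name `make_clustered_images` and the fixed seed are illustrative assumptions.

import numpy as np

def make_clustered_images(count, shape=(28, 28, 3), num_classes=10, seed=0):
  rng = np.random.RandomState(seed)
  centers = rng.randn(num_classes, *shape)          # one prototype per class
  labels = rng.randint(0, num_classes, size=count)  # random class per sample
  noise = rng.normal(loc=0.0, scale=0.1, size=(count,) + shape)
  x = (centers[labels] + noise).astype(np.float32)
  y = labels.astype(np.float32).reshape((count, 1))
  return x, y

x, y = make_clustered_images(count=8)
assert x.shape == (8, 28, 28, 3) and y.shape == (8, 1)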
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Keras' Distribution Strategy library.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function
tensorflow-master
tensorflow/python/keras/distribute/__init__.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests Keras multi worker fault tolerance.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import json import os import sys import tempfile import threading from absl.testing import parameterized from tensorflow.python.distribute import collective_all_reduce_strategy as collective_strategy from tensorflow.python.distribute import combinations from tensorflow.python.distribute import distribute_coordinator as dc from tensorflow.python.distribute import mirrored_strategy from tensorflow.python.distribute import multi_worker_test_base as test_base from tensorflow.python.keras import backend as K from tensorflow.python.keras import callbacks from tensorflow.python.keras.distribute import multi_worker_testing_utils from tensorflow.python.keras.distribute import multi_worker_training_state as training_state from tensorflow.python.platform import test def get_strategy_object(strategy_cls): if strategy_cls == mirrored_strategy.MirroredStrategy: return strategy_cls(mirrored_strategy.all_local_devices()) else: # CollectiveAllReduceStrategy and ParameterServerStrategy. return strategy_cls() class KerasMultiWorkerFaultToleranceTest(test_base.IndependentWorkerTestBase, parameterized.TestCase): class PreemptionAtBatchBoundarySimulatingCallback(callbacks.Callback): """Callback to simulate preemtion at batch boundary.""" def on_epoch_begin(self, epoch, logs=None): self._current_epoch = epoch def on_batch_begin(self, batch, logs=None): if self._current_epoch == 1 and batch == 1 and not test_base.is_chief(): # Simulate preemtion at the start of second batch of second epoch. raise RuntimeError('Preemption!') def on_batch_end(self, batch, logs=None): assert self._current_epoch < 1 or batch < 1 def on_epoch_end(self, epoch, logs=None): assert epoch < 1 # TODO(rchao): Add tests for checking 0th and 2nd epoch boundary. class PreemptionAtEpochBoundarySimulatingCallback(callbacks.Callback): """Callback to simulate preemtion at epoch boundary.""" def on_epoch_begin(self, epoch, logs=None): if epoch == 1 and not test_base.is_chief(): # Simulate preemtion at the start of second epoch. raise RuntimeError('Preemption!') def on_epoch_end(self, epoch, logs=None): assert epoch < 1 @combinations.generate( combinations.combine( # Eager runtime unfortunately cannot be tested with multi-threading. # TODO(rchao): Add test to use multi-process for eager mode after # b/132095481 is resolved. mode=['graph'], strategy_cls=[collective_strategy.CollectiveAllReduceStrategy], required_gpus=[0, 1], file_format=['h5', 'tf'], preemption_callback=[ PreemptionAtEpochBoundarySimulatingCallback, PreemptionAtBatchBoundarySimulatingCallback ], # FT should work regardless of `ModelCheckpoint`'s parameters. 
save_weights_only=[True, False], load_weights_on_restart=[True, False], )) def testFaultToleranceInSyncStrategy(self, strategy_cls, file_format, preemption_callback, save_weights_only, load_weights_on_restart): """Test fault-tolerance with multi-threading using sync dist-strat. This test simulates multi-worker training that is interrupted by a preemption, by having two threads, each of which represents a chief and a non-chief worker, where the non-chief raises an error in the middle of training loop. Upon excepting the error, a new thread with a new cluster spec is created to simulate the recovered non-chief worker. Meanwhile, the chief worker cannot proceed and hangs since the non-chief worker has crashed. To simulate a restart of the chief, a new thread has been prepared to run to take over chief with the help of a condition variable. It is expected that after the restart of both chief and non-chief workers, the training continues from the epoch they previously failed at. The test concludes by verifying the preemption-interrupted training can finish with the same loss and accuracy had the preemption not occurred. TODO(rchao): Add test to check preemption on chief (possibly using multi processes). TODO(rchao): Add test to check fault-tolerance with multiple `model.fit()`. Arguments: strategy_cls: The strategy class to use. file_format: `h5` or `tf`. preemption_callback: The callback to simulate preemption. save_weights_only: The argument for `model.fit()`'s `save_weights_only`. load_weights_on_restart: The argument for `model.fit()`'s `load_weights_on_restart`. """ def _independent_worker_fn(*args, **kwargs): # pylint: disable=unused-argument with test.mock.patch.object(dc, '_run_std_server', self._make_mock_run_std_server()): # Condition variable that blocks the thread that represents the # restarted chief. cv = kwargs.get('cv', None) # `before_restart` is True for the threads that represent the original # chief and non-chief worker, and False for threads that represent the # restarted chief and non-chief workers. before_restart = kwargs['before_restart'] if kwargs['new_chief']: # `new_chief` is only True for the restarted chief thread. It waits # until non-chief is preempted and restarted to simulate the causality # where chief's restart results from non-chief's failure. cv.acquire() while not hasattr(cv, 'preempted'): cv.wait() cv.release() # Model building under strategy scope. Following is the code we expect # the user runs on every worker. strategy = get_strategy_object(strategy_cls) batch_size = 64 steps = 3 train_ds, _ = multi_worker_testing_utils.mnist_synthetic_dataset( batch_size, steps) with strategy.scope(): model = multi_worker_testing_utils.get_mnist_model((28, 28, 1)) # Function to start a new thread. This will be called twice in the # following code: one represents the restart of the non-chief, and one # represents the restart of the chief as a result of the restart of the # non-chief (so the training can continue in sync). def start_new_thread(new_chief=False): new_thread_tf_config = json.loads(os.environ['TF_CONFIG']) new_thread_tf_config['cluster']['worker'] = kwargs['reserved_ports'] return self._run_task_in_thread( task_fn=_independent_worker_fn, cluster_spec=None, task_type=None, task_id=None, tf_config=new_thread_tf_config, before_restart=False, cv=cv, new_chief=new_chief) if test_base.is_chief() and before_restart: # Chief to start a new thread (that will be blocked by a condition # variable until the non-chief's new thread is started). 
The thread # for (recovered) chief is started before entering `fit()` because # the original chief thread will eventually hang and be ignored. start_new_thread(new_chief=True) try: class CkptSavedEpochAssertingCallback(callbacks.Callback): def __init__(self, test_obj): super(CkptSavedEpochAssertingCallback, self).__init__() self.test_obj = test_obj def on_epoch_begin(self, epoch, logs=None): # `_ckpt_saved_epoch` attribute is set at the end of every epoch. self.test_obj.assertEqual( K.eval(self.model._ckpt_saved_epoch) == training_state.CKPT_SAVED_EPOCH_UNUSED_VALUE, epoch == 0) callbacks_list = [ callbacks.ModelCheckpoint( filepath=saving_filepath, save_weights_only=save_weights_only, load_weights_on_restart=load_weights_on_restart), CkptSavedEpochAssertingCallback(self) ] if before_restart: callbacks_list.append(preemption_callback()) self.assertFalse(hasattr(model, training_state.CKPT_SAVED_EPOCH)) history = model.fit( x=train_ds, epochs=num_epoch, steps_per_epoch=steps, callbacks=callbacks_list) self.assertFalse(hasattr(model, training_state.CKPT_SAVED_EPOCH)) # `history` of the training result is collected to be compared against # each other. It is expected that the training results (loss and # accuracy`) are the same with or without preemption. self._histories.append(history.history) except RuntimeError: # pylint: disable=g-assert-in-except self.assertTrue(before_restart) # Reset the barrier so the new threads simulating recovery can # continue. self._barrier._counter = 0 self._barrier._flag = False # Now that the non-chief has been preempted, it notifies the thread # that simulates the restarted chief to start so they can be back in # sync. cv.acquire() cv.preempted = True cv.notify() cv.release() # At this point we should discard the original non-chief thread, and # start the new thread that simulates the restarted non-chief, hence # joining the thread and return. self.join_independent_workers([start_new_thread()]) return # Successful end of a `fit()` call. self._successful_thread_ends += 1 self.assertFalse(before_restart) # Common parameters num_workers = 2 num_epoch = 3 # History list storing the results for preemption and no preemption cases. self._histories = [] strategy = get_strategy_object(strategy_cls) def get_saving_dir_and_filepath(): saving_dir = tempfile.mkdtemp(prefix=self.get_temp_dir()) saving_filepath = os.path.join(saving_dir, 'checkpoint.' + file_format) return saving_dir, saving_filepath # Case 1: Training for `num_epoch` without preemptions. cluster_spec = test_base.create_cluster_spec( num_workers=num_workers, test_obj=self) self._barrier = dc._Barrier(2) self._successful_thread_ends = 0 # Get a new temporary filepath to save the checkpoint to. saving_dir, saving_filepath = get_saving_dir_and_filepath() threads = self.run_multiple_tasks_in_threads( _independent_worker_fn, cluster_spec, # Pass `saving_filepath` from the parent thread to ensure every worker # has the same filepath to save. saving_filepath=saving_filepath, before_restart=False, new_chief=False) threads_to_join = [] if strategy.extended.experimental_between_graph: for ts in threads.values(): threads_to_join.extend(ts) else: threads_to_join = [threads['worker'][0]] self.join_independent_workers(threads_to_join) # Asserting the checkpoint file exists. self.assertTrue( training_state.remove_checkpoint_if_exists(saving_dir, saving_filepath)) self.assertEqual(self._successful_thread_ends, 2) # Case 2: Training for `num_epoch` epoch with preemptions. 
# The preemption is simulated at both epoch boundary and batch boundary. cluster_spec = test_base.create_cluster_spec( num_workers=num_workers, test_obj=self) cv = threading.Condition() self._barrier = dc._Barrier(2) # Ports reserved for new threads simulating recovery. reserved_ports = [ 'localhost:%s' % test_base.pick_unused_port() for _ in range(num_workers) ] self._successful_thread_ends = 0 # Get a new temporary filepath to save the checkpoint to. saving_dir, saving_filepath = get_saving_dir_and_filepath() threads = self.run_multiple_tasks_in_threads( _independent_worker_fn, cluster_spec, # Pass `saving_filepath` from the parent thread to ensure every worker # has the same filepath to save. saving_filepath=saving_filepath, reserved_ports=reserved_ports, before_restart=True, cv=cv, new_chief=False) threads_to_join = [] if strategy.extended.experimental_between_graph: # Only join the non-chief thread since the first thread for chief will # eventually hang and be ignored. threads_to_join = [threads['worker'][1]] else: threads_to_join = [threads['worker'][0]] self.join_independent_workers(threads_to_join) # Asserting the checkpoint file exists. self.assertTrue( training_state.remove_checkpoint_if_exists(saving_dir, saving_filepath)) self.assertEqual(self._successful_thread_ends, 2) def assert_all_elements_are_identical(list_to_check): first_item = list_to_check[0] for item in list_to_check[1:]: self.assertAllClose(first_item, item, rtol=2e-5, atol=1e-5) # Important: the results from preemption interrupted and non-interrupted # cases should give the same final results. assert_all_elements_are_identical( [history['acc'][-1] for history in self._histories]) assert_all_elements_are_identical( [history['loss'][-1] for history in self._histories]) # The length of `self._histories` would be num_workers * num_runs (3). self.assertLen(self._histories, 4) if __name__ == '__main__': with test.mock.patch.object(sys, 'exit', os._exit): test.main()
tensorflow-master
tensorflow/python/keras/distribute/multi_worker_fault_tolerance_test.py
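A minimal sketch of the preemption simulation used in the fault-tolerance test above: a Keras callback raises from a non-chief worker at a chosen epoch/batch boundary so the surrounding harness can exercise checkpoint restore. The `is_chief` constructor flag and the class name `SimulatedPreemption` are illustrative stand-ins for the test harness's own chief detection.

import tensorflow as tf

class SimulatedPreemption(tf.keras.callbacks.Callback):
  """Raises RuntimeError at a configurable batch boundary on non-chief workers."""

  def __init__(self, is_chief, preempt_epoch=1, preempt_batch=1):
    super(SimulatedPreemption, self).__init__()
    self._is_chief = is_chief
    self._preempt_epoch = preempt_epoch
    self._preempt_batch = preempt_batch
    self._current_epoch = 0

  def on_epoch_begin(self, epoch, logs=None):
    self._current_epoch = epoch

  def on_batch_begin(self, batch, logs=None):
    # Only a non-chief worker simulates the failure, matching the test above.
    if (not self._is_chief and self._current_epoch == self._preempt_epoch
        and batch == self._preempt_batch):
      raise RuntimeError('Preemption!')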
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilities for testing multi-worker distribution strategies with Keras.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python import keras from tensorflow.python.data.ops import dataset_ops from tensorflow.python.framework import dtypes from tensorflow.python.keras.optimizer_v2 import gradient_descent from tensorflow.python.ops import array_ops from tensorflow.python.ops import random_ops def mnist_synthetic_dataset(batch_size, steps_per_epoch): """Generate synthetic MNIST dataset for testing.""" # train dataset x_train = array_ops.ones([batch_size * steps_per_epoch, 28, 28, 1], dtype=dtypes.float32) y_train = array_ops.ones([batch_size * steps_per_epoch, 1], dtype=dtypes.int32) train_ds = dataset_ops.Dataset.from_tensor_slices((x_train, y_train)) train_ds = train_ds.repeat() # train_ds = train_ds.shuffle(100) train_ds = train_ds.batch(64, drop_remainder=True) # eval dataset x_test = random_ops.random_uniform([10000, 28, 28, 1], dtype=dtypes.float32) y_test = random_ops.random_uniform([10000, 1], minval=0, maxval=9, dtype=dtypes.int32) eval_ds = dataset_ops.Dataset.from_tensor_slices((x_test, y_test)) eval_ds = eval_ds.repeat() eval_ds = eval_ds.batch(64, drop_remainder=True) return train_ds, eval_ds def get_mnist_model(input_shape): """Define a deterministically-initialized CNN model for MNIST testing.""" model = keras.models.Sequential() model.add( keras.layers.Conv2D( 32, kernel_size=(3, 3), activation="relu", input_shape=input_shape, kernel_initializer=keras.initializers.TruncatedNormal(seed=99))) model.add(keras.layers.BatchNormalization()) model.add(keras.layers.Flatten()) model.add( keras.layers.Dense( 10, activation="softmax", kernel_initializer=keras.initializers.TruncatedNormal(seed=99))) # TODO(yuefengz): optimizer with slot variables doesn't work because of # optimizer's bug. # TODO(yuefengz): we should not allow non-v2 optimizer. model.compile( loss=keras.losses.sparse_categorical_crossentropy, optimizer=gradient_descent.SGD(learning_rate=0.001), metrics=["accuracy"]) return model
tensorflow-master
tensorflow/python/keras/distribute/multi_worker_testing_utils.py
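A minimal usage sketch for the helpers above, assuming the code runs inside the TensorFlow source tree where the internal `multi_worker_testing_utils` module is importable (outside the tree, an equivalent dataset and model would have to be built from the public API): construct the synthetic MNIST dataset and the deterministic CNN, then run a short fit and evaluate.

from tensorflow.python.keras.distribute import multi_worker_testing_utils

batch_size = 64
steps = 2
train_ds, eval_ds = multi_worker_testing_utils.mnist_synthetic_dataset(
    batch_size, steps)
model = multi_worker_testing_utils.get_mnist_model((28, 28, 1))
model.fit(x=train_ds, epochs=1, steps_per_epoch=steps)
model.evaluate(eval_ds, steps=1)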
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests Keras multi worker callbacks.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import sys import tempfile import threading from absl.testing import parameterized # pylint: disable=g-direct-tensorflow-import from tensorflow.python import keras from tensorflow.python.distribute import collective_all_reduce_strategy as collective_strategy from tensorflow.python.distribute import combinations from tensorflow.python.distribute import distribute_coordinator as dc from tensorflow.python.distribute import mirrored_strategy from tensorflow.python.distribute import multi_worker_test_base as test_base from tensorflow.python.distribute import multi_worker_util from tensorflow.python.keras import backend as K from tensorflow.python.keras import callbacks from tensorflow.python.keras import testing_utils from tensorflow.python.keras.distribute import multi_worker_testing_utils from tensorflow.python.keras.distribute import multi_worker_training_state as training_state from tensorflow.python.platform import test def get_strategy_object(strategy_cls): if strategy_cls == mirrored_strategy.MirroredStrategy: return strategy_cls(mirrored_strategy.all_local_devices()) else: # CollectiveAllReduceStrategy and ParameterServerStrategy. return strategy_cls() def generate_callback_test_function(custom_callable): """Generic template for callback tests using mnist synthetic dataset.""" @combinations.generate( combinations.combine( mode=['graph'], strategy_cls=[collective_strategy.CollectiveAllReduceStrategy], required_gpus=[0, 1], file_format=['h5', 'tf'])) def test_template(self, strategy_cls, file_format): num_workers = 2 num_epoch = 2 cluster_spec = test_base.create_cluster_spec( num_workers=num_workers, test_obj=self) self._barrier = dc._Barrier(2) def _independent_worker_fn(*args, **kwargs): # pylint: disable=unused-argument """Simulates an Independent Worker inside of a thread.""" with test.mock.patch.object(dc, '_run_std_server', self._make_mock_run_std_server()): strategy = get_strategy_object(strategy_cls) batch_size = 64 steps = 2 train_ds, _ = multi_worker_testing_utils.mnist_synthetic_dataset( batch_size, steps) with strategy.scope(): model = multi_worker_testing_utils.get_mnist_model((28, 28, 1)) custom_callable( model, self, train_ds, num_epoch, steps, strategy, saving_filepath=kwargs['saving_filepath'], barrier=kwargs['barrier'], threading_local=kwargs['threading_local']) # Pass saving_filepath from the parent thread to ensure every worker has the # same fileapth to save. saving_filepath = os.path.join(self.get_temp_dir(), 'checkpoint.' 
+ file_format) barrier = dc._Barrier(2) threading_local = threading.local() threads = self.run_multiple_tasks_in_threads( _independent_worker_fn, cluster_spec, saving_filepath=saving_filepath, barrier=barrier, threading_local=threading_local) self.assertFalse(training_state.checkpoint_exists(saving_filepath)) threads_to_join = [] strategy = get_strategy_object(strategy_cls) if strategy.extended.experimental_between_graph: for ts in threads.values(): threads_to_join.extend(ts) else: threads_to_join = [threads['worker'][0]] self.join_independent_workers(threads_to_join) return test_template class KerasMultiWorkerCallbackTest(test_base.IndependentWorkerTestBase, parameterized.TestCase): # The callables of the actual testing content to be run go below. @staticmethod def callableForTestChiefOnlyCallback(model, test_obj, train_ds, num_epoch, steps, strategy, saving_filepath, **kwargs): class ChiefOnly(keras.callbacks.Callback): def __init__(self): self._chief_worker_only = True self.filtered_correctly = True def on_train_begin(self, logs): if not multi_worker_util.is_chief(): # Non-chief workers shouldn't run this callback. self.filtered_correctly = False cb = ChiefOnly() model.fit( x=train_ds, epochs=num_epoch, steps_per_epoch=steps, callbacks=[cb]) test_obj.assertTrue(cb.filtered_correctly) @staticmethod def callableForTestModelCheckpointSavesOnChiefButNotOtherwise( model, test_obj, train_ds, num_epoch, steps, strategy, saving_filepath, **kwargs): extension = os.path.splitext(saving_filepath)[1] # TODO(rchao): Remove using .h5 once b/134551335 is fixed. extension = '.h5' # Incorporate type/index information and thread id in saving_filepath to # ensure every worker has a unique path. Note that in normal use case the # saving_filepath will be the same for all workers, but we use different # ones here just to test out chief saves checkpoint but non-chief doesn't. # TODO(b/134551335): Must save to hdf5 until bug with copying # MirroredVariables is resolved. saving_filepath = os.path.join( test_obj.get_temp_dir(), 'checkpoint_%s_%d%s' % (test_base.get_task_type(), test_base.get_task_index(), extension)) # The saving_filepath shouldn't exist at the beginning (as it's unique). test_obj.assertFalse(training_state.checkpoint_exists(saving_filepath)) model.fit( x=train_ds, epochs=num_epoch, steps_per_epoch=steps, callbacks=[callbacks.ModelCheckpoint(filepath=saving_filepath)]) # If it's chief, the model should be saved; if not, the model shouldn't. test_obj.assertEqual( training_state.checkpoint_exists(saving_filepath), test_base.is_chief()) @staticmethod def initialFitting(test_obj, model, train_ds, num_epoch, steps, saving_filepath): # The saving_filepath shouldn't exist at the beginning. test_obj.assertFalse(training_state.checkpoint_exists(saving_filepath)) model.fit( x=train_ds, epochs=num_epoch, steps_per_epoch=steps, callbacks=[ callbacks.ModelCheckpoint( filepath=saving_filepath, save_weights_only=True) ]) # The saving_filepath should exist after fitting with callback. Both chief # and non-chief worker should both see it exists (which was saved only by # chief). test_obj.assertTrue(training_state.checkpoint_exists(saving_filepath)) history_after_one_more_epoch = model.fit( x=train_ds, epochs=1, steps_per_epoch=steps) # The saving_filepath should continue to exist (if it did) after fitting # without callback. 
test_obj.assertTrue(training_state.checkpoint_exists(saving_filepath)) return saving_filepath, history_after_one_more_epoch @staticmethod def callableForTestLoadWeightFromModelCheckpoint(model, test_obj, train_ds, num_epoch, steps, strategy, saving_filepath, **kwargs): filepaths = [] real_mkstemp = tempfile.mkstemp def mocked_mkstemp(): # Only non-chief should call tempfile.mkstemp() inside fit() in sync # training. assert not test_base.is_chief() file_handle, temp_file_name = real_mkstemp() extension = os.path.splitext(saving_filepath)[1] temp_filepath = temp_file_name + extension filepaths.append(temp_filepath) return file_handle, temp_file_name # Mock tempfile.mkstemp() so the filepaths can be stored and verified later. with test.mock.patch.object(tempfile, 'mkstemp', mocked_mkstemp): saving_filepath, history_after_one_more_epoch = \ KerasMultiWorkerCallbackTest.initialFitting( test_obj, model, train_ds, num_epoch, steps, saving_filepath) with strategy.scope(): model.load_weights(saving_filepath) history_after_loading_weight_and_one_more_epoch = model.fit( x=train_ds, epochs=1, steps_per_epoch=steps) test_obj.assertAllClose( history_after_one_more_epoch.history, history_after_loading_weight_and_one_more_epoch.history, rtol=5e-5) # Verify the temp files are indeed removed (no trace left behind). for filepath in filepaths: assert not training_state.checkpoint_exists(filepath) @staticmethod def callableForTestModelRestoreCallback(model, test_obj, train_ds, num_epoch, steps, strategy, saving_filepath, **kwargs): saving_filepath, history_after_one_more_epoch = \ KerasMultiWorkerCallbackTest.initialFitting( test_obj, model, train_ds, num_epoch, steps, saving_filepath) # The model should get restored to the weights previously saved, by # adding a ModelCheckpoint callback (which results in a # _ModelRestoreCallback being added), with load_weights_on_restart=True. history_after_model_restoring_and_one_more_epoch = model.fit( x=train_ds, epochs=1, steps_per_epoch=steps, callbacks=[ callbacks.ModelCheckpoint( filepath=saving_filepath, save_weights_only=True, load_weights_on_restart=True) ]) # Asserting the history one epoch after initial fitting and one epoch after # restoring are closed. test_obj.assertAllClose( history_after_one_more_epoch.history, history_after_model_restoring_and_one_more_epoch.history, rtol=5e-5) history_one_more_epoch_without_model_restoring = model.fit( x=train_ds, epochs=1, steps_per_epoch=steps) # Ensuring training for another epoch gives different result. test_obj.assertNotAllClose( history_after_model_restoring_and_one_more_epoch.history, history_one_more_epoch_without_model_restoring.history, rtol=5e-5) @staticmethod def callableForTestUnmatchedModelFile(model, test_obj, train_ds, num_epoch, steps, strategy, saving_filepath, **kwargs): # The saving_filepath shouldn't exist at the beginning. test_obj.assertFalse(training_state.checkpoint_exists(saving_filepath)) model.fit( x=train_ds, epochs=num_epoch, steps_per_epoch=steps, callbacks=[ callbacks.ModelCheckpoint( filepath=saving_filepath, save_weights_only=True) ]) (train_ds, _), (_, _) = testing_utils.get_test_data( train_samples=10, test_samples=10, input_shape=(3,), num_classes=2) # Switch to a model of different structure. 
with strategy.scope(): model = keras.models.Sequential() model.add(keras.layers.Dense(5, input_dim=3, activation='relu')) model.add(keras.layers.Dense(2, activation='softmax')) model.compile( loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc']) test_obj.assertTrue(training_state.checkpoint_exists(saving_filepath)) if saving_filepath.endswith('.tf'): test_obj.skipTest('Loading mismatched TF checkpoint would cause Fatal ' 'Python error: Aborted. Skipping.') # Unmatched format. Should raise ValueError. with test_obj.assertRaisesRegexp(ValueError, 'Error loading file from'): model.fit( x=train_ds, epochs=num_epoch, batch_size=8, callbacks=[ callbacks.ModelCheckpoint( filepath=saving_filepath, save_weights_only=True, load_weights_on_restart=True) ]) @staticmethod def callableForTestReduceLROnPlateau(model, test_obj, train_ds, num_epoch, steps, strategy, saving_filepath, **kwargs): cbks = [ callbacks.ReduceLROnPlateau( monitor='loss', factor=0.1, min_delta=1, patience=1, cooldown=5, verbose=1) ] # It is expected that the learning rate would drop by `factor` within # 3 epochs with `min_delta=1`. model.fit(x=train_ds, epochs=3, steps_per_epoch=steps, callbacks=cbks) test_obj.assertAllClose( float(K.get_value(model.optimizer.lr)), 0.0001, atol=1e-8) # It is expected that the learning rate would drop by another `factor` # within 3 epochs with `min_delta=1`. model.fit(x=train_ds, epochs=3, steps_per_epoch=steps, callbacks=cbks) test_obj.assertAllClose( float(K.get_value(model.optimizer.lr)), 0.00001, atol=1e-8) @staticmethod def callableForTestEarlyStopping(model, test_obj, train_ds, num_epoch, steps, strategy, saving_filepath, **kwargs): class EpochCounterCallback(callbacks.Callback): def on_epoch_begin(self, epoch, logs): self.last_epoch = epoch epoch_counter_cbk = EpochCounterCallback() cbks = [ callbacks.EarlyStopping( monitor='loss', min_delta=0.05, patience=1, verbose=1), epoch_counter_cbk ] # Empirically, it is expected that `model.fit()` would terminate around the # 22th epoch. Asserting that it should have been stopped before the 50th # epoch to avoid flakiness and be more predictable. model.fit(x=train_ds, epochs=100, steps_per_epoch=steps, callbacks=cbks) test_obj.assertLess(epoch_counter_cbk.last_epoch, 50) @staticmethod def callableForTestLearningRateScheduler(model, test_obj, train_ds, num_epoch, steps, strategy, saving_filepath, **kwargs): cbks = [ callbacks.LearningRateScheduler( schedule=lambda x: 1. / (1. + x), verbose=1) ] # It is expected that with `epochs=2`, the learning rate would drop to # 1 / (1 + 2) = 0.5. model.fit(x=train_ds, epochs=2, steps_per_epoch=steps, callbacks=cbks) test_obj.assertAllClose( float(K.get_value(model.optimizer.lr)), 0.5, atol=1e-8) # It is expected that with `epochs=4`, the learning rate would drop to # 1 / (1 + 4) = 0.25. model.fit(x=train_ds, epochs=4, steps_per_epoch=steps, callbacks=cbks) test_obj.assertAllClose( float(K.get_value(model.optimizer.lr)), 0.25, atol=1e-8) # pylint: disable=g-doc-args @staticmethod def callableForTestIntermediateDirForFTAreRemoved(model, test_obj, train_ds, num_epoch, steps, strategy, saving_filepath, **kwargs): """Testing that the temporary directory are removed. Some temporary directories are created for the purpose of fault tolerance. This test ensures that such directories should have been removed at the time `model.fit()` finishes successfully. """ # `threading_local` and `barrier` objects have to be passed in from parent # thread so both threads refer to the same object. 
threading_local = kwargs['threading_local'] barrier = kwargs['barrier'] # Two threads will each has one copy of `temp_dirs_supposed_to_be_removed` # list. threading_local.temp_dirs_supposed_to_be_removed = [] callbacks_list = [ callbacks.ModelCheckpoint( filepath=saving_filepath, save_weights_only=True, load_weights_on_restart=True), ] # Keep the references to the real function objects. real_os_path_join = os.path.join real_tempfile_mkdtemp = tempfile.mkdtemp # Make a `os.path.join` wrapper, which will be patched onto the real # function, so the temporary directories can be tracked. def wrapper_os_path_join(path, *paths): join_result = real_os_path_join(path, *paths) if len(paths) == 1 and paths[0] == 'backup': threading_local.temp_dirs_supposed_to_be_removed.append(join_result) return join_result # Likewise for `tempfile.mkdtemp`. def wrapper_tempfile_mkdtemp(): result = real_tempfile_mkdtemp() threading_local.temp_dirs_supposed_to_be_removed.append(result) return result # Now the two threads must sync here: if they are out of sync, one thread # can go ahead and patch `os.path.join` while the other has not even # assigned the real `os.path.join` to `real_os_path_join`. If this happened, # the "real" `os.path.join` the slower thread would see is actually the # wrapper of the other. barrier.wait() # Note that `os.path.join` will respect the second patch (there are two # patches because of the two threads). Both threads will refer to the same # copy of `wrapper_os_path_join` because of the `barrier` preceding # `model.fit()`. Likewise for `wrapper_tempfile_mkdtemp`. os.path.join = wrapper_os_path_join tempfile.mkdtemp = wrapper_tempfile_mkdtemp barrier.wait() model.fit( x=train_ds, epochs=num_epoch, steps_per_epoch=steps, callbacks=callbacks_list) # Sync before un-patching to prevent either thread from accessing the real # functions. Also to make sure `model.fit()` is done on both threads (so we # can safely assert the directories are removed). barrier.wait() os.path.join = real_os_path_join tempfile.mkdtemp = real_tempfile_mkdtemp # There should be directory (names) that are supposed to be removed. test_obj.assertTrue(threading_local.temp_dirs_supposed_to_be_removed) for temp_dir_supposed_to_be_removed in ( threading_local.temp_dirs_supposed_to_be_removed): # They should have been removed and thus don't exist. test_obj.assertFalse(os.path.exists(temp_dir_supposed_to_be_removed)) # The actual testing methods go here. 
test_chief_only_callback = generate_callback_test_function( callableForTestChiefOnlyCallback.__func__) test_model_checkpoint_saves_on_chief_but_not_otherwise = \ generate_callback_test_function( callableForTestModelCheckpointSavesOnChiefButNotOtherwise.__func__) test_load_weight_from_model_checkpoint = generate_callback_test_function( callableForTestLoadWeightFromModelCheckpoint.__func__) test_model_restore_callback = generate_callback_test_function( callableForTestModelRestoreCallback.__func__) test_unmatched_model_file = generate_callback_test_function( callableForTestUnmatchedModelFile.__func__) test_reduce_lr_on_plateau = generate_callback_test_function( callableForTestReduceLROnPlateau.__func__) test_early_stopping = generate_callback_test_function( callableForTestEarlyStopping.__func__) test_learning_rate_scheduler = generate_callback_test_function( callableForTestLearningRateScheduler.__func__) test_intermediate_dir_for_ft_are_removed = generate_callback_test_function( callableForTestIntermediateDirForFTAreRemoved.__func__) if __name__ == '__main__': with test.mock.patch.object(sys, 'exit', os._exit): test.main()
tensorflow-master
tensorflow/python/keras/distribute/multi_worker_callback_test.py
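A minimal sketch of the LearningRateScheduler arithmetic exercised in the callback test above: the schedule is applied once per epoch with a zero-based index, so after `fit(epochs=N)` the optimizer is left at `schedule(N - 1)`. With `schedule(e) = 1 / (1 + e)` that is 0.5 after two epochs and 0.25 after four, matching the test's assertions. This standalone version assumes a TF release where `model.optimizer.lr` is readable through `backend.get_value`.

import numpy as np
import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
model.compile(optimizer=tf.keras.optimizers.SGD(), loss='mse')
x = np.zeros((8, 4), dtype=np.float32)
y = np.zeros((8, 1), dtype=np.float32)
scheduler = tf.keras.callbacks.LearningRateScheduler(lambda e: 1. / (1. + e))

model.fit(x, y, epochs=2, verbose=0, callbacks=[scheduler])
# Last epoch index was 1, so lr == 1 / (1 + 1) == 0.5.
assert abs(float(tf.keras.backend.get_value(model.optimizer.lr)) - 0.5) < 1e-6

model.fit(x, y, epochs=4, verbose=0, callbacks=[scheduler])
# Epoch indices restart at 0, so the last applied value is 1 / (1 + 3) == 0.25.
assert abs(float(tf.keras.backend.get_value(model.optimizer.lr)) - 0.25) < 1e-6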
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for tf.keras models using tf.distribute.Strategy.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized import numpy as np from tensorflow.python import keras from tensorflow.python.data.experimental.ops import cardinality from tensorflow.python.data.ops import dataset_ops from tensorflow.python.distribute import combinations from tensorflow.python.distribute import distribution_strategy_context from tensorflow.python.distribute import mirrored_strategy from tensorflow.python.distribute import strategy_combinations from tensorflow.python.distribute import tpu_strategy from tensorflow.python.eager import test from tensorflow.python.keras import testing_utils from tensorflow.python.keras.distribute import distributed_training_utils from tensorflow.python.keras.optimizer_v2 import gradient_descent as gradient_descent_keras from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops.losses import loss_reduction from tensorflow.python.training import gradient_descent from tensorflow.python.training import rmsprop _RANDOM_SEED = 1337 _TRAIN_SIZE = 200 _INPUT_SIZE = (10,) _NUM_CLASS = 2 # Note: Please make sure the tests in this file are also covered in # keras_backward_compat_test for features that are supported with both APIs. # TODO(anjalisridhar): Add a decorator that will allow us to run these tests as # part of the tf.keras unit tests suite. 
def simple_sequential_model(): model = keras.models.Sequential() model.add(keras.layers.Dense(16, activation='relu', input_shape=_INPUT_SIZE)) model.add(keras.layers.Dropout(0.1)) model.add(keras.layers.Dense(_NUM_CLASS, activation='softmax')) return model def simple_subclassed_model(num_labels=_NUM_CLASS): class _SimpleMLP(keras.Model): def __init__(self, num_labels): super(_SimpleMLP, self).__init__() self.dense = keras.layers.Dense(num_labels) def call(self, inputs): return self.dense(inputs) return _SimpleMLP(num_labels) def simple_multi_inputs_multi_outputs_model(): input_a = keras.layers.Input(shape=(16,), name='input_a') input_b = keras.layers.Input(shape=(16,), name='input_b') merged = keras.layers.concatenate([input_a, input_b], name='merge') output_c = keras.layers.Dense(3, activation='softmax', name='dense_2')(merged) output_d = keras.layers.Dense(2, activation='softmax', name='dense_3')(merged) model = keras.models.Model( inputs=[input_a, input_b], outputs=[output_c, output_d]) return model def get_multi_inputs_multi_outputs_data(): (a_train, c_train), (a_test, c_test) = testing_utils.get_test_data( train_samples=_TRAIN_SIZE, test_samples=50, input_shape=(16,), num_classes=3, random_seed=_RANDOM_SEED) (b_train, d_train), (b_test, d_test) = testing_utils.get_test_data( train_samples=_TRAIN_SIZE, test_samples=50, input_shape=(16,), num_classes=2, random_seed=_RANDOM_SEED) (m_train, _), (m_test, _) = testing_utils.get_test_data( train_samples=_TRAIN_SIZE, test_samples=50, input_shape=(8,), num_classes=2, random_seed=_RANDOM_SEED) c_train = keras.utils.to_categorical(c_train) c_test = keras.utils.to_categorical(c_test) d_train = keras.utils.to_categorical(d_train) d_test = keras.utils.to_categorical(d_test) train_data = { 'input_a': a_train, 'input_b': b_train, 'input_m': m_train, 'output_c': c_train, 'output_d': d_train } test_data = { 'input_a': a_test, 'input_b': b_test, 'input_m': m_test, 'output_c': c_test, 'output_d': d_test } return (train_data, test_data) def batch_wrapper(dataset, batch_size, distribution, repeat=None): if repeat: dataset = dataset.repeat(repeat) # TPUs currently require fully defined input shapes, drop_remainder ensures # the input will have fully defined shapes. 
if isinstance(distribution, (tpu_strategy.TPUStrategy, tpu_strategy.TPUStrategyV1)): return dataset.batch(batch_size, drop_remainder=True) else: return dataset.batch(batch_size) def get_model(): x = keras.layers.Input(shape=(3,), name='input') y = keras.layers.Dense(4, name='dense')(x) model = keras.Model(x, y) return model def get_sample_weights_model(): x = keras.layers.Input(shape=(1,), name='input') y = keras.layers.Dense( 1, kernel_initializer='ones', bias_initializer='zeros', name='dense')(x) model = keras.Model(x, y) return model def get_dataset(distribution): inputs = np.zeros((10, 3), dtype=np.float32) targets = np.zeros((10, 4), dtype=np.float32) dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets)) dataset = dataset.repeat(100) dataset = batch_wrapper(dataset, 10, distribution) return dataset def get_predict_dataset(distribution): inputs = np.zeros((10, 3), dtype=np.float32) dataset = dataset_ops.Dataset.from_tensor_slices(inputs) dataset = dataset.repeat(100) dataset = batch_wrapper(dataset, 10, distribution) return dataset def convert_numpy_to_dataset_with_unknown_cardinality(inputs, targets=None): if targets is not None: input_slices = (inputs, targets) dummy_op = (lambda inp, target: True) else: input_slices = inputs dummy_op = (lambda inp: True) original_dataset = (dataset_ops.Dataset.from_tensor_slices( input_slices)) ds_with_unknown_cardinality = (original_dataset.filter(dummy_op). batch(10, drop_remainder=True)) return ds_with_unknown_cardinality def multi_input_output_model(): a = keras.layers.Input(shape=(3,), name='input_a') b = keras.layers.Input(shape=(5,), name='input_b') # TODO(anjalisridhar): Change the output dimension of the second Dense layer # once the iterator output validation issue has been fixed. dense_1 = keras.layers.Dense(7, name='dense_1') dense_2 = keras.layers.Dense(7, name='dense_2') c = dense_1(a) d = dense_2(b) e = keras.layers.Dropout(0.5, name='dropout')(c) model = keras.models.Model([a, b], [d, e]) return model strategies_minus_default_minus_tpu = [ strategy_combinations.one_device_strategy, strategy_combinations.one_device_strategy_gpu, strategy_combinations.mirrored_strategy_with_gpu_and_cpu, strategy_combinations.mirrored_strategy_with_two_gpus ] strategies_minus_tpu = [ strategy_combinations.default_strategy, strategy_combinations.one_device_strategy, strategy_combinations.one_device_strategy_gpu, strategy_combinations.mirrored_strategy_with_gpu_and_cpu, strategy_combinations.mirrored_strategy_with_two_gpus ] tpu_strategies = [ strategy_combinations.tpu_strategy, # steps_per_run=2 strategy_combinations.tpu_strategy_one_step ] def strategy_minus_tpu_combinations(): return combinations.combine(distribution=strategies_minus_tpu, mode=['graph', 'eager']) def tpu_strategy_combinations(): return combinations.combine(distribution=tpu_strategies, mode=['graph', 'eager']) def tpu_strategy_combinations_graph_only(): return combinations.combine(distribution=tpu_strategies, mode=['graph']) def all_strategy_combinations(): return strategy_minus_tpu_combinations() + tpu_strategy_combinations() def all_strategy_combinations_plus_cloning(): return ( combinations.combine( distribution=strategies_minus_tpu, mode=['graph', 'eager'], cloning=[True, False]) + combinations.combine( distribution=tpu_strategies, mode=['graph', 'eager'], cloning=[False])) def all_strategy_minus_default_and_tpu_combinations(): return combinations.combine( distribution=[ strategy_combinations.one_device_strategy, strategy_combinations.one_device_strategy_gpu, 
strategy_combinations.mirrored_strategy_with_gpu_and_cpu, strategy_combinations.mirrored_strategy_with_two_gpus, ], mode=['graph', 'eager']) def all_strategy_combinations_minus_default(): return (all_strategy_minus_default_and_tpu_combinations() + tpu_strategy_combinations()) def strategy_and_optimizer_combinations(): non_tpu_strategies = combinations.times( strategy_minus_tpu_combinations(), # TODO(b/130808953): Simplify when optimizers v1 work with cloning=False. combinations.combine( optimizer=[ strategy_combinations.adagrad_optimizer_v1_fn, strategy_combinations.adam_optimizer_v1_fn, strategy_combinations.gradient_descent_optimizer_v1_fn, strategy_combinations.rmsprop_optimizer_v1_fn, ], cloning=True) + combinations.combine( optimizer=[ strategy_combinations.adagrad_optimizer_keras_v2_fn, strategy_combinations.adam_optimizer_keras_v2_fn, strategy_combinations.gradient_descent_optimizer_keras_v2_fn, strategy_combinations.rmsprop_optimizer_keras_v2_fn ], cloning=[True, False])) # TODO(b/130808953): Simplify when optimizers v1 work with cloning=False. tpu_strategies_graph = combinations.combine( distribution=tpu_strategies, mode=['graph'], cloning=[True], optimizer=[ strategy_combinations.adagrad_optimizer_v1_fn, strategy_combinations.adam_optimizer_v1_fn, strategy_combinations.gradient_descent_optimizer_v1_fn, strategy_combinations.rmsprop_optimizer_v1_fn, strategy_combinations.adagrad_optimizer_keras_v2_fn, strategy_combinations.adam_optimizer_keras_v2_fn, strategy_combinations.gradient_descent_optimizer_keras_v2_fn, strategy_combinations.rmsprop_optimizer_keras_v2_fn ]) tpu_strategies_eager = combinations.combine( distribution=tpu_strategies, mode=['eager'], cloning=[False], optimizer=[ strategy_combinations.adagrad_optimizer_keras_v2_fn, strategy_combinations.adam_optimizer_keras_v2_fn, strategy_combinations.gradient_descent_optimizer_keras_v2_fn, strategy_combinations.rmsprop_optimizer_keras_v2_fn ]) return non_tpu_strategies + tpu_strategies_eager + tpu_strategies_graph class TestDistributionStrategyWithNumpyArrays(test.TestCase, parameterized.TestCase): @combinations.generate(all_strategy_combinations()) def test_calculating_input_params_no_steps_no_batch_size(self, distribution): # Calculate the per_replica_batch_size scaling factor for strategies # that use per_core_batch_size replica_scale_factor = 1.0 if not distributed_training_utils.global_batch_size_supported(distribution): replica_scale_factor = distribution.num_replicas_in_sync with self.cached_session(): # Input samples of different sizes input_20_samples = np.zeros((20, 3), dtype=np.float32) input_64_samples = np.zeros((64, 3), dtype=np.float32) # Default global batch size 32 for input with 64 samples run in 2 steps steps, batch_size = distributed_training_utils.get_input_params( distribution, input_64_samples, steps=None, batch_size=None) self.assertEqual(batch_size, 32 // replica_scale_factor) self.assertEqual(steps, 2) # Computed global batch size 20 is lower than 32 if we pass less samples. 
      steps, batch_size = distributed_training_utils.get_input_params(
          distribution, input_20_samples, steps=None, batch_size=None)
      self.assertEqual(batch_size, 20 // replica_scale_factor)
      self.assertEqual(steps, 1)

  @combinations.generate(all_strategy_combinations())
  def test_calculating_input_params_with_steps_no_batch_size(self, distribution):
    # Calculate the per_replica_batch_size scaling factor for strategies
    # that use per_core_batch_size
    replica_scale_factor = 1.0
    if not distributed_training_utils.global_batch_size_supported(distribution):
      replica_scale_factor = distribution.num_replicas_in_sync

    with self.cached_session():
      # Input samples of different sizes
      input_63_samples = np.zeros((63, 3), dtype=np.float32)
      input_64_samples = np.zeros((64, 3), dtype=np.float32)

      # Computed global batch size is correct when 1 step is specified
      steps, batch_size = distributed_training_utils.get_input_params(
          distribution, input_64_samples, steps=1, batch_size=None)
      self.assertEqual(batch_size, 64 // replica_scale_factor)
      self.assertEqual(steps, 1)

      # Computed global batch size is correct when 2 steps are specified
      steps, batch_size = distributed_training_utils.get_input_params(
          distribution, input_64_samples, steps=2, batch_size=None)
      self.assertEqual(batch_size, 32 // replica_scale_factor)
      self.assertEqual(steps, 2)

      # All samples cannot be consumed in the specified number of steps
      with self.assertRaisesRegexp(ValueError, 'not divisible by steps'):
        distributed_training_utils.get_input_params(
            distribution, input_63_samples, steps=2, batch_size=None)

      # This case is different for different strategies due to the
      # difference in supported batch size being global or per-replica.
      if replica_scale_factor == 1:
        # Computed global batch size is correct even if not shardable
        steps, batch_size = distributed_training_utils.get_input_params(
            distribution, input_63_samples, steps=3, batch_size=None)
        self.assertEqual(batch_size, 21)
        self.assertEqual(steps, 3)
      else:
        # Computed global batch size cannot be sharded across replicas
        with self.assertRaisesRegexp(ValueError, 'could not be sharded evenly '
                                     'across the sync replicas'):
          distributed_training_utils.get_input_params(
              distribution, input_63_samples, steps=1, batch_size=None)

  @combinations.generate(all_strategy_combinations())
  def test_calculating_input_params_no_steps_with_batch_size(self, distribution):
    # Calculate the per_replica_batch_size scaling factor for strategies
    # that use per_core_batch_size
    replica_scale_factor = 1.0
    if not distributed_training_utils.global_batch_size_supported(distribution):
      replica_scale_factor = distribution.num_replicas_in_sync

    with self.cached_session():
      input_64_samples = np.zeros((64, 3), dtype=np.float32)

      # Computed number of steps is correct for the specified batch size (16)
      steps, batch_size = distributed_training_utils.get_input_params(
          distribution, input_64_samples, steps=None, batch_size=16)
      self.assertEqual(batch_size, 16)
      self.assertEqual(steps, 4 // replica_scale_factor)

      # Computed number of steps is correct for the specified batch size (32)
      steps, batch_size = distributed_training_utils.get_input_params(
          distribution, input_64_samples, steps=None, batch_size=32)
      self.assertEqual(batch_size, 32)
      self.assertEqual(steps, 2 // replica_scale_factor)

  @combinations.generate(all_strategy_combinations())
  def test_calculating_input_params_with_steps_with_batch_size(self, distribution):
    with self.cached_session():
      input_64_samples = np.zeros((64, 3), dtype=np.float32)

      # No change to steps and batch size if both are specified and feasible
      steps, batch_size = 
distributed_training_utils.get_input_params( distribution, input_64_samples, steps=5, batch_size=3) self.assertEqual(batch_size, 3) self.assertEqual(steps, 5) # Number of samples is less than global batch size * steps with self.assertRaisesRegexp(ValueError, 'less than samples required'): distributed_training_utils.get_input_params( distribution, input_64_samples, steps=10, batch_size=13) @combinations.generate(all_strategy_combinations_plus_cloning()) def test_calling_model_with_numpy_arrays(self, distribution, cloning): with self.cached_session(): with distribution.scope(): # TODO(b/130808953): Re-enable the V1 optimizer after iterations is # mirrored. optimizer_fn = ( gradient_descent.GradientDescentOptimizer if cloning or not distribution_strategy_context.has_strategy() else gradient_descent_keras.SGD) optimizer = optimizer_fn(0.001) model = get_model() loss = 'mse' metrics = ['mae'] model.compile(optimizer, loss, metrics=metrics, cloning=cloning) inputs = np.zeros((64, 3), dtype=np.float32) targets = np.zeros((64, 4), dtype=np.float32) # Call fit with validation data model.fit(inputs, targets, epochs=1, batch_size=2, verbose=0, validation_data=(inputs, targets)) # TODO(anjalisridhar): We need tests for when the batch size and steps # are smaller and results in a 0 batch_size and steps value. model.evaluate(inputs, targets) # with steps model.evaluate(inputs, targets, steps=2) # with batch_size model.evaluate(inputs, targets, batch_size=8) model.predict(inputs) # with steps model.predict(inputs, steps=2) # with batch_size model.predict(inputs, batch_size=8) @combinations.generate(all_strategy_combinations_plus_cloning()) def test_calling_model_with_nested_numpy_arrays(self, distribution, cloning): with self.cached_session(): with distribution.scope(): # TODO(b/130808953): Re-enable the V1 optimizer after iterations is # mirrored. optimizer_fn = ( gradient_descent.GradientDescentOptimizer if cloning else gradient_descent_keras.SGD) optimizer = optimizer_fn(learning_rate=0.001) model = multi_input_output_model() loss = 'mse' model.compile(optimizer, loss, cloning=cloning) input_a_np = np.asarray(np.random.random((64, 3)), dtype=np.float32) input_b_np = np.asarray(np.random.random((64, 5)), dtype=np.float32) inputs = [input_a_np, input_b_np] output_d_np = np.asarray(np.random.random((64, 7)), dtype=np.float32) output_e_np = np.asarray(np.random.random((64, 7)), dtype=np.float32) targets = [output_d_np, output_e_np] # Call fit with validation data model.fit(inputs, targets, epochs=1, batch_size=8, verbose=0) # TODO(anjalisridhar): We need tests for when the batch size and steps are # smaller and results in a 0 batch_size and steps value. 
model.evaluate(inputs, targets) # with steps model.evaluate(inputs, targets, steps=2) # with batch_size model.evaluate(inputs, targets, batch_size=8) model.predict(inputs) # with steps model.predict(inputs, steps=2) # with batch_size model.predict(inputs, batch_size=8) @combinations.generate( combinations.combine(distribution=strategies_minus_tpu, mode=['graph', 'eager'])) def test_numpy_with_sample_weights(self, distribution): with self.cached_session(), distribution.scope(): model = get_sample_weights_model() optimizer = rmsprop.RMSPropOptimizer(learning_rate=0.001) loss = 'mse' model.compile(optimizer, loss) inputs = np.array([[0], [1], [2], [3]], np.float32) targets = np.array([[2], [4], [6], [8]], np.float32) sample_weights = np.array([0.25, 0.5, 0.75, 1], np.float32) result = model.evaluate(inputs, targets, batch_size=2, sample_weight=sample_weights, verbose=1) # The per sample loss is multipled by the corresponding sample weight. The # average of these weighted losses is the return value of the `evaluate` # call. For example, in the test above the average weighted loss is # calculated in the following manner: # batch_1 = (((2-0)^2) * 0.25 + ((4-1)^2) * 0.5) / 2 = 5.5 / 2 = 2.75 # batch_2 = (((6-2)^2 * 0.75) + ((8-3)^2 * 1)) / 2 = 37 / 2 = 18.5 # final result = (batch_1 + batch_2) / 2 = 10.625. # The first time we divide by number of input samples and the second time # we divide by number of steps/batches that the loss is aggregated over. self.assertAllClose(result, 10.625) # We now test without passing sample_weights: # batch_1 = ((2-0)^2) + ((4-1)^2) / 2 = 13 / 2 = 6.5 # batch_2 = ((6-2)^2) + ((8-3)^2) / 2 = 41 / 2 = 20.5 # final result = (batch_1 + batch_2) / 2 = 27 / 2 = 13.5 result = model.evaluate(inputs, targets, batch_size=2, verbose=1) self.assertAllClose(result, 13.5) @combinations.generate( combinations.combine(distribution=strategies_minus_default_minus_tpu, mode=['eager'])) def test_numpy_with_sample_weights_eager_with_cloning(self, distribution): with self.cached_session(), distribution.scope(): model = get_sample_weights_model() optimizer = rmsprop.RMSPropOptimizer(learning_rate=0.001) loss = 'mse' model.compile(optimizer, loss, cloning=True) inputs = np.array([[0], [1], [2], [3]], np.float32) targets = np.array([[2], [4], [6], [8]], np.float32) sample_weights = np.array([0.25, 0.5, 0.75, 1], np.float32) with self.assertRaisesRegexp(NotImplementedError, '`sample_weight` is not supported when ' 'using tf.distribute.Strategy in '): model.evaluate(inputs, targets, batch_size=2, sample_weight=sample_weights, verbose=1) @combinations.generate(all_strategy_combinations_plus_cloning()) def test_flatten_predict_outputs(self, distribution, cloning): with self.cached_session(): with distribution.scope(): model = multi_input_output_model() # TODO(b/130808953): Re-enable the V1 optimizer after iterations is # mirrored. optimizer_fn = ( gradient_descent.GradientDescentOptimizer if cloning else gradient_descent_keras.SGD) optimizer = optimizer_fn(learning_rate=0.001) loss = 'mse' model.compile(optimizer, loss, cloning=cloning) # We take 6 input samples with each input having a dimension of 3 or 5. input_a_np = np.asarray(np.random.random((6, 3)), dtype=np.float32) input_b_np = np.asarray(np.random.random((6, 5)), dtype=np.float32) inputs = [input_a_np, input_b_np] outs = model.predict(inputs, steps=1) # `predict` a list that is equal in length to the number of model outputs. 
# In this test our model has two outputs and each element of `outs` # corresponds to all the samples of one of the model outputs. self.assertLen(outs, 2) # Each of the output samples have a dimension of 7. We should process all # the available input samples(6). self.assertAllEqual([6, 7], outs[0].shape) self.assertAllEqual([6, 7], outs[1].shape) @combinations.generate( combinations.times(tpu_strategy_combinations_graph_only(), combinations.combine(batch_size=[4, 6]))) def test_evaluate_with_partial_batch(self, distribution, batch_size): with self.cached_session(): optimizer = gradient_descent.GradientDescentOptimizer(0.001) loss = 'mse' metrics = ['mae', keras.metrics.CategoricalAccuracy()] with distribution.scope(): model_with_ds_strategy = get_model() model_with_ds_strategy.compile(optimizer, loss, metrics=metrics) cpu_model = get_model() cpu_model.compile(optimizer, loss, metrics=metrics) x = np.random.random((10, 3)).astype('float32') y = np.random.random((10, 4)).astype('float32') # As sample size is 10, we batch by 4 so that the last batch is # a partial batch. Also `evaluate()` using numpy array as inputs without # distribution strategy uses entire sample as a single batch. As so, # we remove parameters `batch_size` and `steps`. cpu_model.set_weights(model_with_ds_strategy.get_weights()) evaluate_ground_truth = cpu_model.evaluate(x, y) # We don't compare the loss as loss is currently not computed as metric # in Keras, the loss value is inaccurate for last partial batch due to # more weights for the last batch samples. steps = np.ceil(10.0 / batch_size) self.assertAllClose( model_with_ds_strategy.evaluate( x, y, batch_size=batch_size, steps=steps)[1:], evaluate_ground_truth[1:], atol=1e-5, rtol=1e-5) # Test that `steps` is inferred correctly when final partial batch exists. self.assertAllClose( model_with_ds_strategy.evaluate(x, y, batch_size=batch_size)[1:], evaluate_ground_truth[1:], atol=1e-5, rtol=1e-5) @combinations.generate( combinations.times(tpu_strategy_combinations_graph_only(), combinations.combine(cloning=[True, False]))) def test_predict_with_partial_batch(self, distribution, cloning): with self.cached_session(): optimizer = gradient_descent.GradientDescentOptimizer(0.001) loss = 'mse' with distribution.scope(): model_with_ds_strategy = get_model() model_with_ds_strategy.compile(optimizer, loss, cloning=cloning) cpu_model = get_model() cpu_model.compile(optimizer, loss) inputs = np.random.random((10, 3)).astype(np.float32) # As sample size is 10, we batch by 4 so that the last batch is # a partial batch. Also `predict()` using numpy array as inputs without # distribution strategy uses entire sample as a single batch. As so, # we remove parameters `batch_size` and `steps`. cpu_model.set_weights(model_with_ds_strategy.get_weights()) predict_ground_truth = cpu_model.predict(inputs) self.assertAllClose( model_with_ds_strategy.predict(inputs, batch_size=4, steps=3), predict_ground_truth, atol=1e-5, rtol=1e-5) # Test that `steps` is inferred correctly when final partial batch exists. 
self.assertAllClose( model_with_ds_strategy.predict(inputs, batch_size=4), predict_ground_truth, atol=1e-5, rtol=1e-5) @combinations.generate(tpu_strategy_combinations_graph_only()) def test_no_target_model(self, distribution): with self.cached_session(): optimizer = gradient_descent.GradientDescentOptimizer(0.001) class MyLayer(keras.layers.Layer): def call(self, inputs, training=None): self.add_loss(math_ops.reduce_sum(inputs), inputs=True) return inputs with distribution.scope(): model = keras.models.Sequential() model.add(keras.layers.Dense(16, activation='relu', input_shape=_INPUT_SIZE)) model.add(MyLayer()) model.add(keras.layers.Dense(_NUM_CLASS, activation='softmax')) model.compile(optimizer) inputs = np.zeros((20, 10), np.float32) model.fit(inputs, epochs=1, steps_per_epoch=2) model.predict(inputs, steps=1) model.evaluate(inputs, steps=1) @combinations.generate( combinations.times(tpu_strategy_combinations_graph_only(), combinations.combine(cloning=[True, False]))) def test_predict_multi_output_model_with_partial_batch( self, distribution, cloning): with self.cached_session(): optimizer = gradient_descent.GradientDescentOptimizer(0.001) loss = 'mse' with distribution.scope(): model_with_ds_strategy = simple_multi_inputs_multi_outputs_model() model_with_ds_strategy.compile(optimizer, loss, cloning=cloning) cpu_model = simple_multi_inputs_multi_outputs_model() cpu_model.compile(optimizer, loss) input_data, _ = get_multi_inputs_multi_outputs_data() input_dict = { 'input_a': input_data['input_a'], 'input_b': input_data['input_b'], } # As sample size is 200, we batch by 18 so that the last batch is # a partial batch. Also `fit()` using numpy array as inputs without # distribution strategy uses entire sample as a single batch. As so, # we remove parameters `batch_size` and `steps`. cpu_model.set_weights(model_with_ds_strategy.get_weights()) self.assertAllClose( model_with_ds_strategy.predict(input_dict, batch_size=18, steps=12), cpu_model.predict(input_dict), atol=1e-4, rtol=1e-4) class TestDistributionStrategyWithDatasets(test.TestCase, parameterized.TestCase): @combinations.generate(all_strategy_combinations_plus_cloning()) def test_calling_model_on_same_dataset(self, distribution, cloning): with self.cached_session(): with distribution.scope(): # TODO(b/130808953): Re-enable the V1 optimizer after iterations is # mirrored. optimizer_fn = ( gradient_descent.GradientDescentOptimizer if cloning else gradient_descent_keras.SGD) optimizer = optimizer_fn(0.001) model = get_model() loss = 'mse' metrics = ['mae', keras.metrics.CategoricalAccuracy()] model.compile(optimizer, loss, metrics=metrics, cloning=cloning) dataset = get_dataset(distribution) # Call fit with validation data model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0, validation_data=dataset, validation_steps=2) model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0, validation_data=dataset, validation_steps=2) model.predict(get_predict_dataset(distribution), steps=2) @combinations.generate(all_strategy_combinations_plus_cloning()) def test_model_interleaved_eval_same_as_direct_eval(self, distribution, cloning): with self.cached_session(): with distribution.scope(): # TODO(b/130808953): Re-enable the V1 optimizer after iterations is # mirrored. 
optimizer_fn = ( gradient_descent.GradientDescentOptimizer if cloning else gradient_descent_keras.SGD) user_controlled_model = get_model() user_controlled_model.compile( optimizer_fn(0.001), loss='mse', metrics=['mae', keras.metrics.CategoricalAccuracy()], cloning=cloning) interleaved_model = get_model() interleaved_model.set_weights(user_controlled_model.get_weights()) interleaved_model.compile( optimizer_fn(0.001), loss='mse', metrics=['mae', keras.metrics.CategoricalAccuracy()], cloning=cloning) dataset = get_dataset(distribution) # Call fit with validation interleaved interleaved_output = interleaved_model.fit( dataset, epochs=2, steps_per_epoch=2, verbose=1, validation_data=dataset, validation_steps=2, shuffle=False) # Manually control the validation running after each epoch. user_controlled_output = [] for _ in range(2): user_controlled_model.fit( dataset, epochs=1, steps_per_epoch=2, verbose=1, shuffle=False) user_controlled_output.append( user_controlled_model.evaluate(dataset, steps=2)) self.assertEqual(interleaved_output.history['val_loss'], [x[0] for x in user_controlled_output]) val_mean_absolute_error = interleaved_output.history.get( 'val_mean_absolute_error') if not val_mean_absolute_error: # The name of the metric changed in TF2.0 val_mean_absolute_error = interleaved_output.history['val_mae'] self.assertEqual(val_mean_absolute_error, [x[1] for x in user_controlled_output]) self.assertEqual(interleaved_output.history['val_categorical_accuracy'], [x[2] for x in user_controlled_output]) @combinations.generate(all_strategy_combinations_plus_cloning()) def test_fit_with_tuple_and_dict_dataset_inputs(self, distribution, cloning): with self.cached_session(): with distribution.scope(): # TODO(b/130808953): Re-enable the V1 optimizer after iterations is # mirrored. 
optimizer_fn = ( gradient_descent.GradientDescentOptimizer if cloning else gradient_descent_keras.SGD) optimizer = optimizer_fn(learning_rate=0.001) model = multi_input_output_model() loss = 'mse' metrics = ['mae', keras.metrics.CategoricalAccuracy()] model.compile(optimizer, loss, metrics=metrics, cloning=cloning) input_a_np = np.random.random((10, 3)).astype('float32') input_b_np = np.random.random((10, 5)).astype('float32') output_d_np = np.random.random((10, 7)).astype('float32') output_e_np = np.random.random((10, 7)).astype('float32') # Test with tuples dataset_tuple = dataset_ops.Dataset.from_tensor_slices(( (input_a_np, input_b_np), (output_d_np, output_e_np))) dataset_tuple = dataset_tuple.repeat(100) dataset_tuple = dataset_tuple.batch(10) model.fit(dataset_tuple, epochs=1, steps_per_epoch=2, verbose=1) # Test with dict dataset_dict = dataset_ops.Dataset.from_tensor_slices(( {'input_a': input_a_np, 'input_b': input_b_np}, (output_d_np, output_e_np))) dataset_dict = dataset_dict.repeat(100) dataset_dict = dataset_dict.batch(10) model.fit(dataset_dict, epochs=1, steps_per_epoch=2, verbose=1) @combinations.generate(all_strategy_combinations_plus_cloning()) def test_fit_with_dictionary_in_the_dataset_b135161171( self, distribution, cloning): def custom_loss(predict, label, weight): bce = keras.losses.binary_crossentropy(label, predict) return math_ops.reduce_mean(bce * weight) with self.cached_session(): with distribution.scope(): input_img = keras.layers.Input([64, 64, 3], name='img') input_lbl = keras.layers.Input([64, 64, 1], name='lbl') input_weight = keras.layers.Input([64, 64], name='weight') predict = keras.layers.Conv2D(2, [1, 1], padding='same')(input_img) loss_lambda = keras.layers.Lambda( lambda x: custom_loss(*x), name='my_loss') my_loss = loss_lambda([predict, input_lbl, input_weight]) model = keras.models.Model( inputs=[input_img, input_lbl, input_weight], outputs=[predict, my_loss]) model.add_loss(model.get_layer('my_loss').output) model.compile(optimizer='adam', cloning=cloning) def map_fn(img, lbl, weight): inputs = {'img': img, 'lbl': lbl, 'weight': weight} targets = {} return inputs, targets fake_imgs = np.ones([50, 64, 64, 3], dtype=np.float32) fake_lbls = np.ones([50, 64, 64, 1], dtype=np.float32) fake_weights = np.ones([50, 64, 64], dtype=np.float32) data = dataset_ops.Dataset.from_tensor_slices( (fake_imgs, fake_lbls, fake_weights)).map(map_fn).batch(10) model.fit(data) @combinations.generate(all_strategy_combinations_plus_cloning()) def test_fit_eval_and_predict_methods_on_dataset_without_steps( self, distribution, cloning): with self.cached_session(): with distribution.scope(): # TODO(b/130808953): Re-enable the V1 optimizer after iterations is # mirrored. optimizer_fn = ( gradient_descent.GradientDescentOptimizer if cloning else gradient_descent_keras.SGD) optimizer = optimizer_fn(0.001) model = get_model() loss = 'mse' metrics = ['mae', keras.metrics.CategoricalAccuracy()] model.compile(optimizer, loss, metrics=metrics, cloning=cloning) inputs = np.zeros((1000, 3), dtype=np.float32) targets = np.zeros((1000, 4), dtype=np.float32) # steps/steps_per_epoch are calculated when using numpy arrays as # input data. 
fit_with_numpy = model.fit(inputs, targets, epochs=1, batch_size=10).history eval_with_numpy = model.evaluate(inputs, targets, batch_size=10) predict_with_numpy = model.predict(inputs, batch_size=10) dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets)) dataset = dataset.batch(10, drop_remainder=True) fit_with_ds = model.fit(dataset, epochs=1).history eval_with_ds = model.evaluate(dataset) predict_dataset = dataset_ops.Dataset.from_tensor_slices(inputs) predict_dataset = predict_dataset.batch(10, drop_remainder=True) predict_with_ds = model.predict(predict_dataset) self.assertAllClose( fit_with_numpy, fit_with_ds, atol=1e-4, rtol=1e-4) self.assertAllClose( eval_with_numpy, eval_with_ds, atol=1e-4, rtol=1e-4) self.assertAllClose( predict_with_numpy, predict_with_ds, atol=1e-4, rtol=1e-4) @combinations.generate( combinations.times(strategy_minus_tpu_combinations(), combinations.combine(cloning=[True, False]))) def test_on_dataset_with_unknown_cardinality_without_steps( self, distribution, cloning): with self.cached_session(): with distribution.scope(): # TODO(b/130808953): Re-enable the V1 optimizer after iterations is # mirrored. optimizer_fn = ( gradient_descent.GradientDescentOptimizer if cloning else gradient_descent_keras.SGD) optimizer = optimizer_fn(0.001) model = get_model() loss = 'mse' metrics = ['mae', keras.metrics.CategoricalAccuracy()] model.compile(optimizer, loss, metrics=metrics, cloning=cloning) inputs = np.zeros((1000, 3), dtype=np.float32) targets = np.zeros((1000, 4), dtype=np.float32) # steps/steps_per_epoch are calculated when using numpy arrays as # input data. fit_with_numpy = model.fit(inputs, targets, epochs=1, batch_size=10).history fit_with_numpy_multiple_epochs = model.fit( inputs, targets, epochs=2, batch_size=10).history eval_with_numpy = model.evaluate(inputs, targets, batch_size=10) predict_with_numpy = model.predict(inputs, batch_size=10) dataset = convert_numpy_to_dataset_with_unknown_cardinality( inputs, targets) predict_dataset = convert_numpy_to_dataset_with_unknown_cardinality( inputs) self.assertEqual(keras.backend.get_value(cardinality.cardinality( dataset)), cardinality.UNKNOWN) self.assertEqual(keras.backend.get_value(cardinality.cardinality( predict_dataset)), cardinality.UNKNOWN) eval_with_ds = model.evaluate(dataset) predict_with_ds = model.predict(predict_dataset) self.assertAllClose( eval_with_numpy, eval_with_ds, atol=1e-4, rtol=1e-4) self.assertAllClose( predict_with_numpy, predict_with_ds, atol=1e-4, rtol=1e-4) fit_with_ds = model.fit(dataset, epochs=1).history fit_with_ds_multiple_epochs = model.fit(dataset, epochs=2).history self.assertAllClose( fit_with_numpy, fit_with_ds, atol=1e-4, rtol=1e-4) self.assertAllClose( fit_with_numpy_multiple_epochs, fit_with_ds_multiple_epochs, atol=1e-4, rtol=1e-4) @combinations.generate( combinations.times(tpu_strategy_combinations(), combinations.combine(cloning=[True, False]))) def test_on_dataset_with_unknown_cardinality(self, distribution, cloning): with self.cached_session(): with distribution.scope(): model = get_model() loss = 'mse' metrics = ['mae', keras.metrics.CategoricalAccuracy()] model.compile( gradient_descent.GradientDescentOptimizer(0.001), loss, metrics=metrics, cloning=cloning) inputs = np.zeros((1000, 3), dtype=np.float32) targets = np.zeros((1000, 4), dtype=np.float32) # steps/steps_per_epoch are calculated when using numpy arrays as # input data. 
eval_with_numpy = model.evaluate(inputs, targets, batch_size=10) predict_with_numpy = model.predict(inputs, batch_size=10) dataset = convert_numpy_to_dataset_with_unknown_cardinality( inputs, targets) predict_dataset = convert_numpy_to_dataset_with_unknown_cardinality( inputs) self.assertEqual( keras.backend.get_value(cardinality.cardinality(dataset)), cardinality.UNKNOWN) self.assertEqual( keras.backend.get_value(cardinality.cardinality(predict_dataset)), cardinality.UNKNOWN) eval_with_ds = model.evaluate(dataset, steps=100) predict_with_ds = model.predict(predict_dataset, steps=100) self.assertAllClose(eval_with_numpy, eval_with_ds, atol=1e-4, rtol=1e-4) self.assertAllClose( predict_with_numpy, predict_with_ds, atol=1e-4, rtol=1e-4) with self.assertRaisesRegexp(ValueError, 'Number of steps could not be infered'): model.fit(dataset, epochs=1) @combinations.generate(all_strategy_combinations_plus_cloning()) def test_fit_eval_and_predict_methods_on_dataset(self, distribution, cloning): with self.cached_session(): with distribution.scope(): # TODO(b/130808953): Re-enable the V1 optimizer after iterations is # mirrored. optimizer_fn = ( gradient_descent.GradientDescentOptimizer if cloning else gradient_descent_keras.SGD) optimizer = optimizer_fn(0.001) model = get_model() loss = 'mse' metrics = ['mae', keras.metrics.CategoricalAccuracy()] model.compile(optimizer, loss, metrics=metrics, cloning=cloning) dataset = get_dataset(distribution) model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1) model.evaluate(dataset, steps=2, verbose=1) model.predict(get_predict_dataset(distribution), steps=2) @combinations.generate(strategy_and_optimizer_combinations()) def test_fit_eval_and_predict_with_optimizer(self, distribution, optimizer, cloning): with self.cached_session(): with distribution.scope(): model = get_model() loss = 'mse' model.compile(optimizer(), loss, cloning=cloning) dataset = get_dataset(distribution) model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1) model.evaluate(dataset, steps=2, verbose=1) model.predict(get_predict_dataset(distribution), steps=2) @combinations.generate( combinations.combine( distribution=[ strategy_combinations.mirrored_strategy_with_gpu_and_cpu, strategy_combinations.one_device_strategy ], mode=['graph', 'eager'], cloning=[True, False])) def test_dataset_wrong_input_shape(self, distribution, cloning, mode): if cloning or mode == 'graph': self.skipTest('TODO(b/120943676, b/120957836): Re-enable for cloning=True' ' once the validation code is restored.') with self.cached_session(): with distribution.scope(): # TODO(b/130808953): Re-enable the V1 optimizer after iterations is # mirrored. 
optimizer_fn = ( rmsprop.RMSPropOptimizer if cloning else gradient_descent_keras.SGD) optimizer = optimizer_fn(learning_rate=0.001) model = get_model() loss = 'mse' model.compile(optimizer, loss, cloning=cloning) # Wrong input shape inputs = np.zeros((10, 5), dtype=np.float32) targets = np.zeros((10, 4), dtype=np.float32) dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets)) dataset = dataset.repeat(100) dataset = dataset.batch(10) with self.assertRaisesRegexp(ValueError, 'expected input to have shape'): model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0) @combinations.generate( combinations.combine( distribution=[ strategy_combinations.mirrored_strategy_with_gpu_and_cpu ], mode=['graph', 'eager'], cloning=[True, False])) def test_dataset_no_batch_input_validation(self, distribution, cloning, mode): if cloning or mode == 'graph': self.skipTest('TODO(b/120943676, b/120957836): Re-enable for cloning=True' ' once the validation code is restored.') with self.cached_session(): with distribution.scope(): model = get_model() optimizer = rmsprop.RMSPropOptimizer(learning_rate=0.001) loss = 'mse' model.compile(optimizer, loss, cloning=cloning) # User forgets to batch the dataset inputs = np.zeros((10, 6), dtype=np.float32) targets = np.zeros((10, 4), dtype=np.float32) dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets)) dataset = dataset.repeat(100) with self.assertRaisesRegexp(ValueError, 'Call.*batch.*on.*Dataset'): model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0) @combinations.generate( combinations.combine( distribution=[ strategy_combinations.mirrored_strategy_with_gpu_and_cpu, strategy_combinations.mirrored_strategy_with_two_gpus ], mode=['graph', 'eager'], cloning=[True, False])) def test_learning_phase_value(self, distribution, cloning): # TODO(anjalisridhar): Modify this test to use Lambdas since we can compare # meaningful values. Currently we don't pass the learning phase if the # Lambda layer uses the learning phase. with self.cached_session(): with distribution.scope(): x = keras.layers.Input(shape=(1,), name='input') y = keras.layers.Dense(1, kernel_initializer='ones')(x) z = keras.layers.Dropout(0.9999)(y) model = keras.Model(x, z) initial_weights = model.get_weights() # TODO(b/130808953): Re-enable the V1 optimizer after iterations is # mirrored. optimizer_fn = ( gradient_descent.GradientDescentOptimizer if cloning else gradient_descent_keras.SGD) optimizer = optimizer_fn(0.005) loss = 'mse' metrics = ['acc'] model.compile(optimizer, loss, metrics=metrics, cloning=cloning) batch_size = 8 if isinstance(distribution, mirrored_strategy.MirroredStrategy): # MirroredStrategy uses global batch size. batch_size = 8 * distribution.num_replicas_in_sync inputs = np.ones((10, 1), dtype=np.float32) targets = np.ones((10, 1), dtype=np.float32) dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets)) dataset = dataset.repeat().batch(batch_size) hist = model.fit(dataset, epochs=1, steps_per_epoch=20, verbose=1) self.assertAlmostEqual(hist.history['acc'][0], 0, 0) with distribution.scope(): model.set_weights(initial_weights) # TODO(psv/anjalisridhar): Enable these lines after we fix b/117431185. 
# evaluate_output = model.evaluate(dataset, steps=20) # self.assertAlmostEqual(evaluate_output[1], 1, 0) inputs = np.ones((10, 1), dtype=np.float32) predict_dataset = dataset_ops.Dataset.from_tensor_slices(inputs) predict_dataset = predict_dataset.repeat().batch(batch_size) output = model.predict(predict_dataset, steps=10) # `predict` runs for 10 steps ref_output = np.ones((160, 1), dtype=np.float32) self.assertArrayNear(output, ref_output, 1e-1) @combinations.generate(all_strategy_combinations_plus_cloning()) def testOptimizerWithCallbacks(self, distribution, cloning): with self.cached_session(): with distribution.scope(): model = get_model() optimizer = gradient_descent_keras.SGD(0.01) loss = 'mse' model.compile(optimizer, loss, cloning=cloning) dataset = get_dataset(distribution) def schedule(_): return 0.001 model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0, callbacks=[keras.callbacks.LearningRateScheduler(schedule)]) self.assertAllClose(0.001, keras.backend.get_value(model.optimizer.lr)) @combinations.generate( combinations.times(tpu_strategy_combinations_graph_only(), combinations.combine(batch_size=[4, 6]))) def test_evaluate_with_dataset_with_partial_batch(self, distribution, batch_size): with self.cached_session(): optimizer = gradient_descent.GradientDescentOptimizer(0.001) loss = 'mse' metrics = ['mae', keras.metrics.CategoricalAccuracy()] with distribution.scope(): model_with_ds_strategy = get_model() model_with_ds_strategy.compile(optimizer, loss, metrics=metrics) cpu_model = get_model() cpu_model.compile(optimizer, loss, metrics=metrics) x = np.random.random((10, 3)).astype('float32') y = np.random.random((10, 4)).astype('float32') dataset = dataset_ops.Dataset.from_tensor_slices((x, y)) # As sample size is 10, we make the last batch a partial batch. cpu_model.set_weights(model_with_ds_strategy.get_weights()) dataset_with_partial_batch = dataset.batch(batch_size) # We don't compare the loss as loss is currently not computed as metric # in Keras, the loss value is inaccurate for last partial batch due to # more weights for the last batch samples. steps = np.ceil(10.0 / batch_size) self.assertAllClose( model_with_ds_strategy.evaluate( dataset_with_partial_batch, steps=steps)[1:], cpu_model.evaluate(dataset_with_partial_batch, steps=steps)[1:], atol=1e-5, rtol=1e-5) self.assertAllClose( model_with_ds_strategy.evaluate(dataset_with_partial_batch)[1:], cpu_model.evaluate(dataset_with_partial_batch)[1:], atol=1e-5, rtol=1e-5) @combinations.generate( combinations.times(tpu_strategy_combinations_graph_only(), combinations.combine(cloning=[True, False]))) def test_predict_with_dataset_with_partial_batch(self, distribution, cloning): with self.cached_session(): optimizer = gradient_descent.GradientDescentOptimizer(0.001) loss = 'mse' with distribution.scope(): model_with_ds_strategy = get_model() model_with_ds_strategy.compile(optimizer, loss, cloning=cloning) cpu_model = get_model() cpu_model.compile(optimizer, loss) inputs = np.random.random((10, 3)).astype(np.float32) dataset = dataset_ops.Dataset.from_tensor_slices((inputs)) # As sample size is 10, we batch by 4 so that the last batch is # a partial batch. 
dataset_with_partial_batch = dataset.batch(4) cpu_model.set_weights(model_with_ds_strategy.get_weights()) self.assertAllClose( model_with_ds_strategy.predict(dataset_with_partial_batch, steps=3), cpu_model.predict(dataset_with_partial_batch, steps=3), atol=1e-5, rtol=1e-5) @combinations.generate( combinations.times(tpu_strategy_combinations_graph_only(), combinations.combine(cloning=[True, False]))) def test_predict_multi_output_model_with_dataset_with_partial_batch( self, distribution, cloning): with self.cached_session(): optimizer = gradient_descent.GradientDescentOptimizer(0.001) loss = 'mse' with distribution.scope(): model_with_ds_strategy = simple_multi_inputs_multi_outputs_model() model_with_ds_strategy.compile(optimizer, loss, cloning=cloning) cpu_model = simple_multi_inputs_multi_outputs_model() cpu_model.compile(optimizer, loss) input_data, _ = get_multi_inputs_multi_outputs_data() input_dict = { 'input_a': input_data['input_a'], 'input_b': input_data['input_b'], } dataset = dataset_ops.Dataset.from_tensor_slices(input_dict) # As sample size is 200, we batch by 18 using 12 steps per epoch so # that the last batch is a partial batch. dataset_with_partial_batch = dataset.batch(18) cpu_model.set_weights(model_with_ds_strategy.get_weights()) self.assertAllClose( model_with_ds_strategy.predict(dataset_with_partial_batch, steps=12), cpu_model.predict(dataset_with_partial_batch, steps=12), atol=1e-4, rtol=1e-4) @combinations.generate(all_strategy_combinations_minus_default()) def test_match_model_input_matches_with_dataset_tensors(self, distribution): def _create_model_input_output_tensors(): input_a = keras.layers.Input(shape=(16,), name='z_input_sorted_last') input_b = keras.layers.Input(shape=(32,), name='a_input_sorted_first') intermediate_a = keras.layers.Dense(10)(input_a) intermediate_b = keras.layers.Dense(10)(input_b) merged = keras.layers.Add()([intermediate_a, intermediate_b]) output = keras.layers.Dense(2)(merged) return input_a, input_b, output input_dict = { 'z_input_sorted_last': np.random.rand(32, 16).astype(np.float32), 'a_input_sorted_first': np.random.rand(32, 32).astype(np.float32) } target = np.ones((32, 2), dtype=np.float32) dataset = dataset_ops.Dataset.from_tensor_slices((input_dict, target)) dataset = dataset.batch(4, drop_remainder=True) with self.cached_session(): with distribution.scope(): input_a, input_b, output = _create_model_input_output_tensors() # `input_a`, which has input name that comes last in alphanumeric # order, is the first input of the model input layers. If tensors # from `input_dict` is blindly flattened and passed to model # inputs incorrectly, this would result in `input_a` input layer # matching with tensor `a_input_sorted_first` and would result in # shape mismatch. 
model_with_array_input = keras.models.Model( inputs=[input_a, input_b], outputs=output) model_with_array_input.compile('sgd', 'mse') model_weights = model_with_array_input.get_weights() model_with_array_input_fit = model_with_array_input.fit( dataset, steps_per_epoch=1, epochs=1).history input_a, input_b, output = _create_model_input_output_tensors() model_with_dict_input = keras.models.Model( inputs={ 'z_input_sorted_last': input_a, 'a_input_sorted_first': input_b, }, outputs=output) model_with_dict_input.compile('sgd', 'mse') model_with_dict_input.set_weights(model_weights) model_with_dict_input_fit = model_with_dict_input.fit( dataset, steps_per_epoch=1, epochs=1).history self.assertAllClose( model_with_dict_input_fit, model_with_array_input_fit, atol=1e-4, rtol=1e-4) @combinations.generate( combinations.combine(distribution=strategies_minus_tpu, mode=['graph', 'eager'])) def test_dataset_with_sample_weights(self, distribution): with self.cached_session(), distribution.scope(): model = get_sample_weights_model() optimizer = rmsprop.RMSPropOptimizer(learning_rate=0.001) loss = 'mse' model.compile(optimizer, loss) inputs = np.array([[0], [1], [2], [3]], np.float32) targets = np.array([[2], [4], [6], [8]], np.float32) sample_weights = np.array([0.25, 0.5, 0.75, 1], np.float32) ds = dataset_ops.Dataset.from_tensor_slices((inputs, targets, sample_weights)).batch(2) result = model.evaluate(ds, verbose=1) # The per sample loss is multipled by the corresponding sample weight. The # average of these weighted losses is the return value of the `evaluate` # call. For example, in the test above the average weighted loss is # calculated in the following manner: # batch_1 = (((2-0)^2) * 0.25 + ((4-1)^2) * 0.5) / 2 = 5.5 / 2 = 2.75 # batch_2 = (((6-2)^2 * 0.75) + ((8-3)^2 * 1)) / 2 = 37 / 2 = 18.5 # final result = (batch_1 + batch_2) / 2 = 10.625. # The first time we divide by number of input samples and the second time # we divide by number of steps/batches that the loss is aggregated over. 
self.assertAllClose(result, 10.625) # We now test without passing sample_weights: # batch_1 = ((2-0)^2) + ((4-1)^2) / 2 = 13 / 2 = 6.5 # batch_2 = ((6-2)^2) + ((8-3)^2) / 2 = 41 / 2 = 20.5 # final result = (batch_1 + batch_2) / 2 = 27 / 2 = 13.5 ds = dataset_ops.Dataset.from_tensor_slices((inputs, targets)).batch(2) result = model.evaluate(ds, verbose=1) self.assertAllClose(result, 13.5) @combinations.generate( combinations.combine(distribution=strategies_minus_default_minus_tpu, mode=['eager'])) def test_dataset_with_sample_weights_eager_with_cloning(self, distribution): with self.cached_session(), distribution.scope(): model = get_sample_weights_model() optimizer = rmsprop.RMSPropOptimizer(learning_rate=0.001) loss = 'mse' model.compile(optimizer, loss, cloning=True) inputs = np.array([[0], [1], [2], [3]], np.float32) targets = np.array([[2], [4], [6], [8]], np.float32) sample_weights = np.array([0.25, 0.5, 0.75, 1], np.float32) ds = dataset_ops.Dataset.from_tensor_slices((inputs, targets, sample_weights)).batch(2) with self.assertRaisesRegexp(NotImplementedError, '`sample_weight` is not supported when ' 'using tf.distribute.Strategy in '): model.evaluate(ds, verbose=1) class TestRegularizerLoss(test.TestCase, parameterized.TestCase): class IdentityRegularizer(keras.regularizers.Regularizer): def __call__(self, x): return array_ops.identity(x) class AddLayer(keras.layers.Layer): def build(self, _): self.v = self.add_weight( 'v', (), initializer='ones', regularizer=TestRegularizerLoss.IdentityRegularizer()) def call(self, inputs): return inputs + self.v @staticmethod def loss_fn(_, y_pred): return math_ops.reduce_mean(y_pred) @combinations.generate( combinations.times( strategy_combinations.all_strategy_combinations_minus_default(), combinations.combine(cloning=[True, False]))) def test_regularizer_loss(self, distribution, cloning): batch_size = 2 if not distributed_training_utils.global_batch_size_supported(distribution): batch_size //= distribution.num_replicas_in_sync # Given an input x, which is always 1, and variable v, this model computes # Loss=x+v+regularizer_loss, where regularizer_loss=v and the variable is # initialized to 1. Therefore, this model computes Loss=1+2v, and so the # gradient dLoss/dv = 2. This gradient of 2 is averaged over all examples # in a batch and then multiplied by the learning rate of 1. As a result, # the model update for one batch should subtract 2 from v, resulting in v # being -1. If the regularizer loss is not scaled correctly by number of # replicas, the variable value will be incorrect when number of replicas # >1. For e.g. it will be -2 if num replicas = 2. with distribution.scope(): x = keras.layers.Input(shape=(1,), batch_size=batch_size) y = TestRegularizerLoss.AddLayer()(x) model = keras.models.Model(inputs=x, outputs=y) opt = gradient_descent_keras.SGD(1.) model.compile(opt, loss=TestRegularizerLoss.loss_fn, cloning=cloning) model.fit( x=np.array([[1.], [1.]], dtype=np.float32), y=np.array([[1.], [1.]], dtype=np.float32), batch_size=batch_size) v = model.get_weights()[0] self.assertEqual(-1.0, v) class TestDistributionStrategyWithKerasModels(test.TestCase, parameterized.TestCase): @combinations.generate(all_strategy_combinations_plus_cloning()) def test_distribution_strategy_on_sequential_model(self, distribution, cloning): with distribution.scope(): # TODO(b/130808953): Re-enable the V1 optimizer after iterations is # mirrored. 
optimizer_fn = ( rmsprop.RMSPropOptimizer if cloning else gradient_descent_keras.SGD) optimizer = optimizer_fn(learning_rate=0.001) model = simple_sequential_model() loss = 'mse' model.compile(optimizer, loss, cloning=cloning) inputs = np.zeros((20, 10), np.float32) targets = np.zeros((20, 2), np.float32) model.fit(inputs, targets, epochs=1, steps_per_epoch=2) model.predict(inputs, steps=1) model.evaluate(inputs, targets, steps=1) @combinations.generate(all_strategy_combinations_plus_cloning()) def test_distribution_strategy_on_functional_model(self, distribution, cloning): with distribution.scope(): # TODO(b/130808953): Re-enable the V1 optimizer after iterations is # mirrored. optimizer_fn = ( rmsprop.RMSPropOptimizer if cloning else gradient_descent_keras.SGD) optimizer = optimizer_fn(learning_rate=0.001) model = get_model() loss = 'mse' model.compile(optimizer, loss, cloning=cloning) inputs = np.zeros((64, 3), dtype=np.float32) targets = np.zeros((64, 4), dtype=np.float32) model.fit(inputs, targets, epochs=1, steps_per_epoch=2) model.predict(inputs, steps=1) model.evaluate(inputs, targets, steps=1) @combinations.generate( combinations.times( all_strategy_minus_default_and_tpu_combinations() + tpu_strategy_combinations(), combinations.combine(cloning=[True, False]))) def test_distribution_strategy_one_dimensional(self, distribution, cloning): with distribution.scope(): inp = keras.layers.Input(shape=(10,)) out = keras.layers.Dense(3, activation='softmax')(inp) model = keras.Model(inputs=[inp], outputs=[out]) model.compile( optimizer='rmsprop', loss='sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'], cloning=cloning) x = np.random.random((64, 10)).astype('float32') y = np.random.randint(3, size=64) model.fit(x, y, epochs=1, steps_per_epoch=2) @combinations.generate( combinations.combine( distribution=[ strategy_combinations.mirrored_strategy_with_gpu_and_cpu, strategy_combinations.mirrored_strategy_with_two_gpus ], mode=['graph', 'eager'], cloning=[True, False], reduction=[ loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE, loss_reduction.ReductionV2.SUM ])) def test_distribution_strategy_with_loss_reduction_types( self, distribution, cloning, reduction): np.random.seed(_RANDOM_SEED) def _get_model(): inputs = keras.Input((10,)) x1 = keras.layers.Dense(10, kernel_initializer='zeros')(inputs) x2 = keras.layers.Dense(10, kernel_initializer='zeros')(x1) outputs = keras.layers.Dense(1, kernel_initializer='zeros')(x2) model = keras.Model(inputs, outputs) return model x = np.random.random((64, 10)) y = np.random.random((64, 1)) dataset = dataset_ops.Dataset.from_tensor_slices((x, y)) dataset = dataset.batch(32) model = _get_model() model.compile( 'sgd', loss=keras.losses.MeanSquaredError(reduction=reduction)) history = model.fit(dataset, steps_per_epoch=2, epochs=1, shuffle=False) with distribution.scope(): ds_model = _get_model() ds_model.compile( 'sgd', loss=keras.losses.MeanSquaredError(reduction=reduction), cloning=cloning) ds_history = ds_model.fit( dataset, steps_per_epoch=2, epochs=1, shuffle=False) self.assertArrayNear(history.history['loss'], ds_history.history['loss'], 1e-5) @combinations.generate( combinations.times(all_strategy_minus_default_and_tpu_combinations(), combinations.combine(cloning=[True, False]))) def test_distribution_strategy_with_symbolic_add_loss(self, distribution, cloning): def _make_model_with_add_loss(): inputs = keras.Input((10,)) x1 = keras.layers.Dense(10, kernel_initializer='zeros')(inputs) x2 = keras.layers.Dense(10, 
kernel_initializer='zeros')(x1) outputs = keras.layers.Dense(1, kernel_initializer='zeros')(x2) model = keras.Model(inputs, outputs) model.add_loss(math_ops.reduce_mean(x1)) model.add_loss(math_ops.reduce_mean(outputs)) return model x = np.ones((64, 10)).astype('float32') model = _make_model_with_add_loss() model.compile('sgd') history = model.fit(x, steps_per_epoch=2, epochs=1) with distribution.scope(): ds_model = _make_model_with_add_loss() ds_model.compile('sgd', cloning=cloning) ds_history = ds_model.fit(x, steps_per_epoch=2, epochs=1) self.assertAllClose(history.history, ds_history.history) # TODO(omalleyt): Investigate flakiness and re-enable. @combinations.generate(all_strategy_minus_default_and_tpu_combinations()) def DISABLED_test_distribution_strategy_with_callable_add_loss( self, distribution): def _make_model(): inputs = keras.Input((10,)) x1 = keras.layers.Dense(10, kernel_initializer='zeros')(inputs) x2 = keras.layers.Dense(10, kernel_initializer='zeros')(x1) d = keras.layers.Dense(1, kernel_initializer='zeros') outputs = d(x2) model = keras.Model(inputs, outputs) model.add_loss(lambda: 100. * math_ops.reduce_mean(d.kernel)) return model x = np.ones((64, 10)).astype('float32') y = np.ones((64, 1)).astype('float32') model = _make_model() self.assertLen(model.losses, 1) model.compile('sgd', 'mse') history = model.fit(x, y, steps_per_epoch=2, epochs=1) with distribution.scope(): ds_model = _make_model() self.assertLen(ds_model.losses, 1) ds_model.compile('sgd', 'mse') ds_history = ds_model.fit(x, y, steps_per_epoch=2, epochs=1) self.assertAllClose(history.history, ds_history.history) @combinations.generate( combinations.times(all_strategy_minus_default_and_tpu_combinations(), combinations.combine(cloning=[True, False]))) def test_distribution_strategy_with_add_metric_in_call( self, distribution, cloning): class Bias(keras.layers.Layer): def build(self, input_shape): self.bias = self.add_weight(name='bias', initializer='zeros', shape=()) def call(self, inputs): self.add_metric( math_ops.reduce_mean(inputs), name='bias', aggregation='mean') return inputs + self.bias def _make_model_with_add_metric(): inputs = keras.Input((10,)) x1 = keras.layers.Dense(10, kernel_initializer='zeros')(inputs) x2 = Bias()(x1) outputs = keras.layers.Dense(1, kernel_initializer='zeros')(x2) model = keras.Model(inputs, outputs) return model x = np.ones((64, 10)).astype('float32') y = np.ones((64, 1)).astype('float32') model = _make_model_with_add_metric() self.assertLen(model.metrics, 1) model.compile('sgd', 'mse') history = model.fit( x, y, steps_per_epoch=2, validation_data=(x, y), validation_steps=2, epochs=2) with distribution.scope(): ds_model = _make_model_with_add_metric() self.assertLen(ds_model.metrics, 1) ds_model.compile('sgd', 'mse', cloning=cloning) ds_history = ds_model.fit( x, y, steps_per_epoch=2, validation_data=(x, y), validation_steps=2, epochs=2) self.assertLen(ds_model.metrics, 1) self.assertAllClose(history.history, ds_history.history) @combinations.generate( combinations.combine( distribution=[ strategy_combinations.one_device_strategy, strategy_combinations.one_device_strategy_gpu, strategy_combinations.mirrored_strategy_with_gpu_and_cpu, strategy_combinations.mirrored_strategy_with_two_gpus, ], mode=['eager'], cloning=[False])) def test_distribution_strategy_with_add_metric_object(self, distribution, cloning): class Bias(keras.layers.Layer): def build(self, input_shape): self.bias = self.add_weight(name='bias', initializer='zeros', shape=()) self.mean = 
keras.metrics.Mean(name='mean') def call(self, inputs): self.add_metric(self.mean(inputs)) return inputs + self.bias def _make_model_with_add_metric_object(): inputs = keras.Input((10,)) x1 = keras.layers.Dense(10, kernel_initializer='zeros')(inputs) x2 = Bias()(x1) outputs = keras.layers.Dense(1, kernel_initializer='zeros')(x2) model = keras.Model(inputs, outputs) return model x = np.ones((64, 10)).astype('float32') y = np.ones((64, 1)).astype('float32') model = _make_model_with_add_metric_object() self.assertLen(model.metrics, 1) model.compile('sgd', 'mse') history = model.fit( x, y, steps_per_epoch=2, validation_data=(x, y), validation_steps=2, epochs=2) with distribution.scope(): ds_model = _make_model_with_add_metric_object() self.assertLen(ds_model.metrics, 1) ds_model.compile('sgd', 'mse', cloning=cloning) ds_history = ds_model.fit( x, y, steps_per_epoch=2, validation_data=(x, y), validation_steps=2, epochs=2) self.assertLen(ds_model.metrics, 1) self.assertAllClose(history.history, ds_history.history) @combinations.generate( combinations.times(all_strategy_minus_default_and_tpu_combinations(), combinations.combine(cloning=[True, False]))) def test_distribution_strategy_with_add_metric_outside_call( self, distribution, cloning): def _make_model_with_add_metric(): inputs = keras.Input((10,)) x1 = keras.layers.Dense(10, kernel_initializer='zeros')(inputs) outputs = keras.layers.Dense(1, kernel_initializer='zeros')(x1) model = keras.Model(inputs, outputs) model.add_metric( math_ops.reduce_mean(x1), name='mid_mean', aggregation='mean') return model x = np.ones((64, 10)).astype('float32') y = np.ones((64, 1)).astype('float32') model = _make_model_with_add_metric() self.assertLen(model.metrics, 1) model.compile('sgd', 'mse') history = model.fit( x, y, steps_per_epoch=2, validation_data=(x, y), validation_steps=2, epochs=2) with distribution.scope(): ds_model = _make_model_with_add_metric() self.assertLen(ds_model.metrics, 1) ds_model.compile('sgd', 'mse', cloning=cloning) ds_history = ds_model.fit( x, y, steps_per_epoch=2, validation_data=(x, y), validation_steps=2, epochs=2) self.assertLen(ds_model.metrics, 1) self.assertAllClose(history.history, ds_history.history) if __name__ == '__main__': test.main()
tensorflow-master
tensorflow/python/keras/distribute/distribute_strategy_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests Keras multi worker callbacks.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import sys import threading from absl.testing import parameterized # pylint: disable=g-direct-tensorflow-import from tensorflow.python import keras from tensorflow.python.data.ops import dataset_ops from tensorflow.python.distribute import collective_all_reduce_strategy as collective_strategy from tensorflow.python.distribute import combinations from tensorflow.python.distribute import distribute_coordinator as dc from tensorflow.python.distribute import multi_worker_test_base as test_base from tensorflow.python.keras.engine import base_layer from tensorflow.python.keras.engine import sequential from tensorflow.python.keras.optimizer_v2 import gradient_descent from tensorflow.python.keras.optimizer_v2 import rmsprop from tensorflow.python.platform import test from tensorflow.python.training import gradient_descent as gradient_descent_v1 from tensorflow.python.training import rmsprop as rmsprop_v1 class KerasMultiWorkerOptimizerTest(test_base.IndependentWorkerTestBase, parameterized.TestCase): def run_optimizer_comparison_with_simple_bias_model( self, strategy_cls, optimizer_class_1, optimizer_class_2): def get_input_datasets(): # Simple training input. train_input = [[1]] * 16 train_label = [[0]] * 16 ds = dataset_ops.Dataset.from_tensor_slices((train_input, train_label)) # TODO(rchao): Investigate to figure out the reason for having 8 workers # instead of 2 as expected. return ds.batch(8, drop_remainder=True) def get_simple_bias_model(): class Bias(base_layer.Layer): def build(self, input_shape): self.bias = self.add_variable('bias', (1,), initializer='zeros') def call(self, inputs): return inputs + self.bias model = sequential.Sequential() model.add(Bias(input_shape=(1,))) return model self._lock = threading.Lock() cluster_spec = test_base.create_cluster_spec(num_workers=2, test_obj=self) self._barrier = dc._Barrier(2) def _independent_worker_fn(*args, **kwargs): # pylint: disable=unused-argument """Simulates an Independent Worker inside a thread.""" # TODO(rchao): Refactor to abstract the common boilerplate out. 
with test.mock.patch.object(dc, '_run_std_server', self._make_mock_run_std_server()): model = get_simple_bias_model() initial_weights = model.get_weights() def _get_model_results(optimizer, initial_weights): # Clear Keras session to reset device assignment keras.backend._SESSION.session = None strategy = strategy_cls() with strategy.scope(): train_ds = get_input_datasets() model = get_simple_bias_model() model.set_weights(initial_weights) model.compile(loss='mae', optimizer=optimizer, metrics=['mae']) return { 'trained_loss_and_accuracy': model.fit(x=train_ds, epochs=20).history, 'trained_weights': model.get_weights(), } results1 = _get_model_results(optimizer_class_1(0.01), initial_weights) results2 = _get_model_results(optimizer_class_2(0.01), initial_weights) for key in results1: self.assertAllClose( results1[key], results2[key], atol=1e-5, rtol=1e-5, msg='Fail to assert {}'.format(key)) threads = self.run_multiple_tasks_in_threads(_independent_worker_fn, cluster_spec) threads_to_join = [] strategy = strategy_cls() if strategy.extended.experimental_between_graph: for ts in threads.values(): threads_to_join.extend(ts) else: threads_to_join = [threads['worker'][0]] self.join_independent_workers(threads_to_join) @combinations.generate( combinations.combine( mode=['graph'], strategy_cls=[collective_strategy.CollectiveAllReduceStrategy], required_gpus=[0, 1])) def test_sgd_optimizer_v1_v2_comparison(self, strategy_cls): self.run_optimizer_comparison_with_simple_bias_model( strategy_cls, gradient_descent.SGD, gradient_descent_v1.GradientDescentOptimizer) @combinations.generate( combinations.combine( mode=['graph'], strategy_cls=[collective_strategy.CollectiveAllReduceStrategy], required_gpus=[0, 1])) def test_rmsprop_optimizer_v1_v2_comparison(self, strategy_cls): self.skipTest('There is an issue in collective ops (b/127700538) that ' 'prevent us from running this test with rmsprop optimizers.') self.run_optimizer_comparison_with_simple_bias_model( strategy_cls, rmsprop.RMSprop, rmsprop_v1.RMSPropOptimizer) if __name__ == '__main__': with test.mock.patch.object(sys, 'exit', os._exit): test.main()
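# ----------------------------------------------------------------------------
# Illustrative sketch appended to this file dump (not part of the original
# test module). The test above trains the same one-weight "bias" model twice,
# once per optimizer implementation, from identical initial weights, and
# checks that histories and weights agree. Stripped of the multi-worker and
# collective-ops machinery, the comparison harness could be sketched with the
# public API as below; the helper names and the choice of running plain
# `tf.keras.optimizers.SGD` twice (rather than a v1/v2 optimizer pair) are
# assumptions for illustration.

def demo_optimizer_comparison_single_worker():
  import tensorflow as tf

  class Bias(tf.keras.layers.Layer):

    def build(self, input_shape):
      self.bias = self.add_weight('bias', shape=(1,), initializer='zeros')

    def call(self, inputs):
      return inputs + self.bias

  def make_dataset():
    train_input = [[1.0]] * 16
    train_label = [[0.0]] * 16
    ds = tf.data.Dataset.from_tensor_slices((train_input, train_label))
    return ds.batch(8, drop_remainder=True)

  def train_once(optimizer, initial_weights):
    model = tf.keras.Sequential([Bias(input_shape=(1,))])
    model.set_weights(initial_weights)
    model.compile(loss='mae', optimizer=optimizer, metrics=['mae'])
    history = model.fit(make_dataset(), epochs=5, verbose=0)
    return history.history, model.get_weights()

  # Capture one set of initial weights so both runs start identically.
  initial_weights = tf.keras.Sequential(
      [Bias(input_shape=(1,))]).get_weights()
  run_a = train_once(tf.keras.optimizers.SGD(0.01), initial_weights)
  run_b = train_once(tf.keras.optimizers.SGD(0.01), initial_weights)
  return run_a, run_b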
tensorflow-master
tensorflow/python/keras/distribute/multi_worker_optimizer_comparison_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Correctness test for tf.keras Embedding models using DistributionStrategy.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python import keras from tensorflow.python.distribute import combinations from tensorflow.python.eager import test from tensorflow.python.keras.distribute import keras_correctness_test_base from tensorflow.python.keras.optimizer_v2 import gradient_descent as gradient_descent_keras class DistributionStrategyEmbeddingModelCorrectnessTest( keras_correctness_test_base. TestDistributionStrategyEmbeddingModelCorrectnessBase): def get_model(self, max_words=10, initial_weights=None, distribution=None, cloning=None, input_shapes=None): del input_shapes with keras_correctness_test_base.MaybeDistributionScope(distribution): word_ids = keras.layers.Input( shape=(max_words,), dtype=np.int32, name='words') word_embed = keras.layers.Embedding(input_dim=20, output_dim=10)(word_ids) if self.use_distributed_dense: word_embed = keras.layers.TimeDistributed(keras.layers.Dense(4))( word_embed) avg = keras.layers.GlobalAveragePooling1D()(word_embed) preds = keras.layers.Dense(2, activation='softmax')(avg) model = keras.Model(inputs=[word_ids], outputs=[preds]) if initial_weights: model.set_weights(initial_weights) model.compile( # TODO(b/130808953): Switch back the V1 optimizer once global_step is # mirrored. optimizer=gradient_descent_keras.SGD(learning_rate=0.1), loss='sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'], cloning=cloning) return model @combinations.generate(keras_correctness_test_base. test_combinations_for_embedding_model()) def test_embedding_model_correctness(self, distribution, use_numpy, use_validation_data, cloning): self.use_distributed_dense = False self.run_correctness_test(distribution, use_numpy, use_validation_data, cloning) @combinations.generate(keras_correctness_test_base. test_combinations_for_embedding_model()) def test_embedding_time_distributed_model_correctness(self, distribution, use_numpy, use_validation_data, cloning): self.use_distributed_dense = True self.run_correctness_test(distribution, use_numpy, use_validation_data, cloning) class DistributionStrategySiameseEmbeddingModelCorrectnessTest( keras_correctness_test_base. 
TestDistributionStrategyEmbeddingModelCorrectnessBase): def get_model(self, max_words=10, initial_weights=None, distribution=None, cloning=None, input_shapes=None): del input_shapes with keras_correctness_test_base.MaybeDistributionScope(distribution): word_ids_a = keras.layers.Input( shape=(max_words,), dtype=np.int32, name='words_a') word_ids_b = keras.layers.Input( shape=(max_words,), dtype=np.int32, name='words_b') def submodel(embedding, word_ids): word_embed = embedding(word_ids) rep = keras.layers.GlobalAveragePooling1D()(word_embed) return keras.Model(inputs=[word_ids], outputs=[rep]) word_embed = keras.layers.Embedding( input_dim=20, output_dim=10, input_length=max_words, embeddings_initializer=keras.initializers.RandomUniform(0, 1)) a_rep = submodel(word_embed, word_ids_a).outputs[0] b_rep = submodel(word_embed, word_ids_b).outputs[0] sim = keras.layers.Dot(axes=1, normalize=True)([a_rep, b_rep]) model = keras.Model(inputs=[word_ids_a, word_ids_b], outputs=[sim]) if initial_weights: model.set_weights(initial_weights) # TODO(b/130808953): Switch back to the V1 optimizer after global_step # is made mirrored. model.compile( optimizer=gradient_descent_keras.SGD(learning_rate=0.1), loss='mse', cloning=cloning, metrics=['mse']) return model def get_data(self, count=(keras_correctness_test_base._GLOBAL_BATCH_SIZE * keras_correctness_test_base._EVAL_STEPS), min_words=5, max_words=10, max_word_id=19, num_classes=2): features_a, labels_a, _ = (super( DistributionStrategySiameseEmbeddingModelCorrectnessTest, self). get_data(count, min_words, max_words, max_word_id, num_classes)) features_b, labels_b, _ = (super( DistributionStrategySiameseEmbeddingModelCorrectnessTest, self). get_data(count, min_words, max_words, max_word_id, num_classes)) y_train = np.zeros((count, 1), dtype=np.float32) y_train[labels_a == labels_b] = 1.0 y_train[labels_a != labels_b] = -1.0 # TODO(b/123360757): Add tests for using list as inputs for multi-input # models. x_train = { 'words_a': features_a, 'words_b': features_b, } x_predict = x_train return x_train, y_train, x_predict @combinations.generate(keras_correctness_test_base. test_combinations_for_embedding_model()) def test_siamese_embedding_model_correctness(self, distribution, use_numpy, use_validation_data, cloning): self.run_correctness_test(distribution, use_numpy, use_validation_data, cloning) if __name__ == '__main__': test.main()
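# ----------------------------------------------------------------------------
# Illustrative sketch appended to this file dump (not part of the original
# test module). The correctness tests above build a small embedding
# classifier (and a siamese variant) through the shared
# `keras_correctness_test_base` harness. A minimal public-API version of the
# single-tower model, runnable on its own, is sketched below; the helper
# name, vocabulary size and synthetic data are assumptions for illustration.

def demo_embedding_classifier(distribution=None):
  import numpy as np
  import tensorflow as tf

  def build_model(max_words=10):
    word_ids = tf.keras.layers.Input(
        shape=(max_words,), dtype='int32', name='words')
    word_embed = tf.keras.layers.Embedding(
        input_dim=20, output_dim=10)(word_ids)
    avg = tf.keras.layers.GlobalAveragePooling1D()(word_embed)
    preds = tf.keras.layers.Dense(2, activation='softmax')(avg)
    model = tf.keras.Model(inputs=[word_ids], outputs=[preds])
    model.compile(
        optimizer=tf.keras.optimizers.SGD(learning_rate=0.1),
        loss='sparse_categorical_crossentropy',
        metrics=['sparse_categorical_accuracy'])
    return model

  if distribution is not None:
    with distribution.scope():
      model = build_model()
  else:
    model = build_model()

  # Random integer "sentences" and binary labels, standing in for the
  # synthetic data the shared base class generates.
  x = np.random.randint(0, 20, size=(64, 10)).astype(np.int32)
  y = np.random.randint(0, 2, size=(64, 1)).astype(np.int32)
  return model.fit(x, y, epochs=1, verbose=0).history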
tensorflow-master
tensorflow/python/keras/distribute/keras_embedding_model_correctness_test.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for tf.keras models with callbacks, checkpointing with dist strategy.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import tempfile from absl.testing import parameterized import numpy as np from tensorflow.python import keras from tensorflow.python.data.ops import dataset_ops from tensorflow.python.distribute import combinations from tensorflow.python.distribute import strategy_combinations from tensorflow.python.distribute import tpu_strategy from tensorflow.python.distribute import values from tensorflow.python.eager import context from tensorflow.python.eager import test from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.keras import losses from tensorflow.python.keras.distribute import distribute_strategy_test as keras_test_lib from tensorflow.python.keras.distribute import distributed_training_utils from tensorflow.python.training import gradient_descent class Counter(keras.callbacks.Callback): """Counts the number of times each callback method was run. Attributes: method_counts: dict. Contains the counts of time each callback method was run. 
""" def __init__(self): self.method_counts = collections.defaultdict(int) methods_to_count = [ 'on_batch_begin', 'on_batch_end', 'on_epoch_begin', 'on_epoch_end', 'on_predict_batch_begin', 'on_predict_batch_end', 'on_predict_begin', 'on_predict_end', 'on_test_batch_begin', 'on_test_batch_end', 'on_test_begin', 'on_test_end', 'on_train_batch_begin', 'on_train_batch_end', 'on_train_begin', 'on_train_end' ] for method_name in methods_to_count: setattr(self, method_name, self.wrap_with_counts(method_name, getattr(self, method_name))) def wrap_with_counts(self, method_name, method): def _call_and_count(*args, **kwargs): self.method_counts[method_name] += 1 return method(*args, **kwargs) return _call_and_count class TestDistributionStrategyWithCallbacks(test.TestCase, parameterized.TestCase): @combinations.generate( combinations.times(keras_test_lib.all_strategy_combinations(), combinations.combine(cloning=[True, False]))) def test_callbacks_in_fit(self, distribution, cloning): with distribution.scope(): model = keras_test_lib.get_model() model.compile( optimizer='sgd', loss='mse', metrics=['mae'], cloning=cloning) dataset = keras_test_lib.get_dataset(distribution) counter = Counter() epochs = 2 steps_per_epoch = 5 validation_steps = 3 model.fit( dataset, epochs=epochs, steps_per_epoch=steps_per_epoch, verbose=0, validation_data=dataset, validation_steps=validation_steps, callbacks=[counter]) if (isinstance(distribution, tpu_strategy.TPUStrategyV1) and not context.executing_eagerly()): # TPU Strategy can have multi step training, from extended.steps_per_run # if steps_per_run = 1, then num_batch_call_per_epoch = steps_per_epoch steps_per_run = distribution.extended.steps_per_run num_batch_call_per_epoch = steps_per_epoch // steps_per_run if steps_per_epoch % steps_per_run: num_batch_call_per_epoch += 1 else: num_batch_call_per_epoch = steps_per_epoch self.assertDictEqual( counter.method_counts, { 'on_batch_begin': epochs * num_batch_call_per_epoch, 'on_batch_end': epochs * num_batch_call_per_epoch, 'on_epoch_begin': epochs, 'on_epoch_end': epochs, 'on_test_batch_begin': epochs * validation_steps, 'on_test_batch_end': epochs * validation_steps, 'on_test_begin': epochs, 'on_test_end': epochs, 'on_train_batch_begin': epochs * num_batch_call_per_epoch, 'on_train_batch_end': epochs * num_batch_call_per_epoch, 'on_train_begin': 1, 'on_train_end': 1 }) @combinations.generate( combinations.times(keras_test_lib.all_strategy_combinations(), combinations.combine(cloning=[True, False]))) def test_callbacks_in_eval(self, distribution, cloning): with distribution.scope(): model = keras_test_lib.get_model() model.compile( optimizer='sgd', loss='mse', metrics=['mae'], cloning=cloning) dataset = keras_test_lib.get_dataset(distribution) counter = Counter() model.evaluate(dataset, steps=5, callbacks=[counter]) self.assertDictEqual( counter.method_counts, { 'on_test_batch_begin': 5, 'on_test_batch_end': 5, 'on_test_begin': 1, 'on_test_end': 1 }) @combinations.generate( combinations.times(keras_test_lib.all_strategy_combinations(), combinations.combine(cloning=[True, False]))) def test_callbacks_in_predict(self, distribution, cloning): with distribution.scope(): model = keras_test_lib.get_model() model.compile( optimizer='sgd', loss='mse', metrics=['mae'], cloning=cloning) dataset = keras_test_lib.get_dataset(distribution) counter = Counter() model.predict( keras_test_lib.get_predict_dataset(dataset), steps=5, callbacks=[counter]) self.assertDictEqual( counter.method_counts, { 'on_predict_batch_begin': 5, 
'on_predict_batch_end': 5, 'on_predict_begin': 1, 'on_predict_end': 1 }) class TestDistributionStrategyErrorCases(test.TestCase, parameterized.TestCase): @combinations.generate( combinations.combine( distribution=[ strategy_combinations.mirrored_strategy_with_gpu_and_cpu, ], mode=['graph'])) def test_validating_dataset_input_tensors_with_shape_mismatch( self, distribution): with self.cached_session(): a = constant_op.constant([1, 2], shape=(1, 2)) b = constant_op.constant([[1, 2], [1, 2]], shape=(2, 2)) device_map = values.ReplicaDeviceMap(('/device:CPU:0', '/device:GPU:0')) x = values.DistributedValues(device_map, (a, b)) y = values.DistributedValues(device_map, (a, a)) # Removed device and input tensor shape details from the error message # since the order of the device and the corresponding input tensor shape # is not deterministic over different runs. with self.assertRaisesRegexp( ValueError, 'Input tensor shapes do not match for ' 'distributed tensor inputs ' 'DistributedValues:.+'): with distribution.scope(): distributed_training_utils.validate_distributed_dataset_inputs( distribution, x, y) @combinations.generate( combinations.combine( distribution=[ strategy_combinations.mirrored_strategy_with_gpu_and_cpu, ], mode=['graph', 'eager'])) def test_validating_dataset_input_tensors_with_dtype_mismatch( self, distribution): with self.cached_session(): a = constant_op.constant([1, 2], shape=(1, 2), dtype=dtypes.int32) b = constant_op.constant([1, 2], shape=(1, 2), dtype=dtypes.float64) device_map = values.ReplicaDeviceMap(('/device:CPU:0', '/device:GPU:0')) x = values.DistributedValues(device_map, (a, b)) y = values.DistributedValues(device_map, (a, a)) # Removed device and input tensor dtype details from the error message # since the order of the device and the corresponding input tensor dtype # is not deterministic over different runs. with self.assertRaisesRegexp( ValueError, 'Input tensor dtypes do not match for ' 'distributed tensor inputs ' 'DistributedValues:.+'): with distribution.scope(): distributed_training_utils.validate_distributed_dataset_inputs( distribution, x, y) @combinations.generate( combinations.combine( distribution=[ strategy_combinations.mirrored_strategy_with_gpu_and_cpu, ], mode=['graph', 'eager'], cloning=[True, False])) def test_unsupported_features(self, distribution, cloning): with self.cached_session(): with distribution.scope(): model = keras_test_lib.get_model() optimizer = gradient_descent.GradientDescentOptimizer(0.001) loss = 'mse' metrics = ['mae'] model.compile(optimizer, loss, metrics=metrics, cloning=cloning) dataset = keras_test_lib.get_dataset(distribution) # Test with validation split with self.assertRaisesRegexp( ValueError, '`validation_split` argument is not ' 'supported when input `x` is a dataset or a ' 'dataset iterator.+'): model.fit( dataset, epochs=1, steps_per_epoch=2, verbose=0, validation_split=0.5, validation_steps=2) # Test with sample weight. sample_weight = np.random.random((10,)) with self.assertRaisesRegexp( ValueError, '`sample_weight` argument is not supported when input ' '`x` is a dataset or a dataset iterator.'): model.fit( dataset, epochs=1, steps_per_epoch=2, verbose=0, sample_weight=sample_weight) # Test with not specifying the `steps` argument for dataset with infinite # cardinality. 
dataset = dataset.repeat() with self.assertRaisesRegexp( ValueError, 'When passing an infinitely ' 'repeating dataset, you must specify the ' '`steps_per_epoch` argument'): model.fit(dataset, epochs=1, verbose=0) with self.assertRaisesRegexp( ValueError, 'When passing an infinitely ' 'repeating dataset, you must specify the ' '`steps` argument'): model.evaluate(dataset, verbose=0) with self.assertRaisesRegexp( ValueError, 'When passing an infinitely ' 'repeating dataset, you must specify the ' '`steps` argument'): model.predict(dataset, verbose=0) @combinations.generate( combinations.combine( distribution=[ strategy_combinations.mirrored_strategy_with_gpu_and_cpu, ], mode=['graph', 'eager'], cloning=[True, False])) def test_calling_with_unsupported_predefined_callbacks( self, distribution, cloning): with self.cached_session(): with distribution.scope(): model = keras_test_lib.get_model() optimizer = gradient_descent.GradientDescentOptimizer(0.001) loss = 'mse' metrics = ['mae'] model.compile(optimizer, loss, metrics=metrics, cloning=cloning) dataset = keras_test_lib.get_dataset(distribution) def schedule(_): return 0.001 with self.assertRaisesRegexp( ValueError, 'You must specify a Keras Optimizer V2 when ' 'using'): model.fit( dataset, epochs=1, steps_per_epoch=2, verbose=0, callbacks=[keras.callbacks.LearningRateScheduler(schedule)]) with self.assertRaisesRegexp( ValueError, 'You must specify a Keras Optimizer V2 when ' 'using'): model.fit( dataset, epochs=1, steps_per_epoch=2, verbose=0, callbacks=[keras.callbacks.ReduceLROnPlateau()]) @combinations.generate( combinations.combine( distribution=[strategy_combinations.one_device_strategy], mode=['eager'], cloning=[True, False])) def test_distribution_strategy_with_run_eagerly(self, distribution, cloning): with distribution.scope(): x = keras.layers.Input(shape=(1,)) y = keras.layers.Dense(1, kernel_initializer='ones')(x) model = keras.models.Model(x, y) err_msg = ('We currently do not support enabling `run_eagerly` with ' 'distribution strategy.') with self.assertRaisesRegex(ValueError, err_msg): model.compile('sgd', run_eagerly=True, cloning=cloning) # TODO(b/124377929): Remove error assertions once subclassed models # are supported in DistributedStrategy. 
@combinations.generate( combinations.combine( distribution=[ strategy_combinations.mirrored_strategy_with_gpu_and_cpu, strategy_combinations.one_device_strategy, ], mode=['graph', 'eager'], cloning=[True, False])) def test_distribution_strategy_on_subclassed_model(self, distribution, cloning): with distribution.scope(): class _SimpleMLP(keras.Model): def __init__(self, num_labels): super(_SimpleMLP, self).__init__() self.dense = keras.layers.Dense(num_labels) def call(self, inputs): return self.dense(inputs) model = _SimpleMLP(3) if cloning or not context.executing_eagerly(): with self.assertRaisesRegexp( ValueError, 'We currently do not support distribution strategy with a ' '`Sequential` model that is created without `input_shape`/' '`input_dim` set in its first layer or a subclassed model.'): model.compile('sgd', cloning=cloning) else: model.compile('sgd', cloning=cloning) @combinations.generate( combinations.combine( distribution=[ strategy_combinations.mirrored_strategy_with_gpu_and_cpu, strategy_combinations.one_device_strategy, ], mode=['graph', 'eager'], cloning=[True, False])) def test_distribution_strategy_on_deferred_sequential_model( self, distribution, cloning): with distribution.scope(): model = keras.models.Sequential() model.add(keras.layers.Dense(16, activation='relu')) model.add(keras.layers.Dense(3, activation='softmax')) if not cloning and context.executing_eagerly(): model.compile('sgd', cloning=cloning) else: with self.assertRaisesRegexp( ValueError, 'We currently do not support distribution strategy with a ' '`Sequential` model that is created without ' '`input_shape`/`input_dim` set in its first layer or ' 'a subclassed model.'): model.compile('sgd', cloning=cloning) @combinations.generate( keras_test_lib.all_strategy_combinations_minus_default()) def test_standalone_loss_without_loss_reduction(self, distribution): with distribution.scope(): loss_object = losses.MeanSquaredError() with self.assertRaisesRegexp( ValueError, 'Please use `tf.keras.losses.Reduction.SUM` or ' '`tf.keras.losses.Reduction.NONE`'): y = np.asarray([1, 0]) loss_object(y, y) class TestDistributionStrategyWithLossMasking(test.TestCase, parameterized.TestCase): # TODO(priyag): Enable all strategies for this test. Currently it does not # work for TPU due to some invalid datatype. 
@combinations.generate( combinations.times( combinations.combine( distribution=[ strategy_combinations.mirrored_strategy_with_gpu_and_cpu, ], mode=['graph', 'eager']), combinations.combine( cloning=True, optimizer=strategy_combinations.gradient_descent_optimizer_v1_fn) + combinations.combine( cloning=False, optimizer=strategy_combinations .gradient_descent_optimizer_keras_v2_fn))) def test_masking(self, distribution, cloning, optimizer): with self.cached_session(): np.random.seed(1337) x = np.array([[[1], [1]], [[0], [0]]]) with distribution.scope(): model = keras.models.Sequential() model.add(keras.layers.Masking(mask_value=0, input_shape=(2, 1))) model.add( keras.layers.TimeDistributed( keras.layers.Dense(1, kernel_initializer='one'))) model.compile(loss='mse', optimizer=optimizer(), cloning=cloning) y = np.array([[[1], [1]], [[1], [1]]]) dataset = dataset_ops.Dataset.from_tensor_slices((x, y)) dataset = dataset.repeat(100) dataset = dataset.batch(10) hist = model.fit(x=dataset, epochs=1, steps_per_epoch=2) self.assertEqual(hist.history['loss'][0], 0) class TestDistributionStrategyWithNormalizationLayer(test.TestCase, parameterized.TestCase): @combinations.generate( combinations.times( keras_test_lib.all_strategy_combinations(), combinations.combine(fused=[True, False]), combinations.combine( cloning=True, optimizer=strategy_combinations.gradient_descent_optimizer_v1_fn) + combinations.combine( cloning=False, optimizer=strategy_combinations .gradient_descent_optimizer_keras_v2_fn))) def test_batchnorm_correctness(self, distribution, fused, optimizer, cloning): with self.cached_session(): with distribution.scope(): model = keras.models.Sequential() norm = keras.layers.BatchNormalization( input_shape=( 10, 20, 30, ), momentum=0.8, fused=fused) model.add(norm) model.compile(loss='mse', optimizer=optimizer(), cloning=cloning) # centered on 5.0, variance 10.0 x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 10, 20, 30)) x = x.astype('float32') dataset = dataset_ops.Dataset.from_tensor_slices((x, x)) dataset = dataset.repeat(100) dataset = keras_test_lib.batch_wrapper(dataset, 32, distribution) predict_dataset = dataset_ops.Dataset.from_tensor_slices(x) predict_dataset = predict_dataset.repeat(100) predict_dataset = keras_test_lib.batch_wrapper(predict_dataset, 32, distribution) model.fit(dataset, epochs=4, verbose=0, steps_per_epoch=10) out = model.predict(predict_dataset, steps=2) out -= keras.backend.eval(norm.beta) out /= keras.backend.eval(norm.gamma) np.testing.assert_allclose(out.mean(), 0.0, atol=1e-1) np.testing.assert_allclose(out.std(), 1.0, atol=1e-1) class TestDistributionStrategySaveLoadWeights(test.TestCase, parameterized.TestCase): @combinations.generate( combinations.times( keras_test_lib.all_strategy_combinations_minus_default(), combinations.combine( cloning=True, optimizer=strategy_combinations.rmsprop_optimizer_v1_fn) + combinations.combine( cloning=False, optimizer=strategy_combinations.rmsprop_optimizer_keras_v2_fn))) def test_save_load_h5(self, distribution, optimizer, cloning): with self.cached_session(): dataset = keras_test_lib.get_dataset(distribution) with distribution.scope(): model = keras_test_lib.get_model() model.compile(optimizer(), 'mse', cloning=cloning) model.fit(dataset, epochs=1, steps_per_epoch=1) weights_file = tempfile.mktemp('.h5') model.save_weights(weights_file) model_2 = keras_test_lib.get_model() model_2.compile(optimizer(), 'mse', cloning=cloning) model_2.load_weights(weights_file) model_2.predict( 
keras_test_lib.get_predict_dataset(distribution), steps=2) model_2.fit(dataset, epochs=1, steps_per_epoch=1) @combinations.generate( combinations.times( keras_test_lib.all_strategy_combinations_minus_default(), combinations.combine( cloning=True, optimizer=strategy_combinations.rmsprop_optimizer_v1_fn) + combinations.combine( cloning=False, optimizer=strategy_combinations.rmsprop_optimizer_keras_v2_fn))) def test_save_load_trackable(self, distribution, optimizer, cloning): # TODO(b/123533246): Enable the test for TPU once bug is fixed if (isinstance(distribution, (tpu_strategy.TPUStrategy, tpu_strategy.TPUStrategyV1)) and distribution.extended.steps_per_run > 1): self.skipTest('MultiStep TPU Strategy deadlocks with optimizer restore.') with self.cached_session(): dataset = keras_test_lib.get_dataset(distribution) with distribution.scope(): model = keras_test_lib.get_model() model.compile(optimizer(), 'mse', cloning=cloning) model.fit(dataset, epochs=1, steps_per_epoch=1) weights_file = tempfile.mktemp() model.save_weights(weights_file) model_2 = keras_test_lib.get_model() model_2.compile(optimizer(), 'mse', cloning=cloning) model_2.load_weights(weights_file) model_2.predict( keras_test_lib.get_predict_dataset(distribution), steps=2) model_2.fit(dataset, epochs=1, steps_per_epoch=1) class TestDistributionStrategyValidation(test.TestCase, parameterized.TestCase): @combinations.generate( combinations.times( keras_test_lib.all_strategy_combinations_minus_default(), combinations.combine(cloning=[True, False]))) def test_layer_outside_scope(self, distribution, cloning): with self.cached_session(): with self.assertRaisesRegexp( ValueError, 'was not created in the distribution strategy'): x = keras.layers.Input(shape=(3,), name='input') y = keras.layers.Dense(4, name='dense')(x) with distribution.scope(): model = keras.Model(x, y) optimizer = gradient_descent.GradientDescentOptimizer(0.001) loss = 'mse' metrics = ['mae', keras.metrics.CategoricalAccuracy()] model.compile(optimizer, loss, metrics=metrics, cloning=cloning) @combinations.generate( combinations.times( keras_test_lib.all_strategy_combinations_minus_default(), combinations.combine(cloning=[True, False]))) def test_model_outside_scope(self, distribution, cloning): with self.cached_session(): with self.assertRaisesRegexp( ValueError, 'was not created in the distribution strategy'): x = keras.layers.Input(shape=(3,), name='input') y = keras.layers.Dense(4, name='dense')(x) model = keras.Model(x, y) with distribution.scope(): optimizer = gradient_descent.GradientDescentOptimizer(0.001) loss = 'mse' metrics = ['mae', keras.metrics.CategoricalAccuracy()] model.compile(optimizer, loss, metrics=metrics, cloning=cloning) class TestDistributionStrategyWithStaticShapes(test.TestCase, parameterized.TestCase): @combinations.generate( combinations.combine( distribution=[ strategy_combinations.mirrored_strategy_with_gpu_and_cpu, ], mode=['graph', 'eager'])) def test_input_batch_size_not_divisible_by_num_replicas(self, distribution): with distribution.scope(): with self.assertRaisesRegexp( ValueError, 'The `batch_size` argument value 5 cannot be divisible ' 'by number of replicas 2'): keras.layers.Input(shape=(3,), batch_size=5, name='input') @combinations.generate( combinations.combine( distribution=[ strategy_combinations.mirrored_strategy_with_gpu_and_cpu, ], mode=['graph', 'eager'])) def test_static_input_batch_size(self, distribution): inputs = np.zeros((10, 3), dtype=np.float32) targets = np.zeros((10, 4), dtype=np.float32) dataset = 
dataset_ops.Dataset.from_tensor_slices((inputs, targets)) dataset = dataset.repeat(100) dataset = dataset.batch(10, drop_remainder=True) with distribution.scope(): x = keras.layers.Input(shape=(3,), batch_size=10, name='input') y = keras.layers.Dense(4, name='dense')(x) model = keras.Model(x, y) model.compile(optimizer='sgd', loss='mse', metrics=['mae']) model.fit(dataset, epochs=1, steps_per_epoch=5) model.evaluate(dataset, steps=5) model.predict(dataset) if __name__ == '__main__': test.main()
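# ----------------------------------------------------------------------------
# Illustrative sketch appended to this file dump (not part of the original
# test module). The `Counter` callback above wraps every `on_*` hook so the
# tests can assert how often Keras invokes each one under a distribution
# strategy. A standalone version built only on the public
# `tf.keras.callbacks.Callback` API is sketched below; the class and helper
# names, and the two hooks chosen, are illustrative assumptions.

def demo_counting_callback():
  import collections
  import numpy as np
  import tensorflow as tf

  class CountingCallback(tf.keras.callbacks.Callback):
    """Counts how many times selected callback hooks fire."""

    def __init__(self):
      super(CountingCallback, self).__init__()
      self.method_counts = collections.defaultdict(int)

    def on_epoch_begin(self, epoch, logs=None):
      self.method_counts['on_epoch_begin'] += 1

    def on_train_batch_end(self, batch, logs=None):
      self.method_counts['on_train_batch_end'] += 1

  model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(3,))])
  model.compile('sgd', 'mse')

  x = np.zeros((20, 3), dtype='float32')
  y = np.zeros((20, 1), dtype='float32')
  counter = CountingCallback()
  # 2 epochs x (20 samples / batch_size 10) = 4 on_train_batch_end calls.
  model.fit(x, y, epochs=2, batch_size=10, verbose=0, callbacks=[counter])
  return dict(counter.method_counts)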
tensorflow-master
tensorflow/python/keras/distribute/keras_utils_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests that show that DistributionStrategy works with optimizer v2.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized import numpy as np from tensorflow.python import keras from tensorflow.python.distribute import combinations from tensorflow.python.distribute import distribution_strategy_context as ds_context from tensorflow.python.distribute import strategy_combinations from tensorflow.python.eager import context from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.keras.optimizer_v2 import adam from tensorflow.python.keras.optimizer_v2 import gradient_descent from tensorflow.python.ops import math_ops from tensorflow.python.ops import variable_scope from tensorflow.python.ops import variables from tensorflow.python.platform import test def get_model(): x = keras.layers.Input(shape=(3,), name='input') y = keras.layers.Dense(4, name='dense')(x) model = keras.Model(x, y) return model class MirroredStrategyOptimizerV2Test(test.TestCase, parameterized.TestCase): @combinations.generate( combinations.combine( distribution=[ strategy_combinations.central_storage_strategy_with_two_gpus, ], mode=['graph', 'eager'])) def testKerasOptimizerWithUnequalInput(self, distribution): self.skipTest('b/130309197') with distribution.scope(): var = variables.Variable( 2.0, name='var', aggregation=variable_scope.VariableAggregation.SUM) optimizer = adam.Adam(learning_rate=0.01, beta_1=0.2, beta_2=0.2) all_vars = [] def model_fn(): def loss_fn(): replica_id = _replica_id() return math_ops.cast(replica_id + 1, dtype=dtypes.float32) * 0.5 * var train_op = optimizer.minimize(loss_fn, var_list=[var]) return train_op, optimizer def train_fn(): train_op, optimizer = distribution.extended.call_for_each_replica( model_fn) if not all_vars: all_vars.append(var) all_vars.append(optimizer.get_slot(var, 'm')) all_vars.append(optimizer.get_slot(var, 'v')) return distribution.group(train_op) if not context.executing_eagerly(): with self.cached_session() as sess: train_fn = sess.make_callable(train_fn()) self.evaluate(variables.global_variables_initializer()) # first step. train_fn() # var(1) = var(0) - lr * m(1) * sqrt(1 - beta2) / sqrt(v(1)) / (1 - beta1) # = 2.0 - 0.01 * 1.2 * sqrt(0.8) / sqrt(1.8) / 0.8 self.assertAllClose(1.99, self.evaluate(all_vars[0])) # m(1) = beta1 * m(0) + (1-beta1) * grad = 0.2 * 0 + 0.8 * (1 + 2) / 2 self.assertAllClose(1.2, self.evaluate(all_vars[1])) # v(1) = beta2 * v(0) + (1-beta2) * grad^2 = 0.2 * 0 + 0.8 * 2.25 self.assertAllClose(1.8, self.evaluate(all_vars[2])) # second step. 
train_fn() # var(1) = var(0) - lr * 2 = 1.98 self.assertAllClose(1.98, self.evaluate(all_vars[0])) # m(2) = beta1 * m(1) + (1-beta1) * grad = 0.2 * 1.2 + 0.8 * 1.5 self.assertAllClose(1.44, self.evaluate(all_vars[1])) # v(2) = beta2 * v(1) + (1-beta2) * grad^2 = 0.2 * 1.8 + 0.8 * 2.25 self.assertAllClose(2.16, self.evaluate(all_vars[2])) @combinations.generate( combinations.combine( distribution=[ strategy_combinations.central_storage_strategy_with_two_gpus, ], mode=['graph', 'eager'], cloning=[True, False])) def testOptimizerWithKerasModelAndNumpyArrays(self, distribution, cloning): self.skipTest('b/130309197') with self.cached_session(): with distribution.scope(): model = get_model() optimizer = gradient_descent.SGD(0.001) loss = 'mse' metrics = ['mae'] model.compile(optimizer, loss, metrics=metrics, cloning=cloning) inputs = np.zeros((64, 3), dtype=np.float32) targets = np.zeros((64, 4), dtype=np.float32) model.fit( inputs, targets, epochs=1, batch_size=2, verbose=0, validation_data=(inputs, targets)) model.evaluate(inputs, targets) model.predict(inputs) def _replica_id(): replica_id = ds_context.get_replica_context().replica_id_in_sync_group if not isinstance(replica_id, ops.Tensor): replica_id = constant_op.constant(replica_id) return replica_id if __name__ == '__main__': test.main()
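# ----------------------------------------------------------------------------
# Illustrative sketch appended to this file dump (not part of the original
# test module). The test above checks the Adam slot variables ('m' and 'v')
# created for a mirrored variable against hand-computed values. The same slot
# bookkeeping can be observed without a distribution strategy using the
# public API, assuming TF 2.x eager execution; here the single gradient is
# simply d(0.5 * var)/d(var) = 0.5.

def demo_adam_slots_single_replica():
  import tensorflow as tf

  var = tf.Variable(2.0, name='var')
  optimizer = tf.keras.optimizers.Adam(
      learning_rate=0.01, beta_1=0.2, beta_2=0.2)

  def loss_fn():
    return 0.5 * var

  optimizer.minimize(loss_fn, var_list=[var])

  # After one step:
  #   m(1) = (1 - beta_1) * grad   = 0.8 * 0.5  = 0.4
  #   v(1) = (1 - beta_2) * grad^2 = 0.8 * 0.25 = 0.2
  m = optimizer.get_slot(var, 'm')
  v = optimizer.get_slot(var, 'v')
  return float(var.numpy()), float(m.numpy()), float(v.numpy())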
tensorflow-master
tensorflow/python/keras/distribute/keras_optimizer_v2_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Correctness tests for tf.keras LSTM model using DistributionStrategy.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python import keras from tensorflow.python.distribute import combinations from tensorflow.python.eager import test from tensorflow.python.keras.distribute import keras_correctness_test_base from tensorflow.python.keras.optimizer_v2 import gradient_descent as gradient_descent_keras from tensorflow.python.training import gradient_descent class DistributionStrategyLstmModelCorrectnessTest( keras_correctness_test_base. TestDistributionStrategyEmbeddingModelCorrectnessBase): def get_model(self, max_words=10, initial_weights=None, distribution=None, cloning=None, input_shapes=None): del input_shapes with keras_correctness_test_base.MaybeDistributionScope(distribution): word_ids = keras.layers.Input( shape=(max_words,), dtype=np.int32, name='words') word_embed = keras.layers.Embedding(input_dim=20, output_dim=10)(word_ids) lstm_embed = keras.layers.LSTM(units=4, return_sequences=False)(word_embed) preds = keras.layers.Dense(2, activation='softmax')(lstm_embed) model = keras.Model(inputs=[word_ids], outputs=[preds]) if initial_weights: model.set_weights(initial_weights) # TODO(b/130808953): Re-enable the V1 optimizer after iterations is # mirrored. optimizer_fn = ( gradient_descent.GradientDescentOptimizer if cloning else gradient_descent_keras.SGD) model.compile( optimizer=optimizer_fn(learning_rate=0.1), loss='sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'], cloning=cloning) return model @combinations.generate(keras_correctness_test_base. test_combinations_for_embedding_model()) def test_lstm_model_correctness(self, distribution, use_numpy, use_validation_data, cloning): self.run_correctness_test(distribution, use_numpy, use_validation_data, cloning) if __name__ == '__main__': test.main()
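# ----------------------------------------------------------------------------
# Illustrative sketch appended to this file dump (not part of the original
# test module). The correctness test above builds a tiny
# Embedding -> LSTM -> Dense classifier through the shared test harness. A
# standalone public-API version of the same architecture is sketched below;
# the helper name, data shapes and vocabulary size are assumptions for
# illustration.

def demo_lstm_classifier():
  import numpy as np
  import tensorflow as tf

  max_words = 10
  word_ids = tf.keras.layers.Input(
      shape=(max_words,), dtype='int32', name='words')
  word_embed = tf.keras.layers.Embedding(
      input_dim=20, output_dim=10)(word_ids)
  lstm_embed = tf.keras.layers.LSTM(
      units=4, return_sequences=False)(word_embed)
  preds = tf.keras.layers.Dense(2, activation='softmax')(lstm_embed)
  model = tf.keras.Model(inputs=[word_ids], outputs=[preds])
  model.compile(
      optimizer=tf.keras.optimizers.SGD(learning_rate=0.1),
      loss='sparse_categorical_crossentropy',
      metrics=['sparse_categorical_accuracy'])

  x = np.random.randint(0, 20, size=(32, max_words)).astype(np.int32)
  y = np.random.randint(0, 2, size=(32, 1)).astype(np.int32)
  return model.fit(x, y, epochs=1, verbose=0).history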
tensorflow-master
tensorflow/python/keras/distribute/keras_lstm_model_correctness_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilities related to distributed training.""" # pylint:disable=protected-access from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools import numpy as np from tensorflow.python.data.ops import dataset_ops from tensorflow.python.data.ops import iterator_ops from tensorflow.python.distribute import distribute_coordinator_context as dc_context from tensorflow.python.distribute import distribution_strategy_context as ds_context from tensorflow.python.distribute import multi_worker_util from tensorflow.python.distribute import reduce_util from tensorflow.python.eager import context from tensorflow.python.eager import def_function from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_util from tensorflow.python.keras import backend as K from tensorflow.python.keras import callbacks from tensorflow.python.keras import metrics as metrics_module from tensorflow.python.keras import optimizers from tensorflow.python.keras.engine import training_utils from tensorflow.python.keras.optimizer_v2 import optimizer_v2 from tensorflow.python.keras.utils.mode_keys import ModeKeys from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import variables from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util import nest from tensorflow.python.util import tf_contextlib def set_weights(distribution_strategy, dist_model, weights): """Sets the weights of the replicated models. The weights of the replicated models are set to the weights of the original model. The weights of the replicated model are Mirrored variables and hence we need to use the `update` call within a DistributionStrategy scope. Args: distribution_strategy: DistributionStrategy used to distribute training and validation. dist_model: The replicated models on the different devices. weights: The weights of the original model. """ assign_ops = [] for layer in dist_model.layers: num_param = len(layer.weights) layer_weights = weights[:num_param] for sw, w in zip(layer.weights, layer_weights): if ops.executing_eagerly_outside_functions(): sw.assign(w) else: assign_ops.append(distribution_strategy.unwrap(sw.assign(w))) weights = weights[num_param:] if not ops.executing_eagerly_outside_functions(): K.get_session(assign_ops).run(assign_ops) def unwrap_values(distribution_strategy, grouped_inputs, grouped_outputs, grouped_updates=None, grouped_session_args=None, with_loss_tensor=False): """Unwrap the list of values contained in the PerReplica parameters. This function calls `flatten_per_replica_values` to parse each of the input parameters into a list of values on the different devices. 
If we set `with_loss_tensor` to be True, we also call `reduce` on the list of losses on the different devices to give us one loss tensor. Args: distribution_strategy: DistributionStrategy used to distribute training and validation. grouped_inputs: PerReplica inputs returned from the train or test function that we ran on each device. grouped_outputs: PerReplica outputs returned from the train or test function that we ran on each device. grouped_updates: PerReplica updates returned from the train or test function that we ran on each device. grouped_session_args: PerReplica session args returned from the train or test function that we ran on each device. with_loss_tensor: Boolean that indicates if we need to add the reduced loss tensor as one of the outputs. Returns: Values of each of the PerReplica parameters. """ # Unwrap per device values returned from each model's train function. # This will be used to construct the main train function. all_inputs = flatten_per_replica_values(distribution_strategy, grouped_inputs) all_outputs = unwrap_outputs(distribution_strategy, grouped_outputs, with_loss_tensor) if grouped_updates: all_updates = flatten_per_replica_values(distribution_strategy, grouped_updates) else: all_updates = None all_session_args = {} if grouped_session_args: grouped_feed_dict = grouped_session_args.get('feed_dict') if grouped_feed_dict: all_session_args['feed_dict'] = flatten_per_replica_values( distribution_strategy, grouped_feed_dict) grouped_fetches = grouped_session_args.get('fetches') if grouped_fetches: all_session_args['fetches'] = flatten_per_replica_values( distribution_strategy, grouped_fetches) # TODO(priyag): Return only non empty/None values return all_inputs, all_outputs, all_updates, all_session_args def unwrap_outputs(distribution_strategy, grouped_outputs, with_loss_tensor=False): """Unwrap the list of outputs contained in the PerReplica parameters. This function calls `flatten_per_replica_values` to parse each of the input parameters into a list of outputs on the different devices. If we set `with_loss_tensor` to be True, we also call `reduce` on the list of losses on the different devices to give us one loss tensor. Args: distribution_strategy: DistributionStrategy used to distribute training and validation. grouped_outputs: PerReplica outputs returned from the train or test function that we ran on each device. with_loss_tensor: Boolean that indicates if we need to add the reduced loss tensor as one of the outputs. Returns: Values of each of the PerReplica outputs. """ if not with_loss_tensor: return flatten_per_replica_values(distribution_strategy, grouped_outputs) if not isinstance(grouped_outputs, list): grouped_outputs = [grouped_outputs] # reduce loss tensor before adding it to the list of fetches loss = distribution_strategy.reduce(reduce_util.ReduceOp.SUM, grouped_outputs[0], axis=None) all_outputs = flatten_per_replica_values(distribution_strategy, grouped_outputs[1:]) if (is_tpu_strategy(distribution_strategy) and ops.executing_eagerly_outside_functions()): # Choose 1 value per replica in the TPU case since all replicas produce the # same output. # We only do this in eager mode for now since this function is used in # both graph and eager mode and in the graph case we currently don't use # experimental_run so would need to be removed when we converge the graph # code path as well. 
all_outputs = all_outputs[::distribution_strategy.num_replicas_in_sync] return [loss] + all_outputs def flatten_per_replica_values(distribution_strategy, per_replica_values): """Unwraps and flattens a nest of PerReplica parameters. PerReplica values have one value associated with each device. Each entry in the PerReplica dict has a device `key` and the corresponding value on the device as the `value`. In this function we take a PerReplica value or a list of PerReplica values and return all the values in the PerReplica dict. Args: distribution_strategy: DistributionStrategy used to distribute training and validation. per_replica_values: List of PerReplica object or a single PerReplica object. Returns: List of values of all the PerReplica objects. """ # pylint: disable=g-complex-comprehension # This function takes a PerReplica object or a list of PerReplica objects and # returns all the values associated with it. return [e for flattened in nest.flatten(per_replica_values) for e in distribution_strategy.unwrap(flattened)] def validate_callbacks(input_callbacks, optimizer): """Validate whether given callbacks are supported by DistributionStrategy. Args: input_callbacks: List of callbacks passed by the user to fit. optimizer: Optimizer instance used to train the model. Raises: ValueError: If `LearningRateScheduler` or `ReduceLROnPlateau` is one of the callbacks passed. ValueError: If `histogram_freq` or `write_grads` is one of the parameters passed as part of the TensorBoard callback. """ if input_callbacks: for callback in input_callbacks: if isinstance(callback, (callbacks.LearningRateScheduler, callbacks.ReduceLROnPlateau)): if not isinstance(optimizer, optimizer_v2.OptimizerV2): raise ValueError('You must specify a Keras Optimizer V2 when using ' '%s callback with DistributionStrategy.' % callback) # If users want to use the TensorBoard callback they cannot use certain # features of the callback that involve accessing model attributes and # running ops. if isinstance(callback, callbacks.TensorBoard): if getattr(callback, 'histogram_freq', False): logging.warning( UserWarning( '`histogram_freq` in the TensorBoard callback is not ' 'supported when using DistributionStrategy. Setting ' '`histogram_freq` to `0`.')) callback.histogram_freq = 0 if getattr(callback, 'write_grads', False): logging.warning( UserWarning( '`write_grads` in the TensorBoard callback is not supported ' 'when using DistributionStrategy. Setting `write_grads` ' 'to `False`.')) callback.histogram_freq = False def validate_distributed_dataset_inputs(distribution_strategy, x, y, sample_weights=None): """Validate all the components of a DistributedValue Dataset input. Args: distribution_strategy: The current DistributionStrategy used to call `fit`/`evaluate`. x: Input Dataset DistributedValue object. For example, when we use `MirroredStrategy` this is a PerReplica object with a tensor for each device set in the dict. x can also be a tuple or dict. The keys of the dict should match the names of the input layers of the model. y: Target Dataset DistributedValue object. For example, when we use `MirroredStrategy` this is a PerReplica object with a tensor for each device set in the dict. y can also be a tuple or dict. The keys of the dict should match the names of the output layers of the model. sample_weights: Sample weights Dataset DistributedValue object. For example, when we use `MirroredStrategy` this is a PerReplica object with a tensor for each device set in the dict. 
Returns: The unwrapped values list of the x and y DistributedValues inputs. Raises: ValueError: If x and y do not have support for being evaluated as tensors. or if x and y contain elements that are not tensors or if x and y contain elements that have a shape or dtype mismatch. """ # If the input and target used to call the model are not dataset tensors, # we need to raise an error. When using a DistributionStrategy, the input # and targets to a model should be from a `tf.data.Dataset`. # If each element of x and y are not tensors, we cannot standardize and # validate the input and targets. x_values_list = validate_per_replica_inputs(distribution_strategy, x) if y is not None: y_values_list = validate_per_replica_inputs(distribution_strategy, y) else: y_values_list = None if sample_weights is not None: sample_weights_list = validate_per_replica_inputs(distribution_strategy, sample_weights) else: sample_weights_list = None # Return the unwrapped values to avoid calling `unwrap` a second time. return x_values_list, y_values_list, sample_weights_list def validate_per_replica_inputs(distribution_strategy, x): """Validates PerReplica dataset input list. Args: distribution_strategy: The current DistributionStrategy used to call `fit`, `evaluate` and `predict`. x: A list of PerReplica objects that represent the input or target values. Returns: List containing the first element of each of the PerReplica objects in the input list. Raises: ValueError: If any of the objects in the `per_replica_list` is not a tensor. """ # Convert the inputs and targets into a list of PerReplica objects. per_replica_list = nest.flatten(x) x_values_list = [] for x in per_replica_list: if not tensor_util.is_tensor(x): raise ValueError('Dataset input to the model should be tensors instead ' 'they are of type {}'.format(type(x))) # At this point both x and y contain tensors in the `DistributedValues` # structure. x_values = distribution_strategy.unwrap(x) if not context.executing_eagerly(): # Validate that the shape and dtype of all the elements in x are the same. 
validate_all_tensor_shapes(x, x_values) validate_all_tensor_types(x, x_values) x_values_list.append(x_values[0]) return x_values_list def validate_all_tensor_types(x, x_values): x_dtype = x_values[0].dtype for i in range(1, len(x_values)): if x_dtype != x_values[i].dtype: raise ValueError('Input tensor dtypes do not match for distributed tensor' ' inputs {}'.format(x)) def validate_all_tensor_shapes(x, x_values): # Validate that the shape of all the elements in x have the same shape x_shape = x_values[0].shape.as_list() for i in range(1, len(x_values)): if x_shape != x_values[i].shape.as_list(): raise ValueError('Input tensor shapes do not match for distributed tensor' ' inputs {}'.format(x)) def _wait_for_variable_initialization(session): """Utility to wait for variables to be initialized.""" all_variables = K._get_variables(K.get_graph()) # pylint: disable=protected-access candidate_vars = [] for v in all_variables: if not getattr(v, '_keras_initialized', False): candidate_vars.append(v) if not candidate_vars: return while True: is_initialized = session.run( [variables.is_variable_initialized(v) for v in candidate_vars]) uninitialized_vars = [] for flag, v in zip(is_initialized, candidate_vars): if not flag: uninitialized_vars.append(v) v._keras_initialized = True # pylint: disable=protected-access if not uninitialized_vars: break def init_restore_or_wait_for_variables(): """Initialize or restore variables or wait for variables to be initialized.""" session = K._get_session() # pylint: disable=protected-access if not multi_worker_util.has_worker_context( ) or multi_worker_util.should_load_checkpoint(): # TODO(yuefengz): if checkpoints exist, restore from checkpoint. K._initialize_variables(session) # pylint: disable=protected-access else: _wait_for_variable_initialization(session) def validate_inputs(x, y): """Validate inputs when using DistributionStrategy. Args: x: Model Inputs. y: Model Targets. Raises: ValueError: if input is not a Dataset or a numpy array(when we use MirroredStrategy). """ if (isinstance(x, iterator_ops.Iterator) or isinstance(y, iterator_ops.Iterator)): raise ValueError('`DistributionStrategy` does not support inputs of type ' 'Iterator. You must pass a `tf.data.Dataset` object or a ' 'numpy array as input.') # TODO(b/118776054): Currently we support global batch size for TPUStrategy and # core MirroredStrategy only. Remove this check when contrib MirroredStrategy is # no longer needed. def global_batch_size_supported(distribution_strategy): return distribution_strategy.extended._global_batch_size # pylint: disable=protected-access # TODO(sourabhbajaj): Remove this once we use the same API for all strategies. def is_tpu_strategy(strategy): """We're executing TPU Strategy.""" return (strategy is not None and strategy.__class__.__name__.startswith('TPUStrategy')) def is_dataset_shape_fully_defined(dataset): """Returns whether a dataset contains a final partial batch.""" shapes = nest.flatten(dataset_ops.get_legacy_output_shapes(dataset)) unknown_shapes = [s for s in shapes if not s.is_fully_defined()] return not unknown_shapes def get_input_params(distribution_strategy, first_x_value, steps, batch_size, mode=None): """Calculate the number of batches and steps/steps_per_epoch. Args: distribution_strategy: The DistributionStrategy used to compile the model. first_x_value: This is the first input numpy array that is passed in as the model input. steps: The specified number of steps. batch_size: The specified batch_size. 
mode: ModeKey representing whether input will be used for training, evaluation, or prediction. This is used to relax the constraints on consuming all the training samples to keep compatibility till we support partial batches. If none, then partial batches are not allowed. Returns: steps: The steps or steps_per_epoch argument depending on if a user is calling `fit`, `evaluate` or `predict`. If the is_training flag is set we don't require the number of samples to be used completely. batch_size: The batch size to be used in model iterations. Raises: ValueError: If the number of batches or steps evaluates to 0. """ num_samples = first_x_value.shape[0] # TODO(b/118776054): Use global batch size for Keras/DS support. # Currently this is only supported in TPUStrategy and CoreMirroredStrategy. use_per_replica_batch = not global_batch_size_supported( distribution_strategy) # TODO(b/128995245): In eager mode, uneven batch sizes are allowed except for # `fit()` on TPUStrategy. # In graph mode, the zero batch case in batch norm is not handled due to # XLA-GPU regression. Uneven batch sizes are not allowed except # for `test()` and `predict()` on TPUStrategy. if context.executing_eagerly(): allow_partial_batch = (mode != ModeKeys.TRAIN or not is_tpu_strategy(distribution_strategy)) else: allow_partial_batch = (mode == ModeKeys.TRAIN or ((mode == ModeKeys.PREDICT or mode == ModeKeys.TEST) and is_tpu_strategy(distribution_strategy))) if steps is None: if batch_size is None: # If neither the batch size or number of steps are set. We choose the # global batch size as the minimum of number of samples and 32. 32 is # chosen to provide backward compatibility. global_batch_size = min(num_samples, 32) else: # If the user provided the batch size we need to handle the case # between different strategies that use the global/per-replica batch size global_batch_size = batch_size if use_per_replica_batch: global_batch_size *= distribution_strategy.num_replicas_in_sync if allow_partial_batch: steps = np.ceil(num_samples / global_batch_size).astype(int) else: if num_samples % global_batch_size: raise ValueError('The number of samples %s is not divisible by ' 'batch size %s.' % (num_samples, global_batch_size)) steps = num_samples // global_batch_size else: if batch_size is None: # We calculate the batch size based on the number of steps specified if num_samples % steps: raise ValueError('The number of samples %s is not divisible by ' 'steps %s. Please change the number of steps to a ' 'value that can consume all the samples' % ( num_samples, steps)) global_batch_size = num_samples // steps else: # If the user provided the batch size we need to handle the case # between different strategies that use the global/per-replica batch size global_batch_size = batch_size if use_per_replica_batch: global_batch_size *= distribution_strategy.num_replicas_in_sync min_num_samples = global_batch_size * steps if allow_partial_batch: min_num_samples = global_batch_size * (steps-1) + 1 if steps > 1 else 0 if num_samples < min_num_samples: raise ValueError('Number of samples %s is less than samples required ' 'for specified batch_size %s and steps %s' % ( num_samples, global_batch_size, steps)) # We need to return the per replica or global batch size based on the strategy if use_per_replica_batch: if global_batch_size % distribution_strategy.num_replicas_in_sync: raise ValueError( 'The batch size (%s) could not be sharded evenly across the sync ' 'replicas (%s) in the distribution strategy.' 
% ( global_batch_size, distribution_strategy.num_replicas_in_sync)) batch_size = global_batch_size // distribution_strategy.num_replicas_in_sync else: batch_size = global_batch_size return steps, batch_size def get_batch_dimension(iterator): shapes = nest.flatten(dataset_ops.get_legacy_output_shapes(iterator)) # Take the batch size from the first element, as it should be the same for # all. dims = shapes[0].dims return dims[0] if dims else None def list_to_tuple(maybe_list): """Datasets treat lists specially, so switch them to tuples.""" if isinstance(maybe_list, list): return tuple(maybe_list) return maybe_list def get_iterator(dataset, distribution_strategy): with distribution_strategy.scope(): iterator = distribution_strategy.make_dataset_iterator(dataset) initialize_iterator(iterator, distribution_strategy) return iterator def initialize_iterator(iterator, distribution_strategy): with distribution_strategy.scope(): init_op = control_flow_ops.group(iterator.initialize()) if not context.executing_eagerly(): K.get_session((init_op,)).run(init_op) def _get_input_from_iterator(iterator, model): """Get elements from the iterator and verify the input shape and type.""" next_element = iterator.get_next() # `len(nest.flatten(x))` is going to not count empty elements such as {}. # len(nest.flatten([[0,1,2], {}])) is 3 and not 4. The `next_element` is # going to get flattened in `_prepare_feed_values` to work around that. Empty # elements are going to get filtered out as part of the flattening. if len(nest.flatten(next_element)) == len(model.inputs): x = next_element y = None sample_weights = None elif len(nest.flatten(next_element)) == (len(model.inputs) + len(model.outputs)): x, y = next_element sample_weights = None else: x, y, sample_weights = next_element # Validate that all the elements in x and y are of the same type and shape. validate_distributed_dataset_inputs( model._distribution_strategy, x, y, sample_weights) return x, y, sample_weights def _prepare_feed_values(model, inputs, targets, sample_weights, mode): """Prepare feed values to the model execution function. Arguments: model: Model to prepare feed values for. inputs: List or dict of model inputs. targets: Optional list of model targets. sample_weights: Optional list of sample weight arrays. mode: One of ModeKeys.TRAIN/ModeKeys.TEST/ModeKeys.PREDICT. Returns: Feed values for the model in the given mode. """ strategy = model._distribution_strategy inputs, targets, sample_weights = _get_input_from_iterator(inputs, model) if is_tpu_strategy(strategy): if sample_weights is not None: raise ValueError('TPUStrategy does not support sample weights.') # When the inputs are dict, then we want to flatten it in the same order as # the input layers, such that the data are fed into the input layers in the # correct order. if isinstance(inputs, dict): inputs = [inputs[key] for key in model._feed_input_names] if is_distributing_by_cloning(model): inputs = flatten_per_replica_values(strategy, inputs) targets = flatten_per_replica_values(strategy, targets) # Expand 1-dimensional inputs. # TODO(b/124535720): Remove once this standarize data logic is shared with # main flow. 
inputs, targets = nest.map_structure( training_utils.standardize_single_array, (inputs, targets)) else: inputs = training_utils.ModelInputs(inputs).as_list() if mode == ModeKeys.PREDICT: sample_weights = [] targets = [] elif sample_weights is not None and is_distributing_by_cloning(model): if context.executing_eagerly() and not model._compile_distribution: raise NotImplementedError('`sample_weight` is not supported when using ' 'tf.distribute.Strategy in eager mode and ' 'cloning=True.') sample_weights = flatten_per_replica_values(strategy, sample_weights) ins = [inputs, targets, sample_weights] return tuple(ins) def is_distributing_by_cloning(model): """Decide whether this model is going to be distributed via cloning. We are going to distribute the model by cloning if the user has signaled that intent by setting `cloning=True` in `Model.compile()` unless we are in graph mode. Args: model: Keras model to distribute. Returns: True if the `model` is going to be distributed using cloning and False otherwise. """ if (is_tpu_strategy(model._distribution_strategy) and context.executing_eagerly): if model._cloning: logging.warning( 'Model cloning is not supported in TPU Strategy in Eager mode.' 'cloning argument will be ignored.') return False return (model._cloning or model._compile_distribution or not ops.executing_eagerly_outside_functions()) def _custom_compile_for_predict(model): """Custom compile for TPU predict mode.""" if not model.built: # Model is not compilable because it does not know its number of inputs # and outputs, nor their shapes and names. We will compile after the first # time the model gets called on training data. return model._is_compiled = True model.total_loss = None model.train_function = None model.test_function = None model.predict_function = None def _build_network_on_replica(model, mode, inputs=None, targets=None): """Build an updated model on replicas. We create a new Keras model while sharing the variables from the old graph. Building a new sub-graph is required since the original keras model creates placeholders for the input and the output that are not accessible till we call iterator.get_next() inside the step_fn for `fit`/`evaluate`/`predict`. The sharing of weights and layers between the old and the new model gaurantee that we're using Strategy variables and any updates on either model are reflected correctly in callbacks and loop iterations. We need to make sure we share the optimizers between the old and the new model as well so that optimizer state is not lost if the user is running fit multiple times. Args: model: Model to be replicated across Replicas mode: Which of fit/eval/predict is building the distributed network inputs: Input variables to be passed to the model targets: Target tensor to be passed to model.compile Returns: A new model with shared layers with the old model. """ # Need to do imports here since we run into a circular dependency error. from tensorflow.python.keras import models # pylint: disable=g-import-not-at-top from tensorflow.python.keras.engine import sequential # pylint: disable=g-import-not-at-top # We rely on the internal methods to avoid having share_weights weights in the # public API. if isinstance(model, sequential.Sequential): updated_model = models._clone_sequential_model( model, input_tensors=inputs, layer_fn=models.share_weights) else: updated_model = models._clone_functional_model( model, input_tensors=inputs, layer_fn=models.share_weights) # Callable losses added directly to a functional Model need to be added # here. 
updated_model._callable_losses = model._callable_losses # Recast all low precision outputs back to float32 since we only casted # the inputs to bfloat16 and not targets. This is done so that we can preserve # precision when calculating the loss value. def _upcast_low_precision_outputs(output): if output.dtype == dtypes.bfloat16: return math_ops.cast(output, dtypes.float32) else: return output updated_model.outputs = [_upcast_low_precision_outputs(o) for o in updated_model.outputs] if isinstance(targets, tuple): targets = nest.flatten(targets) if mode == ModeKeys.PREDICT and inputs is not None: # TPU predict case _custom_compile_for_predict(updated_model) else: updated_model.compile( model.optimizer, model.loss, metrics=metrics_module.clone_metrics(model._compile_metrics), loss_weights=model.loss_weights, sample_weight_mode=model.sample_weight_mode, weighted_metrics=metrics_module.clone_metrics( model._compile_weighted_metrics), target_tensors=targets) return updated_model def _build_distributed_network(model, strategy, mode, inputs=None, targets=None): """Create a cloned model on each replica.""" with K.get_graph().as_default(), strategy.scope(): distributed_model = strategy.extended.call_for_each_replica( _build_network_on_replica, args=(model, mode, inputs, targets)) set_distributed_model(model, mode, distributed_model) def _clone_and_build_model(model, mode, inputs=None, targets=None): """Clone and build the given keras_model.""" # We need to set the import here since we run into a circular dependency # error. from tensorflow.python.keras import models # pylint: disable=g-import-not-at-top cloned_model = models.clone_model(model, input_tensors=inputs) # Compile and build model. if isinstance(model.optimizer, optimizers.TFOptimizer): optimizer = model.optimizer else: optimizer_config = model.optimizer.get_config() optimizer = model.optimizer.__class__.from_config(optimizer_config) # Recast all low precision outputs back to float32 since we only casted # the inputs to bfloat16 and not targets. This is done so that we can preserve # precision when calculating the loss value. 
def _upcast_low_precision_outputs(output): if output.dtype == dtypes.bfloat16: return math_ops.cast(output, dtypes.float32) else: return output cloned_model.outputs = [_upcast_low_precision_outputs(o) for o in cloned_model.outputs] if isinstance(targets, tuple): targets = nest.flatten(targets) if mode == ModeKeys.PREDICT and inputs is not None: # TPU predict case _custom_compile_for_predict(cloned_model) else: cloned_model.compile( optimizer, model.loss, metrics=metrics_module.clone_metrics(model._compile_metrics), loss_weights=model.loss_weights, sample_weight_mode=model.sample_weight_mode, weighted_metrics=metrics_module.clone_metrics( model._compile_weighted_metrics), target_tensors=targets) return cloned_model def clone_model_on_replicas(model, strategy, mode, inputs=None, targets=None): """Create a cloned model on each replica.""" with K.get_graph().as_default(), strategy.scope(): distributed_model = strategy.extended.call_for_each_replica( _clone_and_build_model, args=(model, mode, inputs, targets)) set_distributed_model(model, mode, distributed_model) if mode == ModeKeys.TRAIN: model._make_callback_model(distributed_model) def _make_execution_function(model, mode): """Makes or reuses function to run one step of distributed model execution.""" if is_distributing_by_cloning(model): return _make_execution_function_with_cloning(model, mode) distributed_function = get_distributed_function(model, mode) if distributed_function: return distributed_function distribution_function = _make_execution_function_without_cloning(model, mode) set_distributed_function(model, mode, distribution_function) return distribution_function def _make_execution_function_without_cloning(model, mode): """Creates a function to run one step of distributed model execution.""" strategy = model._distribution_strategy with strategy.scope(): per_replica_function = _make_replica_execution_function(model, mode) @def_function.function def distributed_function(input_fn): """A single step of the distributed execution across replicas.""" x, y, sample_weights = input_fn() # Call `Model.{train,test,predict}_on_batch` on every replica passing # PerReplicas as arguments. On every replica inside this call, each # PerReplica object will return the value for that replica. The outputs # are PerReplicas too. outputs = strategy.experimental_run_v2( per_replica_function, args=(x, y, sample_weights)) # Out of PerReplica outputs reduce or pick values to return. all_outputs = unwrap_outputs( strategy, outputs, with_loss_tensor=(mode != ModeKeys.PREDICT)) return all_outputs def execution_function(input_fn): # `numpy` translates Tensors to values in Eager mode. return [out.numpy() for out in distributed_function(input_fn)] return execution_function def _make_replica_execution_function(model, mode): """A single step of the distributed execution on a replica.""" if mode == ModeKeys.TRAIN: func = model.train_on_batch elif mode == ModeKeys.TEST: func = model.test_on_batch else: def predict_on_batch(x, y=None, sample_weights=None): del y, sample_weights return model.predict_on_batch(x) func = predict_on_batch if mode != ModeKeys.PREDICT: # `reset_metrics` is set to False to maintain stateful metrics across # batch-level calls. func = functools.partial(func, reset_metrics=False) return func def _make_replicated_models_with_cloning(model, mode): """Build models on each replica.""" strategy = model._distribution_strategy # If distributed_model is not built, create one for `mode`. 
  if model._compile_distribution:
    clone_model_on_replicas(model, strategy, mode)
  else:
    _build_distributed_network(model, strategy, mode)


def _make_execution_function_with_cloning(model, mode):
  """Clones or re-uses models to run one step of distributed model execution."""
  distributed_model = get_distributed_model(model, mode)
  # TODO(b/134069401): Create a cache for the distributed model and exec
  # function that incorporates additional attributes to be part of the cache
  # key than just the mode.
  # If distributed model for a particular `mode` is already built, use the
  # `_distribution_function` on that distributed model.
  # If you have updated the sample_weight_mode on the model, then you will need
  # to recompile metrics and recreate the execution function. This is indicated
  # by the `_recompile_exec_function` property.
  if (distributed_model and
      hasattr(distributed_model, '_distribution_function') and
      not (hasattr(distributed_model, '_recompile_exec_function') and
           distributed_model._recompile_exec_function)):
    return distributed_model._distributed_function

  if not distributed_model:
    _make_replicated_models_with_cloning(model, mode)
    distributed_model = get_distributed_model(model, mode)
    assert distributed_model

  # Also create an execution function on that distributed model.
  if context.executing_eagerly():
    distributed_function = _make_eager_execution_function(model, mode)
  else:
    distributed_function = _make_graph_execution_function(model, mode)

  # We cache the distributed execution function on the model since creating
  # distributed models and execution functions are expensive.
  distributed_model._distributed_function = distributed_function
  distributed_model._recompile_exec_function = False
  return distributed_function


def _make_graph_execution_function(model, mode):
  """Makes function to run one step of distributed model in graph mode."""

  def _per_replica_function(model):
    f = model._make_execution_function(mode)
    return (f.inputs, f.outputs, f.updates_op, f.session_kwargs)

  strategy = model._distribution_strategy
  with strategy.scope():
    # Create train ops on each of the devices when we call
    # `_per_replica_fit_function`.
    (grouped_inputs, grouped_outputs, grouped_updates,
     grouped_session_args) = strategy.extended.call_for_each_replica(
         _per_replica_function, args=(get_distributed_model(model, mode),))

    # Initialize the variables in the replicated model. This is necessary for
    # multi-worker training because on some workers, initialization is not
    # needed. This method does initialization or waiting for initialization
    # according to the context object of distribute coordinator.
    init_restore_or_wait_for_variables()

    # Unwrap all the per device values returned from `call_for_each_replica`.
    # Unwrapping per device values gives you a list of values that can be
    # used to construct a new train function that is composed of update ops on
    # all the devices over which the model is distributed.
(all_inputs, all_outputs, all_updates, all_session_args) = unwrap_values( strategy, grouped_inputs, grouped_outputs, grouped_updates, grouped_session_args, with_loss_tensor=(mode != ModeKeys.PREDICT)) return K.function( all_inputs, all_outputs, updates=all_updates, name='distributed_{}_function'.format(mode), **all_session_args) def _make_eager_execution_function(model, mode): """Makes function to run one step of distributed model eager execution.""" def _per_replica_function(model): f = model._make_execution_function(mode) return (f.inputs, f.outputs) # NOTE(priyag): Try creating a new FuncGraph within DS scope instead of using # the global one. strategy = model._distribution_strategy global_graph = K.get_graph() with global_graph.as_default(), strategy.scope(): # First we gather the relevant portions of the model across all replicas. # `K._scratch_graph(global_graph)` signals to Keras that it should not # lift to a separate graph when creating the per-replica functions. with K._scratch_graph(global_graph): # Create train ops on each of the devices when we call # `_per_replica_fit_function`. grouped = strategy.extended.call_for_each_replica( _per_replica_function, args=(get_distributed_model(model, mode),)) grouped_inputs, grouped_outputs = grouped # Unwrap all the per device values returned from `call_for_each_replica`. # Unwrapping per device values gives you a list of values that can be # used to construct a new train function that is composed of # inputs/outputs on all the devices over which the model is distributed. (all_inputs, all_outputs, _, _) = unwrap_values( strategy, grouped_inputs, grouped_outputs, with_loss_tensor=(mode != ModeKeys.PREDICT)) # Finally, a joint Keras function is created; this one will be created in # a separate FuncGraph. return K.function( all_inputs, all_outputs, name='eager_distributed_{}_function'.format(mode)) def _copy_weights_to_distributed_model(original_model, mode): """Copies weights from original model to distributed models.""" strategy = original_model._distribution_strategy distributed_model = get_distributed_model(original_model, mode) if strategy: # Copy the weights from the original model to each of the replicated # models. 
orig_model_weights = original_model.get_weights() first_model = strategy.unwrap(distributed_model)[0] set_weights(strategy, first_model, orig_model_weights) def _copy_weights_to_original_model(model, mode): """Copies weights from first distributed model back to original model.""" if model._distribution_strategy and mode == ModeKeys.TRAIN: distributed_model = get_distributed_model(model, mode) updated_weights = model._distribution_strategy.unwrap( distributed_model)[0].get_weights() model.set_weights(updated_weights) def _per_replica_aggregate_batch(batch_outs, model, mode): """Aggregates the per-replica batch-level outputs from a distributed step.""" if model._distribution_strategy is not None and mode == ModeKeys.PREDICT: total_batch_outs = [] for i in range(len(model.outputs)): num_replicas = model._distribution_strategy.num_replicas_in_sync nested_outs = batch_outs[i * num_replicas:i * num_replicas + num_replicas] total_batch_outs.append(np.concatenate(nest.flatten(nested_outs))) return total_batch_outs return batch_outs def _reset_metrics(model): if model._distribution_strategy: for mode in [ModeKeys.TRAIN, ModeKeys.TEST, ModeKeys.PREDICT]: distributed_model = get_distributed_model(model, mode) if distributed_model: first_model = model._distribution_strategy.unwrap(distributed_model)[0] first_model.reset_metrics() def get_distributed_model(model, mode): key = _generate_cache_key(mode) return model._distributed_model_cache.get(key, None) def set_distributed_model(model, mode, distributed_model): key = _generate_cache_key(mode) model._distributed_model_cache[key] = distributed_model def get_distributed_function(model, mode): key = _generate_cache_key(mode) return model._distributed_function_cache.get(key, None) def set_distributed_function(model, mode, distributed_function): key = _generate_cache_key(mode) model._distributed_function_cache[key] = distributed_function def _generate_cache_key(mode): key = hash(mode) return key @tf_contextlib.contextmanager def distributed_scope(strategy, learning_phase): with strategy.scope(), K.learning_phase_scope(learning_phase): yield def call_replica_local_fn(fn, *args, **kwargs): """Call a function that uses replica-local variables. This function correctly handles calling `fn` in a cross-replica context. Arguments: fn: The function to call. *args: Positional arguments to the `fn`. **kwargs: Keyword argument to `fn`. Returns: The result of calling `fn`. """ # TODO(b/132666209): Remove this function when we support assign_* # for replica-local variables. strategy = None if 'strategy' in kwargs: strategy = kwargs.pop('strategy') else: if ds_context.has_strategy(): strategy = ds_context.get_strategy() # TODO(b/120571621): TPUStrategy does not implement replica-local variables. is_tpu = is_tpu_strategy(strategy) if ((not is_tpu) and strategy and ds_context.in_cross_replica_context()): with strategy.scope(): return strategy.extended.call_for_each_replica(fn, args, kwargs) return fn(*args, **kwargs) def is_current_worker_chief(): return dc_context.get_current_worker_context().is_chief def filter_distributed_callbacks(callbacks_list): """Filter Callbacks based on the worker context when running multi-worker. Arguments: callbacks_list: A list of `Callback` instances. Returns: The list of `Callback` instances that should be run on this worker. 
""" if not multi_worker_util.in_multi_worker_mode(): raise ValueError( 'filter_distributed_callbacks() should only be called when Keras ' 'is in multi worker mode.') callbacks_list = callbacks_list or [] if not [ c for c in callbacks_list if isinstance(c, callbacks.ModelCheckpoint) ]: # TODO(rchao): Consider providing a ModelCheckpoint here if the user # fails to (possibly with tempfile directory). logging.warning('ModelCheckpoint callback is not provided. ' 'Workers will need to restart training if any fails.') if callbacks_list is None or is_current_worker_chief(): return callbacks_list # Some Callbacks should only run on the chief worker. return [ callback for callback in callbacks_list if not callback._chief_worker_only ] # pylint: disable=protected-access def _update_sample_weight_modes(model, mode, sample_weights): """Update sample_weight_mode of the distributed model.""" if is_distributing_by_cloning(model): distributed_model = get_distributed_model(model, mode) if not distributed_model: _make_replicated_models_with_cloning(model, mode) distributed_model = get_distributed_model(model, mode) distributed_model._recompile_exec_function = any( [e.sample_weights_mismatch() for e in model._training_endpoints]) if sample_weights: distributed_models = flatten_per_replica_values( model._distribution_strategy, distributed_model) # sample_weights is a tuple of 1 list where the number of elements in the # list is equal to the number of replicas in sync. sample_weights = sample_weights[0] if sample_weights and None not in sample_weights: for m, sw in zip(distributed_models, sample_weights): m._update_sample_weight_modes(sample_weights=[sw])
tensorflow-master
tensorflow/python/keras/distribute/distributed_training_utils.py
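The steps/batch-size bookkeeping in `get_input_params` above is easiest to see with concrete numbers. The NumPy sketch below mirrors the per-replica path described there: the global batch is the user-provided batch scaled by `num_replicas_in_sync`, and `steps` is derived from the sample count, rounding up only when partial batches are allowed. All concrete values (sample count, batch size, replica count) are hypothetical stand-ins, not part of the module above.

```python
# A minimal sketch of the steps/batch-size arithmetic described in
# get_input_params; the concrete numbers and replica count are hypothetical.
import numpy as np

num_samples = 1000          # hypothetical first_x_value.shape[0]
per_replica_batch = 32      # hypothetical user-provided batch_size
num_replicas_in_sync = 2    # hypothetical strategy.num_replicas_in_sync
allow_partial_batch = True  # e.g. eval/predict, where uneven batches are OK

# With per-replica batching, the effective global batch size is scaled by
# the number of replicas kept in sync.
global_batch_size = per_replica_batch * num_replicas_in_sync

if allow_partial_batch:
  steps = int(np.ceil(num_samples / global_batch_size))
else:
  if num_samples % global_batch_size:
    raise ValueError('The number of samples is not divisible by batch size.')
  steps = num_samples // global_batch_size

print(global_batch_size, steps)  # 64, 16  (ceil(1000 / 64) == 16)
```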
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for distributed training utility functions."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.keras import callbacks
from tensorflow.python.keras.distribute import distributed_training_utils
from tensorflow.python.keras.optimizer_v2 import adam
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import adam as v1_adam


class DistributedTrainingUtilsTest(test.TestCase):

  @test.mock.patch.object(logging, 'warning', autospec=True)
  def test_validate_callbacks_predefined_callbacks(self, mock_warning):
    supported_predefined_callbacks = [
        callbacks.TensorBoard(),
        callbacks.CSVLogger(filename='./log.csv'),
        callbacks.EarlyStopping(),
        callbacks.ModelCheckpoint(filepath='./checkpoint'),
        callbacks.TerminateOnNaN(),
        callbacks.ProgbarLogger(),
        callbacks.History(),
        callbacks.RemoteMonitor()
    ]

    distributed_training_utils.validate_callbacks(
        supported_predefined_callbacks, adam.Adam())

    unsupported_predefined_callbacks = [
        callbacks.ReduceLROnPlateau(),
        callbacks.LearningRateScheduler(schedule=lambda epoch: 0.001)
    ]

    for callback in unsupported_predefined_callbacks:
      with self.assertRaisesRegexp(
          ValueError, 'You must specify a Keras Optimizer V2'):
        distributed_training_utils.validate_callbacks([callback],
                                                      v1_adam.AdamOptimizer())

    self.assertEqual(0, mock_warning.call_count)


if __name__ == '__main__':
  test.main()
tensorflow-master
tensorflow/python/keras/distribute/distributed_training_utils_test.py
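The test above exercises `validate_callbacks`: learning-rate-modifying callbacks such as `ReduceLROnPlateau` and `LearningRateScheduler` are only accepted together with a Keras V2 optimizer when a distribution strategy is involved. The sketch below shows the corresponding user-level pattern under `MirroredStrategy`; the model, data, and hyperparameters are hypothetical placeholders and are not taken from the test.

```python
# Hedged sketch: compiling under MirroredStrategy with a V2 optimizer so that
# learning-rate-scheduling callbacks pass the validation tested above.
import numpy as np
import tensorflow as tf

strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
  model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
  model.compile(optimizer=tf.keras.optimizers.Adam(0.001), loss='mse')

x = np.random.random((64, 4)).astype('float32')  # hypothetical inputs
y = np.random.random((64, 1)).astype('float32')  # hypothetical targets

model.fit(
    x, y, epochs=2, batch_size=16,
    callbacks=[tf.keras.callbacks.ReduceLROnPlateau(monitor='loss',
                                                    patience=1)])
```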
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """RMSprop for TensorFlow.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.framework import ops from tensorflow.python.keras import backend_config from tensorflow.python.keras.optimizer_v2 import optimizer_v2 from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import state_ops from tensorflow.python.training import training_ops from tensorflow.python.util.tf_export import keras_export @keras_export("keras.optimizers.RMSprop") class RMSprop(optimizer_v2.OptimizerV2): r"""Optimizer that implements the RMSprop algorithm. A detailed description of rmsprop. - maintain a moving (discounted) average of the square of gradients - divide gradient by the root of this average $$mean_square_t = rho * mean_square{t-1} + (1-rho) * gradient ** 2$$ $$mom_t = momentum * mom_{t-1} + learning_rate * gradient / \sqrt{ / mean_square_t + \epsilon}$$ $$variable_t := variable_{t-1} - mom_t$$ This implementation of RMSprop uses plain momentum, not Nesterov momentum. The centered version additionally maintains a moving average of the gradients, and uses that average to estimate the variance: $$mean_grad_t = rho * mean_grad_{t-1} + (1-rho) * gradient$$ $$mean_square_t = rho * mean_square_{t-1} + (1-rho) * gradient ** 2$$ $$mom_t = momentum * mom_{t-1} + learning_rate * gradient / sqrt(mean_square_t - mean_grad_t**2 + epsilon)$$ $$variable_t := variable_{t-1} - mom_t$$ References See ([pdf] http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf). """ def __init__(self, learning_rate=0.001, rho=0.9, momentum=0.0, epsilon=1e-7, centered=False, name="RMSprop", **kwargs): """Construct a new RMSprop optimizer. Note that in the dense implementation of this algorithm, variables and their corresponding accumulators (momentum, gradient moving average, square gradient moving average) will be updated even if the gradient is zero (i.e. accumulators will decay, momentum will be applied). The sparse implementation (used when the gradient is an `IndexedSlices` object, typically because of `tf.gather` or an embedding lookup in the forward pass) will not update variable slices or their accumulators unless those slices were used in the forward pass (nor is there an "eventual" correction to account for these omitted updates). This leads to more efficient updates for large embedding lookup tables (where most of the slices are not accessed in a particular graph execution), but differs from the published algorithm. Args: learning_rate: A Tensor or a floating point value. The learning rate. rho: Discounting factor for the history/coming gradient momentum: A scalar tensor. epsilon: Small value to avoid zero denominator. 
centered: If True, gradients are normalized by the estimated variance of the gradient; if False, by the uncentered second moment. Setting this to True may help with training, but is slightly more expensive in terms of computation and memory. Defaults to False. name: Optional name prefix for the operations created when applying gradients. Defaults to "RMSprop". @compatibility(eager) When eager execution is enabled, `learning_rate`, `decay`, `momentum`, and `epsilon` can each be a callable that takes no arguments and returns the actual value to use. This can be useful for changing these values across different invocations of optimizer functions. @end_compatibility **kwargs: keyword arguments. Allowed to be {`clipnorm`, `clipvalue`, `lr`, `decay`}. `clipnorm` is clip gradients by norm; `clipvalue` is clip gradients by value, `decay` is included for backward compatibility to allow time inverse decay of learning rate. `lr` is included for backward compatibility, recommended to use `learning_rate` instead. """ super(RMSprop, self).__init__(name, **kwargs) self._set_hyper("learning_rate", kwargs.get("lr", learning_rate)) self._set_hyper("decay", self._initial_decay) self._set_hyper("rho", rho) self._momentum = False if isinstance(momentum, ops.Tensor) or callable(momentum) or momentum > 0: self._momentum = True if isinstance(momentum, (int, float)) and (momentum < 0 or momentum > 1): raise ValueError("`momentum` must be between [0, 1].") self._set_hyper("momentum", momentum) self.epsilon = epsilon or backend_config.epsilon() self.centered = centered def _create_slots(self, var_list): for var in var_list: self.add_slot(var, "rms") if self._momentum: for var in var_list: self.add_slot(var, "momentum") if self.centered: for var in var_list: self.add_slot(var, "mg") def _resource_apply_dense(self, grad, var): var_dtype = var.dtype.base_dtype lr_t = self._decayed_lr_t[var_dtype] rms = self.get_slot(var, "rms") rho = self._get_hyper("rho", var_dtype) momentum = self._get_hyper("momentum", var_dtype) epsilon_t = ops.convert_to_tensor(self.epsilon, var_dtype) if self._momentum: mom = self.get_slot(var, "momentum") if self.centered: mg = self.get_slot(var, "mg") return training_ops.resource_apply_centered_rms_prop( var.handle, mg.handle, rms.handle, mom.handle, lr_t, rho, momentum, epsilon_t, grad, use_locking=self._use_locking) else: return training_ops.resource_apply_rms_prop( var.handle, rms.handle, mom.handle, lr_t, rho, momentum, epsilon_t, grad, use_locking=self._use_locking) else: rms_t = rho * rms + (1. - rho) * math_ops.square(grad) rms_t = state_ops.assign(rms, rms_t, use_locking=self._use_locking) denom_t = rms_t if self.centered: mg = self.get_slot(var, "mg") mg_t = rho * mg + (1. 
- rho) * grad mg_t = state_ops.assign(mg, mg_t, use_locking=self._use_locking) denom_t = rms_t - math_ops.square(mg_t) var_t = var - lr_t * grad / (math_ops.sqrt(denom_t) + epsilon_t) return state_ops.assign(var, var_t, use_locking=self._use_locking).op def _resource_apply_sparse(self, grad, var, indices): var_dtype = var.dtype.base_dtype lr_t = self._decayed_lr_t[var_dtype] rms = self.get_slot(var, "rms") rho = self._get_hyper("rho", var_dtype) momentum = self._get_hyper("momentum", var_dtype) epsilon_t = ops.convert_to_tensor(self.epsilon, var_dtype) if self._momentum: mom = self.get_slot(var, "momentum") if self.centered: mg = self.get_slot(var, "mg") return training_ops.resource_sparse_apply_centered_rms_prop( var.handle, mg.handle, rms.handle, mom.handle, lr_t, rho, momentum, epsilon_t, grad, indices, use_locking=self._use_locking) else: return training_ops.resource_sparse_apply_rms_prop( var.handle, rms.handle, mom.handle, lr_t, rho, momentum, epsilon_t, grad, indices, use_locking=self._use_locking) else: rms_scaled_g_values = (grad * grad) * (1. - rho) rms_t = state_ops.assign(rms, rms * rho, use_locking=self._use_locking) with ops.control_dependencies([rms_t]): rms_t = self._resource_scatter_add(rms, indices, rms_scaled_g_values) rms_slice = array_ops.gather(rms_t, indices) denom_slice = rms_slice if self.centered: mg = self.get_slot(var, "mg") mg_scaled_g_values = grad * (1. - rho) mg_t = state_ops.assign(mg, mg * rho, use_locking=self._use_locking) with ops.control_dependencies([mg_t]): mg_t = self._resource_scatter_add(mg, indices, mg_scaled_g_values) mg_slice = array_ops.gather(mg_t, indices) denom_slice = rms_slice - math_ops.square(mg_slice) var_update = self._resource_scatter_add( var, indices, -lr_t * grad / (math_ops.sqrt(denom_slice) + epsilon_t)) if self.centered: return control_flow_ops.group(*[var_update, rms_t, mg_t]) return control_flow_ops.group(*[var_update, rms_t]) def set_weights(self, weights): params = self.weights # Override set_weights for backward compatibility of Keras V1 optimizer # since it does not include iteration at head of the weight list. Set # iteration to 0. if len(params) == len(weights) + 1: weights = [np.array(0)] + weights super(RMSprop, self).set_weights(weights) def get_config(self): config = super(RMSprop, self).get_config() config.update({ "learning_rate": self._serialize_hyperparameter("learning_rate"), "decay": self._serialize_hyperparameter("decay"), "rho": self._serialize_hyperparameter("rho"), "momentum": self._serialize_hyperparameter("momentum"), "epsilon": self.epsilon, "centered": self.centered, }) return config RMSProp = RMSprop
tensorflow-master
tensorflow/python/keras/optimizer_v2/rmsprop.py
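The non-centered, zero-momentum branch of `_resource_apply_dense` above reduces to two lines of arithmetic. The NumPy sketch below applies one such step with made-up numbers, only to make the recurrence concrete; it is not how the optimizer is normally invoked.

```python
# One plain (non-centered, zero-momentum) RMSprop step in NumPy, following
# the dense update above:
#   rms_t = rho * rms + (1 - rho) * grad**2
#   var_t = var - lr * grad / (sqrt(rms_t) + epsilon)
import numpy as np

lr, rho, epsilon = 0.001, 0.9, 1e-7
var = np.array([1.0, 2.0])      # hypothetical parameters
rms = np.zeros_like(var)        # the "rms" slot starts at zero
grad = np.array([0.1, -0.3])    # hypothetical gradient

rms = rho * rms + (1.0 - rho) * grad ** 2
var = var - lr * grad / (np.sqrt(rms) + epsilon)
print(var)  # first step moves each weight by ~lr / sqrt(1 - rho) against grad
```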
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Momentum for TensorFlow.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import ops from tensorflow.python.keras.optimizer_v2 import optimizer_v2 from tensorflow.python.ops import resource_variable_ops from tensorflow.python.training import training_ops from tensorflow.python.util.tf_export import keras_export @keras_export("keras.optimizers.SGD") class SGD(optimizer_v2.OptimizerV2): """Stochastic gradient descent and momentum optimizer. Computes: ``` theta(t+1) = theta(t) - learning_rate * gradient gradient is evaluated at theta(t). ``` or Computes (if `nesterov = False`): ``` v(t+1) = momentum * v(t) - learning_rate * gradient theta(t+1) = theta(t) + v(t+1) if `nesterov` is False, gradient is evaluated at theta(t). if `nesterov` is True, gradient is evaluated at theta(t) + momentum * v(t), and the variables always store theta + m v instead of theta ``` Some of the args below are hyperparameters, where a hyperparameter is defined as a scalar Tensor, a regular Python value, or a callable (which will be evaluated when `apply_gradients` is called) returning a scalar Tensor or a Python value. @compatibility(eager) When eager execution is enabled, learning_rate can be a callable that takes no arguments and returns the actual value to use. This can be useful for changing these values across different invocations of optimizer functions. @end_compatibility # References nesterov = True, See [Sutskever et al., 2013]( http://jmlr.org/proceedings/papers/v28/sutskever13.pdf). """ def __init__(self, learning_rate=0.01, momentum=0.0, nesterov=False, name="SGD", **kwargs): """Construct a new Stochastic Gradient Descent or Momentum optimizer. Arguments: learning_rate: float hyperparameter >= 0. Learning rate. momentum: float hyperparameter >= 0 that accelerates SGD in the relevant direction and dampens oscillations. nesterov: boolean. Whether to apply Nesterov momentum. name: Optional name prefix for the operations created when applying gradients. Defaults to 'SGD'. **kwargs: keyword arguments. Allowed to be {`clipnorm`, `clipvalue`, `lr`, `decay`}. `clipnorm` is clip gradients by norm; `clipvalue` is clip gradients by value, `decay` is included for backward compatibility to allow time inverse decay of learning rate. `lr` is included for backward compatibility, recommended to use `learning_rate` instead. 
""" super(SGD, self).__init__(name, **kwargs) self._set_hyper("learning_rate", kwargs.get("lr", learning_rate)) self._set_hyper("decay", self._initial_decay) self._momentum = False if isinstance(momentum, ops.Tensor) or callable(momentum) or momentum > 0: self._momentum = True if isinstance(momentum, (int, float)) and (momentum < 0 or momentum > 1): raise ValueError("`momentum` must be between [0, 1].") self._set_hyper("momentum", momentum) self.nesterov = nesterov def _create_slots(self, var_list): if self._momentum: for var in var_list: self.add_slot(var, "momentum") def _resource_apply_dense(self, grad, var): var_dtype = var.dtype.base_dtype lr_t = self._decayed_lr_t[var_dtype] if self._momentum: momentum_var = self.get_slot(var, "momentum") return training_ops.resource_apply_keras_momentum( var.handle, momentum_var.handle, lr_t, grad, self._get_hyper("momentum", var_dtype), use_locking=self._use_locking, use_nesterov=self.nesterov) else: return training_ops.resource_apply_gradient_descent( var.handle, lr_t, grad, use_locking=self._use_locking) def _resource_apply_sparse_duplicate_indices(self, grad, var, indices): if self._momentum: return super(SGD, self)._resource_apply_sparse_duplicate_indices( grad, var, indices) else: var_dtype = var.dtype.base_dtype lr_t = self._decayed_lr_t[var_dtype] return resource_variable_ops.resource_scatter_add(var.handle, indices, -grad * lr_t) def _resource_apply_sparse(self, grad, var, indices): # This method is only needed for momentum optimization. var_dtype = var.dtype.base_dtype lr_t = self._decayed_lr_t[var_dtype] momentum_var = self.get_slot(var, "momentum") return training_ops.resource_sparse_apply_keras_momentum( var.handle, momentum_var.handle, lr_t, grad, indices, self._get_hyper("momentum", var_dtype), use_locking=self._use_locking, use_nesterov=self.nesterov) def get_config(self): config = super(SGD, self).get_config() config.update({ "learning_rate": self._serialize_hyperparameter("learning_rate"), "decay": self._serialize_hyperparameter("decay"), "momentum": self._serialize_hyperparameter("momentum"), "nesterov": self.nesterov, }) return config
tensorflow-master
tensorflow/python/keras/optimizer_v2/gradient_descent.py
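As a complement to the SGD docstring above, here is the plain-momentum (`nesterov=False`) recurrence written out in NumPy with made-up gradients; it illustrates the update rule only, not the optimizer API.

```python
# Plain-momentum SGD per the docstring above (nesterov=False):
#   v(t+1)     = momentum * v(t) - learning_rate * gradient
#   theta(t+1) = theta(t) + v(t+1)
import numpy as np

learning_rate, momentum = 0.01, 0.9
theta = np.array([0.5, -0.5])    # hypothetical parameters
velocity = np.zeros_like(theta)

# Two hypothetical gradient evaluations.
for grad in (np.array([0.2, -0.1]), np.array([0.15, -0.05])):
  velocity = momentum * velocity - learning_rate * grad
  theta = theta + velocity

print(theta)  # theta drifts opposite to the persistent gradient direction
```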
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Various learning rate decay functions.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import abc import math from tensorflow.python.framework import constant_op from tensorflow.python.framework import ops from tensorflow.python.keras.utils import generic_utils from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import random_ops from tensorflow.python.util.tf_export import keras_export @keras_export("keras.optimizers.schedules.LearningRateSchedule") class LearningRateSchedule(object): """A serializable learning rate decay schedule. `LearningRateSchedule`s can be passed in as the learning rate of optimizers in `tf.keras.optimizers`. They can be serialized and deserialized using `tf.keras.optimizers.schedules.serialize` and `tf.keras.optimizers.schedules.deserialize`. """ @abc.abstractmethod def __call__(self, step): raise NotImplementedError("Learning rate schedule must override __call__") @abc.abstractmethod def get_config(self): raise NotImplementedError("Learning rate schedule must override get_config") @classmethod def from_config(cls, config): """Instantiates a `LearningRateSchedule` from its config. Args: config: Output of `get_config()`. Returns: A `LearningRateSchedule` instance. """ return cls(**config) @keras_export("keras.optimizers.schedules.ExponentialDecay") class ExponentialDecay(LearningRateSchedule): """A LearningRateSchedule that uses an exponential decay schedule.""" def __init__( self, initial_learning_rate, decay_steps, decay_rate, staircase=False, name=None): """Applies exponential decay to the learning rate. When training a model, it is often recommended to lower the learning rate as the training progresses. This schedule applies an exponential decay function to an optimizer step, given a provided initial learning rate. The schedule a 1-arg callable that produces a decayed learning rate when passed the current optimizer step. This can be useful for changing the learning rate value across different invocations of optimizer functions. It is computed as: ```python def decayed_learning_rate(step): return initial_learning_rate * decay_rate ^ (step / decay_steps) ``` If the argument `staircase` is `True`, then `step / decay_steps` is an integer division and the decayed learning rate follows a staircase function. You can pass this schedule directly into a `tf.keras.optimizers.Optimizer` as the learning rate. 
Example: When fitting a Keras model, decay every 100000 steps with a base of 0.96: ```python initial_learning_rate = 0.1 lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay( initial_learning_rate, decay_steps=100000, decay_rate=0.96, staircase=True) model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=lr_schedule), loss='sparse_categorical_crossentropy', metrics=['accuracy']) model.fit(data, labels, epochs=5) ``` The learning rate schedule is also serializable and deserializable using `tf.keras.optimizers.schedules.serialize` and `tf.keras.optimizers.schedules.deserialize`. Args: initial_learning_rate: A scalar `float32` or `float64` `Tensor` or a Python number. The initial learning rate. decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number. Must be positive. See the decay computation above. decay_rate: A scalar `float32` or `float64` `Tensor` or a Python number. The decay rate. staircase: Boolean. If `True` decay the learning rate at discrete intervals name: String. Optional name of the operation. Defaults to 'ExponentialDecay'. Returns: A 1-arg callable learning rate schedule that takes the current optimizer step and outputs the decayed learning rate, a scalar `Tensor` of the same type as `initial_learning_rate`. """ super(ExponentialDecay, self).__init__() self.initial_learning_rate = initial_learning_rate self.decay_steps = decay_steps self.decay_rate = decay_rate self.staircase = staircase self.name = name def __call__(self, step): with ops.name_scope_v2(self.name or "ExponentialDecay") as name: initial_learning_rate = ops.convert_to_tensor( self.initial_learning_rate, name="initial_learning_rate") dtype = initial_learning_rate.dtype decay_steps = math_ops.cast(self.decay_steps, dtype) decay_rate = math_ops.cast(self.decay_rate, dtype) global_step_recomp = math_ops.cast(step, dtype) p = global_step_recomp / decay_steps if self.staircase: p = math_ops.floor(p) return math_ops.multiply( initial_learning_rate, math_ops.pow(decay_rate, p), name=name) def get_config(self): return { "initial_learning_rate": self.initial_learning_rate, "decay_steps": self.decay_steps, "decay_rate": self.decay_rate, "staircase": self.staircase, "name": self.name } @keras_export("keras.optimizers.schedules.PiecewiseConstantDecay") class PiecewiseConstantDecay(LearningRateSchedule): """A LearningRateSchedule that uses a piecewise constant decay schedule.""" def __init__( self, boundaries, values, name=None): """Piecewise constant from boundaries and interval values. The function returns a 1-arg callable to compute the piecewise constant when passed the current optimizer step. This can be useful for changing the learning rate value across different invocations of optimizer functions. Example: use a learning rate that's 1.0 for the first 100001 steps, 0.5 for the next 10000 steps, and 0.1 for any additional steps. ```python step = tf.Variable(0, trainable=False) boundaries = [100000, 110000] values = [1.0, 0.5, 0.1] learning_rate_fn = keras.optimizers.schedules.PiecewiseConstantDecay( boundaries, values) # Later, whenever we perform an optimization step, we pass in the step. learning_rate = learning_rate_fn(step) ``` You can pass this schedule directly into a `tf.keras.optimizers.Optimizer` as the learning rate. The learning rate schedule is also serializable and deserializable using `tf.keras.optimizers.schedules.serialize` and `tf.keras.optimizers.schedules.deserialize`. 
Args: boundaries: A list of `Tensor`s or `int`s or `float`s with strictly increasing entries, and with all elements having the same type as the optimizer step. values: A list of `Tensor`s or `float`s or `int`s that specifies the values for the intervals defined by `boundaries`. It should have one more element than `boundaries`, and all elements should have the same type. name: A string. Optional name of the operation. Defaults to 'PiecewiseConstant'. Returns: A 1-arg callable learning rate schedule that takes the current optimizer step and outputs the decayed learning rate, a scalar `Tensor` of the same type as the boundary tensors. The output of the 1-arg function that takes the `step` is `values[0]` when `step <= boundaries[0]`, `values[1]` when `step > boundaries[0]` and `step <= boundaries[1]`, ..., and values[-1] when `step > boundaries[-1]`. Raises: ValueError: if the number of elements in the lists do not match. """ super(PiecewiseConstantDecay, self).__init__() if len(boundaries) != len(values) - 1: raise ValueError( "The length of boundaries should be 1 less than the length of values") self.boundaries = boundaries self.values = values self.name = name def __call__(self, step): with ops.name_scope_v2(self.name or "PiecewiseConstant"): boundaries = ops.convert_n_to_tensor(self.boundaries) values = ops.convert_n_to_tensor(self.values) x_recomp = ops.convert_to_tensor(step) for i, b in enumerate(boundaries): if b.dtype.base_dtype != x_recomp.dtype.base_dtype: # We cast the boundaries to have the same type as the step b = math_ops.cast(b, x_recomp.dtype.base_dtype) boundaries[i] = b pred_fn_pairs = [] pred_fn_pairs.append((x_recomp <= boundaries[0], lambda: values[0])) pred_fn_pairs.append((x_recomp > boundaries[-1], lambda: values[-1])) for low, high, v in zip(boundaries[:-1], boundaries[1:], values[1:-1]): # Need to bind v here; can do this with lambda v=v: ... pred = (x_recomp > low) & (x_recomp <= high) pred_fn_pairs.append((pred, lambda v=v: v)) # The default isn't needed here because our conditions are mutually # exclusive and exhaustive, but tf.case requires it. default = lambda: values[0] return control_flow_ops.case(pred_fn_pairs, default, exclusive=True) def get_config(self): return { "boundaries": self.boundaries, "values": self.values, "name": self.name } @keras_export("keras.optimizers.schedules.PolynomialDecay") class PolynomialDecay(LearningRateSchedule): """A LearningRateSchedule that uses a polynomial decay schedule.""" def __init__( self, initial_learning_rate, decay_steps, end_learning_rate=0.0001, power=1.0, cycle=False, name=None): """Applies a polynomial decay to the learning rate. It is commonly observed that a monotonically decreasing learning rate, whose degree of change is carefully chosen, results in a better performing model. This schedule applies a polynomial decay function to an optimizer step, given a provided `initial_learning_rate`, to reach an `end_learning_rate` in the given `decay_steps`. It requires a `step` value to compute the decayed learning rate. You can just pass a TensorFlow variable that you increment at each training step. The schedule is a 1-arg callable that produces a decayed learning rate when passed the current optimizer step. This can be useful for changing the learning rate value across different invocations of optimizer functions. 
It is computed as: ```python def decayed_learning_rate(step): step = min(step, decay_steps) return ((initial_learning_rate - end_learning_rate) * (1 - step / decay_steps) ^ (power) ) + end_learning_rate ``` If `cycle` is True then a multiple of `decay_steps` is used, the first one that is bigger than `step`. ```python def decayed_learning_rate(step): decay_steps = decay_steps * ceil(step / decay_steps) return ((initial_learning_rate - end_learning_rate) * (1 - step / decay_steps) ^ (power) ) + end_learning_rate ``` You can pass this schedule directly into a `tf.keras.optimizers.Optimizer` as the learning rate. Example: Fit a model while decaying from 0.1 to 0.01 in 10000 steps using sqrt (i.e. power=0.5): ```python ... starter_learning_rate = 0.1 end_learning_rate = 0.01 decay_steps = 10000 learning_rate_fn = tf.keras.optimizers.schedules.PolynomialDecay( starter_learning_rate, decay_steps, end_learning_rate, power=0.5) model.compile(optimizer=tf.keras.optimizers.SGD( learning_rate=learning_rate_fn), loss='sparse_categorical_crossentropy', metrics=['accuracy']) model.fit(data, labels, epochs=5) ``` The learning rate schedule is also serializable and deserializable using `tf.keras.optimizers.schedules.serialize` and `tf.keras.optimizers.schedules.deserialize`. Args: initial_learning_rate: A scalar `float32` or `float64` `Tensor` or a Python number. The initial learning rate. decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number. Must be positive. See the decay computation above. end_learning_rate: A scalar `float32` or `float64` `Tensor` or a Python number. The minimal end learning rate. power: A scalar `float32` or `float64` `Tensor` or a Python number. The power of the polynomial. Defaults to linear, 1.0. cycle: A boolean, whether or not it should cycle beyond decay_steps. name: String. Optional name of the operation. Defaults to 'PolynomialDecay'. Returns: A 1-arg callable learning rate schedule that takes the current optimizer step and outputs the decayed learning rate, a scalar `Tensor` of the same type as `initial_learning_rate`. """ super(PolynomialDecay, self).__init__() self.initial_learning_rate = initial_learning_rate self.decay_steps = decay_steps self.end_learning_rate = end_learning_rate self.power = power self.cycle = cycle self.name = name def __call__(self, step): with ops.name_scope_v2(self.name or "PolynomialDecay") as name: initial_learning_rate = ops.convert_to_tensor( self.initial_learning_rate, name="initial_learning_rate") dtype = initial_learning_rate.dtype end_learning_rate = math_ops.cast(self.end_learning_rate, dtype) power = math_ops.cast(self.power, dtype) global_step_recomp = math_ops.cast(step, dtype) decay_steps_recomp = math_ops.cast(self.decay_steps, dtype) if self.cycle: # Find the first multiple of decay_steps that is bigger than # global_step. If global_step is zero set the multiplier to 1 multiplier = control_flow_ops.cond( math_ops.equal(global_step_recomp, 0), lambda: 1.0, lambda: math_ops.ceil(global_step_recomp / self.decay_steps)) decay_steps_recomp = math_ops.multiply(decay_steps_recomp, multiplier) else: # Make sure that the global_step used is not bigger than decay_steps. 
global_step_recomp = math_ops.minimum(global_step_recomp, self.decay_steps) p = math_ops.div(global_step_recomp, decay_steps_recomp) return math_ops.add( math_ops.multiply(initial_learning_rate - end_learning_rate, math_ops.pow(1 - p, power)), end_learning_rate, name=name) def get_config(self): return { "initial_learning_rate": self.initial_learning_rate, "decay_steps": self.decay_steps, "end_learning_rate": self.end_learning_rate, "power": self.power, "cycle": self.cycle, "name": self.name } @keras_export("keras.optimizers.schedules.InverseTimeDecay") class InverseTimeDecay(LearningRateSchedule): """A LearningRateSchedule that uses an inverse time decay schedule.""" def __init__( self, initial_learning_rate, decay_steps, decay_rate, staircase=False, name=None): """Applies inverse time decay to the initial learning rate. When training a model, it is often recommended to lower the learning rate as the training progresses. This schedule applies the inverse decay function to an optimizer step, given a provided initial learning rate. It requires a `step` value to compute the decayed learning rate. You can just pass a TensorFlow variable that you increment at each training step. The schedule a 1-arg callable that produces a decayed learning rate when passed the current optimizer step. This can be useful for changing the learning rate value across different invocations of optimizer functions. It is computed as: ```python def decayed_learning_rate(step): return initial_learning_rate / (1 + decay_rate * step / decay_step) ``` or, if `staircase` is `True`, as: ```python def decayed_learning_rate(step): return initial_learning_rate / (1 + decay_rate * floor(step / decay_step)) ``` You can pass this schedule directly into a `tf.keras.optimizers.Optimizer` as the learning rate. Example: Fit a Keras model when decaying 1/t with a rate of 0.5: ```python ... initial_learning_rate = 0.1 decay_steps = 1.0 decay_rate = 0.5 learning_rate_fn = keras.optimizers.schedules.InverseTimeDecay( initial_learning_rate, global_step, decay_steps, decay_rate) model.compile(optimizer=tf.keras.optimizers.SGD( learning_rate=learning_rate_fn), loss='sparse_categorical_crossentropy', metrics=['accuracy']) model.fit(data, labels, epochs=5) ``` Args: initial_learning_rate: A scalar `float32` or `float64` `Tensor` or a Python number. The initial learning rate. decay_steps: How often to apply decay. decay_rate: A Python number. The decay rate. staircase: Whether to apply decay in a discrete staircase, as opposed to continuous, fashion. name: String. Optional name of the operation. Defaults to 'InverseTimeDecay'. Returns: A 1-arg callable learning rate schedule that takes the current optimizer step and outputs the decayed learning rate, a scalar `Tensor` of the same type as `initial_learning_rate`. 
""" super(InverseTimeDecay, self).__init__() self.initial_learning_rate = initial_learning_rate self.decay_steps = decay_steps self.decay_rate = decay_rate self.staircase = staircase self.name = name def __call__(self, step): with ops.name_scope_v2(self.name or "InverseTimeDecay") as name: initial_learning_rate = ops.convert_to_tensor( self.initial_learning_rate, name="initial_learning_rate") dtype = initial_learning_rate.dtype decay_steps = math_ops.cast(self.decay_steps, dtype) decay_rate = math_ops.cast(self.decay_rate, dtype) global_step_recomp = math_ops.cast(step, dtype) p = global_step_recomp / decay_steps if self.staircase: p = math_ops.floor(p) const = math_ops.cast(constant_op.constant(1), dtype) denom = math_ops.add(const, math_ops.multiply(decay_rate, p)) return math_ops.div(initial_learning_rate, denom, name=name) def get_config(self): return { "initial_learning_rate": self.initial_learning_rate, "decay_steps": self.decay_steps, "decay_rate": self.decay_rate, "staircase": self.staircase, "name": self.name } @keras_export("keras.experimental.CosineDecay") class CosineDecay(LearningRateSchedule): """A LearningRateSchedule that uses a cosine decay schedule.""" def __init__( self, initial_learning_rate, decay_steps, alpha=0.0, name=None): """Applies cosine decay to the learning rate. See [Loshchilov & Hutter, ICLR2016], SGDR: Stochastic Gradient Descent with Warm Restarts. https://arxiv.org/abs/1608.03983 When training a model, it is often recommended to lower the learning rate as the training progresses. This schedule applies a cosine decay function to an optimizer step, given a provided initial learning rate. It requires a `step` value to compute the decayed learning rate. You can just pass a TensorFlow variable that you increment at each training step. The schedule a 1-arg callable that produces a decayed learning rate when passed the current optimizer step. This can be useful for changing the learning rate value across different invocations of optimizer functions. It is computed as: ```python def decayed_learning_rate(step): step = min(step, decay_steps) cosine_decay = 0.5 * (1 + cos(pi * step / decay_steps)) decayed = (1 - alpha) * cosine_decay + alpha return initial_learning_rate * decayed ``` Example usage: ```python decay_steps = 1000 lr_decayed_fn = tf.keras.experimental.CosineDecay( initial_learning_rate, global_step, decay_steps) ``` You can pass this schedule directly into a `tf.keras.optimizers.Optimizer` as the learning rate. The learning rate schedule is also serializable and deserializable using `tf.keras.optimizers.schedules.serialize` and `tf.keras.optimizers.schedules.deserialize`. Args: initial_learning_rate: A scalar `float32` or `float64` Tensor or a Python number. The initial learning rate. decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number. Number of steps to decay over. alpha: A scalar `float32` or `float64` Tensor or a Python number. Minimum learning rate value as a fraction of initial_learning_rate. name: String. Optional name of the operation. Defaults to 'CosineDecay'. Returns: A 1-arg callable learning rate schedule that takes the current optimizer step and outputs the decayed learning rate, a scalar `Tensor` of the same type as `initial_learning_rate`. 
""" super(CosineDecay, self).__init__() self.initial_learning_rate = initial_learning_rate self.decay_steps = decay_steps self.alpha = alpha self.name = name def __call__(self, step): with ops.name_scope_v2(self.name or "CosineDecay"): initial_learning_rate = ops.convert_to_tensor( self.initial_learning_rate, name="initial_learning_rate") dtype = initial_learning_rate.dtype decay_steps = math_ops.cast(self.decay_steps, dtype) global_step_recomp = math_ops.cast(step, dtype) global_step_recomp = math_ops.minimum(global_step_recomp, decay_steps) completed_fraction = global_step_recomp / decay_steps cosine_decayed = 0.5 * (1.0 + math_ops.cos( constant_op.constant(math.pi) * completed_fraction)) decayed = (1 - self.alpha) * cosine_decayed + self.alpha return math_ops.multiply(initial_learning_rate, decayed) def get_config(self): return { "initial_learning_rate": self.initial_learning_rate, "decay_steps": self.decay_steps, "alpha": self.alpha, "name": self.name } @keras_export("keras.experimental.CosineDecayRestarts") class CosineDecayRestarts(LearningRateSchedule): """A LearningRateSchedule that uses a cosine decay schedule with restarts.""" def __init__( self, initial_learning_rate, first_decay_steps, t_mul=2.0, m_mul=1.0, alpha=0.0, name=None): """Applies cosine decay with restarts to the learning rate. See [Loshchilov & Hutter, ICLR2016], SGDR: Stochastic Gradient Descent with Warm Restarts. https://arxiv.org/abs/1608.03983 When training a model, it is often recommended to lower the learning rate as the training progresses. This schedule applies a cosine decay function with restarts to an optimizer step, given a provided initial learning rate. It requires a `step` value to compute the decayed learning rate. You can just pass a TensorFlow variable that you increment at each training step. The schedule a 1-arg callable that produces a decayed learning rate when passed the current optimizer step. This can be useful for changing the learning rate value across different invocations of optimizer functions. The learning rate multiplier first decays from 1 to `alpha` for `first_decay_steps` steps. Then, a warm restart is performed. Each new warm restart runs for `t_mul` times more steps and with `m_mul` times smaller initial learning rate. Example usage: ```python first_decay_steps = 1000 lr_decayed_fn = ( tf.keras.experimental.CosineDecayRestarts( initial_learning_rate, global_step, first_decay_steps)) ``` You can pass this schedule directly into a `tf.keras.optimizers.Optimizer` as the learning rate. The learning rate schedule is also serializable and deserializable using `tf.keras.optimizers.schedules.serialize` and `tf.keras.optimizers.schedules.deserialize`. Args: initial_learning_rate: A scalar `float32` or `float64` Tensor or a Python number. The initial learning rate. first_decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number. Number of steps to decay over. t_mul: A scalar `float32` or `float64` `Tensor` or a Python number. Used to derive the number of iterations in the i-th period m_mul: A scalar `float32` or `float64` `Tensor` or a Python number. Used to derive the initial learning rate of the i-th period: alpha: A scalar `float32` or `float64` Tensor or a Python number. Minimum learning rate value as a fraction of the initial_learning_rate. name: String. Optional name of the operation. Defaults to 'SGDRDecay'. 
Returns: A 1-arg callable learning rate schedule that takes the current optimizer step and outputs the decayed learning rate, a scalar `Tensor` of the same type as `initial_learning_rate`. """ super(CosineDecayRestarts, self).__init__() self.initial_learning_rate = initial_learning_rate self.first_decay_steps = first_decay_steps self._t_mul = t_mul self._m_mul = m_mul self.alpha = alpha self.name = name def __call__(self, step): with ops.name_scope_v2(self.name or "SGDRDecay") as name: initial_learning_rate = ops.convert_to_tensor( self.initial_learning_rate, name="initial_learning_rate") dtype = initial_learning_rate.dtype first_decay_steps = math_ops.cast(self.first_decay_steps, dtype) alpha = math_ops.cast(self.alpha, dtype) t_mul = math_ops.cast(self._t_mul, dtype) m_mul = math_ops.cast(self._m_mul, dtype) global_step_recomp = math_ops.cast(step, dtype) completed_fraction = global_step_recomp / first_decay_steps def compute_step(completed_fraction, geometric=False): """Helper for `cond` operation.""" if geometric: i_restart = math_ops.floor( math_ops.log(1.0 - completed_fraction * (1.0 - t_mul)) / math_ops.log(t_mul)) sum_r = (1.0 - t_mul**i_restart) / (1.0 - t_mul) completed_fraction = (completed_fraction - sum_r) / t_mul**i_restart else: i_restart = math_ops.floor(completed_fraction) completed_fraction -= i_restart return i_restart, completed_fraction i_restart, completed_fraction = control_flow_ops.cond( math_ops.equal(t_mul, 1.0), lambda: compute_step(completed_fraction, geometric=False), lambda: compute_step(completed_fraction, geometric=True)) m_fac = m_mul**i_restart cosine_decayed = 0.5 * m_fac * (1.0 + math_ops.cos( constant_op.constant(math.pi) * completed_fraction)) decayed = (1 - alpha) * cosine_decayed + alpha return math_ops.multiply(initial_learning_rate, decayed, name=name) def get_config(self): return { "initial_learning_rate": self.initial_learning_rate, "first_decay_steps": self.first_decay_steps, "t_mul": self._t_mul, "m_mul": self._m_mul, "alpha": self.alpha, "name": self.name } @keras_export("keras.experimental.LinearCosineDecay") class LinearCosineDecay(LearningRateSchedule): """A LearningRateSchedule that uses a linear cosine decay schedule.""" def __init__( self, initial_learning_rate, decay_steps, num_periods=0.5, alpha=0.0, beta=0.001, name=None): """Applies linear cosine decay to the learning rate. See [Bello et al., ICML2017] Neural Optimizer Search with RL. https://arxiv.org/abs/1709.07417 For the idea of warm starts here controlled by `num_periods`, see [Loshchilov & Hutter, ICLR2016] SGDR: Stochastic Gradient Descent with Warm Restarts. https://arxiv.org/abs/1608.03983 Note that linear cosine decay is more aggressive than cosine decay and larger initial learning rates can typically be used. When training a model, it is often recommended to lower the learning rate as the training progresses. This schedule applies a linear cosine decay function to an optimizer step, given a provided initial learning rate. It requires a `step` value to compute the decayed learning rate. You can just pass a TensorFlow variable that you increment at each training step. The schedule is a 1-arg callable that produces a decayed learning rate when passed the current optimizer step. This can be useful for changing the learning rate value across different invocations of optimizer functions.
It is computed as: ```python def decayed_learning_rate(step): step = min(step, decay_steps) linear_decay = (decay_steps - step) / decay_steps cosine_decay = 0.5 * ( 1 + cos(pi * 2 * num_periods * step / decay_steps)) decayed = (alpha + linear_decay) * cosine_decay + beta return initial_learning_rate * decayed ``` Example usage: ```python decay_steps = 1000 lr_decayed_fn = ( tf.keras.experimental.LinearCosineDecay( initial_learning_rate, decay_steps)) ``` You can pass this schedule directly into a `tf.keras.optimizers.Optimizer` as the learning rate. The learning rate schedule is also serializable and deserializable using `tf.keras.optimizers.schedules.serialize` and `tf.keras.optimizers.schedules.deserialize`. Args: initial_learning_rate: A scalar `float32` or `float64` Tensor or a Python number. The initial learning rate. decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number. Number of steps to decay over. num_periods: Number of periods in the cosine part of the decay. See computation above. alpha: See computation above. beta: See computation above. name: String. Optional name of the operation. Defaults to 'LinearCosineDecay'. Returns: A 1-arg callable learning rate schedule that takes the current optimizer step and outputs the decayed learning rate, a scalar `Tensor` of the same type as `initial_learning_rate`. """ super(LinearCosineDecay, self).__init__() self.initial_learning_rate = initial_learning_rate self.decay_steps = decay_steps self.num_periods = num_periods self.alpha = alpha self.beta = beta self.name = name def __call__(self, step): with ops.name_scope_v2(self.name or "LinearCosineDecay") as name: initial_learning_rate = ops.convert_to_tensor( self.initial_learning_rate, name="initial_learning_rate") dtype = initial_learning_rate.dtype decay_steps = math_ops.cast(self.decay_steps, dtype) num_periods = math_ops.cast(self.num_periods, dtype) alpha = math_ops.cast(self.alpha, dtype) beta = math_ops.cast(self.beta, dtype) global_step_recomp = math_ops.cast(step, dtype) global_step_recomp = math_ops.minimum(global_step_recomp, decay_steps) linear_decayed = (decay_steps - global_step_recomp) / decay_steps completed_fraction = global_step_recomp / decay_steps fraction = 2.0 * num_periods * completed_fraction cosine_decayed = 0.5 * ( 1.0 + math_ops.cos(constant_op.constant(math.pi) * fraction)) linear_cosine_decayed = (alpha + linear_decayed) * cosine_decayed + beta return math_ops.multiply(initial_learning_rate, linear_cosine_decayed, name=name) def get_config(self): return { "initial_learning_rate": self.initial_learning_rate, "decay_steps": self.decay_steps, "num_periods": self.num_periods, "alpha": self.alpha, "beta": self.beta, "name": self.name } @keras_export("keras.experimental.NoisyLinearCosineDecay") class NoisyLinearCosineDecay(LearningRateSchedule): """A LearningRateSchedule that uses a noisy linear cosine decay schedule.""" def __init__( self, initial_learning_rate, decay_steps, initial_variance=1.0, variance_decay=0.55, num_periods=0.5, alpha=0.0, beta=0.001, name=None): """Applies noisy linear cosine decay to the learning rate. See [Bello et al., ICML2017] Neural Optimizer Search with RL. https://arxiv.org/abs/1709.07417 For the idea of warm starts here controlled by `num_periods`, see [Loshchilov & Hutter, ICLR2016] SGDR: Stochastic Gradient Descent with Warm Restarts. https://arxiv.org/abs/1608.03983 Note that linear cosine decay is more aggressive than cosine decay and larger initial learning rates can typically be used.
When training a model, it is often recommended to lower the learning rate as the training progresses. This schedule applies a noisy linear cosine decay function to an optimizer step, given a provided initial learning rate. It requires a `step` value to compute the decayed learning rate. You can just pass a TensorFlow variable that you increment at each training step. The schedule is a 1-arg callable that produces a decayed learning rate when passed the current optimizer step. This can be useful for changing the learning rate value across different invocations of optimizer functions. It is computed as: ```python def decayed_learning_rate(step): step = min(step, decay_steps) linear_decay = (decay_steps - step) / decay_steps cosine_decay = 0.5 * ( 1 + cos(pi * 2 * num_periods * step / decay_steps)) decayed = (alpha + linear_decay + eps_t) * cosine_decay + beta return initial_learning_rate * decayed ``` where eps_t is 0-centered Gaussian noise with variance initial_variance / (1 + step) ** variance_decay Example usage: ```python decay_steps = 1000 lr_decayed_fn = ( tf.keras.experimental.NoisyLinearCosineDecay( initial_learning_rate, decay_steps)) ``` You can pass this schedule directly into a `tf.keras.optimizers.Optimizer` as the learning rate. The learning rate schedule is also serializable and deserializable using `tf.keras.optimizers.schedules.serialize` and `tf.keras.optimizers.schedules.deserialize`. Args: initial_learning_rate: A scalar `float32` or `float64` Tensor or a Python number. The initial learning rate. decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number. Number of steps to decay over. initial_variance: initial variance for the noise. See computation above. variance_decay: decay for the noise's variance. See computation above. num_periods: Number of periods in the cosine part of the decay. See computation above. alpha: See computation above. beta: See computation above. name: String. Optional name of the operation. Defaults to 'NoisyLinearCosineDecay'. Returns: A 1-arg callable learning rate schedule that takes the current optimizer step and outputs the decayed learning rate, a scalar `Tensor` of the same type as `initial_learning_rate`.
""" super(NoisyLinearCosineDecay, self).__init__() self.initial_learning_rate = initial_learning_rate self.decay_steps = decay_steps self.initial_variance = initial_variance self.variance_decay = variance_decay self.num_periods = num_periods self.alpha = alpha self.beta = beta self.name = name def __call__(self, step): with ops.name_scope_v2(self.name or "NoisyLinearCosineDecay") as name: initial_learning_rate = ops.convert_to_tensor( self.initial_learning_rate, name="initial_learning_rate") dtype = initial_learning_rate.dtype decay_steps = math_ops.cast(self.decay_steps, dtype) initial_variance = math_ops.cast(self.initial_variance, dtype) variance_decay = math_ops.cast(self.variance_decay, dtype) num_periods = math_ops.cast(self.num_periods, dtype) alpha = math_ops.cast(self.alpha, dtype) beta = math_ops.cast(self.beta, dtype) global_step_recomp = math_ops.cast(step, dtype) global_step_recomp = math_ops.minimum(global_step_recomp, decay_steps) linear_decayed = (decay_steps - global_step_recomp) / decay_steps variance = initial_variance / ( math_ops.pow(1.0 + global_step_recomp, variance_decay)) std = math_ops.sqrt(variance) noisy_linear_decayed = ( linear_decayed + random_ops.random_normal( linear_decayed.shape, stddev=std)) completed_fraction = global_step_recomp / decay_steps fraction = 2.0 * num_periods * completed_fraction cosine_decayed = 0.5 * ( 1.0 + math_ops.cos(constant_op.constant(math.pi) * fraction)) noisy_linear_cosine_decayed = ( (alpha + noisy_linear_decayed) * cosine_decayed + beta) return math_ops.multiply( initial_learning_rate, noisy_linear_cosine_decayed, name=name) def get_config(self): return { "initial_learning_rate": self.initial_learning_rate, "decay_steps": self.decay_steps, "initial_variance": self.initial_variance, "variance_decay": self.variance_decay, "num_periods": self.num_periods, "alpha": self.alpha, "beta": self.beta, "name": self.name } @keras_export("keras.optimizers.schedules.serialize") def serialize(learning_rate_schedule): return generic_utils.serialize_keras_object(learning_rate_schedule) @keras_export("keras.optimizers.schedules.deserialize") def deserialize(config, custom_objects=None): return generic_utils.deserialize_keras_object( config, module_objects=globals(), custom_objects=custom_objects, printable_module_name="decay")
tensorflow-master
tensorflow/python/keras/optimizer_v2/learning_rate_schedule.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Functional tests for aggregate operations.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import copy import numpy as np from tensorflow.python.eager import context from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.keras.optimizer_v2 import adagrad from tensorflow.python.keras.optimizer_v2 import learning_rate_schedule from tensorflow.python.ops import embedding_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import resource_variable_ops from tensorflow.python.ops import variables from tensorflow.python.platform import test def adagrad_update_numpy(param, accum, g_t, lr=0.001, epsilon=1e-7): accum_t = accum + g_t * g_t param_t = param - lr * g_t / (np.sqrt(accum_t) + epsilon) return param_t, accum_t def sparse_adagrad_update_numpy(param, accum, gindexs, gvalues, lr=0.001, epsilon=1e-7): accum_t = copy.deepcopy(accum) param_t = copy.deepcopy(param) # first loop accumulates repeated indices if necessary. 
for i in range(len(gindexs)): gindex = gindexs[i] gvalue = gvalues[i] accum_t[gindex] = accum_t[gindex] + gvalue * gvalue for i in range(len(gindexs)): gindex = gindexs[i] gvalue = gvalues[i] param_t[gindex] = param_t[gindex] - lr * gvalue / ( np.sqrt(accum_t[gindex]) + epsilon) return param_t, accum_t class AdagradOptimizerTest(test.TestCase): def doTestBasic(self, use_callable_params=False): for dtype in [dtypes.float32, dtypes.float64]: with self.cached_session(): var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype) var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype) grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype) grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype) var0 = resource_variable_ops.ResourceVariable(var0_np) var1 = resource_variable_ops.ResourceVariable(var1_np) grads0 = constant_op.constant(grads0_np) grads1 = constant_op.constant(grads1_np) learning_rate = lambda: 3.0 if not use_callable_params: learning_rate = learning_rate() ada_opt = adagrad.Adagrad(learning_rate) accum0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype) accum1_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype) if not context.executing_eagerly(): ada_update = ada_opt.apply_gradients( zip([grads0, grads1], [var0, var1])) self.evaluate(variables.global_variables_initializer()) # Fetch params to validate initial values v0_val, v1_val = self.evaluate([var0, var1]) self.assertAllClose([1.0, 2.0], v0_val) self.assertAllClose([3.0, 4.0], v1_val) # Run 3 steps of adagrad for _ in range(3): if not context.executing_eagerly(): self.evaluate(ada_update) else: ada_opt.apply_gradients(zip([grads0, grads1], [var0, var1])) var0_np, accum0_np = adagrad_update_numpy(var0_np, accum0_np, grads0_np, 3.0) var1_np, accum1_np = adagrad_update_numpy(var1_np, accum1_np, grads1_np, 3.0) self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0)) self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1)) @test_util.run_in_graph_and_eager_modes(reset_test=True) def testBasic(self): self.doTestBasic() def testBasicCallableParams(self): with context.eager_mode(): self.doTestBasic(use_callable_params=True) def testBasicWithLearningRateDecay(self): for dtype in [dtypes.float32, dtypes.float64]: with self.cached_session(): var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype) var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype) grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype) grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype) var0 = resource_variable_ops.ResourceVariable(var0_np) var1 = resource_variable_ops.ResourceVariable(var1_np) grads0 = constant_op.constant(grads0_np) grads1 = constant_op.constant(grads1_np) learning_rate = 3.0 decay = 0.5 ada_opt = adagrad.Adagrad(learning_rate, decay=decay) accum0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype) accum1_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype) if not context.executing_eagerly(): ada_update = ada_opt.apply_gradients( zip([grads0, grads1], [var0, var1])) self.evaluate(variables.global_variables_initializer()) # Fetch params to validate initial values v0_val, v1_val = self.evaluate([var0, var1]) self.assertAllClose([1.0, 2.0], v0_val) self.assertAllClose([3.0, 4.0], v1_val) # Run 3 steps of adagrad for t in range(3): if not context.executing_eagerly(): self.evaluate(ada_update) else: ada_opt.apply_gradients(zip([grads0, grads1], [var0, var1])) lr_np = learning_rate / (1 + decay * t) var0_np, accum0_np = adagrad_update_numpy(var0_np, accum0_np, grads0_np, lr_np) var1_np, 
accum1_np = adagrad_update_numpy(var1_np, accum1_np, grads1_np, lr_np) self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0)) self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1)) def testBasicWithLearningRateInverseTimeDecay(self): for dtype in [dtypes.float32, dtypes.float64]: with self.cached_session(): var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype) var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype) grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype) grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype) var0 = resource_variable_ops.ResourceVariable(var0_np) var1 = resource_variable_ops.ResourceVariable(var1_np) grads0 = constant_op.constant(grads0_np) grads1 = constant_op.constant(grads1_np) learning_rate = 3.0 decay = 0.5 lr_schedule = learning_rate_schedule.InverseTimeDecay( learning_rate, decay_steps=1.0, decay_rate=decay) ada_opt = adagrad.Adagrad(lr_schedule) accum0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype) accum1_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype) if not context.executing_eagerly(): ada_update = ada_opt.apply_gradients( zip([grads0, grads1], [var0, var1])) self.evaluate(variables.global_variables_initializer()) # Fetch params to validate initial values v0_val, v1_val = self.evaluate([var0, var1]) self.assertAllClose([1.0, 2.0], v0_val) self.assertAllClose([3.0, 4.0], v1_val) # Run 3 steps of adagrad for t in range(3): if not context.executing_eagerly(): self.evaluate(ada_update) else: ada_opt.apply_gradients(zip([grads0, grads1], [var0, var1])) lr_np = learning_rate / (1 + decay * t) var0_np, accum0_np = adagrad_update_numpy(var0_np, accum0_np, grads0_np, lr_np) var1_np, accum1_np = adagrad_update_numpy(var1_np, accum1_np, grads1_np, lr_np) self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0)) self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1)) @test_util.run_deprecated_v1 def testMinimizeSparseResourceVariable(self): for dtype in [dtypes.half, dtypes.float32, dtypes.float64]: with self.cached_session(): var0 = resource_variable_ops.ResourceVariable( [[1.0, 2.0], [3.0, 4.0]], dtype=dtype) x = constant_op.constant([[4.0], [5.0]], dtype=dtype) def loss(): pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x) # pylint: disable=cell-var-from-loop return pred * pred sgd_op = adagrad.Adagrad(1.0).minimize(loss, var_list=[var0]) variables.global_variables_initializer().run() # Fetch params to validate initial values self.assertAllCloseAccordingToType( [[1.0, 2.0], [3.0, 4.0]], var0.eval()) # Run 1 step of sgd sgd_op.run() # Validate updated params self.assertAllCloseAccordingToType( [[0, 1], [3, 4]], var0.eval(), atol=0.01) @test_util.run_deprecated_v1 def testTensorLearningRate(self): for dtype in [dtypes.half, dtypes.float32, dtypes.float64]: with self.cached_session(): var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype) var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype) grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype) grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype) var0 = resource_variable_ops.ResourceVariable(var0_np) var1 = resource_variable_ops.ResourceVariable(var1_np) grads0 = constant_op.constant(grads0_np) grads1 = constant_op.constant(grads1_np) learning_rate = constant_op.constant(3.0) ada_opt = adagrad.Adagrad(learning_rate) ada_update = ada_opt.apply_gradients( zip([grads0, grads1], [var0, var1])) variables.global_variables_initializer().run() # Fetch params to validate initial values 
self.assertAllClose([1.0, 2.0], var0.eval()) self.assertAllClose([3.0, 4.0], var1.eval()) accum0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype) accum1_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype) # Run 3 steps of adagrad for _ in range(3): ada_update.run() var0_np, accum0_np = adagrad_update_numpy(var0_np, accum0_np, grads0_np, learning_rate) var1_np, accum1_np = adagrad_update_numpy(var1_np, accum1_np, grads1_np, learning_rate) self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0)) self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1)) @test_util.run_deprecated_v1 def testSparseBasic(self): for dtype in [dtypes.half, dtypes.float32, dtypes.float64]: with self.cached_session(): var0_np = np.array([1.0, 1.0, 2.0], dtype=dtype.as_numpy_dtype) grads0_np = np.array([0.1, 0, 0.1], dtype=dtype.as_numpy_dtype) var1_np = np.array([3.0, 3.0, 4.0], dtype=dtype.as_numpy_dtype) grads1_np = np.array([0.01, 0, 0.01], dtype=dtype.as_numpy_dtype) var0 = resource_variable_ops.ResourceVariable(var0_np) var1 = resource_variable_ops.ResourceVariable(var1_np) grads0_np_indices = np.array([0, 2], dtype=np.int32) grads0 = ops.IndexedSlices( constant_op.constant(grads0_np[grads0_np_indices]), constant_op.constant(grads0_np_indices), constant_op.constant([3])) grads1_np_indices = np.array([0, 2], dtype=np.int32) grads1 = ops.IndexedSlices( constant_op.constant(grads1_np[grads1_np_indices]), constant_op.constant(grads1_np_indices), constant_op.constant([3])) learning_rate = 3.0 ada_opt = adagrad.Adagrad(learning_rate) ada_update = ada_opt.apply_gradients( zip([grads0, grads1], [var0, var1])) variables.global_variables_initializer().run() # Fetch params to validate initial values self.assertAllClose([1.0, 1.0, 2.0], var0.eval()) self.assertAllClose([3.0, 3.0, 4.0], var1.eval()) accum0_np = np.array([0.1, 0.1, 0.1], dtype=dtype.as_numpy_dtype) accum1_np = np.array([0.1, 0.1, 0.1], dtype=dtype.as_numpy_dtype) # Run 3 step of sgd for _ in range(3): ada_update.run() var0_np, accum0_np = sparse_adagrad_update_numpy( var0_np, accum0_np, grads0_np_indices, grads0_np[grads0_np_indices], learning_rate) var1_np, accum1_np = sparse_adagrad_update_numpy( var1_np, accum1_np, grads1_np_indices, grads1_np[grads1_np_indices], learning_rate) self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0)) self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1)) @test_util.run_deprecated_v1 def testSparseRepeatedIndices(self): for dtype in [dtypes.half, dtypes.float32, dtypes.float64]: with self.cached_session(): var_np = np.array([[1.0], [2.0]], dtype=dtype.as_numpy_dtype) repeated_index_update_var = resource_variable_ops.ResourceVariable( var_np, dtype=dtype) aggregated_update_var = resource_variable_ops.ResourceVariable( var_np, dtype=dtype) grad_repeated_index = ops.IndexedSlices( constant_op.constant( [0.1, 0.1], shape=[2, 1], dtype=dtype), constant_op.constant([1, 1]), constant_op.constant([2, 1])) grad_aggregated = ops.IndexedSlices( constant_op.constant( [0.2], shape=[1, 1], dtype=dtype), constant_op.constant([1]), constant_op.constant([2, 1])) repeated_update = adagrad.Adagrad(3.0).apply_gradients( [(grad_repeated_index, repeated_index_update_var)]) aggregated_update = adagrad.Adagrad(3.0).apply_gradients( [(grad_aggregated, aggregated_update_var)]) variables.global_variables_initializer().run() self.assertAllClose(aggregated_update_var.eval(), repeated_index_update_var.eval()) for _ in range(3): repeated_update.run() aggregated_update.run() 
self.assertAllClose(aggregated_update_var.eval(), repeated_index_update_var.eval()) @test_util.run_deprecated_v1 def testSparseRepeatedIndicesByEmbeddingLookUp(self): for dtype in [dtypes.half, dtypes.float32, dtypes.float64]: with self.cached_session(): var_repeated = resource_variable_ops.ResourceVariable( [1.0, 2.0], dtype=dtype) loss_repeated = lambda: math_ops.reduce_sum( # pylint: disable=g-long-lambda embedding_ops.embedding_lookup(var_repeated, [0, 0])) # pylint: disable=cell-var-from-loop var_aggregated = resource_variable_ops.ResourceVariable( [1.0, 2.0], dtype=dtype) loss_aggregated = lambda: 2 * math_ops.reduce_sum( # pylint: disable=g-long-lambda embedding_ops.embedding_lookup(var_aggregated, [0])) # pylint: disable=cell-var-from-loop update_op_repeated = adagrad.Adagrad(2.0).minimize( loss_repeated, var_list=[var_repeated]) update_op_aggregated = adagrad.Adagrad(2.0).minimize( loss_aggregated, var_list=[var_aggregated]) variables.global_variables_initializer().run() self.assertAllCloseAccordingToType( var_repeated.eval(), var_aggregated.eval()) for _ in range(3): update_op_repeated.run() update_op_aggregated.run() self.assertAllCloseAccordingToType( var_repeated.eval(), var_aggregated.eval()) @test_util.run_deprecated_v1 def testSparseStability(self): for dtype in [dtypes.half]: with self.cached_session(): shape = [1, 6] var0_np = np.array([[ 0.00872496, -0.106952, 0.110467, 0.226505, -0.0147257, -0.0105945 ]], dtype=dtype.as_numpy_dtype) var0 = resource_variable_ops.ResourceVariable(var0_np) grads0_np = np.array([[ -5.91278e-05, 5.31673e-05, -2.5779e-06, 4.29153e-05, -8.4877e-05, -9.48906e-05 ]], dtype=dtype.as_numpy_dtype) grads0 = ops.IndexedSlices( constant_op.constant(grads0_np), constant_op.constant([0]), constant_op.constant(shape)) ada_opt = adagrad.Adagrad(1.0) ada_update = ada_opt.apply_gradients(zip([grads0], [var0])) slot0 = ada_opt.get_slot(var0, "accumulator") init = variables.global_variables_initializer() for _ in range(100): init.run() ada_update.run() self.assertAllCloseAccordingToType( np.array([[0.1, 0.1, 0.1, 0.1, 0.1, 0.1]]), slot0.eval()) self.assertAllCloseAccordingToType( np.array([[ 0.00891194, -0.10712013, 0.11047515, 0.22636929, -0.0144573, -0.01029443 ]]), var0.eval()) @test_util.run_deprecated_v1 def testSharing(self): for dtype in [dtypes.half, dtypes.float32, dtypes.float64]: with self.cached_session(): var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype) grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype) var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype) grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype) var0 = resource_variable_ops.ResourceVariable(var0_np) var1 = resource_variable_ops.ResourceVariable(var1_np) grads0 = constant_op.constant(grads0_np) grads1 = constant_op.constant(grads1_np) learning_rate = 3.0 ada_opt = adagrad.Adagrad(learning_rate) # Apply the optimizer twice. Both applications will use # the same accums. ada_update1 = ada_opt.apply_gradients( zip([grads0, grads1], [var0, var1])) ada_update2 = ada_opt.apply_gradients( zip([grads0, grads1], [var0, var1])) slot0 = ada_opt.get_slot(var0, "accumulator") self.assertEqual(slot0.shape, var0.shape) slot1 = ada_opt.get_slot(var1, "accumulator") self.assertEqual(slot1.shape, var1.shape) variables.global_variables_initializer().run() # Fetch params to validate initial values. self.assertAllClose([1.0, 2.0], var0.eval()) self.assertAllClose([3.0, 4.0], var1.eval()) # Mix the first and the second adagrad for 3 steps. 
ada_update1.run() ada_update2.run() ada_update1.run() accum0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype) accum1_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype) for _ in range(3): var0_np, accum0_np = adagrad_update_numpy(var0_np, accum0_np, grads0_np, learning_rate) var1_np, accum1_np = adagrad_update_numpy(var1_np, accum1_np, grads1_np, learning_rate) self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0)) self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1)) def testConstructAdagradWithLR(self): opt = adagrad.Adagrad(lr=1.0) opt_2 = adagrad.Adagrad(learning_rate=0.1, lr=1.0) opt_3 = adagrad.Adagrad(learning_rate=0.1) self.assertIsInstance(opt.lr, variables.Variable) self.assertIsInstance(opt_2.lr, variables.Variable) self.assertIsInstance(opt_3.lr, variables.Variable) self.evaluate(variables.global_variables_initializer()) self.assertAllClose(self.evaluate(opt.lr), (1.0)) self.assertAllClose(self.evaluate(opt_2.lr), (1.0)) self.assertAllClose(self.evaluate(opt_3.lr), (0.1)) if __name__ == "__main__": test.main()
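# --- Editor's note: hedged illustration, not part of the original test file ---
# A standalone NumPy-only sketch of the Adagrad update rule that
# `adagrad_update_numpy` above encodes: the squared gradient is accumulated and
# the step is scaled by 1 / (sqrt(accumulator) + epsilon). The parameter values
# are arbitrary and chosen only for readability.
import numpy as np

def adagrad_step(param, accum, grad, lr=0.001, epsilon=1e-7):
  """One Adagrad update; mirrors the reference helper used by the tests."""
  accum = accum + grad * grad
  param = param - lr * grad / (np.sqrt(accum) + epsilon)
  return param, accum

param = np.array([1.0, 2.0])
accum = np.full_like(param, 0.1)  # Keras Adagrad initializes accumulators to 0.1
grad = np.array([0.1, 0.1])
for _ in range(3):
  param, accum = adagrad_step(param, accum, grad, lr=3.0)
print(param, accum)  # the accumulator grows by grad**2 each step, shrinking later steps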
tensorflow-master
tensorflow/python/keras/optimizer_v2/adagrad_test.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Functional test for learning rate decay.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import math from absl.testing import parameterized from tensorflow.python.eager import backprop from tensorflow.python.eager import context from tensorflow.python.eager import def_function from tensorflow.python.framework import test_util from tensorflow.python.keras.optimizer_v2 import gradient_descent from tensorflow.python.keras.optimizer_v2 import learning_rate_schedule # Import resource_variable_ops for the variables-to-tensor implicit conversion. from tensorflow.python.ops import resource_variable_ops # pylint: disable=unused-import from tensorflow.python.ops import variables from tensorflow.python.platform import googletest def _maybe_serialized(lr_decay, serialize_and_deserialize): if serialize_and_deserialize: serialized = learning_rate_schedule.serialize(lr_decay) return learning_rate_schedule.deserialize(serialized) else: return lr_decay @parameterized.named_parameters( ("NotSerialized", False), ("Serialized", True)) class LRDecayTestV2(test_util.TensorFlowTestCase, parameterized.TestCase): @test_util.run_in_graph_and_eager_modes def testContinuous(self, serialize): self.evaluate(variables.global_variables_initializer()) step = 5 decayed_lr = learning_rate_schedule.ExponentialDecay(0.05, 10, 0.96) decayed_lr = _maybe_serialized(decayed_lr, serialize) expected = .05 * 0.96**(5.0 / 10.0) self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) @test_util.run_in_graph_and_eager_modes def testStaircase(self, serialize): if context.executing_eagerly(): step = resource_variable_ops.ResourceVariable(0) self.evaluate(variables.global_variables_initializer()) decayed_lr = learning_rate_schedule.ExponentialDecay( .1, 3, 0.96, staircase=True) decayed_lr = _maybe_serialized(decayed_lr, serialize) # No change to learning rate due to staircase expected = .1 self.evaluate(step.assign(1)) self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) expected = .1 self.evaluate(step.assign(2)) self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) # Decayed learning rate expected = .1 * 0.96 ** (100 // 3) self.evaluate(step.assign(100)) self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) @test_util.run_deprecated_v1 def testVariables(self, serialize): step = variables.Variable(1) assign_1 = step.assign(1) assign_2 = step.assign(2) assign_100 = step.assign(100) decayed_lr = learning_rate_schedule.ExponentialDecay( .1, 3, 0.96, staircase=True) decayed_lr = _maybe_serialized(decayed_lr, serialize) self.evaluate(variables.global_variables_initializer()) # No change to learning rate self.evaluate(assign_1.op) self.assertAllClose(self.evaluate(decayed_lr(step)), .1, 1e-6) self.evaluate(assign_2.op) 
self.assertAllClose(self.evaluate(decayed_lr(step)), .1, 1e-6) # Decayed learning rate self.evaluate(assign_100.op) expected = .1 * 0.96**(100 // 3) self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) @test_util.run_in_graph_and_eager_modes def testPiecewiseConstant(self, serialize): x = resource_variable_ops.ResourceVariable(-999) decayed_lr = learning_rate_schedule.PiecewiseConstantDecay( [100, 110, 120], [1.0, 0.1, 0.01, 0.001]) decayed_lr = _maybe_serialized(decayed_lr, serialize) self.evaluate(variables.global_variables_initializer()) self.assertAllClose(self.evaluate(decayed_lr(x)), 1.0, 1e-6) self.evaluate(x.assign(100)) self.assertAllClose(self.evaluate(decayed_lr(x)), 1.0, 1e-6) self.evaluate(x.assign(105)) self.assertAllClose(self.evaluate(decayed_lr(x)), 0.1, 1e-6) self.evaluate(x.assign(110)) self.assertAllClose(self.evaluate(decayed_lr(x)), 0.1, 1e-6) self.evaluate(x.assign(120)) self.assertAllClose(self.evaluate(decayed_lr(x)), 0.01, 1e-6) self.evaluate(x.assign(999)) self.assertAllClose(self.evaluate(decayed_lr(x)), 0.001, 1e-6) def testPiecewiseFunction(self, serialize): del serialize with context.eager_mode(): v = variables.Variable(1.) def loss_fn(): return v * v learning_rate = learning_rate_schedule.PiecewiseConstantDecay( [1.], [1., 0.1]) opt = gradient_descent.SGD(learning_rate=learning_rate) @def_function.function def minimize(): with backprop.GradientTape() as tape: loss = loss_fn() g = tape.gradient(loss, [v]) opt.apply_gradients(list(zip(g, [v]))) minimize() self.assertAllEqual(v.read_value(), -1.0) @test_util.run_in_graph_and_eager_modes def testPiecewiseConstantEdgeCases(self, serialize): # Test casting boundaries from int32 to int64. x_int64 = resource_variable_ops.ResourceVariable( 0, dtype=variables.dtypes.int64) boundaries, values = [1, 2, 3], [0.4, 0.5, 0.6, 0.7] decayed_lr = learning_rate_schedule.PiecewiseConstantDecay( boundaries, values) decayed_lr = _maybe_serialized(decayed_lr, serialize) self.evaluate(variables.global_variables_initializer()) self.assertAllClose(self.evaluate(decayed_lr(x_int64)), 0.4, 1e-6) self.evaluate(x_int64.assign(1)) self.assertAllClose(self.evaluate(decayed_lr(x_int64)), 0.4, 1e-6) self.evaluate(x_int64.assign(2)) self.assertAllClose(self.evaluate(decayed_lr(x_int64)), 0.5, 1e-6) self.evaluate(x_int64.assign(3)) self.assertAllClose(self.evaluate(decayed_lr(x_int64)), 0.6, 1e-6) self.evaluate(x_int64.assign(4)) self.assertAllClose(self.evaluate(decayed_lr(x_int64)), 0.7, 1e-6) @parameterized.named_parameters( ("NotSerialized", False), ("Serialized", True)) class LinearDecayTestV2(test_util.TensorFlowTestCase, parameterized.TestCase): @test_util.run_in_graph_and_eager_modes def testHalfWay(self, serialize): step = 5 lr = 0.05 end_lr = 0.0 decayed_lr = learning_rate_schedule.PolynomialDecay(lr, 10, end_lr) decayed_lr = _maybe_serialized(decayed_lr, serialize) expected = lr * 0.5 self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) @test_util.run_in_graph_and_eager_modes def testEnd(self, serialize): step = 10 lr = 0.05 end_lr = 0.001 decayed_lr = learning_rate_schedule.PolynomialDecay(lr, 10, end_lr) decayed_lr = _maybe_serialized(decayed_lr, serialize) expected = end_lr self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) @test_util.run_in_graph_and_eager_modes def testHalfWayWithEnd(self, serialize): step = 5 lr = 0.05 end_lr = 0.001 decayed_lr = learning_rate_schedule.PolynomialDecay(lr, 10, end_lr) decayed_lr = _maybe_serialized(decayed_lr, serialize) expected = (lr + end_lr) 
* 0.5 self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) @test_util.run_in_graph_and_eager_modes def testBeyondEnd(self, serialize): step = 15 lr = 0.05 end_lr = 0.001 decayed_lr = learning_rate_schedule.PolynomialDecay(lr, 10, end_lr) decayed_lr = _maybe_serialized(decayed_lr, serialize) expected = end_lr self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) @test_util.run_in_graph_and_eager_modes def testBeyondEndWithCycle(self, serialize): step = 15 lr = 0.05 end_lr = 0.001 decayed_lr = learning_rate_schedule.PolynomialDecay( lr, 10, end_lr, cycle=True) decayed_lr = _maybe_serialized(decayed_lr, serialize) expected = (lr - end_lr) * 0.25 + end_lr self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) @parameterized.named_parameters( ("NotSerialized", False), ("Serialized", True)) class SqrtDecayTestV2(test_util.TensorFlowTestCase, parameterized.TestCase): @test_util.run_in_graph_and_eager_modes def testHalfWay(self, serialize): step = 5 lr = 0.05 end_lr = 0.0 power = 0.5 decayed_lr = learning_rate_schedule.PolynomialDecay( lr, 10, end_lr, power=power) decayed_lr = _maybe_serialized(decayed_lr, serialize) expected = lr * 0.5**power self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) @test_util.run_in_graph_and_eager_modes def testEnd(self, serialize): step = 10 lr = 0.05 end_lr = 0.001 power = 0.5 decayed_lr = learning_rate_schedule.PolynomialDecay( lr, 10, end_lr, power=power) decayed_lr = _maybe_serialized(decayed_lr, serialize) expected = end_lr self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) @test_util.run_in_graph_and_eager_modes def testHalfWayWithEnd(self, serialize): step = 5 lr = 0.05 end_lr = 0.001 power = 0.5 decayed_lr = learning_rate_schedule.PolynomialDecay( lr, 10, end_lr, power=power) decayed_lr = _maybe_serialized(decayed_lr, serialize) expected = (lr - end_lr) * 0.5**power + end_lr self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) @test_util.run_in_graph_and_eager_modes def testBeyondEnd(self, serialize): step = 15 lr = 0.05 end_lr = 0.001 power = 0.5 decayed_lr = learning_rate_schedule.PolynomialDecay( lr, 10, end_lr, power=power) decayed_lr = _maybe_serialized(decayed_lr, serialize) expected = end_lr self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) @test_util.run_in_graph_and_eager_modes def testBeyondEndWithCycle(self, serialize): step = 15 lr = 0.05 end_lr = 0.001 power = 0.5 decayed_lr = learning_rate_schedule.PolynomialDecay( lr, 10, end_lr, power=power, cycle=True) decayed_lr = _maybe_serialized(decayed_lr, serialize) expected = (lr - end_lr) * 0.25**power + end_lr self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) @parameterized.named_parameters( ("NotSerialized", False), ("Serialized", True)) class PolynomialDecayTestV2(test_util.TensorFlowTestCase, parameterized.TestCase): @test_util.run_in_graph_and_eager_modes def testBeginWithCycle(self, serialize): lr = 0.001 decay_steps = 10 step = 0 decayed_lr = learning_rate_schedule.PolynomialDecay( lr, decay_steps, cycle=True) decayed_lr = _maybe_serialized(decayed_lr, serialize) expected = lr self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) @parameterized.named_parameters( ("NotSerialized", False), ("Serialized", True)) class InverseDecayTestV2(test_util.TensorFlowTestCase, parameterized.TestCase): @test_util.run_in_graph_and_eager_modes def testDecay(self, serialize): initial_lr = 0.1 k = 10 decay_rate = 0.96 step = resource_variable_ops.ResourceVariable(0) 
decayed_lr = learning_rate_schedule.InverseTimeDecay(initial_lr, k, decay_rate) decayed_lr = _maybe_serialized(decayed_lr, serialize) self.evaluate(variables.global_variables_initializer()) for i in range(k + 1): expected = initial_lr / (1 + i / k * decay_rate) self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) self.evaluate(step.assign_add(1)) @test_util.run_in_graph_and_eager_modes def testStaircase(self, serialize): initial_lr = 0.1 k = 10 decay_rate = 0.96 step = resource_variable_ops.ResourceVariable(0) decayed_lr = learning_rate_schedule.InverseTimeDecay( initial_lr, k, decay_rate, staircase=True) decayed_lr = _maybe_serialized(decayed_lr, serialize) self.evaluate(variables.global_variables_initializer()) for i in range(k + 1): expected = initial_lr / (1 + decay_rate * (i // k)) self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) self.evaluate(step.assign_add(1)) @parameterized.named_parameters( ("NotSerialized", False), ("Serialized", True)) class CosineDecayTestV2(test_util.TensorFlowTestCase, parameterized.TestCase): def np_cosine_decay(self, step, decay_steps, alpha=0.0): step = min(step, decay_steps) completed_fraction = step / decay_steps decay = 0.5 * (1.0 + math.cos(math.pi * completed_fraction)) return (1.0 - alpha) * decay + alpha @test_util.run_in_graph_and_eager_modes def testDecay(self, serialize): num_training_steps = 1000 initial_lr = 1.0 for step in range(0, 1500, 250): decayed_lr = learning_rate_schedule.CosineDecay(initial_lr, num_training_steps) decayed_lr = _maybe_serialized(decayed_lr, serialize) expected = self.np_cosine_decay(step, num_training_steps) self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) @test_util.run_in_graph_and_eager_modes def testAlpha(self, serialize): num_training_steps = 1000 initial_lr = 1.0 alpha = 0.1 for step in range(0, 1500, 250): decayed_lr = learning_rate_schedule.CosineDecay(initial_lr, num_training_steps, alpha) decayed_lr = _maybe_serialized(decayed_lr, serialize) expected = self.np_cosine_decay(step, num_training_steps, alpha) self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) @parameterized.named_parameters( ("NotSerialized", False), ("Serialized", True)) class CosineDecayRestartsTestV2(test_util.TensorFlowTestCase, parameterized.TestCase): def np_cosine_decay_restarts(self, step, decay_steps, t_mul=2.0, m_mul=1.0, alpha=0.0): fac = 1.0 while step >= decay_steps: step -= decay_steps decay_steps *= t_mul fac *= m_mul completed_fraction = step / decay_steps decay = fac * 0.5 * (1.0 + math.cos(math.pi * completed_fraction)) return (1.0 - alpha) * decay + alpha @test_util.run_in_graph_and_eager_modes def testDecay(self, serialize): num_training_steps = 1000 initial_lr = 1.0 for step in range(0, 1500, 250): decayed_lr = learning_rate_schedule.CosineDecayRestarts( initial_lr, num_training_steps) decayed_lr = _maybe_serialized(decayed_lr, serialize) expected = self.np_cosine_decay_restarts(step, num_training_steps) self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) @test_util.run_in_graph_and_eager_modes def testAlpha(self, serialize): num_training_steps = 1000 initial_lr = 1.0 alpha = 0.1 for step in range(0, 1500, 250): decayed_lr = learning_rate_schedule.CosineDecayRestarts( initial_lr, num_training_steps, alpha=alpha) decayed_lr = _maybe_serialized(decayed_lr, serialize) expected = self.np_cosine_decay_restarts( step, num_training_steps, alpha=alpha) self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) 
@test_util.run_in_graph_and_eager_modes def testMMul(self, serialize): num_training_steps = 1000 initial_lr = 1.0 m_mul = 0.9 for step in range(0, 1500, 250): decayed_lr = learning_rate_schedule.CosineDecayRestarts( initial_lr, num_training_steps, m_mul=m_mul) decayed_lr = _maybe_serialized(decayed_lr, serialize) expected = self.np_cosine_decay_restarts( step, num_training_steps, m_mul=m_mul) self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) @test_util.run_in_graph_and_eager_modes def testTMul(self, serialize): num_training_steps = 1000 initial_lr = 1.0 t_mul = 1.0 for step in range(0, 1500, 250): decayed_lr = learning_rate_schedule.CosineDecayRestarts( initial_lr, num_training_steps, t_mul=t_mul) decayed_lr = _maybe_serialized(decayed_lr, serialize) expected = self.np_cosine_decay_restarts( step, num_training_steps, t_mul=t_mul) self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) @parameterized.named_parameters( ("NotSerialized", False), ("Serialized", True)) class LinearCosineDecayTestV2(test_util.TensorFlowTestCase, parameterized.TestCase): def np_linear_cosine_decay(self, step, decay_steps, alpha=0.0, beta=0.001, num_periods=0.5): step = min(step, decay_steps) linear_decayed = float(decay_steps - step) / decay_steps fraction = 2.0 * num_periods * step / float(decay_steps) cosine_decayed = 0.5 * (1.0 + math.cos(math.pi * fraction)) return (alpha + linear_decayed) * cosine_decayed + beta @test_util.run_in_graph_and_eager_modes def testDefaultDecay(self, serialize): num_training_steps = 1000 initial_lr = 1.0 for step in range(0, 1500, 250): decayed_lr = learning_rate_schedule.LinearCosineDecay( initial_lr, num_training_steps) decayed_lr = _maybe_serialized(decayed_lr, serialize) expected = self.np_linear_cosine_decay(step, num_training_steps) self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) @test_util.run_in_graph_and_eager_modes def testNonDefaultDecay(self, serialize): num_training_steps = 1000 initial_lr = 1.0 for step in range(0, 1500, 250): decayed_lr = learning_rate_schedule.LinearCosineDecay( initial_lr, num_training_steps, alpha=0.1, beta=1e-4, num_periods=5) decayed_lr = _maybe_serialized(decayed_lr, serialize) expected = self.np_linear_cosine_decay( step, num_training_steps, alpha=0.1, beta=1e-4, num_periods=5) self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) @parameterized.named_parameters( ("NotSerialized", False), ("Serialized", True)) class NoisyLinearCosineDecayTestV2(test_util.TensorFlowTestCase, parameterized.TestCase): @test_util.run_in_graph_and_eager_modes def testDefaultNoisyLinearCosine(self, serialize): num_training_steps = 1000 initial_lr = 1.0 for step in range(0, 1500, 250): # No numerical check because of noise decayed_lr = learning_rate_schedule.NoisyLinearCosineDecay( initial_lr, num_training_steps) decayed_lr = _maybe_serialized(decayed_lr, serialize) # Cannot be deterministically tested self.evaluate(decayed_lr(step)) @test_util.run_in_graph_and_eager_modes def testNonDefaultNoisyLinearCosine(self, serialize): num_training_steps = 1000 initial_lr = 1.0 for step in range(0, 1500, 250): # No numerical check because of noise decayed_lr = learning_rate_schedule.NoisyLinearCosineDecay( initial_lr, num_training_steps, initial_variance=0.5, variance_decay=0.1, alpha=0.1, beta=1e-4, num_periods=5) decayed_lr = _maybe_serialized(decayed_lr, serialize) # Cannot be deterministically tested self.evaluate(decayed_lr(step)) if __name__ == "__main__": googletest.main()
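# --- Editor's note: hedged illustration, not part of the original test file ---
# The reference functions above re-derive each schedule in plain Python/NumPy.
# This standalone sketch reproduces the cosine-decay-with-restarts reference to
# show how the expected value is obtained for a given step; the constants are
# arbitrary.
import math

def cosine_decay_restarts(step, first_decay_steps, t_mul=2.0, m_mul=1.0,
                          alpha=0.0):
  """Mirror of `np_cosine_decay_restarts` above."""
  fac = 1.0
  decay_steps = first_decay_steps
  while step >= decay_steps:
    step -= decay_steps   # move into the next restart period
    decay_steps *= t_mul  # each period is t_mul times longer
    fac *= m_mul          # and starts m_mul times lower
  completed_fraction = step / decay_steps
  decay = fac * 0.5 * (1.0 + math.cos(math.pi * completed_fraction))
  return (1.0 - alpha) * decay + alpha

# At the start of each period the multiplier jumps back up to fac; within a
# period it follows a half cosine from fac down toward alpha.
for s in (0, 500, 1000, 2000, 2999):
  print(s, cosine_decay_restarts(s, first_decay_steps=1000))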
tensorflow-master
tensorflow/python/keras/optimizer_v2/learning_rate_schedule_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Adamax.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.eager import context from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.keras.optimizer_v2 import adamax from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import resource_variable_ops from tensorflow.python.ops import variables from tensorflow.python.platform import test def adamax_update_numpy(param, g_t, t, m, v, alpha=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8): m_t = beta1 * m + (1 - beta1) * g_t v_t = np.maximum(beta2 * v, np.abs(g_t)) param_t = param - (alpha / (1 - beta1**(t + 1))) * (m_t / (v_t + epsilon)) return param_t, m_t, v_t def adamax_sparse_update_numpy(param, indices, g_t, t, m, v, alpha=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8): m_t, v_t, param_t = np.copy(m), np.copy(v), np.copy(param) m_t_slice = beta1 * m[indices] + (1 - beta1) * g_t v_t_slice = np.maximum(beta2 * v[indices], np.abs(g_t)) param_t_slice = param[indices] - ( (alpha / (1 - beta1**(t + 1))) * (m_t_slice / (v_t_slice + epsilon))) m_t[indices] = m_t_slice v_t[indices] = v_t_slice param_t[indices] = param_t_slice return param_t, m_t, v_t def get_beta_accumulators(opt, dtype): local_step = math_ops.cast(opt.iterations + 1, dtype) beta_1_t = math_ops.cast(opt._get_hyper("beta_1"), dtype) beta_1_power = math_ops.pow(beta_1_t, local_step) return beta_1_power class AdamaxOptimizerTest(test.TestCase): def doTestSparse(self, use_resource=False): for dtype in [dtypes.half, dtypes.float32, dtypes.float64]: with self.cached_session(): # Initialize variables for numpy implementation. 
zero_slots = lambda: np.zeros((3), dtype=dtype.as_numpy_dtype) # pylint: disable=cell-var-from-loop m0, v0, m1, v1 = zero_slots(), zero_slots(), zero_slots(), zero_slots() var0_np = np.array([1.0, 2.0, 3.0], dtype=dtype.as_numpy_dtype) grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype) var1_np = np.array([4.0, 5.0, 6.0], dtype=dtype.as_numpy_dtype) grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype) var0 = resource_variable_ops.ResourceVariable(var0_np) var1 = resource_variable_ops.ResourceVariable(var1_np) grads0_np_indices = np.array([0, 1], dtype=np.int32) grads0 = ops.IndexedSlices( constant_op.constant(grads0_np), constant_op.constant(grads0_np_indices), constant_op.constant([3])) grads1_np_indices = np.array([2, 1], dtype=np.int32) grads1 = ops.IndexedSlices( constant_op.constant(grads1_np), constant_op.constant(grads1_np_indices), constant_op.constant([3])) opt = adamax.Adamax() update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) variables.global_variables_initializer().run() # Fetch params to validate initial values self.assertAllClose([1.0, 2.0, 3.0], var0.eval()) self.assertAllClose([4.0, 5.0, 6.0], var1.eval()) beta1_power = get_beta_accumulators(opt, dtype) # Run 3 steps of Adamax for t in range(3): self.assertAllCloseAccordingToType(0.9**(t + 1), beta1_power.eval()) update.run() var0_np, m0, v0 = adamax_sparse_update_numpy( var0_np, grads0_np_indices, grads0_np, t, m0, v0) var1_np, m1, v1 = adamax_sparse_update_numpy( var1_np, grads1_np_indices, grads1_np, t, m1, v1) # Validate updated params self.assertAllCloseAccordingToType(var0_np, var0.eval()) self.assertAllCloseAccordingToType(var1_np, var1.eval()) @test_util.run_deprecated_v1 def testResourceSparse(self): self.doTestSparse(use_resource=True) @test_util.run_deprecated_v1 def testSparseDevicePlacement(self): for index_dtype in [dtypes.int32, dtypes.int64]: with self.cached_session(force_gpu=test.is_gpu_available()): # If a GPU is available, tests that all optimizer ops can be placed on # it (i.e. they have GPU kernels). 
var = variables.Variable([[1.0], [2.0]]) indices = constant_op.constant([0, 1], dtype=index_dtype) g_sum = lambda: math_ops.reduce_sum(array_ops.gather(var, indices)) # pylint: disable=cell-var-from-loop optimizer = adamax.Adamax(3.0) minimize_op = optimizer.minimize(g_sum, var_list=[var]) variables.global_variables_initializer().run() minimize_op.run() @test_util.run_deprecated_v1 def testSparseRepeatedIndices(self): for dtype in [dtypes.half, dtypes.float32, dtypes.float64]: with self.cached_session(): repeated_index_update_var = variables.Variable( [[1.0], [2.0]], dtype=dtype) aggregated_update_var = variables.Variable( [[1.0], [2.0]], dtype=dtype) grad_repeated_index = ops.IndexedSlices( constant_op.constant( [0.1, 0.1], shape=[2, 1], dtype=dtype), constant_op.constant([1, 1]), constant_op.constant([2, 1])) grad_aggregated = ops.IndexedSlices( constant_op.constant( [0.2], shape=[1, 1], dtype=dtype), constant_op.constant([1]), constant_op.constant([2, 1])) repeated_update = adamax.Adamax().apply_gradients( [(grad_repeated_index, repeated_index_update_var)]) aggregated_update = adamax.Adamax().apply_gradients( [(grad_aggregated, aggregated_update_var)]) variables.global_variables_initializer().run() self.assertAllClose(aggregated_update_var.eval(), repeated_index_update_var.eval()) for _ in range(3): repeated_update.run() aggregated_update.run() self.assertAllClose(aggregated_update_var.eval(), repeated_index_update_var.eval()) @test_util.run_in_graph_and_eager_modes(reset_test=True) def testBasic(self): for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]): with self.session(graph=ops.Graph()): # Initialize variables for numpy implementation. m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0 var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype) grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype) var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype) grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype) var0 = resource_variable_ops.ResourceVariable( var0_np, name="var0_%d" % i) var1 = resource_variable_ops.ResourceVariable( var1_np, name="var1_%d" % i) grads0 = constant_op.constant(grads0_np) grads1 = constant_op.constant(grads1_np) opt = adamax.Adamax() if not context.executing_eagerly(): update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) if not context.executing_eagerly(): self.evaluate(variables.global_variables_initializer()) # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], self.evaluate(var0)) self.assertAllClose([3.0, 4.0], self.evaluate(var1)) # Run 3 steps of Adamax for t in range(3): beta_1_power = get_beta_accumulators(opt, dtype) self.assertAllCloseAccordingToType(0.9**(t + 1), self.evaluate(beta_1_power)) if not context.executing_eagerly(): self.evaluate(update) else: opt.apply_gradients(zip([grads0, grads1], [var0, var1])) var0_np, m0, v0 = adamax_update_numpy(var0_np, grads0_np, t, m0, v0) var1_np, m1, v1 = adamax_update_numpy(var1_np, grads1_np, t, m1, v1) # Validate updated params self.assertAllCloseAccordingToType( var0_np, self.evaluate(var0), rtol=1e-2) self.assertAllCloseAccordingToType( var1_np, self.evaluate(var1), rtol=1e-2) @test_util.run_in_graph_and_eager_modes(reset_test=True) def testBasicWithLearningRateDecay(self): for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]): with self.session(graph=ops.Graph()): # Initialize variables for numpy implementation. 
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0 var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype) grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype) var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype) grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype) var0 = resource_variable_ops.ResourceVariable( var0_np, name="var0_%d" % i) var1 = resource_variable_ops.ResourceVariable( var1_np, name="var1_%d" % i) grads0 = constant_op.constant(grads0_np) grads1 = constant_op.constant(grads1_np) learning_rate = 0.001 decay = 0.002 opt = adamax.Adamax(learning_rate=learning_rate, decay=decay) if not context.executing_eagerly(): update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) if not context.executing_eagerly(): self.evaluate(variables.global_variables_initializer()) # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], self.evaluate(var0)) self.assertAllClose([3.0, 4.0], self.evaluate(var1)) # Run 3 steps of Adamax for t in range(3): beta_1_power = get_beta_accumulators(opt, dtype) self.assertAllCloseAccordingToType(0.9**(t + 1), self.evaluate(beta_1_power)) if not context.executing_eagerly(): self.evaluate(update) else: opt.apply_gradients(zip([grads0, grads1], [var0, var1])) lr = learning_rate / (1 + decay * t) var0_np, m0, v0 = adamax_update_numpy( var0_np, grads0_np, t, m0, v0, alpha=lr) var1_np, m1, v1 = adamax_update_numpy( var1_np, grads1_np, t, m1, v1, alpha=lr) # Validate updated params self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0), rtol=1e-2) self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1), rtol=1e-2) @test_util.run_deprecated_v1 def testTensorLearningRate(self): for dtype in [dtypes.half, dtypes.float32, dtypes.float64]: with self.cached_session(): # Initialize variables for numpy implementation. m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0 var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype) grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype) var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype) grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype) var0 = variables.Variable(var0_np) var1 = variables.Variable(var1_np) grads0 = constant_op.constant(grads0_np) grads1 = constant_op.constant(grads1_np) opt = adamax.Adamax(constant_op.constant(0.001)) update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) variables.global_variables_initializer().run() # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], var0.eval()) self.assertAllClose([3.0, 4.0], var1.eval()) beta1_power = get_beta_accumulators(opt, dtype) # Run 3 steps of Adamax for t in range(3): self.assertAllCloseAccordingToType(0.9**(t + 1), beta1_power.eval()) update.run() var0_np, m0, v0 = adamax_update_numpy(var0_np, grads0_np, t, m0, v0) var1_np, m1, v1 = adamax_update_numpy(var1_np, grads1_np, t, m1, v1) # Validate updated params self.assertAllCloseAccordingToType(var0_np, var0.eval()) self.assertAllCloseAccordingToType(var1_np, var1.eval()) @test_util.run_deprecated_v1 def testSharing(self): for dtype in [dtypes.half, dtypes.float32, dtypes.float64]: with self.cached_session(): # Initialize variables for numpy implementation. 
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0 var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype) grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype) var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype) grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype) var0 = variables.Variable(var0_np) var1 = variables.Variable(var1_np) grads0 = constant_op.constant(grads0_np) grads1 = constant_op.constant(grads1_np) opt = adamax.Adamax() update1 = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) update2 = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) variables.global_variables_initializer().run() beta1_power = get_beta_accumulators(opt, dtype) # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], var0.eval()) self.assertAllClose([3.0, 4.0], var1.eval()) # Run 3 steps of intertwined Adamax1 and Adamax2. for t in range(3): self.assertAllCloseAccordingToType(0.9**(t + 1), beta1_power.eval()) if t % 2 == 0: update1.run() else: update2.run() var0_np, m0, v0 = adamax_update_numpy(var0_np, grads0_np, t, m0, v0) var1_np, m1, v1 = adamax_update_numpy(var1_np, grads1_np, t, m1, v1) # Validate updated params self.assertAllCloseAccordingToType(var0_np, var0.eval()) self.assertAllCloseAccordingToType(var1_np, var1.eval()) def testSlotsUniqueEager(self): with context.eager_mode(): v1 = resource_variable_ops.ResourceVariable(1.) v2 = resource_variable_ops.ResourceVariable(1.) opt = adamax.Adamax(1.) opt.minimize(lambda: v1 + v2, var_list=[v1, v2]) # There should be iteration, and two unique slot variables for v1 and v2. self.assertEqual(5, len(set(opt.variables()))) def testConstructAdamaxWithLR(self): opt = adamax.Adamax(lr=1.0) opt_2 = adamax.Adamax(learning_rate=0.1, lr=1.0) opt_3 = adamax.Adamax(learning_rate=0.1) self.assertIsInstance(opt.lr, variables.Variable) self.assertIsInstance(opt_2.lr, variables.Variable) self.assertIsInstance(opt_3.lr, variables.Variable) self.evaluate(variables.global_variables_initializer()) self.assertAllClose(self.evaluate(opt.lr), (1.0)) self.assertAllClose(self.evaluate(opt_2.lr), (1.0)) self.assertAllClose(self.evaluate(opt_3.lr), (0.1)) if __name__ == "__main__": test.main()
tensorflow-master
tensorflow/python/keras/optimizer_v2/adamax_test.py
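The Adamax tests above drive the optimizer for three steps and compare each variable against a numpy reference helper defined near the top of the test file (not repeated here). The following standalone sketch restates that dense update rule as a reading aid; the function name and default hyperparameters are illustrative and may differ slightly from the helper's exact defaults.

import numpy as np


def adamax_reference_step(param, grad, t, m, v,
                          alpha=0.001, beta1=0.9, beta2=0.999, epsilon=1e-7):
  """One dense Adamax step on numpy arrays; `t` is the zero-based step index."""
  m_t = beta1 * m + (1 - beta1) * grad         # first-moment estimate
  v_t = np.maximum(beta2 * v, np.abs(grad))    # exponentially weighted infinity norm
  param_t = param - (alpha / (1 - beta1**(t + 1))) * m_t / (v_t + epsilon)
  return param_t, m_t, v_t


if __name__ == "__main__":
  var = np.array([1.0, 2.0])
  m = np.zeros_like(var)
  v = np.zeros_like(var)
  for t in range(3):
    var, m, v = adamax_reference_step(var, np.array([0.1, 0.1]), t, m, v)
  print(var)  # each step moves the parameters down by roughly alpha = 0.001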
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Adam.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.eager import context from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.keras import optimizers from tensorflow.python.keras.optimizer_v2 import adam from tensorflow.python.keras.optimizer_v2 import learning_rate_schedule from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import resource_variable_ops from tensorflow.python.ops import variables from tensorflow.python.platform import test def adam_update_numpy(param, g_t, t, m, v, lr=0.001, beta1=0.9, beta2=0.999, epsilon=1e-7): lr_t = lr * np.sqrt(1 - beta2**(t + 1)) / (1 - beta1**(t + 1)) m_t = beta1 * m + (1 - beta1) * g_t v_t = beta2 * v + (1 - beta2) * g_t * g_t param_t = param - lr_t * m_t / (np.sqrt(v_t) + epsilon) return param_t, m_t, v_t def adam_update_numpy_amsgrad(param, g_t, t, m, v, vhat, lr=0.001, beta1=0.9, beta2=0.999, epsilon=1e-7): lr_t = lr * np.sqrt(1 - beta2**(t + 1)) / (1 - beta1**(t + 1)) m_t = beta1 * m + (1 - beta1) * g_t v_t = beta2 * v + (1 - beta2) * g_t * g_t vhat_t = np.maximum(vhat, v_t) param_t = param - lr_t * m_t / (np.sqrt(vhat_t) + epsilon) return param_t, m_t, v_t, vhat_t def adam_sparse_update_numpy_amsgrad(param, indices, g_t, t, m, v, vhat, lr=0.001, beta1=0.9, beta2=0.999, epsilon=1e-7): m_t, v_t, vhat_t, param_t = (np.copy(m), np.copy(v), np.copy(vhat), np.copy(param)) lr_t = lr * np.sqrt(1 - beta2**(t + 1)) / (1 - beta1**(t + 1)) m_t_slice = beta1 * m[indices] + (1 - beta1) * g_t v_t_slice = beta2 * v[indices] + (1 - beta2) * g_t * g_t m_t[indices] = m_t_slice v_t[indices] = v_t_slice v_hat_t = np.maximum(vhat_t, v_t) v_hat_t_slice = v_hat_t[indices] param_t_slice = param[indices] - ( lr_t * (m_t_slice / (np.sqrt(v_hat_t_slice) + epsilon))) param_t[indices] = param_t_slice return param_t, m_t, v_t, vhat_t def get_beta_accumulators(opt, dtype): local_step = math_ops.cast(opt.iterations + 1, dtype) beta_1_t = math_ops.cast(opt._get_hyper("beta_1"), dtype) beta_1_power = math_ops.pow(beta_1_t, local_step) beta_2_t = math_ops.cast(opt._get_hyper("beta_2"), dtype) beta_2_power = math_ops.pow(beta_2_t, local_step) return (beta_1_power, beta_2_power) class AdamOptimizerTest(test.TestCase): @test_util.run_deprecated_v1 def testSparse(self): for dtype in [dtypes.half, dtypes.float32, dtypes.float64]: with self.cached_session(): # Initialize variables for numpy implementation. 
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0 var0_np = np.array([1.0, 1.0, 2.0], dtype=dtype.as_numpy_dtype) grads0_np = np.array([0.1, 0.0, 0.1], dtype=dtype.as_numpy_dtype) var1_np = np.array([3.0, 3.0, 4.0], dtype=dtype.as_numpy_dtype) grads1_np = np.array([0.01, 0.0, 0.01], dtype=dtype.as_numpy_dtype) var0 = resource_variable_ops.ResourceVariable(var0_np) var1 = resource_variable_ops.ResourceVariable(var1_np) grads0_np_indices = np.array([0, 2], dtype=np.int32) grads0 = ops.IndexedSlices( constant_op.constant(grads0_np[grads0_np_indices]), constant_op.constant(grads0_np_indices), constant_op.constant([3])) grads1_np_indices = np.array([0, 2], dtype=np.int32) grads1 = ops.IndexedSlices( constant_op.constant(grads1_np[grads1_np_indices]), constant_op.constant(grads1_np_indices), constant_op.constant([3])) opt = adam.Adam() update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) variables.global_variables_initializer().run() # Fetch params to validate initial values self.assertAllClose([1.0, 1.0, 2.0], self.evaluate(var0)) self.assertAllClose([3.0, 3.0, 4.0], self.evaluate(var1)) beta_1_power, beta_2_power = get_beta_accumulators(opt, dtype) # Run 3 steps of Adam for t in range(3): self.assertAllCloseAccordingToType(0.9**(t + 1), self.evaluate(beta_1_power)) self.assertAllCloseAccordingToType(0.999**(t + 1), self.evaluate(beta_2_power)) update.run() var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0) var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1) # Validate updated params self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0)) self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1)) @test_util.run_deprecated_v1 def testSparseDevicePlacement(self): for index_dtype in [dtypes.int32, dtypes.int64]: with self.cached_session(force_gpu=test.is_gpu_available()): # If a GPU is available, tests that all optimizer ops can be placed on # it (i.e. they have GPU kernels). 
var = variables.Variable([[1.0], [2.0]]) indices = constant_op.constant([0, 1], dtype=index_dtype) g_sum = lambda: math_ops.reduce_sum(array_ops.gather(var, indices)) # pylint: disable=cell-var-from-loop optimizer = adam.Adam(3.0) minimize_op = optimizer.minimize(g_sum, var_list=[var]) variables.global_variables_initializer().run() minimize_op.run() @test_util.run_deprecated_v1 def testSparseRepeatedIndices(self): for dtype in [dtypes.half, dtypes.float32, dtypes.float64]: with self.cached_session(): repeated_index_update_var = variables.Variable( [[1.0], [2.0]], dtype=dtype) aggregated_update_var = variables.Variable( [[1.0], [2.0]], dtype=dtype) grad_repeated_index = ops.IndexedSlices( constant_op.constant( [0.1, 0.1], shape=[2, 1], dtype=dtype), constant_op.constant([1, 1]), constant_op.constant([2, 1])) grad_aggregated = ops.IndexedSlices( constant_op.constant( [0.2], shape=[1, 1], dtype=dtype), constant_op.constant([1]), constant_op.constant([2, 1])) repeated_update = adam.Adam().apply_gradients( [(grad_repeated_index, repeated_index_update_var)]) aggregated_update = adam.Adam().apply_gradients( [(grad_aggregated, aggregated_update_var)]) variables.global_variables_initializer().run() self.assertAllClose(aggregated_update_var.eval(), self.evaluate(repeated_index_update_var)) for _ in range(3): repeated_update.run() aggregated_update.run() self.assertAllClose(aggregated_update_var.eval(), self.evaluate(repeated_index_update_var)) def doTestBasic(self, use_callable_params=False): for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]): with self.session(graph=ops.Graph()): # Initialize variables for numpy implementation. m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0 var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype) grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype) var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype) grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype) var0 = resource_variable_ops.ResourceVariable( var0_np, name="var0_%d" % i) var1 = resource_variable_ops.ResourceVariable( var1_np, name="var1_%d" % i) grads0 = constant_op.constant(grads0_np) grads1 = constant_op.constant(grads1_np) learning_rate = lambda: 0.001 beta1 = lambda: 0.9 beta2 = lambda: 0.999 epsilon = lambda: 1e-8 if not use_callable_params: learning_rate = learning_rate() beta1 = beta1() beta2 = beta2() epsilon = epsilon() opt = adam.Adam(learning_rate=learning_rate) if not context.executing_eagerly(): update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) self.evaluate(variables.global_variables_initializer()) # Run 3 steps of Adam for t in range(3): beta_1_power, beta_2_power = get_beta_accumulators(opt, dtype) self.assertAllCloseAccordingToType(0.9**(t + 1), self.evaluate(beta_1_power)) self.assertAllCloseAccordingToType(0.999**(t + 1), self.evaluate(beta_2_power)) if not context.executing_eagerly(): self.evaluate(update) else: opt.apply_gradients(zip([grads0, grads1], [var0, var1])) var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0) var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1) # Validate updated params self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0)) self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1)) @test_util.run_in_graph_and_eager_modes(reset_test=True) def testResourceBasic(self): self.doTestBasic() def testBasicCallableParams(self): with context.eager_mode(): self.doTestBasic(use_callable_params=True) @test_util.run_in_graph_and_eager_modes(reset_test=True) def 
testBasicWithAmsgrad(self): for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]): with self.session(graph=ops.Graph()): # Initialize variables for numpy implementation. m0, v0, v0hat, m1, v1, v1hat = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype) grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype) var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype) grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype) var0 = resource_variable_ops.ResourceVariable( var0_np, name="var0_%d" % i) var1 = resource_variable_ops.ResourceVariable( var1_np, name="var1_%d" % i) grads0 = constant_op.constant(grads0_np) grads1 = constant_op.constant(grads1_np) opt = adam.Adam(amsgrad=True) if not context.executing_eagerly(): update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) self.evaluate(variables.global_variables_initializer()) # Run 3 steps of Adam for t in range(3): beta_1_power, beta_2_power = get_beta_accumulators(opt, dtype) self.assertAllCloseAccordingToType(0.9**(t + 1), self.evaluate(beta_1_power)) self.assertAllCloseAccordingToType(0.999**(t + 1), self.evaluate(beta_2_power)) if not context.executing_eagerly(): self.evaluate(update) else: opt.apply_gradients(zip([grads0, grads1], [var0, var1])) var0_np, m0, v0, v0hat = adam_update_numpy_amsgrad( var0_np, grads0_np, t, m0, v0, v0hat) var1_np, m1, v1, v1hat = adam_update_numpy_amsgrad( var1_np, grads1_np, t, m1, v1, v1hat) # Validate updated params self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0)) self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1)) @test_util.run_in_graph_and_eager_modes def testSparseWithAmsgrad(self): # dtypes.half does not work on gpu + eager. for dtype in [dtypes.float32, dtypes.float64]: with self.cached_session(): m0 = np.array([[0.0], [0.0]]) v0 = np.array([[0.0], [0.0]]) v0hat = np.array([[0.0], [0.0]]) indices_np = np.array([1]) indices = constant_op.constant(indices_np, dtype=dtypes.int32) var0_np = np.array([[1.0], [2.0]], dtype=dtype.as_numpy_dtype) repeated_index_update_var = variables.Variable(var0_np, dtype=dtype) aggregated_update_var = variables.Variable(var0_np, dtype=dtype) grads0_np = np.array([[0.2]], dtype=dtype.as_numpy_dtype) grad_repeated_index = ops.IndexedSlices( constant_op.constant([0.1, 0.1], shape=[2, 1], dtype=dtype), constant_op.constant([1, 1]), constant_op.constant([2, 1])) grad_aggregated = ops.IndexedSlices(grads0_np, indices, constant_op.constant([2, 1])) opt_repeated = adam.Adam(amsgrad=True) opt_aggregated = adam.Adam(amsgrad=True) if not context.executing_eagerly(): repeated_update = opt_repeated.apply_gradients( [(grad_repeated_index, repeated_index_update_var)]) aggregated_update = opt_aggregated.apply_gradients( [(grad_aggregated, aggregated_update_var)]) self.evaluate(variables.global_variables_initializer()) self.assertAllClose( self.evaluate(aggregated_update_var), self.evaluate(repeated_index_update_var)) for t in range(3): if not context.executing_eagerly(): self.evaluate(repeated_update) self.evaluate(aggregated_update) else: opt_repeated.apply_gradients( [(grad_repeated_index, repeated_index_update_var)]) opt_aggregated.apply_gradients( [(grad_aggregated, aggregated_update_var)]) var0_np, m0, v0, v0hat = adam_sparse_update_numpy_amsgrad( var0_np, indices_np, grads0_np, t, m0, v0, v0hat) # Validate updated params self.assertAllCloseAccordingToType( var0_np, self.evaluate(aggregated_update_var)) self.assertAllCloseAccordingToType( self.evaluate(aggregated_update_var), 
self.evaluate(repeated_index_update_var)) @test_util.run_deprecated_v1 def testBasicWithLearningRateDecay(self): for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]): with self.session(graph=ops.Graph()): # Initialize variables for numpy implementation. m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0 var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype) grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype) var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype) grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype) var0 = resource_variable_ops.ResourceVariable( var0_np, name="var0_%d" % i) var1 = resource_variable_ops.ResourceVariable( var1_np, name="var1_%d" % i) grads0 = constant_op.constant(grads0_np) grads1 = constant_op.constant(grads1_np) learning_rate = 0.001 beta_1 = 0.9 beta_2 = 0.999 epsilon = 1e-7 decay = 0.5 opt = adam.Adam( learning_rate=learning_rate, beta_1=beta_1, beta_2=beta_2, epsilon=epsilon, decay=decay) update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) self.evaluate(variables.global_variables_initializer()) # Run 3 steps of Adam for t in range(3): self.evaluate(update) lr_np = learning_rate / (1 + decay * t) var0_np, m0, v0 = adam_update_numpy( var0_np, grads0_np, t, m0, v0, lr=lr_np) var1_np, m1, v1 = adam_update_numpy( var1_np, grads1_np, t, m1, v1, lr=lr_np) # Validate updated params self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0)) self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1)) @test_util.run_deprecated_v1 def testBasicWithLearningRateInverseTimeDecay(self): for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]): with self.session(graph=ops.Graph()): # Initialize variables for numpy implementation. m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0 var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype) grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype) var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype) grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype) var0 = resource_variable_ops.ResourceVariable( var0_np, name="var0_%d" % i) var1 = resource_variable_ops.ResourceVariable( var1_np, name="var1_%d" % i) grads0 = constant_op.constant(grads0_np) grads1 = constant_op.constant(grads1_np) learning_rate = 0.001 decay = 0.5 lr_schedule = learning_rate_schedule.InverseTimeDecay( learning_rate, decay_steps=1.0, decay_rate=decay) beta_1 = 0.9 beta_2 = 0.999 epsilon = 1e-7 opt = adam.Adam( learning_rate=lr_schedule, beta_1=beta_1, beta_2=beta_2, epsilon=epsilon) update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) self.evaluate(variables.global_variables_initializer()) # Run 3 steps of Adam for t in range(3): self.evaluate(update) lr_np = learning_rate / (1 + decay * t) var0_np, m0, v0 = adam_update_numpy( var0_np, grads0_np, t, m0, v0, lr=lr_np) var1_np, m1, v1 = adam_update_numpy( var1_np, grads1_np, t, m1, v1, lr=lr_np) # Validate updated params self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0)) self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1)) @test_util.run_deprecated_v1 def testTensorLearningRate(self): for dtype in [dtypes.half, dtypes.float32, dtypes.float64]: with self.cached_session(): # Initialize variables for numpy implementation. 
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0 var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype) grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype) var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype) grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype) var0 = variables.Variable(var0_np) var1 = variables.Variable(var1_np) grads0 = constant_op.constant(grads0_np) grads1 = constant_op.constant(grads1_np) opt = adam.Adam(constant_op.constant(0.001)) update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) variables.global_variables_initializer().run() # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], self.evaluate(var0)) self.assertAllClose([3.0, 4.0], self.evaluate(var1)) beta_1_power, beta_2_power = get_beta_accumulators(opt, dtype) # Run 3 steps of Adam for t in range(3): self.assertAllCloseAccordingToType(0.9**(t + 1), self.evaluate(beta_1_power)) self.assertAllCloseAccordingToType(0.999**(t + 1), self.evaluate(beta_2_power)) update.run() var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0) var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1) # Validate updated params self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0)) self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1)) @test_util.run_deprecated_v1 def testSharing(self): for dtype in [dtypes.half, dtypes.float32, dtypes.float64]: with self.cached_session(): # Initialize variables for numpy implementation. m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0 var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype) grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype) var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype) grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype) var0 = variables.Variable(var0_np) var1 = variables.Variable(var1_np) grads0 = constant_op.constant(grads0_np) grads1 = constant_op.constant(grads1_np) opt = adam.Adam() update1 = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) update2 = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) variables.global_variables_initializer().run() beta_1_power, beta_2_power = get_beta_accumulators(opt, dtype) # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], self.evaluate(var0)) self.assertAllClose([3.0, 4.0], self.evaluate(var1)) # Run 3 steps of intertwined Adam1 and Adam2. for t in range(3): self.assertAllCloseAccordingToType(0.9**(t + 1), self.evaluate(beta_1_power)) self.assertAllCloseAccordingToType(0.999**(t + 1), self.evaluate(beta_2_power)) if t % 2 == 0: update1.run() else: update2.run() var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0) var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1) # Validate updated params self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0)) self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1)) def testSlotsUniqueEager(self): with context.eager_mode(): v1 = resource_variable_ops.ResourceVariable(1.) v2 = resource_variable_ops.ResourceVariable(1.) opt = adam.Adam(1.) opt.minimize(lambda: v1 + v2, var_list=[v1, v2]) # There should be iteration, and two unique slot variables for v1 and v2. 
self.assertEqual(5, len(set(opt.variables()))) self.assertEqual( self.evaluate(opt.variables()[0]), self.evaluate(opt.iterations)) def testSetWeightsFromV1AdamWithoutMinimize(self): keras_v1_adam = optimizers.Adam() keras_v2_adam = adam.Adam() keras_v2_adam.set_weights(keras_v1_adam.get_weights()) keras_v1_iteration = keras_v1_adam.iterations keras_v2_iteration = keras_v2_adam.iterations self.evaluate(variables.global_variables_initializer()) self.assertEqual( self.evaluate(keras_v1_iteration), self.evaluate(keras_v2_iteration)) def testConstructAdamWithLR(self): opt = adam.Adam(lr=1.0) opt_2 = adam.Adam(learning_rate=0.1, lr=1.0) opt_3 = adam.Adam(learning_rate=0.1) self.assertIsInstance(opt.lr, variables.Variable) self.assertIsInstance(opt_2.lr, variables.Variable) self.assertIsInstance(opt_3.lr, variables.Variable) self.evaluate(variables.global_variables_initializer()) self.assertAllClose(self.evaluate(opt.lr), (1.0)) self.assertAllClose(self.evaluate(opt_2.lr), (1.0)) self.assertAllClose(self.evaluate(opt_3.lr), (0.1)) if __name__ == "__main__": test.main()
tensorflow-master
tensorflow/python/keras/optimizer_v2/adam_test.py
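The Adam tests follow the same pattern: apply gradients for a few steps, then check the variables against `adam_update_numpy`. The snippet below is a compact eager-mode sketch of that check, assuming a TF 2.x-style eager environment and the public `tf.keras.optimizers.Adam` API; the numpy arithmetic is inlined so the sketch is self-contained. It illustrates the pattern and is not a drop-in test.

import numpy as np
import tensorflow as tf

var = tf.Variable([1.0, 2.0])
grad = tf.constant([0.1, 0.1])
opt = tf.keras.optimizers.Adam(learning_rate=0.001)  # beta_1=0.9, beta_2=0.999, epsilon=1e-7

var_np, m, v = np.array([1.0, 2.0]), 0.0, 0.0
g = np.array([0.1, 0.1])
for t in range(3):
  opt.apply_gradients([(grad, var)])
  # Closed-form Adam step with bias correction, mirroring adam_update_numpy.
  lr_t = 0.001 * np.sqrt(1 - 0.999**(t + 1)) / (1 - 0.9**(t + 1))
  m = 0.9 * m + 0.1 * g
  v = 0.999 * v + 0.001 * g * g
  var_np = var_np - lr_t * m / (np.sqrt(v) + 1e-7)
  np.testing.assert_allclose(var.numpy(), var_np, rtol=1e-5, atol=1e-6)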
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Functional test for GradientDescent.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.eager import backprop from tensorflow.python.eager import context from tensorflow.python.eager import function from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.keras.optimizer_v2 import gradient_descent from tensorflow.python.keras.optimizer_v2 import learning_rate_schedule from tensorflow.python.ops import array_ops from tensorflow.python.ops import embedding_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import resource_variable_ops from tensorflow.python.ops import variables from tensorflow.python.platform import test class GradientDescentOptimizerTest(test.TestCase): @test_util.run_in_graph_and_eager_modes def testBasic(self): for dtype in [dtypes.half, dtypes.float32, dtypes.float64]: var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype) var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype) grads0 = constant_op.constant([0.1, 0.1], dtype=dtype) grads1 = constant_op.constant([0.01, 0.01], dtype=dtype) sgd = gradient_descent.SGD(3.0) sgd_op = sgd.apply_gradients(zip([grads0, grads1], [var0, var1])) self.evaluate(variables.global_variables_initializer()) # Run 1 step of sgd self.evaluate(sgd_op) # Validate updated params self.assertAllCloseAccordingToType([1.0 - 3.0 * 0.1, 2.0 - 3.0 * 0.1], self.evaluate(var0)) self.assertAllCloseAccordingToType([3.0 - 3.0 * 0.01, 4.0 - 3.0 * 0.01], self.evaluate(var1)) def _test_basic_sgd_with_learning_rate_decay(self, sgd, dtype): var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype) var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype) grads0 = constant_op.constant([0.1, 0.1], dtype=dtype) grads1 = constant_op.constant([0.01, 0.01], dtype=dtype) if not context.executing_eagerly(): sgd_op = sgd.apply_gradients(zip([grads0, grads1], [var0, var1])) self.evaluate(variables.global_variables_initializer()) # Run 2 steps of sgd if not context.executing_eagerly(): self.evaluate(sgd_op) else: sgd.apply_gradients(zip([grads0, grads1], [var0, var1])) # Validate updated params self.assertAllCloseAccordingToType([1.0 - 3.0 * 0.1, 2.0 - 3.0 * 0.1], self.evaluate(var0)) self.assertAllCloseAccordingToType([3.0 - 3.0 * 0.01, 4.0 - 3.0 * 0.01], self.evaluate(var1)) if not context.executing_eagerly(): self.evaluate(sgd_op) else: sgd.apply_gradients(zip([grads0, grads1], [var0, var1])) # Validate updated params self.assertAllCloseAccordingToType( [1.0 - 3.0 * 0.1 - 2.0 * 0.1, 2.0 - 3.0 * 0.1 - 2.0 * 0.1], self.evaluate(var0)) 
self.assertAllCloseAccordingToType( [3.0 - 3.0 * 0.01 - 2.0 * 0.01, 4.0 - 3.0 * 0.01 - 2.0 * 0.01], self.evaluate(var1)) @test_util.run_in_graph_and_eager_modes def testBasicWithLearningRateDecay(self): for dtype in [dtypes.half, dtypes.float32, dtypes.float64]: learning_rate = 3.0 decay = 0.5 sgd = gradient_descent.SGD(learning_rate=learning_rate, decay=decay) self._test_basic_sgd_with_learning_rate_decay(sgd, dtype) @test_util.run_in_graph_and_eager_modes def testBasicWithLearningRateInverseTimeDecay(self): for dtype in [dtypes.half, dtypes.float32, dtypes.float64]: learning_rate = learning_rate_schedule.InverseTimeDecay( 3.0, decay_steps=1.0, decay_rate=0.5) sgd = gradient_descent.SGD(learning_rate=learning_rate) self._test_basic_sgd_with_learning_rate_decay(sgd, dtype) @test_util.run_in_graph_and_eager_modes def testBasicWithLearningRateInverseTimeDecaySerializeAndDeserialize(self): for dtype in [dtypes.half, dtypes.float32, dtypes.float64]: learning_rate = learning_rate_schedule.InverseTimeDecay( 3.0, decay_steps=1.0, decay_rate=0.5) sgd = gradient_descent.SGD(learning_rate=learning_rate) sgd = gradient_descent.SGD.from_config(sgd.get_config()) self._test_basic_sgd_with_learning_rate_decay(sgd, dtype) @test_util.run_in_graph_and_eager_modes def testBasicCallableParams(self): for dtype in [dtypes.half, dtypes.float32, dtypes.float64]: var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype) var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype) grads0 = constant_op.constant([0.1, 0.1], dtype=dtype) grads1 = constant_op.constant([0.01, 0.01], dtype=dtype) lr = lambda: 3.0 sgd = gradient_descent.SGD(lr) sgd_op = sgd.apply_gradients(zip([grads0, grads1], [var0, var1])) self.evaluate(variables.global_variables_initializer()) # Run 1 step of sgd self.evaluate(sgd_op) # Validate updated params self.assertAllCloseAccordingToType([1.0 - 3.0 * 0.1, 2.0 - 3.0 * 0.1], self.evaluate(var0)) self.assertAllCloseAccordingToType([3.0 - 3.0 * 0.01, 4.0 - 3.0 * 0.01], self.evaluate(var1)) @test_util.run_in_graph_and_eager_modes def testMinimizeResourceVariable(self): for dtype in [dtypes.half, dtypes.float32, dtypes.float64]: var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype) var1 = resource_variable_ops.ResourceVariable([3.0], dtype=dtype) x = constant_op.constant([[4.0], [5.0]], dtype=dtype) loss = lambda: math_ops.matmul(var0, x) + var1 # pylint: disable=cell-var-from-loop sgd = gradient_descent.SGD(1.0) sgd_op = sgd.minimize(loss, [var0, var1]) self.evaluate(variables.global_variables_initializer()) # Run 1 step of sgd self.evaluate(sgd_op) # Validate updated params self.assertAllCloseAccordingToType([[1.0 - 4.0, 2.0 - 5.0]], self.evaluate(var0)) self.assertAllCloseAccordingToType([3.0 - 1.0], self.evaluate(var1)) @test_util.run_deprecated_v1 def testMinimizeSparseResourceVariable(self): for dtype in [dtypes.half, dtypes.float32, dtypes.float64]: var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype) var1 = resource_variable_ops.ResourceVariable([3.0], dtype=dtype) x = constant_op.constant([[4.0], [5.0]], dtype=dtype) def loss(): pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x) # pylint: disable=cell-var-from-loop pred += var1 # pylint: disable=cell-var-from-loop return pred * pred sgd_op = gradient_descent.SGD(1.0).minimize(loss, [var0, var1]) self.evaluate(variables.global_variables_initializer()) # Run 1 step of sgd self.evaluate(sgd_op) # Validate updated params np_pred = 1.0 * 4.0 + 2.0 * 5.0 + 3.0 
np_grad = 2 * np_pred self.assertAllCloseAccordingToType( [[1.0 - np_grad * 4.0, 2.0 - np_grad * 5.0]], self.evaluate(var0)) self.assertAllCloseAccordingToType([3.0 - np_grad], self.evaluate(var1)) def testTensorLearningRate(self): for dtype in [dtypes.half, dtypes.float32, dtypes.float64]: var0 = variables.Variable([1.0, 2.0], dtype=dtype) var1 = variables.Variable([3.0, 4.0], dtype=dtype) grads0 = constant_op.constant([0.1, 0.1], dtype=dtype) grads1 = constant_op.constant([0.01, 0.01], dtype=dtype) lrate = constant_op.constant(3.0) sgd_op = gradient_descent.SGD(lrate).apply_gradients( zip([grads0, grads1], [var0, var1])) self.evaluate(variables.global_variables_initializer()) # Run 1 step of sgd self.evaluate(sgd_op) # Validate updated params self.assertAllCloseAccordingToType([1.0 - 3.0 * 0.1, 2.0 - 3.0 * 0.1], self.evaluate(var0)) self.assertAllCloseAccordingToType([3.0 - 3.0 * 0.01, 4.0 - 3.0 * 0.01], self.evaluate(var1)) @test_util.run_deprecated_v1 def testGradWrtRef(self): for dtype in [dtypes.half, dtypes.float32, dtypes.float64]: opt = gradient_descent.SGD(3.0) values = [1.0, 3.0] vars_ = [variables.Variable([v], dtype=dtype) for v in values] loss = lambda: vars_[0] + vars_[1] # pylint: disable=cell-var-from-loop grads_and_vars = opt._compute_gradients(loss, vars_) self.evaluate(variables.global_variables_initializer()) for grad, _ in grads_and_vars: self.assertAllCloseAccordingToType([1.0], self.evaluate(grad)) @test_util.run_deprecated_v1 def testSparseBasic(self): for dtype in [dtypes.half, dtypes.float32, dtypes.float64]: var0 = variables.Variable([[1.0], [2.0]], dtype=dtype) var1 = variables.Variable([[3.0], [4.0]], dtype=dtype) grads0 = ops.IndexedSlices( constant_op.constant([0.1], shape=[1, 1], dtype=dtype), constant_op.constant([0]), constant_op.constant([2, 1])) grads1 = ops.IndexedSlices( constant_op.constant([0.01], shape=[1, 1], dtype=dtype), constant_op.constant([1]), constant_op.constant([2, 1])) sgd_op = gradient_descent.SGD(3.0).apply_gradients( zip([grads0, grads1], [var0, var1])) self.evaluate(variables.global_variables_initializer()) # Run 1 step of sgd self.evaluate(sgd_op) # Validate updated params self.assertAllCloseAccordingToType([[1.0 - 3.0 * 0.1], [2.0]], self.evaluate(var0)) self.assertAllCloseAccordingToType([[3.0], [4.0 - 3.0 * 0.01]], self.evaluate(var1)) @test_util.run_deprecated_v1 def testSparseBasicWithLearningRateDecay(self): for dtype in [dtypes.half, dtypes.float32, dtypes.float64]: var0 = variables.Variable([[1.0], [2.0]], dtype=dtype) var1 = variables.Variable([[3.0], [4.0]], dtype=dtype) grads0 = ops.IndexedSlices( constant_op.constant([0.1], shape=[1, 1], dtype=dtype), constant_op.constant([0]), constant_op.constant([2, 1])) grads1 = ops.IndexedSlices( constant_op.constant([0.01], shape=[1, 1], dtype=dtype), constant_op.constant([1]), constant_op.constant([2, 1])) sgd_op = gradient_descent.SGD( 3.0, decay=0.5).apply_gradients( zip([grads0, grads1], [var0, var1])) self.evaluate(variables.global_variables_initializer()) # Run 2 steps of sgd self.evaluate(sgd_op) # Validate updated params self.assertAllCloseAccordingToType([[1.0 - 3.0 * 0.1], [2.0]], self.evaluate(var0)) self.assertAllCloseAccordingToType([[3.0], [4.0 - 3.0 * 0.01]], self.evaluate(var1)) self.evaluate(sgd_op) # Validate updated params self.assertAllCloseAccordingToType( [[1.0 - 3.0 * 0.1 - 2.0 * 0.1], [2.0]], self.evaluate(var0)) self.assertAllCloseAccordingToType( [[3.0], [4.0 - 3.0 * 0.01 - 2.0 * 0.01]], self.evaluate(var1)) def 
testCapturingInDefunWhileExecutingEagerly(self): with context.eager_mode(): optimizer = gradient_descent.SGD(1.0) def step(): self.v = resource_variable_ops.ResourceVariable(1.0) with backprop.GradientTape() as tape: loss = self.v**2 grad = tape.gradient(loss, self.v) optimizer.apply_gradients([(grad, self.v)]) return self.v.read_value() compiled_step = function.defun(step) self.assertEqual(float(step()), -1.0) self.assertEqual(float(compiled_step()), -1.0) # This shouldn't fail; in particular, the learning rate tensor should # be an EagerTensor once again, not a graph Tensor. self.assertEqual(float(step()), -1.0) def testConstructSGDWithLR(self): opt = gradient_descent.SGD(lr=1.0) opt_2 = gradient_descent.SGD(learning_rate=0.1, lr=1.0) opt_3 = gradient_descent.SGD(learning_rate=0.1) self.assertIsInstance(opt.lr, variables.Variable) self.assertIsInstance(opt_2.lr, variables.Variable) self.assertIsInstance(opt_3.lr, variables.Variable) self.evaluate(variables.global_variables_initializer()) self.assertAllClose(self.evaluate(opt.lr), (1.0)) self.assertAllClose(self.evaluate(opt_2.lr), (1.0)) self.assertAllClose(self.evaluate(opt_3.lr), (0.1)) class MomentumOptimizerTest(test.TestCase): def _update_nesterov_momentum_numpy(self, var, accum, g, lr, momentum): accum = accum * momentum - g * lr var += (accum * momentum - g * lr) return var, accum @test_util.run_in_graph_and_eager_modes def testBasic(self): for _, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]): var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype, name="var0") var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype, name="var1") grads0 = constant_op.constant([0.1, 0.1], dtype=dtype) grads1 = constant_op.constant([0.01, 0.01], dtype=dtype) learning_rate = 2.0 momentum = 0.9 mom_opt = gradient_descent.SGD( learning_rate=learning_rate, momentum=momentum) # self.assertFalse(mom_opt._initial_decay) mom_update = mom_opt.apply_gradients( zip([grads0, grads1], [var0, var1])) # Check we have slots slot0 = mom_opt.get_slot(var0, "momentum") self.assertEqual(slot0.shape, var0.shape) slot1 = mom_opt.get_slot(var1, "momentum") self.assertEqual(slot1.shape, var1.shape) # Step 1: the momentum accumulators where 0. So we should see a normal # update: v -= grad * learning_rate self.evaluate(variables.global_variables_initializer()) self.evaluate(mom_update) # Check that the momentum accumulators have been updated. self.assertAllCloseAccordingToType( np.array([-0.2, -0.2]), self.evaluate(slot0)) self.assertAllCloseAccordingToType( np.array([-0.02, -0.02]), self.evaluate(slot1)) # Check that the parameters have been updated. self.assertAllCloseAccordingToType( np.array([1.0 - (0.1 * 2.0), 2.0 - (0.1 * 2.0)]), self.evaluate(var0)) self.assertAllCloseAccordingToType( np.array([3.0 - (0.01 * 2.0), 4.0 - (0.01 * 2.0)]), self.evaluate(var1)) # Step 2: the momentum accumulators contain the previous update. self.evaluate(mom_update) if context.executing_eagerly(): mom_opt.apply_gradients(zip([grads0, grads1], [var0, var1])) # Check that the momentum accumulators have been updated. self.assertAllCloseAccordingToType( np.array([(0.9 * (-0.2) - 2.0 * 0.1), (0.9 * (-0.2) - 2.0 * 0.1)]), self.evaluate(slot0)) self.assertAllCloseAccordingToType( np.array([(0.9 * (-0.02) - 2.0 * 0.01), (0.9 * (-0.02) - 2.0 * 0.01)]), self.evaluate(slot1)) # Check that the parameters have been updated. 
self.assertAllCloseAccordingToType( np.array([ 1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0), 2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0) ]), self.evaluate(var0)) self.assertAllCloseAccordingToType( np.array([ 2.98 - ((0.9 * 0.01 + 0.01) * 2.0), 3.98 - ((0.9 * 0.01 + 0.01) * 2.0) ]), self.evaluate(var1)) @test_util.run_deprecated_v1 def testNesterovMomentum(self): for dtype in [dtypes.float32, dtypes.float64]: var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype, name="var0") var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype, name="var1") var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype) var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype) accum0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype) accum1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype) loss = lambda: 5 * var0 * var0 + 3 * var1 # pylint: disable=cell-var-from-loop mom_op = gradient_descent.SGD( learning_rate=2.0, momentum=0.9, nesterov=True) opt_op = mom_op.minimize(loss, [var0, var1]) self.evaluate(variables.global_variables_initializer()) for _ in range(1, 5): self.evaluate(opt_op) var0_np, accum0_np = self._update_nesterov_momentum_numpy( var0_np, accum0_np, var0_np * 10, 2.0, 0.9) var1_np, accum1_np = self._update_nesterov_momentum_numpy( var1_np, accum1_np, 3, 2.0, 0.9) self.assertAllClose(var0_np, self.evaluate(var0)) self.assertAllClose(var1_np, self.evaluate(var1)) @test_util.run_deprecated_v1 def testSparseNesterovMomentum(self): for dtype in [dtypes.float32, dtypes.float64]: with self.cached_session() as sess: var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype) var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype) accum0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype) accum1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype) grads = [] for t in range(1, 5): grads.append(var0_np * 10) var0_np, accum0_np = self._update_nesterov_momentum_numpy( var0_np, accum0_np, var0_np * 10, 2.0, 0.9) var1_np, accum1_np = self._update_nesterov_momentum_numpy( var1_np, accum1_np, 3, 2.0, 0.9) var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype) var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype) accum0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype) accum1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype) var0 = resource_variable_ops.ResourceVariable( var0_np, dtype=dtype, name="var0") var1 = resource_variable_ops.ResourceVariable( var1_np, dtype=dtype, name="var1") mom_op = gradient_descent.SGD( learning_rate=2.0, momentum=0.9, nesterov=True) x_feed = array_ops.placeholder(dtype) y_feed = ops.IndexedSlices(x_feed, constant_op.constant([0, 1]), constant_op.constant([2])) grads_and_vars = [(y_feed, var0), (constant_op.constant([3.0, 3.0], dtype=dtype), var1)] opt_update = mom_op.apply_gradients(grads_and_vars) self.evaluate(variables.global_variables_initializer()) for t in range(1, 5): sess.run(opt_update, feed_dict={x_feed: grads[t - 1]}) var0_np, accum0_np = self._update_nesterov_momentum_numpy( var0_np, accum0_np, var0_np * 10, 2.0, 0.9) var1_np, accum1_np = self._update_nesterov_momentum_numpy( var1_np, accum1_np, 3, 2.0, 0.9) self.assertAllClose(var0_np, self.evaluate(var0)) self.assertAllClose(var1_np, self.evaluate(var1)) @test_util.run_in_graph_and_eager_modes @test_util.run_deprecated_v1 def testMinimizeSparseResourceVariable(self): for dtype in [dtypes.half, dtypes.float32, dtypes.float64]: # This test invokes the ResourceSparseApplyMomentum operation, which # did not have a registered GPU kernel as of April 2018. 
With graph # execution, the placement algorithm notices this and automatically # places the variable in CPU (host) memory. With eager execution, # the variable would be placed in GPU memory if available, which # would then conflict with the future invocation of the # ResourceSparseApplyMomentum operation. # To work around this discrepancy, for now we force the variable # to be placed on CPU. with ops.device("/cpu:0"): var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype) # pylint: disable=cell-var-from-loop def loss(): x = constant_op.constant([[4.0], [5.0]], dtype=dtype) pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x) return pred * pred # pylint: enable=cell-var-from-loop opt = gradient_descent.SGD(learning_rate=1.0, momentum=0.0) sgd_op = opt.minimize(loss, [var0]) self.evaluate(variables.global_variables_initializer()) # Run 1 step of sgd self.evaluate(sgd_op) # Validate updated params self.assertAllCloseAccordingToType([[-111, -138]], self.evaluate(var0)) @test_util.run_in_graph_and_eager_modes(reset_test=True) def testMinimizeWith2DIndicesForEmbeddingLookup(self): # This test invokes the ResourceSparseApplyMomentum operation, which # did not have a registered GPU kernel as of April 2018. With graph # execution, the placement algorithm notices this and automatically # places the variable in CPU (host) memory. With eager execution, # the variable would be placed in GPU memory if available, which # would then conflict with the future invocation of the # ResourceSparseApplyMomentum operation. # To work around this discrepancy, for now we force the variable # to be placed on CPU. with ops.device("/cpu:0"): var0 = resource_variable_ops.ResourceVariable(array_ops.ones([2, 2])) def loss(): return math_ops.reduce_sum(embedding_ops.embedding_lookup(var0, [[1]])) opt = gradient_descent.SGD(learning_rate=1.0, momentum=0.0) sgd_op = opt.minimize(loss, [var0]) self.evaluate(variables.global_variables_initializer()) self.evaluate(sgd_op) self.assertAllCloseAccordingToType([[1, 1], [0, 0]], self.evaluate(var0)) @test_util.run_deprecated_v1 def testTensorLearningRateAndMomentum(self): for dtype in [dtypes.half, dtypes.float32, dtypes.float64]: var0 = variables.Variable([1.0, 2.0], dtype=dtype) var1 = variables.Variable([3.0, 4.0], dtype=dtype) grads0 = constant_op.constant([0.1, 0.1], dtype=dtype) grads1 = constant_op.constant([0.01, 0.01], dtype=dtype) mom_opt = gradient_descent.SGD( learning_rate=constant_op.constant(2.0), momentum=constant_op.constant(0.9)) mom_update = mom_opt.apply_gradients( zip([grads0, grads1], [var0, var1])) self.evaluate(variables.global_variables_initializer()) # Check we have slots slot0 = mom_opt.get_slot(var0, "momentum") self.assertEqual(slot0.shape, var0.shape) slot1 = mom_opt.get_slot(var1, "momentum") self.assertEqual(slot1.shape, var1.shape) # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], self.evaluate(var0)) self.assertAllClose([3.0, 4.0], self.evaluate(var1)) # Step 1: the momentum accumulators where 0. So we should see a normal # update: v -= grad * learning_rate self.evaluate(mom_update) # Check that the momentum accumulators have been updated. self.assertAllCloseAccordingToType( np.array([-0.2, -0.2]), self.evaluate(slot0)) self.assertAllCloseAccordingToType( np.array([-0.02, -0.02]), self.evaluate(slot1)) # Check that the parameters have been updated. 
self.assertAllCloseAccordingToType( np.array([1.0 - (0.1 * 2.0), 2.0 - (0.1 * 2.0)]), self.evaluate(var0)) self.assertAllCloseAccordingToType( np.array([3.0 - (0.01 * 2.0), 4.0 - (0.01 * 2.0)]), self.evaluate(var1)) # Step 2: the momentum accumulators contain the previous update. self.evaluate(mom_update) # Check that the momentum accumulators have been updated. self.assertAllCloseAccordingToType( np.array([(0.9 * (-0.2) - 2.0 * 0.1), (0.9 * (-0.2) - 2.0 * 0.1)]), self.evaluate(slot0)) self.assertAllCloseAccordingToType( np.array([(0.9 * (-0.02) - 2.0 * 0.01), (0.9 * (-0.02) - 2.0 * 0.01)]), self.evaluate(slot1)) # Check that the parameters have been updated. self.assertAllCloseAccordingToType( np.array([ 1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0), 2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0) ]), self.evaluate(var0)) self.assertAllCloseAccordingToType( np.array([ 2.98 - ((0.9 * 0.01 + 0.01) * 2.0), 3.98 - ((0.9 * 0.01 + 0.01) * 2.0) ]), self.evaluate(var1)) @test_util.run_deprecated_v1 def testSparse(self): for dtype in [dtypes.half, dtypes.float32, dtypes.float64]: var0 = variables.Variable(array_ops.zeros([4, 2], dtype=dtype)) var1 = variables.Variable(constant_op.constant(1.0, dtype, [4, 2])) grads0 = ops.IndexedSlices( constant_op.constant([[.1, .1]], dtype=dtype), constant_op.constant([1]), constant_op.constant([4, 2])) grads1 = ops.IndexedSlices( constant_op.constant([[.01, .01], [.01, .01]], dtype=dtype), constant_op.constant([2, 3]), constant_op.constant([4, 2])) mom_opt = gradient_descent.SGD(learning_rate=2.0, momentum=0.9) mom_update = mom_opt.apply_gradients( zip([grads0, grads1], [var0, var1])) self.evaluate(variables.global_variables_initializer()) # Check we have slots slot0 = mom_opt.get_slot(var0, "momentum") self.assertEqual(slot0.shape, var0.shape) slot1 = mom_opt.get_slot(var1, "momentum") self.assertEqual(slot1.shape, var1.shape) # Fetch params to validate initial values self.assertAllClose([0, 0], self.evaluate(var0)[0]) self.assertAllClose([0, 0], self.evaluate(var0)[1]) self.assertAllClose([1, 1], self.evaluate(var1)[2]) # Step 1: the momentum accumulators are 0. So we should see a normal # update: v -= grad * learning_rate self.evaluate(mom_update) # Check that the momentum accumulators have been updated. self.assertAllCloseAccordingToType( np.array([0, 0]), self.evaluate(slot0)[0]) self.assertAllCloseAccordingToType( np.array([-2.0 * .1, -2.0 * .1]), self.evaluate(slot0)[1]) self.assertAllCloseAccordingToType( np.array([-2.0 * .01, -2.0 * .01]), self.evaluate(slot1)[2]) # Check that the parameters have been updated. self.assertAllCloseAccordingToType( np.array([0, 0]), self.evaluate(var0)[0]) self.assertAllCloseAccordingToType( np.array([-(0.1 * 2.0), -(0.1 * 2.0)]), self.evaluate(var0)[1]) self.assertAllCloseAccordingToType( np.array([1.0 - (0.01 * 2.0), 1.0 - (0.01 * 2.0)]), self.evaluate(var1)[2]) # Step 2: the momentum accumulators contain the previous update. self.evaluate(mom_update) # Check that the momentum accumulators have been updated. self.assertAllClose(np.array([0, 0]), self.evaluate(slot0)[0]) self.assertAllCloseAccordingToType( np.array([(0.9 * (-0.2) - 2.0 * 0.1), (0.9 * (-0.2) - 2.0 * 0.1)]), self.evaluate(slot0)[1]) self.assertAllCloseAccordingToType( np.array([(0.9 * (-0.02) - 2.0 * 0.01), (0.9 * (-0.02) - 2.0 * 0.01)]), self.evaluate(slot1)[2]) # Check that the parameters have been updated. 
self.assertAllClose(np.array([0, 0]), self.evaluate(var0)[0]) self.assertAllCloseAccordingToType( np.array([ -(0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0), -(0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0) ]), self.evaluate(var0)[1]) self.assertAllCloseAccordingToType( np.array([ 0.98 - ((0.9 * 0.01 + 0.01) * 2.0), 0.98 - ((0.9 * 0.01 + 0.01) * 2.0) ]), self.evaluate(var1)[2]) @test_util.run_deprecated_v1 def testSharing(self): for dtype in [dtypes.half, dtypes.float32, dtypes.float64]: var0 = variables.Variable([1.0, 2.0], dtype=dtype) var1 = variables.Variable([3.0, 4.0], dtype=dtype) grads0 = constant_op.constant([0.1, 0.1], dtype=dtype) grads1 = constant_op.constant([0.01, 0.01], dtype=dtype) mom_opt = gradient_descent.SGD(learning_rate=2.0, momentum=0.9) mom_update1 = mom_opt.apply_gradients( zip([grads0, grads1], [var0, var1])) mom_update2 = mom_opt.apply_gradients( zip([grads0, grads1], [var0, var1])) self.evaluate(variables.global_variables_initializer()) slot0 = mom_opt.get_slot(var0, "momentum") self.assertEqual(slot0.shape, var0.shape) slot1 = mom_opt.get_slot(var1, "momentum") self.assertEqual(slot1.shape, var1.shape) # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], self.evaluate(var0)) self.assertAllClose([3.0, 4.0], self.evaluate(var1)) # Step 1: the momentum accumulators where 0. So we should see a normal # update: v -= grad * learning_rate self.evaluate(mom_update1) # Check that the momentum accumulators have been updated. self.assertAllCloseAccordingToType( np.array([-0.2, -0.2]), self.evaluate(slot0)) self.assertAllCloseAccordingToType( np.array([-0.02, -0.02]), self.evaluate(slot1)) # Check that the parameters have been updated. self.assertAllCloseAccordingToType( np.array([1.0 - (0.1 * 2.0), 2.0 - (0.1 * 2.0)]), self.evaluate(var0)) self.assertAllCloseAccordingToType( np.array([3.0 - (0.01 * 2.0), 4.0 - (0.01 * 2.0)]), self.evaluate(var1)) # Step 2: the second momentum accumulators contain the previous update. self.evaluate(mom_update2) # Check that the momentum accumulators have been updated. self.assertAllCloseAccordingToType( np.array([(0.9 * (-0.2) - 2.0 * 0.1), (0.9 * (-0.2) - 2.0 * 0.1)]), self.evaluate(slot0)) self.assertAllCloseAccordingToType( np.array([(0.9 * (-0.02) - 2.0 * 0.01), (0.9 * (-0.02) - 2.0 * 0.01)]), self.evaluate(slot1)) # Check that the parameters have been updated. self.assertAllCloseAccordingToType( np.array([ 1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0), 2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0) ]), self.evaluate(var0)) self.assertAllCloseAccordingToType( np.array([ 2.98 - ((0.9 * 0.01 + 0.01) * 2.0), 3.98 - ((0.9 * 0.01 + 0.01) * 2.0) ]), self.evaluate(var1)) @test_util.run_in_graph_and_eager_modes def testConfig(self): opt = gradient_descent.SGD(learning_rate=1.0, momentum=0.9, nesterov=True) config = opt.get_config() opt2 = gradient_descent.SGD.from_config(config) lr = opt.lr lr2 = opt2.lr self.evaluate(variables.global_variables_initializer()) self.assertAllClose(self.evaluate(lr), self.evaluate(lr2)) self.assertAllClose( self.evaluate(opt._get_hyper("momentum")), self.evaluate(opt2._get_hyper("momentum"))) self.assertAllClose( self.evaluate(opt._get_hyper("decay")), self.evaluate(opt2._get_hyper("decay"))) var0 = variables.Variable([[1.0], [2.0]], dtype=dtypes.float32) loss = lambda: 3 * var0 # learning rate variable created when calling minimize. 
opt.minimize(loss, [var0]) self.evaluate(variables.global_variables_initializer()) config = opt.get_config() opt3 = gradient_descent.SGD.from_config(config) lr3 = opt3.lr self.evaluate(variables.global_variables_initializer()) self.assertAllClose(self.evaluate(lr), self.evaluate(lr3)) self.assertAllClose( self.evaluate(opt._get_hyper("momentum")), self.evaluate(opt3._get_hyper("momentum"))) self.assertAllClose( self.evaluate(opt._get_hyper("decay")), self.evaluate(opt3._get_hyper("decay"))) self.assertTrue(opt3.nesterov) def testNesterovWithoutMomentum(self): with self.assertRaisesRegexp(ValueError, "must be between"): gradient_descent.SGD(learning_rate=1.0, momentum=2.0) def testConstructMomentumWithLR(self): opt = gradient_descent.SGD(lr=1.0, momentum=0.9) opt_2 = gradient_descent.SGD(learning_rate=0.1, momentum=0.9, lr=1.0) opt_3 = gradient_descent.SGD(learning_rate=0.1, momentum=0.9) self.assertIsInstance(opt.lr, variables.Variable) self.assertIsInstance(opt_2.lr, variables.Variable) self.assertIsInstance(opt_3.lr, variables.Variable) self.evaluate(variables.global_variables_initializer()) self.assertAllClose(self.evaluate(opt.lr), (1.0)) self.assertAllClose(self.evaluate(opt_2.lr), (1.0)) self.assertAllClose(self.evaluate(opt_3.lr), (0.1)) if __name__ == "__main__": test.main()
tensorflow-master
tensorflow/python/keras/optimizer_v2/gradient_descent_test.py
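MomentumOptimizerTest.testBasic above asserts specific slot and variable values after two steps with learning_rate=2.0, momentum=0.9, and a constant gradient of 0.1. The arithmetic is easy to lose inside the assertions, so here is the same bookkeeping written out in plain numpy; variable names are illustrative only.

import numpy as np

lr, momentum = 2.0, 0.9
g = np.array([0.1, 0.1])
var = np.array([1.0, 2.0])
accum = np.zeros_like(var)

# Step 1: the accumulator starts at zero, so this is a plain SGD step.
accum = momentum * accum - lr * g   # -> [-0.2, -0.2], the slot value checked above
var = var + accum                   # -> [0.8, 1.8], i.e. 1.0 - 0.1 * 2.0 and 2.0 - 0.1 * 2.0

# Step 2: the previous update is carried forward by the accumulator.
accum = momentum * accum - lr * g   # -> 0.9 * (-0.2) - 2.0 * 0.1 = -0.38
var = var + accum                   # -> [0.42, 1.42], matching the step-2 assertion
print(accum, var)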
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Adamax for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.framework import ops
from tensorflow.python.keras import backend_config
from tensorflow.python.keras.optimizer_v2 import optimizer_v2
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.training import training_ops
from tensorflow.python.util.tf_export import keras_export


@keras_export('keras.optimizers.Adamax')
class Adamax(optimizer_v2.OptimizerV2):
  """Optimizer that implements the Adamax algorithm.

  It is a variant of Adam based on the infinity norm. Default parameters
  follow those provided in the paper. Adamax is sometimes superior to Adam,
  especially in models with embeddings.

  References:
    see Section 7 of [Kingma et al., 2014](http://arxiv.org/abs/1412.6980)
    ([pdf](http://arxiv.org/pdf/1412.6980.pdf)).
  """

  def __init__(self,
               learning_rate=0.001,
               beta_1=0.9,
               beta_2=0.999,
               epsilon=1e-7,
               name='Adamax',
               **kwargs):
    """Construct a new Adamax optimizer.

    Initialization:

    ```
    m_0 <- 0 (Initialize the 1st moment vector)
    v_0 <- 0 (Initialize the exponentially weighted infinity norm)
    t <- 0 (Initialize timestep)
    ```

    The update rule for `variable` with gradient `g` uses an optimization
    described at the end of section 7.1 of the paper:

    ```
    t <- t + 1

    m_t <- beta1 * m_{t-1} + (1 - beta1) * g
    v_t <- max(beta2 * v_{t-1}, abs(g))
    variable <- variable - learning_rate / (1 - beta1^t) * m_t / (v_t + epsilon)
    ```

    Similar to `AdamOptimizer`, the epsilon is added for numerical stability
    (especially to get rid of division by zero when `v_t == 0`).

    In contrast to `AdamOptimizer`, the sparse implementation of this algorithm
    (used when the gradient is an IndexedSlices object, typically because of
    `tf.gather` or an embedding lookup in the forward pass) only updates
    variable slices and the corresponding `m_t`, `v_t` terms when that part of
    the variable was used in the forward pass. This means that the sparse
    behavior differs from the dense behavior (similar to some momentum
    implementations which ignore momentum unless a variable slice was actually
    used).

    Args:
      learning_rate: A Tensor or a floating point value. The learning rate.
      beta_1: A float value or a constant float tensor. The exponential decay
        rate for the 1st moment estimates.
      beta_2: A float value or a constant float tensor. The exponential decay
        rate for the exponentially weighted infinity norm.
      epsilon: A small constant for numerical stability.
      name: Optional name for the operations created when applying gradients.
        Defaults to "Adamax".
      **kwargs: keyword arguments. Allowed to be {`clipnorm`, `clipvalue`,
        `lr`, `decay`}.
`clipnorm` is clip gradients by norm; `clipvalue` is clip gradients by value, `decay` is included for backward compatibility to allow time inverse decay of learning rate. `lr` is included for backward compatibility, recommended to use `learning_rate` instead. """ super(Adamax, self).__init__(name, **kwargs) self._set_hyper('learning_rate', kwargs.get('lr', learning_rate)) self._set_hyper('decay', self._initial_decay) self._set_hyper('beta_1', beta_1) self._set_hyper('beta_2', beta_2) self.epsilon = epsilon or backend_config.epsilon() def _create_slots(self, var_list): # Separate for-loops to respect the ordering of slot variables from v1. for var in var_list: self.add_slot(var, 'm') # Create slots for the first moments. for var in var_list: self.add_slot(var, 'v') # Create slots for the second moments. def _resource_apply_dense(self, grad, var): var_dtype = var.dtype.base_dtype lr_t = self._decayed_lr_t[var_dtype] m = self.get_slot(var, 'm') v = self.get_slot(var, 'v') beta_1_t = self._get_hyper('beta_1', var_dtype) beta_2_t = self._get_hyper('beta_2', var_dtype) local_step = math_ops.cast(self.iterations + 1, var_dtype) beta_1_power = math_ops.pow(beta_1_t, local_step) return training_ops.resource_apply_ada_max( var.handle, m.handle, v.handle, beta_1_power, lr_t, beta_1_t, beta_2_t, ops.convert_to_tensor(self.epsilon, var_dtype), grad, use_locking=self._use_locking) def _resource_apply_sparse(self, grad, var, indices): var_dtype = var.dtype.base_dtype lr_t = self._decayed_lr_t[var_dtype] beta_1_t = self._get_hyper('beta_1', var_dtype) beta_2_t = self._get_hyper('beta_2', var_dtype) local_step = math_ops.cast(self.iterations + 1, var_dtype) beta_1_power = math_ops.pow(beta_1_t, local_step) epsilon_t = ops.convert_to_tensor(self.epsilon, var_dtype) # m_t = beta1 * m + (1 - beta1) * g_t m = self.get_slot(var, 'm') m_slice = array_ops.gather(m, indices) m_t_slice = m_slice * beta_1_t + grad * (1 - beta_1_t) with ops.control_dependencies([m_t_slice]): m_t = self._resource_scatter_update(m, indices, m_t_slice) # u_t = max(beta2 * u, abs(g_t)) v = self.get_slot(var, 'v') v_slice = array_ops.gather(v, indices) v_t_slice = math_ops.maximum(v_slice * beta_2_t, math_ops.abs(grad)) with ops.control_dependencies([v_t_slice]): v_t = self._resource_scatter_update(v, indices, v_t_slice) # theta_t = theta - lr / (1 - beta1^t) * m_t / u_t var_slice = -lr_t / (1 - beta_1_power) * ( m_t_slice / (v_t_slice + epsilon_t)) with ops.control_dependencies([var_slice]): var_update = self._resource_scatter_add(var, indices, var_slice) return control_flow_ops.group(*[var_update, m_t, v_t]) def get_config(self): config = super(Adamax, self).get_config() config.update({ 'learning_rate': self._serialize_hyperparameter('learning_rate'), 'decay': self._serialize_hyperparameter('decay'), 'beta_1': self._serialize_hyperparameter('beta_1'), 'beta_2': self._serialize_hyperparameter('beta_2'), 'epsilon': self.epsilon, }) return config
tensorflow-master
tensorflow/python/keras/optimizer_v2/adamax.py
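Editor's note: the Adamax docstring above spells out the update rule (first moment `m_t`, infinity-norm accumulator `v_t`, bias-corrected step). The NumPy sketch below restates that rule as a reference for what the fused `resource_apply_ada_max` op computes; the helper name and sample values are illustrative only, and `t` is assumed to start at 1 as in the docstring.

```python
import numpy as np

def adamax_step(param, grad, m, v, t, lr=0.001, beta1=0.9, beta2=0.999,
                eps=1e-7):
  """One Adamax step following the rule in the docstring (t starts at 1)."""
  m = beta1 * m + (1 - beta1) * grad              # m_t
  v = np.maximum(beta2 * v, np.abs(grad))         # v_t (infinity norm)
  param = param - lr / (1 - beta1**t) * m / (v + eps)
  return param, m, v

param = np.array([1.0, 2.0])
m = np.zeros_like(param)
v = np.zeros_like(param)
grad = np.array([0.1, 0.1])
for t in range(1, 4):
  param, m, v = adamax_step(param, grad, m, v, t)
print(param)
```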
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Ftrl-proximal for TensorFlow.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.keras.optimizer_v2 import optimizer_v2 from tensorflow.python.ops import init_ops from tensorflow.python.ops import math_ops from tensorflow.python.training import training_ops from tensorflow.python.util.tf_export import keras_export @keras_export('keras.optimizers.Ftrl') class Ftrl(optimizer_v2.OptimizerV2): r"""Optimizer that implements the FTRL algorithm. See Algorithm 1 of this [paper]( https://www.eecs.tufts.edu/~dsculley/papers/ad-click-prediction.pdf). This version has support for both online L2 (the L2 penalty given in the paper above) and shrinkage-type L2 (which is the addition of an L2 penalty to the loss function). Initialization: $$t = 0$$ $$n_{0} = 0$$ $$\sigma_{0} = 0$$ $$z_{0} = 0$$ Update ($$i$$ is variable index): $$t = t + 1$$ $$n_{t,i} = n_{t-1,i} + g_{t,i}^{2}$$ $$\sigma_{t,i} = (\sqrt{n_{t,i}} - \sqrt{n_{t-1,i}}) / \alpha$$ $$z_{t,i} = z_{t-1,i} + g_{t,i} - \sigma_{t,i} * w_{t,i}$$ $$w_{t,i} = - ((\beta+\sqrt{n+{t}}) / \alpha + \lambda_{2})^{-1} * (z_{i} - sgn(z_{i}) * \lambda_{1}) if \abs{z_{i}} > \lambda_{i} else 0$$ Check the documentation for the l2_shrinkage_regularization_strength parameter for more details when shrinkage is enabled, where gradient is replaced with gradient_with_shrinkage. """ def __init__(self, learning_rate=0.001, learning_rate_power=-0.5, initial_accumulator_value=0.1, l1_regularization_strength=0.0, l2_regularization_strength=0.0, name='Ftrl', l2_shrinkage_regularization_strength=0.0, **kwargs): r"""Construct a new FTRL optimizer. Args: learning_rate: A float value or a constant float `Tensor`. learning_rate_power: A float value, must be less or equal to zero. Controls how the learning rate decreases during training. Use zero for a fixed learning rate. initial_accumulator_value: The starting value for accumulators. Only zero or positive values are allowed. l1_regularization_strength: A float value, must be greater than or equal to zero. l2_regularization_strength: A float value, must be greater than or equal to zero. name: Optional name prefix for the operations created when applying gradients. Defaults to "Ftrl". l2_shrinkage_regularization_strength: A float value, must be greater than or equal to zero. This differs from L2 above in that the L2 above is a stabilization penalty, whereas this L2 shrinkage is a magnitude penalty. The FTRL formulation can be written as: w_{t+1} = argmin_w(\hat{g}_{1:t}w + L1*||w||_1 + L2*||w||_2^2), where \hat{g} = g + (2*L2_shrinkage*w), and g is the gradient of the loss function w.r.t. the weights w. 
Specifically, in the absence of L1 regularization, it is equivalent to the following update rule: w_{t+1} = w_t - lr_t / (1 + 2*L2*lr_t) * g_t - 2*L2_shrinkage*lr_t / (1 + 2*L2*lr_t) * w_t where lr_t is the learning rate at t. When input is sparse shrinkage will only happen on the active weights.\ **kwargs: keyword arguments. Allowed to be {`clipnorm`, `clipvalue`, `lr`, `decay`}. `clipnorm` is clip gradients by norm; `clipvalue` is clip gradients by value, `decay` is included for backward compatibility to allow time inverse decay of learning rate. `lr` is included for backward compatibility, recommended to use `learning_rate` instead. Raises: ValueError: If one of the arguments is invalid. References See [paper] (https://www.eecs.tufts.edu/~dsculley/papers/ad-click-prediction.pdf) """ super(Ftrl, self).__init__(name, **kwargs) if initial_accumulator_value < 0.0: raise ValueError( 'initial_accumulator_value %f needs to be positive or zero' % initial_accumulator_value) if learning_rate_power > 0.0: raise ValueError('learning_rate_power %f needs to be negative or zero' % learning_rate_power) if l1_regularization_strength < 0.0: raise ValueError( 'l1_regularization_strength %f needs to be positive or zero' % l1_regularization_strength) if l2_regularization_strength < 0.0: raise ValueError( 'l2_regularization_strength %f needs to be positive or zero' % l2_regularization_strength) if l2_shrinkage_regularization_strength < 0.0: raise ValueError( 'l2_shrinkage_regularization_strength %f needs to be positive' ' or zero' % l2_shrinkage_regularization_strength) self._set_hyper('learning_rate', learning_rate) self._set_hyper('decay', self._initial_decay) self._set_hyper('learning_rate_power', learning_rate_power) self._set_hyper('l1_regularization_strength', l1_regularization_strength) self._set_hyper('l2_regularization_strength', l2_regularization_strength) self._initial_accumulator_value = initial_accumulator_value self._l2_shrinkage_regularization_strength = ( l2_shrinkage_regularization_strength) def _create_slots(self, var_list): # Create the "accum" and "linear" slots. 
for var in var_list: dtype = var.dtype.base_dtype init = init_ops.constant_initializer( self._initial_accumulator_value, dtype=dtype) self.add_slot(var, 'accumulator', init) self.add_slot(var, 'linear') def _resource_apply_dense(self, grad, var): var_dtype = var.dtype.base_dtype lr_t = self._decayed_lr_t[var_dtype] learning_rate_power = self._get_hyper('learning_rate_power', var_dtype) l1_regularization_strength = self._get_hyper('l1_regularization_strength', var_dtype) l2_regularization_strength = self._get_hyper('l2_regularization_strength', var_dtype) accum = self.get_slot(var, 'accumulator') linear = self.get_slot(var, 'linear') if self._l2_shrinkage_regularization_strength <= 0.0: return training_ops.resource_apply_ftrl( var.handle, accum.handle, linear.handle, grad, lr_t, l1_regularization_strength, l2_regularization_strength, learning_rate_power, use_locking=self._use_locking) else: return training_ops.resource_apply_ftrl_v2( var.handle, accum.handle, linear.handle, grad, lr_t, l1_regularization_strength, l2_regularization_strength, math_ops.cast(self._l2_shrinkage_regularization_strength, var_dtype), learning_rate_power, use_locking=self._use_locking) def _resource_apply_sparse(self, grad, var, indices): var_dtype = var.dtype.base_dtype lr_t = self._decayed_lr_t[var_dtype] learning_rate_power = self._get_hyper('learning_rate_power', var_dtype) l1_regularization_strength = self._get_hyper('l1_regularization_strength', var_dtype) l2_regularization_strength = self._get_hyper('l2_regularization_strength', var_dtype) accum = self.get_slot(var, 'accumulator') linear = self.get_slot(var, 'linear') if self._l2_shrinkage_regularization_strength <= 0.0: return training_ops.resource_sparse_apply_ftrl( var.handle, accum.handle, linear.handle, grad, indices, lr_t, l1_regularization_strength, l2_regularization_strength, learning_rate_power, use_locking=self._use_locking) else: return training_ops.resource_sparse_apply_ftrl_v2( var.handle, accum.handle, linear.handle, grad, indices, lr_t, l1_regularization_strength, l2_regularization_strength, math_ops.cast(self._l2_shrinkage_regularization_strength, var_dtype), learning_rate_power, use_locking=self._use_locking) def get_config(self): config = super(Ftrl, self).get_config() config.update({ 'learning_rate': self._serialize_hyperparameter('learning_rate'), 'decay': self._serialize_hyperparameter('decay'), 'initial_accumulator_value': self._initial_accumulator_value, 'learning_rate_power': self._serialize_hyperparameter('learning_rate_power'), 'l1_regularization_strength': self._serialize_hyperparameter('l1_regularization_strength'), 'l2_regularization_strength': self._serialize_hyperparameter('l2_regularization_strength'), 'l2_shrinkage_regularization_strength': self._l2_shrinkage_regularization_strength, }) return config
tensorflow-master
tensorflow/python/keras/optimizer_v2/ftrl.py
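Editor's note: as a rough companion to the per-coordinate FTRL-Proximal recursion sketched in the class docstring (accumulator `n`, linear term `z`, closed-form weight), here is a simplified NumPy version with `learning_rate_power = -0.5`, no shrinkage, and the `beta` term omitted. It follows the pseudo-code above and the McMahan et al. paper (including the factor of 2 on the L2 term), not the fused `resource_apply_ftrl` kernel, so constant factors may differ slightly from the op's exact arithmetic; treat it as a sketch.

```python
import numpy as np

def ftrl_step(w, grad, n, z, alpha=0.1, l1=0.0, l2=0.0):
  """One per-coordinate FTRL-Proximal step (sketch, beta term omitted)."""
  n_new = n + grad * grad
  sigma = (np.sqrt(n_new) - np.sqrt(n)) / alpha
  z = z + grad - sigma * w
  w = np.where(
      np.abs(z) > l1,
      -(z - np.sign(z) * l1) / (np.sqrt(n_new) / alpha + 2.0 * l2),
      0.0)
  return w, n_new, z

w = np.zeros(2)
n = np.full(2, 0.1)   # plays the role of initial_accumulator_value
z = np.zeros(2)
grad = np.array([0.1, 0.2])
for _ in range(3):
  w, n, z = ftrl_step(w, grad, n, z, alpha=3.0)
print(w)
```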
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Nadam.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.keras.optimizer_v2 import nadam from tensorflow.python.ops import math_ops from tensorflow.python.ops import resource_variable_ops from tensorflow.python.ops import variables from tensorflow.python.platform import test def get_beta_accumulators(opt, dtype): local_step = math_ops.cast(opt.iterations + 1, dtype) beta_1_t = math_ops.cast(opt._get_hyper("beta_1"), dtype) beta_1_power = math_ops.pow(beta_1_t, local_step) beta_2_t = math_ops.cast(opt._get_hyper("beta_2"), dtype) beta_2_power = math_ops.pow(beta_2_t, local_step) return (beta_1_power, beta_2_power) def update_m_cache(m_cache, t, beta1=0.9): mu_t = beta1 * (1 - 0.5 * 0.96**(0.004 * (t + 1))) m_cache_t = m_cache * mu_t return m_cache_t def nadam_update_numpy(param, g_t, t, m, v, m_cache, alpha=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8): mu_t = beta1 * (1 - 0.5 * 0.96**(0.004 * (t + 1))) mu_t_1 = beta1 * (1 - 0.5 * 0.96**(0.004 * (t + 2))) m_cache_t_1 = m_cache * mu_t_1 g_prime_t = g_t / (1 - m_cache) m_t = beta1 * m + (1 - beta1) * g_t v_t = beta2 * v + (1 - beta2) * g_t * g_t m_prime_t = m_t / (1 - m_cache_t_1) v_prime_t = v_t / (1 - beta2**(t + 1)) m_bar_t = (1 - mu_t) * g_prime_t + mu_t_1 * m_prime_t param_t = param - alpha * m_bar_t / (np.sqrt(v_prime_t) + epsilon) return param_t, m_t, v_t class NadamOptimizerTest(test.TestCase): @test_util.run_deprecated_v1 def testSparse(self): sparse_epsilon = 1e-7 for dtype in [dtypes.half, dtypes.float32, dtypes.float64]: with self.cached_session(): # Initialize variables for numpy implementation. 
m0, v0, m1, v1, mcache = 0.0, 0.0, 0.0, 0.0, 1.0 var0_np = np.array([1.0, 1.0, 2.0], dtype=dtype.as_numpy_dtype) grads0_np = np.array([0.1, 0, 0.1], dtype=dtype.as_numpy_dtype) var1_np = np.array([3.0, 3.0, 4.0], dtype=dtype.as_numpy_dtype) grads1_np = np.array([0.01, 0, 0.01], dtype=dtype.as_numpy_dtype) var0 = resource_variable_ops.ResourceVariable(var0_np) var1 = resource_variable_ops.ResourceVariable(var1_np) grads0_np_indices = np.array([0, 2], dtype=np.int32) grads0 = ops.IndexedSlices( constant_op.constant(grads0_np[grads0_np_indices]), constant_op.constant(grads0_np_indices), constant_op.constant([3])) grads1_np_indices = np.array([0, 2], dtype=np.int32) grads1 = ops.IndexedSlices( constant_op.constant(grads1_np[grads1_np_indices]), constant_op.constant(grads1_np_indices), constant_op.constant([3])) opt = nadam.Nadam(epsilon=sparse_epsilon) update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) variables.global_variables_initializer().run() # Fetch params to validate initial values self.assertAllClose([1.0, 1.0, 2.0], var0.eval()) self.assertAllClose([3.0, 3.0, 4.0], var1.eval()) beta1_power, beta2_power = get_beta_accumulators(opt, dtype) # Run 3 steps of Nadam for t in range(3): self.assertAllCloseAccordingToType(0.9**(t + 1), beta1_power.eval()) self.assertAllCloseAccordingToType(0.999**(t + 1), beta2_power.eval()) update.run() mcache = update_m_cache(mcache, t) var0_np, m0, v0 = nadam_update_numpy( var0_np, grads0_np, t, m0, v0, mcache, epsilon=sparse_epsilon) var1_np, m1, v1 = nadam_update_numpy( var1_np, grads1_np, t, m1, v1, mcache, epsilon=sparse_epsilon) # Validate updated params self.assertAllCloseAccordingToType(var0_np, var0.eval()) self.assertAllCloseAccordingToType(var1_np, var1.eval()) @test_util.run_deprecated_v1 def testBasic(self): for dtype in [dtypes.half, dtypes.float32, dtypes.float64]: with self.cached_session(): # Initialize variables for numpy implementation. 
m0, v0, m1, v1, mcache = 0.0, 0.0, 0.0, 0.0, 1.0 var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype) grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype) var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype) grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype) var0 = resource_variable_ops.ResourceVariable(var0_np) var1 = resource_variable_ops.ResourceVariable(var1_np) grads0 = constant_op.constant(grads0_np) grads1 = constant_op.constant(grads1_np) opt = nadam.Nadam() update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) variables.global_variables_initializer().run() # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], var0.eval()) self.assertAllClose([3.0, 4.0], var1.eval()) # Run 3 steps of Nadam for t in range(3): update.run() mcache = update_m_cache(mcache, t) var0_np, m0, v0 = nadam_update_numpy(var0_np, grads0_np, t, m0, v0, mcache) var1_np, m1, v1 = nadam_update_numpy(var1_np, grads1_np, t, m1, v1, mcache) # Validate updated params self.assertAllCloseAccordingToType(var0_np, var0.eval()) self.assertAllCloseAccordingToType(var1_np, var1.eval()) def testConstructNAdamWithLR(self): opt = nadam.Nadam(lr=1.0) opt_2 = nadam.Nadam(learning_rate=0.1, lr=1.0) opt_3 = nadam.Nadam(learning_rate=0.1) self.assertIsInstance(opt.lr, variables.Variable) self.assertIsInstance(opt_2.lr, variables.Variable) self.assertIsInstance(opt_3.lr, variables.Variable) self.evaluate(variables.global_variables_initializer()) self.assertAllClose(self.evaluate(opt.lr), (1.0)) self.assertAllClose(self.evaluate(opt_2.lr), (1.0)) self.assertAllClose(self.evaluate(opt_3.lr), (0.1)) def testConstructNAdamWithScheduleDecay(self): opt = nadam.Nadam(schedule_decay=0.2) self.assertIsInstance(opt.decay, variables.Variable) self.evaluate(variables.global_variables_initializer()) self.assertAllClose(self.evaluate(opt.decay), (0.2)) if __name__ == "__main__": test.main()
tensorflow-master
tensorflow/python/keras/optimizer_v2/nadam_test.py
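Editor's note: the Nadam tests above run in TF1 graph mode (`variables.global_variables_initializer().run()`, `update.run()`, `.eval()`) and compare each step against the `nadam_update_numpy` reference. For orientation, the same apply-and-check pattern with the public eager API might look roughly like the sketch below; it assumes TF 2.x with eager execution, and the values and the sanity check are illustrative rather than a full numerical comparison.

```python
import numpy as np
import tensorflow as tf

var = tf.Variable([1.0, 2.0])
grad = tf.constant([0.1, 0.1])
opt = tf.keras.optimizers.Nadam(learning_rate=0.001)

before = var.numpy()
for _ in range(3):
  opt.apply_gradients([(grad, var)])
after = var.numpy()

# With a positive gradient, Nadam should move the variable downward.
assert np.all(after < before)
print(before, after)
```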
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Adagrad for TensorFlow.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.framework import ops from tensorflow.python.keras import backend_config from tensorflow.python.keras.optimizer_v2 import optimizer_v2 from tensorflow.python.ops import array_ops from tensorflow.python.ops import init_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import resource_variable_ops from tensorflow.python.ops import state_ops from tensorflow.python.util.tf_export import keras_export @keras_export('keras.optimizers.Adagrad') class Adagrad(optimizer_v2.OptimizerV2): r"""Optimizer that implements the Adagrad algorithm. Adagrad is an optimizer with parameter-specific learning rates, which are adapted relative to how frequently a parameter gets updated during training. The more updates a parameter receives, the smaller the updates. Initialization: $$accum_{g_0} := \text{initial_accumulator_value}$$ Update step: $$t := t + 1$$ $$accum_{g_t} := accum_{g_{t-1}} + g^2$$ $$\theta_t := \theta_{t-1} - lr * g / (\sqrt{accum_{g_t}} + \epsilon)$$ References: * [Paper](http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf). * [Introduction] (https://ppasupat.github.io/a9online/uploads/proximal_notes.pdf). """ def __init__(self, learning_rate=0.001, initial_accumulator_value=0.1, epsilon=1e-7, name='Adagrad', **kwargs): """Construct a new Adagrad optimizer. Args: learning_rate: A `Tensor` or a floating point value. The learning rate. initial_accumulator_value: A floating point value. Starting value for the accumulators, must be positive. epsilon: A floating point value. Starting value for the accumulators, must be positive. name: Optional name prefix for the operations created when applying gradients. Defaults to "Adagrad". **kwargs: keyword arguments. Allowed to be {`clipnorm`, `clipvalue`, `lr`, `decay`}. `clipnorm` is clip gradients by norm; `clipvalue` is clip gradients by value, `decay` is included for backward compatibility to allow time inverse decay of learning rate. `lr` is included for backward compatibility, recommended to use `learning_rate` instead. Raises: ValueError: If the `initial_accumulator_value` or `epsilon` is invalid. @compatibility(eager) When eager execution is enabled, `learning_rate` can be a callable that takes no arguments and returns the actual value to use. This can be useful for changing these values across different invocations of optimizer functions. 
@end_compatibility """ if initial_accumulator_value < 0.0: raise ValueError('initial_accumulator_value must be non-negative: %s' % initial_accumulator_value) if epsilon is None: epsilon = backend_config.epsilon() super(Adagrad, self).__init__(name, **kwargs) self._set_hyper('learning_rate', kwargs.get('lr', learning_rate)) self._set_hyper('decay', self._initial_decay) self._initial_accumulator_value = initial_accumulator_value self.epsilon = epsilon or backend_config.epsilon() def _create_slots(self, var_list): for var in var_list: dtype = var.dtype.base_dtype init = init_ops.constant_initializer( self._initial_accumulator_value, dtype=dtype) self.add_slot(var, 'accumulator', init) def set_weights(self, weights): params = self.weights # Override set_weights for backward compatibility of Keras V1 optimizer # since it does not include iteration at head of the weight list. Set # iteration to 0. if len(params) == len(weights) + 1: weights = [np.array(0)] + weights super(Adagrad, self).set_weights(weights) @classmethod def from_config(cls, config, custom_objects=None): """Creates an optimizer from its config. This method is the reverse of `get_config`, capable of instantiating the same optimizer from the config dictionary. Arguments: config: A Python dictionary, typically the output of get_config. custom_objects: A Python dictionary mapping names to additional Python objects used to create this optimizer, such as a function used for a hyperparameter. Returns: An optimizer instance. """ if 'initial_accumulator_value' not in config: config['initial_accumulator_value'] = 0. if 'lr' in config: config['learning_rate'] = config.pop('lr') return cls(**config) def _resource_apply_dense(self, grad, var): var_dtype = var.dtype.base_dtype lr_t = self._decayed_lr_t[var_dtype] epsilon_t = ops.convert_to_tensor(self.epsilon, var_dtype) acc = self.get_slot(var, 'accumulator') acc_t = state_ops.assign_add( acc, math_ops.square(grad), use_locking=self._use_locking) var_update = state_ops.assign_sub( var, lr_t * grad / (math_ops.sqrt(acc_t) + epsilon_t)) return var_update def _resource_apply_sparse(self, grad, var, indices): def _resource_scatter_add(x, i, v): with ops.control_dependencies( [resource_variable_ops.resource_scatter_add(x.handle, i, v)]): return x.value() var_dtype = var.dtype.base_dtype lr_t = self._decayed_lr_t[var_dtype] epsilon_t = ops.convert_to_tensor(self.epsilon, var_dtype) acc = self.get_slot(var, 'accumulator') acc_t = _resource_scatter_add(acc, indices, math_ops.square(grad)) acc_t_slice = array_ops.gather(acc_t, indices) var_update = _resource_scatter_add( var, indices, -lr_t * grad / (math_ops.sqrt(acc_t_slice) + epsilon_t)) return var_update def get_config(self): config = super(Adagrad, self).get_config() config.update({ 'learning_rate': self._serialize_hyperparameter('learning_rate'), 'decay': self._serialize_hyperparameter('decay'), 'initial_accumulator_value': self._initial_accumulator_value, 'epsilon': self.epsilon, }) return config
tensorflow-master
tensorflow/python/keras/optimizer_v2/adagrad.py
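Editor's note: the dense Adagrad update in `_resource_apply_dense` above is short enough to restate as a NumPy reference (accumulate the squared gradient, then scale the step by its square root). A minimal sketch with arbitrary sample values:

```python
import numpy as np

def adagrad_step(param, grad, accum, lr=0.001, eps=1e-7):
  """One Adagrad step: accum += g^2; param -= lr * g / (sqrt(accum) + eps)."""
  accum = accum + grad * grad
  param = param - lr * grad / (np.sqrt(accum) + eps)
  return param, accum

param = np.array([1.0, 2.0])
accum = np.full_like(param, 0.1)  # initial_accumulator_value
grad = np.array([0.1, 0.1])
for _ in range(3):
  param, accum = adagrad_step(param, grad, accum)
print(param)
```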
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Functional tests for Ftrl operations.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.keras.optimizer_v2 import ftrl from tensorflow.python.ops import embedding_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import resource_variable_ops from tensorflow.python.ops import variables from tensorflow.python.platform import test from tensorflow.python.training import adagrad from tensorflow.python.training import gradient_descent class FtrlOptimizerTest(test.TestCase): def doTestFtrlwithoutRegularization(self, use_resource=False): for dtype in [dtypes.half, dtypes.float32]: with self.cached_session() as sess: if use_resource: var0 = resource_variable_ops.ResourceVariable([0.0, 0.0], dtype=dtype) var1 = resource_variable_ops.ResourceVariable([0.0, 0.0], dtype=dtype) else: var0 = variables.Variable([0.0, 0.0], dtype=dtype) var1 = variables.Variable([0.0, 0.0], dtype=dtype) grads0 = constant_op.constant([0.1, 0.2], dtype=dtype) grads1 = constant_op.constant([0.01, 0.02], dtype=dtype) opt = ftrl.Ftrl( 3.0, initial_accumulator_value=0.1, l1_regularization_strength=0.0, l2_regularization_strength=0.0) update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) variables.global_variables_initializer().run() v0_val, v1_val = self.evaluate([var0, var1]) self.assertAllClose([0.0, 0.0], v0_val) self.assertAllClose([0.0, 0.0], v1_val) # Run 3 steps FTRL for _ in range(3): update.run() v0_val, v1_val = self.evaluate([var0, var1]) self.assertAllCloseAccordingToType( np.array([-2.60260963, -4.29698515]), v0_val) self.assertAllCloseAccordingToType( np.array([-0.28432083, -0.56694895]), v1_val) @test_util.run_deprecated_v1 def testFtrlWithoutRegularization(self): self.doTestFtrlwithoutRegularization(use_resource=False) @test_util.run_deprecated_v1 def testResourceFtrlWithoutRegularization(self): self.doTestFtrlwithoutRegularization(use_resource=True) @test_util.run_deprecated_v1 def testFtrlwithoutRegularization2(self): for dtype in [dtypes.half, dtypes.float32]: with self.cached_session() as sess: var0 = variables.Variable([1.0, 2.0], dtype=dtype) var1 = variables.Variable([4.0, 3.0], dtype=dtype) grads0 = constant_op.constant([0.1, 0.2], dtype=dtype) grads1 = constant_op.constant([0.01, 0.02], dtype=dtype) opt = ftrl.Ftrl( 3.0, initial_accumulator_value=0.1, l1_regularization_strength=0.0, l2_regularization_strength=0.0) update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) variables.global_variables_initializer().run() v0_val, v1_val = self.evaluate([var0, var1]) 
self.assertAllCloseAccordingToType([1.0, 2.0], v0_val) self.assertAllCloseAccordingToType([4.0, 3.0], v1_val) # Run 3 steps FTRL for _ in range(3): update.run() v0_val, v1_val = self.evaluate([var0, var1]) self.assertAllCloseAccordingToType( np.array([-2.55607247, -3.98729396]), v0_val) self.assertAllCloseAccordingToType( np.array([-0.28232238, -0.56096673]), v1_val) @test_util.run_deprecated_v1 def testMinimizeSparseResourceVariable(self): for dtype in [dtypes.half, dtypes.float32, dtypes.float64]: with self.cached_session(): var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype) x = constant_op.constant([[4.0], [5.0]], dtype=dtype) def loss(): pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x) # pylint: disable=cell-var-from-loop return pred * pred sgd_op = ftrl.Ftrl(1.0).minimize(loss, var_list=[var0]) variables.global_variables_initializer().run() # Fetch params to validate initial values self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0)) # Run 1 step of sgd sgd_op.run() # Validate updated params self.assertAllCloseAccordingToType([[0, 1]], self.evaluate(var0), atol=0.01) @test_util.run_deprecated_v1 def testFtrlWithL1(self): for dtype in [dtypes.half, dtypes.float32]: with self.cached_session() as sess: var0 = variables.Variable([1.0, 2.0], dtype=dtype) var1 = variables.Variable([4.0, 3.0], dtype=dtype) grads0 = constant_op.constant([0.1, 0.2], dtype=dtype) grads1 = constant_op.constant([0.01, 0.02], dtype=dtype) opt = ftrl.Ftrl( 3.0, initial_accumulator_value=0.1, l1_regularization_strength=0.001, l2_regularization_strength=0.0) update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) variables.global_variables_initializer().run() v0_val, v1_val = self.evaluate([var0, var1]) self.assertAllCloseAccordingToType([1.0, 2.0], v0_val) self.assertAllCloseAccordingToType([4.0, 3.0], v1_val) # Run 10 steps FTRL for _ in range(10): update.run() v0_val, v1_val = self.evaluate([var0, var1]) self.assertAllCloseAccordingToType( np.array([-7.66718769, -10.91273689]), v0_val) self.assertAllCloseAccordingToType( np.array([-0.93460727, -1.86147261]), v1_val) @test_util.run_deprecated_v1 def testFtrlWithL1_L2(self): for dtype in [dtypes.half, dtypes.float32]: with self.cached_session() as sess: var0 = variables.Variable([1.0, 2.0], dtype=dtype) var1 = variables.Variable([4.0, 3.0], dtype=dtype) grads0 = constant_op.constant([0.1, 0.2], dtype=dtype) grads1 = constant_op.constant([0.01, 0.02], dtype=dtype) opt = ftrl.Ftrl( 3.0, initial_accumulator_value=0.1, l1_regularization_strength=0.001, l2_regularization_strength=2.0) update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) variables.global_variables_initializer().run() v0_val, v1_val = self.evaluate([var0, var1]) self.assertAllCloseAccordingToType([1.0, 2.0], v0_val) self.assertAllCloseAccordingToType([4.0, 3.0], v1_val) # Run 10 steps FTRL for _ in range(10): update.run() v0_val, v1_val = self.evaluate([var0, var1]) self.assertAllCloseAccordingToType( np.array([-0.24059935, -0.46829352]), v0_val) self.assertAllCloseAccordingToType( np.array([-0.02406147, -0.04830509]), v1_val) @test_util.run_deprecated_v1 def testFtrlWithL1_L2_L2Shrinkage(self): """Test the new FTRL op with support for l2 shrinkage. The addition of this parameter which places a constant pressure on weights towards the origin causes the gradient descent trajectory to differ. The weights will tend to have smaller magnitudes with this parameter set. 
""" for dtype in [dtypes.half, dtypes.float32]: with self.cached_session() as sess: var0 = variables.Variable([1.0, 2.0], dtype=dtype) var1 = variables.Variable([4.0, 3.0], dtype=dtype) grads0 = constant_op.constant([0.1, 0.2], dtype=dtype) grads1 = constant_op.constant([0.01, 0.02], dtype=dtype) opt = ftrl.Ftrl( 3.0, initial_accumulator_value=0.1, l1_regularization_strength=0.001, l2_regularization_strength=2.0, l2_shrinkage_regularization_strength=0.1) update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) variables.global_variables_initializer().run() v0_val, v1_val = self.evaluate([var0, var1]) self.assertAllCloseAccordingToType([1.0, 2.0], v0_val) self.assertAllCloseAccordingToType([4.0, 3.0], v1_val) # Run 10 steps FTRL for _ in range(10): update.run() v0_val, v1_val = self.evaluate([var0, var1]) self.assertAllCloseAccordingToType( np.array([-0.22578995, -0.44345796]), v0_val) self.assertAllCloseAccordingToType( np.array([-0.14378493, -0.13229476]), v1_val) @test_util.run_deprecated_v1 def testFtrlWithL1_L2_L2ShrinkageSparse(self): """Tests the new FTRL op with support for l2 shrinkage on sparse grads.""" for dtype in [dtypes.half, dtypes.float32]: with self.cached_session() as sess: var0 = variables.Variable([[1.0], [2.0]], dtype=dtype) var1 = variables.Variable([[4.0], [3.0]], dtype=dtype) grads0 = ops.IndexedSlices( constant_op.constant([0.1], shape=[1, 1], dtype=dtype), constant_op.constant([0]), constant_op.constant([2, 1])) grads1 = ops.IndexedSlices( constant_op.constant([0.02], shape=[1, 1], dtype=dtype), constant_op.constant([1]), constant_op.constant([2, 1])) opt = ftrl.Ftrl( 3.0, initial_accumulator_value=0.1, l1_regularization_strength=0.001, l2_regularization_strength=2.0, l2_shrinkage_regularization_strength=0.1) update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) variables.global_variables_initializer().run() v0_val, v1_val = self.evaluate([var0, var1]) self.assertAllCloseAccordingToType([[1.0], [2.0]], v0_val) self.assertAllCloseAccordingToType([[4.0], [3.0]], v1_val) # Run 10 steps FTRL for _ in range(10): update.run() v0_val, v1_val = self.evaluate([var0, var1]) self.assertAllCloseAccordingToType([[-0.22578995], [2.]], v0_val) self.assertAllCloseAccordingToType([[4.], [-0.13229476]], v1_val) @test_util.run_deprecated_v1 def testFtrlWithL2ShrinkageDoesNotChangeLrSchedule(self): """Verifies that l2 shrinkage in FTRL does not change lr schedule.""" for dtype in [dtypes.half, dtypes.float32]: with self.cached_session() as sess: var0 = variables.Variable([1.0, 2.0], dtype=dtype) var1 = variables.Variable([1.0, 2.0], dtype=dtype) grads0 = constant_op.constant([0.1, 0.2], dtype=dtype) grads1 = constant_op.constant([0.1, 0.2], dtype=dtype) opt0 = ftrl.Ftrl( 3.0, initial_accumulator_value=0.1, l1_regularization_strength=0.001, l2_regularization_strength=2.0, l2_shrinkage_regularization_strength=0.1) opt1 = ftrl.Ftrl( 3.0, initial_accumulator_value=0.1, l1_regularization_strength=0.001, l2_regularization_strength=2.0) update0 = opt0.apply_gradients([(grads0, var0)]) update1 = opt1.apply_gradients([(grads1, var1)]) variables.global_variables_initializer().run() v0_val, v1_val = self.evaluate([var0, var1]) self.assertAllCloseAccordingToType([1.0, 2.0], v0_val) self.assertAllCloseAccordingToType([1.0, 2.0], v1_val) # Run 10 steps FTRL for _ in range(10): update0.run() update1.run() v0_val, v1_val = self.evaluate([var0, var1]) # var0 is experiencing L2 shrinkage so it should be smaller than var1 # in magnitude. 
self.assertTrue((v0_val**2 < v1_val**2).all()) accum0 = sess.run(opt0.get_slot(var0, "accumulator")) accum1 = sess.run(opt1.get_slot(var1, "accumulator")) # L2 shrinkage should not change how we update grad accumulator. self.assertAllCloseAccordingToType(accum0, accum1) def applyOptimizer(self, opt, dtype, steps=5, is_sparse=False): if is_sparse: var0 = variables.Variable([[0.0], [0.0]], dtype=dtype) var1 = variables.Variable([[0.0], [0.0]], dtype=dtype) grads0 = ops.IndexedSlices( constant_op.constant([0.1], shape=[1, 1], dtype=dtype), constant_op.constant([0]), constant_op.constant([2, 1])) grads1 = ops.IndexedSlices( constant_op.constant([0.02], shape=[1, 1], dtype=dtype), constant_op.constant([1]), constant_op.constant([2, 1])) else: var0 = variables.Variable([0.0, 0.0], dtype=dtype) var1 = variables.Variable([0.0, 0.0], dtype=dtype) grads0 = constant_op.constant([0.1, 0.2], dtype=dtype) grads1 = constant_op.constant([0.01, 0.02], dtype=dtype) update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) variables.global_variables_initializer().run() sess = ops.get_default_session() v0_val, v1_val = self.evaluate([var0, var1]) if is_sparse: self.assertAllCloseAccordingToType([[0.0], [0.0]], v0_val) self.assertAllCloseAccordingToType([[0.0], [0.0]], v1_val) else: self.assertAllCloseAccordingToType([0.0, 0.0], v0_val) self.assertAllCloseAccordingToType([0.0, 0.0], v1_val) # Run Ftrl for a few steps for _ in range(steps): update.run() v0_val, v1_val = self.evaluate([var0, var1]) return v0_val, v1_val # When variables are initialized with Zero, FTRL-Proximal has two properties: # 1. Without L1&L2 but with fixed learning rate, FTRL-Proximal is identical # with GradientDescent. # 2. Without L1&L2 but with adaptive learning rate, FTRL-Proximal is identical # with Adagrad. # So, basing on these two properties, we test if our implementation of # FTRL-Proximal performs same updates as Adagrad or GradientDescent. 
@test_util.run_deprecated_v1 def testEquivAdagradwithoutRegularization(self): for dtype in [dtypes.half, dtypes.float32]: with self.cached_session(): val0, val1 = self.applyOptimizer( ftrl.Ftrl( 3.0, # Adagrad learning rate learning_rate_power=-0.5, initial_accumulator_value=0.1, l1_regularization_strength=0.0, l2_regularization_strength=0.0), dtype) with self.cached_session(): val2, val3 = self.applyOptimizer( adagrad.AdagradOptimizer(3.0, initial_accumulator_value=0.1), dtype) self.assertAllCloseAccordingToType(val0, val2) self.assertAllCloseAccordingToType(val1, val3) @test_util.run_deprecated_v1 def testEquivSparseAdagradwithoutRegularization(self): for dtype in [dtypes.half, dtypes.float32]: with self.cached_session(): val0, val1 = self.applyOptimizer( ftrl.Ftrl( 3.0, # Adagrad learning rate learning_rate_power=-0.5, initial_accumulator_value=0.1, l1_regularization_strength=0.0, l2_regularization_strength=0.0), dtype, is_sparse=True) with self.cached_session(): val2, val3 = self.applyOptimizer( adagrad.AdagradOptimizer(3.0, initial_accumulator_value=0.1), dtype, is_sparse=True) self.assertAllCloseAccordingToType(val0, val2) self.assertAllCloseAccordingToType(val1, val3) @test_util.run_deprecated_v1 def testEquivSparseGradientDescentwithoutRegularization(self): for dtype in [dtypes.half, dtypes.float32]: with self.cached_session(): val0, val1 = self.applyOptimizer( ftrl.Ftrl( 3.0, # Fixed learning rate learning_rate_power=-0.0, initial_accumulator_value=0.1, l1_regularization_strength=0.0, l2_regularization_strength=0.0), dtype, is_sparse=True) with self.cached_session(): val2, val3 = self.applyOptimizer( gradient_descent.GradientDescentOptimizer(3.0), dtype, is_sparse=True) self.assertAllCloseAccordingToType(val0, val2) self.assertAllCloseAccordingToType(val1, val3) @test_util.run_deprecated_v1 def testEquivGradientDescentwithoutRegularization(self): for dtype in [dtypes.half, dtypes.float32]: with self.cached_session(): val0, val1 = self.applyOptimizer( ftrl.Ftrl( 3.0, # Fixed learning rate learning_rate_power=-0.0, initial_accumulator_value=0.1, l1_regularization_strength=0.0, l2_regularization_strength=0.0), dtype) with self.cached_session(): val2, val3 = self.applyOptimizer( gradient_descent.GradientDescentOptimizer(3.0), dtype) self.assertAllCloseAccordingToType(val0, val2) self.assertAllCloseAccordingToType(val1, val3) if __name__ == "__main__": test.main()
tensorflow-master
tensorflow/python/keras/optimizer_v2/ftrl_test.py
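Editor's note: the equivalence tests above rely on the property that, starting from zero weights and without L1/L2, FTRL-Proximal matches Adagrad (adaptive rate, `learning_rate_power=-0.5`) or plain gradient descent (fixed rate, `learning_rate_power=0`). A rough TF2 eager sketch of the Adagrad comparison using the public Keras optimizers is below; it assumes TF 2.x, and the loose tolerance is there because the Keras Adagrad adds a small epsilon that the v1 `AdagradOptimizer` used in the test does not.

```python
import numpy as np
import tensorflow as tf

def run_steps(opt, steps=5):
  var = tf.Variable([0.0, 0.0])
  grad = tf.constant([0.1, 0.2])
  for _ in range(steps):
    opt.apply_gradients([(grad, var)])
  return var.numpy()

ftrl_vals = run_steps(tf.keras.optimizers.Ftrl(
    3.0, learning_rate_power=-0.5, initial_accumulator_value=0.1,
    l1_regularization_strength=0.0, l2_regularization_strength=0.0))
adagrad_vals = run_steps(
    tf.keras.optimizers.Adagrad(3.0, initial_accumulator_value=0.1))
print(ftrl_vals, adagrad_vals)
assert np.allclose(ftrl_vals, adagrad_vals, atol=1e-3)
```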
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Adam for TensorFlow.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import ops from tensorflow.python.keras import backend_config from tensorflow.python.keras.optimizer_v2 import optimizer_v2 from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import state_ops from tensorflow.python.training import training_ops from tensorflow.python.util.tf_export import keras_export @keras_export('keras.optimizers.Adam') class Adam(optimizer_v2.OptimizerV2): """Optimizer that implements the Adam algorithm. Adam optimization is a stochastic gradient descent method that is based on adaptive estimation of first-order and second-order moments. According to the paper [Adam: A Method for Stochastic Optimization. Kingma et al., 2014](http://arxiv.org/abs/1412.6980), the method is "*computationally efficient, has little memory requirement, invariant to diagonal rescaling of gradients, and is well suited for problems that are large in terms of data/parameters*". For AMSGrad see [On The Convergence Of Adam And Beyond. Reddi et al., 5-8](https://openreview.net/pdf?id=ryQu7f-RZ). """ def __init__(self, learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-7, amsgrad=False, name='Adam', **kwargs): r"""Construct a new Adam optimizer. If amsgrad = False: Initialization: $$m_0 := 0 \text{(Initialize initial 1st moment vector)}$$ $$v_0 := 0 \text{(Initialize initial 2nd moment vector)}$$ $$t := 0 \text{(Initialize timestep)}$$ The update rule for `variable` with gradient `g` uses an optimization described at the end of section 2 of the paper: $$t := t + 1$$ $$lr_t := \text{learning\_rate} * \sqrt{1 - beta_2^t} / (1 - beta_1^t)$$ $$m_t := beta_1 * m_{t-1} + (1 - beta_1) * g$$ $$v_t := beta_2 * v_{t-1} + (1 - beta_2) * g * g$$ $$variable := variable - lr_t * m_t / (\sqrt{v_t} + \epsilon)$$ If amsgrad = True: Initialization: $$m_0 := 0 \text{(Initialize initial 1st moment vector)}$$ $$v_0 := 0 \text{(Initialize initial 2nd moment vector)}$$ $$v_hat_0 := 0 \text{(Initialize initial 2nd moment vector)}$$ $$t := 0 \text{(Initialize timestep)}$$ The update rule for `variable` with gradient `g` uses an optimization described at the end of section 2 of the paper: $$t := t + 1$$ $$lr_t := \text{learning\_rate} * \sqrt{1 - beta_2^t} / (1 - beta_1^t)$$ $$m_t := beta_1 * m_{t-1} + (1 - beta_1) * g$$ $$v_t := beta_2 * v_{t-1} + (1 - beta_2) * g * g$$ $$v_hat_t := max(v_hat_{t-1}, v_t) $$variable := variable - lr_t * m_t / (\sqrt{v_hat_t} + \epsilon)$$ The default value of 1e-7 for epsilon might not be a good default in general. For example, when training an Inception network on ImageNet a current good choice is 1.0 or 0.1. 
Note that since AdamOptimizer uses the formulation just before Section 2.1 of the Kingma and Ba paper rather than the formulation in Algorithm 1, the "epsilon" referred to here is "epsilon hat" in the paper. The sparse implementation of this algorithm (used when the gradient is an IndexedSlices object, typically because of `tf.gather` or an embedding lookup in the forward pass) does apply momentum to variable slices even if they were not used in the forward pass (meaning they have a gradient equal to zero). Momentum decay (beta1) is also applied to the entire momentum accumulator. This means that the sparse behavior is equivalent to the dense behavior (in contrast to some momentum implementations which ignore momentum unless a variable slice was actually used). Args: learning_rate: A Tensor or a floating point value. The learning rate. beta_1: A float value or a constant float tensor. The exponential decay rate for the 1st moment estimates. beta_2: A float value or a constant float tensor. The exponential decay rate for the 2nd moment estimates. epsilon: A small constant for numerical stability. This epsilon is "epsilon hat" in the Kingma and Ba paper (in the formula just before Section 2.1), not the epsilon in Algorithm 1 of the paper. amsgrad: boolean. Whether to apply AMSGrad variant of this algorithm from the paper "On the Convergence of Adam and beyond". name: Optional name for the operations created when applying gradients. Defaults to "Adam". @compatibility(eager) When eager execution is enabled, `learning_rate`, `beta_1`, `beta_2`, and `epsilon` can each be a callable that takes no arguments and returns the actual value to use. This can be useful for changing these values across different invocations of optimizer functions. @end_compatibility **kwargs: keyword arguments. Allowed to be {`clipnorm`, `clipvalue`, `lr`, `decay`}. `clipnorm` is clip gradients by norm; `clipvalue` is clip gradients by value, `decay` is included for backward compatibility to allow time inverse decay of learning rate. `lr` is included for backward compatibility, recommended to use `learning_rate` instead. """ super(Adam, self).__init__(name, **kwargs) self._set_hyper('learning_rate', kwargs.get('lr', learning_rate)) self._set_hyper('decay', self._initial_decay) self._set_hyper('beta_1', beta_1) self._set_hyper('beta_2', beta_2) self.epsilon = epsilon or backend_config.epsilon() self.amsgrad = amsgrad def _create_slots(self, var_list): # Create slots for the first and second moments. # Separate for-loops to respect the ordering of slot variables from v1. for var in var_list: self.add_slot(var, 'm') for var in var_list: self.add_slot(var, 'v') if self.amsgrad: for var in var_list: self.add_slot(var, 'vhat') def set_weights(self, weights): params = self.weights # If the weights are generated by Keras V1 optimizer, it includes vhats # even without amsgrad, i.e, V1 optimizer has 3x + 1 variables, while V2 # optimizer has 2x + 1 variables. Filter vhats out for compatibility. 
num_vars = int((len(params) - 1) / 2) if len(weights) == 3 * num_vars + 1: weights = weights[:len(params)] super(Adam, self).set_weights(weights) def _resource_apply_dense(self, grad, var): var_dtype = var.dtype.base_dtype lr_t = self._decayed_lr_t[var_dtype] m = self.get_slot(var, 'm') v = self.get_slot(var, 'v') beta_1_t = self._get_hyper('beta_1', var_dtype) beta_2_t = self._get_hyper('beta_2', var_dtype) epsilon_t = ops.convert_to_tensor(self.epsilon, var_dtype) local_step = math_ops.cast(self.iterations + 1, var_dtype) beta_1_power = math_ops.pow(beta_1_t, local_step) beta_2_power = math_ops.pow(beta_2_t, local_step) if not self.amsgrad: return training_ops.resource_apply_adam( var.handle, m.handle, v.handle, beta_1_power, beta_2_power, lr_t, beta_1_t, beta_2_t, epsilon_t, grad, use_locking=self._use_locking) else: vhat = self.get_slot(var, 'vhat') return training_ops.resource_apply_adam_with_amsgrad( var.handle, m.handle, v.handle, vhat.handle, beta_1_power, beta_2_power, lr_t, beta_1_t, beta_2_t, epsilon_t, grad, use_locking=self._use_locking) def _resource_apply_sparse(self, grad, var, indices): var_dtype = var.dtype.base_dtype lr_t = self._decayed_lr_t[var_dtype] beta_1_t = self._get_hyper('beta_1', var_dtype) beta_2_t = self._get_hyper('beta_2', var_dtype) local_step = math_ops.cast(self.iterations + 1, var_dtype) beta_1_power = math_ops.pow(beta_1_t, local_step) beta_2_power = math_ops.pow(beta_2_t, local_step) epsilon_t = ops.convert_to_tensor(self.epsilon, var_dtype) lr = (lr_t * math_ops.sqrt(1 - beta_2_power) / (1 - beta_1_power)) # m_t = beta1 * m + (1 - beta1) * g_t m = self.get_slot(var, 'm') m_scaled_g_values = grad * (1 - beta_1_t) m_t = state_ops.assign(m, m * beta_1_t, use_locking=self._use_locking) with ops.control_dependencies([m_t]): m_t = self._resource_scatter_add(m, indices, m_scaled_g_values) # v_t = beta2 * v + (1 - beta2) * (g_t * g_t) v = self.get_slot(var, 'v') v_scaled_g_values = (grad * grad) * (1 - beta_2_t) v_t = state_ops.assign(v, v * beta_2_t, use_locking=self._use_locking) with ops.control_dependencies([v_t]): v_t = self._resource_scatter_add(v, indices, v_scaled_g_values) if not self.amsgrad: v_sqrt = math_ops.sqrt(v_t) var_update = state_ops.assign_sub( var, lr * m_t / (v_sqrt + epsilon_t), use_locking=self._use_locking) return control_flow_ops.group(*[var_update, m_t, v_t]) else: v_hat = self.get_slot(var, 'vhat') v_hat_t = math_ops.maximum(v_hat, v_t) with ops.control_dependencies([v_hat_t]): v_hat_t = state_ops.assign( v_hat, v_hat_t, use_locking=self._use_locking) v_hat_sqrt = math_ops.sqrt(v_hat_t) var_update = state_ops.assign_sub( var, lr * m_t / (v_hat_sqrt + epsilon_t), use_locking=self._use_locking) return control_flow_ops.group(*[var_update, m_t, v_t, v_hat_t]) def get_config(self): config = super(Adam, self).get_config() config.update({ 'learning_rate': self._serialize_hyperparameter('learning_rate'), 'decay': self._serialize_hyperparameter('decay'), 'beta_1': self._serialize_hyperparameter('beta_1'), 'beta_2': self._serialize_hyperparameter('beta_2'), 'epsilon': self.epsilon, 'amsgrad': self.amsgrad, }) return config
tensorflow-master
tensorflow/python/keras/optimizer_v2/adam.py
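Editor's note: the Adam docstring above gives the bias-corrected update used by the dense kernel, plus the AMSGrad variant with a running maximum of the second-moment accumulator. The NumPy sketch below mirrors those formulas; as the docstring notes, `eps` here plays the role of "epsilon hat". Helper name and sample values are illustrative.

```python
import numpy as np

def adam_step(param, grad, m, v, vhat, t, lr=0.001, beta1=0.9, beta2=0.999,
              eps=1e-7, amsgrad=False):
  """One Adam / AMSGrad step following the docstring (t starts at 1)."""
  m = beta1 * m + (1 - beta1) * grad
  v = beta2 * v + (1 - beta2) * grad * grad
  lr_t = lr * np.sqrt(1 - beta2**t) / (1 - beta1**t)
  if amsgrad:
    vhat = np.maximum(vhat, v)
    param = param - lr_t * m / (np.sqrt(vhat) + eps)
  else:
    param = param - lr_t * m / (np.sqrt(v) + eps)
  return param, m, v, vhat

param = np.array([1.0, 2.0])
m = np.zeros_like(param)
v = np.zeros_like(param)
vhat = np.zeros_like(param)
grad = np.array([0.1, 0.1])
for t in range(1, 4):
  param, m, v, vhat = adam_step(param, grad, m, v, vhat, t)
print(param)
```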
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Version 2 of class Optimizer.""" # pylint: disable=g-bad-name from __future__ import absolute_import from __future__ import division from __future__ import print_function import abc import functools import six from tensorflow.python.distribute import distribution_strategy_context as distribute_ctx from tensorflow.python.distribute import reduce_util as ds_reduce_util from tensorflow.python.eager import backprop from tensorflow.python.eager import context from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_util from tensorflow.python.keras import backend from tensorflow.python.keras import initializers from tensorflow.python.keras.engine import base_layer_utils from tensorflow.python.keras.optimizer_v2 import learning_rate_schedule from tensorflow.python.keras.utils import generic_utils from tensorflow.python.keras.utils import tf_utils from tensorflow.python.ops import array_ops from tensorflow.python.ops import clip_ops from tensorflow.python.ops import gradients from tensorflow.python.ops import math_ops from tensorflow.python.ops import resource_variable_ops from tensorflow.python.ops import variables as tf_variables from tensorflow.python.platform import tf_logging as logging from tensorflow.python.saved_model import revived_types from tensorflow.python.training.tracking import base as trackable from tensorflow.python.util import nest from tensorflow.python.util.tf_export import keras_export def _deduplicate_indexed_slices(values, indices): """Sums `values` associated with any non-unique `indices`. Args: values: A `Tensor` with rank >= 1. indices: A one-dimensional integer `Tensor`, indexing into the first dimension of `values` (as in an IndexedSlices object). Returns: A tuple of (`summed_values`, `unique_indices`) where `unique_indices` is a de-duplicated version of `indices` and `summed_values` contains the sum of `values` slices associated with each unique index. """ unique_indices, new_index_positions = array_ops.unique(indices) summed_values = math_ops.unsorted_segment_sum( values, new_index_positions, array_ops.shape(unique_indices)[0]) return (summed_values, unique_indices) @six.add_metaclass(abc.ABCMeta) @keras_export("keras.optimizers.Optimizer") class OptimizerV2(trackable.Trackable): """Updated base class for optimizers. This class defines the API to add Ops to train a model. You never use this class directly, but instead instantiate one of its subclasses such as `tf.keras.optimizers.SGD`, `tf.keras.optimizers.Adam`. ### Usage ```python # Create an optimizer with the desired parameters. opt = tf.keras.optimizers.SGD(learning_rate=0.1) # `loss` is a callable that takes no argument and returns the value # to minimize. 
loss = lambda: 3 * var1 * var1 + 2 * var2 * var2 # In graph mode, returns op that minimizes the loss by updating the listed # variables. opt_op = opt.minimize(loss, var_list=[var1, var2]) opt_op.run() # In eager mode, simply call minimize to update the list of variables. opt.minimize(loss, var_list=[var1, var2]) ``` ### Custom training loop with Keras models In Keras models, sometimes variables are created when the model is first called, instead of construction time. Examples include 1) sequential models without input shape pre-defined, or 2) subclassed models. Pass var_list as callable in these cases. Example: ```python opt = tf.keras.optimizers.SGD(learning_rate=0.1) model = tf.keras.Sequential() model.add(tf.keras.layers.Dense(num_hidden, activation='relu')) model.add(tf.keras.layers.Dense(num_classes, activation='sigmoid') loss_fn = lambda: tf.keras.losses.mse(model(input), output) var_list_fn = lambda: model.trainable_weights for input, output in data: opt.minimize(loss_fn, var_list_fn) ``` ### Processing gradients before applying them. Calling `minimize()` takes care of both computing the gradients and applying them to the variables. If you want to process the gradients before applying them you can instead use the optimizer in three steps: 1. Compute the gradients with `tf.GradientTape`. 2. Process the gradients as you wish. 3. Apply the processed gradients with `apply_gradients()`. Example: ```python # Create an optimizer. opt = tf.keras.optimizers.SGD(learning_rate=0.1) # Compute the gradients for a list of variables. with tf.GradientTape() as tape: loss = <call_loss_function> vars = <list_of_variables> grads = tape.gradient(loss, vars) processed_grads = [process_gradient(g) for g in grads] grads_and_vars = zip(processed_grads, var_list) # grads_and_vars is a list of tuples (gradient, variable). Do whatever you # need to the 'gradient' part, for example cap them, etc. capped_grads_and_vars = [(MyCapper(gv[0]), gv[1]) for gv in grads_and_vars] # Ask the optimizer to apply the capped gradients. opt.apply_gradients(capped_grads_and_vars) ``` ### Use with `tf.distribute.Strategy`. This optimizer class is `tf.distribute.Strategy` aware, which means it automatically sums gradients across all replicas. To average gradients, you divide your loss by the global batch size, which is done automatically if you use `tf.keras` built-in training or evaluation loops. See the `reduction` argument of your loss which should be set to `tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE` for averaging or `tf.keras.losses.Reduction.SUM` for not. If you are not using these and you want to average gradients, you should use `tf.math.reduce_sum` to add up your per-example losses and then divide by the global batch size. Note that when using `tf.distribute.Strategy`, the first component of a tensor's shape is the *replica-local* batch size, which is off by a factor equal to the number of replicas being used to compute a single step. As a result, using `tf.math.reduce_mean` will give the wrong answer, resulting in gradients that can be many times too big. ### Variable Constraint All Keras optimizers respect variable constraints. If constraint function is passed to any variable, the constraint will be applied to the variable after the gradient has been applied to the variable. Important: If gradient is sparse tensor, variable constraint is not supported. ### Thread Compatibility The entire optimizer is currently thread compatible, not thread-safe. The user needs to perform synchronization if necessary. 
### Slots Many optimizer subclasses, such as `Adam` and `Adagrad` allocate and manage additional variables associated with the variables to train. These are called <i>Slots</i>. Slots have names and you can ask the optimizer for the names of the slots that it uses. Once you have a slot name you can ask the optimizer for the variable it created to hold the slot value. This can be useful if you want to log debug a training algorithm, report stats about the slots, etc. ### Hyper parameters These are arguments passed to the optimizer subclass constructor (the `__init__` method), and then passed to `self._set_hyper()`. They can be either regular Python values (like 1.0), tensors, or callables. If they are callable, the callable will be called during `apply_gradients()` to get the value for the hyper parameter. Hyper parameters can be overwritten through user code: Example: ```python # Create an optimizer with the desired parameters. opt = tf.keras.optimizers.SGD(learning_rate=0.1) # `loss` is a callable that takes no argument and returns the value # to minimize. loss = lambda: 3 * var1 + 2 * var2 # In eager mode, simply call minimize to update the list of variables. opt.minimize(loss, var_list=[var1, var2]) # update learning rate opt.learning_rate = 0.05 opt.minimize(loss, var_list=[var1, var2]) ``` ### Write a customized optimizer. If you intend to create your own optimization algorithm, simply inherit from this class and override the following methods: - resource_apply_dense (update variable given gradient tensor is dense) - resource_apply_sparse (update variable given gradient tensor is sparse) - create_slots (if your optimizer algorithm requires additional variables) - get_config (serialization of the optimizer, include all hyper parameters) """ def __init__(self, name, **kwargs): """Create a new Optimizer. This must be called by the constructors of subclasses. Note that Optimizer instances should not bind to a single graph, and so shouldn't keep Tensors as member variables. Generally you should be able to use the _set_hyper()/state.get_hyper() facility instead. This class in stateful and thread-compatible. Args: name: A non-empty string. The name to use for accumulators created for the optimizer. **kwargs: keyword arguments. Allowed to be {`clipnorm`, `clipvalue`, `lr`, `decay`}. `clipnorm` is clip gradients by norm; `clipvalue` is clip gradients by value, `decay` is included for backward compatibility to allow time inverse decay of learning rate. `lr` is included for backward compatibility, recommended to use `learning_rate` instead. Raises: ValueError: If name is malformed. RuntimeError: If _create_slots has been overridden instead of _create_vars. """ allowed_kwargs = {"clipnorm", "clipvalue", "lr", "decay"} for k in kwargs: if k not in allowed_kwargs: raise TypeError("Unexpected keyword argument " "passed to optimizer: " + str(k)) # checks that all keyword arguments are non-negative. if kwargs[k] < 0: raise ValueError("Expected {} >= 0, received: {}".format(k, kwargs[k])) self._use_locking = True self._init_set_name(name) self._hyper = {} # dict: {variable name : {slot name : variable}} self._slots = {} self._slot_names = [] self._weights = [] self._iterations = None # For implementing Trackable. Stores information about how to restore # slot variables which have not yet been created # (trackable._CheckpointPosition objects). # {slot_name : # {_var_key(variable_to_train): [checkpoint_position, ... ], ... }, # ... 
} self._deferred_slot_restorations = {} decay = kwargs.pop("decay", 0.0) if decay < 0.: raise ValueError("decay cannot be less than 0: {}".format(decay)) self._initial_decay = decay if "clipnorm" in kwargs: self.clipnorm = kwargs.pop("clipnorm") if "clipvalue" in kwargs: self.clipvalue = kwargs.pop("clipvalue") self._hypers_created = False def minimize(self, loss, var_list, grad_loss=None, name=None): """Minimize `loss` by updating `var_list`. This method simply computes gradient using `tf.GradientTape` and calls `apply_gradients()`. If you want to process the gradient before applying then call `tf.GradientTape` and `apply_gradients()` explicitly instead of using this function. Args: loss: A callable taking no arguments which returns the value to minimize. var_list: list or tuple of `Variable` objects to update to minimize `loss`, or a callable returning the list or tuple of `Variable` objects. Use callable when the variable list would otherwise be incomplete before `minimize` since the variables are created at the first time `loss` is called. grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`. name: Optional name for the returned operation. Returns: An Operation that updates the variables in `var_list`. If `global_step` was not `None`, that operation also increments `global_step`. Raises: ValueError: If some of the variables are not `Variable` objects. """ grads_and_vars = self._compute_gradients( loss, var_list=var_list, grad_loss=grad_loss) return self.apply_gradients(grads_and_vars, name=name) def _compute_gradients(self, loss, var_list, grad_loss=None): """Compute gradients of `loss` for the variables in `var_list`. This is the first part of `minimize()`. It returns a list of (gradient, variable) pairs where "gradient" is the gradient for "variable". Note that "gradient" can be a `Tensor`, an `IndexedSlices`, or `None` if there is no gradient for the given variable. Args: loss: A callable taking no arguments which returns the value to minimize. var_list: list or tuple of `Variable` objects to update to minimize `loss`, or a callable returning the list or tuple of `Variable` objects. Use callable when the variable list would otherwise be incomplete before `minimize` and the variables are created at the first time when `loss` is called. grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`. Returns: A list of (gradient, variable) pairs. Variable is always present, but gradient can be `None`. Raises: TypeError: If `var_list` contains anything else than `Variable` objects. ValueError: If some arguments are invalid, or var_list is None. """ # TODO(josh11b): Test that we handle weight decay in a reasonable way. with backprop.GradientTape() as tape: if not callable(var_list): tape.watch(var_list) loss_value = loss() if callable(var_list): var_list = var_list() var_list = nest.flatten(var_list) with backend.name_scope(self._name + "/gradients"): grads = tape.gradient(loss_value, var_list, grad_loss) if hasattr(self, "clipnorm"): grads = [clip_ops.clip_by_norm(g, self.clipnorm) for g in grads] if hasattr(self, "clipvalue"): grads = [ clip_ops.clip_by_value(g, -self.clipvalue, self.clipvalue) for g in grads ] grads_and_vars = list(zip(grads, var_list)) self._assert_valid_dtypes([ v for g, v in grads_and_vars if g is not None and v.dtype != dtypes.resource ]) return grads_and_vars def get_gradients(self, loss, params): """Returns gradients of `loss` with respect to `params`. Arguments: loss: Loss tensor. params: List of variables. 
Returns: List of gradient tensors. Raises: ValueError: In case any gradient cannot be computed (e.g. if gradient function not implemented). """ params = nest.flatten(params) with backend.get_graph().as_default(), backend.name_scope(self._name + "/gradients"): grads = gradients.gradients(loss, params) for grad, param in zip(grads, params): if grad is None: raise ValueError("Variable {} has `None` for gradient. " "Please make sure that all of your ops have a " "gradient defined (i.e. are differentiable). " "Common ops without gradient: " "K.argmax, K.round, K.eval.".format(param)) if hasattr(self, "clipnorm"): grads = [clip_ops.clip_by_norm(g, self.clipnorm) for g in grads] if hasattr(self, "clipvalue"): grads = [ clip_ops.clip_by_value(g, -self.clipvalue, self.clipvalue) for g in grads ] return grads def apply_gradients(self, grads_and_vars, name=None): """Apply gradients to variables. This is the second part of `minimize()`. It returns an `Operation` that applies gradients. Args: grads_and_vars: List of (gradient, variable) pairs. name: Optional name for the returned operation. Default to the name passed to the `Optimizer` constructor. Returns: An `Operation` that applies the specified gradients. If `global_step` was not None, that operation also increments `global_step`. Raises: TypeError: If `grads_and_vars` is malformed. ValueError: If none of the variables have gradients. """ grads_and_vars = _filter_grads(grads_and_vars) var_list = [v for (_, v) in grads_and_vars] with backend.name_scope(self._name): # Create iteration if necessary. with ops.init_scope(): _ = self.iterations self._create_hypers() self._create_slots(var_list) self._prepare(var_list) return distribute_ctx.get_replica_context().merge_call( self._distributed_apply, args=(grads_and_vars,), kwargs={"name": name}) def _distributed_apply(self, distribution, grads_and_vars, name): """`apply_gradients` using a `DistributionStrategy`.""" reduced_grads = distribution.extended.batch_reduce_to( ds_reduce_util.ReduceOp.SUM, grads_and_vars) var_list = [v for _, v in grads_and_vars] grads_and_vars = zip(reduced_grads, var_list) def apply_grad_to_update_var(var, grad): """Apply gradient to variable.""" if isinstance(var, ops.Tensor): raise NotImplementedError("Trying to update a Tensor ", var) if isinstance(grad, ops.IndexedSlices): if var.constraint is not None: raise RuntimeError( "Cannot use a constraint function on a sparse variable.") return self._resource_apply_sparse_duplicate_indices( grad.values, var, grad.indices) update_op = self._resource_apply_dense(grad, var) if var.constraint is not None: with ops.control_dependencies([update_op]): return var.assign(var.constraint(var)) else: return update_op update_ops = [] with backend.name_scope(name or self._name): for grad, var in grads_and_vars: scope_name = ("" if ops.executing_eagerly_outside_functions() else "_" + var.op.name) with backend.name_scope("update" + scope_name): update_ops.extend( distribution.extended.update( var, apply_grad_to_update_var, args=(grad,), group=False)) any_symbolic = any(isinstance(i, ops.Operation) or tf_utils.is_symbolic_tensor(i) for i in update_ops) if not context.executing_eagerly() or any_symbolic: # If the current context is graph mode or any of the update ops are # symbolic then the step update should be carried out under a graph # context. 
(eager updates execute immediately) with ops._get_graph_from_inputs(update_ops).as_default(): # pylint: disable=protected-access with ops.control_dependencies(update_ops): return self._iterations.assign_add(1).op return self._iterations.assign_add(1) def get_updates(self, loss, params): grads = self.get_gradients(loss, params) grads_and_vars = list(zip(grads, params)) self._assert_valid_dtypes([ v for g, v in grads_and_vars if g is not None and v.dtype != dtypes.resource ]) return [self.apply_gradients(grads_and_vars)] def _set_hyper(self, name, value): """set hyper `name` to value. value can be callable, tensor, numeric.""" if isinstance(value, trackable.Trackable): self._track_trackable(value, name, overwrite=True) if name not in self._hyper: self._hyper[name] = value else: prev_value = self._hyper[name] if (callable(prev_value) or isinstance(prev_value, (ops.Tensor, int, float, learning_rate_schedule.LearningRateSchedule)) or isinstance(value, learning_rate_schedule.LearningRateSchedule)): self._hyper[name] = value else: backend.set_value(self._hyper[name], value) def _get_hyper(self, name, dtype=None): if not self._hypers_created: self._create_hypers() value = self._hyper[name] if isinstance(value, learning_rate_schedule.LearningRateSchedule): return value if callable(value): value = value() if dtype: return math_ops.cast(value, dtype) else: return value def __getattribute__(self, name): """Overridden to support hyperparameter access.""" try: return super(OptimizerV2, self).__getattribute__(name) except AttributeError as e: # Needed to avoid infinite recursion with __setattr__. if name == "_hyper": raise e # Backwards compatibility with Keras optimizers. if name == "lr": name = "learning_rate" if name in self._hyper: return self._get_hyper(name) raise e def __setattr__(self, name, value): """Override setattr to support dynamic hyperparameter setting.""" # Backwards compatibility with Keras optimizers. if name == "lr": name = "learning_rate" if hasattr(self, "_hyper") and name in self._hyper: self._set_hyper(name, value) else: super(OptimizerV2, self).__setattr__(name, value) def get_slot_names(self): """A list of names for this optimizer's slots.""" return self._slot_names def add_slot(self, var, slot_name, initializer="zeros"): """Add a new slot variable for `var`.""" if slot_name not in self._slot_names: self._slot_names.append(slot_name) var_key = _var_key(var) slot_dict = self._slots.setdefault(var_key, {}) weight = slot_dict.get(slot_name, None) if weight is None: if isinstance(initializer, six.string_types) or callable(initializer): initializer = initializers.get(initializer) initial_value = functools.partial( initializer, shape=var.shape, dtype=var.dtype) else: initial_value = initializer strategy = distribute_ctx.get_strategy() with strategy.extended.colocate_vars_with(var): weight = tf_variables.Variable( name="%s/%s" % (var._shared_name, slot_name), # pylint: disable=protected-access dtype=var.dtype, trainable=False, initial_value=initial_value) backend.track_variable(weight) slot_dict[slot_name] = weight self._restore_slot_variable( slot_name=slot_name, variable=var, slot_variable=weight) self._weights.append(weight) return weight def get_slot(self, var, slot_name): var_key = _var_key(var) slot_dict = self._slots[var_key] return slot_dict[slot_name] def _prepare(self, var_list): # pre-build the decayed learning rate only if learning rate exists. 
if var_list and "learning_rate" in self._hyper: var_dtypes = set([var.dtype.base_dtype for var in var_list]) self._decayed_lr_t = {} for var_dtype in var_dtypes: self._decayed_lr_t[var_dtype] = self._decayed_lr(var_dtype) def _create_hypers(self): if self._hypers_created: return # Iterate hyper values deterministically. for name, value in sorted(self._hyper.items()): if isinstance(value, ops.Tensor) or callable(value): continue else: self._hyper[name] = self.add_weight( name, shape=[], trainable=False, initializer=value, aggregation=tf_variables.VariableAggregation.ONLY_FIRST_REPLICA) self._hypers_created = True @property def iterations(self): """Variable. The number of training steps this Optimizer has run.""" if self._iterations is None: self._iterations = self.add_weight( "iter", shape=[], dtype=dtypes.int64, trainable=False, aggregation=tf_variables.VariableAggregation.ONLY_FIRST_REPLICA) self._weights.append(self._iterations) return self._iterations @iterations.setter def iterations(self, variable): if self._iterations is not None: raise RuntimeError("Cannot set `iterations` to a new Variable after " "the Optimizer weights have been created") self._iterations = variable self._weights.append(self._iterations) def _decayed_lr(self, var_dtype): """Get decayed learning rate as a Tensor with dtype=var_dtype.""" lr_t = self._get_hyper("learning_rate", var_dtype) if isinstance(lr_t, learning_rate_schedule.LearningRateSchedule): local_step = math_ops.cast(self.iterations, var_dtype) lr_t = math_ops.cast(lr_t(local_step), var_dtype) if self._initial_decay > 0.: local_step = math_ops.cast(self.iterations, var_dtype) decay_t = self._get_hyper("decay", var_dtype) lr_t = lr_t / (1. + decay_t * local_step) return lr_t @abc.abstractmethod def get_config(self): """Returns the config of the optimimizer. An optimizer config is a Python dictionary (serializable) containing the configuration of an optimizer. The same optimizer can be reinstantiated later (without any saved state) from this configuration. Returns: Python dictionary. """ config = {"name": self._name} if hasattr(self, "clipnorm"): config["clipnorm"] = self.clipnorm if hasattr(self, "clipvalue"): config["clipvalue"] = self.clipvalue return config @classmethod def from_config(cls, config, custom_objects=None): """Creates an optimizer from its config. This method is the reverse of `get_config`, capable of instantiating the same optimizer from the config dictionary. Arguments: config: A Python dictionary, typically the output of get_config. custom_objects: A Python dictionary mapping names to additional Python objects used to create this optimizer, such as a function used for a hyperparameter. Returns: An optimizer instance. 
""" if "lr" in config: config["learning_rate"] = config.pop("lr") if "learning_rate" in config: if isinstance(config["learning_rate"], dict): config["learning_rate"] = learning_rate_schedule.deserialize( config["learning_rate"], custom_objects=custom_objects) return cls(**config) def _serialize_hyperparameter(self, hyperparameter_name): """Serialize a hyperparameter that can be a float, callable, or Tensor.""" value = self._hyper[hyperparameter_name] if isinstance(value, learning_rate_schedule.LearningRateSchedule): return learning_rate_schedule.serialize(value) if callable(value): return value() if tensor_util.is_tensor(value): return backend.get_value(value) return value def variables(self): """Returns variables of this Optimizer based on the order created.""" return self._weights @property def weights(self): """Returns variables of this Optimizer based on the order created.""" return self._weights def get_weights(self): params = self.weights return backend.batch_get_value(params) # TODO(tanzheny): Maybe share this logic with base_layer. def set_weights(self, weights): params = self.weights if len(params) != len(weights): raise ValueError( "You called `set_weights(weights)` on optimizer " + self._name + " with a weight list of length " + str(len(weights)) + ", but the optimizer was expecting " + str(len(params)) + " weights. Provided weights: " + str(weights)[:50] + "...") if not params: return weight_value_tuples = [] param_values = backend.batch_get_value(params) for pv, p, w in zip(param_values, params, weights): if pv.shape != w.shape: raise ValueError("Optimizer weight shape " + str(pv.shape) + " not compatible with " "provided weight shape " + str(w.shape)) weight_value_tuples.append((p, w)) backend.batch_set_value(weight_value_tuples) def add_weight(self, name, shape, dtype=None, initializer="zeros", trainable=None, synchronization=tf_variables.VariableSynchronization.AUTO, aggregation=tf_variables.VariableAggregation.NONE): if dtype is None: dtype = dtypes.float32 if isinstance(initializer, six.string_types) or callable(initializer): initializer = initializers.get(initializer) if synchronization == tf_variables.VariableSynchronization.ON_READ: if trainable: raise ValueError( "Synchronization value can be set to " "VariableSynchronization.ON_READ only for non-trainable variables. " "You have specified trainable=True and " "synchronization=VariableSynchronization.ON_READ.") else: # Set trainable to be false when variable is to be synced on read. trainable = False elif trainable is None: trainable = True variable = self._add_variable_with_custom_getter( name=name, shape=shape, getter=base_layer_utils.make_variable, overwrite=True, initializer=initializer, dtype=dtype, trainable=trainable, use_resource=True, synchronization=synchronization, aggregation=aggregation) backend.track_variable(variable) return variable def _init_set_name(self, name, zero_based=True): if not name: self._name = backend.unique_object_name( generic_utils.to_snake_case(self.__class__.__name__), zero_based=zero_based) else: self._name = name def _assert_valid_dtypes(self, tensors): """Asserts tensors are all valid types (see `_valid_dtypes`). Args: tensors: Tensors to check. Raises: ValueError: If any tensor is not a valid type. """ valid_dtypes = self._valid_dtypes() for t in tensors: dtype = t.dtype.base_dtype if dtype not in valid_dtypes: raise ValueError("Invalid type %r for %s, expected: %s." 
% (dtype, t.name, [v for v in valid_dtypes])) def _valid_dtypes(self): """Valid types for loss, variables and gradients. Subclasses should override to allow other float types. Returns: Valid types for loss, variables and gradients. """ return set( [dtypes.float16, dtypes.bfloat16, dtypes.float32, dtypes.float64]) def _call_if_callable(self, param): """Call the function if param is callable.""" return param() if callable(param) else param def _resource_apply_dense(self, grad, handle): """Add ops to apply dense gradients to the variable `handle`. Args: grad: a `Tensor` representing the gradient. handle: a `Tensor` of dtype `resource` which points to the variable to be updated. Returns: An `Operation` which updates the value of the variable. """ raise NotImplementedError() def _resource_apply_sparse_duplicate_indices(self, grad, handle, indices): """Add ops to apply sparse gradients to `handle`, with repeated indices. Optimizers which override this method must deal with repeated indices. See the docstring of `_apply_sparse_duplicate_indices` for details. By default the correct behavior, to sum non-unique indices and their associated gradients, is enforced by first pre-processing `grad` and `indices` and passing them on to `_resource_apply_sparse`. Optimizers which deal correctly with duplicate indices may instead override this method to avoid the overhead of summing. Args: grad: a `Tensor` representing the gradient for the affected indices. handle: a `Tensor` of dtype `resource` which points to the variable to be updated. indices: a `Tensor` of integral type representing the indices for which the gradient is nonzero. Indices may be repeated. Returns: An `Operation` which updates the value of the variable. """ summed_grad, unique_indices = _deduplicate_indexed_slices( values=grad, indices=indices) return self._resource_apply_sparse(summed_grad, handle, unique_indices) def _resource_apply_sparse(self, grad, handle, indices): """Add ops to apply sparse gradients to the variable `handle`. Similar to `_apply_sparse`, the `indices` argument to this method has been de-duplicated. Optimizers which deal correctly with non-unique indices may instead override `_resource_apply_sparse_duplicate_indices` to avoid this overhead. Args: grad: a `Tensor` representing the gradient for the affected indices. handle: a `Tensor` of dtype `resource` which points to the variable to be updated. indices: a `Tensor` of integral type representing the indices for which the gradient is nonzero. Indices are unique. Returns: An `Operation` which updates the value of the variable. """ raise NotImplementedError() def _resource_scatter_add(self, x, i, v): with ops.control_dependencies( [resource_variable_ops.resource_scatter_add(x.handle, i, v)]): return x.value() def _resource_scatter_update(self, x, i, v): with ops.control_dependencies( [resource_variable_ops.resource_scatter_update(x.handle, i, v)]): return x.value() # --------------- # For implementing the trackable interface # --------------- def _restore_slot_variable(self, slot_name, variable, slot_variable): """Restore a newly created slot variable's value.""" variable_key = _var_key(variable) deferred_restorations = self._deferred_slot_restorations.get( slot_name, {}).pop(variable_key, []) # Iterate over restores, highest restore UID first to minimize the number # of assignments. 
deferred_restorations.sort(key=lambda position: position.restore_uid, reverse=True) for checkpoint_position in deferred_restorations: checkpoint_position.restore(slot_variable) def _create_or_restore_slot_variable( self, slot_variable_position, slot_name, variable): """Restore a slot variable's value, possibly creating it. Called when a variable which has an associated slot variable is created or restored. When executing eagerly, we create the slot variable with a restoring initializer. No new variables are created when graph building. Instead, _restore_slot_variable catches these after normal creation and adds restore ops to the graph. This method is nonetheless important when graph building for the case when a slot variable has already been created but `variable` has just been added to a dependency graph (causing us to realize that the slot variable needs to be restored). Args: slot_variable_position: A `trackable._CheckpointPosition` object indicating the slot variable `Trackable` object to be restored. slot_name: The name of this `Optimizer`'s slot to restore into. variable: The variable object this slot is being created for. """ variable_key = _var_key(variable) slot_dict = self._slots.get(variable_key, {}) slot_variable = slot_dict.get(slot_name, None) if (slot_variable is None and context.executing_eagerly() and slot_variable_position.is_simple_variable() # Defer slot variable creation if there is an active variable creator # scope. Generally we'd like to eagerly create/restore slot variables # when possible, but this may mean that scopes intended to catch # `variable` also catch its eagerly created slot variable # unintentionally (specifically make_template would add a dependency on # a slot variable if not for this case). Deferring is mostly harmless # (aside from double initialization), and makes variable creator scopes # behave the same way they do when graph building. and not ops.get_default_graph()._variable_creator_stack): # pylint: disable=protected-access initializer = trackable.CheckpointInitialValue( checkpoint_position=slot_variable_position) slot_variable = self.add_slot( var=variable, initializer=initializer, slot_name=slot_name) # Slot variables are not owned by any one object (because we don't want to # save the slot variable if the optimizer is saved without the non-slot # variable, or if the non-slot variable is saved without the optimizer; # it's a dependency hypergraph with edges of the form (optimizer, non-slot # variable, variable)). So we don't _track_ slot variables anywhere, and # instead special-case this dependency and otherwise pretend it's a normal # graph. if slot_variable is not None: # If we've either made this slot variable, or if we've pulled out an # existing slot variable, we should restore it. slot_variable_position.restore(slot_variable) else: # We didn't make the slot variable. Defer restoring until it gets created # normally. We keep a list rather than the one with the highest restore # UID in case slot variables have their own dependencies, in which case # those could differ between restores. 
      self._deferred_slot_restorations.setdefault(
          slot_name, {}).setdefault(variable_key, []).append(
              slot_variable_position)


def _filter_grads(grads_and_vars):
  """Filter out (grad, var) pairs whose gradient is None."""
  grads_and_vars = tuple(grads_and_vars)
  if not grads_and_vars:
    return grads_and_vars
  filtered = []
  vars_with_empty_grads = []
  for grad, var in grads_and_vars:
    if grad is None:
      vars_with_empty_grads.append(var)
    else:
      filtered.append((grad, var))
  filtered = tuple(filtered)
  if not filtered:
    raise ValueError("No gradients provided for any variable: %s." %
                     ([v.name for _, v in grads_and_vars],))
  if vars_with_empty_grads:
    logging.warning(
        ("Gradients do not exist for variables %s when minimizing the loss."),
        ([v.name for v in vars_with_empty_grads]))
  return filtered


def _var_key(var):
  """Key for representing a primary variable, for looking up slots.

  In graph mode the name is derived from the var shared name.
  In eager mode the name is derived from the var unique id.
  If distribution strategy exists, get the primary variable first.

  Args:
    var: the variable.

  Returns:
    the unique name of the variable.
  """

  # pylint: disable=protected-access
  # Get the distributed variable if it exists.
  if hasattr(var, "_distributed_container"):
    var = var._distributed_container()
  if var._in_graph_mode:
    return var._shared_name
  return var._unique_id


def _get_slot_key_from_var(var, slot_name):
  """Get the slot key for the variable: var_name/slot_name."""
  name = _var_key(var)
  return name + "/" + slot_name


class RestoredOptimizer(OptimizerV2):
  """A non-functional Optimizer implementation for checkpoint compatibility.

  Holds slot variables and hyperparameters when an optimizer is restored from a
  SavedModel. These variables may be referenced in functions along with ops
  created by the original optimizer, but currently we do not support using the
  optimizer object itself (e.g. through `apply_gradients`).
  """
  # TODO(allenl): Make the restored optimizer functional by tracing its apply
  # methods.

  def __init__(self):
    super(RestoredOptimizer, self).__init__("RestoredOptimizer")
    self._hypers_created = True

  def get_config(self):
    # TODO(allenl): Save and restore the Optimizer's config
    raise NotImplementedError(
        "Restoring functional Optimizers from SavedModels is not currently "
        "supported. Please file a feature request if this limitation bothers "
        "you.")


revived_types.register_revived_type(
    "optimizer",
    lambda obj: isinstance(obj, OptimizerV2),
    versions=[revived_types.VersionedTypeRegistration(
        object_factory=lambda proto: RestoredOptimizer(),
        version=1,
        min_producer_version=1,
        min_consumer_version=1,
        setter=RestoredOptimizer._set_hyper  # pylint: disable=protected-access
    )])
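

# A minimal sketch of a custom optimizer built on `OptimizerV2`, as referenced
# in the "Write a customized optimizer" section of the class docstring above.
# It is illustrative only and not part of the TensorFlow API: the class name
# and defaults are chosen here, the update rule is plain gradient descent, and
# sparse gradients are deliberately left unsupported.
class _ExampleSGD(OptimizerV2):
  """Sketch: plain gradient descent expressed through the OptimizerV2 hooks."""

  def __init__(self, learning_rate=0.01, name="ExampleSGD", **kwargs):
    super(_ExampleSGD, self).__init__(name, **kwargs)
    # Register the learning rate through the hyperparameter facility so that
    # floats, tensors, callables and the legacy `lr=` kwarg are all handled.
    self._set_hyper("learning_rate", kwargs.get("lr", learning_rate))

  def _resource_apply_dense(self, grad, var):
    # Dense update: var <- var - decayed_lr * grad.
    lr_t = self._decayed_lr(var.dtype.base_dtype)
    return var.assign_sub(lr_t * grad, use_locking=self._use_locking)

  def _resource_apply_sparse(self, grad, var, indices):
    # A real optimizer would scatter-update only the rows in `indices`;
    # this sketch does not implement sparse gradients.
    raise NotImplementedError("Sparse gradients are not supported here.")

  def get_config(self):
    config = super(_ExampleSGD, self).get_config()
    config.update(
        {"learning_rate": self._serialize_hyperparameter("learning_rate")})
    return config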
tensorflow-master
tensorflow/python/keras/optimizer_v2/optimizer_v2.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Adadelta Optimizer.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.eager import context from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import test_util from tensorflow.python.keras.optimizer_v2 import adadelta from tensorflow.python.ops import embedding_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import resource_variable_ops from tensorflow.python.ops import variables from tensorflow.python.platform import test class AdadeltaOptimizerTest(test.TestCase): def doTestBasic(self, use_resource=False, use_callable_params=False): num_updates = 4 # number of ADADELTA steps to perform for dtype in [dtypes.half, dtypes.float32]: for grad in [0.2, 0.1, 0.01]: for lr in [1.0, 0.5, 0.1]: var0_init = [1.0, 2.0] var1_init = [3.0, 4.0] if use_resource: var0 = resource_variable_ops.ResourceVariable( var0_init, dtype=dtype) var1 = resource_variable_ops.ResourceVariable( var1_init, dtype=dtype) else: var0 = variables.Variable(var0_init, dtype=dtype) var1 = variables.Variable(var1_init, dtype=dtype) grads = constant_op.constant([grad, grad], dtype=dtype) accum = 0.0 accum_update = 0.0 # ADADELTA gradient optimizer rho = 0.95 epsilon = 1e-8 if use_callable_params: adadelta_opt = adadelta.Adadelta( learning_rate=lambda: lr, # pylint: disable=cell-var-from-loop rho=lambda: rho, # pylint: disable=cell-var-from-loop epsilon=epsilon) # pylint: disable=cell-var-from-loop else: adadelta_opt = adadelta.Adadelta( learning_rate=lr, rho=rho, epsilon=epsilon) if not context.executing_eagerly(): adadelta_update = adadelta_opt.apply_gradients( zip([grads, grads], [var0, var1])) self.evaluate(variables.global_variables_initializer()) # Assign slots slot = [None] * 2 slot_update = [None] * 2 slot[0] = adadelta_opt.get_slot(var0, "accum_grad") self.assertEqual(slot[0].shape, var0.shape) slot_update[0] = adadelta_opt.get_slot(var0, "accum_var") self.assertEqual(slot_update[0].shape, var0.shape) slot[1] = adadelta_opt.get_slot(var1, "accum_grad") self.assertEqual(slot[1].shape, var1.shape) slot_update[1] = adadelta_opt.get_slot(var1, "accum_var") self.assertEqual(slot_update[1].shape, var1.shape) # Fetch params to validate initial values self.assertAllClose(var0_init, self.evaluate(var0)) self.assertAllClose(var1_init, self.evaluate(var1)) update = [None] * num_updates tot_update = 0 for step in range(num_updates): # Run adadelta update for comparison if not context.executing_eagerly(): self.evaluate(adadelta_update) else: adadelta_opt.apply_gradients(zip([grads, grads], [var0, var1])) # Perform initial update without previous accum values accum = accum * rho + (grad**2) * (1 - rho) update[step] = ( np.sqrt(accum_update + 
epsilon) * (1. / np.sqrt(accum + epsilon)) * grad) accum_update = ( accum_update * rho + (update[step]**2) * (1.0 - rho)) tot_update += update[step] * lr if not context.executing_eagerly(): # Check that the accumulators have been updated # TODO(lxuechen): This is hard to test in eager mode for slot_idx in range(2): self.assertAllCloseAccordingToType( np.array([accum, accum], dtype=dtype.as_numpy_dtype()), self.evaluate(slot[slot_idx]), rtol=1e-5) self.assertAllCloseAccordingToType( np.array( [accum_update, accum_update], dtype=dtype.as_numpy_dtype()), self.evaluate(slot_update[slot_idx]), rtol=1e-5) # Check that the parameters have been updated self.assertAllCloseAccordingToType( np.array( [var0_init[0] - tot_update, var0_init[1] - tot_update], dtype=dtype.as_numpy_dtype()), self.evaluate(var0), rtol=1e-5) self.assertAllCloseAccordingToType( np.array( [var1_init[0] - tot_update, var1_init[1] - tot_update], dtype=dtype.as_numpy_dtype()), self.evaluate(var1), rtol=1e-5) @test_util.run_in_graph_and_eager_modes(reset_test=True) def testResourceBasic(self): self.doTestBasic(use_resource=True) def testBasicCallableParams(self): with context.eager_mode(): self.doTestBasic(use_resource=True, use_callable_params=True) @test_util.run_deprecated_v1 def testMinimizeSparseResourceVariable(self): for dtype in [dtypes.half, dtypes.float32, dtypes.float64]: with self.cached_session(): var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype) x = constant_op.constant([[4.0], [5.0]], dtype=dtype) def loss(): pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x) # pylint: disable=cell-var-from-loop return pred * pred sgd_op = adadelta.Adadelta(1.0, 1.0, 1.0).minimize( loss, var_list=[var0]) variables.global_variables_initializer().run() # Fetch params to validate initial values self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0)) # Run 1 step of sgd sgd_op.run() # Validate updated params self.assertAllCloseAccordingToType([[-111, -138]], self.evaluate(var0)) def testConstructAdadeltaWithLR(self): opt = adadelta.Adadelta(lr=1.0, rho=0.9, epsilon=1.) opt_2 = adadelta.Adadelta(learning_rate=0.1, rho=0.9, epsilon=1., lr=1.0) opt_3 = adadelta.Adadelta(learning_rate=0.1, rho=0.9, epsilon=1.) self.assertIsInstance(opt.lr, variables.Variable) self.assertIsInstance(opt_2.lr, variables.Variable) self.assertIsInstance(opt_3.lr, variables.Variable) self.evaluate(variables.global_variables_initializer()) self.assertAllClose(self.evaluate(opt.lr), (1.0)) self.assertAllClose(self.evaluate(opt_2.lr), (1.0)) self.assertAllClose(self.evaluate(opt_3.lr), (0.1)) def testConstructAdadeltaWithEpsilonValues(self): opt = adadelta.Adadelta(epsilon=None) self.assertEqual(opt.epsilon, 1e-7) opt = adadelta.Adadelta(epsilon=1e-8) self.assertEqual(opt.epsilon, 1e-8) if __name__ == "__main__": test.main()
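

# For reference, a NumPy sketch of the Adadelta recurrence that doTestBasic
# above recomputes by hand (`np` is already imported at the top of this file).
# The helper name and signature are chosen here for exposition only; they are
# not part of the test or of tf.keras.
def _adadelta_reference_step(x, grad, accum, accum_update,
                             lr=1.0, rho=0.95, epsilon=1e-8):
  """Runs one Adadelta step on parameter `x` and returns the new state."""
  # Accumulate the decayed squared gradient.
  accum = rho * accum + (1. - rho) * grad**2
  # Scale the gradient by RMS[delta x] / RMS[g].
  update = np.sqrt(accum_update + epsilon) / np.sqrt(accum + epsilon) * grad
  # Accumulate the decayed squared update.
  accum_update = rho * accum_update + (1. - rho) * update**2
  # Apply the learning-rate-scaled update.
  return x - lr * update, accum, accum_update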
tensorflow-master
tensorflow/python/keras/optimizer_v2/adadelta_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Functional test for OptimizerV2.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python import keras from tensorflow.python.eager import context from tensorflow.python.eager import def_function from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.keras import backend from tensorflow.python.keras import callbacks from tensorflow.python.keras import keras_parameterized from tensorflow.python.keras import losses from tensorflow.python.keras import optimizers from tensorflow.python.keras import testing_utils from tensorflow.python.keras.engine import input_layer from tensorflow.python.keras.engine import sequential from tensorflow.python.keras.engine import training from tensorflow.python.keras.layers import core from tensorflow.python.keras.optimizer_v2 import adadelta from tensorflow.python.keras.optimizer_v2 import adagrad from tensorflow.python.keras.optimizer_v2 import adam from tensorflow.python.keras.optimizer_v2 import adamax from tensorflow.python.keras.optimizer_v2 import gradient_descent from tensorflow.python.keras.optimizer_v2 import learning_rate_schedule from tensorflow.python.keras.optimizer_v2 import nadam from tensorflow.python.keras.optimizer_v2 import optimizer_v2 from tensorflow.python.keras.optimizer_v2 import rmsprop from tensorflow.python.ops import array_ops from tensorflow.python.ops import clip_ops from tensorflow.python.ops import resource_variable_ops from tensorflow.python.ops import state_ops from tensorflow.python.ops import variables from tensorflow.python.platform import test from tensorflow.python.training import momentum from tensorflow.python.training import training_util class OptimizerTest(test.TestCase): @test_util.run_in_graph_and_eager_modes def testBasic(self): for _, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]): with self.cached_session(): var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype) var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype) loss = lambda: 5 * var0 + 3 * var1 # pylint: disable=cell-var-from-loop sgd = gradient_descent.SGD(3.0) self.evaluate(variables.global_variables_initializer()) # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], self.evaluate(var0)) self.assertAllClose([3.0, 4.0], self.evaluate(var1)) # Run 1 step of sgd through optimizer opt_op = sgd.minimize(loss, var_list=[var0, var1]) self.evaluate(variables.global_variables_initializer()) self.evaluate(opt_op) # Validate updated params self.assertAllClose([-14., -13.], self.evaluate(var0)) self.assertAllClose([-6., -5.], self.evaluate(var1)) 
@test_util.run_in_graph_and_eager_modes def testAdaptiveLearningRate(self): for dtype in [dtypes.half, dtypes.float32, dtypes.float64]: var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype) var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype) def loss(): return 5 * var0 + 3 * var1 # pylint: disable=cell-var-from-loop sgd = gradient_descent.SGD(1.0) self.evaluate(variables.global_variables_initializer()) # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], self.evaluate(var0)) self.assertAllClose([3.0, 4.0], self.evaluate(var1)) # Run 1 step of sgd through optimizer opt_op = sgd.minimize(loss, [var0, var1]) self.evaluate(variables.global_variables_initializer()) self.evaluate(opt_op) # Validate updated params # var0 = [1., 2.] - 1.0 * [5, 5] self.assertAllClose([-4., -3.], self.evaluate(var0)) # var1 = [3., 4.] - 1.0 * [3, 3] self.assertAllClose([0., 1.], self.evaluate(var1)) sgd.learning_rate = 0.5 if context.executing_eagerly(): sgd.minimize(loss, [var0, var1]) else: self.evaluate(opt_op) # Validate updated params # var0 = [-4., -3.] - 0.5 * [5, 5] self.assertAllClose([-6.5, -5.5], self.evaluate(var0)) # var1 = [0., 1.] - 0.5 * [3, 3] self.assertAllClose([-1.5, -0.5], self.evaluate(var1)) sgd.learning_rate = learning_rate_schedule.InverseTimeDecay( 0.5, decay_steps=1.0, decay_rate=0.5) if context.executing_eagerly(): sgd.minimize(loss, [var0, var1]) else: self.evaluate(opt_op) @test_util.run_in_graph_and_eager_modes def testPrecomputedGradient(self): for dtype in [dtypes.half, dtypes.float32, dtypes.float64]: with self.cached_session(): var0 = variables.Variable([1.0, 2.0], dtype=dtype) var1 = variables.Variable([3.0, 4.0], dtype=dtype) loss = lambda: 5 * var0 + 3 * var1 # pylint: disable=cell-var-from-loop grad_loss = constant_op.constant([42, -42], dtype=dtype) sgd = gradient_descent.SGD(3.0) self.evaluate(variables.global_variables_initializer()) # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], self.evaluate(var0)) self.assertAllClose([3.0, 4.0], self.evaluate(var1)) # Run 1 step of sgd through optimizer opt_op = sgd.minimize(loss, var_list=[var0, var1], grad_loss=grad_loss) self.evaluate(variables.global_variables_initializer()) self.evaluate(opt_op) # Validate updated params self.assertAllClose([1.0 - 3 * 5 * 42.0, 2.0 - 3 * 5 * (-42.0)], self.evaluate(var0)) self.assertAllClose([3.0 - 3 * 3 * 42.0, 4.0 - 3 * 3 * (-42.0)], self.evaluate(var1)) @test_util.run_in_graph_and_eager_modes def testNoGradients(self): for _, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]): with self.cached_session(): var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype) var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype) loss = lambda: 5 * var0 # pylint: disable=cell-var-from-loop sgd_op = gradient_descent.SGD(3.0) with self.assertRaisesRegexp(ValueError, 'No gradients'): # var1 has no gradient sgd_op.minimize(loss, var_list=[var1]) @test_util.run_in_graph_and_eager_modes def testNoGradientsForAnyVariables_Minimize(self): for _, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]): with self.cached_session(): var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype) var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype) loss = lambda: constant_op.constant(5.0) sgd_op = gradient_descent.SGD(3.0) with self.assertRaisesRegexp(ValueError, 'No gradients provided for any variable'): sgd_op.minimize(loss, var_list=[var0, var1]) 
@test_util.run_in_graph_and_eager_modes def testNoGradientsForAnyVariables_ApplyGradients(self): for _, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]): with self.cached_session(): var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype) var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype) sgd_op = gradient_descent.SGD(3.0) with self.assertRaisesRegexp(ValueError, 'No gradients provided for any variable'): sgd_op.apply_gradients([(None, var0), (None, var1)]) @test_util.run_in_graph_and_eager_modes def testGradientsAsVariables(self): for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]): with self.cached_session(): var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype) var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype) loss = lambda: 5 * var0 + 3 * var1 # pylint: disable=cell-var-from-loop sgd = gradient_descent.SGD(3.0) grads_and_vars = sgd._compute_gradients(loss, [var0, var1]) # Convert gradients to tf.Variables converted_grads = [ resource_variable_ops.ResourceVariable( array_ops.zeros([2], dtype), name='c_%d_%d' % (i, j)) for j, gv in enumerate(grads_and_vars) ] convert_ops = [ state_ops.assign(converted_grads[j], gv[0]) for j, gv in enumerate(grads_and_vars) ] # Run convert_ops to achieve the gradients converting self.evaluate(variables.global_variables_initializer()) self.evaluate(convert_ops) # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], self.evaluate(var0)) self.assertAllClose([3.0, 4.0], self.evaluate(var1)) # Run 1 step of sgd through optimizer converted_grads_and_vars = list(zip(converted_grads, [var0, var1])) opt_op = sgd.apply_gradients(converted_grads_and_vars) self.evaluate(variables.global_variables_initializer()) self.evaluate(convert_ops) self.evaluate(opt_op) # Validate updated params self.assertAllClose([-14., -13.], self.evaluate(var0)) self.assertAllClose([-6., -5.], self.evaluate(var1)) @test_util.run_in_graph_and_eager_modes def testComputeGradientsWithTensors(self): with self.cached_session(): x = ops.convert_to_tensor(1.0) def f(): return x * x sgd = gradient_descent.SGD(3.0) grads_and_vars = sgd._compute_gradients(f, [x]) self.assertEqual(1, len(grads_and_vars)) grad, x_as_var = grads_and_vars[0] self.assertIs(x, x_as_var) self.assertEqual(2.0, self.evaluate(grad)) with self.assertRaises(NotImplementedError): sgd.apply_gradients(grads_and_vars) @test_util.run_in_graph_and_eager_modes def testConstraint(self): constraint_01 = lambda x: clip_ops.clip_by_value(x, -0.1, 0.) constraint_0 = lambda x: clip_ops.clip_by_value(x, 0., 1.) 
with self.cached_session(): var0 = variables.Variable([1.0, 2.0], constraint=constraint_01) var1 = variables.Variable([3.0, 4.0], constraint=constraint_0) loss = lambda: 5 * var0 + 3 * var1 sgd = gradient_descent.SGD(3.0) self.evaluate(variables.global_variables_initializer()) # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], self.evaluate(var0)) self.assertAllClose([3.0, 4.0], self.evaluate(var1)) # Run 1 step of sgd through optimizer opt_op = sgd.minimize(loss, var_list=[var0, var1]) self.evaluate(variables.global_variables_initializer()) self.evaluate(opt_op) # Validate updated params self.assertAllClose([-0.1, -0.1], self.evaluate(var0)) self.assertAllClose([0., 0.], self.evaluate(var1)) @test_util.run_in_graph_and_eager_modes def testIterationWithoutMinimize(self): with self.cached_session(): sgd = gradient_descent.SGD(3.0) self.evaluate(sgd.iterations.initializer) self.assertEqual(0, self.evaluate(sgd.iterations)) @test_util.run_in_graph_and_eager_modes def testConfig(self): with self.cached_session(): opt = gradient_descent.SGD(learning_rate=1.0) config = opt.get_config() opt2 = gradient_descent.SGD.from_config(config) lr = opt._get_hyper('learning_rate') lr2 = opt2._get_hyper('learning_rate') self.evaluate(variables.global_variables_initializer()) # assert both are equal float values. self.assertEqual(self.evaluate(lr), self.evaluate(lr2)) var0 = variables.Variable([[1.0], [2.0]], dtype=dtypes.float32) loss = lambda: 3 * var0 # learning rate variable created when calling minimize. opt.minimize(loss, [var0]) opt3 = gradient_descent.SGD.from_config(config) lr3 = opt3._get_hyper('learning_rate') self.evaluate(variables.global_variables_initializer()) self.assertEqual(self.evaluate(lr), self.evaluate(lr3)) @test_util.run_in_graph_and_eager_modes def testConfigWithLearningRateDecay(self): with self.cached_session(): var0 = variables.Variable([[1.0], [2.0]], dtype=dtypes.float32) for decay_schedule in [ learning_rate_schedule.InverseTimeDecay( 0.5, decay_steps=1.0, decay_rate=0.1), learning_rate_schedule.PiecewiseConstantDecay( [5], [1., .5]) ]: step = 10 opt = gradient_descent.SGD(decay_schedule) config = opt.get_config() opt2 = gradient_descent.SGD.from_config(config) # assert both are equal float values. self.assertAllEqual( decay_schedule(step), opt._get_hyper('learning_rate')(step)) self.assertAllEqual( decay_schedule(step), opt2._get_hyper('learning_rate')(step)) loss = lambda: 3 * var0 # learning rate variable is created when calling minimize. 
opt.minimize(loss, [var0]) self.evaluate(variables.global_variables_initializer()) config = opt.get_config() opt3 = gradient_descent.SGD.from_config(config) self.assertAllEqual( self.evaluate(opt._get_hyper('learning_rate')(step)), opt3._get_hyper('learning_rate')(step)) @test_util.run_in_graph_and_eager_modes def testGradClipValue(self): with self.cached_session(): var = resource_variable_ops.ResourceVariable([1.0, 2.0]) loss = lambda: 3 * var opt = gradient_descent.SGD(learning_rate=1.0, clipvalue=1.0) opt_op = opt.minimize(loss, [var]) self.evaluate(variables.global_variables_initializer()) self.evaluate(opt_op) self.assertAllClose([0., 1.], self.evaluate(var)) @test_util.run_in_graph_and_eager_modes def testGradClipNorm(self): with self.cached_session(): var = resource_variable_ops.ResourceVariable([1.0]) loss = lambda: 3 * var opt = gradient_descent.SGD(learning_rate=1.0, clipnorm=1.0) opt_op = opt.minimize(loss, [var]) self.evaluate(variables.global_variables_initializer()) self.evaluate(opt_op) self.assertAllClose([0.], self.evaluate(var)) @test_util.run_in_graph_and_eager_modes def testInvalidClipNorm(self): with self.assertRaisesRegexp(ValueError, '>= 0'): gradient_descent.SGD(learning_rate=1.0, clipnorm=-1.0) @test_util.run_in_graph_and_eager_modes def testInvalidKwargs(self): with self.assertRaisesRegexp(TypeError, 'Unexpected keyword argument'): gradient_descent.SGD(learning_rate=1.0, invalidkwargs=1.0) @test_util.run_in_graph_and_eager_modes def testWeights(self): with self.cached_session(): opt1 = adam.Adam(learning_rate=1.0) var1 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtypes.float32) loss1 = lambda: 3 * var1 opt_op_1 = opt1.minimize(loss1, [var1]) self.evaluate(variables.global_variables_initializer()) config = opt1.get_config() opt2 = adam.Adam.from_config(config) var2 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtypes.float32) loss2 = lambda: 3 * var2 opt_op_2 = opt2.minimize(loss2, [var2]) weights = opt1.get_weights() # Assert set_weights and both variables get updated to same value. self.evaluate(variables.global_variables_initializer()) opt2.set_weights(weights) self.evaluate([opt_op_1, opt_op_2]) self.assertAllClose(self.evaluate(var1), self.evaluate(var2)) self.assertEqual(1, self.evaluate(opt1.iterations)) self.assertEqual(1, self.evaluate(opt2.iterations)) var3 = resource_variable_ops.ResourceVariable([1.0, 2.0, 3.0], dtype=dtypes.float32) var4 = resource_variable_ops.ResourceVariable([4.0, 5.0, 6.0], dtype=dtypes.float32) loss3 = lambda: 3 * var3 + 5 * var4 opt_op_3 = opt1.minimize(loss3, [var3, var4]) # Assert set_weights with ValueError since weight list does not match. self.evaluate(variables.global_variables_initializer()) weights = opt1.get_weights() with self.assertRaisesRegexp(ValueError, 'but the optimizer was'): opt2.set_weights(weights) # Assert set_weights and variables get updated to same value. 
var5 = resource_variable_ops.ResourceVariable([1.0, 2.0, 3.0], dtype=dtypes.float32) var6 = resource_variable_ops.ResourceVariable([4.0, 5.0, 6.0], dtype=dtypes.float32) loss4 = lambda: 3 * var5 + 5 * var6 opt_op_4 = opt2.minimize(loss4, [var5, var6]) self.evaluate(variables.global_variables_initializer()) opt2.set_weights(weights) self.evaluate([opt_op_3, opt_op_4]) self.assertAllClose( self.evaluate([var3, var4]), self.evaluate([var5, var6])) @test_util.run_in_graph_and_eager_modes def testGettingHyperParameters(self): opt = adam.Adam(learning_rate=1.0) var = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtypes.float32) loss = lambda: 3 * var opt_op = opt.minimize(loss, [var]) self.evaluate(variables.global_variables_initializer()) self.evaluate(opt_op) lr = self.evaluate(opt.lr) self.assertEqual(1.0, lr) opt.lr = 2.0 lr = self.evaluate(opt.lr) self.assertEqual(2.0, lr) self.evaluate(opt.lr.assign(3.0)) lr = self.evaluate(opt.lr) self.assertEqual(3.0, lr) with self.assertRaises(AttributeError): opt.not_an_attr += 3 @test_util.run_in_graph_and_eager_modes def testGettingHyperParametersWithLrInConstructor(self): opt = gradient_descent.SGD(lr=3.0) var = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtypes.float32) loss = lambda: 3 * var opt_op = opt.minimize(loss, [var]) self.evaluate(variables.global_variables_initializer()) self.evaluate(opt_op) self.assertTrue(isinstance(opt.lr, resource_variable_ops.ResourceVariable)) self.assertTrue( isinstance(opt.learning_rate, resource_variable_ops.ResourceVariable)) lr = self.evaluate(opt.lr) self.assertEqual(3.0, lr) opt.lr = 2.0 lr = self.evaluate(opt.lr) self.assertEqual(2.0, lr) self.evaluate(opt.lr.assign(4.0)) lr = self.evaluate(opt.lr) self.assertEqual(4.0, lr) @test_util.run_in_graph_and_eager_modes def testOptimizerWithKerasModel(self): a = input_layer.Input(shape=(3,), name='input_a') b = input_layer.Input(shape=(3,), name='input_b') dense = core.Dense(4, name='dense') c = dense(a) d = dense(b) e = core.Dropout(0.5, name='dropout')(c) model = training.Model([a, b], [d, e]) optimizer = gradient_descent.SGD(learning_rate=0.001) loss = 'mse' model.compile(optimizer, loss, metrics=['mae']) input_a_np = np.random.random((10, 3)) input_b_np = np.random.random((10, 3)) output_d_np = np.random.random((10, 4)) output_e_np = np.random.random((10, 4)) model.fit([input_a_np, input_b_np], [output_d_np, output_e_np], epochs=1, batch_size=5) @test_util.run_in_graph_and_eager_modes def testOptimizerWithCallbacks(self): np.random.seed(1331) input_np = np.random.random((10, 3)) output_np = np.random.random((10, 4)) a = input_layer.Input(shape=(3,), name='input_a') model = sequential.Sequential() model.add(core.Dense(4, name='dense')) model.add(core.Dropout(0.5, name='dropout')) model(a) optimizer = gradient_descent.SGD(learning_rate=0.1) model.compile(optimizer, loss='mse', metrics=['mae']) # This does not reduce the LR after the first epoch (due to low delta). cbks = [ callbacks.ReduceLROnPlateau( monitor='val_loss', factor=0.1, min_delta=0, patience=1, cooldown=5) ] model.fit( input_np, output_np, batch_size=10, validation_data=(input_np, output_np), callbacks=cbks, epochs=2, verbose=0) self.assertAllClose( float(backend.get_value(model.optimizer.lr)), 0.1, atol=1e-4) # This should reduce the LR after the first epoch (due to high delta). 
cbks = [ callbacks.ReduceLROnPlateau( monitor='val_loss', factor=0.1, min_delta=10, patience=1, cooldown=5) ] model.fit( input_np, output_np, batch_size=10, validation_data=(input_np, output_np), callbacks=cbks, epochs=2, verbose=2) self.assertAllClose( float(backend.get_value(model.optimizer.lr)), 0.01, atol=1e-4) def testOptimizerSetIterations(self): global_step = training_util.get_or_create_global_step() opt = adam.Adam(learning_rate=1.0) opt.iterations = global_step var = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtypes.float32) self.evaluate(variables.global_variables_initializer()) init_step_value = self.evaluate(global_step) loss = lambda: 3 * var opt_op = opt.minimize(loss, [var]) self.evaluate(variables.global_variables_initializer()) self.evaluate(opt_op) new_step_value = self.evaluate(global_step) self.assertEqual(new_step_value, init_step_value + 1) @test_util.run_in_graph_and_eager_modes def testOptimizerWithCallableVarList(self): train_samples = 20 input_dim = 1 num_classes = 2 (x, y), _ = testing_utils.get_test_data( train_samples=train_samples, test_samples=10, input_shape=(input_dim,), num_classes=num_classes) y = keras.utils.to_categorical(y) num_hidden = 1 model = testing_utils.get_small_sequential_mlp( num_hidden=num_hidden, num_classes=num_classes) opt = adam.Adam() loss = lambda: losses.mean_squared_error(model(x), y) var_list = lambda: model.trainable_weights with self.assertRaisesRegexp( ValueError, 'Weights for model .* have not yet been created'): var_list() train_op = opt.minimize(loss, var_list) if not context.executing_eagerly(): self.evaluate(variables.global_variables_initializer()) self.assertEqual( [[0.]], self.evaluate(opt.get_slot(var_list()[0], 'm'))) self.evaluate(train_op) self.assertNotEqual( [[0.]], self.evaluate(opt.get_slot(var_list()[0], 'm'))) self.assertLen(var_list(), 4) def testVarKey(self): with context.graph_mode(): a = variables.Variable([1., 2.], name='var') b = variables.Variable([1.], name='var') self.assertTrue(a._in_graph_mode) self.assertTrue(b._in_graph_mode) var_key = optimizer_v2._var_key(a) self.assertEqual('var', var_key) var_key = optimizer_v2._var_key(b) self.assertEqual('var_1', var_key) def testVarName(self): with context.graph_mode(): var = variables.Variable([1., 2.], name='var') loss = var + 1. opt = adam.Adam() opt.get_updates(loss, [var]) opt_vars = opt.variables() self.assertLen(opt_vars, 3) self.assertEqual('Adam/iter:0', opt_vars[0].name) self.assertEqual('Adam/var/m:0', opt_vars[1].name) var_2 = variables.Variable([1., 2.], name='var_2') loss = var_2 + 1. 
with backend.name_scope('outter'): opt.get_updates(loss, [var_2]) opt_vars = opt.variables() self.assertLen(opt_vars, 5) self.assertEqual('outter/Adam/var_2/m:0', opt_vars[3].name) @keras_parameterized.run_with_all_model_types class OptimizersCompatibilityTest(keras_parameterized.TestCase): def _testOptimizersCompatibility(self, opt_v1, opt_v2, test_weights=True): np.random.seed(1331) with self.cached_session(): train_samples = 20 input_dim = 3 num_classes = 2 (x, y), _ = testing_utils.get_test_data( train_samples=train_samples, test_samples=10, input_shape=(input_dim,), num_classes=num_classes) y = keras.utils.to_categorical(y) num_hidden = 5 model_v1 = testing_utils.get_small_sequential_mlp( num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim) model_v1.compile(opt_v1, loss='categorical_crossentropy', metrics=[]) model_v1.fit(x, y, batch_size=5, epochs=1) model_v2 = testing_utils.get_small_sequential_mlp( num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim) model_v2.set_weights(model_v1.get_weights()) model_v2.compile(opt_v2, loss='categorical_crossentropy', metrics=[]) model_v2._make_train_function() if test_weights: opt_v2.set_weights(opt_v1.get_weights()) hist_1 = model_v1.fit(x, y, batch_size=5, epochs=1, shuffle=False) hist_2 = model_v2.fit(x, y, batch_size=5, epochs=1, shuffle=False) self.assertAllClose(model_v1.get_weights(), model_v2.get_weights(), rtol=1e-5, atol=1e-5) self.assertAllClose(hist_1.history['loss'], hist_2.history['loss'], rtol=1e-5, atol=1e-5) def testAdadeltaCompatibility(self): opt_v1 = optimizers.Adadelta(lr=0.01) opt_v2 = adadelta.Adadelta(learning_rate=0.01) self._testOptimizersCompatibility(opt_v1, opt_v2) def testAdagradCompatibility(self): opt_v1 = optimizers.Adagrad(lr=0.01) opt_v2 = adagrad.Adagrad(learning_rate=0.01) self._testOptimizersCompatibility(opt_v1, opt_v2) def testAdamCompatibility(self): opt_v1 = optimizers.Adam() opt_v2 = adam.Adam() self._testOptimizersCompatibility(opt_v1, opt_v2) def testAdamaxCompatibility(self): opt_v1 = optimizers.Adamax(lr=0.01) opt_v2 = adamax.Adamax(learning_rate=0.01) self._testOptimizersCompatibility(opt_v1, opt_v2) def testNadamCompatibility(self): opt_v1 = optimizers.Nadam(lr=0.001) opt_v2 = nadam.Nadam(learning_rate=0.001) self._testOptimizersCompatibility(opt_v1, opt_v2) def testMomentumCompatibility(self): opt_v1 = optimizers.SGD(lr=0.01, momentum=0.9) opt_v2 = gradient_descent.SGD(learning_rate=0.01, momentum=0.9) self._testOptimizersCompatibility(opt_v1, opt_v2) def testRMSpropCompatibility(self): opt_v1 = optimizers.RMSprop() opt_v2 = rmsprop.RMSprop() self._testOptimizersCompatibility(opt_v1, opt_v2) def testSGDCompatibility(self): opt_v1 = optimizers.SGD(lr=0.01) opt_v2 = gradient_descent.SGD(learning_rate=0.01) self._testOptimizersCompatibility(opt_v1, opt_v2, False) def testNumericEquivalenceForNesterovMomentum(self): np.random.seed(1331) with self.cached_session(): train_samples = 20 input_dim = 3 num_classes = 2 (x, y), _ = testing_utils.get_test_data( train_samples=train_samples, test_samples=10, input_shape=(input_dim,), num_classes=num_classes) y = keras.utils.to_categorical(y) num_hidden = 5 model_k_v1 = testing_utils.get_small_sequential_mlp( num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim) model_k_v2 = testing_utils.get_small_sequential_mlp( num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim) model_k_v2.set_weights(model_k_v1.get_weights()) model_tf = testing_utils.get_small_sequential_mlp( num_hidden=num_hidden, 
num_classes=num_classes, input_dim=input_dim) model_tf.set_weights(model_k_v2.get_weights()) opt_k_v1 = optimizers.SGD(momentum=0.9, nesterov=True) opt_k_v2 = gradient_descent.SGD(momentum=0.9, nesterov=True) opt_tf = momentum.MomentumOptimizer( learning_rate=0.01, momentum=0.9, use_nesterov=True) model_k_v1.compile(opt_k_v1, loss='categorical_crossentropy', metrics=[]) model_k_v2.compile(opt_k_v2, loss='categorical_crossentropy', metrics=[]) model_tf.compile(opt_tf, loss='categorical_crossentropy', metrics=[]) hist_k_v1 = model_k_v1.fit(x, y, batch_size=5, epochs=10, shuffle=False) hist_k_v2 = model_k_v2.fit(x, y, batch_size=5, epochs=10, shuffle=False) hist_tf = model_tf.fit(x, y, batch_size=5, epochs=10, shuffle=False) self.assertAllClose(model_k_v1.get_weights(), model_tf.get_weights()) self.assertAllClose(model_k_v1.get_weights(), model_k_v2.get_weights()) self.assertAllClose(opt_k_v1.get_weights(), opt_k_v2.get_weights()) self.assertAllClose(hist_k_v1.history['loss'], hist_tf.history['loss']) self.assertAllClose(hist_k_v1.history['loss'], hist_k_v2.history['loss']) def testNumericEquivalenceForAmsgrad(self): np.random.seed(1331) with self.cached_session(): train_samples = 20 input_dim = 3 num_classes = 2 (x, y), _ = testing_utils.get_test_data( train_samples=train_samples, test_samples=10, input_shape=(input_dim,), num_classes=num_classes) y = keras.utils.to_categorical(y) num_hidden = 5 model_k_v1 = testing_utils.get_small_sequential_mlp( num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim) model_k_v2 = testing_utils.get_small_sequential_mlp( num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim) model_k_v2.set_weights(model_k_v1.get_weights()) opt_k_v1 = optimizers.Adam(amsgrad=True) opt_k_v2 = adam.Adam(amsgrad=True) model_k_v1.compile(opt_k_v1, loss='categorical_crossentropy', metrics=[]) model_k_v2.compile(opt_k_v2, loss='categorical_crossentropy', metrics=[]) hist_k_v1 = model_k_v1.fit(x, y, batch_size=5, epochs=10, shuffle=False) hist_k_v2 = model_k_v2.fit(x, y, batch_size=5, epochs=10, shuffle=False) self.assertAllClose(model_k_v1.get_weights(), model_k_v2.get_weights()) self.assertAllClose(opt_k_v1.get_weights(), opt_k_v2.get_weights()) self.assertAllClose(hist_k_v1.history['loss'], hist_k_v2.history['loss']) # Note: These tests are kept in a separate class to avoid bugs in some # distributions of Python that break AutoGraph which is used by tf.function. 
class OptimizerWithFunctionTest(test.TestCase): def testBasic(self): with context.eager_mode(): var = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtypes.float32) loss = lambda: 3 * var opt = adam.Adam(learning_rate=1.0) @def_function.function def fn(): opt.minimize(loss, [var]) return var self.assertAllClose([0., 1.], fn(), atol=1e-4) self.assertAllClose([-1, 0.], fn(), atol=1e-4) def testVarKeyWithVarCreatedInEager(self): with context.eager_mode(): a = variables.Variable([1., 2.], name='var') b = variables.Variable([1.], name='var') @test_util.also_run_as_tf_function def var_key_test(): self.assertFalse(a._in_graph_mode) self.assertFalse(b._in_graph_mode) var_key_a = optimizer_v2._var_key(a) self.assertStartsWith(var_key_a, 'var_') var_key_b = optimizer_v2._var_key(b) self.assertStartsWith(var_key_b, 'var_') self.assertNotEquals(var_key_a, var_key_b) var_key_test() def testLearningRateDecayUsedInTwoFunctions(self): with context.eager_mode(): a = variables.Variable([1., 2.], name='var') b = variables.Variable([1.], name='var') learning_rate_decay = learning_rate_schedule.InverseTimeDecay( 0.5, decay_steps=1.0, decay_rate=0.5) opt = adam.Adam(learning_rate=learning_rate_decay) loss_a = lambda: 3 * a loss_b = lambda: 2 * b @def_function.function def fn_a(): opt.minimize(loss_a, [a]) return a @def_function.function def fn_b(): opt.minimize(loss_b, [b]) return b fn_a() fn_b() if __name__ == '__main__': test.main()
tensorflow-master
tensorflow/python/keras/optimizer_v2/optimizer_v2_test.py
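# --- Illustrative sketch (not part of the TensorFlow sources above) ----------
# A minimal example of the OptimizerV2-under-tf.function pattern that
# OptimizerWithFunctionTest.testBasic exercises. Only public tf.keras APIs are
# used; the variable and loss values are arbitrary.
import tensorflow as tf

var = tf.Variable([1.0, 2.0], dtype=tf.float32)
loss = lambda: 3.0 * var  # gradient w.r.t. var is 3.0 for each element
opt = tf.keras.optimizers.Adam(learning_rate=1.0)

@tf.function
def train_step():
  # minimize() computes the gradient of `loss` and applies one Adam update.
  opt.minimize(loss, var_list=[var])
  return var

print(train_step().numpy())  # roughly [0., 1.] after the first step, as in the test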
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Nadam for TensorFlow.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import ops from tensorflow.python.keras import backend_config from tensorflow.python.keras.optimizer_v2 import learning_rate_schedule from tensorflow.python.keras.optimizer_v2 import optimizer_v2 from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import state_ops from tensorflow.python.util.tf_export import keras_export @keras_export('keras.optimizers.Nadam') class Nadam(optimizer_v2.OptimizerV2): r"""Optimizer that implements the NAdam algorithm. Much like Adam is essentially RMSprop with momentum, Nadam is Adam with Nesterov momentum. Initialization: $$m_0 := 0 \text{(Initialize 1st moment vector)}$$ $$v_0 := 0 \text{(Initialize 2nd moment vector)}$$ $$mu_0 := 1$$ $$t := 0 \text{(Initialize timestep)}$$ Computes: $$t := t + 1$$ $$\mu_t := \beta_1 * (1 - 0.5 * 0.96^{0.004 * t})$$ $$g' := g / (1 - \prod_{i=1}^{t}{\mu_i})$$ $$m_t := \beta_1 * m_{t-1} + (1 - \beta_1) * g$$ $$m' := m_t / (1 - \prod_{i=1}^{t+1}{\mu_i})$$ $$v_t := \beta_2 * v_{t-1} + (1 - \beta_2) * g * g$$ $$v' := v_t / (1 - \beta_2^t)$$ $$\bar{m} := (1 - \mu_t) * g' + \mu_{t+1} * m'$$ $$\theta_t := \theta_{t-1} - lr * \bar{m} / (\sqrt{v'} + \epsilon)$$ gradient is evaluated at theta(t) + momentum * v(t), and the variables always store theta + beta_1 * m / sqrt(v) instead of theta. References See [Dozat, T., 2015](http://cs229.stanford.edu/proj2015/054_report.pdf). """ def __init__(self, learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-7, name='Nadam', **kwargs): """Construct a new Nadam optimizer. Args: learning_rate: A Tensor or a floating point value. The learning rate. beta_1: A float value or a constant float tensor. The exponential decay rate for the 1st moment estimates. beta_2: A float value or a constant float tensor. The exponential decay rate for the 2nd moment estimates. epsilon: A small constant for numerical stability. name: Optional name for the operations created when applying gradients. Defaults to "Nadam". **kwargs: keyword arguments. Allowed to be {`clipnorm`, `clipvalue`, `lr`, `decay`}. `clipnorm` is clip gradients by norm; `clipvalue` is clip gradients by value, `decay` is included for backward compatibility to allow time inverse decay of learning rate. `lr` is included for backward compatibility, recommended to use `learning_rate` instead. """ # Backwards compatibility with keras NAdam optimizer.
kwargs['decay'] = kwargs.pop('schedule_decay', 0.004) learning_rate = kwargs.get('lr', learning_rate) if isinstance(learning_rate, learning_rate_schedule.LearningRateSchedule): raise ValueError('The Nadam optimizer does not support ' 'tf.keras.optimizers.LearningRateSchedules as the ' 'learning rate.') super(Nadam, self).__init__(name, **kwargs) self._set_hyper('learning_rate', kwargs.get('lr', learning_rate)) self._set_hyper('decay', self._initial_decay) self._set_hyper('beta_1', beta_1) self._set_hyper('beta_2', beta_2) self.epsilon = epsilon or backend_config.epsilon() self._m_cache = None def _create_slots(self, var_list): var_dtype = var_list[0].dtype.base_dtype if self._m_cache is None: self._m_cache = self.add_weight( 'momentum_cache', shape=[], dtype=var_dtype, initializer='ones', trainable=False) self._weights.append(self._m_cache) # Separate for-loops to respect the ordering of slot variables from v1. for var in var_list: # Create slots for the first moments. self.add_slot(var, 'm') for var in var_list: # Create slots for the second moments. self.add_slot(var, 'v') def _prepare(self, var_list): var_dtype = var_list[0].dtype.base_dtype beta_1_t = self._get_hyper('beta_1', var_dtype) local_step = math_ops.cast(self.iterations + 1, var_dtype) decay_base = math_ops.cast(0.96, var_dtype) self.m_cache_t = beta_1_t * ( 1. - 0.5 * (math_ops.pow(decay_base, self._initial_decay * local_step))) self.m_cache_t_1 = beta_1_t * ( 1. - 0.5 * (math_ops.pow(decay_base, self._initial_decay * (local_step + 1)))) m_schedule_new = self._m_cache * self.m_cache_t self.m_schedule_new = state_ops.assign( self._m_cache, m_schedule_new, use_locking=self._use_locking) self.m_schedule_next = self.m_schedule_new * self.m_cache_t_1 def _resource_apply_dense(self, grad, var): var_dtype = var.dtype.base_dtype lr_t = self._get_hyper('learning_rate', var_dtype) epsilon_t = ops.convert_to_tensor(self.epsilon, var_dtype) m = self.get_slot(var, 'm') v = self.get_slot(var, 'v') beta_1_t = self._get_hyper('beta_1', var_dtype) beta_2_t = self._get_hyper('beta_2', var_dtype) local_step = math_ops.cast(self.iterations + 1, var_dtype) g_prime = grad / (1. - self.m_schedule_new) m_t = beta_1_t * m + (1 - beta_1_t) * grad m_t = state_ops.assign(m, m_t, use_locking=self._use_locking) m_t_prime = m_t / (1. - self.m_schedule_next) v_t = beta_2_t * v + (1 - beta_2_t) * math_ops.square(grad) v_t = state_ops.assign(v, v_t, use_locking=self._use_locking) v_t_prime = v_t / (1. - math_ops.pow(beta_2_t, local_step)) m_t_bar = (1. - self.m_cache_t) * g_prime + self.m_cache_t_1 * m_t_prime var_t = var - lr_t * m_t_bar / (math_ops.sqrt(v_t_prime) + epsilon_t) return state_ops.assign(var, var_t, use_locking=self._use_locking).op def _resource_apply_sparse(self, grad, var, indices): var_dtype = var.dtype.base_dtype lr_t = self._get_hyper('learning_rate', var_dtype) epsilon_t = ops.convert_to_tensor(self.epsilon, var_dtype) v = self.get_slot(var, 'v') beta_1_t = self._get_hyper('beta_1', var_dtype) beta_2_t = self._get_hyper('beta_2', var_dtype) local_step = math_ops.cast(self.iterations + 1, var_dtype) g_prime = grad / (1. - self.m_schedule_new) # m_t = beta1 * m + (1 - beta1) * g_t m = self.get_slot(var, 'm') m_scaled_g_values = grad * (1 - beta_1_t) m_t = state_ops.assign(m, m * beta_1_t, use_locking=self._use_locking) with ops.control_dependencies([m_t]): m_t = self._resource_scatter_add(m, indices, m_scaled_g_values) m_t_slice = array_ops.gather(m_t, indices) m_t_prime = m_t_slice / (1. - self.m_schedule_next) m_t_bar = (1. 
- self.m_cache_t) * g_prime + self.m_cache_t_1 * m_t_prime # v_t = beta2 * v + (1 - beta2) * (g_t * g_t) v = self.get_slot(var, 'v') v_scaled_g_values = (grad * grad) * (1 - beta_2_t) v_t = state_ops.assign(v, v * beta_2_t, use_locking=self._use_locking) with ops.control_dependencies([v_t]): v_t = self._resource_scatter_add(v, indices, v_scaled_g_values) v_t_slice = array_ops.gather(v_t, indices) v_t_prime = v_t_slice / (1. - math_ops.pow(beta_2_t, local_step)) v_prime_sqrt = math_ops.sqrt(v_t_prime) var_update = self._resource_scatter_add( var, indices, -lr_t * m_t_bar / (v_prime_sqrt + epsilon_t)) return control_flow_ops.group(*[var_update, m_t_bar, v_t]) def get_config(self): config = super(Nadam, self).get_config() config.update({ 'learning_rate': self._serialize_hyperparameter('learning_rate'), 'decay': self._serialize_hyperparameter('decay'), 'beta_1': self._serialize_hyperparameter('beta_1'), 'beta_2': self._serialize_hyperparameter('beta_2'), 'epsilon': self.epsilon, }) return config
tensorflow-master
tensorflow/python/keras/optimizer_v2/nadam.py
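# --- Illustrative sketch (not part of the TensorFlow sources above) ----------
# A small NumPy walk-through of a single Nadam update, following the equations
# in the Nadam docstring and the dense update in _resource_apply_dense above.
# Variable names mirror the docstring; decay=0.004 corresponds to the keras
# `schedule_decay` default handled in __init__.
import numpy as np

def nadam_step(theta, g, m, v, m_schedule, t,
               lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-7, decay=0.004):
  """Returns (theta, m, v, m_schedule) after one Nadam step at timestep t."""
  mu_t = beta_1 * (1. - 0.5 * 0.96 ** (decay * t))
  mu_t_1 = beta_1 * (1. - 0.5 * 0.96 ** (decay * (t + 1)))
  m_schedule_new = m_schedule * mu_t          # prod_{i<=t} mu_i
  m_schedule_next = m_schedule_new * mu_t_1   # prod_{i<=t+1} mu_i
  g_prime = g / (1. - m_schedule_new)
  m = beta_1 * m + (1. - beta_1) * g
  m_prime = m / (1. - m_schedule_next)
  v = beta_2 * v + (1. - beta_2) * g * g
  v_prime = v / (1. - beta_2 ** t)
  m_bar = (1. - mu_t) * g_prime + mu_t_1 * m_prime
  theta = theta - lr * m_bar / (np.sqrt(v_prime) + epsilon)
  return theta, m, v, m_schedule_new

theta, m, v, m_schedule = np.array([1.0, 2.0]), np.zeros(2), np.zeros(2), 1.0
theta, m, v, m_schedule = nadam_step(theta, np.array([0.1, 0.2]), m, v,
                                     m_schedule, t=1)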
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for rmsprop.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import copy import itertools import math from absl.testing import parameterized import numpy as np from tensorflow.python.eager import context from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.keras.optimizer_v2 import learning_rate_schedule from tensorflow.python.keras.optimizer_v2 import rmsprop from tensorflow.python.ops import embedding_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import resource_variable_ops from tensorflow.python.ops import variables from tensorflow.python.platform import test _DATA_TYPES = [dtypes.half, dtypes.float32] _TEST_PARAM_VALUES = [ # learning_rate, rho, momentum, epsilon, centered [0.05, 0.9, 0.0, 1e-3, True], [0.05, 0.9, 0.0, 1e-3, False], [0.1, 0.9, 0.0, 1e-3, True], [0.01, 0.9, 0.0, 1e-5, True], [0.01, 0.9, 0.9, 1e-5, True], ] _TESTPARAMS = [ [data_type] + values for data_type, values in itertools.product(_DATA_TYPES, _TEST_PARAM_VALUES) ] class RMSpropOptimizerTest(test.TestCase): def _rmsprop_update_numpy(self, var, g, mg, rms, mom, lr, rho, momentum, epsilon, centered): rms_t = rms * rho + (1 - rho) * g * g if centered: mg_t = mg * rho + (1 - rho) * g denom_t = rms_t - mg_t * mg_t else: mg_t = mg denom_t = rms_t if momentum > 0.: mom_t = momentum * mom + lr * g / (np.sqrt(denom_t + epsilon)) var_t = var - mom_t else: mom_t = mom var_t = var - lr * g / (np.sqrt(denom_t) + epsilon) return var_t, mg_t, rms_t, mom_t def _sparse_rmsprop_update_numpy(self, var, gindexs, gvalues, mg, rms, mom, lr, rho, momentum, epsilon, centered): mg_t = copy.deepcopy(mg) rms_t = copy.deepcopy(rms) mom_t = copy.deepcopy(mom) var_t = copy.deepcopy(var) for i in range(len(gindexs)): gindex = gindexs[i] gvalue = gvalues[i] rms_t[gindex] = rms[gindex] * rho + (1 - rho) * gvalue * gvalue if centered: mg_t[gindex] = mg_t[gindex] * rho + (1 - rho) * gvalue denom_t = rms_t[gindex] - mg_t[gindex] * mg_t[gindex] else: denom_t = rms_t[gindex] if momentum > 0.: mom_t[gindex] = momentum * mom[gindex] + lr * gvalue / np.sqrt(denom_t + epsilon) var_t[gindex] = var[gindex] - mom_t[gindex] else: mom_t[gindex] = mom[gindex] var_t[gindex] = var[gindex] - lr * gvalue / (np.sqrt(denom_t) + epsilon) return var_t, mg_t, rms_t, mom_t @test_util.run_deprecated_v1 def testDense(self): for (dtype, learning_rate, rho, momentum, epsilon, centered) in _TESTPARAMS: with test_util.use_gpu(): # Initialize variables for numpy implementation. 
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype) grads0_np = np.array([0.1, 0.2], dtype=dtype.as_numpy_dtype) var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype) grads1_np = np.array([0.01, 0.2], dtype=dtype.as_numpy_dtype) var0 = resource_variable_ops.ResourceVariable(var0_np, dtype=dtype) var1 = resource_variable_ops.ResourceVariable(var1_np, dtype=dtype) grads0 = constant_op.constant(grads0_np, dtype=dtype) grads1 = constant_op.constant(grads1_np, dtype=dtype) opt = rmsprop.RMSprop( learning_rate=learning_rate, rho=rho, momentum=momentum, epsilon=epsilon, centered=centered) update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) self.evaluate(variables.global_variables_initializer()) if centered: mg0 = opt.get_slot(var0, "mg") mg1 = opt.get_slot(var1, "mg") else: mg0 = None mg1 = None if momentum > 0.: mom0 = opt.get_slot(var0, "momentum") mom1 = opt.get_slot(var1, "momentum") else: mom0 = None mom1 = None rms0 = opt.get_slot(var0, "rms") self.assertTrue(rms0 is not None) rms1 = opt.get_slot(var1, "rms") self.assertTrue(rms1 is not None) mg0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype) mg1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype) rms0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype) rms1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype) mom0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype) mom1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype) # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], self.evaluate(var0)) self.assertAllClose([3.0, 4.0], self.evaluate(var1)) # Run 3 steps of RMSprop for _ in range(1, 4): self.evaluate(update) var0_np, mg0_np, rms0_np, mom0_np = self._rmsprop_update_numpy( var0_np, grads0_np, mg0_np, rms0_np, mom0_np, learning_rate, rho, momentum, epsilon, centered) var1_np, mg1_np, rms1_np, mom1_np = self._rmsprop_update_numpy( var1_np, grads1_np, mg1_np, rms1_np, mom1_np, learning_rate, rho, momentum, epsilon, centered) # Validate updated params if centered: self.assertAllCloseAccordingToType(mg0_np, self.evaluate(mg0)) self.assertAllCloseAccordingToType(mg1_np, self.evaluate(mg1)) if momentum > 0.: self.assertAllCloseAccordingToType(mom0_np, self.evaluate(mom0)) self.assertAllCloseAccordingToType(mom1_np, self.evaluate(mom1)) self.assertAllCloseAccordingToType(rms0_np, self.evaluate(rms0)) self.assertAllCloseAccordingToType(rms1_np, self.evaluate(rms1)) self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0)) self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1)) @test_util.run_deprecated_v1 def testDenseWithLearningRateDecay(self): var0_np = np.array([1.0, 2.0]) grads0_np = np.array([0.1, 0.2]) var1_np = np.array([3.0, 4.0]) grads1_np = np.array([0.01, 0.2]) var0 = resource_variable_ops.ResourceVariable(var0_np) var1 = resource_variable_ops.ResourceVariable(var1_np) grads0 = constant_op.constant(grads0_np) grads1 = constant_op.constant(grads1_np) learning_rate = 0.01 rho = 0.9 momentum = 0.0 epsilon = 1e-7 centered = False decay = 0.5 opt = rmsprop.RMSprop( learning_rate=learning_rate, rho=rho, momentum=momentum, epsilon=epsilon, centered=centered, decay=decay) update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) self.evaluate(variables.global_variables_initializer()) rms0 = opt.get_slot(var0, "rms") self.assertTrue(rms0 is not None) rms1 = opt.get_slot(var1, "rms") self.assertTrue(rms1 is not None) if momentum > 0.: mom0 = opt.get_slot(var0, "momentum") mom1 = opt.get_slot(var1, "momentum") else: mom0 = None mom1 = None mg0_np = 
np.array([0.0, 0.0]) mg1_np = np.array([0.0, 0.0]) rms0_np = np.array([0.0, 0.0]) rms1_np = np.array([0.0, 0.0]) mom0_np = np.array([0.0, 0.0]) mom1_np = np.array([0.0, 0.0]) # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], self.evaluate(var0)) self.assertAllClose([3.0, 4.0], self.evaluate(var1)) # Run 4 steps of RMSprop for t in range(2): self.evaluate(update) lr = learning_rate / (1 + decay * t) var0_np, mg0_np, rms0_np, mom0_np = self._rmsprop_update_numpy( var0_np, grads0_np, mg0_np, rms0_np, mom0_np, lr, rho, momentum, epsilon, centered) var1_np, mg1_np, rms1_np, mom1_np = self._rmsprop_update_numpy( var1_np, grads1_np, mg1_np, rms1_np, mom1_np, lr, rho, momentum, epsilon, centered) # Validate updated params self.assertAllCloseAccordingToType(rms0_np, self.evaluate(rms0)) self.assertAllCloseAccordingToType(rms1_np, self.evaluate(rms1)) if momentum > 0.: self.assertAllCloseAccordingToType(mom0_np, self.evaluate(mom0)) self.assertAllCloseAccordingToType(mom1_np, self.evaluate(mom1)) self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0)) self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1)) @test_util.run_deprecated_v1 def testDenseWithLearningRateInverseTimeDecay(self): var0_np = np.array([1.0, 2.0]) grads0_np = np.array([0.1, 0.2]) var1_np = np.array([3.0, 4.0]) grads1_np = np.array([0.01, 0.2]) var0 = resource_variable_ops.ResourceVariable(var0_np) var1 = resource_variable_ops.ResourceVariable(var1_np) grads0 = constant_op.constant(grads0_np) grads1 = constant_op.constant(grads1_np) learning_rate = 0.01 rho = 0.9 momentum = 0.0 epsilon = 1e-7 centered = False decay = 0.5 lr_schedule = learning_rate_schedule.InverseTimeDecay( learning_rate, decay_steps=1.0, decay_rate=decay) opt = rmsprop.RMSprop( learning_rate=lr_schedule, rho=rho, momentum=momentum, epsilon=epsilon, centered=centered) update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) self.evaluate(variables.global_variables_initializer()) rms0 = opt.get_slot(var0, "rms") self.assertTrue(rms0 is not None) rms1 = opt.get_slot(var1, "rms") self.assertTrue(rms1 is not None) if momentum > 0.: mom0 = opt.get_slot(var0, "momentum") mom1 = opt.get_slot(var1, "momentum") else: mom0 = None mom1 = None mg0_np = np.array([0.0, 0.0]) mg1_np = np.array([0.0, 0.0]) rms0_np = np.array([0.0, 0.0]) rms1_np = np.array([0.0, 0.0]) mom0_np = np.array([0.0, 0.0]) mom1_np = np.array([0.0, 0.0]) # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], self.evaluate(var0)) self.assertAllClose([3.0, 4.0], self.evaluate(var1)) # Run 4 steps of RMSprop for t in range(2): self.evaluate(update) lr = learning_rate / (1 + decay * t) var0_np, mg0_np, rms0_np, mom0_np = self._rmsprop_update_numpy( var0_np, grads0_np, mg0_np, rms0_np, mom0_np, lr, rho, momentum, epsilon, centered) var1_np, mg1_np, rms1_np, mom1_np = self._rmsprop_update_numpy( var1_np, grads1_np, mg1_np, rms1_np, mom1_np, lr, rho, momentum, epsilon, centered) # Validate updated params self.assertAllCloseAccordingToType(rms0_np, self.evaluate(rms0)) self.assertAllCloseAccordingToType(rms1_np, self.evaluate(rms1)) if momentum > 0.: self.assertAllCloseAccordingToType(mom0_np, self.evaluate(mom0)) self.assertAllCloseAccordingToType(mom1_np, self.evaluate(mom1)) self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0)) self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1)) @test_util.run_deprecated_v1 def testMinimizeSparseResourceVariable(self): for dtype in [dtypes.float32, dtypes.float64]: with 
self.cached_session(): var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype) x = constant_op.constant([[4.0], [5.0]], dtype=dtype) def loss(): pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x) # pylint: disable=cell-var-from-loop return pred * pred sgd_op = rmsprop.RMSprop( learning_rate=1.0, rho=0.0, momentum=0.0, epsilon=0.0, centered=False).minimize( loss, var_list=[var0]) self.evaluate(variables.global_variables_initializer()) # Fetch params to validate initial values self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0)) # Run 1 step of sgd self.evaluate(sgd_op) # Validate updated params self.assertAllCloseAccordingToType([[0., 1.]], self.evaluate(var0), atol=0.01) @test_util.run_deprecated_v1 def testMinimizeSparseResourceVariableCentered(self): for dtype in [dtypes.float32, dtypes.float64]: with self.cached_session(): var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype) x = constant_op.constant([[4.0], [5.0]], dtype=dtype) def loss(): pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x) # pylint: disable=cell-var-from-loop return pred * pred # loss = lambda: pred * pred # pylint: disable=cell-var-from-loop sgd_op = rmsprop.RMSprop( learning_rate=1.0, rho=0.0, momentum=0.0, epsilon=1.0, centered=True).minimize( loss, var_list=[var0]) self.evaluate(variables.global_variables_initializer()) # Fetch params to validate initial values self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0)) # Run 1 step of sgd self.evaluate(sgd_op) # Validate updated params self.assertAllCloseAccordingToType([[-111, -138]], self.evaluate(var0), atol=0.01) @test_util.run_deprecated_v1 def testSparse(self): for (dtype, learning_rate, rho, momentum, epsilon, centered) in _TESTPARAMS: with test_util.use_gpu(): # Initialize variables for numpy implementation. 
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype) grads0_np = np.array([0.1], dtype=dtype.as_numpy_dtype) var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype) grads1_np = np.array([0.01], dtype=dtype.as_numpy_dtype) var0 = variables.Variable(var0_np) var1 = variables.Variable(var1_np) grads0_np_indices = np.array([0], dtype=np.int32) grads0 = ops.IndexedSlices( constant_op.constant(grads0_np), constant_op.constant(grads0_np_indices), constant_op.constant([1])) grads1_np_indices = np.array([1], dtype=np.int32) grads1 = ops.IndexedSlices( constant_op.constant(grads1_np), constant_op.constant(grads1_np_indices), constant_op.constant([1])) opt = rmsprop.RMSprop( learning_rate=learning_rate, rho=rho, momentum=momentum, epsilon=epsilon, centered=centered) update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) self.evaluate(variables.global_variables_initializer()) if centered: mg0 = opt.get_slot(var0, "mg") self.assertEqual(mg0 is not None, centered) mg1 = opt.get_slot(var1, "mg") self.assertEqual(mg1 is not None, centered) else: mg0 = None mg1 = None rms0 = opt.get_slot(var0, "rms") self.assertTrue(rms0 is not None) rms1 = opt.get_slot(var1, "rms") self.assertTrue(rms1 is not None) if momentum > 0.: mom0 = opt.get_slot(var0, "momentum") mom1 = opt.get_slot(var1, "momentum") else: mom0 = None mom1 = None mg0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype) mg1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype) rms0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype) rms1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype) mom0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype) mom1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype) # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], self.evaluate(var0)) self.assertAllClose([3.0, 4.0], self.evaluate(var1)) # Run 3 steps of RMSprop for _ in range(1, 4): self.evaluate(update) var0_np, mg0_np, rms0_np, mom0_np = self._sparse_rmsprop_update_numpy( var0_np, grads0_np_indices, grads0_np, mg0_np, rms0_np, mom0_np, learning_rate, rho, momentum, epsilon, centered) var1_np, mg1_np, rms1_np, mom1_np = self._sparse_rmsprop_update_numpy( var1_np, grads1_np_indices, grads1_np, mg1_np, rms1_np, mom1_np, learning_rate, rho, momentum, epsilon, centered) # Validate updated params if centered: self.assertAllCloseAccordingToType(mg0_np, self.evaluate(mg0)) self.assertAllCloseAccordingToType(mg1_np, self.evaluate(mg1)) self.assertAllCloseAccordingToType(rms0_np, self.evaluate(rms0)) self.assertAllCloseAccordingToType(rms1_np, self.evaluate(rms1)) if momentum > 0.: self.assertAllCloseAccordingToType(mom0_np, self.evaluate(mom0)) self.assertAllCloseAccordingToType(mom1_np, self.evaluate(mom1)) self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0)) self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1)) def testCallableParams(self): with context.eager_mode(): for dtype in [dtypes.half, dtypes.float32]: var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype) var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype) grads0 = constant_op.constant([0.1, 0.1], dtype=dtype) grads1 = constant_op.constant([0.01, 0.01], dtype=dtype) learning_rate = lambda: 2.0 rho = lambda: 0.9 momentum = lambda: 0.0 epsilon = 1.0 opt = rmsprop.RMSprop(learning_rate, rho, momentum, epsilon) # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], self.evaluate(var0)) self.assertAllClose([3.0, 4.0], self.evaluate(var1)) # Step 1: the rms accumulators where 1. 
So we should see a normal # update: v -= grad * learning_rate opt.apply_gradients(zip([grads0, grads1], [var0, var1])) # Check the parameters. self.assertAllCloseAccordingToType( np.array([ 1.0 - (0.1 * 2.0 / math.sqrt(0.001 + 1.0)), 2.0 - (0.1 * 2.0 / math.sqrt(0.001 + 1.0)) ]), self.evaluate(var0)) self.assertAllCloseAccordingToType( np.array([ 3.0 - (0.01 * 2.0 / math.sqrt(0.00001 + 1.0)), 4.0 - (0.01 * 2.0 / math.sqrt(0.00001 + 1.0)) ]), self.evaluate(var1)) # Step 2: the root mean square accumulators contain the previous update. opt.apply_gradients(zip([grads0, grads1], [var0, var1])) # Check the parameters. self.assertAllCloseAccordingToType( np.array([ 1.0 - (0.1 * 2.0 / math.sqrt(0.001 + 1.0)) - (0.1 * 2.0 / math.sqrt(0.001 * 0.9 + 0.001 + 1.0)), 2.0 - (0.1 * 2.0 / math.sqrt(0.001 + 1.0)) - (0.1 * 2.0 / math.sqrt(0.001 * 0.9 + 0.001 + 1.0)) ]), self.evaluate(var0)) self.assertAllCloseAccordingToType( np.array([ 3.0 - (0.01 * 2.0 / math.sqrt(0.00001 + 1.0)) - (0.01 * 2.0 / math.sqrt(0.00001 * 0.9 + 1e-5 + 1.0)), 4.0 - (0.01 * 2.0 / math.sqrt(0.00001 + 1.0)) - (0.01 * 2.0 / math.sqrt(0.00001 * 0.9 + 1e-5 + 1.0)) ]), self.evaluate(var1)) def testConstructRMSpropWithLR(self): opt = rmsprop.RMSprop(lr=1.0) opt_2 = rmsprop.RMSprop(learning_rate=0.1, lr=1.0) opt_3 = rmsprop.RMSprop(learning_rate=0.1) self.assertIsInstance(opt.lr, variables.Variable) self.assertIsInstance(opt_2.lr, variables.Variable) self.assertIsInstance(opt_3.lr, variables.Variable) self.evaluate(variables.global_variables_initializer()) self.assertAllClose(self.evaluate(opt.lr), (1.0)) self.assertAllClose(self.evaluate(opt_2.lr), (1.0)) self.assertAllClose(self.evaluate(opt_3.lr), (0.1)) def testSlotsUniqueEager(self): with context.eager_mode(): v1 = variables.Variable(1.) v2 = variables.Variable(1.) opt = rmsprop.RMSprop(1., momentum=0., centered=False) opt.minimize(lambda: v1 + v2, var_list=[v1, v2]) # There should be iteration, and one unique slot variable for v1 and v2. self.assertEqual(3, len(set(opt.variables()))) self.assertEqual( self.evaluate(opt.variables()[0]), self.evaluate(opt.iterations)) opt = rmsprop.RMSprop(learning_rate=1., momentum=0.2, centered=False) opt.minimize(lambda: v1 + v2, var_list=[v1, v2]) # There should be iteration, and two unique slot variables for v1 and v2. 
self.assertEqual(5, len(set(opt.variables()))) self.assertEqual( self.evaluate(opt.variables()[0]), self.evaluate(opt.iterations)) opt = rmsprop.RMSprop(learning_rate=1., momentum=0.2, centered=True) opt.minimize(lambda: v1 + v2, var_list=[v1, v2]) # There should be iteration, and three unique slot variables for v1 and v2 self.assertEqual(7, len(set(opt.variables()))) self.assertEqual( self.evaluate(opt.variables()[0]), self.evaluate(opt.iterations)) class SlotColocationTest(test.TestCase, parameterized.TestCase): @parameterized.parameters([True, False]) @test_util.run_gpu_only @test_util.run_in_graph_and_eager_modes def testRunMinimizeOnGPUForCPUVariables(self, use_resource): with ops.device("/device:CPU:0"): if use_resource: var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtypes.float32) var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtypes.float32) else: var0 = variables.Variable([1.0, 2.0], dtype=dtypes.float32) var1 = variables.Variable([3.0, 4.0], dtype=dtypes.float32) def loss(): return 5 * var0 + 3 * var1 opt = rmsprop.RMSprop( learning_rate=1.0, decay=0.9, momentum=0.5, epsilon=1.0) # Fetch params to validate initial values self.evaluate(variables.global_variables_initializer()) self.assertAllClose([1.0, 2.0], self.evaluate(var0)) self.assertAllClose([3.0, 4.0], self.evaluate(var1)) # Run 1 step through optimizer on GPU. # Slot variables are created the first time optimizer is used on some # variable. This tests that slot variables will be colocated with the base # variable. with ops.device("/device:GPU:0"): # Note that for eager execution, minimize expects a function instead of a # Tensor. opt_op = opt.minimize(loss, [var0, var1]) self.evaluate(variables.global_variables_initializer()) self.evaluate(opt_op) # Validate updated params, All variables should have decreased. self.assertTrue(all(v < 0.0 for v in self.evaluate(var0)), msg="updated variables: %s" % self.evaluate(var0)) self.assertTrue(all(v < 2.0 for v in self.evaluate(var1)), msg="updated variables: %s" % self.evaluate(var1)) if __name__ == "__main__": test.main()
tensorflow-master
tensorflow/python/keras/optimizer_v2/rmsprop_test.py
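# --- Illustrative sketch (not part of the TensorFlow sources above) ----------
# The RMSprop tests above read optimizer state through opt.get_slot() and
# opt.variables(). This sketch shows the slot layout for the optimizer_v2
# RMSprop with momentum and centering enabled; the values are arbitrary.
import tensorflow as tf

v = tf.Variable([1.0, 2.0])
opt = tf.keras.optimizers.RMSprop(learning_rate=1.0, momentum=0.9, centered=True)
opt.minimize(lambda: tf.reduce_sum(v * v), var_list=[v])

# "rms" always exists; "momentum" only when momentum > 0, and "mg" (the moving
# mean of gradients) only when centered=True -- matching the assertions above.
print(opt.get_slot(v, "rms").numpy())
print(opt.get_slot(v, "momentum").numpy())
print(opt.get_slot(v, "mg").numpy())
print(len(opt.variables()))  # iterations plus three slots for one variable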
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Adadelta for TensorFlow.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.framework import ops from tensorflow.python.keras import backend_config from tensorflow.python.keras.optimizer_v2 import optimizer_v2 from tensorflow.python.training import training_ops from tensorflow.python.util.tf_export import keras_export @keras_export('keras.optimizers.Adadelta') class Adadelta(optimizer_v2.OptimizerV2): r"""Optimizer that implements the Adadelta algorithm. Adadelta optimization is a stochastic gradient descent method that is based on adaptive learning rate per dimension to address two drawbacks: 1) the continual decay of learning rates throughout training 2) the need for a manually selected global learning rate Two accumulation steps are required: 1) the accumulation of gradients squared, 2) the accumulation of updates squared. Initialization: $$E[g^2]_0 := 0 \text{(Initialize gradient 2nd order moment vector)}$$ $$E[\Delta x^2]_0 := 0 \text{(Initialize 2nd order variable update)}$$ $$t := t + 1$$ $$E[g^2]_t := \rho * E[g^2]_{t-1} + (1 - \rho) * g^2$$ $$\Delta x_t = -RMS[\Delta x]_{t-1} * g_t / RMS[g]_t$$ $$E[\Delta x^2]_t := \rho * E[\Delta x^2]_{t-1} + (1 - \rho) * \Delta x_t^2$$ $$x_t := x_{t-1} + \Delta x_{t}$$ References See [M. D. Zeiler](http://arxiv.org/abs/1212.5701) ([pdf](http://arxiv.org/pdf/1212.5701v1.pdf)) """ def __init__(self, learning_rate=0.001, rho=0.95, epsilon=1e-7, name='Adadelta', **kwargs): """Construct a new Adadelta optimizer. Adadelta is a more robust extension of Adagrad that adapts learning rates based on a moving window of gradient updates, instead of accumulating all past gradients. This way, Adadelta continues learning even when many updates have been done. Compared to Adagrad, in the original version of Adadelta you don't have to set an initial learning rate. In this version, initial learning rate can be set, as in most other Keras optimizers. Args: learning_rate: A `Tensor` or a floating point value. The learning rate. To match the exact form in the original paper use 1.0. rho: A `Tensor` or a floating point value. The decay rate. epsilon: A `Tensor` or a floating point value. A constant epsilon used to better conditioning the grad update. name: Optional name prefix for the operations created when applying gradients. Defaults to "Adadelta". **kwargs: keyword arguments. Allowed to be {`clipnorm`, `clipvalue`, `lr`, `decay`}. `clipnorm` is clip gradients by norm; `clipvalue` is clip gradients by value, `decay` is included for backward compatibility to allow time inverse decay of learning rate. `lr` is included for backward compatibility, recommended to use `learning_rate` instead. 
@compatibility(eager) When eager execution is enabled, `learning_rate`, `rho`, and `epsilon` can each be a callable that takes no arguments and returns the actual value to use. This can be useful for changing these values across different invocations of optimizer functions. @end_compatibility """ if epsilon is None: epsilon = backend_config.epsilon() super(Adadelta, self).__init__(name, **kwargs) self._set_hyper('learning_rate', kwargs.get('lr', learning_rate)) self._set_hyper('decay', self._initial_decay) self._set_hyper('rho', rho) self.epsilon = epsilon or backend_config.epsilon() def _create_slots(self, var_list): # Separate for-loops to respect the ordering of slot variables from v1. for v in var_list: self.add_slot(v, 'accum_grad') for v in var_list: self.add_slot(v, 'accum_var') def set_weights(self, weights): params = self.weights # Override set_weights for backward compatibility of Keras V1 optimizer # since it does not include iteration at head of the weight list. Set # iteration to 0. if len(params) == len(weights) + 1: weights = [np.array(0)] + weights super(Adadelta, self).set_weights(weights) def _resource_apply_dense(self, grad, var): var_dtype = var.dtype.base_dtype lr_t = self._decayed_lr_t[var_dtype] accum_grad = self.get_slot(var, 'accum_grad') accum_var = self.get_slot(var, 'accum_var') return training_ops.resource_apply_adadelta( var.handle, accum_grad.handle, accum_var.handle, lr_t, self._get_hyper('rho', var_dtype), ops.convert_to_tensor(self.epsilon, var_dtype), grad, use_locking=self._use_locking) def _resource_apply_sparse(self, grad, var, indices): var_dtype = var.dtype.base_dtype lr_t = self._decayed_lr_t[var_dtype] accum_grad = self.get_slot(var, 'accum_grad') accum_var = self.get_slot(var, 'accum_var') return training_ops.resource_sparse_apply_adadelta( var.handle, accum_grad.handle, accum_var.handle, lr_t, self._get_hyper('rho', var_dtype), ops.convert_to_tensor(self.epsilon, var_dtype), grad, indices, use_locking=self._use_locking) def get_config(self): config = super(Adadelta, self).get_config() config.update({ 'learning_rate': self._serialize_hyperparameter('learning_rate'), 'decay': self._serialize_hyperparameter('decay'), 'rho': self._serialize_hyperparameter('rho'), 'epsilon': self.epsilon, }) return config
tensorflow-master
tensorflow/python/keras/optimizer_v2/adadelta.py
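# --- Illustrative sketch (not part of the TensorFlow sources above) ----------
# A small NumPy walk-through of a single Adadelta update, following the
# equations in the Adadelta docstring above. RMS[z] is taken as
# sqrt(E[z^2] + epsilon); the learning_rate factor reflects the Keras variant
# described in __init__ (use learning_rate=1.0 to match the paper exactly).
import numpy as np

def adadelta_step(x, g, accum_grad, accum_update,
                  learning_rate=0.001, rho=0.95, epsilon=1e-7):
  """Returns (x, accum_grad, accum_update) after one Adadelta step."""
  accum_grad = rho * accum_grad + (1. - rho) * g * g                # E[g^2]_t
  update = -np.sqrt(accum_update + epsilon) * g / np.sqrt(accum_grad + epsilon)
  accum_update = rho * accum_update + (1. - rho) * update * update  # E[dx^2]_t
  x = x + learning_rate * update
  return x, accum_grad, accum_update

x, accum_grad, accum_update = np.array([1.0, 2.0]), np.zeros(2), np.zeros(2)
x, accum_grad, accum_update = adadelta_step(x, np.array([0.1, 0.2]),
                                            accum_grad, accum_update)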
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # pylint: disable=invalid-name """Inception-ResNet V2 model for Keras. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from keras_applications import inception_resnet_v2 from tensorflow.python.keras.applications import keras_modules_injection from tensorflow.python.util.tf_export import keras_export @keras_export('keras.applications.inception_resnet_v2.InceptionResNetV2', 'keras.applications.InceptionResNetV2') @keras_modules_injection def InceptionResNetV2(*args, **kwargs): return inception_resnet_v2.InceptionResNetV2(*args, **kwargs) @keras_export('keras.applications.inception_resnet_v2.decode_predictions') @keras_modules_injection def decode_predictions(*args, **kwargs): return inception_resnet_v2.decode_predictions(*args, **kwargs) @keras_export('keras.applications.inception_resnet_v2.preprocess_input') @keras_modules_injection def preprocess_input(*args, **kwargs): return inception_resnet_v2.preprocess_input(*args, **kwargs)
tensorflow-master
tensorflow/python/keras/applications/inception_resnet_v2.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilities for ImageNet data preprocessing & prediction decoding. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from keras_applications import imagenet_utils from tensorflow.python.keras.applications import keras_modules_injection from tensorflow.python.util.tf_export import keras_export @keras_export('keras.applications.imagenet_utils.decode_predictions') @keras_modules_injection def decode_predictions(*args, **kwargs): return imagenet_utils.decode_predictions(*args, **kwargs) @keras_export('keras.applications.imagenet_utils.preprocess_input') @keras_modules_injection def preprocess_input(*args, **kwargs): return imagenet_utils.preprocess_input(*args, **kwargs)
tensorflow-master
tensorflow/python/keras/applications/imagenet_utils.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # pylint: disable=invalid-name """DenseNet models for Keras. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from keras_applications import densenet from tensorflow.python.keras.applications import keras_modules_injection from tensorflow.python.util.tf_export import keras_export @keras_export('keras.applications.densenet.DenseNet121', 'keras.applications.DenseNet121') @keras_modules_injection def DenseNet121(*args, **kwargs): return densenet.DenseNet121(*args, **kwargs) @keras_export('keras.applications.densenet.DenseNet169', 'keras.applications.DenseNet169') @keras_modules_injection def DenseNet169(*args, **kwargs): return densenet.DenseNet169(*args, **kwargs) @keras_export('keras.applications.densenet.DenseNet201', 'keras.applications.DenseNet201') @keras_modules_injection def DenseNet201(*args, **kwargs): return densenet.DenseNet201(*args, **kwargs) @keras_export('keras.applications.densenet.decode_predictions') @keras_modules_injection def decode_predictions(*args, **kwargs): return densenet.decode_predictions(*args, **kwargs) @keras_export('keras.applications.densenet.preprocess_input') @keras_modules_injection def preprocess_input(*args, **kwargs): return densenet.preprocess_input(*args, **kwargs)
tensorflow-master
tensorflow/python/keras/applications/densenet.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # pylint: disable=invalid-name """MobileNet v2 models for Keras. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from keras_applications import mobilenet_v2 from tensorflow.python.keras.applications import keras_modules_injection from tensorflow.python.util.tf_export import keras_export @keras_export('keras.applications.mobilenet_v2.MobileNetV2', 'keras.applications.MobileNetV2') @keras_modules_injection def MobileNetV2(*args, **kwargs): return mobilenet_v2.MobileNetV2(*args, **kwargs) @keras_export('keras.applications.mobilenet_v2.decode_predictions') @keras_modules_injection def decode_predictions(*args, **kwargs): return mobilenet_v2.decode_predictions(*args, **kwargs) @keras_export('keras.applications.mobilenet_v2.preprocess_input') @keras_modules_injection def preprocess_input(*args, **kwargs): return mobilenet_v2.preprocess_input(*args, **kwargs)
tensorflow-master
tensorflow/python/keras/applications/mobilenet_v2.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Keras Applications are canned architectures with pre-trained weights.""" # pylint: disable=g-import-not-at-top # pylint: disable=g-bad-import-order from __future__ import absolute_import from __future__ import division from __future__ import print_function import keras_applications from tensorflow.python.keras import backend from tensorflow.python.keras import engine from tensorflow.python.keras import layers from tensorflow.python.keras import models from tensorflow.python.keras import utils from tensorflow.python.util import tf_inspect def keras_modules_injection(base_fun): """Decorator injecting tf.keras replacements for Keras modules. Arguments: base_fun: Application function to decorate (e.g. `MobileNet`). Returns: Decorated function that injects keyword argument for the tf.keras modules required by the Applications. """ def wrapper(*args, **kwargs): kwargs['backend'] = backend if 'layers' not in kwargs: kwargs['layers'] = layers kwargs['models'] = models kwargs['utils'] = utils return base_fun(*args, **kwargs) return wrapper from tensorflow.python.keras.applications.densenet import DenseNet121 from tensorflow.python.keras.applications.densenet import DenseNet169 from tensorflow.python.keras.applications.densenet import DenseNet201 from tensorflow.python.keras.applications.imagenet_utils import decode_predictions from tensorflow.python.keras.applications.imagenet_utils import preprocess_input from tensorflow.python.keras.applications.inception_resnet_v2 import InceptionResNetV2 from tensorflow.python.keras.applications.inception_v3 import InceptionV3 from tensorflow.python.keras.applications.mobilenet import MobileNet from tensorflow.python.keras.applications.mobilenet_v2 import MobileNetV2 from tensorflow.python.keras.applications.nasnet import NASNetLarge from tensorflow.python.keras.applications.nasnet import NASNetMobile from tensorflow.python.keras.applications.resnet import ResNet50 from tensorflow.python.keras.applications.resnet import ResNet101 from tensorflow.python.keras.applications.resnet import ResNet152 from tensorflow.python.keras.applications.resnet_v2 import ResNet50V2 from tensorflow.python.keras.applications.resnet_v2 import ResNet101V2 from tensorflow.python.keras.applications.resnet_v2 import ResNet152V2 from tensorflow.python.keras.applications.vgg16 import VGG16 from tensorflow.python.keras.applications.vgg19 import VGG19 from tensorflow.python.keras.applications.xception import Xception
tensorflow-master
tensorflow/python/keras/applications/__init__.py
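# --- Illustrative sketch (not part of the TensorFlow sources above) ----------
# What keras_modules_injection does: the wrapped function receives tf.keras's
# backend/layers/models/utils modules as keyword arguments, so callers invoke
# it like the upstream keras_applications constructors. `build_model` below is
# a hypothetical stand-in, not a real application.
from tensorflow.python.keras.applications import keras_modules_injection

@keras_modules_injection
def build_model(units, backend=None, layers=None, models=None, utils=None):
  # `layers` and `models` here are tf.keras modules injected by the decorator.
  inputs = layers.Input(shape=(4,))
  outputs = layers.Dense(units)(inputs)
  return models.Model(inputs, outputs)

model = build_model(2)  # the module keyword arguments need not be passed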
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # pylint: disable=invalid-name """MobileNet v1 models for Keras. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from keras_applications import mobilenet from tensorflow.python.keras.applications import keras_modules_injection from tensorflow.python.util.tf_export import keras_export @keras_export('keras.applications.mobilenet.MobileNet', 'keras.applications.MobileNet') @keras_modules_injection def MobileNet(*args, **kwargs): return mobilenet.MobileNet(*args, **kwargs) @keras_export('keras.applications.mobilenet.decode_predictions') @keras_modules_injection def decode_predictions(*args, **kwargs): return mobilenet.decode_predictions(*args, **kwargs) @keras_export('keras.applications.mobilenet.preprocess_input') @keras_modules_injection def preprocess_input(*args, **kwargs): return mobilenet.preprocess_input(*args, **kwargs)
tensorflow-master
tensorflow/python/keras/applications/mobilenet.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # pylint: disable=invalid-name """Inception V3 model for Keras. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from keras_applications import inception_v3 from tensorflow.python.keras.applications import keras_modules_injection from tensorflow.python.util.tf_export import keras_export @keras_export('keras.applications.inception_v3.InceptionV3', 'keras.applications.InceptionV3') @keras_modules_injection def InceptionV3(*args, **kwargs): return inception_v3.InceptionV3(*args, **kwargs) @keras_export('keras.applications.inception_v3.decode_predictions') @keras_modules_injection def decode_predictions(*args, **kwargs): return inception_v3.decode_predictions(*args, **kwargs) @keras_export('keras.applications.inception_v3.preprocess_input') @keras_modules_injection def preprocess_input(*args, **kwargs): return inception_v3.preprocess_input(*args, **kwargs)
tensorflow-master
tensorflow/python/keras/applications/inception_v3.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # pylint: disable=invalid-name """ResNet models for Keras. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from keras_applications import resnet from tensorflow.python.keras.applications import keras_modules_injection from tensorflow.python.util.tf_export import keras_export @keras_export('keras.applications.resnet50.ResNet50', 'keras.applications.resnet.ResNet50', 'keras.applications.ResNet50') @keras_modules_injection def ResNet50(*args, **kwargs): return resnet.ResNet50(*args, **kwargs) @keras_export('keras.applications.resnet.ResNet101', 'keras.applications.ResNet101') @keras_modules_injection def ResNet101(*args, **kwargs): return resnet.ResNet101(*args, **kwargs) @keras_export('keras.applications.resnet.ResNet152', 'keras.applications.ResNet152') @keras_modules_injection def ResNet152(*args, **kwargs): return resnet.ResNet152(*args, **kwargs) @keras_export('keras.applications.resnet50.decode_predictions', 'keras.applications.resnet.decode_predictions') @keras_modules_injection def decode_predictions(*args, **kwargs): return resnet.decode_predictions(*args, **kwargs) @keras_export('keras.applications.resnet50.preprocess_input', 'keras.applications.resnet.preprocess_input') @keras_modules_injection def preprocess_input(*args, **kwargs): return resnet.preprocess_input(*args, **kwargs)
tensorflow-master
tensorflow/python/keras/applications/resnet.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # pylint: disable=invalid-name """VGG16 model for Keras. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from keras_applications import vgg16 from tensorflow.python.keras.applications import keras_modules_injection from tensorflow.python.util.tf_export import keras_export @keras_export('keras.applications.vgg16.VGG16', 'keras.applications.VGG16') @keras_modules_injection def VGG16(*args, **kwargs): return vgg16.VGG16(*args, **kwargs) @keras_export('keras.applications.vgg16.decode_predictions') @keras_modules_injection def decode_predictions(*args, **kwargs): return vgg16.decode_predictions(*args, **kwargs) @keras_export('keras.applications.vgg16.preprocess_input') @keras_modules_injection def preprocess_input(*args, **kwargs): return vgg16.preprocess_input(*args, **kwargs)
tensorflow-master
tensorflow/python/keras/applications/vgg16.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Integration tests for Keras applications.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized from tensorflow.python.keras import applications from tensorflow.python.platform import test MODEL_LIST = [ (applications.ResNet50, 2048), (applications.ResNet101, 2048), (applications.ResNet152, 2048), (applications.ResNet50V2, 2048), (applications.ResNet101V2, 2048), (applications.ResNet152V2, 2048), (applications.VGG16, 512), (applications.VGG19, 512), (applications.Xception, 2048), (applications.InceptionV3, 2048), (applications.InceptionResNetV2, 1536), (applications.MobileNet, 1024), (applications.MobileNetV2, 1280), (applications.DenseNet121, 1024), (applications.DenseNet169, 1664), (applications.DenseNet201, 1920), (applications.NASNetMobile, 1056), ] class ApplicationsTest(test.TestCase, parameterized.TestCase): @parameterized.parameters(*MODEL_LIST) def test_feature_extraction_model(self, model_fn, output_dim): model = model_fn(include_top=False, weights=None) self.assertLen(model.output_shape, 4) self.assertEqual(model.output_shape[-1], output_dim) if __name__ == '__main__': test.main()
tensorflow-master
tensorflow/python/keras/applications/applications_test.py
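# --- Illustrative sketch (not part of the TensorFlow sources above) ----------
# Mirrors the feature-extraction test above: with include_top=False and
# weights=None, each application returns a 4-D feature map whose channel
# dimension matches its MODEL_LIST entry (512 for VGG16).
from tensorflow.python.keras import applications

model = applications.VGG16(include_top=False, weights=None)
assert len(model.output_shape) == 4
assert model.output_shape[-1] == 512  # matches (applications.VGG16, 512) above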
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # pylint: disable=invalid-name """VGG19 model for Keras. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from keras_applications import vgg19 from tensorflow.python.keras.applications import keras_modules_injection from tensorflow.python.util.tf_export import keras_export @keras_export('keras.applications.vgg19.VGG19', 'keras.applications.VGG19') @keras_modules_injection def VGG19(*args, **kwargs): return vgg19.VGG19(*args, **kwargs) @keras_export('keras.applications.vgg19.decode_predictions') @keras_modules_injection def decode_predictions(*args, **kwargs): return vgg19.decode_predictions(*args, **kwargs) @keras_export('keras.applications.vgg19.preprocess_input') @keras_modules_injection def preprocess_input(*args, **kwargs): return vgg19.preprocess_input(*args, **kwargs)
tensorflow-master
tensorflow/python/keras/applications/vgg19.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # pylint: disable=invalid-name """Xception V1 model for Keras. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from keras_applications import xception from tensorflow.python.keras.applications import keras_modules_injection from tensorflow.python.util.tf_export import keras_export @keras_export('keras.applications.xception.Xception', 'keras.applications.Xception') @keras_modules_injection def Xception(*args, **kwargs): return xception.Xception(*args, **kwargs) @keras_export('keras.applications.xception.decode_predictions') @keras_modules_injection def decode_predictions(*args, **kwargs): return xception.decode_predictions(*args, **kwargs) @keras_export('keras.applications.xception.preprocess_input') @keras_modules_injection def preprocess_input(*args, **kwargs): return xception.preprocess_input(*args, **kwargs)
tensorflow-master
tensorflow/python/keras/applications/xception.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # pylint: disable=invalid-name """NASNet-A models for Keras. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from keras_applications import nasnet from tensorflow.python.keras.applications import keras_modules_injection from tensorflow.python.util.tf_export import keras_export @keras_export('keras.applications.nasnet.NASNetMobile', 'keras.applications.NASNetMobile') @keras_modules_injection def NASNetMobile(*args, **kwargs): return nasnet.NASNetMobile(*args, **kwargs) @keras_export('keras.applications.nasnet.NASNetLarge', 'keras.applications.NASNetLarge') @keras_modules_injection def NASNetLarge(*args, **kwargs): return nasnet.NASNetLarge(*args, **kwargs) @keras_export('keras.applications.nasnet.decode_predictions') @keras_modules_injection def decode_predictions(*args, **kwargs): return nasnet.decode_predictions(*args, **kwargs) @keras_export('keras.applications.nasnet.preprocess_input') @keras_modules_injection def preprocess_input(*args, **kwargs): return nasnet.preprocess_input(*args, **kwargs)
tensorflow-master
tensorflow/python/keras/applications/nasnet.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # pylint: disable=invalid-name """ResNet v2 models for Keras. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from keras_applications import resnet_v2 from tensorflow.python.keras.applications import keras_modules_injection from tensorflow.python.util.tf_export import keras_export @keras_export('keras.applications.resnet_v2.ResNet50V2', 'keras.applications.ResNet50V2') @keras_modules_injection def ResNet50V2(*args, **kwargs): return resnet_v2.ResNet50V2(*args, **kwargs) @keras_export('keras.applications.resnet_v2.ResNet101V2', 'keras.applications.ResNet101V2') @keras_modules_injection def ResNet101V2(*args, **kwargs): return resnet_v2.ResNet101V2(*args, **kwargs) @keras_export('keras.applications.resnet_v2.ResNet152V2', 'keras.applications.ResNet152V2') @keras_modules_injection def ResNet152V2(*args, **kwargs): return resnet_v2.ResNet152V2(*args, **kwargs) @keras_export('keras.applications.resnet_v2.decode_predictions') @keras_modules_injection def decode_predictions(*args, **kwargs): return resnet_v2.decode_predictions(*args, **kwargs) @keras_export('keras.applications.resnet_v2.preprocess_input') @keras_modules_injection def preprocess_input(*args, **kwargs): return resnet_v2.preprocess_input(*args, **kwargs)
tensorflow-master
tensorflow/python/keras/applications/resnet_v2.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for sequence data preprocessing utils.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from math import ceil import numpy as np from tensorflow.python import keras from tensorflow.python.platform import test class TestSequence(test.TestCase): def test_pad_sequences(self): a = [[1], [1, 2], [1, 2, 3]] # test padding b = keras.preprocessing.sequence.pad_sequences(a, maxlen=3, padding='pre') self.assertAllClose(b, [[0, 0, 1], [0, 1, 2], [1, 2, 3]]) b = keras.preprocessing.sequence.pad_sequences(a, maxlen=3, padding='post') self.assertAllClose(b, [[1, 0, 0], [1, 2, 0], [1, 2, 3]]) # test truncating b = keras.preprocessing.sequence.pad_sequences( a, maxlen=2, truncating='pre') self.assertAllClose(b, [[0, 1], [1, 2], [2, 3]]) b = keras.preprocessing.sequence.pad_sequences( a, maxlen=2, truncating='post') self.assertAllClose(b, [[0, 1], [1, 2], [1, 2]]) # test value b = keras.preprocessing.sequence.pad_sequences(a, maxlen=3, value=1) self.assertAllClose(b, [[1, 1, 1], [1, 1, 2], [1, 2, 3]]) def test_pad_sequences_vector(self): a = [[[1, 1]], [[2, 1], [2, 2]], [[3, 1], [3, 2], [3, 3]]] # test padding b = keras.preprocessing.sequence.pad_sequences(a, maxlen=3, padding='pre') self.assertAllClose(b, [[[0, 0], [0, 0], [1, 1]], [[0, 0], [2, 1], [2, 2]], [[3, 1], [3, 2], [3, 3]]]) b = keras.preprocessing.sequence.pad_sequences(a, maxlen=3, padding='post') self.assertAllClose(b, [[[1, 1], [0, 0], [0, 0]], [[2, 1], [2, 2], [0, 0]], [[3, 1], [3, 2], [3, 3]]]) # test truncating b = keras.preprocessing.sequence.pad_sequences( a, maxlen=2, truncating='pre') self.assertAllClose(b, [[[0, 0], [1, 1]], [[2, 1], [2, 2]], [[3, 2], [3, 3]]]) b = keras.preprocessing.sequence.pad_sequences( a, maxlen=2, truncating='post') self.assertAllClose(b, [[[0, 0], [1, 1]], [[2, 1], [2, 2]], [[3, 1], [3, 2]]]) # test value b = keras.preprocessing.sequence.pad_sequences(a, maxlen=3, value=1) self.assertAllClose(b, [[[1, 1], [1, 1], [1, 1]], [[1, 1], [2, 1], [2, 2]], [[3, 1], [3, 2], [3, 3]]]) def test_make_sampling_table(self): a = keras.preprocessing.sequence.make_sampling_table(3) self.assertAllClose( a, np.asarray([0.00315225, 0.00315225, 0.00547597]), rtol=.1) def test_skipgrams(self): # test with no window size and binary labels couples, labels = keras.preprocessing.sequence.skipgrams( np.arange(3), vocabulary_size=3) for couple in couples: self.assertIn(couple[0], [0, 1, 2]) self.assertIn(couple[1], [0, 1, 2]) # test window size and categorical labels couples, labels = keras.preprocessing.sequence.skipgrams( np.arange(5), vocabulary_size=5, window_size=1, categorical=True) for couple in couples: self.assertLessEqual(couple[0] - couple[1], 3) for l in labels: self.assertEqual(len(l), 2) def test_remove_long_seq(self): a = [[[1, 1]], [[2, 1], [2, 2]], [[3, 1], [3, 2], [3, 3]]] 
new_seq, new_label = keras.preprocessing.sequence._remove_long_seq( maxlen=3, seq=a, label=['a', 'b', ['c', 'd']]) self.assertEqual(new_seq, [[[1, 1]], [[2, 1], [2, 2]]]) self.assertEqual(new_label, ['a', 'b']) def test_TimeseriesGenerator(self): data = np.array([[i] for i in range(50)]) targets = np.array([[i] for i in range(50)]) data_gen = keras.preprocessing.sequence.TimeseriesGenerator( data, targets, length=10, sampling_rate=2, batch_size=2) self.assertEqual(len(data_gen), 20) self.assertAllClose(data_gen[0][0], np.array([[[0], [2], [4], [6], [8]], [[1], [3], [5], [7], [9]]])) self.assertAllClose(data_gen[0][1], np.array([[10], [11]])) self.assertAllClose(data_gen[1][0], np.array([[[2], [4], [6], [8], [10]], [[3], [5], [7], [9], [11]]])) self.assertAllClose(data_gen[1][1], np.array([[12], [13]])) data_gen = keras.preprocessing.sequence.TimeseriesGenerator( data, targets, length=10, sampling_rate=2, reverse=True, batch_size=2) self.assertEqual(len(data_gen), 20) self.assertAllClose(data_gen[0][0], np.array([[[8], [6], [4], [2], [0]], [[9], [7], [5], [3], [1]]])) self.assertAllClose(data_gen[0][1], np.array([[10], [11]])) data_gen = keras.preprocessing.sequence.TimeseriesGenerator( data, targets, length=10, sampling_rate=2, shuffle=True, batch_size=1) batch = data_gen[0] r = batch[1][0][0] self.assertAllClose(batch[0], np.array([[[r - 10], [r - 8], [r - 6], [r - 4], [r - 2]]])) self.assertAllClose(batch[1], np.array([ [r], ])) data_gen = keras.preprocessing.sequence.TimeseriesGenerator( data, targets, length=10, sampling_rate=2, stride=2, batch_size=2) self.assertEqual(len(data_gen), 10) self.assertAllClose(data_gen[1][0], np.array([[[4], [6], [8], [10], [12]], [[6], [8], [10], [12], [14]]])) self.assertAllClose(data_gen[1][1], np.array([[14], [16]])) data_gen = keras.preprocessing.sequence.TimeseriesGenerator( data, targets, length=10, sampling_rate=2, start_index=10, end_index=30, batch_size=2) self.assertEqual(len(data_gen), 6) self.assertAllClose(data_gen[0][0], np.array([[[10], [12], [14], [16], [18]], [[11], [13], [15], [17], [19]]])) self.assertAllClose(data_gen[0][1], np.array([[20], [21]])) data = np.array([np.random.random_sample((1, 2, 3, 4)) for i in range(50)]) targets = np.array([np.random.random_sample((3, 2, 1)) for i in range(50)]) data_gen = keras.preprocessing.sequence.TimeseriesGenerator( data, targets, length=10, sampling_rate=2, start_index=10, end_index=30, batch_size=2) self.assertEqual(len(data_gen), 6) self.assertAllClose(data_gen[0][0], np.array( [np.array(data[10:19:2]), np.array(data[11:20:2])])) self.assertAllClose(data_gen[0][1], np.array([targets[20], targets[21]])) with self.assertRaises(ValueError) as context: keras.preprocessing.sequence.TimeseriesGenerator(data, targets, length=50) error = str(context.exception) self.assertIn('`start_index+length=50 > end_index=49` is disallowed', error) def test_TimeSeriesGenerator_doesnt_miss_any_sample(self): x = np.array([[i] for i in range(10)]) for length in range(3, 10): g = keras.preprocessing.sequence.TimeseriesGenerator( x, x, length=length, batch_size=1) expected = max(0, len(x) - length) actual = len(g) self.assertEqual(expected, actual) if actual > 0: # All elements in range(length, 10) should be used as current step expected = np.arange(length, 10).reshape(-1, 1) y = np.concatenate([g[ix][1] for ix in range(len(g))], axis=0) self.assertAllClose(y, expected) x = np.array([[i] for i in range(23)]) strides = (1, 1, 5, 7, 3, 5, 3) lengths = (3, 3, 4, 3, 1, 3, 7) batch_sizes = (6, 6, 6, 5, 6, 6, 6) shuffles 
= (False, True, True, False, False, False, False) for stride, length, batch_size, shuffle in zip(strides, lengths, batch_sizes, shuffles): g = keras.preprocessing.sequence.TimeseriesGenerator( x, x, length=length, sampling_rate=1, stride=stride, start_index=0, end_index=None, shuffle=shuffle, reverse=False, batch_size=batch_size) if shuffle: # all batches have the same size when shuffle is True. expected_sequences = ceil( (23 - length) / float(batch_size * stride)) * batch_size else: # last batch will be different if `(samples - length) / stride` # is not a multiple of `batch_size`. expected_sequences = ceil((23 - length) / float(stride)) expected_batches = ceil(expected_sequences / float(batch_size)) y = [g[ix][1] for ix in range(len(g))] actual_sequences = sum(len(iy) for iy in y) actual_batches = len(y) self.assertEqual(expected_sequences, actual_sequences) self.assertEqual(expected_batches, actual_batches) if __name__ == '__main__': test.main()
tensorflow-master
tensorflow/python/keras/preprocessing/sequence_test.py
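A small sketch, assuming a non-shuffled generator with the default sampling_rate, that reproduces the batch-count arithmetic checked by test_TimeSeriesGenerator_doesnt_miss_any_sample above.

from math import ceil

import numpy as np

from tensorflow.python.keras.preprocessing.sequence import TimeseriesGenerator

x = np.arange(23).reshape(-1, 1)
length, stride, batch_size = 3, 5, 6

g = TimeseriesGenerator(x, x, length=length, stride=stride,
                        batch_size=batch_size)

# Without shuffling, the generator yields ceil((samples - length) / stride)
# sequences, grouped into ceil(sequences / batch_size) batches.
expected_sequences = ceil((len(x) - length) / float(stride))
expected_batches = ceil(expected_sequences / float(batch_size))
assert sum(len(g[i][1]) for i in range(len(g))) == expected_sequences
assert len(g) == expected_batches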
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for preprocessing sequence data.
"""
# pylint: disable=invalid-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from keras_preprocessing import sequence

from tensorflow.python.keras import utils
from tensorflow.python.util.tf_export import keras_export

pad_sequences = sequence.pad_sequences
make_sampling_table = sequence.make_sampling_table
skipgrams = sequence.skipgrams
# TODO(fchollet): consider making `_remove_long_seq` public.
_remove_long_seq = sequence._remove_long_seq  # pylint: disable=protected-access


@keras_export('keras.preprocessing.sequence.TimeseriesGenerator')
class TimeseriesGenerator(sequence.TimeseriesGenerator, utils.Sequence):
  """Utility class for generating batches of temporal data.

  This class takes in a sequence of data-points gathered at equal intervals,
  along with time series parameters such as stride, length of history, etc.,
  to produce batches for training/validation.

  # Arguments
      data: Indexable generator (such as list or Numpy array)
          containing consecutive data points (timesteps).
          The data should be at least 2D, and axis 0 is expected
          to be the time dimension.
      targets: Targets corresponding to timesteps in `data`.
          It should have the same length as `data`.
      length: Length of the output sequences (in number of timesteps).
      sampling_rate: Period between successive individual timesteps
          within sequences. For rate `r`, timesteps
          `data[i]`, `data[i-r]`, ... `data[i - length]`
          are used to create a sample sequence.
      stride: Period between successive output sequences.
          For stride `s`, consecutive output samples would be centered
          around `data[i]`, `data[i+s]`, `data[i+2*s]`, etc.
      start_index: Data points earlier than `start_index` will not be used
          in the output sequences. This is useful to reserve part of the
          data for test or validation.
      end_index: Data points later than `end_index` will not be used
          in the output sequences. This is useful to reserve part of the
          data for test or validation.
      shuffle: Whether to shuffle output samples,
          or instead draw them in chronological order.
      reverse: Boolean: if `True`, timesteps in each output sample will be
          in reverse chronological order.
      batch_size: Number of timeseries samples in each batch
          (except maybe the last one).

  # Returns
      A [Sequence](/utils/#sequence) instance.
# Examples ```python from keras.preprocessing.sequence import TimeseriesGenerator import numpy as np data = np.array([[i] for i in range(50)]) targets = np.array([[i] for i in range(50)]) data_gen = TimeseriesGenerator(data, targets, length=10, sampling_rate=2, batch_size=2) assert len(data_gen) == 20 batch_0 = data_gen[0] x, y = batch_0 assert np.array_equal(x, np.array([[[0], [2], [4], [6], [8]], [[1], [3], [5], [7], [9]]])) assert np.array_equal(y, np.array([[10], [11]])) ``` """ pass keras_export('keras.preprocessing.sequence.pad_sequences')(pad_sequences) keras_export( 'keras.preprocessing.sequence.make_sampling_table')(make_sampling_table) keras_export('keras.preprocessing.sequence.skipgrams')(skipgrams)
tensorflow-master
tensorflow/python/keras/preprocessing/sequence.py
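A short sketch of the skipgrams helper re-exported above; the toy integer-encoded sentence and the window size are assumptions.

from tensorflow.python.keras.preprocessing.sequence import skipgrams

# An already integer-encoded sentence; index 0 is reserved and never sampled.
encoded_sentence = [1, 2, 3, 4, 5]
couples, labels = skipgrams(encoded_sentence, vocabulary_size=6,
                            window_size=2, negative_samples=1.0)

for (target, context), label in zip(couples, labels):
  # label 1 marks a genuine (target, context) pair, label 0 a negative sample.
  print(target, context, label)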
# -*- coding: utf-8 -*- # Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for text data preprocessing utils.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python import keras from tensorflow.python.platform import test class TestText(test.TestCase): def test_one_hot(self): text = 'The cat sat on the mat.' encoded = keras.preprocessing.text.one_hot(text, 5) self.assertEqual(len(encoded), 6) self.assertLessEqual(np.max(encoded), 4) self.assertGreaterEqual(np.min(encoded), 0) # Test on unicode. text = u'The cat sat on the mat.' encoded = keras.preprocessing.text.one_hot(text, 5) self.assertEqual(len(encoded), 6) self.assertLessEqual(np.max(encoded), 4) self.assertGreaterEqual(np.min(encoded), 0) def test_tokenizer(self): texts = [ 'The cat sat on the mat.', 'The dog sat on the log.', 'Dogs and cats living together.' ] tokenizer = keras.preprocessing.text.Tokenizer(num_words=10) tokenizer.fit_on_texts(texts) sequences = [] for seq in tokenizer.texts_to_sequences_generator(texts): sequences.append(seq) self.assertLess(np.max(np.max(sequences)), 10) self.assertEqual(np.min(np.min(sequences)), 1) tokenizer.fit_on_sequences(sequences) for mode in ['binary', 'count', 'tfidf', 'freq']: matrix = tokenizer.texts_to_matrix(texts, mode) self.assertEqual(matrix.shape, (3, 10)) def test_hashing_trick_hash(self): text = 'The cat sat on the mat.' encoded = keras.preprocessing.text.hashing_trick(text, 5) self.assertEqual(len(encoded), 6) self.assertLessEqual(np.max(encoded), 4) self.assertGreaterEqual(np.min(encoded), 1) def test_hashing_trick_md5(self): text = 'The cat sat on the mat.' encoded = keras.preprocessing.text.hashing_trick( text, 5, hash_function='md5') self.assertEqual(len(encoded), 6) self.assertLessEqual(np.max(encoded), 4) self.assertGreaterEqual(np.min(encoded), 1) def test_tokenizer_oov_flag(self): x_train = ['This text has only known words'] x_test = ['This text has some unknown words'] # 2 OOVs: some, unknown # Default, without OOV flag tokenizer = keras.preprocessing.text.Tokenizer() tokenizer.fit_on_texts(x_train) x_test_seq = tokenizer.texts_to_sequences(x_test) self.assertEqual(len(x_test_seq[0]), 4) # discards 2 OOVs # With OOV feature tokenizer = keras.preprocessing.text.Tokenizer(oov_token='<unk>') tokenizer.fit_on_texts(x_train) x_test_seq = tokenizer.texts_to_sequences(x_test) self.assertEqual(len(x_test_seq[0]), 6) # OOVs marked in place def test_sequential_fit(self): texts = [ 'The cat sat on the mat.', 'The dog sat on the log.', 'Dogs and cats living together.' 
] word_sequences = [['The', 'cat', 'is', 'sitting'], ['The', 'dog', 'is', 'standing']] tokenizer = keras.preprocessing.text.Tokenizer() tokenizer.fit_on_texts(texts) tokenizer.fit_on_texts(word_sequences) self.assertEqual(tokenizer.document_count, 5) tokenizer.texts_to_matrix(texts) tokenizer.texts_to_matrix(word_sequences) def test_text_to_word_sequence(self): text = 'hello! ? world!' seq = keras.preprocessing.text.text_to_word_sequence(text) self.assertEqual(seq, ['hello', 'world']) def test_text_to_word_sequence_multichar_split(self): text = 'hello!stop?world!' seq = keras.preprocessing.text.text_to_word_sequence(text, split='stop') self.assertEqual(seq, ['hello', 'world']) def test_text_to_word_sequence_unicode(self): text = u'ali! veli? kırk dokuz elli' seq = keras.preprocessing.text.text_to_word_sequence(text) self.assertEqual(seq, [u'ali', u'veli', u'kırk', u'dokuz', u'elli']) def test_text_to_word_sequence_unicode_multichar_split(self): text = u'ali!stopveli?stopkırkstopdokuzstopelli' seq = keras.preprocessing.text.text_to_word_sequence(text, split='stop') self.assertEqual(seq, [u'ali', u'veli', u'kırk', u'dokuz', u'elli']) def test_tokenizer_unicode(self): texts = [ u'ali veli kırk dokuz elli', u'ali veli kırk dokuz elli veli kırk dokuz' ] tokenizer = keras.preprocessing.text.Tokenizer(num_words=5) tokenizer.fit_on_texts(texts) self.assertEqual(len(tokenizer.word_counts), 5) if __name__ == '__main__': test.main()
tensorflow-master
tensorflow/python/keras/preprocessing/text_test.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Keras data preprocessing utils.""" # pylint: disable=g-import-not-at-top from __future__ import absolute_import from __future__ import division from __future__ import print_function import keras_preprocessing from tensorflow.python.keras import backend from tensorflow.python.keras import utils # This exists for compatibility with prior version of keras_preprocessing. # TODO(fchollet): remove in the future. keras_preprocessing.set_keras_submodules(backend=backend, utils=utils) from tensorflow.python.keras.preprocessing import image from tensorflow.python.keras.preprocessing import sequence from tensorflow.python.keras.preprocessing import text del absolute_import del division del print_function
tensorflow-master
tensorflow/python/keras/preprocessing/__init__.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for image preprocessing utils.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import shutil import tempfile import numpy as np from tensorflow.python import keras from tensorflow.python.platform import test try: import PIL # pylint:disable=g-import-not-at-top except ImportError: PIL = None def _generate_test_images(): img_w = img_h = 20 rgb_images = [] gray_images = [] for _ in range(8): bias = np.random.rand(img_w, img_h, 1) * 64 variance = np.random.rand(img_w, img_h, 1) * (255 - 64) imarray = np.random.rand(img_w, img_h, 3) * variance + bias im = keras.preprocessing.image.array_to_img(imarray, scale=False) rgb_images.append(im) imarray = np.random.rand(img_w, img_h, 1) * variance + bias im = keras.preprocessing.image.array_to_img(imarray, scale=False) gray_images.append(im) return [rgb_images, gray_images] class TestImage(test.TestCase): def test_image_data_generator(self): if PIL is None: return # Skip test if PIL is not available. for test_images in _generate_test_images(): img_list = [] for im in test_images: img_list.append(keras.preprocessing.image.img_to_array(im)[None, ...]) images = np.vstack(img_list) generator = keras.preprocessing.image.ImageDataGenerator( featurewise_center=True, samplewise_center=True, featurewise_std_normalization=True, samplewise_std_normalization=True, zca_whitening=True, rotation_range=90., width_shift_range=0.1, height_shift_range=0.1, shear_range=0.5, zoom_range=0.2, channel_shift_range=0., brightness_range=(1, 5), fill_mode='nearest', cval=0.5, horizontal_flip=True, vertical_flip=True) # Basic test before fit x = np.random.random((32, 10, 10, 3)) generator.flow(x) # Fit generator.fit(images, augment=True) for x, _ in generator.flow( images, np.arange(images.shape[0]), shuffle=True): self.assertEqual(x.shape[1:], images.shape[1:]) break def test_image_data_generator_with_split_value_error(self): with self.assertRaises(ValueError): keras.preprocessing.image.ImageDataGenerator(validation_split=5) def test_image_data_generator_invalid_data(self): generator = keras.preprocessing.image.ImageDataGenerator( featurewise_center=True, samplewise_center=True, featurewise_std_normalization=True, samplewise_std_normalization=True, zca_whitening=True, data_format='channels_last') # Test fit with invalid data with self.assertRaises(ValueError): x = np.random.random((3, 10, 10)) generator.fit(x) # Test flow with invalid data with self.assertRaises(ValueError): generator.flow(np.arange(5)) # Invalid number of channels: will work but raise a warning x = np.random.random((32, 10, 10, 5)) generator.flow(x) with self.assertRaises(ValueError): generator = keras.preprocessing.image.ImageDataGenerator( data_format='unknown') generator = keras.preprocessing.image.ImageDataGenerator( zoom_range=(2, 2)) def 
test_image_data_generator_fit(self): generator = keras.preprocessing.image.ImageDataGenerator( featurewise_center=True, samplewise_center=True, featurewise_std_normalization=True, samplewise_std_normalization=True, zca_whitening=True, data_format='channels_last') # Test grayscale x = np.random.random((32, 10, 10, 1)) generator.fit(x) # Test RBG x = np.random.random((32, 10, 10, 3)) generator.fit(x) generator = keras.preprocessing.image.ImageDataGenerator( featurewise_center=True, samplewise_center=True, featurewise_std_normalization=True, samplewise_std_normalization=True, zca_whitening=True, data_format='channels_first') # Test grayscale x = np.random.random((32, 1, 10, 10)) generator.fit(x) # Test RBG x = np.random.random((32, 3, 10, 10)) generator.fit(x) def test_directory_iterator(self): if PIL is None: return # Skip test if PIL is not available. num_classes = 2 temp_dir = self.get_temp_dir() self.addCleanup(shutil.rmtree, temp_dir) # create folders and subfolders paths = [] for cl in range(num_classes): class_directory = 'class-{}'.format(cl) classpaths = [ class_directory, os.path.join(class_directory, 'subfolder-1'), os.path.join(class_directory, 'subfolder-2'), os.path.join( class_directory, 'subfolder-1', 'sub-subfolder') ] for path in classpaths: os.mkdir(os.path.join(temp_dir, path)) paths.append(classpaths) # save the images in the paths count = 0 filenames = [] for test_images in _generate_test_images(): for im in test_images: # rotate image class im_class = count % num_classes # rotate subfolders classpaths = paths[im_class] filename = os.path.join(classpaths[count % len(classpaths)], 'image-{}.jpg'.format(count)) filenames.append(filename) im.save(os.path.join(temp_dir, filename)) count += 1 # Test image loading util fname = os.path.join(temp_dir, filenames[0]) _ = keras.preprocessing.image.load_img(fname) _ = keras.preprocessing.image.load_img(fname, grayscale=True) _ = keras.preprocessing.image.load_img(fname, target_size=(10, 10)) _ = keras.preprocessing.image.load_img(fname, target_size=(10, 10), interpolation='bilinear') # create iterator generator = keras.preprocessing.image.ImageDataGenerator() dir_iterator = generator.flow_from_directory(temp_dir) # check number of classes and images self.assertEqual(len(dir_iterator.class_indices), num_classes) self.assertEqual(len(dir_iterator.classes), count) self.assertEqual(set(dir_iterator.filenames), set(filenames)) def preprocessing_function(x): """This will fail if not provided by a Numpy array. Note: This is made to enforce backward compatibility. Args: x: A numpy array. Returns: An array of zeros with the same shape as the given array. """ self.assertEqual(x.shape, (26, 26, 3)) self.assertIs(type(x), np.ndarray) return np.zeros_like(x) # Test usage as Sequence generator = keras.preprocessing.image.ImageDataGenerator( preprocessing_function=preprocessing_function) dir_seq = generator.flow_from_directory( str(temp_dir), target_size=(26, 26), color_mode='rgb', batch_size=3, class_mode='categorical') self.assertEqual(len(dir_seq), count // 3 + 1) x1, y1 = dir_seq[1] self.assertEqual(x1.shape, (3, 26, 26, 3)) self.assertEqual(y1.shape, (3, num_classes)) x1, y1 = dir_seq[5] self.assertTrue((x1 == 0).all()) def directory_iterator_with_validation_split_test_helper( self, validation_split): if PIL is None: return # Skip test if PIL is not available. 
num_classes = 2 tmp_folder = tempfile.mkdtemp(prefix='test_images') # create folders and subfolders paths = [] for cl in range(num_classes): class_directory = 'class-{}'.format(cl) classpaths = [ class_directory, os.path.join(class_directory, 'subfolder-1'), os.path.join(class_directory, 'subfolder-2'), os.path.join(class_directory, 'subfolder-1', 'sub-subfolder') ] for path in classpaths: os.mkdir(os.path.join(tmp_folder, path)) paths.append(classpaths) # save the images in the paths count = 0 filenames = [] for test_images in _generate_test_images(): for im in test_images: # rotate image class im_class = count % num_classes # rotate subfolders classpaths = paths[im_class] filename = os.path.join(classpaths[count % len(classpaths)], 'image-{}.jpg'.format(count)) filenames.append(filename) im.save(os.path.join(tmp_folder, filename)) count += 1 # create iterator generator = keras.preprocessing.image.ImageDataGenerator( validation_split=validation_split) with self.assertRaises(ValueError): generator.flow_from_directory(tmp_folder, subset='foo') num_validation = int(count * validation_split) num_training = count - num_validation train_iterator = generator.flow_from_directory( tmp_folder, subset='training') self.assertEqual(train_iterator.samples, num_training) valid_iterator = generator.flow_from_directory( tmp_folder, subset='validation') self.assertEqual(valid_iterator.samples, num_validation) # check number of classes and images self.assertEqual(len(train_iterator.class_indices), num_classes) self.assertEqual(len(train_iterator.classes), num_training) self.assertEqual( len(set(train_iterator.filenames) & set(filenames)), num_training) shutil.rmtree(tmp_folder) def test_directory_iterator_with_validation_split_25_percent(self): self.directory_iterator_with_validation_split_test_helper(0.25) def test_directory_iterator_with_validation_split_40_percent(self): self.directory_iterator_with_validation_split_test_helper(0.40) def test_directory_iterator_with_validation_split_50_percent(self): self.directory_iterator_with_validation_split_test_helper(0.50) def test_img_utils(self): if PIL is None: return # Skip test if PIL is not available. height, width = 10, 8 # Test channels_first data format x = np.random.random((3, height, width)) img = keras.preprocessing.image.array_to_img( x, data_format='channels_first') self.assertEqual(img.size, (width, height)) x = keras.preprocessing.image.img_to_array( img, data_format='channels_first') self.assertEqual(x.shape, (3, height, width)) # Test 2D x = np.random.random((1, height, width)) img = keras.preprocessing.image.array_to_img( x, data_format='channels_first') self.assertEqual(img.size, (width, height)) x = keras.preprocessing.image.img_to_array( img, data_format='channels_first') self.assertEqual(x.shape, (1, height, width)) # Test channels_last data format x = np.random.random((height, width, 3)) img = keras.preprocessing.image.array_to_img(x, data_format='channels_last') self.assertEqual(img.size, (width, height)) x = keras.preprocessing.image.img_to_array(img, data_format='channels_last') self.assertEqual(x.shape, (height, width, 3)) # Test 2D x = np.random.random((height, width, 1)) img = keras.preprocessing.image.array_to_img(x, data_format='channels_last') self.assertEqual(img.size, (width, height)) x = keras.preprocessing.image.img_to_array(img, data_format='channels_last') self.assertEqual(x.shape, (height, width, 1)) def test_batch_standardize(self): if PIL is None: return # Skip test if PIL is not available. 
# ImageDataGenerator.standardize should work on batches for test_images in _generate_test_images(): img_list = [] for im in test_images: img_list.append(keras.preprocessing.image.img_to_array(im)[None, ...]) images = np.vstack(img_list) generator = keras.preprocessing.image.ImageDataGenerator( featurewise_center=True, samplewise_center=True, featurewise_std_normalization=True, samplewise_std_normalization=True, zca_whitening=True, rotation_range=90., width_shift_range=0.1, height_shift_range=0.1, shear_range=0.5, zoom_range=0.2, channel_shift_range=0., brightness_range=(1, 5), fill_mode='nearest', cval=0.5, horizontal_flip=True, vertical_flip=True) generator.fit(images, augment=True) transformed = np.copy(images) for i, im in enumerate(transformed): transformed[i] = generator.random_transform(im) transformed = generator.standardize(transformed) def test_img_transforms(self): x = np.random.random((3, 200, 200)) _ = keras.preprocessing.image.random_rotation(x, 20) _ = keras.preprocessing.image.random_shift(x, 0.2, 0.2) _ = keras.preprocessing.image.random_shear(x, 2.) _ = keras.preprocessing.image.random_zoom(x, (0.5, 0.5)) _ = keras.preprocessing.image.apply_channel_shift(x, 2, 2) _ = keras.preprocessing.image.apply_affine_transform(x, 2) with self.assertRaises(ValueError): keras.preprocessing.image.random_zoom(x, (0, 0, 0)) _ = keras.preprocessing.image.random_channel_shift(x, 2.) if __name__ == '__main__': test.main()
tensorflow-master
tensorflow/python/keras/preprocessing/image_test.py
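A minimal sketch of the directory-iterator pattern exercised by the test above; 'data/train' is a hypothetical directory laid out with one subfolder per class.

from tensorflow.python.keras.preprocessing.image import ImageDataGenerator

datagen = ImageDataGenerator(rescale=1. / 255, validation_split=0.25)

# Each subfolder of 'data/train' is treated as one class.
train_it = datagen.flow_from_directory(
    'data/train', target_size=(26, 26), batch_size=3,
    class_mode='categorical', subset='training')
val_it = datagen.flow_from_directory(
    'data/train', target_size=(26, 26), batch_size=3,
    class_mode='categorical', subset='validation')

x_batch, y_batch = train_it[0]
print(x_batch.shape)  # (3, 26, 26, 3)
print(y_batch.shape)  # (3, num_classes)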
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilities for text input preprocessing. """ # pylint: disable=invalid-name from __future__ import absolute_import from __future__ import division from __future__ import print_function from keras_preprocessing import text from tensorflow.python.util.tf_export import keras_export text_to_word_sequence = text.text_to_word_sequence one_hot = text.one_hot hashing_trick = text.hashing_trick Tokenizer = text.Tokenizer keras_export( 'keras.preprocessing.text.text_to_word_sequence')(text_to_word_sequence) keras_export('keras.preprocessing.text.one_hot')(one_hot) keras_export('keras.preprocessing.text.hashing_trick')(hashing_trick) keras_export('keras.preprocessing.text.Tokenizer')(Tokenizer)
tensorflow-master
tensorflow/python/keras/preprocessing/text.py
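A hedged sketch combining the text utilities above with pad_sequences; the sample sentences and the maxlen value are assumptions, mirroring cases covered in text_test.py.

from tensorflow.python.keras.preprocessing.sequence import pad_sequences
from tensorflow.python.keras.preprocessing.text import Tokenizer

texts = ['The cat sat on the mat.', 'The dog sat on the log.']

tokenizer = Tokenizer(num_words=10, oov_token='<unk>')
tokenizer.fit_on_texts(texts)

sequences = tokenizer.texts_to_sequences(texts)   # lists of word indices
padded = pad_sequences(sequences, maxlen=8, padding='post')
matrix = tokenizer.texts_to_matrix(texts, mode='tfidf')
print(padded.shape, matrix.shape)  # (2, 8) (2, 10)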