# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for binary data file utilities."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import contextlib
import multiprocessing

# pylint: disable=wrong-import-order
import numpy as np
import pandas as pd
import tensorflow as tf
# pylint: enable=wrong-import-order

from official.utils.data import file_io
from official.utils.misc import keras_utils


_RAW_ROW = "raw_row"
_DUMMY_COL = "column_0"
_DUMMY_VEC_COL = "column_1"
_DUMMY_VEC_LEN = 4

_ROWS_PER_CORE = 4

_TEST_CASES = [
    # One batch of one
    dict(row_count=1, cpu_count=1, expected=[
        [[0]]
    ]),

    dict(row_count=10, cpu_count=1, expected=[
        [[0, 1, 2, 3]], [[4, 5, 6, 7]], [[8, 9]]
    ]),

    dict(row_count=21, cpu_count=1, expected=[
        [[0, 1, 2, 3]], [[4, 5, 6, 7]], [[8, 9, 10, 11]],
        [[12, 13, 14, 15]], [[16, 17, 18, 19]], [[20]]
    ]),

    dict(row_count=1, cpu_count=4, expected=[
        [[0]]
    ]),

    dict(row_count=10, cpu_count=4, expected=[
        [[0, 1], [2, 3, 4], [5, 6], [7, 8, 9]]
    ]),

    dict(row_count=21, cpu_count=4, expected=[
        [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]],
        [[16], [17], [18], [19, 20]]
    ]),

    dict(row_count=10, cpu_count=8, expected=[
        [[0], [1], [2], [3, 4], [5], [6], [7], [8, 9]]
    ]),

    dict(row_count=40, cpu_count=8, expected=[
        [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15],
         [16, 17, 18, 19], [20, 21, 22, 23], [24, 25, 26, 27],
         [28, 29, 30, 31]],
        [[32], [33], [34], [35], [36], [37], [38], [39]]
    ]),
]

_FEATURE_MAP = {
    _RAW_ROW: tf.io.FixedLenFeature([1], dtype=tf.int64),
    _DUMMY_COL: tf.io.FixedLenFeature([1], dtype=tf.int64),
    _DUMMY_VEC_COL: tf.io.FixedLenFeature([_DUMMY_VEC_LEN], dtype=tf.float32)
}


@contextlib.contextmanager
def fixed_core_count(cpu_count):
  """Override CPU count.

  file_io.py uses the cpu_count function to scale to the size of the
  instance. However, this is not desirable for testing because it can make
  the test flaky. Instead, this context manager fixes the count for more
  robust testing.

  Args:
    cpu_count: How many cores multiprocessing claims to have.

  Yields:
    Nothing. (for context manager only)
  """
  old_count_fn = multiprocessing.cpu_count
  multiprocessing.cpu_count = lambda: cpu_count
  yield
  multiprocessing.cpu_count = old_count_fn


class BaseTest(tf.test.TestCase):

  def setUp(self):
    super(BaseTest, self).setUp()
    if keras_utils.is_v2_0:
      tf.compat.v1.disable_eager_execution()

  def _test_sharding(self, row_count, cpu_count, expected):
    df = pd.DataFrame({_DUMMY_COL: list(range(row_count))})
    with fixed_core_count(cpu_count):
      shards = list(file_io.iter_shard_dataframe(df, _ROWS_PER_CORE))
    result = [[j[_DUMMY_COL].tolist() for j in i] for i in shards]
    self.assertAllEqual(expected, result)

  def test_tiny_rows_low_core(self):
    self._test_sharding(**_TEST_CASES[0])

  def test_small_rows_low_core(self):
    self._test_sharding(**_TEST_CASES[1])

  def test_large_rows_low_core(self):
    self._test_sharding(**_TEST_CASES[2])

  def test_tiny_rows_medium_core(self):
    self._test_sharding(**_TEST_CASES[3])

  def test_small_rows_medium_core(self):
    self._test_sharding(**_TEST_CASES[4])

  def test_large_rows_medium_core(self):
    self._test_sharding(**_TEST_CASES[5])

  def test_small_rows_large_core(self):
    self._test_sharding(**_TEST_CASES[6])

  def test_large_rows_large_core(self):
    self._test_sharding(**_TEST_CASES[7])

  def _serialize_deserialize(self, num_cores=1, num_rows=20):
    np.random.seed(1)
    df = pd.DataFrame({
        # Serialization order is only deterministic for num_cores=1. raw_row is
        # used in validation after the deserialization.
        _RAW_ROW: np.array(range(num_rows), dtype=np.int64),
        _DUMMY_COL: np.random.randint(0, 35, size=(num_rows,)),
        _DUMMY_VEC_COL: [
            np.array([np.random.random() for _ in range(_DUMMY_VEC_LEN)])
            for i in range(num_rows)  # pylint: disable=unused-variable
        ]
    })

    with fixed_core_count(num_cores):
      buffer_path = file_io.write_to_temp_buffer(
          df, self.get_temp_dir(), [_RAW_ROW, _DUMMY_COL, _DUMMY_VEC_COL])

    with self.session(graph=tf.Graph()) as sess:
      dataset = tf.data.TFRecordDataset(buffer_path)
      dataset = dataset.batch(1).map(
          lambda x: tf.io.parse_example(serialized=x, features=_FEATURE_MAP))
      data_iter = tf.compat.v1.data.make_one_shot_iterator(dataset)
      seen_rows = set()
      for i in range(num_rows + 5):
        row = data_iter.get_next()
        try:
          row_id, val_0, val_1 = sess.run(
              [row[_RAW_ROW], row[_DUMMY_COL], row[_DUMMY_VEC_COL]])
          row_id, val_0, val_1 = row_id[0][0], val_0[0][0], val_1[0]
          assert row_id not in seen_rows
          seen_rows.add(row_id)
          self.assertEqual(val_0, df[_DUMMY_COL][row_id])
          self.assertAllClose(val_1, df[_DUMMY_VEC_COL][row_id])
          self.assertLess(i, num_rows, msg="Too many rows.")
        except tf.errors.OutOfRangeError:
          self.assertGreaterEqual(i, num_rows, msg="Too few rows.")

    file_io._GARBAGE_COLLECTOR.purge()
    assert not tf.io.gfile.exists(buffer_path)

  def test_serialize_deserialize_0(self):
    self._serialize_deserialize(num_cores=1)

  def test_serialize_deserialize_1(self):
    self._serialize_deserialize(num_cores=2)

  def test_serialize_deserialize_2(self):
    self._serialize_deserialize(num_cores=8)


if __name__ == "__main__":
  tf.test.main()
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/official/utils/data/file_io_test.py
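The `fixed_core_count` helper in the test above monkeypatches `multiprocessing.cpu_count` so that `file_io.iter_shard_dataframe` sees a deterministic core count. A minimal, self-contained sketch of the same pattern (illustrative only, not part of the repo; it adds a try/finally so the original function is restored even if the body raises):

import contextlib
import multiprocessing


@contextlib.contextmanager
def fixed_core_count(cpu_count):
  """Temporarily report a fixed core count to anything calling cpu_count()."""
  old_count_fn = multiprocessing.cpu_count
  multiprocessing.cpu_count = lambda: cpu_count
  try:
    yield
  finally:
    # Restore the real implementation even if the body raised.
    multiprocessing.cpu_count = old_count_fn


if __name__ == "__main__":
  with fixed_core_count(2):
    assert multiprocessing.cpu_count() == 2  # Code under test sees 2 cores.
  assert multiprocessing.cpu_count() >= 1  # Original behavior restored.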
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """A light weight utilities to train NLP models.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import json import os import time from absl import logging import tensorflow as tf from horovod.tensorflow.compression import Compression from dllogger import Verbosity from optimization import GradientAccumulator from official.utils.misc import distribution_utils from official.utils.misc import tpu_lib _SUMMARY_TXT = 'training_summary.txt' _MIN_SUMMARY_STEPS = 10 def _save_checkpoint(checkpoint, model_dir, checkpoint_prefix): """Saves model to with provided checkpoint prefix.""" checkpoint_path = os.path.join(model_dir, checkpoint_prefix) saved_path = checkpoint.save(checkpoint_path) logging.info('Saving model as TF checkpoint: %s', saved_path) return def _get_input_iterator(input_fn, strategy): """Returns distributed dataset iterator.""" # When training with TPU pods, datasets needs to be cloned across # workers. Since Dataset instance cannot be cloned in eager mode, we instead # pass callable that returns a dataset. if not callable(input_fn): raise ValueError('`input_fn` should be a closure that returns a dataset.') if strategy is None: input_data = input_fn() iterator = iter(input_data) else: iterator = iter( strategy.experimental_distribute_datasets_from_function(input_fn)) return iterator def _float_metric_value(metric): """Gets the value of a float-value keras metric.""" return metric.result().numpy().astype(float) def steps_to_run(current_step, steps_per_epoch, steps_per_loop): """Calculates steps to run on device.""" if steps_per_loop <= 0: raise ValueError('steps_per_loop should be positive integer.') if steps_per_loop == 1: return steps_per_loop remainder_in_epoch = current_step % steps_per_epoch if remainder_in_epoch != 0: return min(steps_per_epoch - remainder_in_epoch, steps_per_loop) else: return steps_per_loop def write_txt_summary(training_summary, summary_dir): """Writes a summary text file to record stats.""" summary_path = os.path.join(summary_dir, _SUMMARY_TXT) with tf.io.gfile.GFile(summary_path, 'wb') as f: logging.info('Training Summary: \n%s', str(training_summary)) f.write(json.dumps(training_summary, indent=4)) def run_customized_training_loop( # pylint: disable=invalid-name _sentinel=None, # pylint: enable=invalid-name strategy=None, model_fn=None, loss_fn=None, model_dir=None, train_input_fn=None, steps_per_epoch=None, num_accumulative_step=1, steps_per_loop=1, epochs=1, eval_input_fn=None, eval_steps=None, metric_fn=None, init_checkpoint=None, custom_callbacks=None, run_eagerly=False, hvd=None, sub_model_export_name=None, params=None): """Run BERT pretrain model training using low-level API. Arguments: _sentinel: Used to prevent positional parameters. Internal, do not use. 
strategy: Distribution strategy on which to run low level training loop. model_fn: Function that returns a tuple (model, sub_model). Caller of this function should add optimizer to the `model` via calling `model.compile()` API or manually setting `model.optimizer` attribute. Second element of the returned tuple(sub_model) is an optional sub model to be used for initial checkpoint -- if provided. loss_fn: Function with signature func(labels, logits) and returns a loss tensor. model_dir: Model directory used during training for restoring/saving model weights. train_input_fn: Function that returns a tf.data.Dataset used for training. steps_per_epoch: Number of steps to run per epoch. At the end of each epoch, model checkpoint will be saved and evaluation will be conducted if evaluation dataset is provided. steps_per_loop: Number of steps per graph-mode loop. In order to reduce communication in eager context, training logs are printed every steps_per_loop. epochs: Number of epochs to train. eval_input_fn: Function that returns evaluation dataset. If none, evaluation is skipped. eval_steps: Number of steps to run evaluation. Required if `eval_input_fn` is not none. metric_fn: A metrics function that returns a Keras Metric object to record evaluation result using evaluation dataset or with training dataset after every epoch. init_checkpoint: Optional checkpoint to load to `sub_model` returned by `model_fn`. custom_callbacks: A list of Keras Callbacks objects to run during training. More specifically, `on_batch_begin()`, `on_batch_end()`, methods are invoked during training. run_eagerly: Whether to run model training in pure eager execution. This should be disable for TPUStrategy. sub_model_export_name: If not None, will export `sub_model` returned by `model_fn` into checkpoint files. The name of intermediate checkpoint file is {sub_model_export_name}_step_{step}.ckpt and the last checkpint's name is {sub_model_export_name}.ckpt; if None, `sub_model` will not be exported as checkpoint. Returns: Trained model. Raises: ValueError: (1) When model returned by `model_fn` does not have optimizer attribute or when required parameters are set to none. (2) eval args are not specified correctly. (3) metric_fn must be a callable if specified. (4) sub_model_checkpoint_name is specified, but `sub_model` returned by `model_fn` is None. """ if _sentinel is not None: raise ValueError('only call `run_customized_training_loop()` ' 'with named arguments.') required_arguments = [ model_fn, loss_fn, model_dir, steps_per_epoch, train_input_fn ] if [arg for arg in required_arguments if arg is None]: raise ValueError('`model_fn`, `loss_fn`, `model_dir`, ' '`steps_per_loop` and `steps_per_epoch` are required ' 'parameters.') if steps_per_loop > steps_per_epoch: logging.error( 'steps_per_loop: %d is specified to be greater than ' ' steps_per_epoch: %d, we will use steps_per_epoch as' ' steps_per_loop.', steps_per_loop, steps_per_epoch) steps_per_loop = steps_per_epoch assert tf.executing_eagerly() if run_eagerly: if steps_per_loop > 1: raise ValueError( 'steps_per_loop is used for performance optimization. 
When you want ' 'to run eagerly, you cannot leverage graph mode loop.') if isinstance(strategy, tf.distribute.experimental.TPUStrategy): raise ValueError( 'TPUStrategy should not run eagerly as it heavily replies on graph' ' optimization for the distributed system.') if eval_input_fn and (eval_steps is None or metric_fn is None): raise ValueError( '`eval_step` and `metric_fn` are required when `eval_input_fn ` ' 'is not none.') if metric_fn and not callable(metric_fn): raise ValueError( 'if `metric_fn` is specified, metric_fn must be a callable.') total_training_steps = steps_per_epoch * epochs # To reduce unnecessary send/receive input pipeline operation, we place input # pipeline ops in worker task. train_iterator = _get_input_iterator(train_input_fn, strategy) with distribution_utils.get_strategy_scope(strategy): # To correctly place the model weights on accelerators, # model and optimizer should be created in scope. model, sub_model = model_fn() first_batch = True if not hasattr(model, 'optimizer'): raise ValueError('User should set optimizer attribute to model ' 'inside `model_fn`.') if sub_model_export_name and sub_model is None: raise ValueError('sub_model_export_name is specified as %s, but ' 'sub_model is None.' % sub_model_export_name) optimizer = model.optimizer use_float16 = isinstance( optimizer, tf.keras.mixed_precision.LossScaleOptimizer) if init_checkpoint: logging.info( 'Checkpoint file %s found and restoring from ' 'initial checkpoint for core model.', init_checkpoint) checkpoint = tf.train.Checkpoint(model=sub_model) checkpoint.restore(init_checkpoint).assert_existing_objects_matched() logging.info('Loading from checkpoint file completed') train_loss_metric = tf.keras.metrics.Mean( 'training_loss', dtype=tf.float32) eval_metrics = [metric_fn()] if metric_fn else [] # If evaluation is required, make a copy of metric as it will be used by # both train and evaluation. train_metrics = [ metric.__class__.from_config(metric.get_config()) for metric in eval_metrics ] # Create summary writers if not hvd or hvd.rank() == 0: summary_dir = os.path.join(model_dir, 'summaries') eval_summary_writer = tf.summary.create_file_writer( os.path.join(summary_dir, 'eval')) if steps_per_loop >= _MIN_SUMMARY_STEPS: # Only writes summary when the stats are collected sufficiently over # enough steps. train_summary_writer = tf.summary.create_file_writer( os.path.join(summary_dir, 'train')) else: train_summary_writer = None else: eval_summary_writer = None train_summary_writer = None eval_input_fn = None # Collects training variables. training_vars = model.trainable_variables accum_gradients = GradientAccumulator() def _replicated_step(inputs, first_batch=False): """Replicated training step.""" inputs, labels = inputs with tf.GradientTape() as tape: model_outputs = model(inputs, training=True) loss = loss_fn(labels, model_outputs) if use_float16: scaled_loss = optimizer.get_scaled_loss(loss) if hvd: tape = hvd.DistributedGradientTape(tape, sparse_as_dense=True, compression=Compression.fp16 if use_float16 else Compression.none) if use_float16: scaled_grads = tape.gradient(scaled_loss, training_vars) grads = optimizer.get_unscaled_gradients(scaled_grads) else: grads = tape.gradient(loss, training_vars) (grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0) optimizer.apply_gradients(zip(grads, training_vars)) if hvd and first_batch: hvd.broadcast_variables(model.variables, 0) hvd.broadcast_variables(optimizer.variables(), 0) # For reporting, the metric takes the mean of losses. 
train_loss_metric.update_state(loss) for metric in train_metrics: metric.update_state(labels, model_outputs) def forward(inputs): inputs, labels = inputs with tf.GradientTape() as tape: model_outputs = model(inputs, training=True) loss = loss_fn(labels, model_outputs) if use_float16: scaled_loss = optimizer.get_scaled_loss(loss) if use_float16: scaled_grads = tape.gradient(scaled_loss, training_vars) grads = optimizer.get_unscaled_gradients(scaled_grads) else: grads = tape.gradient(loss, training_vars) # For reporting, the metric takes the mean of losses. train_loss_metric.update_state(loss) for metric in train_metrics: metric.update_state(labels, model_outputs) accum_gradients.add_gradients(grads) def step(num_grad_accumulates): gradients = accum_gradients.gradients if hvd: gradients = [None if g is None else hvd.allreduce(g / tf.cast(num_grad_accumulates, g.dtype), compression=Compression.fp16 if use_float16 else Compression.none) for g in gradients] else: gradients = [None if g is None else g / tf.cast(num_grad_accumulates, g.dtype) for g in gradients] (gradients, _) = tf.clip_by_global_norm(gradients, clip_norm=1.0) optimizer.apply_gradients(zip(gradients, training_vars)) accum_gradients.reset() @tf.function def train_steps_strategy(iterator, steps, num_grad_accumulates): """Performs distributed training steps in a loop. Args: iterator: the distributed iterator of training datasets. steps: an tf.int32 integer tensor to specify number of steps to run inside host training loop. Raises: ValueError: Any of the arguments or tensor shapes are invalid. """ if not isinstance(steps, tf.Tensor): raise ValueError('steps should be an Tensor. Python object may cause ' 'retracing.') if num_grad_accumulates != 1: for _ in tf.range(steps*num_grad_accumulates): strategy.experimental_run_v2(forward, args=(next(iterator),)) if _ == 0 or (_ + 1) % num_grad_accumulates == 0: strategy.experimental_run_v2(step, args=(num_grad_accumulates,)) else: for _ in tf.range(steps): strategy.experimental_run_v2(_replicated_step, args=(next(iterator),)) @tf.function def train_steps(iterator, steps, num_grad_accumulates, first_batch): if not isinstance(steps, tf.Tensor): raise ValueError('steps should be an Tensor. Python object may cause ' 'retracing.') if num_grad_accumulates != 1: for _ in tf.range(steps*num_grad_accumulates): forward(next(iterator)) if _ == 0 or (_ + 1) % num_grad_accumulates == 0: step(num_grad_accumulates) if hvd and _ == 0 and first_batch: hvd.broadcast_variables(model.variables, 0) hvd.broadcast_variables(optimizer.variables(), 0) else: for _ in tf.range(steps): _replicated_step(next(iterator), (first_batch and _ == 0)) def train_single_step_strategy(iterator, num_grad_accumulates): """Performs a distributed training step. Args: iterator: the distributed iterator of training datasets. Raises: ValueError: Any of the arguments or tensor shapes are invalid. """ if num_grad_accumulates != 1: for _ in tf.range(num_grad_accumulates): strategy.experimental_run_v2(forward, args=(next(iterator),)) if _ == 0 or (_ + 1) % num_grad_accumulates == 0: strategy.experimental_run_v2(step, args=(num_grad_accumulates,)) else: strategy.experimental_run_v2(_replicated_step, args=(next(iterator),)) def train_single_step(iterator, num_grad_accumulates, first_batch): """Performs a distributed training step. Args: iterator: the distributed iterator of training datasets. Raises: ValueError: Any of the arguments or tensor shapes are invalid. 
""" if num_grad_accumulates != 1: for _ in tf.range(num_grad_accumulates): forward(next(iterator)) if _ == 0 or (_ + 1) % num_grad_accumulates == 0: step(num_grad_accumulates) if hvd and _ == 0 and first_batch: hvd.broadcast_variables(model.variables, 0) hvd.broadcast_variables(optimizer.variables(), 0) else: _replicated_step(next(iterator), first_batch) def test_step(iterator): """Calculates evaluation metrics on distributed devices.""" def _test_step_fn(inputs): """Replicated accuracy calculation.""" inputs, labels = inputs model_outputs = model(inputs, training=False) for metric in eval_metrics: metric.update_state(labels, model_outputs) if strategy: strategy.experimental_run_v2(_test_step_fn, args=(next(iterator),)) else: _test_step_fn(next(iterator)) if not run_eagerly: train_single_step = tf.function(train_single_step) test_step = tf.function(test_step) def _run_evaluation(current_training_step, test_iterator): """Runs validation steps and aggregate metrics.""" for _ in range(eval_steps): test_step(test_iterator) with eval_summary_writer.as_default(): for metric in eval_metrics + model.metrics: metric_value = _float_metric_value(metric) logging.info('Step: [%d] Validation %s = %f', current_training_step, metric.name, metric_value) tf.summary.scalar( metric.name, metric_value, step=current_training_step) eval_summary_writer.flush() def _run_callbacks_on_batch_begin(batch): """Runs custom callbacks at the start of every step.""" if not custom_callbacks: return for callback in custom_callbacks: callback.on_batch_begin(batch) def _run_callbacks_on_batch_end(batch): """Runs custom callbacks at the end of every step.""" if not custom_callbacks: return for callback in custom_callbacks: callback.on_batch_end(batch) # Training loop starts here. checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer) sub_model_checkpoint = tf.train.Checkpoint( model=sub_model) if sub_model_export_name else None latest_checkpoint_file = tf.train.latest_checkpoint(model_dir) if latest_checkpoint_file: logging.info( 'Checkpoint file %s found and restoring from ' 'checkpoint', latest_checkpoint_file) checkpoint.restore(latest_checkpoint_file) logging.info('Loading from checkpoint file completed') current_step = optimizer.iterations.numpy() checkpoint_name = 'ctl_step_{step}.ckpt' manager = tf.train.CheckpointManager(checkpoint, model_dir, max_to_keep=3) FLAGS = params['FLAGS'] steps_from_save = 0 start_time = time.time() total_wo = 0 total_steps_wo = 0 perf_wo = 0 perf_wo_n = 0 first_steps = current_step total_running_steps = total_training_steps - first_steps global_batch_size = FLAGS.train_batch_size * num_accumulative_step if hvd: global_batch_size *= hvd.size() while current_step < total_training_steps: # Training loss/metric are taking average over steps inside micro # training loop. We reset the their values before each round. t0 = time.time() train_loss_metric.reset_states() for metric in train_metrics + model.metrics: metric.reset_states() _run_callbacks_on_batch_begin(current_step) # Runs several steps in the host while loop. steps = steps_to_run(current_step, steps_per_epoch, steps_per_loop) t0_wo = time.time() if steps == 1: # TODO(zongweiz): merge with train_steps once tf.while_loop # GPU performance bugs are fixed. if strategy: train_single_step_strategy(train_iterator, num_accumulative_step) else: train_single_step(train_iterator, num_accumulative_step, first_batch) else: # Converts steps to a Tensor to avoid tf.function retracing. 
if strategy: train_steps_strategy(train_iterator, tf.convert_to_tensor(steps, dtype=tf.int32), num_accumulative_step) else: train_steps(train_iterator, tf.convert_to_tensor(steps, dtype=tf.int32), num_accumulative_step, first_batch) elapse_wo = time.time() - t0_wo first_batch = False _run_callbacks_on_batch_end(current_step) current_step += steps train_loss = _float_metric_value(train_loss_metric) elapse_time = time.time() - t0 # Updates training logging. training_status = 'Train Step: %d/%d / loss = %s / time = %.3f sec' % ( current_step, total_training_steps, train_loss, elapse_time) steps_from_save += steps if (not hvd or hvd.rank() == 0) and steps_from_save >= FLAGS.save_checkpoint_steps: save_path = manager.save() logging.info('Saved checkpoint to {}'.format(save_path)) steps_from_save = 0 if train_summary_writer: with train_summary_writer.as_default(): tf.summary.scalar( train_loss_metric.name, train_loss, step=current_step) for metric in train_metrics + model.metrics: metric_value = _float_metric_value(metric) training_status += ' %s = %f' % (metric.name, metric_value) tf.summary.scalar(metric.name, metric_value, step=current_step) train_summary_writer.flush() if not hvd or hvd.rank() == 0: if use_float16: logging.info('Step: %d Lr %g Loss scale %g' % (current_step, optimizer._optimizer._decayed_lr('float32'), optimizer.loss_scale)) logging.info(training_status) logging.info('Perf %.2f' % (steps * global_batch_size / elapse_wo)) if current_step > first_steps + steps * 2: total_wo += elapse_wo total_steps_wo += steps perf_wo += steps * global_batch_size / elapse_wo perf_wo_n += 1 # Saves model checkpoints and run validation steps at every epoch end. if current_step % steps_per_epoch == 0: # To avoid repeated model saving, we do not save after the last # step of training. if current_step < total_training_steps and (not hvd or hvd.rank() == 0): manager.save() if sub_model_export_name: _save_checkpoint( sub_model_checkpoint, model_dir, '%s_step_%d.ckpt' % (sub_model_export_name, current_step)) if eval_input_fn: logging.info('Running evaluation after step: %s.', current_step) _run_evaluation(current_step, _get_input_iterator(eval_input_fn, strategy)) # Re-initialize evaluation metric. for metric in eval_metrics + model.metrics: metric.reset_states() total_time = time.time() - start_time if not hvd or hvd.rank() == 0: _save_checkpoint(checkpoint, model_dir, checkpoint_name.format(step=current_step)) if sub_model_export_name: _save_checkpoint(sub_model_checkpoint, model_dir, '%s.ckpt' % sub_model_export_name) if eval_input_fn: logging.info('Running final evaluation after training is complete.') _run_evaluation(current_step, _get_input_iterator(eval_input_fn, strategy)) training_summary = { 'total_training_steps': total_training_steps, 'train_loss': _float_metric_value(train_loss_metric), } if eval_metrics: # TODO(hongkuny): Cleans up summary reporting in text. 
training_summary['last_train_metrics'] = _float_metric_value( train_metrics[0]) training_summary['eval_metrics'] = _float_metric_value(eval_metrics[0]) write_txt_summary(training_summary, summary_dir) dllogging = params['dllogging'] if 'dllogging' in params else None total_sentences = total_running_steps * global_batch_size total_sentences_wo = total_steps_wo * global_batch_size logging.info("-----------------------------") logging.info(" Batch size = %d", FLAGS.train_batch_size) logging.info(" Num steps = %d", total_training_steps) logging.info(" LR = %g", FLAGS.learning_rate) if hvd: logging.info("Multi-GPU training with TF Horovod") logging.info("hvd.size() = %d", hvd.size()) logging.info("Total Training Time = %0.2f for Sequences = %d", total_time, total_sentences) if total_time != 0: logging.info("Throughput Average (sequences/sec) with overhead = %0.2f", total_sentences/total_time) if perf_wo_n != 0: logging.info("Throughput Average (sequences/sec) = %0.2f", perf_wo/perf_wo_n) logging.info("-----------------------------") if dllogging and perf_wo_n != 0: dllogging.logger.log(step=(), data={"throughput_train": perf_wo/perf_wo_n}, verbosity=Verbosity.DEFAULT) dllogging.logger.log(step=(), data={"total_loss": training_summary['train_loss']}, verbosity=Verbosity.DEFAULT) return model
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/official/modeling/model_training_utils.py
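In `model_training_utils.py` above, `steps_to_run` decides how many steps each host-side loop executes: at most `steps_per_loop`, but never crossing an epoch boundary, so checkpointing and evaluation still land exactly at epoch ends. A standalone sketch of that scheduling logic (the helper mirrors the one defined in the file; the `__main__` walk-through is illustrative):

def steps_to_run(current_step, steps_per_epoch, steps_per_loop):
  """Number of steps the next host loop should run (as defined above)."""
  if steps_per_loop <= 0:
    raise ValueError('steps_per_loop should be positive integer.')
  if steps_per_loop == 1:
    return steps_per_loop
  remainder_in_epoch = current_step % steps_per_epoch
  if remainder_in_epoch != 0:
    # Finish the current epoch before starting another full loop.
    return min(steps_per_epoch - remainder_in_epoch, steps_per_loop)
  return steps_per_loop


if __name__ == '__main__':
  # With steps_per_epoch=10 and steps_per_loop=4 one epoch is chunked 4+4+2,
  # so the epoch-boundary checkpoint/eval still lines up with step 10.
  step, chunks = 0, []
  while step < 10:
    num = steps_to_run(step, steps_per_epoch=10, steps_per_loop=4)
    chunks.append(num)
    step += num
  assert chunks == [4, 4, 2]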
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/official/modeling/__init__.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common TF utilities."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import six
import tensorflow as tf

from official.modeling import activations


def pack_inputs(inputs):
  """Pack a list of `inputs` tensors to a tuple.

  Args:
    inputs: a list of tensors.

  Returns:
    a tuple of tensors. If any input is None, it is replaced with a special
    constant tensor.
  """
  inputs = tf.nest.flatten(inputs)
  outputs = []
  for x in inputs:
    if x is None:
      outputs.append(tf.constant(0, shape=[], dtype=tf.int32))
    else:
      outputs.append(x)
  return tuple(outputs)


def unpack_inputs(inputs):
  """Unpack a tuple of `inputs` tensors to a tuple.

  Args:
    inputs: a list of tensors.

  Returns:
    a tuple of tensors. If any input is a special constant tensor, it is
    replaced with None.
  """
  inputs = tf.nest.flatten(inputs)
  outputs = []
  for x in inputs:
    if is_special_none_tensor(x):
      outputs.append(None)
    else:
      outputs.append(x)
  x = tuple(outputs)

  # To trick the very pointless 'unbalanced-tuple-unpacking' pylint check
  # from triggering.
  if len(x) == 1:
    return x[0]
  return tuple(outputs)


def is_special_none_tensor(tensor):
  """Checks if a tensor is a special None Tensor."""
  return tensor.shape.ndims == 0 and tensor.dtype == tf.int32


# TODO(hongkuny): consider moving custom string-map lookup to keras api.
def get_activation(identifier):
  """Maps an identifier to a Python function, e.g., "relu" => `tf.nn.relu`.

  It checks the string first; if it is one of the customized activations not
  in TF, the corresponding activation will be returned. For non-customized
  activation names and callable identifiers, it always falls back to
  tf.keras.activations.get.

  Args:
    identifier: String name of the activation function or callable.

  Returns:
    A Python function corresponding to the activation function.
  """
  if isinstance(identifier, six.string_types):
    name_to_fn = {
        "gelu": activations.gelu,
        "simple_swish": activations.simple_swish,
        "hard_swish": activations.hard_swish,
        "identity": activations.identity,
    }
    identifier = str(identifier).lower()
    if identifier in name_to_fn:
      return tf.keras.activations.get(name_to_fn[identifier])
  return tf.keras.activations.get(identifier)


def get_shape_list(tensor, expected_rank=None, name=None):
  """Returns a list of the shape of tensor, preferring static dimensions.

  Args:
    tensor: A tf.Tensor object to find the shape of.
    expected_rank: (optional) int. The expected rank of `tensor`. If this is
      specified and the `tensor` has a different rank, an exception will be
      thrown.
    name: Optional name of the tensor for the error message.

  Returns:
    A list of dimensions of the shape of tensor. All static dimensions will
    be returned as python integers, and dynamic dimensions will be returned
    as tf.Tensor scalars.
  """
  if expected_rank is not None:
    assert_rank(tensor, expected_rank, name)

  shape = tensor.shape.as_list()

  non_static_indexes = []
  for (index, dim) in enumerate(shape):
    if dim is None:
      non_static_indexes.append(index)

  if not non_static_indexes:
    return shape

  dyn_shape = tf.shape(tensor)
  for index in non_static_indexes:
    shape[index] = dyn_shape[index]
  return shape


def assert_rank(tensor, expected_rank, name=None):
  """Raises an exception if the tensor rank is not of the expected rank.

  Args:
    tensor: A tf.Tensor to check the rank of.
    expected_rank: Python integer or list of integers, expected rank.
    name: Optional name of the tensor for the error message.

  Raises:
    ValueError: If the expected shape doesn't match the actual shape.
  """
  expected_rank_dict = {}
  if isinstance(expected_rank, six.integer_types):
    expected_rank_dict[expected_rank] = True
  else:
    for x in expected_rank:
      expected_rank_dict[x] = True

  actual_rank = tensor.shape.ndims
  if actual_rank not in expected_rank_dict:
    raise ValueError(
        "For the tensor `%s`, the actual tensor rank `%d` (shape = %s) is not "
        "equal to the expected tensor rank `%s`" %
        (name, actual_rank, str(tensor.shape), str(expected_rank)))
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/official/modeling/tf_utils.py
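A brief usage sketch for `tf_utils.py` above (illustrative, assuming the `official` package is importable): static dimensions come back from `get_shape_list` as Python ints, unknown dimensions as scalar tensors, and `pack_inputs`/`unpack_inputs` round-trip `None` through a scalar sentinel tensor.

import tensorflow as tf

from official.modeling import tf_utils

# Fully static shapes come back as plain Python integers.
assert tf_utils.get_shape_list(tf.zeros([2, 3]), expected_rank=2) == [2, 3]


@tf.function(input_signature=[tf.TensorSpec(shape=[None, 3], dtype=tf.float32)])
def first_dim(x):
  # With an unknown batch dimension, entry 0 of the shape list is a scalar
  # tf.Tensor while entry 1 stays the Python integer 3.
  return tf_utils.get_shape_list(x, expected_rank=2, name='x')[0]


assert int(first_dim(tf.ones([5, 3]))) == 5

# pack_inputs/unpack_inputs round-trip None through a scalar sentinel tensor.
packed = tf_utils.pack_inputs([tf.ones([2]), None, tf.zeros([3])])
assert tf_utils.is_special_none_tensor(packed[1])
assert tf_utils.unpack_inputs(packed)[1] is None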
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for official.modeling.training.model_training_utils."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os

from absl.testing import parameterized
import numpy as np
import tensorflow as tf

from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from official.modeling import model_training_utils


def eager_strategy_combinations():
  return combinations.combine(
      distribution=[
          strategy_combinations.default_strategy,
          strategy_combinations.tpu_strategy,
          strategy_combinations.one_device_strategy_gpu,
          strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
          strategy_combinations.mirrored_strategy_with_two_gpus,
      ],
      mode='eager',
  )


def eager_gpu_strategy_combinations():
  return combinations.combine(
      distribution=[
          strategy_combinations.default_strategy,
          strategy_combinations.one_device_strategy_gpu,
          strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
          strategy_combinations.mirrored_strategy_with_two_gpus,
      ],
      mode='eager',
  )


def create_fake_data_input_fn(batch_size, features_shape, num_classes):
  """Creates a dummy input function with the given feature and label shapes.

  Args:
    batch_size: integer.
    features_shape: list[int]. Feature shape for an individual example.
    num_classes: integer. Number of labels.

  Returns:
    An input function that is usable in the executor.
  """

  def _dataset_fn(input_context=None):
    """An input function for generating fake data."""
    local_batch_size = input_context.get_per_replica_batch_size(batch_size)
    features = np.random.rand(64, *features_shape)
    labels = np.random.randint(2, size=[64, num_classes])
    # Convert the inputs to a Dataset.
    dataset = tf.data.Dataset.from_tensor_slices((features, labels))
    dataset = dataset.shard(input_context.num_input_pipelines,
                            input_context.input_pipeline_id)

    def _assign_dtype(features, labels):
      features = tf.cast(features, tf.float32)
      labels = tf.cast(labels, tf.float32)
      return features, labels

    # Shuffle, repeat, and batch the examples.
    dataset = dataset.map(_assign_dtype)
    dataset = dataset.shuffle(64).repeat()
    dataset = dataset.batch(local_batch_size, drop_remainder=True)
    dataset = dataset.prefetch(buffer_size=64)
    return dataset

  return _dataset_fn


def create_model_fn(input_shape, num_classes, use_float16=False):

  def _model_fn():
    """A one-layer softmax model suitable for testing."""
    input_layer = tf.keras.layers.Input(shape=input_shape)
    x = tf.keras.layers.Dense(num_classes, activation='relu')(input_layer)
    output_layer = tf.keras.layers.Dense(num_classes, activation='softmax')(x)
    sub_model = tf.keras.models.Model(input_layer, x, name='sub_model')
    model = tf.keras.models.Model(input_layer, output_layer, name='model')
    model.add_metric(
        tf.reduce_mean(input_layer), name='mean_input', aggregation='mean')
    model.optimizer = tf.keras.optimizers.SGD(learning_rate=0.1, momentum=0.9)
    if use_float16:
      model.optimizer = (
          tf.keras.mixed_precision.experimental.LossScaleOptimizer(
              model.optimizer, loss_scale='dynamic'))
    return model, sub_model

  return _model_fn


def metric_fn():
  """Gets a tf.keras metric object."""
  return tf.keras.metrics.CategoricalAccuracy(name='accuracy',
                                              dtype=tf.float32)


def summaries_with_matching_keyword(keyword, summary_dir):
  """Yields summary protos matching given keyword from event file."""
  event_paths = tf.io.gfile.glob(os.path.join(summary_dir, 'events*'))
  for event in tf.compat.v1.train.summary_iterator(event_paths[-1]):
    if event.summary is not None:
      for value in event.summary.value:
        if keyword in value.tag:
          tf.compat.v1.logging.error(event)
          yield event.summary


def check_eventfile_for_keyword(keyword, summary_dir):
  """Checks event files for the keyword."""
  return any(summaries_with_matching_keyword(keyword, summary_dir))


class ModelTrainingUtilsTest(tf.test.TestCase, parameterized.TestCase):

  def setUp(self):
    super(ModelTrainingUtilsTest, self).setUp()
    self._model_fn = create_model_fn(input_shape=[128], num_classes=3)

  def run_training(self, strategy, model_dir, steps_per_loop, run_eagerly):
    input_fn = create_fake_data_input_fn(
        batch_size=8, features_shape=[128], num_classes=3)
    model_training_utils.run_customized_training_loop(
        strategy=strategy,
        model_fn=self._model_fn,
        loss_fn=tf.keras.losses.categorical_crossentropy,
        model_dir=model_dir,
        steps_per_epoch=20,
        steps_per_loop=steps_per_loop,
        epochs=2,
        train_input_fn=input_fn,
        eval_input_fn=input_fn,
        eval_steps=10,
        init_checkpoint=None,
        metric_fn=metric_fn,
        custom_callbacks=None,
        run_eagerly=run_eagerly)

  @combinations.generate(eager_strategy_combinations())
  def test_train_eager_single_step(self, distribution):
    model_dir = self.get_temp_dir()
    if isinstance(distribution, tf.distribute.experimental.TPUStrategy):
      with self.assertRaises(ValueError):
        self.run_training(
            distribution, model_dir, steps_per_loop=1, run_eagerly=True)
    else:
      self.run_training(
          distribution, model_dir, steps_per_loop=1, run_eagerly=True)

  @combinations.generate(eager_gpu_strategy_combinations())
  def test_train_eager_mixed_precision(self, distribution):
    model_dir = self.get_temp_dir()
    policy = tf.keras.mixed_precision.experimental.Policy('mixed_float16')
    tf.keras.mixed_precision.experimental.set_policy(policy)
    self._model_fn = create_model_fn(
        input_shape=[128], num_classes=3, use_float16=True)
    self.run_training(
        distribution, model_dir, steps_per_loop=1, run_eagerly=True)

  @combinations.generate(eager_strategy_combinations())
  def test_train_check_artifacts(self, distribution):
    model_dir = self.get_temp_dir()
    self.run_training(
        distribution, model_dir, steps_per_loop=10, run_eagerly=False)

    # Two checkpoints should be saved after two epochs.
    self.assertNotEmpty(tf.io.gfile.glob(os.path.join(model_dir, 'ctl_step_*')))
    self.assertNotEmpty(
        tf.io.gfile.glob(
            os.path.join(model_dir, 'summaries/training_summary*')))

    # Loss and accuracy values should be written into summaries.
    self.assertTrue(
        check_eventfile_for_keyword(
            'loss', os.path.join(model_dir, 'summaries/train')))
    self.assertTrue(
        check_eventfile_for_keyword(
            'accuracy', os.path.join(model_dir, 'summaries/train')))
    self.assertTrue(
        check_eventfile_for_keyword(
            'mean_input', os.path.join(model_dir, 'summaries/train')))
    self.assertTrue(
        check_eventfile_for_keyword(
            'accuracy', os.path.join(model_dir, 'summaries/eval')))
    self.assertTrue(
        check_eventfile_for_keyword(
            'mean_input', os.path.join(model_dir, 'summaries/eval')))


if __name__ == '__main__':
  assert tf.version.VERSION.startswith('2.')
  tf.test.main()
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/official/modeling/model_training_utils_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the customized Swish activation."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import tensorflow as tf

from tensorflow.python.keras import keras_parameterized  # pylint: disable=g-direct-tensorflow-import
from official.modeling import activations


@keras_parameterized.run_all_keras_modes
class CustomizedSwishTest(keras_parameterized.TestCase):

  def _hard_swish_np(self, x):
    x = np.float32(x)
    return x * np.clip(x + 3, 0, 6) / 6

  def test_simple_swish(self):
    features = [[.25, 0, -.25], [-1, -2, 3]]
    customized_swish_data = activations.simple_swish(features)
    swish_data = tf.nn.swish(features)
    self.assertAllClose(customized_swish_data, swish_data)

  def test_hard_swish(self):
    features = [[.25, 0, -.25], [-1, -2, 3]]
    customized_swish_data = activations.hard_swish(features)
    swish_data = self._hard_swish_np(features)
    self.assertAllClose(customized_swish_data, swish_data)


if __name__ == '__main__':
  tf.test.main()
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/official/modeling/activations/swish_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gaussian error linear unit."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import math

import tensorflow as tf


@tf.keras.utils.register_keras_serializable(package='Text')
def gelu(x):
  """Gaussian Error Linear Unit.

  This is a smoother version of the RELU.
  Original paper: https://arxiv.org/abs/1606.08415

  Args:
    x: float Tensor to perform activation.

  Returns:
    `x` with the GELU activation applied.
  """
  cdf = 0.5 * (1.0 + tf.tanh(
      (math.sqrt(2 / math.pi) * (x + 0.044715 * tf.pow(x, 3)))))
  return x * cdf
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/official/modeling/activations/gelu.py
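The `gelu` above implements the tanh approximation from the paper. As a quick sanity check (illustrative snippet, not part of the repo), it stays close to the exact erf-based form 0.5 * x * (1 + erf(x / sqrt(2))):

import numpy as np
import tensorflow as tf

from official.modeling import activations

x = tf.constant(np.linspace(-4.0, 4.0, 81), dtype=tf.float32)
exact = 0.5 * x * (1.0 + tf.math.erf(x / tf.sqrt(2.0)))
approx = activations.gelu(x)
# The tanh approximation stays well within 2e-3 of the exact erf-based GELU
# over this range.
np.testing.assert_allclose(approx.numpy(), exact.numpy(), atol=2e-3)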
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the Gaussian error linear unit."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

from tensorflow.python.keras import keras_parameterized  # pylint: disable=g-direct-tensorflow-import
from official.modeling import activations


@keras_parameterized.run_all_keras_modes
class GeluTest(keras_parameterized.TestCase):

  def test_gelu(self):
    expected_data = [[0.14967535, 0., -0.10032465],
                     [-0.15880796, -0.04540223, 2.9963627]]
    gelu_data = activations.gelu([[.25, 0, -.25], [-1, -2, 3]])
    self.assertAllClose(expected_data, gelu_data)


if __name__ == '__main__':
  tf.test.main()
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/official/modeling/activations/gelu_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Activations package definition."""
from official.modeling.activations.gelu import gelu
from official.modeling.activations.swish import hard_swish
from official.modeling.activations.swish import identity
from official.modeling.activations.swish import simple_swish
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/official/modeling/activations/__init__.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Customized Swish activation."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf


@tf.keras.utils.register_keras_serializable(package='Text')
def simple_swish(features):
  """Computes the Swish activation function.

  The tf.nn.swish operation uses a custom gradient to reduce memory usage.
  Since saving custom gradients in SavedModel is currently not supported, and
  one would not be able to use an exported TF-Hub module for fine-tuning, we
  provide this wrapper so callers can select between the native TensorFlow
  swish operation and a customized operation that uses the default TensorFlow
  gradient computation.

  Args:
    features: A `Tensor` representing preactivation values.

  Returns:
    The activation value.
  """
  features = tf.convert_to_tensor(features)
  return features * tf.nn.sigmoid(features)


@tf.keras.utils.register_keras_serializable(package='Text')
def hard_swish(features):
  """Computes a hard version of the swish function.

  This operation can be used to reduce computational cost and improve
  quantization for edge devices.

  Args:
    features: A `Tensor` representing preactivation values.

  Returns:
    The activation value.
  """
  features = tf.convert_to_tensor(features)
  return features * tf.nn.relu6(features + tf.constant(3.)) * (1. / 6.)


@tf.keras.utils.register_keras_serializable(package='Text')
def identity(features):
  """Computes the identity function.

  Useful for helping in quantization.

  Args:
    features: A `Tensor` representing preactivation values.

  Returns:
    The activation value.
  """
  features = tf.convert_to_tensor(features)
  return tf.identity(features)
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/official/modeling/activations/swish.py
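`hard_swish` above replaces the sigmoid with the piecewise-linear `relu6(x + 3) / 6`, which saturates exactly to 0 below -3 and to the identity above 3 while staying close to `simple_swish` in between. A small illustrative comparison (not part of the repo):

import tensorflow as tf

from official.modeling import activations

x = tf.constant([-4.0, -1.0, 0.0, 1.0, 4.0])
smooth = activations.simple_swish(x)  # x * sigmoid(x)
hard = activations.hard_swish(x)      # x * relu6(x + 3) / 6

# Outside [-3, 3] the hard variant saturates: 0 on the left, x on the right.
assert abs(float(hard[0])) < 1e-6 and abs(float(hard[4]) - 4.0) < 1e-6
# Inside the interval it only approximates the smooth curve.
assert float(tf.reduce_max(tf.abs(smooth - hard))) < 0.15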
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Custom training loop for running TensorFlow 2.0 models.""" from __future__ import absolute_import from __future__ import division # from __future__ import google_type_annotations from __future__ import print_function import json import os from absl import flags from absl import logging import numpy as np import tensorflow as tf # pylint: disable=unused-import,g-import-not-at-top,redefined-outer-name,reimported from typing import Optional, Dict, List, Text, Callable, Union, Iterator, Any from official.modeling.hyperparams import params_dict from official.utils.misc import tpu_lib FLAGS = flags.FLAGS def strategy_flags_dict(): """Returns TPU related flags in a dictionary.""" return { # TPUStrategy related flags. 'tpu': FLAGS.tpu, # MultiWorkerMirroredStrategy related flags. 'worker_hosts': FLAGS.worker_hosts, 'task_index': FLAGS.task_index, } def hparam_flags_dict(): """Returns model params related flags in a dictionary.""" return { 'data_dir': FLAGS.data_dir, 'model_dir': FLAGS.model_dir, 'train_batch_size': FLAGS.train_batch_size, 'eval_batch_size': FLAGS.eval_batch_size, 'precision': FLAGS.precision, 'config_file': FLAGS.config_file, 'params_override': FLAGS.params_override, } def _save_checkpoint(checkpoint, model_dir, checkpoint_prefix): """Saves model to model_dir with provided checkpoint prefix.""" checkpoint_path = os.path.join(model_dir, checkpoint_prefix) saved_path = checkpoint.save(checkpoint_path) logging.info('Saving model as TF checkpoint: %s', saved_path) def _steps_to_run(current_step, total_steps, steps_per_loop): """Calculates steps to run on device.""" if steps_per_loop <= 0: raise ValueError('steps_per_loop should be positive integer.') return min(total_steps - current_step, steps_per_loop) def _no_metric(): return None class SummaryWriter(object): """Simple SummaryWriter for writing dictionary of metrics. Attributes: _writer: The tf.SummaryWriter. """ def __init__(self, model_dir: Text, name: Text): """Inits SummaryWriter with paths. Arguments: model_dir: the model folder path. name: the summary subfolder name. """ self._writer = tf.summary.create_file_writer(os.path.join(model_dir, name)) def __call__(self, metrics: Union[Dict[Text, float], float], step: int): """Write metrics to summary with the given writer. Args: metrics: a dictionary of metrics values. Prefer dictionary. step: integer. The training step. """ if not isinstance(metrics, dict): # Support scalar metric without name. logging.warning('Warning: summary writer prefer metrics as dictionary.') metrics = {'metric': metrics} with self._writer.as_default(): for k, v in metrics.items(): tf.summary.scalar(k, v, step=step) self._writer.flush() class DistributedExecutor(object): """Interface to train and eval models with tf.distribute.Strategy. Arguments: strategy: an instance of tf.distribute.Strategy. 
params: Model configuration needed to run distribution strategy. model_fn: Keras model function. Signature: (params: ParamsDict) -> tf.keras.models.Model. loss_fn: loss function. Signature: (y_true: Tensor, y_pred: Tensor) -> Tensor metric_fn: metric function. Signature: () -> tf.keras.metrics.Metric. is_multi_host: Set to True when using multi hosts for training, like multi worker GPU or TPU pod (slice). Otherwise, False. """ def __init__(self, strategy, params, model_fn, loss_fn, is_multi_host=False): self._params = params self._model_fn = model_fn self._loss_fn = loss_fn self._strategy = strategy self._checkpoint_name = 'ctl_step_{step}.ckpt' self._is_multi_host = is_multi_host @property def checkpoint_name(self): """Returns default checkpoint name.""" return self._checkpoint_name @checkpoint_name.setter def checkpoint_name(self, name): """Sets default summary writer for the current thread.""" self._checkpoint_name = name def loss_fn(self): return self._loss_fn() def model_fn(self, params): return self._model_fn(params) def _save_config(self, model_dir): """Save parameters to config files if model_dir is defined.""" logging.info('Save config to model_dir %s.', model_dir) if model_dir: if not tf.io.gfile.exists(model_dir): tf.io.gfile.makedirs(model_dir) self._params.lock() params_dict.save_params_dict_to_yaml(self._params, model_dir + '/params.yaml') else: logging.warning('model_dir is empty, so skip the save config.') def _get_input_iterator( self, input_fn: Callable[..., tf.data.Dataset], strategy: tf.distribute.Strategy) -> Optional[Iterator[Any]]: """Returns distributed dataset iterator. Args: input_fn: (params: dict) -> tf.data.Dataset. strategy: an instance of tf.distribute.Strategy. Returns: An iterator that yields input tensors. """ if input_fn is None: return None # When training with multiple TPU workers, datasets needs to be cloned # across workers. Since Dataset instance cannot be cloned in eager mode, # we instead pass callable that returns a dataset. if self._is_multi_host: return iter( strategy.experimental_distribute_datasets_from_function(input_fn)) else: input_data = input_fn() return iter(strategy.experimental_distribute_dataset(input_data)) def _create_replicated_step(self, strategy, model, loss_fn, optimizer, metric=None): def _replicated_step(inputs): """Replicated training step.""" inputs, labels = inputs with tf.GradientTape() as tape: outputs = model(inputs, training=True) prediction_loss = loss_fn(labels, outputs) loss = tf.reduce_mean(prediction_loss) loss = loss / strategy.num_replicas_in_sync if isinstance(metric, tf.keras.metrics.Metric): metric.update_state(labels, outputs) else: logging.error('train metric is not an instance of ' 'tf.keras.metrics.Metric.') grads = tape.gradient(loss, model.trainable_variables) optimizer.apply_gradients(zip(grads, model.trainable_variables)) return loss return _replicated_step def _create_train_step(self, strategy, model, loss_fn, optimizer, metric=None): """Creates a distributed training step. Args: strategy: an instance of tf.distribute.Strategy. model: (Tensor, bool) -> Tensor. model function. loss_fn: (y_true: Tensor, y_pred: Tensor) -> Tensor. optimizer: tf.keras.optimizers.Optimizer. iterator: an iterator that yields input tensors. metric: tf.keras.metrics.Metric subclass. Returns: The training step callable. """ _replicated_step = self._create_replicated_step(strategy, model, loss_fn, optimizer, metric) @tf.function def train_step(iterator, num_steps): """Performs a distributed training step. 
Args: iterator: an iterator that yields input tensors. Returns: The loss tensor. """ if not isinstance(num_steps, tf.Tensor): raise ValueError('steps should be an Tensor. Python object may cause ' 'retracing.') per_replica_losses = strategy.experimental_run_v2( _replicated_step, args=(next(iterator),)) for _ in tf.range(num_steps - 1): per_replica_losses = strategy.experimental_run_v2( _replicated_step, args=(next(iterator),)) # For reporting, we returns the mean of losses. loss = strategy.reduce( tf.distribute.ReduceOp.MEAN, per_replica_losses, axis=None) return loss return train_step def _create_test_step(self, strategy, model, metric): """Creates a distributed test step.""" @tf.function def test_step(iterator): """Calculates evaluation metrics on distributed devices.""" if not metric: logging.info('Skip test_step because metric is None (%s)', metric) return None, None if not isinstance(metric, tf.keras.metrics.Metric): raise ValueError( 'Metric must be an instance of tf.keras.metrics.Metric ' 'for running in test_step. Actual {}'.format(metric)) def _test_step_fn(inputs): """Replicated accuracy calculation.""" inputs, labels = inputs model_outputs = model(inputs, training=False) metric.update_state(labels, model_outputs) return labels, model_outputs return strategy.experimental_run_v2(_test_step_fn, args=(next(iterator),)) return test_step def train(self, train_input_fn: Callable[[params_dict.ParamsDict], tf.data.Dataset], eval_input_fn: Callable[[params_dict.ParamsDict], tf.data.Dataset] = None, model_dir: Text = None, total_steps: int = 1, iterations_per_loop: int = 1, train_metric_fn: Callable[[], Any] = None, eval_metric_fn: Callable[[], Any] = None, summary_writer_fn: Callable[[Text, Text], SummaryWriter] = SummaryWriter, init_checkpoint: Callable[[tf.keras.Model], Any] = None, custom_callbacks: List[tf.keras.callbacks.Callback] = None, save_config: bool = True): """Runs distributed training. Args: train_input_fn: (params: dict) -> tf.data.Dataset training data input function. eval_input_fn: (Optional) same type as train_input_fn. If not None, will trigger evaluting metric on eval data. If None, will not run eval step. model_dir: the folder path for model checkpoints. total_steps: total training steps. iterations_per_loop: train steps per loop. After each loop, this job will update metrics like loss and save checkpoint. train_metric_fn: metric_fn for evaluation in train_step. eval_metric_fn: metric_fn for evaluation in test_step. summary_writer_fn: function to create summary writer. init_checkpoint: function to load checkpoint. custom_callbacks: A list of Keras Callbacks objects to run during training. More specifically, `on_batch_begin()`, `on_batch_end()`, methods are invoked during training. save_config: bool. Whether to save params to model_dir. Returns: The training loss and eval metrics. 
""" assert train_input_fn is not None if train_metric_fn and not callable(train_metric_fn): raise ValueError('if `train_metric_fn` is specified, ' 'train_metric_fn must be a callable.') if eval_metric_fn and not callable(eval_metric_fn): raise ValueError('if `eval_metric_fn` is specified, ' 'eval_metric_fn must be a callable.') train_metric_fn = train_metric_fn or _no_metric eval_metric_fn = eval_metric_fn or _no_metric if custom_callbacks and iterations_per_loop != 1: logging.error( 'It is sematically wrong to run callbacks when ' 'iterations_per_loop is not one (%s)', iterations_per_loop) def _run_callbacks_on_batch_begin(batch): """Runs custom callbacks at the start of every step.""" if not custom_callbacks: return for callback in custom_callbacks: if callback: callback.on_batch_begin(batch) def _run_callbacks_on_batch_end(batch): """Runs custom callbacks at the end of every step.""" if not custom_callbacks: return for callback in custom_callbacks: if callback: callback.on_batch_end(batch) if save_config: self._save_config(model_dir) if FLAGS.save_checkpoint_freq: save_freq = FLAGS.save_checkpoint_freq else: save_freq = iterations_per_loop params = self._params strategy = self._strategy # To reduce unnecessary send/receive input pipeline operation, we place # input pipeline ops in worker task. train_iterator = self._get_input_iterator(train_input_fn, strategy) train_loss = None eval_metric_result = None with strategy.scope(): # To correctly place the model weights on accelerators, # model and optimizer should be created in scope. model = self.model_fn(params.as_dict()) if not hasattr(model, 'optimizer'): raise ValueError('User should set optimizer attribute to model ' 'inside `model_fn`.') optimizer = model.optimizer # Training loop starts here. checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer) latest_checkpoint_file = tf.train.latest_checkpoint(model_dir) initial_step = 0 if latest_checkpoint_file: logging.info( 'Checkpoint file %s found and restoring from ' 'checkpoint', latest_checkpoint_file) checkpoint.restore(latest_checkpoint_file) initial_step = optimizer.iterations.numpy() logging.info('Loading from checkpoint file completed. Init step %d', initial_step) elif init_checkpoint: logging.info('Restoring from init checkpoint function') init_checkpoint(model) logging.info('Loading from init checkpoint file completed') current_step = optimizer.iterations.numpy() checkpoint_name = self.checkpoint_name eval_metric = eval_metric_fn() train_metric = train_metric_fn() train_summary_writer = summary_writer_fn(model_dir, 'eval_train') test_summary_writer = summary_writer_fn(model_dir, 'eval_test') # Continue training loop. 
train_step = self._create_train_step( strategy=strategy, model=model, loss_fn=self.loss_fn(), optimizer=optimizer, metric=train_metric) test_step = None if eval_input_fn and eval_metric: test_step = self._create_test_step(strategy, model, metric=eval_metric) logging.info('Training started') last_save_checkpoint_step = current_step while current_step < total_steps: num_steps = _steps_to_run(current_step, total_steps, iterations_per_loop) _run_callbacks_on_batch_begin(current_step) train_loss = train_step(train_iterator, tf.convert_to_tensor(num_steps, dtype=tf.int32)) _run_callbacks_on_batch_end(current_step) current_step += num_steps train_loss = tf.nest.map_structure(lambda x: x.numpy().astype(float), train_loss) if not isinstance(train_loss, dict): train_loss = {'total_loss': train_loss} if np.isnan(train_loss['total_loss']): raise ValueError('total loss is NaN.') if train_metric: train_metric_result = train_metric.result() if isinstance(train_metric, tf.keras.metrics.Metric): train_metric_result = tf.nest.map_structure( lambda x: x.numpy().astype(float), train_metric_result) if not isinstance(train_metric_result, dict): train_metric_result = {'metric': train_metric_result} train_metric_result.update(train_loss) else: train_metric_result = train_loss if callable(optimizer.lr): train_metric_result.update( {'learning_rate': optimizer.lr(current_step).numpy()}) else: train_metric_result.update({'learning_rate': optimizer.lr.numpy()}) logging.info('Train Step: %d/%d / loss = %s / training metric = %s', current_step, total_steps, train_loss, train_metric_result) train_summary_writer( metrics=train_metric_result, step=optimizer.iterations) # Saves model checkpoints and run validation steps at every # iterations_per_loop steps. # To avoid repeated model saving, we do not save after the last # step of training. if save_freq > 0 and current_step < total_steps and ( current_step - last_save_checkpoint_step) >= save_freq: _save_checkpoint(checkpoint, model_dir, checkpoint_name.format(step=current_step)) last_save_checkpoint_step = current_step if test_step: eval_iterator = self._get_input_iterator(eval_input_fn, strategy) eval_metric_result = self._run_evaluation(test_step, current_step, eval_metric, eval_iterator) logging.info('Step: %s evalation metric = %s.', current_step, eval_metric_result) test_summary_writer( metrics=eval_metric_result, step=optimizer.iterations) # Re-initialize evaluation metric, except the last step. if eval_metric and current_step < total_steps: eval_metric.reset_states() if train_metric and current_step < total_steps: train_metric.reset_states() # Reaches the end of training and saves the last checkpoint. 
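      # If evaluation is configured, a final evaluation pass also runs here so
      # the returned metrics reflect the fully trained model.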
if last_save_checkpoint_step < total_steps: _save_checkpoint(checkpoint, model_dir, checkpoint_name.format(step=current_step)) if test_step: logging.info('Running final evaluation after training is complete.') eval_iterator = self._get_input_iterator(eval_input_fn, strategy) eval_metric_result = self._run_evaluation(test_step, current_step, eval_metric, eval_iterator) logging.info('Final evaluation metric = %s.', eval_metric_result) test_summary_writer( metrics=eval_metric_result, step=optimizer.iterations) return train_loss, eval_metric_result def _run_evaluation(self, test_step, current_training_step, metric, test_iterator): """Runs validation steps and aggregate metrics.""" if not test_iterator or not metric: logging.warning( 'Both test_iterator (%s) and metrics (%s) must not be None.', test_iterator, metric) return None logging.info('Running evaluation after step: %s.', current_training_step) while True: try: test_step(test_iterator) except (StopIteration, tf.errors.OutOfRangeError): break metric_result = metric.result() if isinstance(metric, tf.keras.metrics.Metric): metric_result = metric_result.numpy().astype(float) logging.info('Step: [%d] Validation metric = %f', current_training_step, metric_result) return metric_result def evaluate_from_model_dir( self, model_dir: Text, eval_input_fn: Callable[[params_dict.ParamsDict], tf.data.Dataset], eval_metric_fn: Callable[[], Any], total_steps: int = -1, eval_timeout: int = None, min_eval_interval: int = 180, summary_writer_fn: Callable[[Text, Text], SummaryWriter] = SummaryWriter): """Runs distributed evaluation on model folder. Args: eval_input_fn: (Optional) same type as train_input_fn. If not None, will trigger evaluting metric on eval data. If None, will not run eval step. eval_metric_fn: metric_fn for evaluation in test_step. model_dir: the folder for storing model checkpoints. total_steps: total training steps. If the current step reaches the total_steps, the evaluation loop will stop. eval_timeout: The maximum number of seconds to wait between checkpoints. If left as None, then the process will wait indefinitely. Used by tf.train.checkpoints_iterator. min_eval_interval: The minimum number of seconds between yielding checkpoints. Used by tf.train.checkpoints_iterator. summary_writer_fn: function to create summary writer. Returns: Eval metrics dictionary of the last checkpoint. """ if not model_dir: raise ValueError('model_dir must be set.') def terminate_eval(): tf.logging.info('Terminating eval after %d seconds of no checkpoints' % eval_timeout) return True summary_writer = summary_writer_fn(model_dir, 'eval') # Read checkpoints from the given model directory # until `eval_timeout` seconds elapses. for checkpoint_path in tf.train.checkpoints_iterator( model_dir, min_interval_secs=min_eval_interval, timeout=eval_timeout, timeout_fn=terminate_eval): eval_metric_result, current_step = self.evaluate_checkpoint( checkpoint_path=checkpoint_path, eval_input_fn=eval_input_fn, eval_metric_fn=eval_metric_fn, summary_writer=summary_writer) if total_steps > 0 and current_step >= total_steps: logging.info('Evaluation finished after training step %d', current_step) break return eval_metric_result def evaluate_checkpoint(self, checkpoint_path: Text, eval_input_fn: Callable[[params_dict.ParamsDict], tf.data.Dataset], eval_metric_fn: Callable[[], Any], summary_writer: SummaryWriter = None): """Runs distributed evaluation on the one checkpoint. Args: eval_input_fn: (Optional) same type as train_input_fn. 
If not None, will trigger evaluting metric on eval data. If None, will not run eval step. eval_metric_fn: metric_fn for evaluation in test_step. checkpoint_path: the checkpoint to evaluate. summary_writer_fn: function to create summary writer. Returns: Eval metrics dictionary of the last checkpoint. """ if not callable(eval_metric_fn): raise ValueError('if `eval_metric_fn` is specified, ' 'eval_metric_fn must be a callable.') params = self._params strategy = self._strategy # To reduce unnecessary send/receive input pipeline operation, we place # input pipeline ops in worker task. with strategy.scope(): # To correctly place the model weights on accelerators, # model and optimizer should be created in scope. model = self.model_fn(params.as_dict()) checkpoint = tf.train.Checkpoint(model=model) eval_metric = eval_metric_fn() assert eval_metric, 'eval_metric does not exist' test_step = self._create_test_step(strategy, model, metric=eval_metric) logging.info('Starting to evaluate.') if not checkpoint_path: raise ValueError('checkpoint path is empty') reader = tf.compat.v1.train.NewCheckpointReader(checkpoint_path) current_step = reader.get_tensor( 'optimizer/iter/.ATTRIBUTES/VARIABLE_VALUE') logging.info( 'Checkpoint file %s found and restoring from ' 'checkpoint', checkpoint_path) checkpoint.restore(checkpoint_path) eval_iterator = self._get_input_iterator(eval_input_fn, strategy) eval_metric_result = self._run_evaluation(test_step, current_step, eval_metric, eval_iterator) logging.info('Step: %s evalation metric = %s.', current_step, eval_metric_result) summary_writer(metrics=eval_metric_result, step=current_step) eval_metric.reset_states() return eval_metric_result, current_step def predict(self): return NotImplementedError('Unimplmented function.') # TODO(yeqing): Add unit test for MultiWorkerMirroredStrategy. class ExecutorBuilder(object): """Builder of DistributedExecutor. Example 1: Builds an executor with supported Strategy. builder = ExecutorBuilder( strategy_type='tpu', strategy_config={'tpu': '/bns/xxx'}) dist_executor = builder.build_executor( params=params, model_fn=my_model_fn, loss_fn=my_loss_fn, metric_fn=my_metric_fn) Example 2: Builds an executor with customized Strategy. builder = ExecutorBuilder() builder.strategy = <some customized Strategy> dist_executor = builder.build_executor( params=params, model_fn=my_model_fn, loss_fn=my_loss_fn, metric_fn=my_metric_fn) Example 3: Builds a customized executor with customized Strategy. class MyDistributedExecutor(DistributedExecutor): # implementation ... builder = ExecutorBuilder() builder.strategy = <some customized Strategy> dist_executor = builder.build_executor( class_ctor=MyDistributedExecutor, params=params, model_fn=my_model_fn, loss_fn=my_loss_fn, metric_fn=my_metric_fn) Args: strategy_type: string. One of 'tpu', 'mirrored', 'multi_worker_mirrored'. If None. User is responsible to set the strategy before calling build_executor(...). strategy_config: necessary config for constructing the proper Strategy. Check strategy_flags_dict() for examples of the structure. """ def __init__(self, strategy_type=None, strategy_config=None): self._strategy_config = strategy_config self._strategy = self._build_strategy(strategy_type) @property def strategy(self): """Returns default checkpoint name.""" return self._strategy @strategy.setter def strategy(self, new_strategy): """Sets default summary writer for the current thread.""" self._strategy = new_strategy def _build_strategy(self, strategy_type): """Builds tf.distribute.Strategy instance. 
Args: strategy_type: string. One of 'tpu', 'one_device_gpu', 'mirrored', 'multi_worker_mirrored'. Returns: An tf.distribute.Strategy object. Returns None if strategy_type is None. """ if strategy_type is None: return None if strategy_type == 'tpu': return self._build_tpu_strategy() elif strategy_type == 'one_device_gpu': return tf.distribute.OneDeviceStrategy("device:GPU:0") elif strategy_type == 'mirrored': return self._build_mirrored_strategy() elif strategy_type == 'multi_worker_mirrored': return self._build_multiworker_mirrored_strategy() else: raise NotImplementedError('Unsupport accelerator type "%s"' % strategy_type) def _build_mirrored_strategy(self): """Builds a MirroredStrategy object.""" return tf.distribute.MirroredStrategy() def _build_tpu_strategy(self): """Builds a TPUStrategy object.""" tpu = self._strategy_config.tpu logging.info('Use TPU at %s', tpu if tpu is not None else '') cluster_resolver = tpu_lib.tpu_initialize(tpu) strategy = tf.distribute.experimental.TPUStrategy(cluster_resolver) return strategy def _build_multiworker_mirrored_strategy(self): """Builds a MultiWorkerMirroredStrategy object.""" worker_hosts = self._strategy_config.worker_hosts if worker_hosts is not None: # Set TF_CONFIG environment variable worker_hosts = worker_hosts.split(',') task_index = self._strategy_config.task_index os.environ['TF_CONFIG'] = json.dumps({ 'cluster': { 'worker': worker_hosts }, 'task': { 'type': 'worker', 'index': task_index } }) multiworker_strategy = ( tf.distribute.experimental.MultiWorkerMirroredStrategy()) return multiworker_strategy def build_executor(self, class_ctor=DistributedExecutor, params=None, model_fn=None, loss_fn=None, **kwargs): """Creates an executor according to strategy type. See doc string of the DistributedExecutor.__init__ for more information of the input arguments. Args: class_ctor: A constructor of executor (default: DistributedExecutor). params: ParamsDict, all the model parameters and runtime parameters. model_fn: Keras model function. loss_fn: loss function. **kwargs: other arguments to the executor constructor. Returns: An instance of DistributedExecutor or its subclass. """ if self._strategy is None: raise ValueError('`strategy` should not be None. You need to specify ' '`strategy_type` in the builder contructor or directly ' 'set the `strategy` property of the builder.') return class_ctor( strategy=self._strategy, params=params, model_fn=model_fn, loss_fn=loss_fn, **kwargs)
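# -----------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). It mirrors
# Example 1 in the ExecutorBuilder docstring above; `my_model_fn`,
# `my_loss_fn`, `my_train_input_fn` and the literal paths/step counts are
# hypothetical placeholders.
#
#   builder = ExecutorBuilder(strategy_type='mirrored')
#   dist_executor = builder.build_executor(
#       params=params, model_fn=my_model_fn, loss_fn=my_loss_fn)
#   train_loss, _ = dist_executor.train(
#       train_input_fn=my_train_input_fn,
#       model_dir='/tmp/ctl_model_dir',
#       total_steps=1000,
#       iterations_per_loop=100)
# -----------------------------------------------------------------------------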
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/official/modeling/training/distributed_executor.py
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/official/modeling/training/__init__.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """A parameter dictionary class which supports the nest structure.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import copy import re import six import tensorflow as tf import yaml # regex pattern that matches on key-value pairs in a comma-separated # key-value pair string. It splits each k-v pair on the = sign, and # matches on values that are within single quotes, double quotes, single # values (e.g. floats, ints, etc.), and a lists within brackets. _PARAM_RE = re.compile(r""" (?P<name>[a-zA-Z][\w\.]*) # variable name: "var" or "x" \s*=\s* ((?P<val>\'(.*?)\' # single quote | \"(.*?)\" # double quote | [^,\[]* # single value | \[[^\]]*\])) # list of values ($|,\s*)""", re.VERBOSE) class ParamsDict(object): """A hyperparameter container class.""" RESERVED_ATTR = ['_locked', '_restrictions'] def __init__(self, default_params=None, restrictions=None): """Instantiate a ParamsDict. Instantiate a ParamsDict given a set of default parameters and a list of restrictions. Upon initialization, it validates itself by checking all the defined restrictions, and raise error if it finds inconsistency. Args: default_params: a Python dict or another ParamsDict object including the default parameters to initialize. restrictions: a list of strings, which define a list of restrictions to ensure the consistency of different parameters internally. Each restriction string is defined as a binary relation with a set of operators, including {'==', '!=', '<', '<=', '>', '>='}. """ self._locked = False self._restrictions = [] if restrictions: self._restrictions = restrictions if default_params is None: default_params = {} self.override(default_params, is_strict=False) self.validate() def _set(self, k, v): if isinstance(v, dict): self.__dict__[k] = ParamsDict(v) else: self.__dict__[k] = copy.deepcopy(v) def __setattr__(self, k, v): """Sets the value of the existing key. Note that this does not allow directly defining a new key. Use the `override` method with `is_strict=False` instead. Args: k: the key string. v: the value to be used to set the key `k`. Raises: KeyError: if k is not defined in the ParamsDict. """ if k not in ParamsDict.RESERVED_ATTR: if k not in self.__dict__.keys(): raise KeyError('The key `%{}` does not exist. ' 'To extend the existing keys, use ' '`override` with `is_strict` = True.'.format(k)) if self._locked: raise ValueError('The ParamsDict has been locked. ' 'No change is allowed.') self._set(k, v) def __getattr__(self, k): """Gets the value of the existing key. Args: k: the key string. Returns: the value of the key. Raises: KeyError: if k is not defined in the ParamsDict. """ if k not in self.__dict__.keys(): raise KeyError('The key `{}` does not exist. 
'.format(k)) return self.__dict__[k] def __contains__(self, key): """Implements the membership test operator.""" return key in self.__dict__ def get(self, key, value=None): """Accesses through built-in dictionary get method.""" return self.__dict__.get(key, value) def override(self, override_params, is_strict=True): """Override the ParamsDict with a set of given params. Args: override_params: a dict or a ParamsDict specifying the parameters to be overridden. is_strict: a boolean specifying whether override is strict or not. If True, keys in `override_params` must be present in the ParamsDict. If False, keys in `override_params` can be different from what is currently defined in the ParamsDict. In this case, the ParamsDict will be extended to include the new keys. """ if self._locked: raise ValueError('The ParamsDict has been locked. No change is allowed.') if isinstance(override_params, ParamsDict): override_params = override_params.as_dict() self._override(override_params, is_strict) # pylint: disable=protected-access def _override(self, override_dict, is_strict=True): """The implementation of `override`.""" for k, v in six.iteritems(override_dict): if k in ParamsDict.RESERVED_ATTR: raise KeyError('The key `%{}` is internally reserved. ' 'Can not be overridden.') if k not in self.__dict__.keys(): if is_strict: raise KeyError('The key `{}` does not exist. ' 'To extend the existing keys, use ' '`override` with `is_strict` = False.'.format(k)) else: self._set(k, v) else: if isinstance(v, dict): self.__dict__[k]._override(v, is_strict) # pylint: disable=protected-access elif isinstance(v, ParamsDict): self.__dict__[k]._override(v.as_dict(), is_strict) # pylint: disable=protected-access else: self.__dict__[k] = copy.deepcopy(v) def lock(self): """Makes the ParamsDict immutable.""" self._locked = True def as_dict(self): """Returns a dict representation of ParamsDict. For the nested ParamsDict, a nested dict will be returned. """ params_dict = {} for k, v in six.iteritems(self.__dict__): if k not in ParamsDict.RESERVED_ATTR: if isinstance(v, ParamsDict): params_dict[k] = v.as_dict() else: params_dict[k] = copy.deepcopy(v) return params_dict def validate(self): """Validate the parameters consistency based on the restrictions. This method validates the internal consistency using the pre-defined list of restrictions. A restriction is defined as a string which specfiies a binary operation. The supported binary operations are {'==', '!=', '<', '<=', '>', '>='}. Note that the meaning of these operators are consistent with the underlying Python immplementation. Users should make sure the define restrictions on their type make sense. For example, for a ParamsDict like the following ``` a: a1: 1 a2: 2 b: bb: bb1: 10 bb2: 20 ccc: a1: 1 a3: 3 ``` one can define two restrictions like this ['a.a1 == b.ccc.a1', 'a.a2 <= b.bb.bb2'] What it enforces are: - a.a1 = 1 == b.ccc.a1 = 2 - a.a2 = 2 <= b.bb.bb2 = 20 Raises: KeyError: if any of the following happens (1) any of parameters in any of restrictions is not defined in ParamsDict, (2) any inconsistency violating the restriction is found. ValueError: if the restriction defined in the string is not supported. 
""" def _get_kv(dotted_string, params_dict): tokenized_params = dotted_string.split('.') v = params_dict for t in tokenized_params: v = v[t] return tokenized_params[-1], v def _get_kvs(tokens, params_dict): if len(tokens) != 2: raise ValueError('Only support binary relation in restriction.') stripped_tokens = [t.strip() for t in tokens] left_k, left_v = _get_kv(stripped_tokens[0], params_dict) right_k, right_v = _get_kv(stripped_tokens[1], params_dict) return left_k, left_v, right_k, right_v params_dict = self.as_dict() for restriction in self._restrictions: if '==' in restriction: tokens = restriction.split('==') _, left_v, _, right_v = _get_kvs(tokens, params_dict) if left_v != right_v: raise KeyError('Found inconsistncy between key `{}` and key `{}`.' .format(tokens[0], tokens[1])) elif '!=' in restriction: tokens = restriction.split('!=') _, left_v, _, right_v = _get_kvs(tokens, params_dict) if left_v == right_v: raise KeyError('Found inconsistncy between key `{}` and key `{}`.' .format(tokens[0], tokens[1])) elif '<' in restriction: tokens = restriction.split('<') _, left_v, _, right_v = _get_kvs(tokens, params_dict) if left_v >= right_v: raise KeyError('Found inconsistncy between key `{}` and key `{}`.' .format(tokens[0], tokens[1])) elif '<=' in restriction: tokens = restriction.split('<=') _, left_v, _, right_v = _get_kvs(tokens, params_dict) if left_v > right_v: raise KeyError('Found inconsistncy between key `{}` and key `{}`.' .format(tokens[0], tokens[1])) elif '>' in restriction: tokens = restriction.split('>') _, left_v, _, right_v = _get_kvs(tokens, params_dict) if left_v <= right_v: raise KeyError('Found inconsistncy between key `{}` and key `{}`.' .format(tokens[0], tokens[1])) elif '>=' in restriction: tokens = restriction.split('>=') _, left_v, _, right_v = _get_kvs(tokens, params_dict) if left_v < right_v: raise KeyError('Found inconsistncy between key `{}` and key `{}`.' .format(tokens[0], tokens[1])) else: raise ValueError('Unsupported relation in restriction.') def read_yaml_to_params_dict(file_path): """Reads a YAML file to a ParamsDict.""" with tf.io.gfile.GFile(file_path, 'r') as f: params_dict = yaml.load(f) return ParamsDict(params_dict) def save_params_dict_to_yaml(params, file_path): """Saves the input ParamsDict to a YAML file.""" with tf.io.gfile.GFile(file_path, 'w') as f: def _my_list_rep(dumper, data): # u'tag:yaml.org,2002:seq' is the YAML internal tag for sequence. return dumper.represent_sequence( u'tag:yaml.org,2002:seq', data, flow_style=True) yaml.add_representer(list, _my_list_rep) yaml.dump(params.as_dict(), f, default_flow_style=False) def nested_csv_str_to_json_str(csv_str): """Converts a nested (using '.') comma-separated k=v string to a JSON string. Converts a comma-separated string of key/value pairs that supports nesting of keys to a JSON string. Nesting is implemented using '.' between levels for a given key. Spacing between commas and = is supported (e.g. there is no difference between "a=1,b=2", "a = 1, b = 2", or "a=1, b=2") but there should be no spaces before keys or after values (e.g. " a=1,b=2" and "a=1,b=2 " are not supported). Note that this will only support values supported by CSV, meaning values such as nested lists (e.g. "a=[[1,2,3],[4,5,6]]") are not supported. Strings are supported as well, e.g. "a='hello'". An example conversion would be: "a=1, b=2, c.a=2, c.b=3, d.a.a=5" to "{ a: 1, b : 2, c: {a : 2, b : 3}, d: {a: {a : 5}}}" Args: csv_str: the comma separated string. Returns: the converted JSON string. 
Raises: ValueError: If csv_str is not in a comma separated string or if the string is formatted incorrectly. """ if not csv_str: return '' formatted_entries = [] nested_map = collections.defaultdict(list) pos = 0 while pos < len(csv_str): m = _PARAM_RE.match(csv_str, pos) if not m: raise ValueError('Malformed hyperparameter value while parsing ' 'CSV string: %s' % csv_str[pos:]) pos = m.end() # Parse the values. m_dict = m.groupdict() name = m_dict['name'] v = m_dict['val'] # If a GCS path (e.g. gs://...) is provided, wrap this in quotes # as yaml.load would otherwise throw an exception if re.match(r'(?=[^\"\'])(?=[gs://])', v): v = '\'{}\''.format(v) name_nested = name.split('.') if len(name_nested) > 1: grouping = name_nested[0] value = '.'.join(name_nested[1:]) + '=' + v nested_map[grouping].append(value) else: formatted_entries.append('%s : %s' % (name, v)) for grouping, value in nested_map.items(): value = ','.join(value) value = nested_csv_str_to_json_str(value) formatted_entries.append('%s : %s' % (grouping, value)) return '{' + ', '.join(formatted_entries) + '}' def override_params_dict(params, dict_or_string_or_yaml_file, is_strict): """Override a given ParamsDict using a dict, JSON/YAML/CSV string or YAML file. The logic of the function is outlined below: 1. Test that the input is a dict. If not, proceed to 2. 2. Tests that the input is a string. If not, raise unknown ValueError 2.1. Test if the string is in a CSV format. If so, parse. If not, proceed to 2.2. 2.2. Try loading the string as a YAML/JSON. If successful, parse to dict and use it to override. If not, proceed to 2.3. 2.3. Try using the string as a file path and load the YAML file. Args: params: a ParamsDict object to be overridden. dict_or_string_or_yaml_file: a Python dict, JSON/YAML/CSV string or path to a YAML file specifying the parameters to be overridden. is_strict: a boolean specifying whether override is strict or not. Returns: params: the overridden ParamsDict object. Raises: ValueError: if failed to override the parameters. """ if not dict_or_string_or_yaml_file: return params if isinstance(dict_or_string_or_yaml_file, dict): params.override(dict_or_string_or_yaml_file, is_strict) elif isinstance(dict_or_string_or_yaml_file, six.string_types): try: dict_or_string_or_yaml_file = ( nested_csv_str_to_json_str(dict_or_string_or_yaml_file)) except ValueError: pass params_dict = yaml.load(dict_or_string_or_yaml_file) if isinstance(params_dict, dict): params.override(params_dict, is_strict) else: with tf.io.gfile.GFile(dict_or_string_or_yaml_file) as f: params.override(yaml.load(f), is_strict) else: raise ValueError('Unknown input type to parse.') return params
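# -----------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): exercises the
# documented override/validate flow and the CSV-to-JSON helper. The parameter
# names and values are arbitrary examples.
# -----------------------------------------------------------------------------
if __name__ == '__main__':
  _demo = ParamsDict({'a': 1, 'c': {'a': 1}}, restrictions=['a == c.a'])
  # Non-strict override may add new keys; strict override only updates
  # existing ones.
  _demo.override({'b': 2}, is_strict=False)
  _demo.validate()  # Passes: a == c.a still holds.
  # Dotted CSV keys are first rewritten into a nested JSON/YAML string.
  print(nested_csv_str_to_json_str('a=1, c.a=1'))  # {a : 1, c : {a : 1}}
  print(override_params_dict(_demo, 'b=3, c.a=1', is_strict=True).as_dict())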
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/official/modeling/hyperparams/params_dict.py
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/official/modeling/hyperparams/__init__.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for official.modeling.hyperparams.params_dict.py.""" import os import tensorflow as tf import yaml from official.modeling.hyperparams import params_dict class ParamsDictTest(tf.test.TestCase): def test_init_from_an_empty_dict(self): params = params_dict.ParamsDict() with self.assertRaises(KeyError): _ = params.a with self.assertRaises(KeyError): params.a = 'aa' def test_init_from_a_dict(self): params = params_dict.ParamsDict({'a': 'aa', 'b': 2}) self.assertEqual(params.a, 'aa') self.assertEqual(params.b, 2) def test_init_from_a_param_dict(self): params_init = params_dict.ParamsDict({'a': 'aa', 'b': 2}) params = params_dict.ParamsDict(params_init) self.assertEqual(params.a, 'aa') self.assertEqual(params.b, 2) def test_lock(self): params = params_dict.ParamsDict({'a': 1, 'b': 2}) params.lock() with self.assertRaises(ValueError): params.a = 10 with self.assertRaises(ValueError): params.override({'b': 20}) def test_setattr(self): params = params_dict.ParamsDict() params.override( {'a': 'aa', 'b': 2, 'c': None}, is_strict=False) params.c = 'ccc' self.assertEqual(params.a, 'aa') self.assertEqual(params.b, 2) self.assertEqual(params.c, 'ccc') def test_getattr(self): params = params_dict.ParamsDict() params.override( {'a': 'aa', 'b': 2, 'c': None}, is_strict=False) self.assertEqual(params.a, 'aa') self.assertEqual(params.b, 2) self.assertEqual(params.c, None) def test_contains(self): params = params_dict.ParamsDict() params.override( {'a': 'aa'}, is_strict=False) self.assertIn('a', params) self.assertNotIn('b', params) def test_get(self): params = params_dict.ParamsDict() params.override( {'a': 'aa'}, is_strict=False) self.assertEqual(params.get('a'), 'aa') self.assertEqual(params.get('b', 2), 2) self.assertEqual(params.get('b'), None) def test_override_is_strict_true(self): params = params_dict.ParamsDict( {'a': 'aa', 'b': 2, 'c': {'c1': 'cc', 'c2': 20}}) params.override({'a': 2, 'c': {'c1': 'ccc'}}, is_strict=True) self.assertEqual(params.a, 2) self.assertEqual(params.c.c1, 'ccc') with self.assertRaises(KeyError): params.override({'d': 'ddd'}, is_strict=True) with self.assertRaises(KeyError): params.override({'c': {'c3': 30}}, is_strict=True) def test_override_is_strict_false(self): params = params_dict.ParamsDict( {'a': 'aa', 'b': 2, 'c': {'c1': 10, 'c2': 20}}) params.override({'a': 2, 'c': {'c3': 3000}}, is_strict=False) self.assertEqual(params.a, 2) self.assertEqual(params.c.c3, 3000) params.override({'d': 'ddd'}, is_strict=False) self.assertEqual(params.d, 'ddd') params.override({'c': {'c4': 4444}}, is_strict=False) self.assertEqual(params.c.c4, 4444) def test_as_dict(self): params = params_dict.ParamsDict( {'a': 'aa', 'b': 2, 'c': {'c1': 10, 'c2': 20}}) params_d = params.as_dict() self.assertEqual(params_d['a'], 'aa') self.assertEqual(params_d['b'], 2) self.assertEqual(params_d['c']['c1'], 10) 
self.assertEqual(params_d['c']['c2'], 20) def test_validate(self): # Raise error due to the unknown parameter. with self.assertRaises(KeyError): params = params_dict.ParamsDict( {'a': 1, 'b': {'a': 11}}, ['a == c']) # OK to check equality of two nested dicts. params = params_dict.ParamsDict( {'a': 1, 'b': {'a': 10}, 'c': {'a': 10}}, ['b == c']) # Raise error due to inconsistency with self.assertRaises(KeyError): params = params_dict.ParamsDict( {'a': 1, 'c': {'a': 10}}, ['a == c.a']) # Valid rule. params = params_dict.ParamsDict( {'a': 1, 'c': {'a': 1}}, ['a == c.a']) # Overridding violates the existing rule, raise error upon validate. params.override({'a': 11}) with self.assertRaises(KeyError): params.validate() class ParamsDictIOTest(tf.test.TestCase): def write_temp_file(self, filename, text): temp_file = os.path.join(self.get_temp_dir(), filename) with tf.io.gfile.GFile(temp_file, 'w') as writer: writer.write(text) return temp_file def test_save_params_dict_to_yaml(self): params = params_dict.ParamsDict( {'a': 'aa', 'b': 2, 'c': {'c1': 10, 'c2': 20}}) output_yaml_file = os.path.join(self.get_temp_dir(), 'params.yaml') params_dict.save_params_dict_to_yaml(params, output_yaml_file) with tf.io.gfile.GFile(output_yaml_file, 'r') as f: params_d = yaml.load(f) self.assertEqual(params.a, params_d['a']) self.assertEqual(params.b, params_d['b']) self.assertEqual(params.c.c1, params_d['c']['c1']) self.assertEqual(params.c.c2, params_d['c']['c2']) def test_read_yaml_to_params_dict(self): input_yaml_file = self.write_temp_file( 'params.yaml', r""" a: 'aa' b: 2 c: c1: 10 c2: 20 """) params = params_dict.read_yaml_to_params_dict(input_yaml_file) self.assertEqual(params.a, 'aa') self.assertEqual(params.b, 2) self.assertEqual(params.c.c1, 10) self.assertEqual(params.c.c2, 20) def test_override_params_dict_using_dict(self): params = params_dict.ParamsDict({ 'a': 1, 'b': 2.5, 'c': [3, 4], 'd': 'hello', 'e': False}) override_dict = {'b': 5.2, 'c': [30, 40]} params = params_dict.override_params_dict( params, override_dict, is_strict=True) self.assertEqual(1, params.a) self.assertEqual(5.2, params.b) self.assertEqual([30, 40], params.c) self.assertEqual('hello', params.d) self.assertEqual(False, params.e) def test_override_params_dict_using_yaml_string(self): params = params_dict.ParamsDict({ 'a': 1, 'b': 2.5, 'c': [3, 4], 'd': 'hello', 'e': False}) override_yaml_string = "'b': 5.2\n'c': [30, 40]" params = params_dict.override_params_dict( params, override_yaml_string, is_strict=True) self.assertEqual(1, params.a) self.assertEqual(5.2, params.b) self.assertEqual([30, 40], params.c) self.assertEqual('hello', params.d) self.assertEqual(False, params.e) def test_override_params_dict_using_json_string(self): params = params_dict.ParamsDict({ 'a': 1, 'b': {'b1': 2, 'b2': [2, 3],}, 'd': {'d1': {'d2': 'hello'}}, 'e': False}) override_json_string = "{ b: { b2: [3, 4] }, d: { d1: { d2: 'hi' } } }" params = params_dict.override_params_dict( params, override_json_string, is_strict=True) self.assertEqual(1, params.a) self.assertEqual(2, params.b.b1) self.assertEqual([3, 4], params.b.b2) self.assertEqual('hi', params.d.d1.d2) self.assertEqual(False, params.e) def test_override_params_dict_using_csv_string(self): params = params_dict.ParamsDict({ 'a': 1, 'b': {'b1': 2, 'b2': [2, 3],}, 'd': {'d1': {'d2': 'hello'}}, 'e': False}) override_csv_string = "b.b2=[3,4], d.d1.d2='hi, world', e=gs://test" params = params_dict.override_params_dict( params, override_csv_string, is_strict=True) self.assertEqual(1, params.a) 
self.assertEqual(2, params.b.b1) self.assertEqual([3, 4], params.b.b2) self.assertEqual('hi, world', params.d.d1.d2) self.assertEqual('gs://test', params.e) def test_override_params_dict_using_yaml_file(self): params = params_dict.ParamsDict({ 'a': 1, 'b': 2.5, 'c': [3, 4], 'd': 'hello', 'e': False}) override_yaml_file = self.write_temp_file( 'params.yaml', r""" b: 5.2 c: [30, 40] """) params = params_dict.override_params_dict( params, override_yaml_file, is_strict=True) self.assertEqual(1, params.a) self.assertEqual(5.2, params.b) self.assertEqual([30, 40], params.c) self.assertEqual('hello', params.d) self.assertEqual(False, params.e) class IOTest(tf.test.TestCase): def test_basic_csv_str_to_json_str(self): csv_str = 'a=1,b=2,c=3' json_str = '{a : 1, b : 2, c : 3}' converted_csv_str = params_dict.nested_csv_str_to_json_str(csv_str) self.assertEqual(converted_csv_str, json_str) def test_basic_csv_str_load(self): csv_str = 'a=1,b=2,c=3' expected_output = {'a': 1, 'b': 2, 'c': 3} converted_csv_str = params_dict.nested_csv_str_to_json_str(csv_str) converted_dict = yaml.load(converted_csv_str) self.assertDictEqual(converted_dict, expected_output) def test_basic_nested_csv_str_to_json_str(self): csv_str = 'a=1,b.b1=2' json_str = '{a : 1, b : {b1 : 2}}' converted_csv_str = params_dict.nested_csv_str_to_json_str(csv_str) self.assertEqual(converted_csv_str, json_str) def test_basic_nested_csv_str_load(self): csv_str = 'a=1,b.b1=2,c.c1=3' expected_output = {'a': 1, 'b': {'b1': 2}, 'c': {'c1': 3}} converted_csv_str = params_dict.nested_csv_str_to_json_str(csv_str) converted_dict = yaml.load(converted_csv_str) self.assertDictEqual(converted_dict, expected_output) def test_complex_nested_csv_str_to_json_str(self): csv_str = 'a.aa.aaa.aaaaa.a=1' json_str = '{a : {aa : {aaa : {aaaaa : {a : 1}}}}}' converted_csv_str = params_dict.nested_csv_str_to_json_str(csv_str) self.assertEqual(converted_csv_str, json_str) def test_complex_nested_csv_str_load(self): csv_str = 'a.aa.aaa.aaaaa.a=1,a.a=2' expected_output = {'a': {'aa': {'aaa': {'aaaaa': {'a': 1}}}, 'a': 2}} converted_csv_str = params_dict.nested_csv_str_to_json_str(csv_str) converted_dict = yaml.load(converted_csv_str) self.assertDictEqual(converted_dict, expected_output) def test_csv_str_load_supported_datatypes(self): csv_str = 'a=1,b=2.,c=[1,2,3],d=\'hello, there\',e=\"Hi.\"' converted_csv_str = params_dict.nested_csv_str_to_json_str(csv_str) converted_dict = yaml.load(converted_csv_str) self.assertEqual(converted_dict['a'], 1) self.assertEqual(converted_dict['b'], 2.) 
self.assertEqual(converted_dict['c'], [1, 2, 3]) self.assertEqual(converted_dict['d'], 'hello, there') self.assertEqual(converted_dict['e'], 'Hi.') def test_csv_str_load_unsupported_datatypes(self): csv_str = 'a=[[1,2,3],[4,5,6]]' self.assertRaises(ValueError, params_dict.nested_csv_str_to_json_str, csv_str) def test_csv_str_to_json_str_spacing(self): csv_str1 = 'a=1,b=2,c=3' csv_str2 = 'a = 1, b = 2, c = 3' json_str = '{a : 1, b : 2, c : 3}' converted_csv_str1 = params_dict.nested_csv_str_to_json_str(csv_str1) converted_csv_str2 = params_dict.nested_csv_str_to_json_str(csv_str2) self.assertEqual(converted_csv_str1, converted_csv_str2) self.assertEqual(converted_csv_str1, json_str) self.assertEqual(converted_csv_str2, json_str) def test_gcs_added_quotes(self): csv_str = 'a=gs://abc, b=gs://def' expected_output = '{a : \'gs://abc\', b : \'gs://def\'}' converted_csv_str = params_dict.nested_csv_str_to_json_str(csv_str) self.assertEqual(converted_csv_str, expected_output) if __name__ == '__main__': tf.test.main()
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/official/modeling/hyperparams/params_dict_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """The main BERT model and related functions.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import copy import json import math import six import tensorflow as tf from tensorflow.python.util import deprecation from official.modeling import tf_utils class BertConfig(object): """Configuration for `BertModel`.""" def __init__(self, vocab_size, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, initializer_range=0.02, backward_compatible=True): """Constructs BertConfig. Args: vocab_size: Vocabulary size of `inputs_ids` in `BertModel`. hidden_size: Size of the encoder layers and the pooler layer. num_hidden_layers: Number of hidden layers in the Transformer encoder. num_attention_heads: Number of attention heads for each attention layer in the Transformer encoder. intermediate_size: The size of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act: The non-linear activation function (function or string) in the encoder and pooler. hidden_dropout_prob: The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob: The dropout ratio for the attention probabilities. max_position_embeddings: The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size: The vocabulary size of the `token_type_ids` passed into `BertModel`. initializer_range: The stdev of the truncated_normal_initializer for initializing all weight matrices. backward_compatible: Boolean, whether the variables shape are compatible with checkpoints converted from TF 1.x BERT. 
""" self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range self.backward_compatible = backward_compatible @classmethod def from_dict(cls, json_object): """Constructs a `BertConfig` from a Python dictionary of parameters.""" config = BertConfig(vocab_size=None) for (key, value) in six.iteritems(json_object): config.__dict__[key] = value return config @classmethod def from_json_file(cls, json_file): """Constructs a `BertConfig` from a json file of parameters.""" with tf.io.gfile.GFile(json_file, "r") as reader: text = reader.read() return cls.from_dict(json.loads(text)) def to_dict(self): """Serializes this instance to a Python dictionary.""" output = copy.deepcopy(self.__dict__) return output def to_json_string(self): """Serializes this instance to a JSON string.""" return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n" class AlbertConfig(BertConfig): """Configuration for `ALBERT`.""" def __init__(self, embedding_size, num_hidden_groups=1, inner_group_num=1, **kwargs): """Constructs AlbertConfig. Args: embedding_size: Size of the factorized word embeddings. num_hidden_groups: Number of group for the hidden layers, parameters in the same group are shared. Note that this value and also the following 'inner_group_num' has to be 1 for now, because all released ALBERT models set them to 1. We may support arbitary valid values in future. inner_group_num: Number of inner repetition of attention and ffn. **kwargs: The remaining arguments are the same as above 'BertConfig'. """ super(AlbertConfig, self).__init__(**kwargs) self.embedding_size = embedding_size # TODO(chendouble): 'inner_group_num' and 'num_hidden_groups' are always 1 # in the released ALBERT. Support other values in AlbertTransformerEncoder # if needed. if inner_group_num != 1 or num_hidden_groups != 1: raise ValueError("We only support 'inner_group_num' and " "'num_hidden_groups' as 1.") @classmethod def from_dict(cls, json_object): """Constructs a `AlbertConfig` from a Python dictionary of parameters.""" config = AlbertConfig(embedding_size=None, vocab_size=None) for (key, value) in six.iteritems(json_object): config.__dict__[key] = value return config @deprecation.deprecated(None, "The function should not be used any more.") def get_bert_model(input_word_ids, input_mask, input_type_ids, config=None, name=None, float_type=tf.float32): """Wraps the core BERT model as a keras.Model.""" bert_model_layer = BertModel(config=config, float_type=float_type, name=name) pooled_output, sequence_output = bert_model_layer(input_word_ids, input_mask, input_type_ids) bert_model = tf.keras.Model( inputs=[input_word_ids, input_mask, input_type_ids], outputs=[pooled_output, sequence_output]) return bert_model class BertModel(tf.keras.layers.Layer): """BERT model ("Bidirectional Encoder Representations from Transformers"). 
Example usage: ```python # Already been converted into WordPiece token ids input_word_ids = tf.constant([[31, 51, 99], [15, 5, 0]]) input_mask = tf.constant([[1, 1, 1], [1, 1, 0]]) input_type_ids = tf.constant([[0, 0, 1], [0, 2, 0]]) config = modeling.BertConfig(vocab_size=32000, hidden_size=512, num_hidden_layers=8, num_attention_heads=6, intermediate_size=1024) pooled_output, sequence_output = modeling.BertModel(config=config)( input_word_ids=input_word_ids, input_mask=input_mask, input_type_ids=input_type_ids) ... ``` """ @deprecation.deprecated( None, "Please use `nlp.modeling.networks.TransformerEncoder` instead.") def __init__(self, config, float_type=tf.float32, **kwargs): super(BertModel, self).__init__(**kwargs) self.config = ( BertConfig.from_dict(config) if isinstance(config, dict) else copy.deepcopy(config)) self.float_type = float_type def build(self, unused_input_shapes): """Implements build() for the layer.""" self.embedding_lookup = EmbeddingLookup( vocab_size=self.config.vocab_size, embedding_size=self.config.hidden_size, initializer_range=self.config.initializer_range, dtype=tf.float32, name="word_embeddings") self.embedding_postprocessor = EmbeddingPostprocessor( use_type_embeddings=True, token_type_vocab_size=self.config.type_vocab_size, use_position_embeddings=True, max_position_embeddings=self.config.max_position_embeddings, dropout_prob=self.config.hidden_dropout_prob, initializer_range=self.config.initializer_range, dtype=tf.float32, name="embedding_postprocessor") self.encoder = Transformer( num_hidden_layers=self.config.num_hidden_layers, hidden_size=self.config.hidden_size, num_attention_heads=self.config.num_attention_heads, intermediate_size=self.config.intermediate_size, intermediate_activation=self.config.hidden_act, hidden_dropout_prob=self.config.hidden_dropout_prob, attention_probs_dropout_prob=self.config.attention_probs_dropout_prob, initializer_range=self.config.initializer_range, backward_compatible=self.config.backward_compatible, float_type=self.float_type, name="encoder") self.pooler_transform = tf.keras.layers.Dense( units=self.config.hidden_size, activation="tanh", kernel_initializer=get_initializer(self.config.initializer_range), name="pooler_transform") super(BertModel, self).build(unused_input_shapes) def __call__(self, input_word_ids, input_mask=None, input_type_ids=None, **kwargs): inputs = tf_utils.pack_inputs([input_word_ids, input_mask, input_type_ids]) return super(BertModel, self).__call__(inputs, **kwargs) def call(self, inputs, mode="bert"): """Implements call() for the layer. Args: inputs: packed input tensors. mode: string, `bert` or `encoder`. Returns: Output tensor of the last layer for BERT training (mode=`bert`) which is a float Tensor of shape [batch_size, seq_length, hidden_size] or a list of output tensors for encoder usage (mode=`encoder`). 
""" unpacked_inputs = tf_utils.unpack_inputs(inputs) input_word_ids = unpacked_inputs[0] input_mask = unpacked_inputs[1] input_type_ids = unpacked_inputs[2] word_embeddings = self.embedding_lookup(input_word_ids) embedding_tensor = self.embedding_postprocessor( word_embeddings=word_embeddings, token_type_ids=input_type_ids) if self.float_type == tf.float16: embedding_tensor = tf.cast(embedding_tensor, tf.float16) attention_mask = None if input_mask is not None: attention_mask = create_attention_mask_from_input_mask( input_word_ids, input_mask) if mode == "encoder": return self.encoder( embedding_tensor, attention_mask, return_all_layers=True) sequence_output = self.encoder(embedding_tensor, attention_mask) first_token_tensor = tf.squeeze(sequence_output[:, 0:1, :], axis=1) pooled_output = self.pooler_transform(first_token_tensor) return (pooled_output, sequence_output) def get_config(self): config = {"config": self.config.to_dict()} base_config = super(BertModel, self).get_config() return dict(list(base_config.items()) + list(config.items())) class EmbeddingLookup(tf.keras.layers.Layer): """Looks up words embeddings for id tensor.""" def __init__(self, vocab_size, embedding_size=768, initializer_range=0.02, **kwargs): super(EmbeddingLookup, self).__init__(**kwargs) self.vocab_size = vocab_size self.embedding_size = embedding_size self.initializer_range = initializer_range def build(self, unused_input_shapes): """Implements build() for the layer.""" self.embeddings = self.add_weight( "embeddings", shape=[self.vocab_size, self.embedding_size], initializer=get_initializer(self.initializer_range), dtype=self.dtype) super(EmbeddingLookup, self).build(unused_input_shapes) def call(self, inputs): """Implements call() for the layer.""" input_shape = tf_utils.get_shape_list(inputs) flat_input = tf.reshape(inputs, [-1]) output = tf.gather(self.embeddings, flat_input) output = tf.reshape(output, input_shape + [self.embedding_size]) return output class EmbeddingPostprocessor(tf.keras.layers.Layer): """Performs various post-processing on a word embedding tensor.""" def __init__(self, use_type_embeddings=False, token_type_vocab_size=None, use_position_embeddings=True, max_position_embeddings=512, dropout_prob=0.0, initializer_range=0.02, initializer=None, **kwargs): super(EmbeddingPostprocessor, self).__init__(**kwargs) self.use_type_embeddings = use_type_embeddings self.token_type_vocab_size = token_type_vocab_size self.use_position_embeddings = use_position_embeddings self.max_position_embeddings = max_position_embeddings self.dropout_prob = dropout_prob self.initializer_range = initializer_range if not initializer: self.initializer = get_initializer(self.initializer_range) else: self.initializer = initializer if self.use_type_embeddings and not self.token_type_vocab_size: raise ValueError("If `use_type_embeddings` is True, then " "`token_type_vocab_size` must be specified.") def build(self, input_shapes): """Implements build() for the layer.""" (word_embeddings_shape, _) = input_shapes width = word_embeddings_shape.as_list()[-1] self.type_embeddings = None if self.use_type_embeddings: self.type_embeddings = self.add_weight( "type_embeddings", shape=[self.token_type_vocab_size, width], initializer=get_initializer(self.initializer_range), dtype=self.dtype) self.position_embeddings = None if self.use_position_embeddings: self.position_embeddings = self.add_weight( "position_embeddings", shape=[self.max_position_embeddings, width], initializer=get_initializer(self.initializer_range), dtype=self.dtype) 
self.output_layer_norm = tf.keras.layers.LayerNormalization( name="layer_norm", axis=-1, epsilon=1e-12, dtype=tf.float32) self.output_dropout = tf.keras.layers.Dropout( rate=self.dropout_prob, dtype=tf.float32) super(EmbeddingPostprocessor, self).build(input_shapes) def __call__(self, word_embeddings, token_type_ids=None, **kwargs): inputs = tf_utils.pack_inputs([word_embeddings, token_type_ids]) return super(EmbeddingPostprocessor, self).__call__(inputs, **kwargs) def call(self, inputs): """Implements call() for the layer.""" unpacked_inputs = tf_utils.unpack_inputs(inputs) word_embeddings = unpacked_inputs[0] token_type_ids = unpacked_inputs[1] input_shape = tf_utils.get_shape_list(word_embeddings, expected_rank=3) batch_size = input_shape[0] seq_length = input_shape[1] width = input_shape[2] output = word_embeddings if self.use_type_embeddings: flat_token_type_ids = tf.reshape(token_type_ids, [-1]) token_type_embeddings = tf.gather(self.type_embeddings, flat_token_type_ids) token_type_embeddings = tf.reshape(token_type_embeddings, [batch_size, seq_length, width]) output += token_type_embeddings if self.use_position_embeddings: position_embeddings = tf.expand_dims( tf.slice(self.position_embeddings, [0, 0], [seq_length, width]), axis=0) output += position_embeddings output = self.output_layer_norm(output) output = self.output_dropout(output) return output class Attention(tf.keras.layers.Layer): """Performs multi-headed attention from `from_tensor` to `to_tensor`. This is an implementation of multi-headed attention based on "Attention is all you Need". If `from_tensor` and `to_tensor` are the same, then this is self-attention. Each timestep in `from_tensor` attends to the corresponding sequence in `to_tensor`, and returns a fixed-with vector. This function first projects `from_tensor` into a "query" tensor and `to_tensor` into "key" and "value" tensors. These are (effectively) a list of tensors of length `num_attention_heads`, where each tensor is of shape [batch_size, seq_length, size_per_head]. Then, the query and key tensors are dot-producted and scaled. These are softmaxed to obtain attention probabilities. The value tensors are then interpolated by these probabilities, then concatenated back to a single tensor and returned. 
In practice, the multi-headed attention are done with tf.einsum as follows: Input_tensor: [BFD] Wq, Wk, Wv: [DNH] Q:[BFNH] = einsum('BFD,DNH->BFNH', Input_tensor, Wq) K:[BTNH] = einsum('BTD,DNH->BTNH', Input_tensor, Wk) V:[BTNH] = einsum('BTD,DNH->BTNH', Input_tensor, Wv) attention_scores:[BNFT] = einsum('BTNH,BFNH->BNFT', K, Q) / sqrt(H) attention_probs:[BNFT] = softmax(attention_scores) context_layer:[BFNH] = einsum('BNFT,BTNH->BFNH', attention_probs, V) Wout:[DNH] Output:[BFD] = einsum('BFNH,DNH>BFD', context_layer, Wout) """ def __init__(self, num_attention_heads=12, size_per_head=64, attention_probs_dropout_prob=0.0, initializer_range=0.02, backward_compatible=False, **kwargs): super(Attention, self).__init__(**kwargs) self.num_attention_heads = num_attention_heads self.size_per_head = size_per_head self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.backward_compatible = backward_compatible def build(self, unused_input_shapes): """Implements build() for the layer.""" self.query_dense = self._projection_dense_layer("query") self.key_dense = self._projection_dense_layer("key") self.value_dense = self._projection_dense_layer("value") self.attention_probs_dropout = tf.keras.layers.Dropout( rate=self.attention_probs_dropout_prob) super(Attention, self).build(unused_input_shapes) def reshape_to_matrix(self, input_tensor): """Reshape N > 2 rank tensor to rank 2 tensor for performance.""" ndims = input_tensor.shape.ndims if ndims < 2: raise ValueError("Input tensor must have at least rank 2." "Shape = %s" % (input_tensor.shape)) if ndims == 2: return input_tensor width = input_tensor.shape[-1] output_tensor = tf.reshape(input_tensor, [-1, width]) return output_tensor def __call__(self, from_tensor, to_tensor, attention_mask=None, **kwargs): inputs = tf_utils.pack_inputs([from_tensor, to_tensor, attention_mask]) return super(Attention, self).__call__(inputs, **kwargs) def call(self, inputs): """Implements call() for the layer.""" (from_tensor, to_tensor, attention_mask) = tf_utils.unpack_inputs(inputs) # Scalar dimensions referenced here: # B = batch size (number of sequences) # F = `from_tensor` sequence length # T = `to_tensor` sequence length # N = `num_attention_heads` # H = `size_per_head` # `query_tensor` = [B, F, N ,H] query_tensor = self.query_dense(from_tensor) # `key_tensor` = [B, T, N, H] key_tensor = self.key_dense(to_tensor) # `value_tensor` = [B, T, N, H] value_tensor = self.value_dense(to_tensor) # Take the dot product between "query" and "key" to get the raw # attention scores. attention_scores = tf.einsum("BTNH,BFNH->BNFT", key_tensor, query_tensor) attention_scores = tf.multiply(attention_scores, 1.0 / math.sqrt(float(self.size_per_head))) if attention_mask is not None: # `attention_mask` = [B, 1, F, T] attention_mask = tf.expand_dims(attention_mask, axis=[1]) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. adder = (1.0 - tf.cast(attention_mask, attention_scores.dtype)) * -10000.0 # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. attention_scores += adder # Normalize the attention scores to probabilities. 
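    # The softmax runs over the last (T) axis, so every query position gets a
    # probability distribution over the `to_tensor` positions it attends to.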
# `attention_probs` = [B, N, F, T] attention_probs = tf.nn.softmax(attention_scores) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.attention_probs_dropout(attention_probs) # `context_layer` = [B, F, N, H] context_tensor = tf.einsum("BNFT,BTNH->BFNH", attention_probs, value_tensor) return context_tensor def _projection_dense_layer(self, name): """A helper to define a projection layer.""" return Dense3D( num_attention_heads=self.num_attention_heads, size_per_head=self.size_per_head, kernel_initializer=get_initializer(self.initializer_range), output_projection=False, backward_compatible=self.backward_compatible, name=name) class Dense3D(tf.keras.layers.Layer): """A Dense Layer using 3D kernel with tf.einsum implementation. Attributes: num_attention_heads: An integer, number of attention heads for each multihead attention layer. size_per_head: An integer, hidden size per attention head. hidden_size: An integer, dimension of the hidden layer. kernel_initializer: An initializer for the kernel weight. bias_initializer: An initializer for the bias. activation: An activation function to use. If nothing is specified, no activation is applied. use_bias: A bool, whether the layer uses a bias. output_projection: A bool, whether the Dense3D layer is used for output linear projection. backward_compatible: A bool, whether the variables shape are compatible with checkpoints converted from TF 1.x. """ def __init__(self, num_attention_heads=12, size_per_head=72, kernel_initializer=None, bias_initializer="zeros", activation=None, use_bias=True, output_projection=False, backward_compatible=False, **kwargs): """Inits Dense3D.""" super(Dense3D, self).__init__(**kwargs) self.num_attention_heads = num_attention_heads self.size_per_head = size_per_head self.hidden_size = num_attention_heads * size_per_head self.kernel_initializer = kernel_initializer self.bias_initializer = bias_initializer self.activation = activation self.use_bias = use_bias self.output_projection = output_projection self.backward_compatible = backward_compatible @property def compatible_kernel_shape(self): if self.output_projection: return [self.hidden_size, self.hidden_size] return [self.last_dim, self.hidden_size] @property def compatible_bias_shape(self): return [self.hidden_size] @property def kernel_shape(self): if self.output_projection: return [self.num_attention_heads, self.size_per_head, self.hidden_size] return [self.last_dim, self.num_attention_heads, self.size_per_head] @property def bias_shape(self): if self.output_projection: return [self.hidden_size] return [self.num_attention_heads, self.size_per_head] def build(self, input_shape): """Implements build() for the layer.""" dtype = tf.as_dtype(self.dtype or tf.keras.backend.floatx()) if not (dtype.is_floating or dtype.is_complex): raise TypeError("Unable to build `Dense3D` layer with non-floating " "point (and non-complex) dtype %s" % (dtype,)) input_shape = tf.TensorShape(input_shape) if tf.compat.dimension_value(input_shape[-1]) is None: raise ValueError("The last dimension of the inputs to `Dense3D` " "should be defined. Found `None`.") self.last_dim = tf.compat.dimension_value(input_shape[-1]) self.input_spec = tf.keras.layers.InputSpec( min_ndim=3, axes={-1: self.last_dim}) # Determines variable shapes. 
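    # In backward-compatible mode the kernel/bias use the 2D shapes found in
    # TF 1.x BERT checkpoints and are reshaped to their per-head 3D layout in
    # call(); otherwise the per-head shapes are used directly.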
if self.backward_compatible: kernel_shape = self.compatible_kernel_shape bias_shape = self.compatible_bias_shape else: kernel_shape = self.kernel_shape bias_shape = self.bias_shape self.kernel = self.add_weight( "kernel", shape=kernel_shape, initializer=self.kernel_initializer, dtype=self.dtype, trainable=True) if self.use_bias: self.bias = self.add_weight( "bias", shape=bias_shape, initializer=self.bias_initializer, dtype=self.dtype, trainable=True) else: self.bias = None super(Dense3D, self).build(input_shape) def call(self, inputs): """Implements ``call()`` for Dense3D. Args: inputs: A float tensor of shape [batch_size, sequence_length, hidden_size] when output_projection is False, otherwise a float tensor of shape [batch_size, sequence_length, num_heads, dim_per_head]. Returns: The projected tensor with shape [batch_size, sequence_length, num_heads, dim_per_head] when output_projection is False, otherwise [batch_size, sequence_length, hidden_size]. """ if self.backward_compatible: kernel = tf.keras.backend.reshape(self.kernel, self.kernel_shape) bias = ( tf.keras.backend.reshape(self.bias, self.bias_shape) if self.use_bias else None) else: kernel = self.kernel bias = self.bias if self.output_projection: ret = tf.einsum("abcd,cde->abe", inputs, kernel) else: ret = tf.einsum("abc,cde->abde", inputs, kernel) if self.use_bias: ret += bias if self.activation is not None: return self.activation(ret) return ret class Dense2DProjection(tf.keras.layers.Layer): """A 2D projection layer with tf.einsum implementation.""" def __init__(self, output_size, kernel_initializer=None, bias_initializer="zeros", activation=None, fp32_activation=False, **kwargs): super(Dense2DProjection, self).__init__(**kwargs) self.output_size = output_size self.kernel_initializer = kernel_initializer self.bias_initializer = bias_initializer self.activation = activation self.fp32_activation = fp32_activation def build(self, input_shape): """Implements build() for the layer.""" dtype = tf.as_dtype(self.dtype or tf.keras.backend.floatx()) if not (dtype.is_floating or dtype.is_complex): raise TypeError("Unable to build `Dense2DProjection` layer with " "non-floating point (and non-complex) " "dtype %s" % (dtype,)) input_shape = tf.TensorShape(input_shape) if tf.compat.dimension_value(input_shape[-1]) is None: raise ValueError("The last dimension of the inputs to " "`Dense2DProjection` should be defined. " "Found `None`.") last_dim = tf.compat.dimension_value(input_shape[-1]) self.input_spec = tf.keras.layers.InputSpec(min_ndim=3, axes={-1: last_dim}) self.kernel = self.add_weight( "kernel", shape=[last_dim, self.output_size], initializer=self.kernel_initializer, dtype=self.dtype, trainable=True) self.bias = self.add_weight( "bias", shape=[self.output_size], initializer=self.bias_initializer, dtype=self.dtype, trainable=True) super(Dense2DProjection, self).build(input_shape) def call(self, inputs): """Implements call() for Dense2DProjection. Args: inputs: float Tensor of shape [batch, from_seq_length, num_attention_heads, size_per_head]. Returns: A 3D Tensor. """ ret = tf.einsum("abc,cd->abd", inputs, self.kernel) ret += self.bias if self.activation is not None: if self.dtype == tf.float16 and self.fp32_activation: ret = tf.cast(ret, tf.float32) return self.activation(ret) return ret class TransformerBlock(tf.keras.layers.Layer): """Single transformer layer. It has two sub-layers. The first is a multi-head self-attention mechanism, and the second is a positionwise fully connected feed-forward network. 
""" def __init__(self, hidden_size=768, num_attention_heads=12, intermediate_size=3072, intermediate_activation="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, backward_compatible=False, float_type=tf.float32, **kwargs): super(TransformerBlock, self).__init__(**kwargs) self.hidden_size = hidden_size self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.intermediate_activation = tf_utils.get_activation( intermediate_activation) self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.backward_compatible = backward_compatible self.float_type = float_type if self.hidden_size % self.num_attention_heads != 0: raise ValueError( "The hidden size (%d) is not a multiple of the number of attention " "heads (%d)" % (self.hidden_size, self.num_attention_heads)) self.attention_head_size = int(self.hidden_size / self.num_attention_heads) def build(self, unused_input_shapes): """Implements build() for the layer.""" self.attention_layer = Attention( num_attention_heads=self.num_attention_heads, size_per_head=self.attention_head_size, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, backward_compatible=self.backward_compatible, name="self_attention") self.attention_output_dense = Dense3D( num_attention_heads=self.num_attention_heads, size_per_head=int(self.hidden_size / self.num_attention_heads), kernel_initializer=get_initializer(self.initializer_range), output_projection=True, backward_compatible=self.backward_compatible, name="self_attention_output") self.attention_dropout = tf.keras.layers.Dropout( rate=self.hidden_dropout_prob) self.attention_layer_norm = ( tf.keras.layers.LayerNormalization( name="self_attention_layer_norm", axis=-1, epsilon=1e-12, # We do layer norm in float32 for numeric stability. dtype=tf.float32)) self.intermediate_dense = Dense2DProjection( output_size=self.intermediate_size, kernel_initializer=get_initializer(self.initializer_range), activation=self.intermediate_activation, # Uses float32 so that gelu activation is done in float32. 
fp32_activation=True, name="intermediate") self.output_dense = Dense2DProjection( output_size=self.hidden_size, kernel_initializer=get_initializer(self.initializer_range), name="output") self.output_dropout = tf.keras.layers.Dropout(rate=self.hidden_dropout_prob) self.output_layer_norm = tf.keras.layers.LayerNormalization( name="output_layer_norm", axis=-1, epsilon=1e-12, dtype=tf.float32) super(TransformerBlock, self).build(unused_input_shapes) def common_layers(self): """Explicitly gets all layer objects inside a Transformer encoder block.""" return [ self.attention_layer, self.attention_output_dense, self.attention_dropout, self.attention_layer_norm, self.intermediate_dense, self.output_dense, self.output_dropout, self.output_layer_norm ] def __call__(self, input_tensor, attention_mask=None, **kwargs): inputs = tf_utils.pack_inputs([input_tensor, attention_mask]) return super(TransformerBlock, self).__call__(inputs, **kwargs) def call(self, inputs): """Implements call() for the layer.""" (input_tensor, attention_mask) = tf_utils.unpack_inputs(inputs) attention_output = self.attention_layer( from_tensor=input_tensor, to_tensor=input_tensor, attention_mask=attention_mask) attention_output = self.attention_output_dense(attention_output) attention_output = self.attention_dropout(attention_output) # Use float32 in keras layer norm and the gelu activation in the # intermediate dense layer for numeric stability attention_output = self.attention_layer_norm(input_tensor + attention_output) if self.float_type == tf.float16: attention_output = tf.cast(attention_output, tf.float16) intermediate_output = self.intermediate_dense(attention_output) if self.float_type == tf.float16: intermediate_output = tf.cast(intermediate_output, tf.float16) layer_output = self.output_dense(intermediate_output) layer_output = self.output_dropout(layer_output) # Use float32 in keras layer norm for numeric stability layer_output = self.output_layer_norm(layer_output + attention_output) if self.float_type == tf.float16: layer_output = tf.cast(layer_output, tf.float16) return layer_output class Transformer(tf.keras.layers.Layer): """Multi-headed, multi-layer Transformer from "Attention is All You Need". This is almost an exact implementation of the original Transformer encoder. 
See the original paper: https://arxiv.org/abs/1706.03762 Also see: https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/models/transformer.py """ def __init__(self, num_hidden_layers=12, hidden_size=768, num_attention_heads=12, intermediate_size=3072, intermediate_activation="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, backward_compatible=False, float_type=tf.float32, **kwargs): super(Transformer, self).__init__(**kwargs) self.num_hidden_layers = num_hidden_layers self.hidden_size = hidden_size self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.intermediate_activation = tf_utils.get_activation( intermediate_activation) self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.backward_compatible = backward_compatible self.float_type = float_type def build(self, unused_input_shapes): """Implements build() for the layer.""" self.layers = [] for i in range(self.num_hidden_layers): self.layers.append( TransformerBlock( hidden_size=self.hidden_size, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, intermediate_activation=self.intermediate_activation, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, backward_compatible=self.backward_compatible, float_type=self.float_type, name=("layer_%d" % i))) super(Transformer, self).build(unused_input_shapes) def __call__(self, input_tensor, attention_mask=None, **kwargs): inputs = tf_utils.pack_inputs([input_tensor, attention_mask]) return super(Transformer, self).__call__(inputs=inputs, **kwargs) def call(self, inputs, return_all_layers=False): """Implements call() for the layer. Args: inputs: packed inputs. return_all_layers: bool, whether to return outputs of all layers inside encoders. Returns: Output tensor of the last layer or a list of output tensors. """ unpacked_inputs = tf_utils.unpack_inputs(inputs) input_tensor = unpacked_inputs[0] attention_mask = unpacked_inputs[1] output_tensor = input_tensor all_layer_outputs = [] for layer in self.layers: output_tensor = layer(output_tensor, attention_mask) all_layer_outputs.append(output_tensor) if return_all_layers: return all_layer_outputs return all_layer_outputs[-1] def get_initializer(initializer_range=0.02): """Creates a `tf.initializers.truncated_normal` with the given range. Args: initializer_range: float, initializer range for stddev. Returns: TruncatedNormal initializer with stddev = `initializer_range`. """ return tf.keras.initializers.TruncatedNormal(stddev=initializer_range) def create_attention_mask_from_input_mask(from_tensor, to_mask): """Create 3D attention mask from a 2D tensor mask. Args: from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...]. to_mask: int32 Tensor of shape [batch_size, to_seq_length]. Returns: float Tensor of shape [batch_size, from_seq_length, to_seq_length]. """ from_shape = tf_utils.get_shape_list(from_tensor, expected_rank=[2, 3]) batch_size = from_shape[0] from_seq_length = from_shape[1] to_shape = tf_utils.get_shape_list(to_mask, expected_rank=2) to_seq_length = to_shape[1] to_mask = tf.cast( tf.reshape(to_mask, [batch_size, 1, to_seq_length]), dtype=from_tensor.dtype) # We don't assume that `from_tensor` is a mask (although it could be). 
We # don't actually care if we attend *from* padding tokens (only *to* padding tokens) # so we create a tensor of all ones. # # `broadcast_ones` = [batch_size, from_seq_length, 1] broadcast_ones = tf.ones( shape=[batch_size, from_seq_length, 1], dtype=from_tensor.dtype) # Here we broadcast along two dimensions to create the mask. mask = broadcast_ones * to_mask return mask
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/official/nlp/bert_modeling.py
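# --- Illustrative sketch (not part of the repository) ---
# The Attention docstring above expresses multi-head attention as a chain of
# tf.einsum calls over B (batch), F (from_seq), T (to_seq), N (heads) and
# H (size_per_head) axes, with the output projection mapping back to D = N*H.
# The toy dimensions, random weight tensors and the [B, T] padding mask below
# are assumptions chosen only to make the shapes concrete; the layer itself
# expects an already-broadcast [batch, from_seq, to_seq] attention_mask.
import math

import numpy as np
import tensorflow as tf

B, F, T, N, H = 2, 3, 3, 2, 4       # batch, from_len, to_len, heads, size_per_head
D = N * H                           # model hidden size

rng = np.random.RandomState(0)
x = tf.constant(rng.rand(B, F, D), dtype=tf.float32)   # self-attention: from == to
wq = tf.constant(rng.rand(D, N, H), dtype=tf.float32)
wk = tf.constant(rng.rand(D, N, H), dtype=tf.float32)
wv = tf.constant(rng.rand(D, N, H), dtype=tf.float32)
wo = tf.constant(rng.rand(D, N, H), dtype=tf.float32)

q = tf.einsum("BFD,DNH->BFNH", x, wq)
k = tf.einsum("BTD,DNH->BTNH", x, wk)
v = tf.einsum("BTD,DNH->BTNH", x, wv)

scores = tf.einsum("BTNH,BFNH->BNFT", k, q) / math.sqrt(float(H))

# Additive mask trick used in Attention.call(): 1.0 = attend, 0.0 = masked.
# Masked logits receive -10000.0, so softmax gives them ~0 probability.
mask = tf.constant([[1, 1, 0], [1, 1, 1]], dtype=tf.float32)    # [B, T]
adder = (1.0 - mask[:, tf.newaxis, tf.newaxis, :]) * -10000.0   # [B, 1, 1, T]
probs = tf.nn.softmax(scores + adder)                           # [B, N, F, T]

context = tf.einsum("BNFT,BTNH->BFNH", probs, v)                # [B, F, N, H]
output = tf.einsum("BFNH,DNH->BFD", context, wo)                # back to [B, F, D]
print(output.shape)   # (2, 3, 8)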
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """BERT models that are compatible with TF 2.0.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf import tensorflow_hub as hub from official.modeling import tf_utils from official.nlp import bert_modeling from official.nlp.modeling import losses from official.nlp.modeling import networks from official.nlp.modeling.networks import bert_classifier from official.nlp.modeling.networks import bert_pretrainer from official.nlp.modeling.networks import bert_span_labeler def gather_indexes(sequence_tensor, positions): """Gathers the vectors at the specific positions. Args: sequence_tensor: Sequence output of `BertModel` layer of shape (`batch_size`, `seq_length`, num_hidden) where num_hidden is number of hidden units of `BertModel` layer. positions: Positions ids of tokens in sequence to mask for pretraining of with dimension (batch_size, max_predictions_per_seq) where `max_predictions_per_seq` is maximum number of tokens to mask out and predict per each sequence. Returns: Masked out sequence tensor of shape (batch_size * max_predictions_per_seq, num_hidden). 
""" sequence_shape = tf_utils.get_shape_list( sequence_tensor, name='sequence_output_tensor') batch_size = sequence_shape[0] seq_length = sequence_shape[1] width = sequence_shape[2] flat_offsets = tf.keras.backend.reshape( tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1]) flat_positions = tf.keras.backend.reshape(positions + flat_offsets, [-1]) flat_sequence_tensor = tf.keras.backend.reshape( sequence_tensor, [batch_size * seq_length, width]) output_tensor = tf.gather(flat_sequence_tensor, flat_positions) return output_tensor class BertPretrainLossAndMetricLayer(tf.keras.layers.Layer): """Returns layer that computes custom loss and metrics for pretraining.""" def __init__(self, vocab_size, **kwargs): super(BertPretrainLossAndMetricLayer, self).__init__(**kwargs) self._vocab_size = vocab_size self.config = { 'vocab_size': vocab_size, } def __call__(self, lm_output, sentence_output=None, lm_label_ids=None, lm_label_weights=None, sentence_labels=None, **kwargs): inputs = tf_utils.pack_inputs([ lm_output, sentence_output, lm_label_ids, lm_label_weights, sentence_labels ]) return super(BertPretrainLossAndMetricLayer, self).__call__(inputs, **kwargs) def _add_metrics(self, lm_output, lm_labels, lm_label_weights, lm_example_loss, sentence_output, sentence_labels, next_sentence_loss): """Adds metrics.""" masked_lm_accuracy = tf.keras.metrics.sparse_categorical_accuracy( lm_labels, lm_output) numerator = tf.reduce_sum(masked_lm_accuracy * lm_label_weights) denominator = tf.reduce_sum(lm_label_weights) + 1e-5 masked_lm_accuracy = numerator / denominator self.add_metric( masked_lm_accuracy, name='masked_lm_accuracy', aggregation='mean') self.add_metric(lm_example_loss, name='lm_example_loss', aggregation='mean') next_sentence_accuracy = tf.keras.metrics.sparse_categorical_accuracy( sentence_labels, sentence_output) self.add_metric( next_sentence_accuracy, name='next_sentence_accuracy', aggregation='mean') self.add_metric( next_sentence_loss, name='next_sentence_loss', aggregation='mean') def call(self, inputs): """Implements call() for the layer.""" unpacked_inputs = tf_utils.unpack_inputs(inputs) lm_output = unpacked_inputs[0] sentence_output = unpacked_inputs[1] lm_label_ids = unpacked_inputs[2] lm_label_weights = tf.keras.backend.cast(unpacked_inputs[3], tf.float32) sentence_labels = unpacked_inputs[4] mask_label_loss = losses.weighted_sparse_categorical_crossentropy_loss( labels=lm_label_ids, predictions=lm_output, weights=lm_label_weights) sentence_loss = losses.weighted_sparse_categorical_crossentropy_loss( labels=sentence_labels, predictions=sentence_output) loss = mask_label_loss + sentence_loss batch_shape = tf.slice(tf.keras.backend.shape(sentence_labels), [0], [1]) # TODO(hongkuny): Avoids the hack and switches add_loss. final_loss = tf.fill(batch_shape, loss) self._add_metrics(lm_output, lm_label_ids, lm_label_weights, mask_label_loss, sentence_output, sentence_labels, sentence_loss) return final_loss def get_transformer_encoder(bert_config, sequence_length, float_dtype=tf.float32): """Gets a 'TransformerEncoder' object. Args: bert_config: A 'modeling.BertConfig' or 'modeling.AlbertConfig' object. sequence_length: Maximum sequence length of the training data. float_dtype: tf.dtype, tf.float32 or tf.float16. Returns: A networks.TransformerEncoder object. 
""" kwargs = dict( vocab_size=bert_config.vocab_size, hidden_size=bert_config.hidden_size, num_layers=bert_config.num_hidden_layers, num_attention_heads=bert_config.num_attention_heads, intermediate_size=bert_config.intermediate_size, activation=tf_utils.get_activation(bert_config.hidden_act), dropout_rate=bert_config.hidden_dropout_prob, attention_dropout_rate=bert_config.attention_probs_dropout_prob, sequence_length=sequence_length, max_sequence_length=bert_config.max_position_embeddings, type_vocab_size=bert_config.type_vocab_size, initializer=tf.keras.initializers.TruncatedNormal( stddev=bert_config.initializer_range), float_dtype=float_dtype.name) if isinstance(bert_config, bert_modeling.AlbertConfig): kwargs['embedding_width'] = bert_config.embedding_size return networks.AlbertTransformerEncoder(**kwargs) else: assert isinstance(bert_config, bert_modeling.BertConfig) return networks.TransformerEncoder(**kwargs) def pretrain_model(bert_config, seq_length, max_predictions_per_seq, float_type, initializer=None): """Returns model to be used for pre-training. Args: bert_config: Configuration that defines the core BERT model. seq_length: Maximum sequence length of the training data. max_predictions_per_seq: Maximum number of tokens in sequence to mask out and use for pretraining. initializer: Initializer for weights in BertPretrainer. Returns: Pretraining model as well as core BERT submodel from which to save weights after pretraining. """ input_word_ids = tf.keras.layers.Input( shape=(seq_length,), name='input_word_ids', dtype=tf.int32) input_mask = tf.keras.layers.Input( shape=(seq_length,), name='input_mask', dtype=tf.int32) input_type_ids = tf.keras.layers.Input( shape=(seq_length,), name='input_type_ids', dtype=tf.int32) masked_lm_positions = tf.keras.layers.Input( shape=(max_predictions_per_seq,), name='masked_lm_positions', dtype=tf.int32) masked_lm_ids = tf.keras.layers.Input( shape=(max_predictions_per_seq,), name='masked_lm_ids', dtype=tf.int32) masked_lm_weights = tf.keras.layers.Input( shape=(max_predictions_per_seq,), name='masked_lm_weights', dtype=tf.int32) next_sentence_labels = tf.keras.layers.Input( shape=(1,), name='next_sentence_labels', dtype=tf.int32) transformer_encoder = get_transformer_encoder(bert_config, seq_length, float_type) if initializer is None: initializer = tf.keras.initializers.TruncatedNormal( stddev=bert_config.initializer_range) pretrainer_model = bert_pretrainer.BertPretrainer( network=transformer_encoder, num_classes=2, # The next sentence prediction label has two classes. 
num_token_predictions=max_predictions_per_seq, initializer=initializer, float_type=float_type, output='predictions') lm_output, sentence_output = pretrainer_model( [input_word_ids, input_mask, input_type_ids, masked_lm_positions]) pretrain_loss_layer = BertPretrainLossAndMetricLayer( vocab_size=bert_config.vocab_size) output_loss = pretrain_loss_layer(lm_output, sentence_output, masked_lm_ids, masked_lm_weights, next_sentence_labels) keras_model = tf.keras.Model( inputs={ 'input_word_ids': input_word_ids, 'input_mask': input_mask, 'input_type_ids': input_type_ids, 'masked_lm_positions': masked_lm_positions, 'masked_lm_ids': masked_lm_ids, 'masked_lm_weights': masked_lm_weights, 'next_sentence_labels': next_sentence_labels, }, outputs=output_loss) return keras_model, transformer_encoder class BertSquadLogitsLayer(tf.keras.layers.Layer): """Returns a layer that computes custom logits for BERT squad model.""" def __init__(self, initializer=None, float_type=tf.float32, **kwargs): super(BertSquadLogitsLayer, self).__init__(**kwargs) self.initializer = initializer self.float_type = float_type def build(self, unused_input_shapes): """Implements build() for the layer.""" self.final_dense = tf.keras.layers.Dense( units=2, kernel_initializer=self.initializer, name='final_dense') super(BertSquadLogitsLayer, self).build(unused_input_shapes) def call(self, inputs): """Implements call() for the layer.""" sequence_output = inputs input_shape = tf_utils.get_shape_list( sequence_output, name='sequence_output_tensor') sequence_length = input_shape[1] num_hidden_units = input_shape[2] final_hidden_input = tf.keras.backend.reshape(sequence_output, [-1, num_hidden_units]) logits = self.final_dense(final_hidden_input) logits = tf.keras.backend.reshape(logits, [-1, sequence_length, 2]) logits = tf.transpose(logits, [2, 0, 1]) unstacked_logits = tf.unstack(logits, axis=0) if self.float_type == tf.float16: unstacked_logits = tf.cast(unstacked_logits, tf.float32) return unstacked_logits[0], unstacked_logits[1] def squad_model(bert_config, max_seq_length, float_type, initializer=None, hub_module_url=None): """Returns BERT Squad model along with core BERT model to import weights. Args: bert_config: BertConfig, the config defines the core Bert model. max_seq_length: integer, the maximum input sequence length. float_type: tf.dtype, tf.float32 or tf.bfloat16. initializer: Initializer for the final dense layer in the span labeler. Defaulted to TruncatedNormal initializer. hub_module_url: TF-Hub path/url to Bert module. Returns: A tuple of (1) keras model that outputs start logits and end logits and (2) the core BERT transformer encoder. """ if initializer is None: initializer = tf.keras.initializers.TruncatedNormal( stddev=bert_config.initializer_range) if not hub_module_url: bert_encoder = get_transformer_encoder(bert_config, max_seq_length, float_type) return bert_span_labeler.BertSpanLabeler( network=bert_encoder, initializer=initializer), bert_encoder input_word_ids = tf.keras.layers.Input( shape=(max_seq_length,), dtype=tf.int32, name='input_word_ids') input_mask = tf.keras.layers.Input( shape=(max_seq_length,), dtype=tf.int32, name='input_mask') input_type_ids = tf.keras.layers.Input( shape=(max_seq_length,), dtype=tf.int32, name='input_type_ids') core_model = hub.KerasLayer(hub_module_url, trainable=True) _, sequence_output = core_model( [input_word_ids, input_mask, input_type_ids]) # Sets the shape manually due to a bug in TF shape inference. # TODO(hongkuny): remove this once shape inference is correct. 
sequence_output.set_shape((None, max_seq_length, bert_config.hidden_size)) squad_logits_layer = BertSquadLogitsLayer( initializer=initializer, float_type=float_type, name='squad_logits') start_logits, end_logits = squad_logits_layer(sequence_output) squad = tf.keras.Model( inputs={ 'input_word_ids': input_word_ids, 'input_mask': input_mask, 'input_type_ids': input_type_ids, }, outputs=[start_logits, end_logits], name='squad_model') return squad, core_model def classifier_model(bert_config, float_type, num_labels, max_seq_length, final_layer_initializer=None, hub_module_url=None): """BERT classifier model in functional API style. Construct a Keras model for predicting `num_labels` outputs from an input with maximum sequence length `max_seq_length`. Args: bert_config: BertConfig or AlbertConfig, the config defines the core BERT or ALBERT model. float_type: dtype, tf.float32 or tf.bfloat16. num_labels: integer, the number of classes. max_seq_length: integer, the maximum input sequence length. final_layer_initializer: Initializer for final dense layer. Defaulted TruncatedNormal initializer. hub_module_url: TF-Hub path/url to Bert module. Returns: Combined prediction model (words, mask, type) -> (one-hot labels) BERT sub-model (words, mask, type) -> (bert_outputs) """ if final_layer_initializer is not None: initializer = final_layer_initializer else: initializer = tf.keras.initializers.TruncatedNormal( stddev=bert_config.initializer_range) if not hub_module_url: bert_encoder = get_transformer_encoder(bert_config, max_seq_length) return bert_classifier.BertClassifier( bert_encoder, num_classes=num_labels, dropout_rate=bert_config.hidden_dropout_prob, initializer=initializer), bert_encoder input_word_ids = tf.keras.layers.Input( shape=(max_seq_length,), dtype=tf.int32, name='input_word_ids') input_mask = tf.keras.layers.Input( shape=(max_seq_length,), dtype=tf.int32, name='input_mask') input_type_ids = tf.keras.layers.Input( shape=(max_seq_length,), dtype=tf.int32, name='input_type_ids') bert_model = hub.KerasLayer(hub_module_url, trainable=True) pooled_output, _ = bert_model([input_word_ids, input_mask, input_type_ids]) output = tf.keras.layers.Dropout(rate=bert_config.hidden_dropout_prob)( pooled_output) output = tf.keras.layers.Dense( num_labels, kernel_initializer=initializer, name='output', dtype=float_type)( output) return tf.keras.Model( inputs={ 'input_word_ids': input_word_ids, 'input_mask': input_mask, 'input_type_ids': input_type_ids }, outputs=output), bert_model
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/official/nlp/bert_models.py
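# --- Illustrative sketch (not part of the repository) ---
# gather_indexes() above selects the hidden vectors at the masked-LM positions
# by flattening [batch, seq_length, width] to [batch * seq_length, width] and
# adding a per-example offset of b * seq_length to the position ids before a
# single tf.gather.  The toy sizes and position values below are assumptions
# chosen so the gathered rows are easy to verify by eye.
import tensorflow as tf

batch_size, seq_length, width = 2, 4, 3
# Fake "sequence output": row [b, s, :] is filled with the value b*seq_length + s.
sequence = tf.reshape(
    tf.repeat(tf.range(batch_size * seq_length), width),
    [batch_size, seq_length, width])
positions = tf.constant([[1, 3], [0, 2]])       # masked positions per example

flat_offsets = tf.reshape(tf.range(batch_size) * seq_length, [-1, 1])  # [[0], [4]]
flat_positions = tf.reshape(positions + flat_offsets, [-1])            # [1, 3, 4, 6]
flat_sequence = tf.reshape(sequence, [batch_size * seq_length, width])
print(tf.gather(flat_sequence, flat_positions).numpy())
# [[1 1 1] [3 3 3] [4 4 4] [6 6 6]] -> rows (b=0,s=1), (0,3), (1,0), (1,2)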
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Defines Transformer model parameters.""" from collections import defaultdict BASE_PARAMS = defaultdict( lambda: None, # Set default value to None. # Input params default_batch_size=2048, # Maximum number of tokens per batch of examples. default_batch_size_tpu=32768, max_length=256, # Maximum number of tokens per example. # Model params initializer_gain=1.0, # Used in trainable variable initialization. vocab_size=33708, # Number of tokens defined in the vocabulary file. hidden_size=512, # Model dimension in the hidden layers. num_hidden_layers=6, # Number of layers in the encoder and decoder stacks. num_heads=8, # Number of heads to use in multi-headed attention. filter_size=2048, # Inner layer dimension in the feedforward network. # Dropout values (only used when training) layer_postprocess_dropout=0.1, attention_dropout=0.1, relu_dropout=0.1, # Training params label_smoothing=0.1, learning_rate=2.0, learning_rate_decay_rate=1.0, learning_rate_warmup_steps=16000, # Optimizer params optimizer_adam_beta1=0.9, optimizer_adam_beta2=0.997, optimizer_adam_epsilon=1e-09, # Default prediction params extra_decode_length=50, beam_size=4, alpha=0.6, # used to calculate length normalization in beam search # TPU specific parameters use_tpu=False, static_batch=False, allow_ffn_pad=True, ) BIG_PARAMS = BASE_PARAMS.copy() BIG_PARAMS.update( default_batch_size=4096, # default batch size is smaller than for BASE_PARAMS due to memory limits. default_batch_size_tpu=16384, hidden_size=1024, filter_size=4096, num_heads=16, ) # Parameters for running the model in multi gpu. These should not change the # params that modify the model shape (such as the hidden_size or num_heads). BASE_MULTI_GPU_PARAMS = BASE_PARAMS.copy() BASE_MULTI_GPU_PARAMS.update( learning_rate_warmup_steps=8000 ) BIG_MULTI_GPU_PARAMS = BIG_PARAMS.copy() BIG_MULTI_GPU_PARAMS.update( layer_postprocess_dropout=0.3, learning_rate_warmup_steps=8000 ) # Parameters for testing the model TINY_PARAMS = BASE_PARAMS.copy() TINY_PARAMS.update( default_batch_size=1024, default_batch_size_tpu=1024, hidden_size=32, num_heads=4, filter_size=256, )
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/official/nlp/transformer/model_params.py
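# --- Illustrative sketch (not part of the repository) ---
# The parameter sets above are plain dictionaries: BASE_PARAMS is a defaultdict
# whose factory returns None (so undefined keys read as None instead of raising),
# and the BIG/MULTI_GPU/TINY variants are built with copy() + update().  The
# "base"/"my_params" names and keys below are illustrative assumptions.
from collections import defaultdict

base = defaultdict(lambda: None, hidden_size=512, num_heads=8)
my_params = base.copy()              # copy() keeps the default factory
my_params.update(hidden_size=1024)   # override one field, inherit the rest

print(my_params["hidden_size"])      # 1024 (overridden)
print(my_params["num_heads"])        # 8    (inherited)
print(my_params["not_defined"])      # None (default factory, no KeyError)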
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Transformer model helper methods.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import math import numpy as np import tensorflow as tf # Very low numbers to represent -infinity. We do not actually use -Inf, since we # want to be able to multiply these values by zero to get zero. (-Inf * 0 = NaN) _NEG_INF_FP32 = -1e9 _NEG_INF_FP16 = np.finfo(np.float16).min def get_position_encoding( length, hidden_size, min_timescale=1.0, max_timescale=1.0e4): """Return positional encoding. Calculates the position encoding as a mix of sine and cosine functions with geometrically increasing wavelengths. Defined and formulized in Attention is All You Need, section 3.5. Args: length: Sequence length. hidden_size: Size of the min_timescale: Minimum scale that will be applied at each position max_timescale: Maximum scale that will be applied at each position Returns: Tensor with shape [length, hidden_size] """ # We compute the positional encoding in float32 even if the model uses # float16, as many of the ops used, like log and exp, are numerically unstable # in float16. position = tf.cast(tf.range(length), tf.float32) num_timescales = hidden_size // 2 log_timescale_increment = ( math.log(float(max_timescale) / float(min_timescale)) / (tf.cast(num_timescales, tf.float32) - 1)) inv_timescales = min_timescale * tf.exp( tf.cast(tf.range(num_timescales), tf.float32) * -log_timescale_increment) scaled_time = tf.expand_dims(position, 1) * tf.expand_dims(inv_timescales, 0) signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1) return signal def get_decoder_self_attention_bias(length, dtype=tf.float32): """Calculate bias for decoder that maintains model's autoregressive property. Creates a tensor that masks out locations that correspond to illegal connections, so prediction at position i cannot draw information from future positions. Args: length: int length of sequences in batch. dtype: The dtype of the return value. Returns: float tensor of shape [1, 1, length, length] """ neg_inf = _NEG_INF_FP16 if dtype == tf.float16 else _NEG_INF_FP32 with tf.name_scope("decoder_self_attention_bias"): valid_locs = tf.linalg.band_part(tf.ones([length, length], dtype=dtype), -1, 0) valid_locs = tf.reshape(valid_locs, [1, 1, length, length]) decoder_bias = neg_inf * (1.0 - valid_locs) return decoder_bias def get_padding(x, padding_value=0, dtype=tf.float32): """Return float tensor representing the padding values in x. Args: x: int tensor with any shape padding_value: int which represents padded values in input dtype: The dtype of the return value. Returns: float tensor with same shape as x containing values 0 or 1. 
0 -> non-padding, 1 -> padding """ with tf.name_scope("padding"): return tf.cast(tf.equal(x, padding_value), dtype) def get_padding_bias(x, padding_value=0, dtype=tf.float32): """Calculate bias tensor from padding values in tensor. Bias tensor that is added to the pre-softmax multi-headed attention logits, which has shape [batch_size, num_heads, length, length]. The tensor is zero at non-padding locations, and -1e9 (negative infinity) at padding locations. Args: x: int tensor with shape [batch_size, length] padding_value: int which represents padded values in input dtype: The dtype of the return value Returns: Attention bias tensor of shape [batch_size, 1, 1, length]. """ with tf.name_scope("attention_bias"): padding = get_padding(x, padding_value, dtype) attention_bias = padding * _NEG_INF_FP32 attention_bias = tf.expand_dims( tf.expand_dims(attention_bias, axis=1), axis=1) return attention_bias
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/official/nlp/transformer/model_utils.py
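# --- Illustrative sketch (not part of the repository) ---
# get_position_encoding() above implements the sinusoidal encoding of
# "Attention is All You Need", section 3.5: half of the channels are sines and
# half cosines, with wavelengths in a geometric progression between
# min_timescale and max_timescale.  The NumPy re-derivation below uses toy
# sizes (assumptions); hidden_size must be even so the sin/cos halves
# concatenate to exactly hidden_size channels.
import numpy as np

length, hidden_size = 4, 8
min_timescale, max_timescale = 1.0, 1.0e4

position = np.arange(length, dtype=np.float32)
num_timescales = hidden_size // 2
log_increment = np.log(max_timescale / min_timescale) / (num_timescales - 1)
inv_timescales = min_timescale * np.exp(
    np.arange(num_timescales, dtype=np.float32) * -log_increment)
scaled_time = position[:, None] * inv_timescales[None, :]   # [length, num_timescales]
signal = np.concatenate([np.sin(scaled_time), np.cos(scaled_time)], axis=1)
print(signal.shape)   # (4, 8) -- [length, hidden_size], matching the TF version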
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/official/nlp/transformer/__init__.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Test Transformer model helper methods.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf from official.nlp.transformer import model_utils NEG_INF = -1e9 class ModelUtilsTest(tf.test.TestCase): def test_get_padding(self): x = tf.constant([[1, 0, 0, 0, 2], [3, 4, 0, 0, 0], [0, 5, 6, 0, 7]]) padding = model_utils.get_padding(x, padding_value=0) self.assertAllEqual([[0, 1, 1, 1, 0], [0, 0, 1, 1, 1], [1, 0, 0, 1, 0]], padding) def test_get_padding_bias(self): x = tf.constant([[1, 0, 0, 0, 2], [3, 4, 0, 0, 0], [0, 5, 6, 0, 7]]) bias = model_utils.get_padding_bias(x) bias_shape = tf.shape(bias) flattened_bias = tf.reshape(bias, [3, 5]) self.assertAllEqual([[0, NEG_INF, NEG_INF, NEG_INF, 0], [0, 0, NEG_INF, NEG_INF, NEG_INF], [NEG_INF, 0, 0, NEG_INF, 0]], flattened_bias) self.assertAllEqual([3, 1, 1, 5], bias_shape) def test_get_decoder_self_attention_bias(self): length = 5 bias = model_utils.get_decoder_self_attention_bias(length) self.assertAllEqual([[[[0, NEG_INF, NEG_INF, NEG_INF, NEG_INF], [0, 0, NEG_INF, NEG_INF, NEG_INF], [0, 0, 0, NEG_INF, NEG_INF], [0, 0, 0, 0, NEG_INF], [0, 0, 0, 0, 0]]]], bias) if __name__ == "__main__": assert tf.version.VERSION.startswith('2.') tf.test.main()
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/official/nlp/transformer/model_utils_test.py
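# --- Illustrative sketch (not part of the repository) ---
# The tests above check get_padding and get_padding_bias element by element.
# The bias shape [batch_size, 1, 1, length] matters because it is added to
# pre-softmax attention logits of shape [batch_size, num_heads, query_length,
# length]; the two singleton axes broadcast over heads and query positions.
# Toy shapes below are assumptions; -1e9 stands in for _NEG_INF_FP32.
import tensorflow as tf

batch, heads, q_len, length = 2, 3, 4, 5
x = tf.constant([[1, 0, 0, 0, 2],
                 [3, 4, 0, 0, 0]])                        # [batch, length], 0 = padding
padding = tf.cast(tf.equal(x, 0), tf.float32)             # 1.0 at padded positions
bias = padding[:, tf.newaxis, tf.newaxis, :] * -1e9       # [batch, 1, 1, length]

logits = tf.zeros([batch, heads, q_len, length])
probs = tf.nn.softmax(logits + bias)    # padded keys get ~0 attention probability
print(probs.shape)                      # (2, 3, 4, 5)
print(probs[0, 0, 0].numpy())           # ~[0.5 0. 0. 0. 0.5]: only non-padded positions get weight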
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Test beam search helper methods.""" import tensorflow.compat.v1 as tf from official.nlp.transformer import beam_search_v1 as beam_search class BeamSearchHelperTests(tf.test.TestCase): def setUp(self): super(BeamSearchHelperTests, self).setUp() tf.compat.v1.disable_eager_execution() def test_expand_to_beam_size(self): x = tf.ones([7, 4, 2, 5]) x = beam_search._expand_to_beam_size(x, 3) with self.session() as sess: shape = sess.run(tf.shape(x)) self.assertAllEqual([7, 3, 4, 2, 5], shape) def test_shape_list(self): y = tf.compat.v1.placeholder(dtype=tf.int32, shape=[]) x = tf.ones([7, y, 2, 5]) shape = beam_search._shape_list(x) self.assertIsInstance(shape[0], int) self.assertIsInstance(shape[1], tf.Tensor) self.assertIsInstance(shape[2], int) self.assertIsInstance(shape[3], int) def test_get_shape_keep_last_dim(self): y = tf.constant(4.0) x = tf.ones([7, tf.cast(tf.sqrt(y), tf.int32), 2, 5]) shape = beam_search._get_shape_keep_last_dim(x) self.assertAllEqual([None, None, None, 5], shape.as_list()) def test_flatten_beam_dim(self): x = tf.ones([7, 4, 2, 5]) x = beam_search._flatten_beam_dim(x) with self.session() as sess: shape = sess.run(tf.shape(x)) self.assertAllEqual([28, 2, 5], shape) def test_unflatten_beam_dim(self): x = tf.ones([28, 2, 5]) x = beam_search._unflatten_beam_dim(x, 7, 4) with self.session() as sess: shape = sess.run(tf.shape(x)) self.assertAllEqual([7, 4, 2, 5], shape) def test_gather_beams(self): x = tf.reshape(tf.range(24), [2, 3, 4]) # x looks like: [[[ 0 1 2 3] # [ 4 5 6 7] # [ 8 9 10 11]] # # [[12 13 14 15] # [16 17 18 19] # [20 21 22 23]]] y = beam_search._gather_beams(x, [[1, 2], [0, 2]], 2, 2) with self.session() as sess: y = sess.run(y) self.assertAllEqual([[[4, 5, 6, 7], [8, 9, 10, 11]], [[12, 13, 14, 15], [20, 21, 22, 23]]], y) def test_gather_topk_beams(self): x = tf.reshape(tf.range(24), [2, 3, 4]) x_scores = [[0, 1, 1], [1, 0, 1]] y = beam_search._gather_topk_beams(x, x_scores, 2, 2) with self.session() as sess: y = sess.run(y) self.assertAllEqual([[[4, 5, 6, 7], [8, 9, 10, 11]], [[12, 13, 14, 15], [20, 21, 22, 23]]], y) if __name__ == "__main__": tf.test.main()
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/official/nlp/transformer/beam_search_v1_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Beam search to find the translated sequence with the highest probability. Source implementation from Tensor2Tensor: https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/beam_search.py """ import numpy as np import tensorflow.compat.v1 as tf from tensorflow.python.util import nest def inf(dtype): """Returns a value close to infinity, but is still finite in `dtype`. This is useful to get a very large value that is still zero when multiplied by zero. The floating-point "Inf" value is NaN when multiplied by zero. Args: dtype: A dtype. The returned value will be finite when casted to this dtype. Returns: A very large value. """ if dtype == "float32" or dtype == "bfloat16": return 1e7 elif dtype == "float16": # Disable no-member lint error, as the linter thinks np.float16 does not # exist for some reason. return np.finfo(np.float16).max # pylint: disable=no-member else: raise AssertionError('Invalid dtype: %s' % dtype) class _StateKeys(object): """Keys to dictionary storing the state of the beam search loop.""" # Variable storing the loop index. CUR_INDEX = "CUR_INDEX" # Top sequences that are alive for each batch item. Alive sequences are ones # that have not generated an EOS token. Sequences that reach EOS are marked as # finished and moved to the FINISHED_SEQ tensor. # Has shape [batch_size, beam_size, CUR_INDEX + 1] ALIVE_SEQ = "ALIVE_SEQ" # Log probabilities of each alive sequence. Shape [batch_size, beam_size] ALIVE_LOG_PROBS = "ALIVE_LOG_PROBS" # Dictionary of cached values for each alive sequence. The cache stores # the encoder output, attention bias, and the decoder attention output from # the previous iteration. ALIVE_CACHE = "ALIVE_CACHE" # Top finished sequences for each batch item. # Has shape [batch_size, beam_size, CUR_INDEX + 1]. Sequences that are # shorter than CUR_INDEX + 1 are padded with 0s. FINISHED_SEQ = "FINISHED_SEQ" # Scores for each finished sequence. Score = log probability / length norm # Shape [batch_size, beam_size] FINISHED_SCORES = "FINISHED_SCORES" # Flags indicating which sequences in the finished sequences are finished. # At the beginning, all of the sequences in FINISHED_SEQ are filler values. # True -> finished sequence, False -> filler. Shape [batch_size, beam_size] FINISHED_FLAGS = "FINISHED_FLAGS" class SequenceBeamSearch(object): """Implementation of beam search loop.""" def __init__(self, symbols_to_logits_fn, vocab_size, batch_size, beam_size, alpha, max_decode_length, eos_id, padded_decode, dtype=tf.float32): """Initialize sequence beam search. Args: symbols_to_logits_fn: A function to provide logits, which is the interface to the Transformer model. The passed in arguments are: ids -> A tensor with shape [batch_size * beam_size, index]. index -> A scalar. cache -> A nested dictionary of tensors [batch_size * beam_size, ...]. 
The function must return a tuple of logits and the updated cache: logits -> A tensor with shape [batch * beam_size, vocab_size]. updated cache -> A nested dictionary with the same structure as the input cache. vocab_size: An integer, the size of the vocabulary, used for topk computation. batch_size: An integer, the decode batch size. beam_size: An integer, number of beams for beam search. alpha: A float, defining the strength of length normalization. max_decode_length: An integer, the maximum number of steps to decode a sequence. eos_id: An integer. ID of end of sentence token. padded_decode: A bool, indicating if max_sequence_length padding is used for beam search. dtype: A tensorflow data type used for score computation. The default is tf.float32. """ self.symbols_to_logits_fn = symbols_to_logits_fn self.vocab_size = vocab_size self.batch_size = batch_size self.beam_size = beam_size self.alpha = alpha self.max_decode_length = max_decode_length self.eos_id = eos_id self.padded_decode = padded_decode self.dtype = tf.as_dtype(dtype) def search(self, initial_ids, initial_cache): """Beam search for sequences with highest scores.""" state, state_shapes = self._create_initial_state(initial_ids, initial_cache) finished_state = tf.while_loop( self._continue_search, self._search_step, loop_vars=[state], shape_invariants=[state_shapes], parallel_iterations=1, back_prop=False) finished_state = finished_state[0] alive_seq = finished_state[_StateKeys.ALIVE_SEQ] alive_log_probs = finished_state[_StateKeys.ALIVE_LOG_PROBS] finished_seq = finished_state[_StateKeys.FINISHED_SEQ] finished_scores = finished_state[_StateKeys.FINISHED_SCORES] finished_flags = finished_state[_StateKeys.FINISHED_FLAGS] # Account for corner case where there are no finished sequences for a # particular batch item. In that case, return alive sequences for that batch # item. finished_seq = tf.where( tf.reduce_any(finished_flags, 1), finished_seq, alive_seq) finished_scores = tf.where( tf.reduce_any(finished_flags, 1), finished_scores, alive_log_probs) return finished_seq, finished_scores def _create_initial_state(self, initial_ids, initial_cache): """Return initial state dictionary and its shape invariants. Args: initial_ids: initial ids to pass into the symbols_to_logits_fn. int tensor with shape [batch_size, 1] initial_cache: dictionary storing values to be passed into the symbols_to_logits_fn. Returns: state and shape invariant dictionaries with keys from _StateKeys """ for key, value in initial_cache.items(): for inner_value in nest.flatten(value): if inner_value.dtype != self.dtype: raise TypeError( "initial_cache element for key '%s' has dtype %s that does not " "match SequenceBeamSearch's dtype of %s. Value: %s" % (key, value.dtype.name, self.dtype.name, inner_value)) # Current loop index (starts at 0) cur_index = tf.constant(0) # Create alive sequence with shape [batch_size, beam_size, 1] alive_seq = _expand_to_beam_size(initial_ids, self.beam_size) alive_seq = tf.expand_dims(alive_seq, axis=2) if self.padded_decode: alive_seq = tf.tile(alive_seq, [1, 1, self.max_decode_length + 1]) # Create tensor for storing initial log probabilities. # Assume initial_ids are prob 1.0 initial_log_probs = tf.constant( [[0.] + [-float("inf")] * (self.beam_size - 1)], dtype=self.dtype) alive_log_probs = tf.tile(initial_log_probs, [self.batch_size, 1]) # Expand all values stored in the dictionary to the beam size, so that each # beam has a separate cache. 
alive_cache = nest.map_structure( lambda t: _expand_to_beam_size(t, self.beam_size), initial_cache) # Initialize tensor storing finished sequences with filler values. finished_seq = tf.zeros(tf.shape(alive_seq), tf.int32) # Set scores of the initial finished seqs to negative infinity. finished_scores = tf.ones([self.batch_size, self.beam_size], dtype=self.dtype) * -inf(self.dtype) # Initialize finished flags with all False values. finished_flags = tf.zeros([self.batch_size, self.beam_size], tf.bool) # Create state dictionary state = { _StateKeys.CUR_INDEX: cur_index, _StateKeys.ALIVE_SEQ: alive_seq, _StateKeys.ALIVE_LOG_PROBS: alive_log_probs, _StateKeys.ALIVE_CACHE: alive_cache, _StateKeys.FINISHED_SEQ: finished_seq, _StateKeys.FINISHED_SCORES: finished_scores, _StateKeys.FINISHED_FLAGS: finished_flags } # Create state invariants for each value in the state dictionary. Each # dimension must be a constant or None. A None dimension means either: # 1) the dimension's value is a tensor that remains the same but may # depend on the input sequence to the model (e.g. batch size). # 2) the dimension may have different values on different iterations. if self.padded_decode: state_shape_invariants = { _StateKeys.CUR_INDEX: tf.TensorShape([]), _StateKeys.ALIVE_SEQ: tf.TensorShape( [self.batch_size, self.beam_size, self.max_decode_length + 1]), _StateKeys.ALIVE_LOG_PROBS: tf.TensorShape([self.batch_size, self.beam_size]), _StateKeys.ALIVE_CACHE: nest.map_structure(_get_shape, alive_cache), _StateKeys.FINISHED_SEQ: tf.TensorShape( [self.batch_size, self.beam_size, self.max_decode_length + 1]), _StateKeys.FINISHED_SCORES: tf.TensorShape([self.batch_size, self.beam_size]), _StateKeys.FINISHED_FLAGS: tf.TensorShape([self.batch_size, self.beam_size]) } else: state_shape_invariants = { _StateKeys.CUR_INDEX: tf.TensorShape([]), _StateKeys.ALIVE_SEQ: tf.TensorShape([None, self.beam_size, None]), _StateKeys.ALIVE_LOG_PROBS: tf.TensorShape([None, self.beam_size]), _StateKeys.ALIVE_CACHE: nest.map_structure(_get_shape_keep_last_dim, alive_cache), _StateKeys.FINISHED_SEQ: tf.TensorShape([None, self.beam_size, None]), _StateKeys.FINISHED_SCORES: tf.TensorShape([None, self.beam_size]), _StateKeys.FINISHED_FLAGS: tf.TensorShape([None, self.beam_size]) } return state, state_shape_invariants def _continue_search(self, state): """Return whether to continue the search loop. The loops should terminate when 1) when decode length has been reached, or 2) when the worst score in the finished sequences is better than the best score in the alive sequences (i.e. the finished sequences are provably unchanging) Args: state: A dictionary with the current loop state. Returns: Bool tensor with value True if loop should continue, False if loop should terminate. """ i = state[_StateKeys.CUR_INDEX] alive_log_probs = state[_StateKeys.ALIVE_LOG_PROBS] finished_scores = state[_StateKeys.FINISHED_SCORES] finished_flags = state[_StateKeys.FINISHED_FLAGS] not_at_max_decode_length = tf.less(i, self.max_decode_length) # Calculate largest length penalty (the larger penalty, the better score). max_length_norm = _length_normalization(self.alpha, self.max_decode_length, dtype=self.dtype) # Get the best possible scores from alive sequences. 
best_alive_scores = alive_log_probs[:, 0] / max_length_norm # Compute worst score in finished sequences for each batch element finished_scores *= tf.cast(finished_flags, self.dtype) # set filler scores to zero lowest_finished_scores = tf.reduce_min(finished_scores, axis=1) # If there are no finished sequences in a batch element, then set the lowest # finished score to -INF for that element. finished_batches = tf.reduce_any(finished_flags, 1) lowest_finished_scores += ((1.0 - tf.cast(finished_batches, self.dtype)) * -inf(self.dtype)) worst_finished_score_better_than_best_alive_score = tf.reduce_all( tf.greater(lowest_finished_scores, best_alive_scores) ) return tf.logical_and( not_at_max_decode_length, tf.logical_not(worst_finished_score_better_than_best_alive_score) ) def _search_step(self, state): """Beam search loop body. Grow alive sequences by a single ID. Sequences that have reached the EOS token are marked as finished. The alive and finished sequences with the highest log probabilities and scores are returned. A sequence's finished score is calculating by dividing the log probability by the length normalization factor. Without length normalization, the search is more likely to return shorter sequences. Args: state: A dictionary with the current loop state. Returns: new state dictionary. """ # Grow alive sequences by one token. new_seq, new_log_probs, topk_ids, new_cache = self._grow_alive_seq(state) new_finished_flags = tf.equal(topk_ids, self.eos_id) # Collect top beam_size alive sequences alive_state = self._get_new_alive_state(new_seq, new_log_probs, new_finished_flags, new_cache) # Combine newly finished sequences with existing finished sequences, and # collect the top k scoring sequences. finished_state = self._get_new_finished_state(state, new_seq, new_log_probs, new_finished_flags) # Increment loop index and create new state dictionary new_state = {_StateKeys.CUR_INDEX: state[_StateKeys.CUR_INDEX] + 1} new_state.update(alive_state) new_state.update(finished_state) return [new_state] def _grow_alive_seq(self, state): """Grow alive sequences by one token, and collect top 2*beam_size sequences. 2*beam_size sequences are collected because some sequences may have reached the EOS token. 2*beam_size ensures that at least beam_size sequences are still alive. Args: state: A dictionary with the current loop state. Returns: Tuple of (Top 2*beam_size sequences [batch_size, 2 * beam_size, cur_index + 1], Scores of returned sequences [batch_size, 2 * beam_size], New alive cache, for each of the 2 * beam_size sequences) """ i = state[_StateKeys.CUR_INDEX] alive_seq = state[_StateKeys.ALIVE_SEQ] alive_log_probs = state[_StateKeys.ALIVE_LOG_PROBS] alive_cache = state[_StateKeys.ALIVE_CACHE] beams_to_keep = 2 * self.beam_size # Get logits for the next candidate IDs for the alive sequences. Get the new # cache values at the same time. 
if self.padded_decode: flat_ids = tf.reshape( tf.slice(alive_seq, [0, 0, i], [self.batch_size, self.beam_size, 1]), [self.batch_size * self.beam_size, -1]) else: flat_ids = _flatten_beam_dim(alive_seq) # [batch_size * beam_size] flat_cache = nest.map_structure(_flatten_beam_dim, alive_cache) flat_logits, flat_cache = self.symbols_to_logits_fn(flat_ids, i, flat_cache) # Unflatten logits to shape [batch_size, beam_size, vocab_size] logits = _unflatten_beam_dim(flat_logits, self.batch_size, self.beam_size) new_cache = nest.map_structure( lambda t: _unflatten_beam_dim(t, self.batch_size, self.beam_size), flat_cache) # Convert logits to normalized log probs candidate_log_probs = _log_prob_from_logits(logits) # Calculate new log probabilities if each of the alive sequences were # extended # by the the candidate IDs. # Shape [batch_size, beam_size, vocab_size] log_probs = candidate_log_probs + tf.expand_dims(alive_log_probs, axis=2) # Each batch item has beam_size * vocab_size candidate sequences. For each # batch item, get the k candidates with the highest log probabilities. flat_log_probs = tf.reshape(log_probs, [-1, self.beam_size * self.vocab_size]) topk_log_probs, topk_indices = tf.nn.top_k(flat_log_probs, k=beams_to_keep) # Extract the alive sequences that generate the highest log probabilities # after being extended. topk_beam_indices = topk_indices // self.vocab_size topk_seq, new_cache = _gather_beams( [alive_seq, new_cache], topk_beam_indices, self.batch_size, beams_to_keep) # Append the most probable IDs to the topk sequences topk_ids = topk_indices % self.vocab_size if self.padded_decode: topk_seq = tf.transpose(topk_seq, perm=[2, 0, 1]) # TODO(b/145533236, hongkuny): Reverts once TF fix the validation. topk_seq = tf.tensor_scatter_nd_update(topk_seq, [[i + 1]], tf.expand_dims(topk_ids, axis=0)) topk_seq = tf.transpose(topk_seq, perm=[1, 2, 0]) else: topk_seq = tf.concat([topk_seq, tf.expand_dims(topk_ids, axis=2)], axis=2) return topk_seq, topk_log_probs, topk_ids, new_cache def _get_new_alive_state(self, new_seq, new_log_probs, new_finished_flags, new_cache): """Gather the top k sequences that are still alive. Args: new_seq: New sequences generated by growing the current alive sequences int32 tensor with shape [batch_size, 2 * beam_size, cur_index + 1] new_log_probs: Log probabilities of new sequences float32 tensor with shape [batch_size, beam_size] new_finished_flags: A boolean Tensor indicates which sequences are live inside the beam. new_cache: Dict of cached values for each sequence. Returns: Dictionary with alive keys from _StateKeys: {Top beam_size sequences that are still alive (don't end with eos_id) Log probabilities of top alive sequences Dict cache storing decoder states for top alive sequences} """ # To prevent finished sequences from being considered, set log probs to -inf new_log_probs += tf.cast(new_finished_flags, self.dtype) * -inf(self.dtype) top_alive_seq, top_alive_log_probs, top_alive_cache = _gather_topk_beams( [new_seq, new_log_probs, new_cache], new_log_probs, self.batch_size, self.beam_size) return { _StateKeys.ALIVE_SEQ: top_alive_seq, _StateKeys.ALIVE_LOG_PROBS: top_alive_log_probs, _StateKeys.ALIVE_CACHE: top_alive_cache } def _get_new_finished_state(self, state, new_seq, new_log_probs, new_finished_flags): """Combine new and old finished sequences, and gather the top k sequences. Args: state: A dictionary with the current loop state. 
new_seq: New sequences generated by growing the current alive sequences int32 tensor with shape [batch_size, beam_size, i + 1] new_log_probs: Log probabilities of new sequences float32 tensor with shape [batch_size, beam_size] new_finished_flags: A boolean Tensor indicates which sequences are live inside the beam. Returns: Dictionary with finished keys from _StateKeys: {Top beam_size finished sequences based on score, Scores of finished sequences, Finished flags of finished sequences} """ i = state[_StateKeys.CUR_INDEX] finished_seq = state[_StateKeys.FINISHED_SEQ] finished_scores = state[_StateKeys.FINISHED_SCORES] finished_flags = state[_StateKeys.FINISHED_FLAGS] # First append a column of 0-ids to finished_seq to increment the length. # New shape of finished_seq: [batch_size, beam_size, i + 1] if not self.padded_decode: finished_seq = tf.concat([ finished_seq, tf.zeros([self.batch_size, self.beam_size, 1], tf.int32) ], axis=2) # Calculate new seq scores from log probabilities. length_norm = _length_normalization(self.alpha, i + 1, dtype=self.dtype) new_scores = new_log_probs / length_norm # Set the scores of the still-alive seq in new_seq to large negative values. new_scores += ((1. - tf.cast(new_finished_flags, self.dtype)) * -inf(self.dtype)) # Combine sequences, scores, and flags. finished_seq = tf.concat([finished_seq, new_seq], axis=1) finished_scores = tf.concat([finished_scores, new_scores], axis=1) finished_flags = tf.concat([finished_flags, new_finished_flags], axis=1) # Return the finished sequences with the best scores. top_finished_seq, top_finished_scores, top_finished_flags = ( _gather_topk_beams([finished_seq, finished_scores, finished_flags], finished_scores, self.batch_size, self.beam_size)) return { _StateKeys.FINISHED_SEQ: top_finished_seq, _StateKeys.FINISHED_SCORES: top_finished_scores, _StateKeys.FINISHED_FLAGS: top_finished_flags } def sequence_beam_search( symbols_to_logits_fn, initial_ids, initial_cache, vocab_size, beam_size, alpha, max_decode_length, eos_id, padded_decode=False): """Search for sequence of subtoken ids with the largest probability. Args: symbols_to_logits_fn: A function that takes in ids, index, and cache as arguments. The passed in arguments will have shape: ids -> A tensor with shape [batch_size * beam_size, index]. index -> A scalar. cache -> A nested dictionary of tensors [batch_size * beam_size, ...]. The function must return a tuple of logits and new cache: logits -> A tensor with shape [batch * beam_size, vocab_size]. new cache -> A nested dictionary with the same shape/structure as the inputted cache. initial_ids: An int32 tensor with shape [batch_size]. Starting ids for each batch item. initial_cache: A dictionary, containing starting decoder variables information. vocab_size: An integer, the size of the vocabulary, used for topk computation. beam_size: An integer, the number of beams. alpha: A float, defining the strength of length normalization. max_decode_length: An integer, the maximum length to decoded a sequence. eos_id: An integer, ID of eos token, used to determine when a sequence has finished. padded_decode: A bool, indicating if max_sequence_length padding is used for beam search. 
Returns: Top decoded sequences [batch_size, beam_size, max_decode_length] sequence scores [batch_size, beam_size] """ batch_size = ( initial_ids.shape.as_list()[0] if padded_decode else tf.shape(initial_ids)[0]) sbs = SequenceBeamSearch(symbols_to_logits_fn, vocab_size, batch_size, beam_size, alpha, max_decode_length, eos_id, padded_decode) return sbs.search(initial_ids, initial_cache) def _log_prob_from_logits(logits): return logits - tf.reduce_logsumexp(logits, axis=2, keepdims=True) def _length_normalization(alpha, length, dtype=tf.float32): """Return length normalization factor.""" return tf.pow(((5. + tf.cast(length, dtype)) / 6.), alpha) def _expand_to_beam_size(tensor, beam_size): """Tiles a given tensor by beam_size. Args: tensor: tensor to tile [batch_size, ...] beam_size: How much to tile the tensor by. Returns: Tiled tensor [batch_size, beam_size, ...] """ tensor = tf.expand_dims(tensor, axis=1) tile_dims = [1] * tensor.shape.ndims tile_dims[1] = beam_size return tf.tile(tensor, tile_dims) def _shape_list(tensor): """Return a list of the tensor's shape, and ensure no None values in list.""" # Get statically known shape (may contain None's for unknown dimensions) shape = tensor.get_shape().as_list() # Ensure that the shape values are not None dynamic_shape = tf.shape(tensor) for i in range(len(shape)): # pylint: disable=consider-using-enumerate if shape[i] is None: shape[i] = dynamic_shape[i] return shape def _get_shape_keep_last_dim(tensor): shape_list = _shape_list(tensor) # Only the last for i in range(len(shape_list) - 1): shape_list[i] = None if isinstance(shape_list[-1], tf.Tensor): shape_list[-1] = None return tf.TensorShape(shape_list) def _get_shape(tensor): """Return the shape of the input tensor.""" return tf.TensorShape(_shape_list(tensor)) def _flatten_beam_dim(tensor): """Reshapes first two dimensions in to single dimension. Args: tensor: Tensor to reshape of shape [A, B, ...] Returns: Reshaped tensor of shape [A*B, ...] """ shape = _shape_list(tensor) shape[0] *= shape[1] shape.pop(1) # Remove beam dim return tf.reshape(tensor, shape) def _unflatten_beam_dim(tensor, batch_size, beam_size): """Reshapes first dimension back to [batch_size, beam_size]. Args: tensor: Tensor to reshape of shape [batch_size*beam_size, ...] batch_size: Tensor, original batch size. beam_size: int, original beam size. Returns: Reshaped tensor of shape [batch_size, beam_size, ...] """ shape = _shape_list(tensor) new_shape = [batch_size, beam_size] + shape[1:] return tf.reshape(tensor, new_shape) def _gather_beams(nested, beam_indices, batch_size, new_beam_size): """Gather beams from nested structure of tensors. Each tensor in nested represents a batch of beams, where beam refers to a single search state (beam search involves searching through multiple states in parallel). This function is used to gather the top beams, specified by beam_indices, from the nested tensors. Args: nested: Nested structure (tensor, list, tuple or dict) containing tensors with shape [batch_size, beam_size, ...]. beam_indices: int32 tensor with shape [batch_size, new_beam_size]. Each value in beam_indices must be between [0, beam_size), and are not necessarily unique. batch_size: int size of batch new_beam_size: int number of beams to be pulled from the nested tensors. Returns: Nested structure containing tensors with shape [batch_size, new_beam_size, ...] """ # Computes the i'th coodinate that contains the batch index for gather_nd. # Batch pos is a tensor like [[0,0,0,0,],[1,1,1,1],..]. 
  batch_pos = tf.range(batch_size * new_beam_size) // new_beam_size
  batch_pos = tf.reshape(batch_pos, [batch_size, new_beam_size])

  # Create coordinates to be passed to tf.gather_nd. Stacking creates a tensor
  # with shape [batch_size, new_beam_size, 2], where the last dimension
  # contains the (i, j) gathering coordinates.
  coordinates = tf.stack([batch_pos, beam_indices], axis=2)

  return nest.map_structure(
      lambda state: tf.gather_nd(state, coordinates), nested)


def _gather_topk_beams(nested, score_or_log_prob, batch_size, beam_size):
  """Gather top beams from nested structure."""
  _, topk_indexes = tf.nn.top_k(score_or_log_prob, k=beam_size)
  return _gather_beams(nested, topk_indexes, batch_size, beam_size)
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/official/nlp/transformer/beam_search_v1.py
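To make the `sequence_beam_search` contract in the file above concrete, here is a minimal usage sketch. It is not part of the repository file; it assumes the `official` package is importable under the path shown, and the uniform-logits `symbols_to_logits_fn`, the empty cache, and the chosen eos_id are illustrative placeholders only.

# Illustrative sketch only (not from the repository): drive sequence_beam_search
# with a dummy model that assigns uniform logits to every vocabulary entry.
import tensorflow as tf

from official.nlp.transformer import beam_search_v1 as beam_search

vocab_size = 6
batch_size = 2
beam_size = 3

def uniform_symbols_to_logits_fn(ids, index, cache):
  # ids: [batch_size * beam_size, current_length]. Return uniform logits of
  # shape [batch_size * beam_size, vocab_size] and pass the cache through.
  del index  # The dummy model ignores the decoding position.
  logits = tf.zeros([tf.shape(ids)[0], vocab_size], dtype=tf.float32)
  return logits, cache

decoded_ids, scores = beam_search.sequence_beam_search(
    symbols_to_logits_fn=uniform_symbols_to_logits_fn,
    initial_ids=tf.zeros([batch_size], dtype=tf.int32),
    initial_cache={},
    vocab_size=vocab_size,
    beam_size=beam_size,
    alpha=0.6,
    max_decode_length=5,
    eos_id=1)
# Per the docstring: decoded_ids is [batch_size, beam_size, decoded_length]
# and scores is [batch_size, beam_size].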
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/official/nlp/modeling/__init__.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for masked LM loss.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import tensorflow as tf from tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import from official.nlp.modeling import networks from official.nlp.modeling.losses import weighted_sparse_categorical_crossentropy @keras_parameterized.run_all_keras_modes class ClassificationLossTest(keras_parameterized.TestCase): def create_lm_model(self, vocab_size, sequence_length, hidden_size, num_predictions, output="predictions"): # First, create a transformer stack that we can use to get the LM's # vocabulary weight. xformer_stack = networks.TransformerEncoder( vocab_size=vocab_size, num_layers=1, sequence_length=sequence_length, hidden_size=hidden_size, num_attention_heads=4, ) word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) lm_outputs, _ = xformer_stack([word_ids, mask, type_ids]) # Create a maskedLM from the transformer stack. test_network = networks.MaskedLM( num_predictions=num_predictions, input_width=lm_outputs.shape[-1], source_network=xformer_stack, output=output) # Create a model from the masked LM layer. lm_input_tensor = tf.keras.Input(shape=(sequence_length, hidden_size)) masked_lm_positions = tf.keras.Input( shape=(num_predictions,), dtype=tf.int32) output = test_network([lm_input_tensor, masked_lm_positions]) return tf.keras.Model([lm_input_tensor, masked_lm_positions], output) def create_classification_model(self, input_width, num_classes): test_object = networks.Classification( input_width=input_width, num_classes=num_classes) # Create a 2-dimensional input (the first dimension is implicit). pooled_data = tf.keras.Input(shape=(input_width,), dtype=tf.float32) output = test_object(pooled_data) return tf.keras.Model(pooled_data, output) def test_per_example_loss_3d_input(self): """Test per-example loss with a 3-dimensional input, from a masked LM.""" vocab_size = 100 sequence_length = 32 hidden_size = 64 num_predictions = 21 model = self.create_lm_model( vocab_size=vocab_size, sequence_length=sequence_length, hidden_size=hidden_size, num_predictions=num_predictions) # Get the output of the masked LM. batch_size = 3 lm_input_data = 10 * np.random.random_sample( (batch_size, sequence_length, hidden_size)) masked_position_data = np.random.randint( 2, size=(batch_size, num_predictions)) output_data = model.predict([lm_input_data, masked_position_data]) # Calculate per-example loss. 
labels = np.random.randint(vocab_size, size=(batch_size, num_predictions)) per_example_loss_data = weighted_sparse_categorical_crossentropy.per_example_loss( predictions=output_data, labels=labels) # Per-example loss data should have one value per prediction, and those # values shouldn't be zero in this case (as we're using random data). expected_shape = [batch_size, num_predictions] self.assertEqual(expected_shape, per_example_loss_data.shape.as_list()) self.assertNotAllClose( tf.zeros_like(per_example_loss_data), per_example_loss_data) def test_per_example_loss_2d_input(self): """Test per-example loss with a 2-d input, from a classifier.""" input_width = 512 num_classes = 10 model = self.create_classification_model(input_width, num_classes) # Invoke the network as part of a Model. batch_size = 3 input_data = 10 * np.random.random_sample((batch_size, input_width)) output_data = model.predict(input_data) # Calculate per example loss. labels = np.random.randint(num_classes, size=(batch_size)) per_example_loss_data = weighted_sparse_categorical_crossentropy.per_example_loss( predictions=output_data, labels=labels) # Per-example loss data should have one value per batch item, and those # values shouldn't be zero in this case (as we're using random data). self.assertEqual([batch_size], per_example_loss_data.shape.as_list()) self.assertNotAllClose( tf.zeros_like(per_example_loss_data), per_example_loss_data) def test_per_example_loss_weights_3d_input(self): """Test weighted per-example loss with a 3-d input, from a masked LM.""" vocab_size = 100 sequence_length = 32 hidden_size = 64 num_predictions = 21 model = self.create_lm_model( vocab_size=vocab_size, sequence_length=sequence_length, hidden_size=hidden_size, num_predictions=num_predictions) # Get the output of the masked LM. batch_size = 3 lm_input_data = 10 * np.random.random_sample( (batch_size, sequence_length, hidden_size)) masked_position_data = np.random.randint( 2, size=(batch_size, num_predictions)) output_data = model.predict([lm_input_data, masked_position_data]) # Calculate per-example loss with weights. labels = np.random.randint(vocab_size, size=(batch_size, num_predictions)) weights = np.random.randint(2, size=(batch_size, num_predictions)) per_example_loss_data = weighted_sparse_categorical_crossentropy.per_example_loss( predictions=output_data, labels=labels, weights=weights) # Weighted per-example loss data should be equivalent to multiplying the # loss tensor by the weights tensor. expected_weighted_loss = per_example_loss_data * weights self.assertAllClose(expected_weighted_loss, per_example_loss_data) def test_per_example_loss_weights_2d_input(self): """Test weighted per-example loss with a 2-d input, from a classifier.""" input_width = 512 num_classes = 10 model = self.create_classification_model(input_width, num_classes) # Invoke the network as part of a Model. batch_size = 3 input_data = 10 * np.random.random_sample((batch_size, input_width)) output_data = model.predict(input_data) # Calculate per-example loss with weights. labels = np.random.randint(num_classes, size=(batch_size)) weights = np.random.randint(2, size=(batch_size)) per_example_loss_data = weighted_sparse_categorical_crossentropy.per_example_loss( predictions=output_data, labels=labels, weights=weights) # Weighted per-example loss data should be equivalent to multiplying the # loss tensor by the weights tensor. 
expected_weighted_loss = per_example_loss_data * weights self.assertAllClose(expected_weighted_loss, per_example_loss_data) def test_loss_3d_input(self): """Test overall loss with a 3-dimensional input, from a masked LM.""" vocab_size = 100 sequence_length = 32 hidden_size = 64 num_predictions = 21 model = self.create_lm_model( vocab_size=vocab_size, sequence_length=sequence_length, hidden_size=hidden_size, num_predictions=num_predictions) # Get the output of the masked LM. batch_size = 3 lm_input_data = 10 * np.random.random_sample( (batch_size, sequence_length, hidden_size)) masked_position_data = np.random.randint( 2, size=(batch_size, num_predictions)) output_data = model.predict([lm_input_data, masked_position_data]) # Calculate loss. labels = np.random.randint(vocab_size, size=(batch_size, num_predictions)) weights = np.random.randint(2, size=(batch_size, num_predictions)) per_example_loss_data = weighted_sparse_categorical_crossentropy.loss( predictions=output_data, labels=labels, weights=weights) # Total loss data should have one value, and that value shouldn't be zero # in this case (as we're using random data). expected_shape = [] # Scalar self.assertEqual(expected_shape, per_example_loss_data.shape.as_list()) self.assertNotAllClose( tf.zeros_like(per_example_loss_data), per_example_loss_data) def test_loss_2d_input(self): """Test overall loss with a 2-d input, from a classifier.""" input_width = 512 num_classes = 10 model = self.create_classification_model(input_width, num_classes) # Invoke the network as part of a Model. batch_size = 3 input_data = 10 * np.random.random_sample((batch_size, input_width)) output_data = model.predict(input_data) # Calculate per example loss. labels = np.random.randint(num_classes, size=(batch_size)) loss_data = weighted_sparse_categorical_crossentropy.loss( predictions=output_data, labels=labels) # Loss data should have one value only, and that value shouldn't be zero in # this case (as we're using random data). self.assertNotAllClose(0, loss_data) def test_loss_weights_3d_input(self): """Test masked loss with a 3-dimensional input, from a masked LM.""" vocab_size = 100 sequence_length = 32 hidden_size = 64 num_predictions = 21 model = self.create_lm_model( vocab_size=vocab_size, sequence_length=sequence_length, hidden_size=hidden_size, num_predictions=num_predictions) # Get the output of the masked LM. batch_size = 3 lm_input_data = 10 * np.random.random_sample( (batch_size, sequence_length, hidden_size)) masked_position_data = np.random.randint( 2, size=(batch_size, num_predictions)) output_data = model.predict([lm_input_data, masked_position_data]) # Calculate a fully masked weight tensor. This should give a loss of zero. labels = np.random.randint(vocab_size, size=(batch_size, num_predictions)) null_weights = np.zeros((batch_size, num_predictions)) weighted_loss_data = weighted_sparse_categorical_crossentropy.loss( predictions=output_data, labels=labels, weights=null_weights) # Because the tensor is fully masked, the loss should be 0. self.assertAllClose(0, weighted_loss_data) def test_loss_weights_2d_input(self): """Test masked loss with a 2-d input, from a classifier.""" input_width = 512 num_classes = 10 model = self.create_classification_model(input_width, num_classes) # Invoke the network as part of a Model. batch_size = 3 input_data = 10 * np.random.random_sample((batch_size, input_width)) output_data = model.predict(input_data) # Calculate a fully masked weight tensor. This should give a loss of zero. 
labels = np.random.randint(num_classes, size=(batch_size)) null_weights = np.zeros((batch_size)) weighted_loss_data = weighted_sparse_categorical_crossentropy.loss( predictions=output_data, labels=labels, weights=null_weights) # Because the tensor is fully masked, the loss should be 0. self.assertAllClose(0, weighted_loss_data) def test_mismatched_predictions_and_labels_ranks_squeezes(self): """Test that the loss asserts when rank(predictions)-1 != rank(labels).""" batch_size = 3 output_data = np.random.random_sample((batch_size, 10)) labels = np.random.randint(10, size=(batch_size, 1)) # All that this test tests is that the squeeze is successful. _ = weighted_sparse_categorical_crossentropy.per_example_loss( predictions=output_data, labels=labels) def test_mismatched_weights_and_labels_ranks_fail(self): """Test that the loss asserts when rank(predictions) != rank(labels).""" batch_size = 3 output_data = np.random.random_sample((batch_size, 10, 15)) labels = np.random.randint(10, size=(batch_size, 10)) weights = np.random.randint(2, size=(batch_size)) with self.assertRaisesRegex(RuntimeError, ".*of the same rank.*"): _ = weighted_sparse_categorical_crossentropy.per_example_loss( predictions=output_data, labels=labels, weights=weights) with self.assertRaisesRegex(RuntimeError, ".*of the same rank.*"): _ = weighted_sparse_categorical_crossentropy.loss( predictions=output_data, labels=labels, weights=weights) def test_tf_tensor_inputs(self): """Test that tf.Tensors can be used as inputs to the loss function.""" batch_size = 3 output_data = tf.convert_to_tensor( np.random.random_sample((batch_size, 10, 15))) labels = tf.convert_to_tensor(np.random.randint(10, size=(batch_size, 10))) weights = tf.convert_to_tensor(np.random.randint(2, size=(batch_size, 10))) # We're not trying to validate numerical correctness, just ensure that # we can in fact pass tensors to these functions without causing runtime # errors from the shape checking code. _ = weighted_sparse_categorical_crossentropy.per_example_loss( predictions=output_data, labels=labels, weights=weights) _ = weighted_sparse_categorical_crossentropy.loss( predictions=output_data, labels=labels, weights=weights) def test_legacy_lm_loss_compatibility(self): """Test to validate computational correctness during refactors.""" # This is the empirical output of a masked LM with the following parameters: # batch_size = 3 # vocab_size = 5 # sequence_length = 4 # num_predictions = 2 output_data = np.array( [[[-2.5286622, -1.0963473, -1.4925185, -2.4451098, -1.2923571], [-2.7117882, -1.1205841, -4.02187, -0.9966936, -1.5119683]], [[-2.5379114, -0.82479054, -2.287932, -1.3747153, -2.053741], [-2.5379114, -0.82479054, -2.287932, -1.3747153, -2.053741]], [[-2.7760355, -1.8219438, -3.0924666, -1.0779881, -0.9407509], [-2.7760355, -1.8219438, -3.0924666, -1.0779881, -0.9407509]]]) labels = np.array([[4, 0], [2, 2], [2, 1]]) # Validate that per_example loss calculations are the same. per_example_loss_data = weighted_sparse_categorical_crossentropy.per_example_loss( predictions=output_data, labels=labels) expected_per_example_loss_data = [[1.2923571, 2.7117882], [2.287932, 2.287932], [3.0924666, 1.8219438]] self.assertAllClose(expected_per_example_loss_data, per_example_loss_data) # Validate that overall loss calculations are the same. 
weights = np.array([[1, 0], [0, 0], [0, 0]]) loss_data = weighted_sparse_categorical_crossentropy.loss( predictions=output_data, labels=labels, weights=weights) expected_loss_data = 1.2923441 self.assertAllClose(expected_loss_data, loss_data) def test_legacy_classification_loss_compatibility(self): """Test to validate computational correctness during refactors.""" # This is the empirical output of a classifier with the following params: # batch_size = 2 # num_classes = 3 output_data = np.array([[-1.6094601e-03, -1.0966038e+01, -6.4434357e+00], [-1.6975292e-03, -6.4009643e+00, -1.0226612e+01]]) labels = np.array([2, 1]) # Validate that per_example loss calculations are the same. per_example_loss_data = weighted_sparse_categorical_crossentropy.per_example_loss( predictions=output_data, labels=labels) expected_per_example_loss_data = [6.4434357, 6.4009643] self.assertAllClose(expected_per_example_loss_data, per_example_loss_data) # Validate that overall loss calculations are the same. weights = None loss_data = weighted_sparse_categorical_crossentropy.loss( predictions=output_data, labels=labels, weights=weights) expected_loss_data = 6.4222 self.assertAllClose(expected_loss_data, loss_data) if __name__ == "__main__": tf.test.main()
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/official/nlp/modeling/losses/weighted_sparse_categorical_crossentropy_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Losses package definition. Subject to change."""
from official.nlp.modeling.losses.weighted_sparse_categorical_crossentropy import loss as weighted_sparse_categorical_crossentropy_loss
from official.nlp.modeling.losses.weighted_sparse_categorical_crossentropy import per_example_loss as weighted_sparse_categorical_crossentropy_per_example_loss
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/official/nlp/modeling/losses/__init__.py
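A tiny sketch, illustrative only and assuming the `official` package is importable, showing that the two aliases exported by the `__init__.py` above are simply the module-level functions under longer names:

# Illustrative sketch only: the package-level aliases re-export the functions
# defined in weighted_sparse_categorical_crossentropy.py.
from official.nlp.modeling import losses
from official.nlp.modeling.losses import weighted_sparse_categorical_crossentropy

assert losses.weighted_sparse_categorical_crossentropy_loss is (
    weighted_sparse_categorical_crossentropy.loss)
assert losses.weighted_sparse_categorical_crossentropy_per_example_loss is (
    weighted_sparse_categorical_crossentropy.per_example_loss)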
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sparse categorical cross-entropy losses."""

from __future__ import absolute_import
from __future__ import division
# from __future__ import google_type_annotations
from __future__ import print_function

import tensorflow as tf


def _adjust_labels(labels, predictions):
  """Adjust the 'labels' tensor by squeezing it if needed."""
  labels = tf.cast(labels, tf.int32)
  if len(predictions.shape) == len(labels.shape):
    labels = tf.squeeze(labels, [-1])
  return labels, predictions


def _validate_rank(labels, predictions, weights):
  if weights is not None and len(weights.shape) != len(labels.shape):
    raise RuntimeError(
        ("Weight and label tensors were not of the same rank. weights.shape "
         "was %s, and labels.shape was %s.") % (weights.shape, labels.shape))
  if (len(predictions.shape) - 1) != len(labels.shape):
    raise RuntimeError(
        ("Weighted sparse categorical crossentropy expects `labels` to have a "
         "rank of one less than `predictions`. labels.shape was %s, and "
         "predictions.shape was %s.") % (labels.shape, predictions.shape))


def per_example_loss(labels, predictions, weights=None):
  """Calculate a per-example sparse categorical crossentropy loss.

  This loss function assumes that the predictions are post-log-softmax
  (i.e. log probabilities).

  Args:
    labels: The labels to evaluate against. Should be a set of integer indices
      ranging from 0 to (vocab_size-1).
    predictions: The network predictions. Should have log softmax already
      applied.
    weights: An optional weight array of the same shape as the 'labels' array.
      If None, all examples will be used.

  Returns:
    A tensor of shape predictions.shape[:-1] containing the per-example loss.
  """
  # When using these functions with the Keras core API, we will need to squeeze
  # the labels tensor - Keras adds a spurious inner dimension.
  labels, predictions = _adjust_labels(labels, predictions)
  _validate_rank(labels, predictions, weights)

  labels_one_hot = tf.keras.backend.one_hot(labels, predictions.shape[-1])
  labels_one_hot = tf.keras.backend.cast(labels_one_hot, predictions.dtype)
  per_example_loss_data = -tf.keras.backend.sum(
      predictions * labels_one_hot, axis=[-1])
  if weights is not None:
    weights = tf.keras.backend.cast(weights, per_example_loss_data.dtype)
    per_example_loss_data = weights * per_example_loss_data
  return per_example_loss_data


def loss(labels, predictions, weights=None):
  """Calculate a per-batch sparse categorical crossentropy loss.

  This loss function assumes that the predictions are post-log-softmax
  (i.e. log probabilities).

  Args:
    labels: The labels to evaluate against. Should be a set of integer indices
      ranging from 0 to (vocab_size-1).
    predictions: The network predictions. Should have log softmax already
      applied.
    weights: An optional weight array of the same shape as the 'labels' array.
      If None, all examples will be used.

  Returns:
    A loss scalar.

  Raises:
    RuntimeError if the passed tensors do not have the same rank.
""" # When using these functions with the Keras core API, we will need to squeeze # the labels tensor - Keras adds a spurious inner dimension. labels, predictions = _adjust_labels(labels, predictions) _validate_rank(labels, predictions, weights) per_example_loss_data = per_example_loss(labels, predictions, weights) if weights is None: return tf.keras.backend.mean(per_example_loss_data) else: numerator = tf.keras.backend.sum(per_example_loss_data) weights = tf.keras.backend.cast(weights, predictions.dtype) denominator = tf.keras.backend.sum(weights) + 1e-5 return numerator / denominator
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/official/nlp/modeling/losses/weighted_sparse_categorical_crossentropy.py
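For a concrete sense of the shapes involved, here is a small sketch of calling both entry points of the module above. It is illustrative only (random data, assumed import path) and feeds log probabilities, matching what the legacy-compatibility tests earlier in this dump pass in.

# Illustrative sketch only (not from the repository): per-example and reduced
# losses on random log-probability predictions.
import numpy as np
import tensorflow as tf

from official.nlp.modeling.losses import weighted_sparse_categorical_crossentropy

batch_size, num_predictions, vocab_size = 3, 5, 11
logits = tf.random.normal([batch_size, num_predictions, vocab_size])
predictions = tf.nn.log_softmax(logits, axis=-1)  # log probabilities
labels = np.random.randint(vocab_size, size=(batch_size, num_predictions))
weights = np.random.randint(2, size=(batch_size, num_predictions))

# Shape [batch_size, num_predictions]: one loss value per masked position.
per_example = weighted_sparse_categorical_crossentropy.per_example_loss(
    labels=labels, predictions=predictions, weights=weights)
# Scalar: weighted sum of per-example losses divided by the total weight.
total = weighted_sparse_categorical_crossentropy.loss(
    labels=labels, predictions=predictions, weights=weights)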
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Keras-based masked softmax layer.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import tensorflow as tf from tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import from official.nlp.modeling.layers import masked_softmax # This decorator runs the test in V1, V2-Eager, and V2-Functional mode. It # guarantees forward compatibility of this code for the V2 switchover. @keras_parameterized.run_all_keras_modes class MaskedSoftmaxLayerTest(keras_parameterized.TestCase): def test_non_masked_softmax(self): test_layer = masked_softmax.MaskedSoftmax() input_tensor = tf.keras.Input(shape=(4, 8)) output = test_layer(input_tensor) model = tf.keras.Model(input_tensor, output) input_data = 10 * np.random.random_sample((3, 4, 8)) output_data = model.predict(input_data) expected_data = tf.nn.softmax(input_data) self.assertAllClose(expected_data, output_data) def test_masked_softmax(self): test_layer = masked_softmax.MaskedSoftmax() input_tensor = tf.keras.Input(shape=(4, 8)) mask_tensor = tf.keras.Input(shape=(4, 8)) output = test_layer([input_tensor, mask_tensor]) model = tf.keras.Model([input_tensor, mask_tensor], output) input_data = 10 * np.random.random_sample((3, 4, 8)) mask_data = np.random.randint(2, size=(3, 4, 8)) output_data = model.predict([input_data, mask_data]) expected_zeros = np.greater(mask_data, 0) is_zeros = np.greater(output_data, 0) self.assertAllEqual(expected_zeros, is_zeros) def test_masked_softmax_with_none_mask(self): test_layer = masked_softmax.MaskedSoftmax() input_tensor = tf.keras.Input(shape=(4, 8)) output = test_layer([input_tensor, None]) model = tf.keras.Model(input_tensor, output) input_data = 10 * np.random.random_sample((3, 4, 8)) output_data = model.predict(input_data) expected_data = tf.nn.softmax(input_data) self.assertAllClose(expected_data, output_data) def test_softmax_with_axes_expansion(self): test_layer = masked_softmax.MaskedSoftmax(mask_expansion_axes=[1]) input_tensor = tf.keras.Input(shape=(4, 8)) mask_tensor = tf.keras.Input(shape=(8)) output = test_layer([input_tensor, mask_tensor]) model = tf.keras.Model([input_tensor, mask_tensor], output) input_data = 10 * np.random.random_sample((3, 4, 8)) mask_data = np.random.randint(2, size=(3, 8)) output_data = model.predict([input_data, mask_data]) expanded_mask = np.expand_dims(mask_data, axis=1) * np.ones_like(input_data) expected_zeros = np.greater(expanded_mask, 0) is_zeros = np.greater(output_data, 0) self.assertAllEqual(expected_zeros, is_zeros) if __name__ == '__main__': tf.test.main()
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/official/nlp/modeling/layers/masked_softmax_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Keras-based one-hot embedding layer.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import tensorflow as tf from tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import from official.nlp.modeling.layers import on_device_embedding # This decorator runs the test in V1, V2-Eager, and V2-Functional mode. It # guarantees forward compatibility of this code for the V2 switchover. @keras_parameterized.run_all_keras_modes class OnDeviceEmbeddingTest(keras_parameterized.TestCase): def test_layer_creation(self): vocab_size = 31 embedding_width = 27 test_layer = on_device_embedding.OnDeviceEmbedding( vocab_size=vocab_size, embedding_width=embedding_width) # Create a 2-dimensional input (the first dimension is implicit). sequence_length = 23 input_tensor = tf.keras.Input(shape=(sequence_length), dtype=tf.int32) output_tensor = test_layer(input_tensor) # The output should be the same as the input, save that it has an extra # embedding_width dimension on the end. expected_output_shape = [None, sequence_length, embedding_width] self.assertEqual(expected_output_shape, output_tensor.shape.as_list()) self.assertEqual(output_tensor.dtype, tf.float32) def test_layer_creation_with_float16_dtype(self): vocab_size = 31 embedding_width = 27 test_layer = on_device_embedding.OnDeviceEmbedding( vocab_size=vocab_size, embedding_width=embedding_width, dtype="float16") # Create a 2-dimensional input (the first dimension is implicit). sequence_length = 23 input_tensor = tf.keras.Input(shape=(sequence_length), dtype=tf.int32) output_tensor = test_layer(input_tensor) # The output should be the same as the input, save that it has an extra # embedding_width dimension on the end. expected_output_shape = [None, sequence_length, embedding_width] self.assertEqual(expected_output_shape, output_tensor.shape.as_list()) self.assertEqual(output_tensor.dtype, tf.float16) def test_layer_invocation(self): vocab_size = 31 embedding_width = 27 test_layer = on_device_embedding.OnDeviceEmbedding( vocab_size=vocab_size, embedding_width=embedding_width) # Create a 2-dimensional input (the first dimension is implicit). sequence_length = 23 input_tensor = tf.keras.Input(shape=(sequence_length), dtype=tf.int32) output_tensor = test_layer(input_tensor) # Create a model from the test layer. model = tf.keras.Model(input_tensor, output_tensor) # Invoke the model on test data. We can't validate the output data itself # (the NN is too complex) but this will rule out structural runtime errors. 
batch_size = 3 input_data = np.random.randint( vocab_size, size=(batch_size, sequence_length)) output = model.predict(input_data) self.assertEqual(tf.float32, output.dtype) def test_layer_invocation_with_float16_dtype(self): vocab_size = 31 embedding_width = 27 test_layer = on_device_embedding.OnDeviceEmbedding( vocab_size=vocab_size, embedding_width=embedding_width, dtype="float16") # Create a 2-dimensional input (the first dimension is implicit). sequence_length = 23 input_tensor = tf.keras.Input(shape=(sequence_length), dtype=tf.int32) output_tensor = test_layer(input_tensor) # Create a model from the test layer. model = tf.keras.Model(input_tensor, output_tensor) # Invoke the model on test data. We can't validate the output data itself # (the NN is too complex) but this will rule out structural runtime errors. batch_size = 3 input_data = np.random.randint( vocab_size, size=(batch_size, sequence_length)) output = model.predict(input_data) self.assertEqual(tf.float16, output.dtype) def test_one_hot_layer_creation(self): vocab_size = 31 embedding_width = 27 test_layer = on_device_embedding.OnDeviceEmbedding( vocab_size=vocab_size, embedding_width=embedding_width, use_one_hot=True) # Create a 2-dimensional input (the first dimension is implicit). sequence_length = 23 input_tensor = tf.keras.Input(shape=(sequence_length), dtype=tf.int32) output_tensor = test_layer(input_tensor) # The output should be the same as the input, save that it has an extra # embedding_width dimension on the end. expected_output_shape = [None, sequence_length, embedding_width] self.assertEqual(expected_output_shape, output_tensor.shape.as_list()) self.assertEqual(output_tensor.dtype, tf.float32) def test_one_hot_layer_creation_with_float16_dtype(self): vocab_size = 31 embedding_width = 27 test_layer = on_device_embedding.OnDeviceEmbedding( vocab_size=vocab_size, embedding_width=embedding_width, dtype="float16", use_one_hot=True) # Create a 2-dimensional input (the first dimension is implicit). sequence_length = 23 input_tensor = tf.keras.Input(shape=(sequence_length), dtype=tf.int32) output_tensor = test_layer(input_tensor) # The output should be the same as the input, save that it has an extra # embedding_width dimension on the end. expected_output_shape = [None, sequence_length, embedding_width] self.assertEqual(expected_output_shape, output_tensor.shape.as_list()) self.assertEqual(output_tensor.dtype, tf.float16) def test_one_hot_layer_invocation(self): vocab_size = 31 embedding_width = 27 test_layer = on_device_embedding.OnDeviceEmbedding( vocab_size=vocab_size, embedding_width=embedding_width, use_one_hot=True) # Create a 2-dimensional input (the first dimension is implicit). sequence_length = 23 input_tensor = tf.keras.Input(shape=(sequence_length), dtype=tf.int32) output_tensor = test_layer(input_tensor) # Create a model from the test layer. model = tf.keras.Model(input_tensor, output_tensor) # Invoke the model on test data. We can't validate the output data itself # (the NN is too complex) but this will rule out structural runtime errors. batch_size = 3 input_data = np.random.randint( vocab_size, size=(batch_size, sequence_length)) output = model.predict(input_data) self.assertEqual(tf.float32, output.dtype) def test_one_hot_layer_invocation_with_float16_dtype(self): vocab_size = 31 embedding_width = 27 test_layer = on_device_embedding.OnDeviceEmbedding( vocab_size=vocab_size, embedding_width=embedding_width, dtype="float16", use_one_hot=True) # Create a 2-dimensional input (the first dimension is implicit). 
sequence_length = 23 input_tensor = tf.keras.Input(shape=(sequence_length), dtype=tf.int32) output_tensor = test_layer(input_tensor) # Create a model from the test layer. model = tf.keras.Model(input_tensor, output_tensor) # Invoke the model on test data. We can't validate the output data itself # (the NN is too complex) but this will rule out structural runtime errors. batch_size = 3 input_data = np.random.randint( vocab_size, size=(batch_size, sequence_length)) output = model.predict(input_data) self.assertEqual(tf.float16, output.dtype) if __name__ == "__main__": tf.test.main()
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/official/nlp/modeling/layers/on_device_embedding_test.py
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras-based attention layer."""
from __future__ import absolute_import
from __future__ import division
# from __future__ import google_type_annotations
from __future__ import print_function

import math

import tensorflow as tf

from official.nlp.modeling.layers import dense_einsum
from official.nlp.modeling.layers import masked_softmax


# @tf.keras.utils.register_keras_serializable(package="Text")
class Attention(tf.keras.layers.Layer):
  """Attention layer.

  This is an implementation of multi-headed attention based on "Attention
  is all you Need". If `from_tensor` and `to_tensor` are the same, then
  this is self-attention. Each timestep in `from_tensor` attends to the
  corresponding sequence in `to_tensor`, and returns a fixed-width vector.

  This function first projects `from_tensor` into a "query" tensor and
  `to_tensor` into "key" and "value" tensors. These are (effectively) a list
  of tensors of length `num_attention_heads`, where each tensor is of shape
  [batch_size, seq_length, size_per_head].

  Then, the query and key tensors are dot-producted and scaled. These are
  softmaxed to obtain attention probabilities. The value tensors are then
  interpolated by these probabilities, then concatenated back to a single
  tensor and returned.

  Attributes:
    num_heads: Number of attention heads.
    head_size: Size of each attention head.
    dropout_rate: Dropout probability.
    kernel_initializer: Initializer for dense layer kernels.
    bias_initializer: Initializer for dense layer biases.
    kernel_regularizer: Regularizer for dense layer kernels.
    bias_regularizer: Regularizer for dense layer biases.
    activity_regularizer: Regularizer for dense layer activity.
    kernel_constraint: Constraint for dense layer kernels.
    bias_constraint: Constraint for dense layer biases.
""" def __init__(self, num_heads, head_size, dropout_rate=0.0, kernel_initializer="glorot_uniform", bias_initializer="zeros", kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, **kwargs): super(Attention, self).__init__(**kwargs) self._num_heads = num_heads self._head_size = head_size self._dropout_rate = dropout_rate self._kernel_initializer = tf.keras.initializers.get(kernel_initializer) self._bias_initializer = tf.keras.initializers.get(bias_initializer) self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer) self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer) self._kernel_constraint = tf.keras.constraints.get(kernel_constraint) self._bias_constraint = tf.keras.constraints.get(bias_constraint) self._query_dense = dense_einsum.DenseEinsum( output_shape=(self._num_heads, self._head_size), kernel_initializer=self._kernel_initializer, bias_initializer=self._bias_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer, activity_regularizer=self._activity_regularizer, kernel_constraint=self._kernel_constraint, bias_constraint=self._bias_constraint, name="query") self._key_dense = dense_einsum.DenseEinsum( output_shape=(self._num_heads, self._head_size), kernel_initializer=self._kernel_initializer, bias_initializer=self._bias_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer, activity_regularizer=self._activity_regularizer, kernel_constraint=self._kernel_constraint, bias_constraint=self._bias_constraint, name="key") self._value_dense = dense_einsum.DenseEinsum( output_shape=(self._num_heads, self._head_size), kernel_initializer=self._kernel_initializer, bias_initializer=self._bias_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer, activity_regularizer=self._activity_regularizer, kernel_constraint=self._kernel_constraint, bias_constraint=self._bias_constraint, name="value") self._masked_softmax = masked_softmax.MaskedSoftmax(mask_expansion_axes=[1]) self._dropout = tf.keras.layers.Dropout(rate=self._dropout_rate) def get_config(self): config = { "num_heads": self._num_heads, "head_size": self._head_size, "dropout_rate": self._dropout_rate, "kernel_initializer": tf.keras.initializers.serialize(self._kernel_initializer), "bias_initializer": tf.keras.initializers.serialize(self._bias_initializer), "kernel_regularizer": tf.keras.regularizers.serialize(self._kernel_regularizer), "bias_regularizer": tf.keras.regularizers.serialize(self._bias_regularizer), "activity_regularizer": tf.keras.regularizers.serialize(self._activity_regularizer), "kernel_constraint": tf.keras.constraints.serialize(self._kernel_constraint), "bias_constraint": tf.keras.constraints.serialize(self._bias_constraint) } base_config = super(Attention, self).get_config() return dict(list(base_config.items()) + list(config.items())) def call(self, inputs): from_tensor = inputs[0] to_tensor = inputs[1] attention_mask = inputs[2] if len(inputs) == 3 else None # Scalar dimensions referenced here: # B = batch size (number of sequences) # F = `from_tensor` sequence length # T = `to_tensor` sequence length # N = `num_attention_heads` # H = `size_per_head` # `query_tensor` = [B, F, N ,H] query_tensor = self._query_dense(from_tensor) # `key_tensor` = [B, T, N, H] key_tensor = self._key_dense(to_tensor) # `value_tensor` = [B, T, N, H] value_tensor = self._value_dense(to_tensor) # Take the dot product 
between "query" and "key" to get the raw # attention scores. #attention_scores = tf.einsum("BTNH,BFNH->BNFT", key_tensor, query_tensor) # Instead of using the einsum equation, we expand it into the below # equivalent equations. # `query_tensor` = [B, N, F, H] query_tensor = tf.transpose(query_tensor, [0, 2, 1, 3]) # `key_tensor` = [B, N, T, H] key_tensor = tf.transpose(key_tensor, [0, 2, 1, 3]) # `attention_scores` = [B, N, F, T] attention_scores = tf.matmul(query_tensor, key_tensor, transpose_b=True) attention_scores = tf.multiply(attention_scores, 1.0 / math.sqrt(float(self._head_size))) # Normalize the attention scores to probabilities. # `attention_probs` = [B, N, F, T] attention_probs = self._masked_softmax([attention_scores, attention_mask]) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self._dropout(attention_probs) # `context_layer` = [B, F, N, H] return tf.einsum("BNFT,BTNH->BFNH", attention_probs, value_tensor) # @tf.keras.utils.register_keras_serializable(package="Text") class CachedAttention(Attention): """Attention layer with cache used for auto-agressive decoding. Attributes: num_heads: Number of attention heads. head_size: Size of each attention head. **kwargs: Other keyword arguments inherit from `Attention` class. """ def __init__(self, num_heads, head_size, **kwargs): super(CachedAttention, self).__init__(num_heads, head_size, **kwargs) def _update_cache(self, key_tensor, value_tensor, cache, decode_loop_step): """Updates cache states and gets full-length key/value tensors.""" # Combines cached keys and values with new keys and values. if decode_loop_step is not None: # TPU special case. key_seq_dim = cache["key"].shape.as_list()[1] indices = tf.reshape( tf.one_hot(decode_loop_step, key_seq_dim, dtype=key_tensor.dtype), [1, key_seq_dim, 1, 1]) key_tensor = cache["key"] + key_tensor * indices value_seq_dim = cache["value"].shape.as_list()[1] indices = tf.reshape( tf.one_hot(decode_loop_step, value_seq_dim, dtype=value_tensor.dtype), [1, value_seq_dim, 1, 1]) value_tensor = cache["value"] + value_tensor * indices else: key_tensor = tf.concat( [tf.cast(cache["key"], key_tensor.dtype), key_tensor], axis=1) value_tensor = tf.concat( [tf.cast(cache["value"], value_tensor.dtype), value_tensor], axis=1) # Update cache cache["key"] = key_tensor cache["value"] = value_tensor return key_tensor, value_tensor def call(self, inputs, decode_loop_step=None): from_tensor = inputs[0] to_tensor = inputs[1] attention_mask = inputs[2] if len(inputs) >= 3 else None cache = inputs[3] if len(inputs) >= 4 else None # Scalar dimensions referenced here: # B = batch size (number of sequences) # F = `from_tensor` sequence length # T = `to_tensor` sequence length # N = `num_attention_heads` # H = `size_per_head` # `query_tensor` = [B, F, N ,H] query_tensor = self._query_dense(from_tensor) # `key_tensor` = [B, T, N, H] key_tensor = self._key_dense(to_tensor) # `value_tensor` = [B, T, N, H] value_tensor = self._value_dense(to_tensor) if cache: key_tensor, value_tensor = self._update_cache(key_tensor, value_tensor, cache, decode_loop_step) # Take the dot product between "query" and "key" to get the raw # attention scores. attention_scores = tf.einsum("BTNH,BFNH->BNFT", key_tensor, query_tensor) attention_scores = tf.multiply(attention_scores, 1.0 / math.sqrt(float(self._head_size))) # Normalize the attention scores to probabilities. 
# `attention_probs` = [B, N, F, T] attention_probs = self._masked_softmax([attention_scores, attention_mask]) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self._dropout(attention_probs) # `context_layer` = [B, F, N, H] return tf.einsum("BNFT,BTNH->BFNH", attention_probs, value_tensor), cache
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/official/nlp/modeling/layers/attention.py
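The shape comments in `call` above ([B, F, N, H] and so on) can be made concrete with a short self-attention sketch. It is illustrative only, assumes the `official` package is importable under the path shown, and passes no attention mask; the toy dimensions are placeholders.

# Illustrative sketch only (not from the repository): self-attention with the
# Attention layer; with no mask, MaskedSoftmax falls back to a plain softmax.
import tensorflow as tf

from official.nlp.modeling.layers import attention

batch_size, seq_length, hidden_size = 2, 16, 64
num_heads, head_size = 4, 16

layer = attention.Attention(num_heads=num_heads, head_size=head_size)
from_tensor = tf.random.uniform([batch_size, seq_length, hidden_size])

# Self-attention: from_tensor and to_tensor are the same tensor.
context = layer([from_tensor, from_tensor])
# context keeps separate head dimensions:
# [batch_size, seq_length, num_heads, head_size] -> (2, 16, 4, 16)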
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Keras-based transformer block layer.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import tensorflow as tf from tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import from official.nlp.modeling.layers import transformer # This decorator runs the test in V1, V2-Eager, and V2-Functional mode. It # guarantees forward compatibility of this code for the V2 switchover. @keras_parameterized.run_all_keras_modes class TransformerLayerTest(keras_parameterized.TestCase): def test_layer_creation(self): test_layer = transformer.Transformer( num_attention_heads=10, intermediate_size=2048, intermediate_activation='relu') sequence_length = 21 width = 80 # Create a 3-dimensional input (the first dimension is implicit). data_tensor = tf.keras.Input(shape=(sequence_length, width)) output_tensor = test_layer(data_tensor) # The default output of a transformer layer should be the same as the input. self.assertEqual(data_tensor.shape.as_list(), output_tensor.shape.as_list()) def test_layer_creation_with_mask(self): test_layer = transformer.Transformer( num_attention_heads=10, intermediate_size=2048, intermediate_activation='relu') sequence_length = 21 width = 80 # Create a 3-dimensional input (the first dimension is implicit). data_tensor = tf.keras.Input(shape=(sequence_length, width)) # Create a 2-dimensional input (the first dimension is implicit). mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length)) output_tensor = test_layer([data_tensor, mask_tensor]) # The default output of a transformer layer should be the same as the input. self.assertEqual(data_tensor.shape.as_list(), output_tensor.shape.as_list()) def test_layer_creation_with_incorrect_mask_fails(self): test_layer = transformer.Transformer( num_attention_heads=10, intermediate_size=2048, intermediate_activation='relu') sequence_length = 21 width = 80 # Create a 3-dimensional input (the first dimension is implicit). data_tensor = tf.keras.Input(shape=(sequence_length, width)) # Create a 2-dimensional input (the first dimension is implicit). mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length - 3)) with self.assertRaisesRegex(ValueError, 'When passing a mask tensor.*'): _ = test_layer([data_tensor, mask_tensor]) def test_layer_invocation(self): test_layer = transformer.Transformer( num_attention_heads=10, intermediate_size=2048, intermediate_activation='relu') sequence_length = 21 width = 80 # Create a 3-dimensional input (the first dimension is implicit). data_tensor = tf.keras.Input(shape=(sequence_length, width)) output_tensor = test_layer(data_tensor) # Create a model from the test layer. model = tf.keras.Model(data_tensor, output_tensor) # Invoke the model on test data. 
We can't validate the output data itself # (the NN is too complex) but this will rule out structural runtime errors. batch_size = 6 input_data = 10 * np.random.random_sample( (batch_size, sequence_length, width)) _ = model.predict(input_data) def test_layer_invocation_with_mask(self): test_layer = transformer.Transformer( num_attention_heads=10, intermediate_size=2048, intermediate_activation='relu') sequence_length = 21 width = 80 # Create a 3-dimensional input (the first dimension is implicit). data_tensor = tf.keras.Input(shape=(sequence_length, width)) # Create a 2-dimensional input (the first dimension is implicit). mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length)) output_tensor = test_layer([data_tensor, mask_tensor]) # Create a model from the test layer. model = tf.keras.Model([data_tensor, mask_tensor], output_tensor) # Invoke the model on test data. We can't validate the output data itself # (the NN is too complex) but this will rule out structural runtime errors. batch_size = 6 input_data = 10 * np.random.random_sample( (batch_size, sequence_length, width)) # The attention mask should be of shape (batch, from_seq_len, to_seq_len), # which here is (batch, sequence_length, sequence_length) mask_data = np.random.randint( 2, size=(batch_size, sequence_length, sequence_length)) _ = model.predict([input_data, mask_data]) def test_layer_invocation_with_float16_dtype(self): test_layer = transformer.Transformer( num_attention_heads=10, intermediate_size=2048, intermediate_activation='relu', dtype='float16') sequence_length = 21 width = 80 # Create a 3-dimensional input (the first dimension is implicit). data_tensor = tf.keras.Input( shape=(sequence_length, width), dtype=tf.float16) # Create a 2-dimensional input (the first dimension is implicit). mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length)) output_tensor = test_layer([data_tensor, mask_tensor]) # Create a model from the test layer. model = tf.keras.Model([data_tensor, mask_tensor], output_tensor) # Invoke the model on test data. We can't validate the output data itself # (the NN is too complex) but this will rule out structural runtime errors. batch_size = 6 input_data = (10 * np.random.random_sample( (batch_size, sequence_length, width))).astype(np.float16) # The attention mask should be of shape (batch, from_seq_len, to_seq_len), # which here is (batch, sequence_length, sequence_length) mask_data = np.random.randint( 2, size=(batch_size, sequence_length, sequence_length)) _ = model.predict([input_data, mask_data]) def test_transform_with_initializer(self): test_layer = transformer.Transformer( num_attention_heads=10, intermediate_size=2048, intermediate_activation='relu', kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02)) sequence_length = 21 width = 80 # Create a 3-dimensional input (the first dimension is implicit). data_tensor = tf.keras.Input(shape=(sequence_length, width)) output = test_layer(data_tensor) # The default output of a transformer layer should be the same as the input. self.assertEqual(data_tensor.shape.as_list(), output.shape.as_list()) if __name__ == '__main__': tf.test.main()
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/official/nlp/modeling/layers/transformer_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Keras-based einsum layer.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import tensorflow as tf from tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import from official.nlp.modeling.layers import dense_einsum # This decorator runs the test in V1, V2-Eager, and V2-Functional mode. It # guarantees forward compatibility of this code for the V2 switchover. @keras_parameterized.run_all_keras_modes class DenseEinsumLayer(keras_parameterized.TestCase): def test_3D_einsum_with_two_bound_dimensions(self): test_layer = dense_einsum.DenseEinsum( output_shape=(64,), num_summed_dimensions=2) # Create a 4-dimensional input (the first dimension is implicit). input_tensor = tf.keras.Input(shape=(None, 40, 80)) _ = test_layer(input_tensor) self.assertEqual(test_layer._einsum_string, "abcd,cde->abe") self.assertEqual(test_layer._kernel_shape, (40, 80, 64)) def test_3D_einsum_with_one_bound_dimensions(self): test_layer = dense_einsum.DenseEinsum( output_shape=(64, 32), num_summed_dimensions=1) # Create a 3-dimensional input (the first dimension is implicit). input_tensor = tf.keras.Input(shape=(None, 80)) _ = test_layer(input_tensor) self.assertEqual(test_layer._einsum_string, "abc,cde->abde") self.assertEqual(test_layer._kernel_shape, (80, 64, 32)) def test_2D_einsum_with_one_bound_dimensions(self): test_layer = dense_einsum.DenseEinsum( output_shape=(64,), num_summed_dimensions=1) # Create a 3-dimensional input (the first dimension is implicit). input_tensor = tf.keras.Input(shape=(None, 80)) _ = test_layer(input_tensor) self.assertEqual(test_layer._einsum_string, "abc,cd->abd") self.assertEqual(test_layer._kernel_shape, (80, 64)) def test_bias_term_can_be_disabled(self): # A layer created using the bias should have two weights. test_layer = dense_einsum.DenseEinsum( output_shape=64, num_summed_dimensions=1, use_bias=True) input_tensor = tf.keras.Input(shape=(None, 80)) _ = test_layer(input_tensor) self.assertEqual(2, len(test_layer.get_weights())) # A layer created without the bias should have only one weight. test_layer = dense_einsum.DenseEinsum( output_shape=64, num_summed_dimensions=1, use_bias=False) input_tensor = tf.keras.Input(shape=(None, 80)) _ = test_layer(input_tensor) self.assertEqual(1, len(test_layer.get_weights())) def test_activation(self): # Create a model that does not use an activation. no_activation_layer = dense_einsum.DenseEinsum( output_shape=64, num_summed_dimensions=1, activation=None) input_tensor = tf.keras.Input(shape=(None, 80)) output_tensor = no_activation_layer(input_tensor) no_activation_model = tf.keras.Model(input_tensor, output_tensor) # Create a model that uses a softmax activation. 
activation_layer = dense_einsum.DenseEinsum( output_shape=64, num_summed_dimensions=1, activation="softmax") input_tensor = tf.keras.Input(shape=(None, 80)) output_tensor = activation_layer(input_tensor) activation_model = tf.keras.Model(input_tensor, output_tensor) # Make sure the models' weights are identical. activation_model.set_weights(no_activation_model.get_weights()) # Predict using each model on the same input data. The output should be # different, since one is using a softmax - even though the models' weights # are the same. input_values = 10 * np.random.random_sample((10, 4, 80)) non_activated_data = no_activation_model.predict(input_values) activated_data = activation_model.predict(input_values) self.assertNotAllClose(activated_data, non_activated_data) def test_non_iterable_output_shape(self): test_layer = dense_einsum.DenseEinsum( output_shape=64, num_summed_dimensions=1) # Create a 3-dimensional input (the first dimension is implicit). input_tensor = tf.keras.Input(shape=(None, 80)) _ = test_layer(input_tensor) self.assertEqual(test_layer._einsum_string, "abc,cd->abd") self.assertEqual(test_layer._kernel_shape, (80, 64)) def test_with_explicit_initializer(self): test_layer = dense_einsum.DenseEinsum( output_shape=(64,), num_summed_dimensions=2, kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02)) # Create a 4-dimensional input (the first dimension is implicit). input_tensor = tf.keras.Input(shape=(None, 40, 80)) _ = test_layer(input_tensor) self.assertEqual(test_layer._einsum_string, "abcd,cde->abe") self.assertEqual(test_layer._kernel_shape, (40, 80, 64)) if __name__ == "__main__": tf.test.main()
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/official/nlp/modeling/layers/dense_einsum_test.py
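The tests above assert on the einsum strings and kernel shapes DenseEinsum builds. A minimal sketch (not part of the original tests; assumes TensorFlow 2.x eager execution, shapes are illustrative) of the same shape algebra with plain tf.einsum, showing that "abc,cd->abd" is just a batched dense projection:

import numpy as np
import tensorflow as tf

# (batch, sequence, input_dim) input and an (input_dim, output_dim) kernel.
inputs = np.random.rand(2, 5, 80).astype(np.float32)
kernel = np.random.rand(80, 64).astype(np.float32)

# "abc,cd->abd" contracts the last input axis against the first kernel axis.
einsum_out = tf.einsum("abc,cd->abd", inputs, kernel)
matmul_out = tf.matmul(inputs, kernel)  # equivalent for this 2D-kernel case

print(einsum_out.shape)                                                 # (2, 5, 64)
print(np.allclose(einsum_out.numpy(), matmul_out.numpy(), atol=1e-5))   # True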
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Layers package definition.""" from official.nlp.modeling.layers.attention import * # pylint: disable=wildcard-import from official.nlp.modeling.layers.dense_einsum import DenseEinsum from official.nlp.modeling.layers.masked_softmax import MaskedSoftmax from official.nlp.modeling.layers.on_device_embedding import OnDeviceEmbedding from official.nlp.modeling.layers.position_embedding import PositionEmbedding from official.nlp.modeling.layers.self_attention_mask import SelfAttentionMask from official.nlp.modeling.layers.transformer import Transformer from official.nlp.modeling.layers.transformer_scaffold import TransformerScaffold
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/official/nlp/modeling/layers/__init__.py
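A short usage sketch (assumes TensorFlow 2.x and this package on the import path; the sizes are illustrative) of what the explicit exports above buy downstream code: every layer can be referenced from the package root rather than its defining module.

from official.nlp.modeling import layers

# All of the classes re-exported above are reachable from the package root.
embedding = layers.OnDeviceEmbedding(vocab_size=30522, embedding_width=768)
position = layers.PositionEmbedding(use_dynamic_slicing=True,
                                    max_sequence_length=512)
block = layers.Transformer(num_attention_heads=12,
                           intermediate_size=3072,
                           intermediate_activation="relu")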
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Keras-based positional embedding layer.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import tensorflow as tf from tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import from official.nlp.modeling.layers import position_embedding # This decorator runs the test in V1, V2-Eager, and V2-Functional mode. It # guarantees forward compatibility of this code for the V2 switchover. @keras_parameterized.run_all_keras_modes class PositionEmbeddingLayerTest(keras_parameterized.TestCase): def test_static_layer_output_shape(self): test_layer = position_embedding.PositionEmbedding() # Create a 3-dimensional input (the first dimension is implicit). sequence_length = 21 width = 30 input_tensor = tf.keras.Input(shape=(sequence_length, width)) output_tensor = test_layer(input_tensor) # When using static positional embedding shapes, the output is expected # to be the same as the input shape in all dimensions save batch. expected_output_shape = [1, sequence_length, width] self.assertEqual(expected_output_shape, output_tensor.shape.as_list()) # The default output dtype for this layer should be tf.float32. self.assertEqual(tf.float32, output_tensor.dtype) def test_float16_dtype(self): test_layer = position_embedding.PositionEmbedding(dtype="float16") # Create a 3-dimensional input (the first dimension is implicit). sequence_length = 21 width = 30 input_tensor = tf.keras.Input(shape=(sequence_length, width)) output_tensor = test_layer(input_tensor) # When using static positional embedding shapes, the output is expected # to be the same as the input shape in all dimensions save batch. expected_output_shape = [1, sequence_length, width] self.assertEqual(expected_output_shape, output_tensor.shape.as_list()) # The default output dtype for this layer should be tf.float32. self.assertEqual(tf.float16, output_tensor.dtype) def test_dynamic_layer_output_shape(self): max_sequence_length = 40 test_layer = position_embedding.PositionEmbedding( use_dynamic_slicing=True, max_sequence_length=max_sequence_length) # Create a 3-dimensional input (the first dimension is implicit). width = 30 input_tensor = tf.keras.Input(shape=(None, width)) output_tensor = test_layer(input_tensor) # When using dynamic positional embedding shapes, the output is expected # to be the same as the input shape in all dimensions - but may be None if # the input shape is None there. expected_output_shape = [1, None, width] self.assertEqual(expected_output_shape, output_tensor.shape.as_list()) def test_dynamic_layer_slicing(self): max_sequence_length = 40 test_layer = position_embedding.PositionEmbedding( use_dynamic_slicing=True, max_sequence_length=max_sequence_length) # Create a 3-dimensional input (the first dimension is implicit). 
width = 30 input_tensor = tf.keras.Input(shape=(None, width)) output_tensor = test_layer(input_tensor) model = tf.keras.Model(input_tensor, output_tensor) # Create input data that is shorter than max_sequence_length, which should # trigger a down-slice. input_length = 17 # Note: This test explicitly uses a batch size of 1. This is to get around # Keras' restriction on Model invocations: inputs are expected to have the # same batch cardinality as outputs. In practice, this layer should be used # inside a model, where it can be broadcast when added to another tensor. input_data = np.ones((1, input_length, width)) output_data = model.predict(input_data) self.assertAllEqual([1, input_length, width], output_data.shape) if __name__ == "__main__": tf.test.main()
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/official/nlp/modeling/layers/position_embedding_test.py
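The batch-size-1 output noted in the test above is meant to be combined with another tensor of the same width. A minimal wiring sketch (assumes TensorFlow 2.x; widths and lengths are illustrative):

import tensorflow as tf
from official.nlp.modeling.layers import position_embedding

width = 30
word_embeddings = tf.keras.Input(shape=(None, width))        # (batch, seq, width)
position_layer = position_embedding.PositionEmbedding(
    use_dynamic_slicing=True, max_sequence_length=40)
position_embeddings = position_layer(word_embeddings)        # (1, seq, width)

# The leading 1 broadcasts over the batch dimension when the two are added.
combined = word_embeddings + position_embeddings             # (batch, seq, width)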
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Keras layer that creates a self-attention mask.""" from __future__ import absolute_import from __future__ import division # from __future__ import google_type_annotations from __future__ import print_function import tensorflow as tf from official.modeling import tf_utils @tf.keras.utils.register_keras_serializable(package='Text') class SelfAttentionMask(tf.keras.layers.Layer): """Create 3D attention mask from a 2D tensor mask. inputs[0]: from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...]. inputs[1]: to_mask: int32 Tensor of shape [batch_size, to_seq_length]. Returns: float Tensor of shape [batch_size, from_seq_length, to_seq_length]. """ def call(self, inputs): from_tensor = inputs[0] to_mask = inputs[1] from_shape = tf_utils.get_shape_list(from_tensor, expected_rank=[2, 3]) batch_size = from_shape[0] from_seq_length = from_shape[1] to_shape = tf_utils.get_shape_list(to_mask, expected_rank=2) to_seq_length = to_shape[1] to_mask = tf.cast( tf.reshape(to_mask, [batch_size, 1, to_seq_length]), dtype=from_tensor.dtype) # We don't assume that `from_tensor` is a mask (although it could be). We # don't actually care if we attend *from* padding tokens (only *to* padding) # tokens so we create a tensor of all ones. # # `broadcast_ones` = [batch_size, from_seq_length, 1] broadcast_ones = tf.ones( shape=[batch_size, from_seq_length, 1], dtype=from_tensor.dtype) # Here we broadcast along two dimensions to create the mask. mask = broadcast_ones * to_mask return mask
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/official/nlp/modeling/layers/self_attention_mask.py
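A short sketch (assumes TensorFlow 2.x; shapes are illustrative) of how the layer above is typically fed: a 2D padding mask over the "to" sequence is broadcast out to the [batch, from_seq, to_seq] mask the transformer layers expect.

import tensorflow as tf
from official.nlp.modeling.layers import self_attention_mask

seq_length, width = 16, 768
word_embeddings = tf.keras.Input(shape=(seq_length, width))
input_mask = tf.keras.Input(shape=(seq_length,), dtype=tf.int32)  # 1 = token, 0 = pad

# Output shape: (batch, seq_length, seq_length), same dtype as word_embeddings.
attention_mask = self_attention_mask.SelfAttentionMask()(
    [word_embeddings, input_mask])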
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Keras-based softmax layer with optional masking.""" from __future__ import absolute_import from __future__ import division # from __future__ import google_type_annotations from __future__ import print_function import tensorflow as tf @tf.keras.utils.register_keras_serializable(package='Text') class MaskedSoftmax(tf.keras.layers.Layer): """Performs a softmax with optional masking on a tensor. Attributes: mask_expansion_axes: Any axes that should be padded on the mask tensor. """ def __init__(self, mask_expansion_axes=None, **kwargs): self._mask_expansion_axes = mask_expansion_axes super(MaskedSoftmax, self).__init__(**kwargs) def call(self, inputs): if isinstance(inputs, list) and len(inputs) == 2: scores, mask = inputs else: scores, mask = (inputs, None) if mask is not None: if self._mask_expansion_axes is not None: mask = tf.expand_dims(mask, axis=self._mask_expansion_axes) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. adder = (1.0 - tf.cast(mask, scores.dtype)) * -10000.0 # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. scores += adder return tf.nn.softmax(scores) def get_config(self): config = {'mask_expansion_axes': self._mask_expansion_axes} base_config = super(MaskedSoftmax, self).get_config() return dict(list(base_config.items()) + list(config.items()))
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/official/nlp/modeling/layers/masked_softmax.py
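A minimal numeric sketch of the -10000.0 masking trick above (assumes TensorFlow 2.x eager execution): masked positions end up with effectively zero probability after the softmax.

import tensorflow as tf
from official.nlp.modeling.layers import masked_softmax

scores = tf.constant([[2.0, 1.0, 0.5]])
mask = tf.constant([[1.0, 1.0, 0.0]])    # the last position is masked out

probs = masked_softmax.MaskedSoftmax()([scores, mask])
print(probs.numpy())                     # approximately [[0.73, 0.27, 0.00]]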
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Keras-based one-hot embedding layer.""" from __future__ import absolute_import from __future__ import division # from __future__ import google_type_annotations from __future__ import print_function import tensorflow as tf from official.modeling import tf_utils # @tf.keras.utils.register_keras_serializable(package="Text") class OnDeviceEmbedding(tf.keras.layers.Layer): """Performs an embedding lookup suitable for accelerator devices. This layer uses either tf.gather or tf.one_hot to translate integer indices to float embeddings. Attributes: vocab_size: Number of elements in the vocabulary. embedding_width: Output size of the embedding layer. initializer: The initializer to use for the embedding weights. Defaults to "glorot_uniform". use_one_hot: Whether to use tf.one_hot over tf.gather for the embedding lookup. Defaults to False (that is, using tf.gather). Setting this option to True may improve performance, especially on small vocabulary sizes, but will generally require more memory. """ def __init__(self, vocab_size, embedding_width, initializer="glorot_uniform", use_one_hot=False, **kwargs): # We need to have a default dtype of float32, since the inputs (which Keras # usually uses to infer the dtype) will always be int32. if "dtype" not in kwargs: kwargs["dtype"] = "float32" super(OnDeviceEmbedding, self).__init__(**kwargs) self._vocab_size = vocab_size self._embedding_width = embedding_width self._initializer = initializer self._use_one_hot = use_one_hot def get_config(self): config = { "vocab_size": self._vocab_size, "embedding_width": self._embedding_width, "initializer": self._initializer, "use_one_hot": self._use_one_hot, } base_config = super(OnDeviceEmbedding, self).get_config() return dict(list(base_config.items()) + list(config.items())) def build(self, input_shape): self.embeddings = self.add_weight( "embeddings", shape=[self._vocab_size, self._embedding_width], initializer=self._initializer) super(OnDeviceEmbedding, self).build(input_shape) def call(self, inputs): input_shape = tf_utils.get_shape_list(inputs, expected_rank=2) input_shape.append(self._embedding_width) flat_inputs = tf.reshape(inputs, [-1]) if self._use_one_hot: one_hot_data = tf.one_hot( flat_inputs, depth=self._vocab_size, dtype=self._dtype) embeddings = tf.matmul(one_hot_data, self.embeddings) else: embeddings = tf.gather(self.embeddings, flat_inputs) embeddings = tf.reshape(embeddings, input_shape) return embeddings
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/official/nlp/modeling/layers/on_device_embedding.py
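A usage sketch (assumes TensorFlow 2.x eager execution; vocabulary and width are illustrative) of the two lookup paths described in the docstring above. Both produce the same (batch, seq, embedding_width) float tensor from int32 ids; only the lookup mechanics differ.

import tensorflow as tf
from official.nlp.modeling.layers import on_device_embedding

ids = tf.constant([[1, 2, 3], [4, 5, 6]], dtype=tf.int32)    # (batch=2, seq=3)

gather_layer = on_device_embedding.OnDeviceEmbedding(
    vocab_size=100, embedding_width=8, use_one_hot=False)
one_hot_layer = on_device_embedding.OnDeviceEmbedding(
    vocab_size=100, embedding_width=8, use_one_hot=True)

print(gather_layer(ids).shape)    # (2, 3, 8)
print(one_hot_layer(ids).shape)   # (2, 3, 8)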
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Keras-based einsum layer.""" from __future__ import absolute_import from __future__ import division # from __future__ import google_type_annotations from __future__ import print_function import tensorflow as tf _CHR_IDX = ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m"] # @tf.keras.utils.register_keras_serializable(package="Text") class DenseEinsum(tf.keras.layers.Layer): """A densely connected layer that uses tf.einsum as the backing computation. This layer can perform einsum calculations of arbitrary dimensionality. Attributes: output_shape: Positive integer or tuple, dimensionality of the output space. num_summed_dimensions: The number of dimensions to sum over. Standard 2D matmul should use 1, 3D matmul should use 2, and so forth. activation: Activation function to use. If you don't specify anything, no activation is applied (ie. "linear" activation: `a(x) = x`). use_bias: Boolean, whether the layer uses a bias vector. kernel_initializer: Initializer for the `kernel` weights matrix. bias_initializer: Initializer for the bias vector. kernel_regularizer: Regularizer function applied to the `kernel` weights matrix. bias_regularizer: Regularizer function applied to the bias vector. activity_regularizer: Regularizer function applied to the output of the layer (its "activation").. kernel_constraint: Constraint function applied to the `kernel` weights matrix. bias_constraint: Constraint function applied to the bias vector. Input shape: N-D tensor with shape: `(batch_size, ..., input_dim)`. The most common situation would be a 2D input with shape `(batch_size, input_dim)`. Output shape: N-D tensor with shape: `(batch_size, ..., units)`. For instance, for a 2D input with shape `(batch_size, input_dim)`, the output would have shape `(batch_size, units)`. 
""" def __init__(self, output_shape, num_summed_dimensions=1, activation=None, use_bias=True, kernel_initializer="glorot_uniform", bias_initializer="zeros", kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, **kwargs): super(DenseEinsum, self).__init__(**kwargs) self._output_shape = output_shape if isinstance( output_shape, (list, tuple)) else (output_shape,) self._activation = tf.keras.activations.get(activation) self._use_bias = use_bias self._kernel_initializer = tf.keras.initializers.get(kernel_initializer) self._bias_initializer = tf.keras.initializers.get(bias_initializer) self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer) self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer) self._kernel_constraint = tf.keras.constraints.get(kernel_constraint) self._bias_constraint = tf.keras.constraints.get(bias_constraint) self._num_summed_dimensions = num_summed_dimensions self._einsum_string = None def _build_einsum_string(self, free_input_dims, bound_dims, output_dims): input_str = "" kernel_str = "" output_str = "" letter_offset = 0 for i in range(free_input_dims): char = _CHR_IDX[i + letter_offset] input_str += char output_str += char letter_offset += free_input_dims for i in range(bound_dims): char = _CHR_IDX[i + letter_offset] input_str += char kernel_str += char letter_offset += bound_dims for i in range(output_dims): char = _CHR_IDX[i + letter_offset] kernel_str += char output_str += char return input_str + "," + kernel_str + "->" + output_str def build(self, input_shape): input_shape = tf.TensorShape(input_shape) input_rank = input_shape.rank free_input_dims = input_rank - self._num_summed_dimensions output_dims = len(self._output_shape) self._einsum_string = self._build_einsum_string(free_input_dims, self._num_summed_dimensions, output_dims) # This is only saved for testing purposes. self._kernel_shape = ( input_shape[free_input_dims:].concatenate(self._output_shape)) self._kernel = self.add_weight( "kernel", shape=self._kernel_shape, initializer=self._kernel_initializer, regularizer=self._kernel_regularizer, constraint=self._kernel_constraint, dtype=self.dtype, trainable=True) if self._use_bias: self._bias = self.add_weight( "bias", shape=self._output_shape, initializer=self._bias_initializer, regularizer=self._bias_regularizer, constraint=self._bias_constraint, dtype=self.dtype, trainable=True) else: self._bias = None super(DenseEinsum, self).build(input_shape) def get_config(self): config = { "output_shape": self._output_shape, "activation": tf.keras.activations.serialize(self._activation), "use_bias": self._use_bias, "kernel_initializer": tf.keras.initializers.serialize(self._kernel_initializer), "bias_initializer": tf.keras.initializers.serialize(self._bias_initializer), "kernel_regularizer": tf.keras.regularizers.serialize(self._kernel_regularizer), "bias_regularizer": tf.keras.regularizers.serialize(self._bias_regularizer), "activity_regularizer": tf.keras.regularizers.serialize(self._activity_regularizer), "kernel_constraint": tf.keras.constraints.serialize(self._kernel_constraint), "bias_constraint": tf.keras.constraints.serialize(self._bias_constraint) } base_config = super(DenseEinsum, self).get_config() return dict(list(base_config.items()) + list(config.items())) def call(self, inputs): ret = tf.einsum(self._einsum_string, inputs, self._kernel) if self._use_bias: ret += self._bias if self._activation is not None: ret = self._activation(ret) return ret
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/official/nlp/modeling/layers/dense_einsum.py
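A sketch (assumes TensorFlow 2.x; the sizes are illustrative) of the 3D case the transformer block below relies on: num_summed_dimensions=2 contracts the (heads, head_size) pair back to a single hidden dimension via "abcd,cde->abe".

import tensorflow as tf
from official.nlp.modeling.layers import dense_einsum

# (batch, seq, num_heads, head_size) -> (batch, seq, hidden_size)
per_head_output = tf.keras.Input(shape=(128, 12, 64))
projection = dense_einsum.DenseEinsum(output_shape=768, num_summed_dimensions=2)
hidden = projection(per_head_output)

print(projection._einsum_string)   # "abcd,cde->abe", built on first call
print(hidden.shape)                # (None, 128, 768)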
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Keras-based transformer block layer.""" from __future__ import absolute_import from __future__ import division # from __future__ import google_type_annotations from __future__ import print_function import tensorflow as tf from official.nlp.modeling.layers import attention from official.nlp.modeling.layers import dense_einsum # @tf.keras.utils.register_keras_serializable(package="Text") class Transformer(tf.keras.layers.Layer): """Transformer layer. This layer implements the Transformer from "Attention Is All You Need". (https://arxiv.org/abs/1706.03762). Attributes: num_attention_heads: Number of attention heads. intermediate_size: Size of the intermediate layer. intermediate_activation: Activation for the intermediate layer. dropout_rate: Dropout probability for the post-attention and output dropout. attention_dropout_rate: Dropout probability within the attention layer. kernel_initializer: Initializer for dense layer kernels. bias_initializer: Initializer for dense layer biases. kernel_regularizer: Regularizer for dense layer kernels. bias_regularizer: Regularizer for dense layer biases. activity_regularizer: Regularizer for dense layer activity. kernel_constraint: Constraint for dense layer kernels. bias_constraint: Constraint for dense layer biases.
""" def __init__(self, num_attention_heads, intermediate_size, intermediate_activation, dropout_rate=0.0, attention_dropout_rate=0.0, kernel_initializer="glorot_uniform", bias_initializer="zeros", kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, **kwargs): super(Transformer, self).__init__(**kwargs) self._num_heads = num_attention_heads self._intermediate_size = intermediate_size self._intermediate_activation = intermediate_activation self._attention_dropout_rate = attention_dropout_rate self._dropout_rate = dropout_rate self._kernel_initializer = tf.keras.initializers.get(kernel_initializer) self._bias_initializer = tf.keras.initializers.get(bias_initializer) self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer) self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer) self._kernel_constraint = tf.keras.constraints.get(kernel_constraint) self._bias_constraint = tf.keras.constraints.get(bias_constraint) def build(self, input_shape): input_tensor = input_shape[0] if len(input_shape) == 2 else input_shape input_tensor_shape = tf.TensorShape(input_tensor) if len(input_tensor_shape) != 3: raise ValueError("TransformerLayer expects a three-dimensional input of " "shape [batch, sequence, width].") batch_size, sequence_length, hidden_size = input_tensor_shape if len(input_shape) == 2: mask_tensor_shape = tf.TensorShape(input_shape[1]) expected_mask_tensor_shape = tf.TensorShape( [batch_size, sequence_length, sequence_length]) if not expected_mask_tensor_shape.is_compatible_with(mask_tensor_shape): raise ValueError("When passing a mask tensor to TransformerLayer, the " "mask tensor must be of shape [batch, " "sequence_length, sequence_length] (here %s). Got a " "mask tensor of shape %s." 
% (expected_mask_tensor_shape, mask_tensor_shape)) if hidden_size % self._num_heads != 0: raise ValueError( "The input size (%d) is not a multiple of the number of attention " "heads (%d)" % (hidden_size, self._num_heads)) self._attention_head_size = int(hidden_size // self._num_heads) self._attention_layer = attention.Attention( num_heads=self._num_heads, head_size=self._attention_head_size, dropout_rate=self._attention_dropout_rate, kernel_initializer=self._kernel_initializer, bias_initializer=self._bias_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer, activity_regularizer=self._activity_regularizer, kernel_constraint=self._kernel_constraint, bias_constraint=self._bias_constraint, name="self_attention") self._attention_output_dense = dense_einsum.DenseEinsum( output_shape=hidden_size, num_summed_dimensions=2, kernel_initializer=self._kernel_initializer, bias_initializer=self._bias_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer, activity_regularizer=self._activity_regularizer, kernel_constraint=self._kernel_constraint, bias_constraint=self._bias_constraint, name="self_attention_output") self._attention_dropout = tf.keras.layers.Dropout(rate=self._dropout_rate) self._attention_layer_norm = ( tf.keras.layers.LayerNormalization( name="self_attention_layer_norm", axis=-1, epsilon=1e-12, dtype=tf.float32)) self._intermediate_dense = dense_einsum.DenseEinsum( output_shape=self._intermediate_size, activation=None, kernel_initializer=self._kernel_initializer, bias_initializer=self._bias_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer, activity_regularizer=self._activity_regularizer, kernel_constraint=self._kernel_constraint, bias_constraint=self._bias_constraint, name="intermediate") self._intermediate_activation_layer = tf.keras.layers.Activation( self._intermediate_activation) self._output_dense = dense_einsum.DenseEinsum( output_shape=hidden_size, kernel_initializer=self._kernel_initializer, bias_initializer=self._bias_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer, activity_regularizer=self._activity_regularizer, kernel_constraint=self._kernel_constraint, bias_constraint=self._bias_constraint, name="output") self._output_dropout = tf.keras.layers.Dropout(rate=self._dropout_rate) self._output_layer_norm = tf.keras.layers.LayerNormalization( name="output_layer_norm", axis=-1, epsilon=1e-12, dtype=tf.float32) super(Transformer, self).build(input_shape) def get_config(self): config = { "num_attention_heads": self._num_heads, "intermediate_size": self._intermediate_size, "intermediate_activation": self._intermediate_activation, "dropout_rate": self._dropout_rate, "attention_dropout_rate": self._attention_dropout_rate, "kernel_initializer": tf.keras.initializers.serialize(self._kernel_initializer), "bias_initializer": tf.keras.initializers.serialize(self._bias_initializer), "kernel_regularizer": tf.keras.regularizers.serialize(self._kernel_regularizer), "bias_regularizer": tf.keras.regularizers.serialize(self._bias_regularizer), "activity_regularizer": tf.keras.regularizers.serialize(self._activity_regularizer), "kernel_constraint": tf.keras.constraints.serialize(self._kernel_constraint), "bias_constraint": tf.keras.constraints.serialize(self._bias_constraint) } base_config = super(Transformer, self).get_config() return dict(list(base_config.items()) + list(config.items())) def call(self, inputs): if 
isinstance(inputs, (list, tuple)) and len(inputs) == 2: input_tensor, attention_mask = inputs else: input_tensor, attention_mask = (inputs, None) attention_inputs = [input_tensor, input_tensor] if attention_mask is not None: attention_inputs.append(attention_mask) attention_output = self._attention_layer(attention_inputs) attention_output = self._attention_output_dense(attention_output) attention_output = self._attention_dropout(attention_output) # Use float32 in keras layer norm and the gelu activation in the # intermediate dense layer for numeric stability if self.dtype == tf.float16: input_tensor = tf.cast(input_tensor, tf.float32) attention_output = tf.cast(attention_output, tf.float32) attention_output = self._attention_layer_norm(input_tensor + attention_output) intermediate_output = self._intermediate_dense(attention_output) if self.dtype == tf.float16: # Casts to float32 so that activation is done in float32. intermediate_output = tf.cast(intermediate_output, tf.float32) intermediate_output = self._intermediate_activation_layer( intermediate_output) intermediate_output = tf.cast(intermediate_output, tf.float16) else: intermediate_output = self._intermediate_activation_layer( intermediate_output) layer_output = self._output_dense(intermediate_output) layer_output = self._output_dropout(layer_output) # Use float32 in keras layer norm for numeric stability if self.dtype == tf.float16: layer_output = tf.cast(layer_output, tf.float32) layer_output = self._output_layer_norm(layer_output + attention_output) if self.dtype == tf.float16: layer_output = tf.cast(layer_output, tf.float16) return layer_output
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/official/nlp/modeling/layers/transformer.py
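A minimal wiring sketch for the block above (assumes TensorFlow 2.x; sizes are illustrative): the optional second input is a [batch, seq, seq] attention mask, for example the output of SelfAttentionMask.

import tensorflow as tf
from official.nlp.modeling.layers import transformer

seq_length, hidden_size = 64, 768
data = tf.keras.Input(shape=(seq_length, hidden_size))
mask = tf.keras.Input(shape=(seq_length, seq_length))

block = transformer.Transformer(
    num_attention_heads=12,
    intermediate_size=3072,
    intermediate_activation="relu",
    dropout_rate=0.1,
    attention_dropout_rate=0.1)

output = block([data, mask])   # same shape as `data`: (None, 64, 768)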
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Keras-based positional embedding layer.""" from __future__ import absolute_import from __future__ import division # from __future__ import google_type_annotations from __future__ import print_function import tensorflow as tf from official.modeling import tf_utils # @tf.keras.utils.register_keras_serializable(package="Text") class PositionEmbedding(tf.keras.layers.Layer): """Creates a positional embedding. This layer creates a positional embedding as described in "BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding" (https://arxiv.org/abs/1810.04805). This layer can be set up to either create a statically shaped slice or a dynamically shaped slice. If `use_dynamic_slicing` is True, the input tensor can have a dynamic 1st dimension, while if `use_dynamic_slicing` is False the input size must be fixed. Attributes: use_dynamic_slicing: Whether to use the dynamic slicing path. max_sequence_length: The maximum size of the dynamic sequence. Only applicable if `use_dynamic_slicing` is True. initializer: The initializer to use for the embedding weights. Defaults to "glorot_uniform". """ def __init__(self, initializer="glorot_uniform", use_dynamic_slicing=False, max_sequence_length=None, **kwargs): # We need to have a default dtype of float32, since the inputs (which Keras # usually uses to infer the dtype) will always be int32. if "dtype" not in kwargs: kwargs["dtype"] = "float32" super(PositionEmbedding, self).__init__(**kwargs) if use_dynamic_slicing and max_sequence_length is None: raise ValueError( "If `use_dynamic_slicing` is True, `max_sequence_length` must be set." ) self._max_sequence_length = max_sequence_length self._initializer = tf.keras.initializers.get(initializer) self._use_dynamic_slicing = use_dynamic_slicing def get_config(self): config = { "max_sequence_length": self._max_sequence_length, "initializer": tf.keras.initializers.serialize(self._initializer), "use_dynamic_slicing": self._use_dynamic_slicing, } base_config = super(PositionEmbedding, self).get_config() return dict(list(base_config.items()) + list(config.items())) def build(self, input_shape): """Implements build() for the layer.""" dimension_list = input_shape.as_list() if len(dimension_list) != 3: raise ValueError("PositionEmbedding expects a 3-dimensional input tensor " "of shape [batch, sequence, width]") seq_length = dimension_list[1] width = dimension_list[2] # If we are not using dynamic slicing, we must assume that the sequence # length is fixed and max_sequence_length should not be specified. 
if not self._use_dynamic_slicing: if seq_length is None: raise ValueError( "PositionEmbedding must have `use_dynamic_slicing` set " "to True (and max_sequence_length set) when the " "sequence (1st) dimension of the input is None.") if self._max_sequence_length is not None: raise ValueError( "When `use_dynamic_slicing` is False, max_sequence_length should " "not be specified and we ought to use seq_length to get the " "variable shape.") if self._max_sequence_length is not None: weight_sequence_length = self._max_sequence_length else: weight_sequence_length = seq_length self._position_embeddings = self.add_weight( "embeddings", shape=[weight_sequence_length, width], initializer=self._initializer) super(PositionEmbedding, self).build(input_shape) def call(self, inputs): """Implements call() for the layer.""" if self._use_dynamic_slicing: input_shape = tf_utils.get_shape_list(inputs, expected_rank=3) seq_length = input_shape[1] width = input_shape[2] position_embeddings = tf.expand_dims( tf.slice(self._position_embeddings, [0, 0], [seq_length, width]), axis=0) else: position_embeddings = tf.expand_dims(self._position_embeddings, axis=0) return position_embeddings
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/official/nlp/modeling/layers/position_embedding.py
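A sketch of the two modes described in the docstring above (assumes TensorFlow 2.x; it peeks at the private _position_embeddings weight purely for illustration): the static path sizes its weight from the fixed sequence length, while the dynamic path allocates max_sequence_length rows and slices at call time.

import tensorflow as tf
from official.nlp.modeling.layers import position_embedding

# Static: sequence length fixed at 21, weight shape (21, 30).
static_layer = position_embedding.PositionEmbedding()
_ = static_layer(tf.keras.Input(shape=(21, 30)))
print(static_layer._position_embeddings.shape)    # (21, 30)

# Dynamic: sequence dimension left as None, weight shape (512, 30).
dynamic_layer = position_embedding.PositionEmbedding(
    use_dynamic_slicing=True, max_sequence_length=512)
_ = dynamic_layer(tf.keras.Input(shape=(None, 30)))
print(dynamic_layer._position_embeddings.shape)   # (512, 30)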
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Keras-based transformer scaffold layer.""" from __future__ import absolute_import from __future__ import division # from __future__ import google_type_annotations from __future__ import print_function import tensorflow as tf from official.nlp.modeling.layers import attention from official.nlp.modeling.layers import dense_einsum # @tf.keras.utils.register_keras_serializable(package="Text") class TransformerScaffold(tf.keras.layers.Layer): """Transformer scaffold layer. This layer implements the Transformer from "Attention Is All You Need". (https://arxiv.org/abs/1706.03762), with a customizable attention layer option. Users can pass a class to `attention_cls` and associated config to `attention_cfg`, in which case the scaffold will instantiate the class with the config, or pass a class instance to `attention_cls`. Attributes: num_attention_heads: Number of attention heads. intermediate_size: Size of the intermediate layer. intermediate_activation: Activation for the intermediate layer. attention_cls: A class to instantiate, or a layer instance. attention_cfg: The config with which to instantiate `attention_cls`. Ignored if attention_cls is a layer instance. dropout_rate: Dropout probability for the post-attention and output dropout. attention_dropout_rate: Dropout probability within the attention layer. kernel_initializer: Initializer for dense layer kernels. bias_initializer: Initializer for dense layer biases. kernel_regularizer: Regularizer for dense layer kernels. bias_regularizer: Regularizer for dense layer biases. activity_regularizer: Regularizer for dense layer activity. kernel_constraint: Constraint for dense layer kernels. bias_constraint: Constraint for dense layer biases.
""" def __init__(self, num_attention_heads, intermediate_size, intermediate_activation, attention_cls=attention.Attention, attention_cfg=None, dropout_rate=0.0, attention_dropout_rate=0.0, kernel_initializer="glorot_uniform", bias_initializer="zeros", kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, **kwargs): super(TransformerScaffold, self).__init__(**kwargs) self._attention_cfg = attention_cfg self._attention_cls = attention_cls self._num_heads = num_attention_heads self._intermediate_size = intermediate_size self._intermediate_activation = intermediate_activation self._attention_dropout_rate = attention_dropout_rate self._dropout_rate = dropout_rate self._kernel_initializer = tf.keras.initializers.get(kernel_initializer) self._bias_initializer = tf.keras.initializers.get(bias_initializer) self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer) self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer) self._kernel_constraint = tf.keras.constraints.get(kernel_constraint) self._bias_constraint = tf.keras.constraints.get(bias_constraint) def build(self, input_shape): input_tensor = input_shape[0] if len(input_shape) == 2 else input_shape input_tensor_shape = tf.TensorShape(input_tensor) if len(input_tensor_shape) != 3: raise ValueError( "TransformerScaffold expects a three-dimensional input of " "shape [batch, sequence, width].") batch_size, sequence_length, hidden_size = input_tensor_shape if len(input_shape) == 2: mask_tensor_shape = tf.TensorShape(input_shape[1]) expected_mask_tensor_shape = tf.TensorShape( [batch_size, sequence_length, sequence_length]) if not expected_mask_tensor_shape.is_compatible_with(mask_tensor_shape): raise ValueError("When passing a mask tensor to TransformerLayer, the " "mask tensor must be of shape [batch, " "sequence_length, sequence_length] (here %s). Got a " "mask tensor of shape %s." 
% (expected_mask_tensor_shape, mask_tensor_shape)) if hidden_size % self._num_heads != 0: raise ValueError( "The input size (%d) is not a multiple of the number of attention " "heads (%d)" % (hidden_size, self._num_heads)) self._attention_head_size = int(hidden_size // self._num_heads) if isinstance(self._attention_cls, tf.keras.layers.Layer): self._attention_layer = self._attention_cls else: if self._attention_cfg is None: attention_cfg = { "num_heads": self._num_heads, "head_size": self._attention_head_size, "dropout_rate": self._attention_dropout_rate, "kernel_initializer": self._kernel_initializer, "bias_initializer": self._bias_initializer, "kernel_regularizer": self._kernel_regularizer, "bias_regularizer": self._bias_regularizer, "activity_regularizer": self._activity_regularizer, "kernel_constraint": self._kernel_constraint, "bias_constraint": self._bias_constraint, "name": "self_attention" } else: attention_cfg = self._attention_cfg self._attention_layer = self._attention_cls(**attention_cfg) self._attention_output_dense = dense_einsum.DenseEinsum( output_shape=hidden_size, num_summed_dimensions=2, kernel_initializer=self._kernel_initializer, bias_initializer=self._bias_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer, activity_regularizer=self._activity_regularizer, kernel_constraint=self._kernel_constraint, bias_constraint=self._bias_constraint, name="self_attention_output") self._attention_dropout = tf.keras.layers.Dropout(rate=self._dropout_rate) self._attention_layer_norm = ( tf.keras.layers.LayerNormalization( name="self_attention_layer_norm", axis=-1, epsilon=1e-12, dtype=tf.float32)) self._intermediate_dense = dense_einsum.DenseEinsum( output_shape=self._intermediate_size, activation=self._intermediate_activation, kernel_initializer=self._kernel_initializer, bias_initializer=self._bias_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer, activity_regularizer=self._activity_regularizer, kernel_constraint=self._kernel_constraint, bias_constraint=self._bias_constraint, dtype=tf.float32, # This layer is always float32 for numeric stability. 
name="intermediate") self._output_dense = dense_einsum.DenseEinsum( output_shape=hidden_size, kernel_initializer=self._kernel_initializer, bias_initializer=self._bias_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer, activity_regularizer=self._activity_regularizer, kernel_constraint=self._kernel_constraint, bias_constraint=self._bias_constraint, name="output") self._output_dropout = tf.keras.layers.Dropout(rate=self._dropout_rate) self._output_layer_norm = tf.keras.layers.LayerNormalization( name="output_layer_norm", axis=-1, epsilon=1e-12, dtype=tf.float32) super(TransformerScaffold, self).build(input_shape) def get_config(self): config = { "attention_cls": self._attention_layer, "num_attention_heads": self._num_heads, "intermediate_size": self._intermediate_size, "intermediate_activation": self._intermediate_activation, "dropout_rate": self._dropout_rate, "attention_dropout_rate": self._attention_dropout_rate, "kernel_initializer": tf.keras.initializers.serialize(self._kernel_initializer), "bias_initializer": tf.keras.initializers.serialize(self._bias_initializer), "kernel_regularizer": tf.keras.regularizers.serialize(self._kernel_regularizer), "bias_regularizer": tf.keras.regularizers.serialize(self._bias_regularizer), "activity_regularizer": tf.keras.regularizers.serialize(self._activity_regularizer), "kernel_constraint": tf.keras.constraints.serialize(self._kernel_constraint), "bias_constraint": tf.keras.constraints.serialize(self._bias_constraint) } base_config = super(TransformerScaffold, self).get_config() return dict(list(base_config.items()) + list(config.items())) def call(self, inputs): if isinstance(inputs, (list, tuple)) and len(inputs) == 2: input_tensor, attention_mask = inputs else: input_tensor, attention_mask = (inputs, None) attention_inputs = [input_tensor, input_tensor] if attention_mask is not None: attention_inputs.append(attention_mask) attention_output = self._attention_layer(attention_inputs) attention_output = self._attention_output_dense(attention_output) attention_output = self._attention_dropout(attention_output) # Use float32 in keras layer norm and the gelu activation in the # intermediate dense layer for numeric stability if self.dtype == tf.float16: input_tensor = tf.cast(input_tensor, tf.float32) attention_output = tf.cast(attention_output, tf.float32) attention_output = self._attention_layer_norm(input_tensor + attention_output) intermediate_output = self._intermediate_dense(attention_output) if self.dtype == tf.float16: intermediate_output = tf.cast(intermediate_output, tf.float16) layer_output = self._output_dense(intermediate_output) layer_output = self._output_dropout(layer_output) # Use float32 in keras layer norm for numeric stability if self.dtype == tf.float16: layer_output = tf.cast(layer_output, tf.float32) layer_output = self._output_layer_norm(layer_output + attention_output) if self.dtype == tf.float16: layer_output = tf.cast(layer_output, tf.float16) return layer_output
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/official/nlp/modeling/layers/transformer_scaffold.py
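A sketch of the class-plus-config customization path described above (assumes TensorFlow 2.x; sizes are illustrative): the scaffold instantiates attention_cls with attention_cfg during build.

import tensorflow as tf
from official.nlp.modeling.layers import attention
from official.nlp.modeling.layers import transformer_scaffold

scaffold = transformer_scaffold.TransformerScaffold(
    num_attention_heads=12,
    intermediate_size=3072,
    intermediate_activation="relu",
    attention_cls=attention.Attention,
    attention_cfg={"num_heads": 12, "head_size": 64, "dropout_rate": 0.1})

data = tf.keras.Input(shape=(64, 768))
output = scaffold(data)        # (None, 64, 768)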
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Keras-based transformer block layer.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import json import numpy as np import tensorflow as tf from tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import from official.nlp.modeling.layers import attention from official.nlp.modeling.layers import transformer_scaffold # Test class that wraps a standard attention layer. If this layer is called # at any point, the list passed to the config object will be filled with a # boolean 'True'. We register this class as a Keras serializable so we can # test serialization below. # @tf.keras.utils.register_keras_serializable(package='TestOnly') class ValidatedAttentionLayer(attention.Attention): def __init__(self, call_list, **kwargs): super(ValidatedAttentionLayer, self).__init__(**kwargs) self.list = call_list def call(self, inputs): self.list.append(True) return super(ValidatedAttentionLayer, self).call(inputs) def get_config(self): config = super(ValidatedAttentionLayer, self).get_config() config['call_list'] = [] return config # This decorator runs the test in V1, V2-Eager, and V2-Functional mode. It # guarantees forward compatibility of this code for the V2 switchover. @keras_parameterized.run_all_keras_modes class TransformerLayerTest(keras_parameterized.TestCase): def test_layer_creation(self): sequence_length = 21 width = 80 call_list = [] attention_layer_cfg = { 'num_heads': 10, 'head_size': 8, 'call_list': call_list, } test_layer = transformer_scaffold.TransformerScaffold( attention_cls=ValidatedAttentionLayer, attention_cfg=attention_layer_cfg, num_attention_heads=10, intermediate_size=2048, intermediate_activation='relu') # Create a 3-dimensional input (the first dimension is implicit). data_tensor = tf.keras.Input(shape=(sequence_length, width)) output_tensor = test_layer(data_tensor) # The default output of a transformer layer should be the same as the input. self.assertEqual(data_tensor.shape.as_list(), output_tensor.shape.as_list()) # If call_list[0] exists and is True, the passed layer class was # instantiated from the given config properly. self.assertNotEmpty(call_list) self.assertTrue(call_list[0], "The passed layer class wasn't instantiated.") def test_layer_creation_with_mask(self): sequence_length = 21 width = 80 call_list = [] attention_layer_cfg = { 'num_heads': 10, 'head_size': 8, 'call_list': call_list, } test_layer = transformer_scaffold.TransformerScaffold( attention_cls=ValidatedAttentionLayer, attention_cfg=attention_layer_cfg, num_attention_heads=10, intermediate_size=2048, intermediate_activation='relu') # Create a 3-dimensional input (the first dimension is implicit). 
data_tensor = tf.keras.Input(shape=(sequence_length, width)) # Create a 2-dimensional input (the first dimension is implicit). mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length)) output_tensor = test_layer([data_tensor, mask_tensor]) # The default output of a transformer layer should be the same as the input. self.assertEqual(data_tensor.shape.as_list(), output_tensor.shape.as_list()) # If call_list[0] exists and is True, the passed layer class was # instantiated from the given config properly. self.assertNotEmpty(call_list) self.assertTrue(call_list[0], "The passed layer class wasn't instantiated.") def test_layer_creation_with_incorrect_mask_fails(self): sequence_length = 21 width = 80 call_list = [] attention_layer_cfg = { 'num_heads': 10, 'head_size': 8, 'call_list': call_list, } test_layer = transformer_scaffold.TransformerScaffold( attention_cls=ValidatedAttentionLayer, attention_cfg=attention_layer_cfg, num_attention_heads=10, intermediate_size=2048, intermediate_activation='relu') # Create a 3-dimensional input (the first dimension is implicit). data_tensor = tf.keras.Input(shape=(sequence_length, width)) # Create a 2-dimensional input (the first dimension is implicit). mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length - 3)) with self.assertRaisesRegex(ValueError, 'When passing a mask tensor.*'): _ = test_layer([data_tensor, mask_tensor]) def test_layer_invocation(self): sequence_length = 21 width = 80 call_list = [] attention_layer_cfg = { 'num_heads': 10, 'head_size': 8, 'call_list': call_list, } test_layer = transformer_scaffold.TransformerScaffold( attention_cls=ValidatedAttentionLayer, attention_cfg=attention_layer_cfg, num_attention_heads=10, intermediate_size=2048, intermediate_activation='relu') # Create a 3-dimensional input (the first dimension is implicit). data_tensor = tf.keras.Input(shape=(sequence_length, width)) output_tensor = test_layer(data_tensor) # Create a model from the test layer. model = tf.keras.Model(data_tensor, output_tensor) # Invoke the model on test data. We can't validate the output data itself # (the NN is too complex) but this will rule out structural runtime errors. batch_size = 6 input_data = 10 * np.random.random_sample( (batch_size, sequence_length, width)) _ = model.predict(input_data) # If call_list[0] exists and is True, the passed layer class was # instantiated from the given config properly. self.assertNotEmpty(call_list) self.assertTrue(call_list[0], "The passed layer class wasn't instantiated.") def test_layer_invocation_with_mask(self): sequence_length = 21 width = 80 call_list = [] attention_layer_cfg = { 'num_heads': 10, 'head_size': 8, 'call_list': call_list, } test_layer = transformer_scaffold.TransformerScaffold( attention_cls=ValidatedAttentionLayer, attention_cfg=attention_layer_cfg, num_attention_heads=10, intermediate_size=2048, intermediate_activation='relu') # Create a 3-dimensional input (the first dimension is implicit). data_tensor = tf.keras.Input(shape=(sequence_length, width)) # Create a 2-dimensional input (the first dimension is implicit). mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length)) output_tensor = test_layer([data_tensor, mask_tensor]) # Create a model from the test layer. model = tf.keras.Model([data_tensor, mask_tensor], output_tensor) # Invoke the model on test data. We can't validate the output data itself # (the NN is too complex) but this will rule out structural runtime errors. 
batch_size = 6 input_data = 10 * np.random.random_sample( (batch_size, sequence_length, width)) # The attention mask should be of shape (batch, from_seq_len, to_seq_len), # which here is (batch, sequence_length, sequence_length) mask_data = np.random.randint( 2, size=(batch_size, sequence_length, sequence_length)) _ = model.predict([input_data, mask_data]) # If call_list[0] exists and is True, the passed layer class was # instantiated from the given config properly. self.assertNotEmpty(call_list) self.assertTrue(call_list[0], "The passed layer class wasn't instantiated.") def test_layer_invocation_with_float16_dtype(self): sequence_length = 21 width = 80 call_list = [] attention_layer_cfg = { 'num_heads': 10, 'head_size': 8, 'call_list': call_list, } test_layer = transformer_scaffold.TransformerScaffold( attention_cls=ValidatedAttentionLayer, attention_cfg=attention_layer_cfg, num_attention_heads=10, intermediate_size=2048, intermediate_activation='relu', dtype='float16') # Create a 3-dimensional input (the first dimension is implicit). data_tensor = tf.keras.Input( shape=(sequence_length, width), dtype=tf.float16) # Create a 2-dimensional input (the first dimension is implicit). mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length)) output_tensor = test_layer([data_tensor, mask_tensor]) # Create a model from the test layer. model = tf.keras.Model([data_tensor, mask_tensor], output_tensor) # Invoke the model on test data. We can't validate the output data itself # (the NN is too complex) but this will rule out structural runtime errors. batch_size = 6 input_data = (10 * np.random.random_sample( (batch_size, sequence_length, width))).astype(np.float16) # The attention mask should be of shape (batch, from_seq_len, to_seq_len), # which here is (batch, sequence_length, sequence_length) mask_data = np.random.randint( 2, size=(batch_size, sequence_length, sequence_length)) _ = model.predict([input_data, mask_data]) # If call_list[0] exists and is True, the passed layer class was # instantiated from the given config properly. self.assertNotEmpty(call_list) self.assertTrue(call_list[0], "The passed layer class wasn't instantiated.") def test_transform_with_initializer(self): sequence_length = 21 width = 80 call_list = [] attention_layer_cfg = { 'num_heads': 10, 'head_size': 8, 'call_list': call_list, } test_layer = transformer_scaffold.TransformerScaffold( attention_cls=ValidatedAttentionLayer, attention_cfg=attention_layer_cfg, num_attention_heads=10, intermediate_size=2048, intermediate_activation='relu', kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02)) # Create a 3-dimensional input (the first dimension is implicit). data_tensor = tf.keras.Input(shape=(sequence_length, width)) output = test_layer(data_tensor) # The default output of a transformer layer should be the same as the input. self.assertEqual(data_tensor.shape.as_list(), output.shape.as_list()) # If call_list[0] exists and is True, the passed layer class was # instantiated from the given config properly. 
self.assertNotEmpty(call_list) self.assertTrue(call_list[0]) def test_layer_restoration_from_config(self): sequence_length = 21 width = 80 call_list = [] attention_layer_cfg = { 'num_heads': 10, 'head_size': 8, 'call_list': call_list, 'name': 'test_layer', } test_layer = transformer_scaffold.TransformerScaffold( attention_cls=ValidatedAttentionLayer, attention_cfg=attention_layer_cfg, num_attention_heads=10, intermediate_size=2048, intermediate_activation='relu') # Create a 3-dimensional input (the first dimension is implicit). data_tensor = tf.keras.Input(shape=(sequence_length, width)) # Create a 2-dimensional input (the first dimension is implicit). mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length)) output_tensor = test_layer([data_tensor, mask_tensor]) # Create a model from the test layer. model = tf.keras.Model([data_tensor, mask_tensor], output_tensor) # Invoke the model on test data. We can't validate the output data itself # (the NN is too complex) but this will rule out structural runtime errors. batch_size = 6 input_data = 10 * np.random.random_sample( (batch_size, sequence_length, width)) # The attention mask should be of shape (batch, from_seq_len, to_seq_len), # which here is (batch, sequence_length, sequence_length) mask_data = np.random.randint( 2, size=(batch_size, sequence_length, sequence_length)) pre_serialization_output = model.predict([input_data, mask_data]) # Serialize the model config. Pass the serialized data through json to # ensure that we can serialize this layer to disk. serialized_data = json.dumps(model.get_config()) post_string_serialized_data = json.loads(serialized_data) # Create a new model from the old config, and copy the weights. These models # should have identical outputs. new_model = tf.keras.Model.from_config(post_string_serialized_data) new_model.set_weights(model.get_weights()) output = new_model.predict([input_data, mask_data]) self.assertAllClose(pre_serialization_output, output) # If the layer was configured correctly, it should have a list attribute # (since it should have the custom class and config passed to it). new_model.summary() new_call_list = new_model.get_layer( name='transformer_scaffold')._attention_layer.list self.assertNotEmpty(new_call_list) self.assertTrue(new_call_list[0], "The passed layer class wasn't instantiated.") if __name__ == '__main__': tf.test.main()
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/official/nlp/modeling/layers/transformer_scaffold_test.py
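The tests above exercise the class-plus-config path. Per the scaffold's docstring and the isinstance check in its build(), an already-constructed layer instance can be passed instead; a sketch of that path (assumes TensorFlow 2.x, not covered by the tests above, sizes are illustrative):

import tensorflow as tf
from official.nlp.modeling.layers import attention
from official.nlp.modeling.layers import transformer_scaffold

custom_attention = attention.Attention(num_heads=8, head_size=32,
                                       name="custom_self_attention")
scaffold = transformer_scaffold.TransformerScaffold(
    num_attention_heads=8,
    intermediate_size=1024,
    intermediate_activation="relu",
    attention_cls=custom_attention)   # instance: attention_cfg is ignored

output = scaffold(tf.keras.Input(shape=(16, 256)))   # (None, 16, 256)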
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for the attention layer.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import tensorflow as tf from tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import from official.nlp.modeling.layers import attention # This decorator runs the test in V1, V2-Eager, and V2-Functional mode. It # guarantees forward compatibility of this code for the V2 switchover. @keras_parameterized.run_all_keras_modes class AttentionLayerTest(keras_parameterized.TestCase): def test_non_masked_attention(self): """Test that the attention layer can be created without a mask tensor.""" test_layer = attention.Attention(num_heads=12, head_size=64) # Create a 3-dimensional input (the first dimension is implicit). from_tensor = tf.keras.Input(shape=(40, 80)) to_tensor = tf.keras.Input(shape=(20, 80)) output = test_layer([from_tensor, to_tensor]) self.assertEqual(output.shape.as_list(), [None, 40, 12, 64]) def test_non_masked_self_attention(self): """Test with one input (self-attenntion) and no mask tensor.""" test_layer = attention.Attention(num_heads=12, head_size=64) # Create a 3-dimensional input (the first dimension is implicit). from_tensor = tf.keras.Input(shape=(40, 80)) output = test_layer([from_tensor, from_tensor]) self.assertEqual(output.shape.as_list(), [None, 40, 12, 64]) def test_masked_attention(self): """Test with a mask tensor.""" test_layer = attention.Attention(num_heads=2, head_size=2) # Create a 3-dimensional input (the first dimension is implicit). from_tensor = tf.keras.Input(shape=(4, 8)) to_tensor = tf.keras.Input(shape=(2, 8)) mask_tensor = tf.keras.Input(shape=(4, 2)) output = test_layer([from_tensor, to_tensor, mask_tensor]) # Create a model containing the test layer. model = tf.keras.Model([from_tensor, to_tensor, mask_tensor], output) # Generate data for the input (non-mask) tensors. from_data = 10 * np.random.random_sample((3, 4, 8)) to_data = 10 * np.random.random_sample((3, 2, 8)) # Invoke the data with a random set of mask data. This should mask at least # one element. mask_data = np.random.randint(2, size=(3, 4, 2)) masked_output_data = model.predict([from_data, to_data, mask_data]) # Invoke the same data, but with a null mask (where no elements are masked). null_mask_data = np.ones((3, 4, 2)) unmasked_output_data = model.predict([from_data, to_data, null_mask_data]) # Because one data is masked and one is not, the outputs should not be the # same. self.assertNotAllClose(masked_output_data, unmasked_output_data) def test_initializer(self): """Test with a specified initializer.""" test_layer = attention.Attention( num_heads=12, head_size=64, kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02)) # Create a 3-dimensional input (the first dimension is implicit). 
from_tensor = tf.keras.Input(shape=(40, 80)) output = test_layer([from_tensor, from_tensor]) self.assertEqual(output.shape.as_list(), [None, 40, 12, 64]) def _create_cache(batch_size, init_decode_length, num_heads, head_size): return { "key": tf.zeros([batch_size, init_decode_length, num_heads, head_size], dtype=tf.float32), "value": tf.zeros([batch_size, init_decode_length, num_heads, head_size], dtype=tf.float32) } @keras_parameterized.run_all_keras_modes class CachedAttentionTest(keras_parameterized.TestCase): def test_masked_attention(self): """Test with a mask tensor.""" num_heads, head_size = 2, 2 # Create a 3-dimensional input (the first dimension is implicit). from_seq_length = 4 batch_size = 3 # GPU/CPU case. init_decode_length = 0 # Directly tests the keras layer. cache = _create_cache(batch_size, init_decode_length, num_heads, head_size) layer = attention.CachedAttention(num_heads=num_heads, head_size=head_size) # Generate data for the input (non-mask) tensors. from_data = tf.zeros((batch_size, from_seq_length, 8), dtype=np.float32) # Invoke the data with a random set of mask data. This should mask at least # one element. mask_data = np.random.randint( 2, size=(batch_size, from_seq_length, from_seq_length)) masked_output_data, cache = layer([from_data, from_data, mask_data, cache]) self.assertEqual(masked_output_data.shape, (3, 4, 2, 2)) self.assertEqual(cache["value"].shape, (3, 4, 2, 2)) # Tests inputs without cache. masked_output_data, cache = layer([from_data, from_data, mask_data]) self.assertEqual(masked_output_data.shape, (3, 4, 2, 2)) self.assertIsNone(cache) def test_padded_decode(self): """Test with a mask tensor.""" num_heads, head_size = 2, 2 from_seq_length = 4 # TPU decoding should pre-allocate the entire sequence. batch_size = 3 init_decode_length = from_seq_length # Directly tests the keras layer. cache = _create_cache(batch_size, init_decode_length, num_heads, head_size) layer = attention.CachedAttention(num_heads=num_heads, head_size=head_size) # Generate data for the input (non-mask) tensors. from_data = tf.zeros((batch_size, from_seq_length, 8), dtype=np.float32) decode_loop_step = 2 mask_data = np.random.randint( 2, size=(batch_size, from_seq_length, from_seq_length), dtype=np.int32) # Testing the invocation directly as Keras cannot consume inputs correctly. masked_output_data, cache = layer([from_data, from_data, mask_data, cache], decode_loop_step=decode_loop_step) self.assertEqual(masked_output_data.shape, (3, 4, 2, 2)) self.assertEqual(cache["value"].shape, (3, 4, 2, 2)) if __name__ == "__main__": tf.test.main()
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/official/nlp/modeling/layers/attention_test.py
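The cached-attention tests in the file above exercise a key/value cache for step-by-step decoding. Below is a minimal sketch of that call pattern, closely mirroring the padded-decode test; it assumes the `official.nlp.modeling.layers.attention` module from this repository is importable, and all shapes and values are illustrative only.

# Sketch of the cached-attention call pattern exercised in attention_test.py.
# Assumes `official.nlp.modeling.layers.attention` is on the Python path.
import numpy as np
import tensorflow as tf

from official.nlp.modeling.layers import attention

batch_size, seq_length, num_heads, head_size = 3, 4, 2, 2

# Pre-allocate the key/value cache, as the padded-decode (TPU) test does.
cache = {
    "key": tf.zeros([batch_size, seq_length, num_heads, head_size], tf.float32),
    "value": tf.zeros([batch_size, seq_length, num_heads, head_size], tf.float32),
}

layer = attention.CachedAttention(num_heads=num_heads, head_size=head_size)
from_data = tf.zeros((batch_size, seq_length, 8), dtype=tf.float32)
mask = np.ones((batch_size, seq_length, seq_length), dtype=np.int32)

# Decode one position: the layer writes the new key/value into the cache slot
# selected by `decode_loop_step` and returns the updated cache.
output, cache = layer([from_data, from_data, mask, cache], decode_loop_step=2)
print(output.shape)           # (3, 4, 2, 2)
print(cache["value"].shape)   # (3, 4, 2, 2)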
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Transformer-based text encoder network.""" from __future__ import absolute_import from __future__ import division # from __future__ import google_type_annotations from __future__ import print_function import tensorflow as tf from official.modeling import activations from official.nlp.modeling import layers @tf.keras.utils.register_keras_serializable(package='Text') class TransformerEncoder(tf.keras.Model): """Bi-directional Transformer-based encoder network. This network implements a bi-directional Transformer-based encoder as described in "BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding" (https://arxiv.org/abs/1810.04805). It includes the embedding lookups and transformer layers, but not the masked language model or classification task networks. The default values for this object are taken from the BERT-Base implementation in "BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding". Attributes: vocab_size: The size of the token vocabulary. hidden_size: The size of the transformer hidden layers. num_layers: The number of transformer layers. num_attention_heads: The number of attention heads for each transformer. The hidden size must be divisible by the number of attention heads. sequence_length: The sequence length that this encoder expects. If None, the sequence length is dynamic; if an integer, the encoder will require sequences padded to this length. max_sequence_length: The maximum sequence length that this encoder can consume. If None, max_sequence_length uses the value from sequence length. This determines the variable shape for positional embeddings. type_vocab_size: The number of types that the 'type_ids' input can take. intermediate_size: The intermediate size for the transformer layers. activation: The activation to use for the transformer layers. dropout_rate: The dropout rate to use for the transformer layers. attention_dropout_rate: The dropout rate to use for the attention layers within the transformer layers. initializer: The initialzer to use for all weights in this encoder. float_dtype: The dtype of this encoder. Can be 'float32' or 'float16'. 
""" def __init__(self, vocab_size, hidden_size=768, num_layers=12, num_attention_heads=12, sequence_length=512, max_sequence_length=None, type_vocab_size=16, intermediate_size=3072, activation=activations.gelu, dropout_rate=0.1, attention_dropout_rate=0.1, initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02), float_dtype='float32', **kwargs): activation = tf.keras.activations.get(activation) initializer = tf.keras.initializers.get(initializer) if not max_sequence_length: max_sequence_length = sequence_length self._self_setattr_tracking = False self._config_dict = { 'vocab_size': vocab_size, 'hidden_size': hidden_size, 'num_layers': num_layers, 'num_attention_heads': num_attention_heads, 'sequence_length': sequence_length, 'max_sequence_length': max_sequence_length, 'type_vocab_size': type_vocab_size, 'intermediate_size': intermediate_size, 'activation': tf.keras.activations.serialize(activation), 'dropout_rate': dropout_rate, 'attention_dropout_rate': attention_dropout_rate, 'initializer': tf.keras.initializers.serialize(initializer), 'float_dtype': float_dtype, } word_ids = tf.keras.layers.Input( shape=(sequence_length,), dtype=tf.int32, name='input_word_ids') mask = tf.keras.layers.Input( shape=(sequence_length,), dtype=tf.int32, name='input_mask') type_ids = tf.keras.layers.Input( shape=(sequence_length,), dtype=tf.int32, name='input_type_ids') self._embedding_layer = layers.OnDeviceEmbedding( vocab_size=vocab_size, embedding_width=hidden_size, initializer=initializer, name='word_embeddings') word_embeddings = self._embedding_layer(word_ids) # Always uses dynamic slicing for simplicity. self._position_embedding_layer = layers.PositionEmbedding( initializer=initializer, use_dynamic_slicing=True, max_sequence_length=max_sequence_length) position_embeddings = self._position_embedding_layer(word_embeddings) type_embeddings = ( layers.OnDeviceEmbedding( vocab_size=type_vocab_size, embedding_width=hidden_size, initializer=initializer, use_one_hot=True, name='type_embeddings')(type_ids)) embeddings = tf.keras.layers.Add()( [word_embeddings, position_embeddings, type_embeddings]) embeddings = ( tf.keras.layers.LayerNormalization( name='embeddings/layer_norm', axis=-1, epsilon=1e-12, dtype=tf.float32)(embeddings)) embeddings = ( tf.keras.layers.Dropout(rate=dropout_rate, dtype=tf.float32)(embeddings)) if float_dtype == 'float16': embeddings = tf.cast(embeddings, tf.float16) data = embeddings attention_mask = layers.SelfAttentionMask()([data, mask]) for i in range(num_layers): layer = layers.Transformer( num_attention_heads=num_attention_heads, intermediate_size=intermediate_size, intermediate_activation=activation, dropout_rate=dropout_rate, attention_dropout_rate=attention_dropout_rate, kernel_initializer=initializer, dtype=float_dtype, name='transformer/layer_%d' % i) data = layer([data, attention_mask]) first_token_tensor = ( tf.keras.layers.Lambda(lambda x: tf.squeeze(x[:, 0:1, :], axis=1))(data) ) cls_output = tf.keras.layers.Dense( units=hidden_size, activation='tanh', kernel_initializer=initializer, name='pooler_transform')( first_token_tensor) super(TransformerEncoder, self).__init__( inputs=[word_ids, mask, type_ids], outputs=[data, cls_output], **kwargs) def get_embedding_table(self): return self._embedding_layer.embeddings def get_config(self): return self._config_dict @classmethod def from_config(cls, config, custom_objects=None): return cls(**config)
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/official/nlp/modeling/networks/transformer_encoder.py
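For reference, here is a minimal usage sketch for the TransformerEncoder defined in the file above, following the invocation pattern used in its unit test later in this section. The hyperparameters are deliberately tiny and illustrative (the class defaults mirror BERT-Base), and it assumes the `official.nlp.modeling.networks` package is importable.

# Minimal usage sketch for TransformerEncoder; sizes are illustrative.
import numpy as np

from official.nlp.modeling.networks import transformer_encoder

seq_len = 16
encoder = transformer_encoder.TransformerEncoder(
    vocab_size=100,
    hidden_size=32,
    num_layers=2,
    num_attention_heads=2,
    sequence_length=seq_len,
    type_vocab_size=2)

word_ids = np.random.randint(100, size=(3, seq_len)).astype(np.int32)
mask = np.ones((3, seq_len), dtype=np.int32)
type_ids = np.zeros((3, seq_len), dtype=np.int32)

# The encoder is a Keras Model with two outputs: the per-token sequence
# output and the pooled [CLS] transform.
sequence_output, cls_output = encoder.predict([word_ids, mask, type_ids])
print(sequence_output.shape)  # (3, 16, 32)
print(cls_output.shape)       # (3, 32)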
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for span_labeling network.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import tensorflow as tf from tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import from official.nlp.modeling.networks import span_labeling # This decorator runs the test in V1, V2-Eager, and V2-Functional mode. It # guarantees forward compatibility of this code for the V2 switchover. @keras_parameterized.run_all_keras_modes class SpanLabelingTest(keras_parameterized.TestCase): def test_network_creation(self): """Validate that the Keras object can be created.""" sequence_length = 15 input_width = 512 test_network = span_labeling.SpanLabeling( input_width=input_width, output='predictions') # Create a 3-dimensional input (the first dimension is implicit). sequence_data = tf.keras.Input( shape=(sequence_length, input_width), dtype=tf.float32) start_outputs, end_outputs = test_network(sequence_data) # Validate that the outputs are of the expected shape. expected_output_shape = [None, sequence_length] self.assertEqual(expected_output_shape, start_outputs.shape.as_list()) self.assertEqual(expected_output_shape, end_outputs.shape.as_list()) def test_network_invocation(self): """Validate that the Keras object can be invoked.""" sequence_length = 15 input_width = 512 test_network = span_labeling.SpanLabeling(input_width=input_width) # Create a 3-dimensional input (the first dimension is implicit). sequence_data = tf.keras.Input( shape=(sequence_length, input_width), dtype=tf.float32) outputs = test_network(sequence_data) model = tf.keras.Model(sequence_data, outputs) # Invoke the network as part of a Model. batch_size = 3 input_data = 10 * np.random.random_sample( (batch_size, sequence_length, input_width)) start_outputs, end_outputs = model.predict(input_data) # Validate that the outputs are of the expected shape. expected_output_shape = (batch_size, sequence_length) self.assertEqual(expected_output_shape, start_outputs.shape) self.assertEqual(expected_output_shape, end_outputs.shape) def test_network_invocation_with_internal_logit_output(self): """Validate that the logit outputs are correct.""" sequence_length = 15 input_width = 512 test_network = span_labeling.SpanLabeling( input_width=input_width, output='predictions') # Create a 3-dimensional input (the first dimension is implicit). 
sequence_data = tf.keras.Input( shape=(sequence_length, input_width), dtype=tf.float32) output = test_network(sequence_data) model = tf.keras.Model(sequence_data, output) logit_model = tf.keras.Model( test_network.inputs, [test_network.start_logits, test_network.end_logits]) batch_size = 3 input_data = 10 * np.random.random_sample( (batch_size, sequence_length, input_width)) start_outputs, end_outputs = model.predict(input_data) start_logits, end_logits = logit_model.predict(input_data) # Ensure that the tensor shapes are correct. expected_output_shape = (batch_size, sequence_length) self.assertEqual(expected_output_shape, start_outputs.shape) self.assertEqual(expected_output_shape, end_outputs.shape) self.assertEqual(expected_output_shape, start_logits.shape) self.assertEqual(expected_output_shape, end_logits.shape) # Ensure that the logits, when softmaxed, create the outputs. input_tensor = tf.keras.Input(expected_output_shape[1:]) output_tensor = tf.keras.layers.Activation(tf.nn.log_softmax)(input_tensor) softmax_model = tf.keras.Model(input_tensor, output_tensor) start_softmax = softmax_model.predict(start_logits) self.assertAllClose(start_outputs, start_softmax) end_softmax = softmax_model.predict(end_logits) self.assertAllClose(end_outputs, end_softmax) def test_network_invocation_with_external_logit_output(self): """Validate that the logit outputs are correct.""" sequence_length = 15 input_width = 512 test_network = span_labeling.SpanLabeling( input_width=input_width, output='predictions') logit_network = span_labeling.SpanLabeling( input_width=input_width, output='logits') logit_network.set_weights(test_network.get_weights()) # Create a 3-dimensional input (the first dimension is implicit). sequence_data = tf.keras.Input( shape=(sequence_length, input_width), dtype=tf.float32) output = test_network(sequence_data) logit_output = logit_network(sequence_data) model = tf.keras.Model(sequence_data, output) logit_model = tf.keras.Model(sequence_data, logit_output) batch_size = 3 input_data = 10 * np.random.random_sample( (batch_size, sequence_length, input_width)) start_outputs, end_outputs = model.predict(input_data) start_logits, end_logits = logit_model.predict(input_data) # Ensure that the tensor shapes are correct. expected_output_shape = (batch_size, sequence_length) self.assertEqual(expected_output_shape, start_outputs.shape) self.assertEqual(expected_output_shape, end_outputs.shape) self.assertEqual(expected_output_shape, start_logits.shape) self.assertEqual(expected_output_shape, end_logits.shape) # Ensure that the logits, when softmaxed, create the outputs. input_tensor = tf.keras.Input(expected_output_shape[1:]) output_tensor = tf.keras.layers.Activation(tf.nn.log_softmax)(input_tensor) softmax_model = tf.keras.Model(input_tensor, output_tensor) start_softmax = softmax_model.predict(start_logits) self.assertAllClose(start_outputs, start_softmax) end_softmax = softmax_model.predict(end_logits) self.assertAllClose(end_outputs, end_softmax) def test_serialize_deserialize(self): # Create a network object that sets all of its config options. network = span_labeling.SpanLabeling( input_width=128, activation='relu', initializer='zeros', output='predictions') # Create another network object from the first object's config. new_network = span_labeling.SpanLabeling.from_config(network.get_config()) # Validate that the config can be forced to JSON. _ = new_network.to_json() # If the serialization was successful, the new config should match the old. 
    self.assertAllEqual(network.get_config(), new_network.get_config())

  def test_unknown_output_type_fails(self):
    with self.assertRaisesRegex(ValueError, 'Unknown `output` value "bad".*'):
      _ = span_labeling.SpanLabeling(input_width=10, output='bad')


if __name__ == '__main__':
  tf.test.main()
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/official/nlp/modeling/networks/span_labeling_test.py
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Trainer network for BERT-style models.""" from __future__ import absolute_import from __future__ import division # from __future__ import google_type_annotations from __future__ import print_function import copy import tensorflow as tf from official.nlp.modeling import networks @tf.keras.utils.register_keras_serializable(package='Text') class BertPretrainer(tf.keras.Model): """BERT network training model. This is an implementation of the network structure surrounding a transformer encoder as described in "BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding" (https://arxiv.org/abs/1810.04805). The BertTrainer allows a user to pass in a transformer stack, and instantiates the masked language model and classification networks that are used to create the training objectives. Attributes: network: A transformer network. This network should output a sequence output and a classification output. Furthermore, it should expose its embedding table via a "get_embedding_table" method. num_classes: Number of classes to predict from the classification network. num_token_predictions: Number of tokens to predict from the masked LM. activation: The activation (if any) to use in the masked LM and classification networks. If None, no activation will be used. initializer: The initializer (if any) to use in the masked LM and classification networks. Defaults to a Glorot uniform initializer. output: The output style for this network. Can be either 'logits' or 'predictions'. """ def __init__(self, network, num_classes, num_token_predictions, float_type, activation=None, output_activation=None, initializer='glorot_uniform', output='logits', **kwargs): self._self_setattr_tracking = False self._config = { 'network': network, 'num_classes': num_classes, 'num_token_predictions': num_token_predictions, 'activation': activation, 'output_activation': output_activation, 'initializer': initializer, 'output': output, } # We want to use the inputs of the passed network as the inputs to this # Model. To do this, we need to keep a copy of the network inputs for use # when we construct the Model object at the end of init. (We keep a copy # because we'll be adding another tensor to the copy later.) network_inputs = network.inputs inputs = copy.copy(network_inputs) # Because we have a copy of inputs to create this Model object, we can # invoke the Network object with its own input tensors to start the Model. # Note that, because of how deferred construction happens, we can't use # the copy of the list here - by the time the network is invoked, the list # object contains the additional input added below. 
sequence_output, cls_output = network(network_inputs) sequence_output_length = sequence_output.shape.as_list()[1] if sequence_output_length < num_token_predictions: raise ValueError( "The passed network's output length is %s, which is less than the " 'requested num_token_predictions %s.' % (sequence_output_length, num_token_predictions)) masked_lm_positions = tf.keras.layers.Input( shape=(num_token_predictions,), name='masked_lm_positions', dtype=tf.int32) inputs.append(masked_lm_positions) self.masked_lm = networks.MaskedLM( num_predictions=num_token_predictions, input_width=sequence_output.shape[-1], source_network=network, float_type=float_type, activation=activation, initializer=initializer, output=output, name='masked_lm') lm_outputs = self.masked_lm([sequence_output, masked_lm_positions]) self.classification = networks.Classification( input_width=cls_output.shape[-1], num_classes=num_classes, initializer=initializer, output=output, name='classification') sentence_outputs = self.classification(cls_output) super(BertPretrainer, self).__init__( inputs=inputs, outputs=[lm_outputs, sentence_outputs], **kwargs) def get_config(self): return self._config @classmethod def from_config(cls, config, custom_objects=None): return cls(**config)
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/official/nlp/modeling/networks/bert_pretrainer.py
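A rough sketch of wiring an encoder into the BertPretrainer defined above follows. Note that in this variant `float_type` is a required constructor argument, and the model consumes the encoder inputs plus a `masked_lm_positions` input. This is illustrative only and assumes the `official.nlp.modeling` package (including its MaskedLM and Classification networks) is importable.

# Sketch: building a BertPretrainer around a small TransformerEncoder.
import tensorflow as tf

from official.nlp.modeling import networks
from official.nlp.modeling.networks import bert_pretrainer

seq_len, num_preds = 16, 4
encoder = networks.TransformerEncoder(
    vocab_size=100, hidden_size=32, num_layers=2,
    num_attention_heads=2, sequence_length=seq_len)

pretrainer = bert_pretrainer.BertPretrainer(
    network=encoder,
    num_classes=2,                  # sentence-level classification head
    num_token_predictions=num_preds,
    float_type='float32')

# Inputs are the encoder inputs followed by the masked LM positions.
word_ids = tf.keras.Input(shape=(seq_len,), dtype=tf.int32)
mask = tf.keras.Input(shape=(seq_len,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(seq_len,), dtype=tf.int32)
lm_positions = tf.keras.Input(shape=(num_preds,), dtype=tf.int32)

# Two outputs: masked-LM predictions for the requested positions and the
# sentence-level output.
lm_output, sentence_output = pretrainer([word_ids, mask, type_ids, lm_positions])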
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classification network."""
from __future__ import absolute_import
from __future__ import division
# from __future__ import google_type_annotations
from __future__ import print_function

import tensorflow as tf


@tf.keras.utils.register_keras_serializable(package='Text')
class Classification(tf.keras.Model):
  """Classification network head for BERT modeling.

  This network implements a simple classifier head based on a dense layer.

  Attributes:
    input_width: The innermost dimension of the input tensor to this network.
    num_classes: The number of classes that this network should classify to.
    activation: The activation, if any, for the dense layer in this network.
    initializer: The initializer for the dense layer in this network. Defaults
      to a Glorot uniform initializer.
    output: The output style for this network. Can be either 'logits' or
      'predictions'.
  """

  def __init__(self,
               input_width,
               num_classes,
               initializer='glorot_uniform',
               output='logits',
               **kwargs):
    self._self_setattr_tracking = False
    self._config_dict = {
        'input_width': input_width,
        'num_classes': num_classes,
        'initializer': initializer,
        'output': output,
    }

    cls_output = tf.keras.layers.Input(
        shape=(input_width,), name='cls_output', dtype=tf.float32)

    self.logits = tf.keras.layers.Dense(
        num_classes,
        activation=None,
        kernel_initializer=initializer,
        name='predictions/transform/logits')(
            cls_output)

    predictions = tf.keras.layers.Activation(tf.nn.log_softmax)(self.logits)

    if output == 'logits':
      output_tensors = self.logits
    elif output == 'predictions':
      output_tensors = predictions
    else:
      raise ValueError(
          ('Unknown `output` value "%s". `output` can be either "logits" or '
           '"predictions"') % output)

    super(Classification, self).__init__(
        inputs=[cls_output], outputs=output_tensors, **kwargs)

  def get_config(self):
    return self._config_dict

  @classmethod
  def from_config(cls, config, custom_objects=None):
    return cls(**config)
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/official/nlp/modeling/networks/classification.py
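A minimal sketch for the Classification head above: it maps a pooled [batch, input_width] tensor to per-class logits, or to log-softmax "predictions" when constructed with output='predictions'. The sizes and data are illustrative, and the import assumes this repository's package layout.

# Sketch: applying the Classification head to a pooled representation.
import numpy as np

from official.nlp.modeling.networks import classification

head = classification.Classification(
    input_width=32, num_classes=3, output='predictions')

pooled = np.random.random((4, 32)).astype('float32')
log_probs = head.predict(pooled)
print(log_probs.shape)                  # (4, 3)
# With output='predictions' each row is a log-probability distribution,
# so exponentiating and summing over classes gives 1.
print(np.exp(log_probs).sum(axis=-1))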
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Trainer network for BERT-style models.""" from __future__ import absolute_import from __future__ import division # from __future__ import google_type_annotations from __future__ import print_function import tensorflow as tf from official.nlp.modeling import networks @tf.keras.utils.register_keras_serializable(package='Text') class BertClassifier(tf.keras.Model): """Classifier model based on a BERT-style transformer-based encoder. This is an implementation of the network structure surrounding a transformer encoder as described in "BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding" (https://arxiv.org/abs/1810.04805). The BertClassifier allows a user to pass in a transformer stack, and instantiates a classification network based on the passed `num_classes` argument. Attributes: network: A transformer network. This network should output a sequence output and a classification output. Furthermore, it should expose its embedding table via a "get_embedding_table" method. num_classes: Number of classes to predict from the classification network. initializer: The initializer (if any) to use in the classification networks. Defaults to a Glorot uniform initializer. output: The output style for this network. Can be either 'logits' or 'predictions'. """ def __init__(self, network, num_classes, initializer='glorot_uniform', output='logits', dropout_rate=0.1, **kwargs): self._self_setattr_tracking = False self._config = { 'network': network, 'num_classes': num_classes, 'initializer': initializer, 'output': output, } # We want to use the inputs of the passed network as the inputs to this # Model. To do this, we need to keep a handle to the network inputs for use # when we construct the Model object at the end of init. inputs = network.inputs # Because we have a copy of inputs to create this Model object, we can # invoke the Network object with its own input tensors to start the Model. _, cls_output = network(inputs) cls_output = tf.keras.layers.Dropout(rate=dropout_rate)(cls_output) self.classifier = networks.Classification( input_width=cls_output.shape[-1], num_classes=num_classes, initializer=initializer, output=output, name='classification') predictions = self.classifier(cls_output) super(BertClassifier, self).__init__( inputs=inputs, outputs=predictions, **kwargs) def get_config(self): return self._config @classmethod def from_config(cls, config, custom_objects=None): return cls(**config)
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/official/nlp/modeling/networks/bert_classifier.py
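The sketch below shows the end-to-end use of BertClassifier, mirroring the tensor-call test that follows in this section: a small TransformerEncoder is wrapped and invoked on constant inputs in eager mode. Sizes are illustrative, and the imports assume this repository's package layout.

# Sketch: classifying with BertClassifier on a tiny encoder.
import tensorflow as tf

from official.nlp.modeling import networks
from official.nlp.modeling.networks import bert_classifier

encoder = networks.TransformerEncoder(
    vocab_size=100, hidden_size=32, num_layers=2,
    num_attention_heads=2, sequence_length=2)
classifier = bert_classifier.BertClassifier(encoder, num_classes=2)

word_ids = tf.constant([[1, 1], [2, 2]], dtype=tf.int32)
mask = tf.constant([[1, 1], [1, 0]], dtype=tf.int32)
type_ids = tf.constant([[0, 0], [0, 1]], dtype=tf.int32)

# In eager mode this runs the full forward pass and yields
# [batch, num_classes] classification outputs.
logits = classifier([word_ids, mask, type_ids])
print(logits.shape)  # (2, 2)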
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for BERT trainer network.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf from tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import from official.nlp.modeling import networks from official.nlp.modeling.networks import bert_classifier # This decorator runs the test in V1, V2-Eager, and V2-Functional mode. It # guarantees forward compatibility of this code for the V2 switchover. @keras_parameterized.run_all_keras_modes class BertClassifierTest(keras_parameterized.TestCase): def test_bert_trainer(self): """Validate that the Keras object can be created.""" # Build a transformer network to use within the BERT trainer. vocab_size = 100 sequence_length = 512 test_network = networks.TransformerEncoder( vocab_size=vocab_size, num_layers=2, sequence_length=sequence_length) # Create a BERT trainer with the created network. num_classes = 3 bert_trainer_model = bert_classifier.BertClassifier( test_network, num_classes=num_classes) # Create a set of 2-dimensional inputs (the first dimension is implicit). word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) # Invoke the trainer model on the inputs. This causes the layer to be built. cls_outs = bert_trainer_model([word_ids, mask, type_ids]) # Validate that the outputs are of the expected shape. expected_classification_shape = [None, num_classes] self.assertAllEqual(expected_classification_shape, cls_outs.shape.as_list()) def test_bert_trainer_tensor_call(self): """Validate that the Keras object can be invoked.""" # Build a transformer network to use within the BERT trainer. (Here, we use # a short sequence_length for convenience.) test_network = networks.TransformerEncoder( vocab_size=100, num_layers=2, sequence_length=2) # Create a BERT trainer with the created network. bert_trainer_model = bert_classifier.BertClassifier( test_network, num_classes=2) # Create a set of 2-dimensional data tensors to feed into the model. word_ids = tf.constant([[1, 1], [2, 2]], dtype=tf.int32) mask = tf.constant([[1, 1], [1, 0]], dtype=tf.int32) type_ids = tf.constant([[1, 1], [2, 2]], dtype=tf.int32) # Invoke the trainer model on the tensors. In Eager mode, this does the # actual calculation. (We can't validate the outputs, since the network is # too complex: this simply ensures we're not hitting runtime errors.) _ = bert_trainer_model([word_ids, mask, type_ids]) def test_serialize_deserialize(self): """Validate that the BERT trainer can be serialized and deserialized.""" # Build a transformer network to use within the BERT trainer. (Here, we use # a short sequence_length for convenience.) 
test_network = networks.TransformerEncoder( vocab_size=100, num_layers=2, sequence_length=5) # Create a BERT trainer with the created network. (Note that all the args # are different, so we can catch any serialization mismatches.) bert_trainer_model = bert_classifier.BertClassifier( test_network, num_classes=4, initializer='zeros', output='predictions') # Create another BERT trainer via serialization and deserialization. config = bert_trainer_model.get_config() new_bert_trainer_model = bert_classifier.BertClassifier.from_config(config) # Validate that the config can be forced to JSON. _ = new_bert_trainer_model.to_json() # If the serialization was successful, the new config should match the old. self.assertAllEqual(bert_trainer_model.get_config(), new_bert_trainer_model.get_config()) if __name__ == '__main__': tf.test.main()
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/official/nlp/modeling/networks/bert_classifier_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Networks package definition."""
from official.nlp.modeling.networks.albert_transformer_encoder import AlbertTransformerEncoder
from official.nlp.modeling.networks.classification import Classification
from official.nlp.modeling.networks.encoder_scaffold import EncoderScaffold
from official.nlp.modeling.networks.masked_lm import MaskedLM
from official.nlp.modeling.networks.span_labeling import SpanLabeling
from official.nlp.modeling.networks.transformer_encoder import TransformerEncoder
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/official/nlp/modeling/networks/__init__.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for BERT trainer network.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf from tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import from official.nlp.modeling import networks from official.nlp.modeling.networks import bert_span_labeler # This decorator runs the test in V1, V2-Eager, and V2-Functional mode. It # guarantees forward compatibility of this code for the V2 switchover. @keras_parameterized.run_all_keras_modes class BertSpanLabelerTest(keras_parameterized.TestCase): def test_bert_trainer(self): """Validate that the Keras object can be created.""" # Build a transformer network to use within the BERT trainer. vocab_size = 100 sequence_length = 512 test_network = networks.TransformerEncoder( vocab_size=vocab_size, num_layers=2, sequence_length=sequence_length) # Create a BERT trainer with the created network. bert_trainer_model = bert_span_labeler.BertSpanLabeler(test_network) # Create a set of 2-dimensional inputs (the first dimension is implicit). word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) # Invoke the trainer model on the inputs. This causes the layer to be built. cls_outs = bert_trainer_model([word_ids, mask, type_ids]) # Validate that there are 2 outputs are of the expected shape. self.assertEqual(2, len(cls_outs)) expected_shape = [None, sequence_length] for out in cls_outs: self.assertAllEqual(expected_shape, out.shape.as_list()) def test_bert_trainer_named_compilation(self): """Validate compilation using explicit output names.""" # Build a transformer network to use within the BERT trainer. vocab_size = 100 sequence_length = 512 test_network = networks.TransformerEncoder( vocab_size=vocab_size, num_layers=2, sequence_length=sequence_length) # Create a BERT trainer with the created network. bert_trainer_model = bert_span_labeler.BertSpanLabeler(test_network) # Attempt to compile the model using a string-keyed dict of output names to # loss functions. This will validate that the outputs are named as we # expect. bert_trainer_model.compile( optimizer='sgd', loss={ 'start_positions': 'mse', 'end_positions': 'mse' }) def test_bert_trainer_tensor_call(self): """Validate that the Keras object can be invoked.""" # Build a transformer network to use within the BERT trainer. (Here, we use # a short sequence_length for convenience.) test_network = networks.TransformerEncoder( vocab_size=100, num_layers=2, sequence_length=2) # Create a BERT trainer with the created network. bert_trainer_model = bert_span_labeler.BertSpanLabeler(test_network) # Create a set of 2-dimensional data tensors to feed into the model. 
word_ids = tf.constant([[1, 1], [2, 2]], dtype=tf.int32) mask = tf.constant([[1, 1], [1, 0]], dtype=tf.int32) type_ids = tf.constant([[1, 1], [2, 2]], dtype=tf.int32) # Invoke the trainer model on the tensors. In Eager mode, this does the # actual calculation. (We can't validate the outputs, since the network is # too complex: this simply ensures we're not hitting runtime errors.) _ = bert_trainer_model([word_ids, mask, type_ids]) def test_serialize_deserialize(self): """Validate that the BERT trainer can be serialized and deserialized.""" # Build a transformer network to use within the BERT trainer. (Here, we use # a short sequence_length for convenience.) test_network = networks.TransformerEncoder( vocab_size=100, num_layers=2, sequence_length=5) # Create a BERT trainer with the created network. (Note that all the args # are different, so we can catch any serialization mismatches.) bert_trainer_model = bert_span_labeler.BertSpanLabeler(test_network) # Create another BERT trainer via serialization and deserialization. config = bert_trainer_model.get_config() new_bert_trainer_model = bert_span_labeler.BertSpanLabeler.from_config( config) # Validate that the config can be forced to JSON. _ = new_bert_trainer_model.to_json() # If the serialization was successful, the new config should match the old. self.assertAllEqual(bert_trainer_model.get_config(), new_bert_trainer_model.get_config()) if __name__ == '__main__': tf.test.main()
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/official/nlp/modeling/networks/bert_span_labeler_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for transformer-based text encoder network.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import tensorflow as tf from tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import from official.nlp.modeling.networks import transformer_encoder # This decorator runs the test in V1, V2-Eager, and V2-Functional mode. It # guarantees forward compatibility of this code for the V2 switchover. @keras_parameterized.run_all_keras_modes class TransformerEncoderTest(keras_parameterized.TestCase): def test_network_creation(self): hidden_size = 32 sequence_length = 21 # Create a small TransformerEncoder for testing. test_network = transformer_encoder.TransformerEncoder( vocab_size=100, hidden_size=hidden_size, sequence_length=sequence_length, num_attention_heads=2, num_layers=3) # Create the inputs (note that the first dimension is implicit). word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) data, pooled = test_network([word_ids, mask, type_ids]) expected_data_shape = [None, sequence_length, hidden_size] expected_pooled_shape = [None, hidden_size] self.assertAllEqual(expected_data_shape, data.shape.as_list()) self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list()) # The default output dtype is float32. self.assertAllEqual(tf.float32, data.dtype) self.assertAllEqual(tf.float32, pooled.dtype) def test_network_creation_with_float16_dtype(self): hidden_size = 32 sequence_length = 21 tf.keras.mixed_precision.experimental.set_policy("mixed_float16") # Create a small TransformerEncoder for testing. test_network = transformer_encoder.TransformerEncoder( vocab_size=100, hidden_size=hidden_size, sequence_length=sequence_length, num_attention_heads=2, num_layers=3, float_dtype="float16") # Create the inputs (note that the first dimension is implicit). word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) data, pooled = test_network([word_ids, mask, type_ids]) expected_data_shape = [None, sequence_length, hidden_size] expected_pooled_shape = [None, hidden_size] self.assertAllEqual(expected_data_shape, data.shape.as_list()) self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list()) # If float_dtype is set to float16, the output should always be float16. 
self.assertAllEqual(tf.float16, data.dtype) self.assertAllEqual(tf.float16, pooled.dtype) def test_network_invocation(self): hidden_size = 32 sequence_length = 21 vocab_size = 57 num_types = 7 tf.keras.mixed_precision.experimental.set_policy("float32") # Create a small TransformerEncoder for testing. test_network = transformer_encoder.TransformerEncoder( vocab_size=vocab_size, hidden_size=hidden_size, sequence_length=sequence_length, num_attention_heads=2, num_layers=3, type_vocab_size=num_types) self.assertTrue( test_network._position_embedding_layer._use_dynamic_slicing) # Create the inputs (note that the first dimension is implicit). word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) data, pooled = test_network([word_ids, mask, type_ids]) # Create a model based off of this network: model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled]) # Invoke the model. We can't validate the output data here (the model is too # complex) but this will catch structural runtime errors. batch_size = 3 word_id_data = np.random.randint( vocab_size, size=(batch_size, sequence_length)) mask_data = np.random.randint(2, size=(batch_size, sequence_length)) type_id_data = np.random.randint( num_types, size=(batch_size, sequence_length)) _ = model.predict([word_id_data, mask_data, type_id_data]) # Creates a TransformerEncoder with max_sequence_length != sequence_length max_sequence_length = 128 test_network = transformer_encoder.TransformerEncoder( vocab_size=vocab_size, hidden_size=hidden_size, sequence_length=sequence_length, max_sequence_length=max_sequence_length, num_attention_heads=2, num_layers=3, type_vocab_size=num_types) self.assertTrue(test_network._position_embedding_layer._use_dynamic_slicing) model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled]) _ = model.predict([word_id_data, mask_data, type_id_data]) def test_serialize_deserialize(self): # Create a network object that sets all of its config options. kwargs = dict( vocab_size=100, hidden_size=32, num_layers=3, num_attention_heads=2, sequence_length=21, max_sequence_length=21, type_vocab_size=12, intermediate_size=1223, activation="relu", dropout_rate=0.05, attention_dropout_rate=0.22, initializer="glorot_uniform", float_dtype="float16") network = transformer_encoder.TransformerEncoder(**kwargs) expected_config = dict(kwargs) expected_config["activation"] = tf.keras.activations.serialize( tf.keras.activations.get(expected_config["activation"])) expected_config["initializer"] = tf.keras.initializers.serialize( tf.keras.initializers.get(expected_config["initializer"])) self.assertEqual(network.get_config(), expected_config) # Create another network object from the first object's config. new_network = transformer_encoder.TransformerEncoder.from_config( network.get_config()) # Validate that the config can be forced to JSON. _ = new_network.to_json() # If the serialization was successful, the new config should match the old. self.assertAllEqual(network.get_config(), new_network.get_config()) if __name__ == "__main__": assert tf.version.VERSION.startswith('2.') tf.test.main()
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/official/nlp/modeling/networks/transformer_encoder_test.py
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """ALBERT (https://arxiv.org/abs/1810.04805) text encoder network.""" from __future__ import absolute_import from __future__ import division # from __future__ import google_type_annotations from __future__ import print_function import tensorflow as tf from official.modeling import activations from official.nlp.modeling import layers @tf.keras.utils.register_keras_serializable(package='Text') class AlbertTransformerEncoder(tf.keras.Model): """ALBERT (https://arxiv.org/abs/1810.04805) text encoder network. This network implements the encoder described in the paper "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations" (https://arxiv.org/abs/1909.11942). Compared with BERT (https://arxiv.org/abs/1810.04805), ALBERT refactorizes embedding parameters into two smaller matrices and shares parameters across layers. The default values for this object are taken from the ALBERT-Base implementation described in the paper. Attributes: vocab_size: The size of the token vocabulary. embedding_width: The width of the word embeddings. If the embedding width is not equal to hidden size, embedding parameters will be factorized into two matrices in the shape of ['vocab_size', 'embedding_width'] and ['embedding_width', 'hidden_size'] ('embedding_width' is usually much smaller than 'hidden_size'). hidden_size: The size of the transformer hidden layers. num_layers: The number of transformer layers. num_attention_heads: The number of attention heads for each transformer. The hidden size must be divisible by the number of attention heads. sequence_length: The sequence length that this encoder expects. If None, the sequence length is dynamic; if an integer, the encoder will require sequences padded to this length. max_sequence_length: The maximum sequence length that this encoder can consume. If None, max_sequence_length uses the value from sequence length. This determines the variable shape for positional embeddings. type_vocab_size: The number of types that the 'type_ids' input can take. intermediate_size: The intermediate size for the transformer layers. activation: The activation to use for the transformer layers. dropout_rate: The dropout rate to use for the transformer layers. attention_dropout_rate: The dropout rate to use for the attention layers within the transformer layers. initializer: The initialzer to use for all weights in this encoder. float_dtype: The dtype of this encoder. Can be 'float32' or 'float16'. 
""" def __init__(self, vocab_size, embedding_width=128, hidden_size=768, num_layers=12, num_attention_heads=12, sequence_length=512, max_sequence_length=None, type_vocab_size=16, intermediate_size=3072, activation=activations.gelu, dropout_rate=0.1, attention_dropout_rate=0.1, initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02), float_dtype='float32', **kwargs): activation = tf.keras.activations.get(activation) initializer = tf.keras.initializers.get(initializer) if not max_sequence_length: max_sequence_length = sequence_length self._self_setattr_tracking = False self._config_dict = { 'vocab_size': vocab_size, 'embedding_width': embedding_width, 'hidden_size': hidden_size, 'num_layers': num_layers, 'num_attention_heads': num_attention_heads, 'sequence_length': sequence_length, 'max_sequence_length': max_sequence_length, 'type_vocab_size': type_vocab_size, 'intermediate_size': intermediate_size, 'activation': tf.keras.activations.serialize(activation), 'dropout_rate': dropout_rate, 'attention_dropout_rate': attention_dropout_rate, 'initializer': tf.keras.initializers.serialize(initializer), 'float_dtype': float_dtype, } word_ids = tf.keras.layers.Input( shape=(sequence_length,), dtype=tf.int32, name='input_word_ids') mask = tf.keras.layers.Input( shape=(sequence_length,), dtype=tf.int32, name='input_mask') type_ids = tf.keras.layers.Input( shape=(sequence_length,), dtype=tf.int32, name='input_type_ids') self._embedding_layer = layers.OnDeviceEmbedding( vocab_size=vocab_size, embedding_width=embedding_width, initializer=initializer, dtype=float_dtype, name='word_embeddings') word_embeddings = self._embedding_layer(word_ids) # Always uses dynamic slicing for simplicity. self._position_embedding_layer = layers.PositionEmbedding( initializer=initializer, use_dynamic_slicing=True, max_sequence_length=max_sequence_length, dtype=float_dtype) position_embeddings = self._position_embedding_layer(word_embeddings) type_embeddings = ( layers.OnDeviceEmbedding( vocab_size=type_vocab_size, embedding_width=embedding_width, initializer=initializer, use_one_hot=True, dtype=float_dtype, name='type_embeddings')(type_ids)) embeddings = tf.keras.layers.Add()( [word_embeddings, position_embeddings, type_embeddings]) embeddings = ( tf.keras.layers.LayerNormalization( name='embeddings/layer_norm', axis=-1, epsilon=1e-12, dtype=float_dtype)(embeddings)) embeddings = ( tf.keras.layers.Dropout(rate=dropout_rate, dtype=tf.float32)(embeddings)) # We project the 'embedding' output to 'hidden_size' if it is not already # 'hidden_size'. 
if embedding_width != hidden_size: embeddings = layers.DenseEinsum( output_shape=hidden_size, kernel_initializer=initializer, name='embedding_projection')( embeddings) if float_dtype == 'float16': embeddings = tf.cast(embeddings, tf.float16) data = embeddings attention_mask = layers.SelfAttentionMask()([data, mask]) shared_layer = layers.Transformer( num_attention_heads=num_attention_heads, intermediate_size=intermediate_size, intermediate_activation=activation, dropout_rate=dropout_rate, attention_dropout_rate=attention_dropout_rate, kernel_initializer=initializer, dtype=float_dtype, name='transformer') for _ in range(num_layers): data = shared_layer([data, attention_mask]) first_token_tensor = ( tf.keras.layers.Lambda(lambda x: tf.squeeze(x[:, 0:1, :], axis=1))(data) ) cls_output = tf.keras.layers.Dense( units=hidden_size, activation='tanh', kernel_initializer=initializer, dtype=float_dtype, name='pooler_transform')( first_token_tensor) super(AlbertTransformerEncoder, self).__init__( inputs=[word_ids, mask, type_ids], outputs=[data, cls_output], **kwargs) def get_embedding_table(self): return self._embedding_layer.embeddings def get_config(self): return self._config_dict @classmethod def from_config(cls, config): return cls(**config)
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/official/nlp/modeling/networks/albert_transformer_encoder.py
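A minimal usage sketch for the AlbertTransformerEncoder above follows. The distinguishing settings are the factorized embedding (embedding_width smaller than hidden_size, projected up internally) and the single Transformer layer shared across all `num_layers` passes. The numbers are illustrative, and the import assumes this repository's package layout.

# Sketch: a tiny ALBERT-style encoder with factorized embeddings.
import numpy as np

from official.nlp.modeling.networks import albert_transformer_encoder

seq_len = 16
encoder = albert_transformer_encoder.AlbertTransformerEncoder(
    vocab_size=100,
    embedding_width=8,      # factorized: projected up to hidden_size
    hidden_size=32,
    num_layers=2,           # two passes through one shared Transformer layer
    num_attention_heads=2,
    sequence_length=seq_len,
    type_vocab_size=2)

word_ids = np.random.randint(100, size=(3, seq_len)).astype(np.int32)
mask = np.ones((3, seq_len), dtype=np.int32)
type_ids = np.zeros((3, seq_len), dtype=np.int32)

sequence_output, cls_output = encoder.predict([word_ids, mask, type_ids])
print(sequence_output.shape)  # (3, 16, 32)
print(cls_output.shape)       # (3, 32)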
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for transformer-based text encoder network.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import tensorflow as tf from tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import from official.modeling import activations from official.nlp.modeling import layers from official.nlp.modeling.networks import encoder_scaffold # Test class that wraps a standard transformer layer. If this layer is called # at any point, the list passed to the config object will be filled with a # boolean 'True'. We register this class as a Keras serializable so we can # test serialization below. # @tf.keras.utils.register_keras_serializable(package="TestOnly") class ValidatedTransformerLayer(layers.Transformer): def __init__(self, call_list, **kwargs): super(ValidatedTransformerLayer, self).__init__(**kwargs) self.list = call_list def call(self, inputs): self.list.append(True) return super(ValidatedTransformerLayer, self).call(inputs) def get_config(self): config = super(ValidatedTransformerLayer, self).get_config() config["call_list"] = [] return config # This decorator runs the test in V1, V2-Eager, and V2-Functional mode. It # guarantees forward compatibility of this code for the V2 switchover. @keras_parameterized.run_all_keras_modes class EncoderScaffoldLayerClassTest(keras_parameterized.TestCase): def test_network_creation(self): hidden_size = 32 sequence_length = 21 num_hidden_instances = 3 embedding_cfg = { "vocab_size": 100, "type_vocab_size": 16, "hidden_size": hidden_size, "seq_length": sequence_length, "max_seq_length": sequence_length, "initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02), "dropout_rate": 0.1, } call_list = [] hidden_cfg = { "num_attention_heads": 2, "intermediate_size": 3072, "intermediate_activation": activations.gelu, "dropout_rate": 0.1, "attention_dropout_rate": 0.1, "kernel_initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02), "dtype": "float32", "call_list": call_list } # Create a small EncoderScaffold for testing. test_network = encoder_scaffold.EncoderScaffold( num_hidden_instances=num_hidden_instances, num_output_classes=hidden_size, classification_layer_initializer=tf.keras.initializers.TruncatedNormal( stddev=0.02), hidden_cls=ValidatedTransformerLayer, hidden_cfg=hidden_cfg, embedding_cfg=embedding_cfg) # Create the inputs (note that the first dimension is implicit). 
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) data, pooled = test_network([word_ids, mask, type_ids]) expected_data_shape = [None, sequence_length, hidden_size] expected_pooled_shape = [None, hidden_size] self.assertAllEqual(expected_data_shape, data.shape.as_list()) self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list()) # The default output dtype is float32. self.assertAllEqual(tf.float32, data.dtype) self.assertAllEqual(tf.float32, pooled.dtype) # If call_list[0] exists and is True, the passed layer class was # instantiated from the given config properly. self.assertNotEmpty(call_list) self.assertTrue(call_list[0], "The passed layer class wasn't instantiated.") def test_network_creation_with_float16_dtype(self): tf.keras.mixed_precision.experimental.set_policy("mixed_float16") hidden_size = 32 sequence_length = 21 embedding_cfg = { "vocab_size": 100, "type_vocab_size": 16, "hidden_size": hidden_size, "seq_length": sequence_length, "max_seq_length": sequence_length, "initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02), "dropout_rate": 0.1, "dtype": "float16", } hidden_cfg = { "num_attention_heads": 2, "intermediate_size": 3072, "intermediate_activation": activations.gelu, "dropout_rate": 0.1, "attention_dropout_rate": 0.1, "kernel_initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02), "dtype": "float16", } # Create a small EncoderScaffold for testing. test_network = encoder_scaffold.EncoderScaffold( num_hidden_instances=3, num_output_classes=hidden_size, classification_layer_initializer=tf.keras.initializers.TruncatedNormal( stddev=0.02), classification_layer_dtype=tf.float16, hidden_cfg=hidden_cfg, embedding_cfg=embedding_cfg) # Create the inputs (note that the first dimension is implicit). word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) data, pooled = test_network([word_ids, mask, type_ids]) expected_data_shape = [None, sequence_length, hidden_size] expected_pooled_shape = [None, hidden_size] self.assertAllEqual(expected_data_shape, data.shape.as_list()) self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list()) # If float_dtype is set to float16, the output should always be float16. self.assertAllEqual(tf.float16, data.dtype) self.assertAllEqual(tf.float16, pooled.dtype) def test_network_invocation(self): hidden_size = 32 sequence_length = 21 vocab_size = 57 num_types = 7 embedding_cfg = { "vocab_size": vocab_size, "type_vocab_size": num_types, "hidden_size": hidden_size, "seq_length": sequence_length, "max_seq_length": sequence_length, "initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02), "dropout_rate": 0.1, } hidden_cfg = { "num_attention_heads": 2, "intermediate_size": 3072, "intermediate_activation": activations.gelu, "dropout_rate": 0.1, "attention_dropout_rate": 0.1, "kernel_initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02), "dtype": "float32", } tf.keras.mixed_precision.experimental.set_policy("float32") print(hidden_cfg) print(embedding_cfg) # Create a small EncoderScaffold for testing. 
test_network = encoder_scaffold.EncoderScaffold( num_hidden_instances=3, num_output_classes=hidden_size, classification_layer_initializer=tf.keras.initializers.TruncatedNormal( stddev=0.02), hidden_cfg=hidden_cfg, embedding_cfg=embedding_cfg) # Create the inputs (note that the first dimension is implicit). word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) data, pooled = test_network([word_ids, mask, type_ids]) # Create a model based off of this network: model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled]) # Invoke the model. We can't validate the output data here (the model is too # complex) but this will catch structural runtime errors. batch_size = 3 word_id_data = np.random.randint( vocab_size, size=(batch_size, sequence_length)) mask_data = np.random.randint(2, size=(batch_size, sequence_length)) type_id_data = np.random.randint( num_types, size=(batch_size, sequence_length)) _ = model.predict([word_id_data, mask_data, type_id_data]) # Creates a EncoderScaffold with max_sequence_length != sequence_length num_types = 7 embedding_cfg = { "vocab_size": vocab_size, "type_vocab_size": num_types, "hidden_size": hidden_size, "seq_length": sequence_length, "max_seq_length": sequence_length * 2, "initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02), "dropout_rate": 0.1, } hidden_cfg = { "num_attention_heads": 2, "intermediate_size": 3072, "intermediate_activation": activations.gelu, "dropout_rate": 0.1, "attention_dropout_rate": 0.1, "kernel_initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02), } # Create a small EncoderScaffold for testing. test_network = encoder_scaffold.EncoderScaffold( num_hidden_instances=3, num_output_classes=hidden_size, classification_layer_initializer=tf.keras.initializers.TruncatedNormal( stddev=0.02), hidden_cfg=hidden_cfg, embedding_cfg=embedding_cfg) model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled]) _ = model.predict([word_id_data, mask_data, type_id_data]) def test_serialize_deserialize(self): # Create a network object that sets all of its config options. hidden_size = 32 sequence_length = 21 embedding_cfg = { "vocab_size": 100, "type_vocab_size": 16, "hidden_size": hidden_size, "seq_length": sequence_length, "max_seq_length": sequence_length, "initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02), "dropout_rate": 0.1, } hidden_cfg = { "num_attention_heads": 2, "intermediate_size": 3072, "intermediate_activation": activations.gelu, "dropout_rate": 0.1, "attention_dropout_rate": 0.1, "kernel_initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02), "dtype": "float32", } # Create a small EncoderScaffold for testing. network = encoder_scaffold.EncoderScaffold( num_hidden_instances=3, num_output_classes=hidden_size, classification_layer_initializer=tf.keras.initializers.TruncatedNormal( stddev=0.02), hidden_cfg=hidden_cfg, embedding_cfg=embedding_cfg) # Create another network object from the first object's config. new_network = encoder_scaffold.EncoderScaffold.from_config( network.get_config()) # Validate that the config can be forced to JSON. _ = new_network.to_json() # If the serialization was successful, the new config should match the old. 
self.assertAllEqual(network.get_config(), new_network.get_config()) @keras_parameterized.run_all_keras_modes class EncoderScaffoldEmbeddingNetworkTest(keras_parameterized.TestCase): def test_network_invocation(self): hidden_size = 32 sequence_length = 21 vocab_size = 57 # Build an embedding network to swap in for the default network. This one # will have 2 inputs (mask and word_ids) instead of 3, and won't use # positional embeddings. word_ids = tf.keras.layers.Input( shape=(sequence_length,), dtype=tf.int32, name="input_word_ids") mask = tf.keras.layers.Input( shape=(sequence_length,), dtype=tf.int32, name="input_mask") embedding_layer = layers.OnDeviceEmbedding( vocab_size=vocab_size, embedding_width=hidden_size, initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02), name="word_embeddings") word_embeddings = embedding_layer(word_ids) network = tf.keras.Model([word_ids, mask], [word_embeddings, mask]) hidden_cfg = { "num_attention_heads": 2, "intermediate_size": 3072, "intermediate_activation": activations.gelu, "dropout_rate": 0.1, "attention_dropout_rate": 0.1, "kernel_initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02), "dtype": "float32", } # Create a small EncoderScaffold for testing. test_network = encoder_scaffold.EncoderScaffold( num_hidden_instances=3, num_output_classes=hidden_size, classification_layer_initializer=tf.keras.initializers.TruncatedNormal( stddev=0.02), hidden_cfg=hidden_cfg, embedding_cls=network, embedding_data=embedding_layer.embeddings) # Create the inputs (note that the first dimension is implicit). word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) data, pooled = test_network([word_ids, mask]) # Create a model based off of this network: model = tf.keras.Model([word_ids, mask], [data, pooled]) # Invoke the model. We can't validate the output data here (the model is too # complex) but this will catch structural runtime errors. batch_size = 3 word_id_data = np.random.randint( vocab_size, size=(batch_size, sequence_length)) mask_data = np.random.randint(2, size=(batch_size, sequence_length)) _ = model.predict([word_id_data, mask_data]) # Test that we can get the embedding data that we passed to the object. This # is necessary to support standard language model training. self.assertIs(embedding_layer.embeddings, test_network.get_embedding_table()) def test_serialize_deserialize(self): hidden_size = 32 sequence_length = 21 vocab_size = 57 # Build an embedding network to swap in for the default network. This one # will have 2 inputs (mask and word_ids) instead of 3, and won't use # positional embeddings. word_ids = tf.keras.layers.Input( shape=(sequence_length,), dtype=tf.int32, name="input_word_ids") mask = tf.keras.layers.Input( shape=(sequence_length,), dtype=tf.int32, name="input_mask") embedding_layer = layers.OnDeviceEmbedding( vocab_size=vocab_size, embedding_width=hidden_size, initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02), name="word_embeddings") word_embeddings = embedding_layer(word_ids) network = tf.keras.Model([word_ids, mask], [word_embeddings, mask]) hidden_cfg = { "num_attention_heads": 2, "intermediate_size": 3072, "intermediate_activation": activations.gelu, "dropout_rate": 0.1, "attention_dropout_rate": 0.1, "kernel_initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02), "dtype": "float32", } # Create a small EncoderScaffold for testing. 
test_network = encoder_scaffold.EncoderScaffold( num_hidden_instances=3, num_output_classes=hidden_size, classification_layer_initializer=tf.keras.initializers.TruncatedNormal( stddev=0.02), hidden_cfg=hidden_cfg, embedding_cls=network, embedding_data=embedding_layer.embeddings) # Create another network object from the first object's config. new_network = encoder_scaffold.EncoderScaffold.from_config( test_network.get_config()) # Validate that the config can be forced to JSON. _ = new_network.to_json() # If the serialization was successful, the new config should match the old. self.assertAllEqual(test_network.get_config(), new_network.get_config()) # Create a model based off of the old and new networks: word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) data, pooled = new_network([word_ids, mask]) new_model = tf.keras.Model([word_ids, mask], [data, pooled]) data, pooled = test_network([word_ids, mask]) model = tf.keras.Model([word_ids, mask], [data, pooled]) # Copy the weights between models. new_model.set_weights(model.get_weights()) # Invoke the models. batch_size = 3 word_id_data = np.random.randint( vocab_size, size=(batch_size, sequence_length)) mask_data = np.random.randint(2, size=(batch_size, sequence_length)) data, cls = model.predict([word_id_data, mask_data]) new_data, new_cls = new_model.predict([word_id_data, mask_data]) # The output should be equal. self.assertAllEqual(data, new_data) self.assertAllEqual(cls, new_cls) # We should not be able to get a reference to the embedding data. with self.assertRaisesRegex(RuntimeError, ".*does not have a reference.*"): new_network.get_embedding_table() @keras_parameterized.run_all_keras_modes class EncoderScaffoldHiddenInstanceTest(keras_parameterized.TestCase): def test_network_invocation(self): hidden_size = 32 sequence_length = 21 vocab_size = 57 num_types = 7 embedding_cfg = { "vocab_size": vocab_size, "type_vocab_size": num_types, "hidden_size": hidden_size, "seq_length": sequence_length, "max_seq_length": sequence_length, "initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02), "dropout_rate": 0.1, "dtype": "float32", } call_list = [] hidden_cfg = { "num_attention_heads": 2, "intermediate_size": 3072, "intermediate_activation": activations.gelu, "dropout_rate": 0.1, "attention_dropout_rate": 0.1, "kernel_initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02), "dtype": "float32", "call_list": call_list } # Create a small EncoderScaffold for testing. This time, we pass an already- # instantiated layer object. xformer = ValidatedTransformerLayer(**hidden_cfg) test_network = encoder_scaffold.EncoderScaffold( num_hidden_instances=3, num_output_classes=hidden_size, classification_layer_initializer=tf.keras.initializers.TruncatedNormal( stddev=0.02), hidden_cls=xformer, embedding_cfg=embedding_cfg) # Create the inputs (note that the first dimension is implicit). word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) data, pooled = test_network([word_ids, mask, type_ids]) # Create a model based off of this network: model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled]) # Invoke the model. We can't validate the output data here (the model is too # complex) but this will catch structural runtime errors. 
batch_size = 3 word_id_data = np.random.randint( vocab_size, size=(batch_size, sequence_length)) mask_data = np.random.randint(2, size=(batch_size, sequence_length)) type_id_data = np.random.randint( num_types, size=(batch_size, sequence_length)) _ = model.predict([word_id_data, mask_data, type_id_data]) # If call_list[0] exists and is True, the passed layer class was # called as part of the graph creation. self.assertNotEmpty(call_list) self.assertTrue(call_list[0], "The passed layer class wasn't instantiated.") def test_serialize_deserialize(self): hidden_size = 32 sequence_length = 21 vocab_size = 57 num_types = 7 embedding_cfg = { "vocab_size": vocab_size, "type_vocab_size": num_types, "hidden_size": hidden_size, "seq_length": sequence_length, "max_seq_length": sequence_length, "initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02), "dropout_rate": 0.1, "dtype": "float32", } call_list = [] hidden_cfg = { "num_attention_heads": 2, "intermediate_size": 3072, "intermediate_activation": activations.gelu, "dropout_rate": 0.1, "attention_dropout_rate": 0.1, "kernel_initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02), "dtype": "float32", "call_list": call_list } # Create a small EncoderScaffold for testing. This time, we pass an already- # instantiated layer object. xformer = ValidatedTransformerLayer(**hidden_cfg) test_network = encoder_scaffold.EncoderScaffold( num_hidden_instances=3, num_output_classes=hidden_size, classification_layer_initializer=tf.keras.initializers.TruncatedNormal( stddev=0.02), hidden_cls=xformer, embedding_cfg=embedding_cfg) # Create another network object from the first object's config. new_network = encoder_scaffold.EncoderScaffold.from_config( test_network.get_config()) # Validate that the config can be forced to JSON. _ = new_network.to_json() # If the serialization was successful, the new config should match the old. self.assertAllEqual(test_network.get_config(), new_network.get_config()) # Create a model based off of the old and new networks: word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) data, pooled = new_network([word_ids, mask, type_ids]) new_model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled]) data, pooled = test_network([word_ids, mask, type_ids]) model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled]) # Copy the weights between models. new_model.set_weights(model.get_weights()) # Invoke the models. batch_size = 3 word_id_data = np.random.randint( vocab_size, size=(batch_size, sequence_length)) mask_data = np.random.randint(2, size=(batch_size, sequence_length)) type_id_data = np.random.randint( num_types, size=(batch_size, sequence_length)) data, cls = model.predict([word_id_data, mask_data, type_id_data]) new_data, new_cls = new_model.predict( [word_id_data, mask_data, type_id_data]) # The output should be equal. self.assertAllEqual(data, new_data) self.assertAllEqual(cls, new_cls) if __name__ == "__main__": assert tf.version.VERSION.startswith('2.') tf.test.main()
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/official/nlp/modeling/networks/encoder_scaffold_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for ALBERT transformer-based text encoder network.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized import numpy as np import tensorflow as tf from tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import from official.nlp.modeling.networks import albert_transformer_encoder # This decorator runs the test in V1, V2-Eager, and V2-Functional mode. It # guarantees forward compatibility of this code for the V2 switchover. @keras_parameterized.run_all_keras_modes class AlbertTransformerEncoderTest(keras_parameterized.TestCase): @parameterized.named_parameters( dict(testcase_name="default", expected_dtype=tf.float32), dict( testcase_name="with_float16_dtype", expected_dtype=tf.float16, float_dtype="float16"), ) def test_network_creation(self, expected_dtype, float_dtype=None): hidden_size = 32 sequence_length = 21 kwargs = dict( vocab_size=100, hidden_size=hidden_size, sequence_length=sequence_length, num_attention_heads=2, num_layers=3) if float_dtype is not None: kwargs["float_dtype"] = float_dtype # Create a small TransformerEncoder for testing. test_network = albert_transformer_encoder.AlbertTransformerEncoder(**kwargs) # Create the inputs (note that the first dimension is implicit). word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) data, pooled = test_network([word_ids, mask, type_ids]) expected_data_shape = [None, sequence_length, hidden_size] expected_pooled_shape = [None, hidden_size] self.assertAllEqual(expected_data_shape, data.shape.as_list()) self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list()) self.assertEqual(expected_dtype, data.dtype) self.assertEqual(expected_dtype, pooled.dtype) # ALBERT has additonal 'embedding_hidden_mapping_in' weights and # it shares transformer weights. self.assertNotEmpty( [x for x in test_network.weights if "embedding_projection/" in x.name]) self.assertNotEmpty( [x for x in test_network.weights if "transformer/" in x.name]) self.assertEmpty( [x for x in test_network.weights if "transformer/layer" in x.name]) def test_network_invocation(self): hidden_size = 32 sequence_length = 21 vocab_size = 57 num_types = 7 # Create a small TransformerEncoder for testing. test_network = albert_transformer_encoder.AlbertTransformerEncoder( vocab_size=vocab_size, embedding_width=8, hidden_size=hidden_size, sequence_length=sequence_length, num_attention_heads=2, num_layers=3, type_vocab_size=num_types) self.assertTrue( test_network._position_embedding_layer._use_dynamic_slicing) # Create the inputs (note that the first dimension is implicit). 
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) data, pooled = test_network([word_ids, mask, type_ids]) # Create a model based off of this network: model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled]) # Invoke the model. We can't validate the output data here (the model is too # complex) but this will catch structural runtime errors. batch_size = 3 word_id_data = np.random.randint( vocab_size, size=(batch_size, sequence_length)) mask_data = np.random.randint(2, size=(batch_size, sequence_length)) type_id_data = np.random.randint( num_types, size=(batch_size, sequence_length)) _ = model.predict([word_id_data, mask_data, type_id_data]) # Creates a TransformerEncoder with max_sequence_length != sequence_length max_sequence_length = 128 test_network = albert_transformer_encoder.AlbertTransformerEncoder( vocab_size=vocab_size, embedding_width=8, hidden_size=hidden_size, sequence_length=sequence_length, max_sequence_length=max_sequence_length, num_attention_heads=2, num_layers=3, type_vocab_size=num_types) self.assertTrue(test_network._position_embedding_layer._use_dynamic_slicing) model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled]) _ = model.predict([word_id_data, mask_data, type_id_data]) def test_serialize_deserialize(self): # Create a network object that sets all of its config options. kwargs = dict( vocab_size=100, embedding_width=8, hidden_size=32, num_layers=3, num_attention_heads=2, sequence_length=21, max_sequence_length=21, type_vocab_size=12, intermediate_size=1223, activation="relu", dropout_rate=0.05, attention_dropout_rate=0.22, initializer="glorot_uniform", float_dtype="float16") network = albert_transformer_encoder.AlbertTransformerEncoder(**kwargs) expected_config = dict(kwargs) expected_config["activation"] = tf.keras.activations.serialize( tf.keras.activations.get(expected_config["activation"])) expected_config["initializer"] = tf.keras.initializers.serialize( tf.keras.initializers.get(expected_config["initializer"])) self.assertEqual(network.get_config(), expected_config) # Create another network object from the first object's config. new_network = ( albert_transformer_encoder.AlbertTransformerEncoder.from_config( network.get_config())) # Validate that the config can be forced to JSON. _ = new_network.to_json() # If the serialization was successful, the new config should match the old. self.assertAllEqual(network.get_config(), new_network.get_config()) if __name__ == "__main__": tf.test.main()
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/official/nlp/modeling/networks/albert_transformer_encoder_test.py
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Transformer-based text encoder network."""
from __future__ import absolute_import
from __future__ import division
# from __future__ import google_type_annotations
from __future__ import print_function

import inspect

import tensorflow as tf

from official.nlp.modeling import layers


@tf.keras.utils.register_keras_serializable(package='Text')
class EncoderScaffold(tf.keras.Model):
  """Bi-directional Transformer-based encoder network scaffold.

  This network allows users to flexibly implement an encoder similar to the
  one described in "BERT: Pre-training of Deep Bidirectional Transformers for
  Language Understanding" (https://arxiv.org/abs/1810.04805).

  In this network, users can choose to provide a custom embedding subnetwork
  (which will replace the standard embedding logic) and/or a custom hidden
  layer class (which will replace the Transformer instantiation in the
  encoder). For each of these custom injection points, users can pass either a
  class or a class instance. If a class is passed, that class will be
  instantiated using the 'embedding_cfg' or 'hidden_cfg' argument,
  respectively; if an instance is passed, that instance will be invoked. (In
  the case of hidden_cls, the instance will be invoked 'num_hidden_instances'
  times.) If hidden_cls is not overridden, a default transformer layer will be
  instantiated.

  Attributes:
    num_output_classes: The output size of the classification layer.
    classification_layer_initializer: The initializer for the classification
      layer.
    classification_layer_dtype: The dtype for the classification layer.
    embedding_cls: The class or instance to use to embed the input data. This
      class or instance defines the inputs to this encoder. If embedding_cls
      is not set, a default embedding network (from the original BERT paper)
      will be created.
    embedding_cfg: A dict of kwargs to pass to the embedding_cls, if it needs
      to be instantiated. If embedding_cls is not set, a config dict must be
      passed to 'embedding_cfg' with the following values:
      "vocab_size": The size of the token vocabulary.
      "type_vocab_size": The size of the type vocabulary.
      "hidden_size": The hidden size for this encoder.
      "max_seq_length": The maximum sequence length for this encoder.
      "seq_length": The sequence length for this encoder.
      "initializer": The initializer for the embedding portion of this
        encoder.
      "dropout_rate": The dropout rate to apply before the encoding layers.
      "dtype": (Optional) The dtype of the embedding layers.
    embedding_data: A reference to the embedding weights that will be used to
      train the masked language model, if necessary. This is optional, and
      only needed if (1) you are overriding embedding_cls and (2) are doing
      standard pretraining.
    num_hidden_instances: The number of times to instantiate and/or invoke the
      hidden_cls.
    hidden_cls: The class or instance to encode the input data. If hidden_cls
      is not set, a KerasBERT transformer layer will be used as the encoder
      class.
    hidden_cfg: A dict of kwargs to pass to the hidden_cls, if it needs to be
      instantiated. If hidden_cls is not set, a config dict must be passed to
      'hidden_cfg' with the following values:
      "num_attention_heads": The number of attention heads. The hidden size
        must be divisible by num_attention_heads.
      "intermediate_size": The intermediate size of the transformer.
      "intermediate_activation": The activation to apply in the transformer.
      "dropout_rate": The overall dropout rate for the transformer layers.
      "attention_dropout_rate": The dropout rate for the attention layers.
      "kernel_initializer": The initializer for the transformer layers.
      "dtype": The dtype of the transformer.
  """

  def __init__(
      self,
      num_output_classes,
      classification_layer_initializer=tf.keras.initializers.TruncatedNormal(
          stddev=0.02),
      classification_layer_dtype=tf.float32,
      embedding_cls=None,
      embedding_cfg=None,
      embedding_data=None,
      num_hidden_instances=1,
      hidden_cls=layers.Transformer,
      hidden_cfg=None,
      **kwargs):
    self._self_setattr_tracking = False
    self._hidden_cls = hidden_cls
    self._hidden_cfg = hidden_cfg
    self._num_hidden_instances = num_hidden_instances
    self._num_output_classes = num_output_classes
    self._classification_layer_initializer = classification_layer_initializer
    self._embedding_cls = embedding_cls
    self._embedding_cfg = embedding_cfg
    self._embedding_data = embedding_data
    self._kwargs = kwargs

    if embedding_cls:
      if inspect.isclass(embedding_cls):
        self._embedding_network = embedding_cls(embedding_cfg)
      else:
        self._embedding_network = embedding_cls
      inputs = self._embedding_network.inputs
      embeddings, mask = self._embedding_network(inputs)
    else:
      self._embedding_network = None
      word_ids = tf.keras.layers.Input(
          shape=(embedding_cfg['seq_length'],),
          dtype=tf.int32,
          name='input_word_ids')
      mask = tf.keras.layers.Input(
          shape=(embedding_cfg['seq_length'],),
          dtype=tf.int32,
          name='input_mask')
      type_ids = tf.keras.layers.Input(
          shape=(embedding_cfg['seq_length'],),
          dtype=tf.int32,
          name='input_type_ids')
      inputs = [word_ids, mask, type_ids]

      self._embedding_layer = layers.OnDeviceEmbedding(
          vocab_size=embedding_cfg['vocab_size'],
          embedding_width=embedding_cfg['hidden_size'],
          initializer=embedding_cfg['initializer'],
          name='word_embeddings')
      word_embeddings = self._embedding_layer(word_ids)

      # Always uses dynamic slicing for simplicity.
self._position_embedding_layer = layers.PositionEmbedding( initializer=embedding_cfg['initializer'], use_dynamic_slicing=True, max_sequence_length=embedding_cfg['max_seq_length']) position_embeddings = self._position_embedding_layer(word_embeddings) type_embeddings = ( layers.OnDeviceEmbedding( vocab_size=embedding_cfg['type_vocab_size'], embedding_width=embedding_cfg['hidden_size'], initializer=embedding_cfg['initializer'], use_one_hot=True, name='type_embeddings')(type_ids)) embeddings = tf.keras.layers.Add()( [word_embeddings, position_embeddings, type_embeddings]) embeddings = ( tf.keras.layers.LayerNormalization( name='embeddings/layer_norm', axis=-1, epsilon=1e-12, dtype=tf.float32)(embeddings)) embeddings = ( tf.keras.layers.Dropout( rate=embedding_cfg['dropout_rate'], dtype=tf.float32)(embeddings)) if embedding_cfg.get('dtype') == 'float16': embeddings = tf.cast(embeddings, tf.float16) attention_mask = layers.SelfAttentionMask()([embeddings, mask]) data = embeddings for _ in range(num_hidden_instances): if inspect.isclass(hidden_cls): layer = self._hidden_cls(**hidden_cfg) else: layer = self._hidden_cls data = layer([data, attention_mask]) first_token_tensor = ( tf.keras.layers.Lambda(lambda x: tf.squeeze(x[:, 0:1, :], axis=1))(data) ) cls_output = tf.keras.layers.Dense( units=num_output_classes, activation='tanh', kernel_initializer=classification_layer_initializer, dtype=classification_layer_dtype, name='cls_transform')( first_token_tensor) super(EncoderScaffold, self).__init__( inputs=inputs, outputs=[data, cls_output], **kwargs) def get_config(self): config_dict = { 'num_hidden_instances': self._num_hidden_instances, 'num_output_classes': self._num_output_classes, 'classification_layer_initializer': self._classification_layer_initializer, 'embedding_cls': self._embedding_network, 'embedding_cfg': self._embedding_cfg, 'hidden_cfg': self._hidden_cfg, } if inspect.isclass(self._hidden_cls): config_dict['hidden_cls_string'] = tf.keras.utils.get_registered_name( self._hidden_cls) else: config_dict['hidden_cls'] = self._hidden_cls config_dict.update(self._kwargs) return config_dict @classmethod def from_config(cls, config, custom_objects=None): if 'hidden_cls_string' in config: config['hidden_cls'] = tf.keras.utils.get_registered_object( config['hidden_cls_string'], custom_objects=custom_objects) del config['hidden_cls_string'] return cls(**config) def get_embedding_table(self): if self._embedding_network is None: # In this case, we don't have a custom embedding network and can return # the standard embedding data. return self._embedding_layer.embeddings if self._embedding_data is None: raise RuntimeError(('The EncoderScaffold %s does not have a reference ' 'to the embedding data. This is required when you ' 'pass a custom embedding network to the scaffold. ' 'It is also possible that you are trying to get ' 'embedding data from an embedding scaffold with a ' 'custom embedding network where the scaffold has ' 'been serialized and deserialized. Unfortunately, ' 'accessing custom embedding references after ' 'serialization is not yet supported.') % self.name) else: return self._embedding_data
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/official/nlp/modeling/networks/encoder_scaffold.py
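The scaffold's config-driven path is exercised by the tests earlier in this listing; the sketch below condenses that usage into a minimal standalone example. All sizes and config values are illustrative placeholders, not recommended settings.

import numpy as np
import tensorflow as tf

from official.modeling import activations
from official.nlp.modeling.networks import encoder_scaffold

hidden_size, seq_length = 32, 16
embedding_cfg = {
    "vocab_size": 100,
    "type_vocab_size": 2,
    "hidden_size": hidden_size,
    "seq_length": seq_length,
    "max_seq_length": seq_length,
    "initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02),
    "dropout_rate": 0.1,
}
hidden_cfg = {
    "num_attention_heads": 2,
    "intermediate_size": 128,
    "intermediate_activation": activations.gelu,
    "dropout_rate": 0.1,
    "attention_dropout_rate": 0.1,
    "kernel_initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02),
    "dtype": "float32",
}

# With no embedding_cls/hidden_cls overrides, the scaffold builds the default
# BERT-style embedding network and a stack of standard transformer layers.
encoder = encoder_scaffold.EncoderScaffold(
    num_hidden_instances=2,
    num_output_classes=hidden_size,
    hidden_cfg=hidden_cfg,
    embedding_cfg=embedding_cfg)

# The default embedding network expects [word_ids, mask, type_ids].
batch_size = 2
word_ids = np.random.randint(embedding_cfg["vocab_size"],
                             size=(batch_size, seq_length))
mask = np.ones((batch_size, seq_length), dtype=np.int32)
type_ids = np.zeros((batch_size, seq_length), dtype=np.int32)
sequence_output, cls_output = encoder.predict([word_ids, mask, type_ids])
# sequence_output: (batch_size, seq_length, hidden_size); cls_output:
# (batch_size, hidden_size), produced by the pooled 'cls_transform' layer.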
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for classification network.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import tensorflow as tf from tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import from official.nlp.modeling.networks import classification # This decorator runs the test in V1, V2-Eager, and V2-Functional mode. It # guarantees forward compatibility of this code for the V2 switchover. @keras_parameterized.run_all_keras_modes class ClassificationTest(keras_parameterized.TestCase): def test_network_creation(self): """Validate that the Keras object can be created.""" input_width = 512 num_classes = 10 test_object = classification.Classification( input_width=input_width, num_classes=num_classes) # Create a 2-dimensional input (the first dimension is implicit). cls_data = tf.keras.Input(shape=(input_width,), dtype=tf.float32) output = test_object(cls_data) # Validate that the outputs are of the expected shape. expected_output_shape = [None, num_classes] self.assertEqual(expected_output_shape, output.shape.as_list()) def test_network_invocation(self): """Validate that the Keras object can be invoked.""" input_width = 512 num_classes = 10 test_object = classification.Classification( input_width=input_width, num_classes=num_classes, output='predictions') # Create a 2-dimensional input (the first dimension is implicit). cls_data = tf.keras.Input(shape=(input_width,), dtype=tf.float32) output = test_object(cls_data) # Invoke the network as part of a Model. model = tf.keras.Model(cls_data, output) input_data = 10 * np.random.random_sample((3, input_width)) _ = model.predict(input_data) def test_network_invocation_with_internal_logits(self): """Validate that the logit outputs are correct.""" input_width = 512 num_classes = 10 test_object = classification.Classification( input_width=input_width, num_classes=num_classes, output='predictions') # Create a 2-dimensional input (the first dimension is implicit). cls_data = tf.keras.Input(shape=(input_width,), dtype=tf.float32) output = test_object(cls_data) model = tf.keras.Model(cls_data, output) logits_model = tf.keras.Model(test_object.inputs, test_object.logits) batch_size = 3 input_data = 10 * np.random.random_sample((batch_size, input_width)) outputs = model.predict(input_data) logits = logits_model.predict(input_data) # Ensure that the tensor shapes are correct. expected_output_shape = (batch_size, num_classes) self.assertEqual(expected_output_shape, outputs.shape) self.assertEqual(expected_output_shape, logits.shape) # Ensure that the logits, when softmaxed, create the outputs. 
input_tensor = tf.keras.Input(expected_output_shape[1:]) output_tensor = tf.keras.layers.Activation(tf.nn.log_softmax)(input_tensor) softmax_model = tf.keras.Model(input_tensor, output_tensor) calculated_softmax = softmax_model.predict(logits) self.assertAllClose(outputs, calculated_softmax) def test_network_invocation_with_internal_and_external_logits(self): """Validate that the logit outputs are correct.""" input_width = 512 num_classes = 10 test_object = classification.Classification( input_width=input_width, num_classes=num_classes, output='logits') # Create a 2-dimensional input (the first dimension is implicit). cls_data = tf.keras.Input(shape=(input_width,), dtype=tf.float32) output = test_object(cls_data) model = tf.keras.Model(cls_data, output) logits_model = tf.keras.Model(test_object.inputs, test_object.logits) batch_size = 3 input_data = 10 * np.random.random_sample((batch_size, input_width)) outputs = model.predict(input_data) logits = logits_model.predict(input_data) # Ensure that the tensor shapes are correct. expected_output_shape = (batch_size, num_classes) self.assertEqual(expected_output_shape, outputs.shape) self.assertEqual(expected_output_shape, logits.shape) self.assertAllClose(outputs, logits) def test_network_invocation_with_logit_output(self): """Validate that the logit outputs are correct.""" input_width = 512 num_classes = 10 test_object = classification.Classification( input_width=input_width, num_classes=num_classes, output='predictions') logit_object = classification.Classification( input_width=input_width, num_classes=num_classes, output='logits') logit_object.set_weights(test_object.get_weights()) # Create a 2-dimensional input (the first dimension is implicit). cls_data = tf.keras.Input(shape=(input_width,), dtype=tf.float32) output = test_object(cls_data) logit_output = logit_object(cls_data) model = tf.keras.Model(cls_data, output) logits_model = tf.keras.Model(cls_data, logit_output) batch_size = 3 input_data = 10 * np.random.random_sample((batch_size, input_width)) outputs = model.predict(input_data) logits = logits_model.predict(input_data) # Ensure that the tensor shapes are correct. expected_output_shape = (batch_size, num_classes) self.assertEqual(expected_output_shape, outputs.shape) self.assertEqual(expected_output_shape, logits.shape) # Ensure that the logits, when softmaxed, create the outputs. input_tensor = tf.keras.Input(expected_output_shape[1:]) output_tensor = tf.keras.layers.Activation(tf.nn.log_softmax)(input_tensor) softmax_model = tf.keras.Model(input_tensor, output_tensor) calculated_softmax = softmax_model.predict(logits) self.assertAllClose(outputs, calculated_softmax) def test_serialize_deserialize(self): # Create a network object that sets all of its config options. network = classification.Classification( input_width=128, num_classes=10, initializer='zeros', output='predictions') # Create another network object from the first object's config. new_network = classification.Classification.from_config( network.get_config()) # Validate that the config can be forced to JSON. _ = new_network.to_json() # If the serialization was successful, the new config should match the old. self.assertAllEqual(network.get_config(), new_network.get_config()) def test_unknown_output_type_fails(self): with self.assertRaisesRegex(ValueError, 'Unknown `output` value "bad".*'): _ = classification.Classification( input_width=128, num_classes=10, output='bad') if __name__ == '__main__': tf.test.main()
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/official/nlp/modeling/networks/classification_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Trainer network for BERT-style models.""" from __future__ import absolute_import from __future__ import division # from __future__ import google_type_annotations from __future__ import print_function import tensorflow as tf from official.nlp.modeling import networks @tf.keras.utils.register_keras_serializable(package='Text') class BertSpanLabeler(tf.keras.Model): """Span labeler model based on a BERT-style transformer-based encoder. This is an implementation of the network structure surrounding a transformer encoder as described in "BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding" (https://arxiv.org/abs/1810.04805). The BertSpanLabeler allows a user to pass in a transformer stack, and instantiates a span labeling network based on a single dense layer. Attributes: network: A transformer network. This network should output a sequence output and a classification output. Furthermore, it should expose its embedding table via a "get_embedding_table" method. initializer: The initializer (if any) to use in the span labeling network. Defaults to a Glorot uniform initializer. output: The output style for this network. Can be either 'logits' or 'predictions'. """ def __init__(self, network, initializer='glorot_uniform', output='logits', **kwargs): self._self_setattr_tracking = False self._config = { 'network': network, 'initializer': initializer, 'output': output, } # We want to use the inputs of the passed network as the inputs to this # Model. To do this, we need to keep a handle to the network inputs for use # when we construct the Model object at the end of init. inputs = network.inputs # Because we have a copy of inputs to create this Model object, we can # invoke the Network object with its own input tensors to start the Model. sequence_output, _ = network(inputs) # This is an instance variable for ease of access to the underlying task # network. self.span_labeling = networks.SpanLabeling( input_width=sequence_output.shape[-1], initializer=initializer, output=output, name='span_labeling') start_logits, end_logits = self.span_labeling(sequence_output) # Use identity layers wrapped in lambdas to explicitly name the output # tensors. This allows us to use string-keyed dicts in Keras fit/predict/ # evaluate calls. start_logits = tf.keras.layers.Lambda( tf.identity, name='start_positions')( start_logits) end_logits = tf.keras.layers.Lambda( tf.identity, name='end_positions')( end_logits) logits = [start_logits, end_logits] super(BertSpanLabeler, self).__init__( inputs=inputs, outputs=logits, **kwargs) def get_config(self): return self._config @classmethod def from_config(cls, config, custom_objects=None): return cls(**config)
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/official/nlp/modeling/networks/bert_span_labeler.py
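BertSpanLabeler is meant to be wrapped around an encoder network; the sketch below shows one plausible wiring, assuming a small TransformerEncoder like the one used in the pretrainer test that follows. The compile call is only an illustration, keyed on the 'start_positions'/'end_positions' output names defined in the model.

import tensorflow as tf

from official.nlp.modeling import networks
from official.nlp.modeling.networks import bert_span_labeler

# A small encoder purely for illustration.
encoder = networks.TransformerEncoder(
    vocab_size=100, num_layers=2, sequence_length=32)

span_labeler = bert_span_labeler.BertSpanLabeler(
    network=encoder, initializer='glorot_uniform', output='logits')

# The two outputs are named via identity Lambdas, so per-output losses can be
# supplied as a dict keyed by those names.
span_labeler.compile(
    optimizer='adam',
    loss={
        'start_positions': tf.keras.losses.SparseCategoricalCrossentropy(
            from_logits=True),
        'end_positions': tf.keras.losses.SparseCategoricalCrossentropy(
            from_logits=True),
    })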
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for BERT trainer network.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf from tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import from official.nlp.modeling import networks from official.nlp.modeling.networks import bert_pretrainer # This decorator runs the test in V1, V2-Eager, and V2-Functional mode. It # guarantees forward compatibility of this code for the V2 switchover. @keras_parameterized.run_all_keras_modes class BertPretrainerTest(keras_parameterized.TestCase): def test_bert_trainer(self): """Validate that the Keras object can be created.""" # Build a transformer network to use within the BERT trainer. vocab_size = 100 sequence_length = 512 test_network = networks.TransformerEncoder( vocab_size=vocab_size, num_layers=2, sequence_length=sequence_length) # Create a BERT trainer with the created network. num_classes = 3 num_token_predictions = 2 bert_trainer_model = bert_pretrainer.BertPretrainer( test_network, num_classes=num_classes, num_token_predictions=num_token_predictions) # Create a set of 2-dimensional inputs (the first dimension is implicit). word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) lm_mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) # Invoke the trainer model on the inputs. This causes the layer to be built. lm_outs, cls_outs = bert_trainer_model([word_ids, mask, type_ids, lm_mask]) # Validate that the outputs are of the expected shape. expected_lm_shape = [None, num_token_predictions, vocab_size] expected_classification_shape = [None, num_classes] self.assertAllEqual(expected_lm_shape, lm_outs.shape.as_list()) self.assertAllEqual(expected_classification_shape, cls_outs.shape.as_list()) def test_bert_trainer_tensor_call(self): """Validate that the Keras object can be invoked.""" # Build a transformer network to use within the BERT trainer. (Here, we use # a short sequence_length for convenience.) test_network = networks.TransformerEncoder( vocab_size=100, num_layers=2, sequence_length=2) # Create a BERT trainer with the created network. bert_trainer_model = bert_pretrainer.BertPretrainer( test_network, num_classes=2, num_token_predictions=2) # Create a set of 2-dimensional data tensors to feed into the model. word_ids = tf.constant([[1, 1], [2, 2]], dtype=tf.int32) mask = tf.constant([[1, 1], [1, 0]], dtype=tf.int32) type_ids = tf.constant([[1, 1], [2, 2]], dtype=tf.int32) lm_mask = tf.constant([[1, 1], [1, 0]], dtype=tf.int32) # Invoke the trainer model on the tensors. In Eager mode, this does the # actual calculation. 
(We can't validate the outputs, since the network is # too complex: this simply ensures we're not hitting runtime errors.) _, _ = bert_trainer_model([word_ids, mask, type_ids, lm_mask]) def test_serialize_deserialize(self): """Validate that the BERT trainer can be serialized and deserialized.""" # Build a transformer network to use within the BERT trainer. (Here, we use # a short sequence_length for convenience.) test_network = networks.TransformerEncoder( vocab_size=100, num_layers=2, sequence_length=5) # Create a BERT trainer with the created network. (Note that all the args # are different, so we can catch any serialization mismatches.) bert_trainer_model = bert_pretrainer.BertPretrainer( test_network, num_classes=4, num_token_predictions=3) # Create another BERT trainer via serialization and deserialization. config = bert_trainer_model.get_config() new_bert_trainer_model = bert_pretrainer.BertPretrainer.from_config(config) # Validate that the config can be forced to JSON. _ = new_bert_trainer_model.to_json() # If the serialization was successful, the new config should match the old. self.assertAllEqual(bert_trainer_model.get_config(), new_bert_trainer_model.get_config()) if __name__ == '__main__': tf.test.main()
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/official/nlp/modeling/networks/bert_pretrainer_test.py
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Span labeling network.""" from __future__ import absolute_import from __future__ import division # from __future__ import google_type_annotations from __future__ import print_function import tensorflow as tf @tf.keras.utils.register_keras_serializable(package='Text') class SpanLabeling(tf.keras.Model): """Span labeling network head for BERT modeling. This network implements a simple single-span labeler based on a dense layer. Attributes: input_width: The innermost dimension of the input tensor to this network. activation: The activation, if any, for the dense layer in this network. initializer: The intializer for the dense layer in this network. Defaults to a Glorot uniform initializer. output: The output style for this network. Can be either 'logits' or 'predictions'. """ def __init__(self, input_width, activation=None, initializer='glorot_uniform', output='logits', **kwargs): self._self_setattr_tracking = False self._config = { 'input_width': input_width, 'activation': activation, 'initializer': initializer, 'output': output, } sequence_data = tf.keras.layers.Input( shape=(None, input_width), name='sequence_data', dtype=tf.float32) intermediate_logits = tf.keras.layers.Dense( 2, # This layer predicts start location and end location. activation=activation, kernel_initializer=initializer, name='predictions/transform/logits')( sequence_data) self.start_logits, self.end_logits = ( tf.keras.layers.Lambda(self._split_output_tensor)(intermediate_logits)) start_predictions = tf.keras.layers.Activation(tf.nn.log_softmax)( self.start_logits) end_predictions = tf.keras.layers.Activation(tf.nn.log_softmax)( self.end_logits) if output == 'logits': output_tensors = [self.start_logits, self.end_logits] elif output == 'predictions': output_tensors = [start_predictions, end_predictions] else: raise ValueError( ('Unknown `output` value "%s". `output` can be either "logits" or ' '"predictions"') % output) super(SpanLabeling, self).__init__( inputs=[sequence_data], outputs=output_tensors, **kwargs) def _split_output_tensor(self, tensor): transposed_tensor = tf.transpose(tensor, [2, 0, 1]) return tf.unstack(transposed_tensor) def get_config(self): return self._config @classmethod def from_config(cls, config, custom_objects=None): return cls(**config)
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/official/nlp/modeling/networks/span_labeling.py
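The span labeling head can also be used on its own, given any batch of hidden vectors; a small sketch follows, with arbitrary sizes.

import numpy as np

from official.nlp.modeling.networks import span_labeling

batch_size, seq_length, hidden_size = 3, 10, 16
head = span_labeling.SpanLabeling(input_width=hidden_size, output='logits')

sequence_data = np.random.random(
    (batch_size, seq_length, hidden_size)).astype(np.float32)
start_logits, end_logits = head.predict(sequence_data)
# Each output has shape (batch_size, seq_length): one score per token for the
# start position and one for the end position of the answer span.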
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Masked language model network.""" from __future__ import absolute_import from __future__ import division # from __future__ import google_type_annotations from __future__ import print_function import tensorflow as tf from official.modeling import tf_utils @tf.keras.utils.register_keras_serializable(package='Text') class MaskedLM(tf.keras.Model): """Masked language model network head for BERT modeling. This network implements a masked language model based on the provided network. It assumes that the network being passed has a "get_embedding_table()" method. Attributes: input_width: The innermost dimension of the input tensor to this network. num_predictions: The number of predictions to make per sequence. source_network: The network with the embedding layer to use for the embedding layer. activation: The activation, if any, for the dense layer in this network. initializer: The intializer for the dense layer in this network. Defaults to a Glorot uniform initializer. output: The output style for this network. Can be either 'logits' or 'predictions'. """ def __init__(self, input_width, num_predictions, source_network, float_type, activation=None, initializer='glorot_uniform', output='logits', **kwargs): embedding_table = source_network.get_embedding_table() vocab_size, hidden_size = embedding_table.shape sequence_data = tf.keras.layers.Input( shape=(None, input_width), name='sequence_data', dtype=tf.float32) masked_lm_positions = tf.keras.layers.Input( shape=(num_predictions,), name='masked_lm_positions', dtype=tf.int32) masked_lm_input = tf.keras.layers.Lambda( lambda x: self._gather_indexes(x[0], x[1]))( [sequence_data, masked_lm_positions]) lm_data = ( tf.keras.layers.Dense( hidden_size, activation=activation, kernel_initializer=initializer, name='cls/predictions/transform/dense')(masked_lm_input)) lm_data = tf.keras.layers.LayerNormalization( axis=-1, epsilon=1e-12, name='cls/predictions/transform/LayerNorm')( lm_data) lm_data = tf.keras.layers.Lambda( lambda x: tf.matmul(x, tf.cast(embedding_table, float_type), transpose_b=True))( lm_data) logits = Bias( initializer=tf.keras.initializers.Zeros(), name='cls/predictions/output_bias')( lm_data) # We can't use the standard Keras reshape layer here, since it expects # the input and output batch size to be the same. reshape_layer = tf.keras.layers.Lambda( lambda x: tf.reshape(x, [-1, num_predictions, vocab_size])) self.logits = reshape_layer(logits) predictions = tf.keras.layers.Activation(tf.nn.log_softmax, dtype='float32')(self.logits) if output == 'logits': output_tensors = self.logits elif output == 'predictions': output_tensors = predictions else: raise ValueError( ('Unknown `output` value "%s". 
`output` can be either "logits" or ' '"predictions"') % output) super(MaskedLM, self).__init__( inputs=[sequence_data, masked_lm_positions], outputs=output_tensors, **kwargs) def get_config(self): raise NotImplementedError('MaskedLM cannot be directly serialized at this ' 'time. Please use it only in Layers or ' 'functionally subclassed Models/Networks.') def _gather_indexes(self, sequence_tensor, positions): """Gathers the vectors at the specific positions. Args: sequence_tensor: Sequence output of `BertModel` layer of shape (`batch_size`, `seq_length`, num_hidden) where num_hidden is number of hidden units of `BertModel` layer. positions: Positions ids of tokens in sequence to mask for pretraining of with dimension (batch_size, num_predictions) where `num_predictions` is maximum number of tokens to mask out and predict per each sequence. Returns: Masked out sequence tensor of shape (batch_size * num_predictions, num_hidden). """ sequence_shape = tf_utils.get_shape_list( sequence_tensor, name='sequence_output_tensor') batch_size, seq_length, width = sequence_shape flat_offsets = tf.keras.backend.reshape( tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1]) flat_positions = tf.keras.backend.reshape(positions + flat_offsets, [-1]) flat_sequence_tensor = tf.keras.backend.reshape( sequence_tensor, [batch_size * seq_length, width]) output_tensor = tf.gather(flat_sequence_tensor, flat_positions) return output_tensor @tf.keras.utils.register_keras_serializable(package='Text') # Temporary until we can create a Dense layer that ties the embedding. class Bias(tf.keras.layers.Layer): """Adds a bias term to an input.""" def __init__(self, initializer='zeros', regularizer=None, constraint=None, activation=None, **kwargs): super(Bias, self).__init__(**kwargs) self._initializer = tf.keras.initializers.get(initializer) self._regularizer = tf.keras.regularizers.get(regularizer) self._constraint = tf.keras.constraints.get(constraint) self._activation = tf.keras.activations.get(activation) def build(self, input_shape): input_shape = tf.TensorShape(input_shape) self._bias = self.add_weight( 'bias', shape=input_shape[1:], initializer=self._initializer, regularizer=self._regularizer, constraint=self._constraint, dtype=self._dtype, trainable=True) super(Bias, self).build(input_shape) def get_config(self): config = { 'activation': tf.keras.activations.serialize(self._activation), 'initializer': tf.keras.initializers.serialize(self._initializer), 'regularizer': tf.keras.regularizers.serialize(self._regularizer), 'constraint': tf.keras.constraints.serialize(self._constraint) } base_config = super(Bias, self).get_config() return dict(list(base_config.items()) + list(config.items())) def call(self, inputs): outputs = tf.nn.bias_add(inputs, self._bias) if self._activation is not None: return self._activation(outputs) # pylint: disable=not-callable else: return outputs
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/official/nlp/modeling/networks/masked_lm.py
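A sketch of attaching this MaskedLM head to an encoder follows. Unlike the test file below, it passes float_type explicitly, since the constructor above declares that argument without a default; the encoder hyperparameters are placeholders.

import tensorflow as tf

from official.nlp.modeling.networks import masked_lm
from official.nlp.modeling.networks import transformer_encoder

# Any network exposing get_embedding_table() can serve as the source network.
encoder = transformer_encoder.TransformerEncoder(
    vocab_size=100,
    num_layers=1,
    sequence_length=32,
    hidden_size=64,
    num_attention_heads=4)

lm_head = masked_lm.MaskedLM(
    input_width=64,
    num_predictions=20,
    source_network=encoder,
    float_type=tf.float32,
    output='predictions')
# The head is invoked as lm_head([sequence_output, masked_lm_positions]),
# where masked_lm_positions has shape (batch_size, num_predictions).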
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for masked language model network.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import tensorflow as tf from tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import from official.nlp.modeling.networks import masked_lm from official.nlp.modeling.networks import transformer_encoder # This decorator runs the test in V1, V2-Eager, and V2-Functional mode. It # guarantees forward compatibility of this code for the V2 switchover. @keras_parameterized.run_all_keras_modes class MaskedLMTest(keras_parameterized.TestCase): def create_network(self, vocab_size, sequence_length, hidden_size, num_predictions, output='predictions', xformer_stack=None): # First, create a transformer stack that we can use to get the LM's # vocabulary weight. if xformer_stack is None: xformer_stack = transformer_encoder.TransformerEncoder( vocab_size=vocab_size, num_layers=1, sequence_length=sequence_length, hidden_size=hidden_size, num_attention_heads=4, ) word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) lm_outputs, _ = xformer_stack([word_ids, mask, type_ids]) # Create a maskedLM from the transformer stack. test_network = masked_lm.MaskedLM( num_predictions=num_predictions, input_width=lm_outputs.shape[-1], source_network=xformer_stack, output=output) return test_network def test_network_creation(self): vocab_size = 100 sequence_length = 32 hidden_size = 64 num_predictions = 21 test_network = self.create_network( vocab_size=vocab_size, sequence_length=sequence_length, hidden_size=hidden_size, num_predictions=num_predictions) # Make sure that the output tensor of the masked LM is the right shape. lm_input_tensor = tf.keras.Input(shape=(sequence_length, hidden_size)) masked_lm_positions = tf.keras.Input( shape=(num_predictions,), dtype=tf.int32) output = test_network([lm_input_tensor, masked_lm_positions]) expected_output_shape = [None, num_predictions, vocab_size] self.assertEqual(expected_output_shape, output.shape.as_list()) def test_network_invocation_with_internal_logits(self): vocab_size = 100 sequence_length = 32 hidden_size = 64 num_predictions = 21 test_network = self.create_network( vocab_size=vocab_size, sequence_length=sequence_length, hidden_size=hidden_size, num_predictions=num_predictions) # Create a model from the masked LM layer. 
lm_input_tensor = tf.keras.Input(shape=(sequence_length, hidden_size)) masked_lm_positions = tf.keras.Input( shape=(num_predictions,), dtype=tf.int32) output = test_network([lm_input_tensor, masked_lm_positions]) model = tf.keras.Model([lm_input_tensor, masked_lm_positions], output) logits_model = tf.keras.Model(test_network.inputs, test_network.logits) # Invoke the masked LM on some fake data to make sure there are no runtime # errors in the code. batch_size = 3 lm_input_data = 10 * np.random.random_sample( (batch_size, sequence_length, hidden_size)) masked_position_data = np.random.randint( 2, size=(batch_size, num_predictions)) outputs = model.predict([lm_input_data, masked_position_data]) logits = logits_model.predict([lm_input_data, masked_position_data]) # Ensure that the tensor shapes are correct. expected_output_shape = (batch_size, num_predictions, vocab_size) self.assertEqual(expected_output_shape, outputs.shape) self.assertEqual(expected_output_shape, logits.shape) # Ensure that the logits, when softmaxed, create the outputs. input_tensor = tf.keras.Input(expected_output_shape[1:]) output_tensor = tf.keras.layers.Activation(tf.nn.log_softmax)(input_tensor) softmax_model = tf.keras.Model(input_tensor, output_tensor) calculated_softmax = softmax_model.predict(logits) self.assertAllClose(outputs, calculated_softmax) def test_network_invocation_with_external_logits(self): vocab_size = 100 sequence_length = 32 hidden_size = 64 num_predictions = 21 xformer_stack = transformer_encoder.TransformerEncoder( vocab_size=vocab_size, num_layers=1, sequence_length=sequence_length, hidden_size=hidden_size, num_attention_heads=4, ) test_network = self.create_network( vocab_size=vocab_size, sequence_length=sequence_length, hidden_size=hidden_size, num_predictions=num_predictions, xformer_stack=xformer_stack, output='predictions') logit_network = self.create_network( vocab_size=vocab_size, sequence_length=sequence_length, hidden_size=hidden_size, num_predictions=num_predictions, xformer_stack=xformer_stack, output='logits') logit_network.set_weights(test_network.get_weights()) # Create a model from the masked LM layer. lm_input_tensor = tf.keras.Input(shape=(sequence_length, hidden_size)) masked_lm_positions = tf.keras.Input( shape=(num_predictions,), dtype=tf.int32) output = test_network([lm_input_tensor, masked_lm_positions]) logit_output = logit_network([lm_input_tensor, masked_lm_positions]) model = tf.keras.Model([lm_input_tensor, masked_lm_positions], output) logits_model = tf.keras.Model(([lm_input_tensor, masked_lm_positions]), logit_output) # Invoke the masked LM on some fake data to make sure there are no runtime # errors in the code. batch_size = 3 lm_input_data = 10 * np.random.random_sample( (batch_size, sequence_length, hidden_size)) masked_position_data = np.random.randint( 2, size=(batch_size, num_predictions)) outputs = model.predict([lm_input_data, masked_position_data]) logits = logits_model.predict([lm_input_data, masked_position_data]) # Ensure that the tensor shapes are correct. expected_output_shape = (batch_size, num_predictions, vocab_size) self.assertEqual(expected_output_shape, outputs.shape) self.assertEqual(expected_output_shape, logits.shape) # Ensure that the logits, when softmaxed, create the outputs. 
input_tensor = tf.keras.Input(expected_output_shape[1:]) output_tensor = tf.keras.layers.Activation(tf.nn.log_softmax)(input_tensor) softmax_model = tf.keras.Model(input_tensor, output_tensor) calculated_softmax = softmax_model.predict(logits) self.assertAllClose(outputs, calculated_softmax) def test_network_invocation(self): vocab_size = 100 sequence_length = 32 hidden_size = 64 num_predictions = 21 test_network = self.create_network( vocab_size=vocab_size, sequence_length=sequence_length, hidden_size=hidden_size, num_predictions=num_predictions) # Create a model from the masked LM layer. lm_input_tensor = tf.keras.Input(shape=(sequence_length, hidden_size)) masked_lm_positions = tf.keras.Input( shape=(num_predictions,), dtype=tf.int32) output = test_network([lm_input_tensor, masked_lm_positions]) model = tf.keras.Model([lm_input_tensor, masked_lm_positions], output) # Invoke the masked LM on some fake data to make sure there are no runtime # errors in the code. batch_size = 3 lm_input_data = 10 * np.random.random_sample( (batch_size, sequence_length, hidden_size)) masked_position_data = np.random.randint( 2, size=(batch_size, num_predictions)) _ = model.predict([lm_input_data, masked_position_data]) def test_unknown_output_type_fails(self): with self.assertRaisesRegex(ValueError, 'Unknown `output` value "bad".*'): _ = self.create_network( vocab_size=8, sequence_length=8, hidden_size=8, num_predictions=8, output='bad') if __name__ == '__main__': tf.test.main()
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/official/nlp/modeling/networks/masked_lm_test.py
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import bz2 import glob import gzip import os import urllib.request import shutil import sys class PubMedDownloader: def __init__(self, subset, save_path): self.subset = subset # Modifying self.save_path in two steps to handle creation of subdirectories self.save_path = save_path + '/pubmed' + '/' if not os.path.exists(self.save_path): os.makedirs(self.save_path) self.save_path = self.save_path + '/' + subset if not os.path.exists(self.save_path): os.makedirs(self.save_path) self.download_urls = { 'baseline' : 'ftp://ftp.ncbi.nlm.nih.gov/pubmed/baseline/', 'daily_update' : 'ftp://ftp.ncbi.nlm.nih.gov/pubmed/updatefiles/', 'fulltext' : 'ftp://ftp.ncbi.nlm.nih.gov/pub/pmc/oa_bulk/', 'open_access' : 'ftp://ftp.ncbi.nlm.nih.gov/pub/pmc/oa_bulk/' } def download(self): print('subset:', self.subset) url = self.download_urls[self.subset] self.download_files(url) self.extract_files() def download_files(self, url): url = self.download_urls[self.subset] output = os.popen('curl ' + url).read() if self.subset == 'fulltext' or self.subset == 'open_access': line_split = 'comm_use' if self.subset == 'fulltext' else 'non_comm_use' for line in output.splitlines(): if line[-10:] == 'xml.tar.gz' and \ line.split(' ')[-1].split('.')[0] == line_split: file = os.path.join(self.save_path, line.split(' ')[-1]) if not os.path.isfile(file): print('Downloading', file) response = urllib.request.urlopen(url + line.split(' ')[-1]) with open(file, "wb") as handle: handle.write(response.read()) elif self.subset == 'baseline' or self.subset == 'daily_update': for line in output.splitlines(): if line[-3:] == '.gz': file = os.path.join(self.save_path, line.split(' ')[-1]) if not os.path.isfile(file): print('Downloading', file) response = urllib.request.urlopen(url + line.split(' ')[-1]) with open(file, "wb") as handle: handle.write(response.read()) else: assert False, 'Invalid PubMed dataset/subset specified.' def extract_files(self): files = glob.glob(self.save_path + '/*.xml.gz') for file in files: print('file:', file) input = gzip.GzipFile(file, mode='rb') s = input.read() input.close() out = open(file[:-3], mode='wb') out.write(s) out.close()
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/data/PubMedDownloader.py
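A brief usage sketch for the PubMed downloader above; it assumes this data/ directory is on the Python path, outbound FTP access to ncbi.nlm.nih.gov is available, and the save path is writable. The paths are illustrative.

from PubMedDownloader import PubMedDownloader

# Fetch and decompress the PubMed baseline abstracts into ./datasets/pubmed/baseline.
downloader = PubMedDownloader(subset='baseline', save_path='./datasets')
downloader.download()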
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import bz2 import os import urllib.request import sys class SquadDownloader: def __init__(self, save_path): self.save_path = save_path + '/squad' if not os.path.exists(self.save_path): os.makedirs(self.save_path) if not os.path.exists(self.save_path + '/v1.1'): os.makedirs(self.save_path + '/v1.1') if not os.path.exists(self.save_path + '/v2.0'): os.makedirs(self.save_path + '/v2.0') self.download_urls = { 'https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v1.1.json' : 'v1.1/train-v1.1.json', 'https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v1.1.json' : 'v1.1/dev-v1.1.json', 'https://worksheets.codalab.org/rest/bundles/0xbcd57bee090b421c982906709c8c27e1/contents/blob/' : 'v1.1/evaluate-v1.1.py', 'https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v2.0.json' : 'v2.0/train-v2.0.json', 'https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v2.0.json' : 'v2.0/dev-v2.0.json', 'https://worksheets.codalab.org/rest/bundles/0x6b567e1cf2e041ec80d7098f031c5c9e/contents/blob/' : 'v2.0/evaluate-v2.0.py', } def download(self): for item in self.download_urls: url = item file = self.download_urls[item] print('Downloading:', url) if os.path.isfile(self.save_path + '/' + file): print('** Download file already exists, skipping download') else: response = urllib.request.urlopen(url) with open(self.save_path + '/' + file, "wb") as handle: handle.write(response.read())
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/data/SquadDownloader.py
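A hypothetical invocation of the SQuAD downloader above; it assumes HTTPS access to rajpurkar.github.io and worksheets.codalab.org, and the save path is a placeholder.

from SquadDownloader import SquadDownloader

# Downloads train/dev JSON for v1.1 and v2.0 plus the official evaluation scripts
# into ./datasets/squad/v1.1 and ./datasets/squad/v2.0.
SquadDownloader(save_path='./datasets').download()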
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from GooglePretrainedWeightDownloader import GooglePretrainedWeightDownloader from NVIDIAPretrainedWeightDownloader import NVIDIAPretrainedWeightDownloader from WikiDownloader import WikiDownloader from BooksDownloader import BooksDownloader from GLUEDownloader import GLUEDownloader from SquadDownloader import SquadDownloader from PubMedDownloader import PubMedDownloader class Downloader: def __init__(self, dataset_name, save_path): self.dataset_name = dataset_name self.save_path = save_path def download(self): if self.dataset_name == 'bookscorpus': self.download_bookscorpus() elif self.dataset_name == 'wikicorpus_en': self.download_wikicorpus('en') elif self.dataset_name == 'wikicorpus_zh': self.download_wikicorpus('zh') elif self.dataset_name == 'pubmed_baseline': self.download_pubmed('baseline') elif self.dataset_name == 'pubmed_daily_update': self.download_pubmed('daily_update') elif self.dataset_name == 'pubmed_fulltext': self.download_pubmed('fulltext') elif self.dataset_name == 'pubmed_open_access': self.download_pubmed('open_access') elif self.dataset_name == 'google_pretrained_weights': self.download_google_pretrained_weights() elif self.dataset_name == 'nvidia_pretrained_weights': self.download_nvidia_pretrained_weights() elif self.dataset_name == 'mrpc': self.download_glue(self.dataset_name) elif self.dataset_name == 'mnli': self.download_glue(self.dataset_name) elif self.dataset_name == 'cola': self.download_glue(self.dataset_name) elif self.dataset_name == 'sst-2': self.download_glue(self.dataset_name) elif self.dataset_name == 'squad': self.download_squad() elif self.dataset_name == 'all': self.download_bookscorpus() self.download_wikicorpus('en') self.download_wikicorpus('zh') self.download_pubmed('baseline') self.download_pubmed('daily_update') self.download_pubmed('fulltext') self.download_pubmed('open_access') self.download_google_pretrained_weights() self.download_nvidia_pretrained_weights() self.download_glue("cola") self.download_glue("mnli") self.download_glue("mrpc") self.download_glue("sst-2") self.download_squad() else: print(self.dataset_name) assert False, 'Unknown dataset_name provided to downloader' def download_bookscorpus(self): downloader = BooksDownloader(self.save_path) downloader.download() def download_wikicorpus(self, language): downloader = WikiDownloader(language, self.save_path) downloader.download() def download_pubmed(self, subset): downloader = PubMedDownloader(subset, self.save_path) downloader.download() def download_google_pretrained_weights(self): downloader = GooglePretrainedWeightDownloader(self.save_path) downloader.download() def download_nvidia_pretrained_weights(self): downloader = NVIDIAPretrainedWeightDownloader(self.save_path) downloader.download() def download_glue(self, glue_task_name): downloader = GLUEDownloader(self.save_path) downloader.download(glue_task_name) def download_squad(self): downloader = SquadDownloader(self.save_path) 
downloader.download()
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/data/Downloader.py
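The dispatcher above simply routes a dataset name to one of the concrete downloaders. A hedged example, assuming all sibling downloader modules (and their dependencies, such as the wget package imported by GLUEDownloader) are importable:

from Downloader import Downloader

# 'squad' routes to SquadDownloader; 'all' would run every downloader in sequence.
Downloader(dataset_name='squad', save_path='./datasets').download()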
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import glob import os class BookscorpusTextFormatting: def __init__(self, books_path, output_filename, recursive = False): self.books_path = books_path self.recursive = recursive self.output_filename = output_filename # This puts one book per line def merge(self): with open(self.output_filename, mode='w', newline='\n') as ofile: for filename in glob.glob(self.books_path + '/' + '*.txt', recursive=True): with open(filename, mode='r', encoding='utf-8-sig', newline='\n') as file: for line in file: if line.strip() != '': ofile.write(line.strip() + ' ') ofile.write("\n\n")
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/data/BookscorpusTextFormatting.py
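A short sketch of running the formatter above on an already-downloaded BookCorpus directory; the paths are placeholders and the output directory must already exist.

from BookscorpusTextFormatting import BookscorpusTextFormatting

# Writes one book per line, with a blank line between books.
formatter = BookscorpusTextFormatting(
    books_path='./datasets/download/bookscorpus',
    output_filename='./datasets/formatted/bookscorpus_one_book_per_line.txt')
formatter.merge()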
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os


class NVIDIAPretrainedWeightDownloader:
    """Placeholder downloader for NVIDIA pretrained weights (not implemented yet)."""

    def __init__(self, save_path):
        self.save_path = save_path + '/nvidia_pretrained_weights'

        if not os.path.exists(self.save_path):
            os.makedirs(self.save_path)

    def download(self):
        # Raise instead of asserting so the failure survives python -O.
        raise NotImplementedError(
            'NVIDIAPretrainedWeightDownloader not implemented yet.')
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/data/NVIDIAPretrainedWeightDownloader.py
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/data/__init__.py
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import bz2 import os import urllib.request import sys import subprocess class WikiDownloader: def __init__(self, language, save_path): self.save_path = save_path + '/wikicorpus_' + language if not os.path.exists(self.save_path): os.makedirs(self.save_path) self.language = language self.download_urls = { 'en' : 'https://dumps.wikimedia.your.org/enwiki/latest/enwiki-latest-pages-articles.xml.bz2', 'zh' : 'https://dumps.wikimedia.your.org/zhwiki/latest/zhwiki-latest-pages-articles.xml.bz2' } self.output_files = { 'en' : 'wikicorpus_en.xml.bz2', 'zh' : 'wikicorpus_zh.xml.bz2' } def download(self): if self.language in self.download_urls: url = self.download_urls[self.language] filename = self.output_files[self.language] print('Downloading:', url) if os.path.isfile(self.save_path + '/' + filename): print('** Download file already exists, skipping download') else: cmd = ['wget', url, '--output-document={}'.format(self.save_path + '/' + filename), '--no-check-certificate'] print('Running:', cmd) status = subprocess.run(cmd) if status.returncode != 0: raise RuntimeError('Wiki download not successful') # Always unzipping since this is relatively fast and will overwrite print('Unzipping:', self.output_files[self.language]) subprocess.run('bzip2 -dk ' + self.save_path + '/' + filename, shell=True, check=True) else: assert False, 'WikiDownloader not implemented for this language yet.'
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/data/WikiDownloader.py
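An illustrative call to the Wikipedia downloader above; it shells out to wget and bzip2, so both must be on PATH, and the decompressed XML dump requires a large amount of free disk space. The save path is an assumption.

from WikiDownloader import WikiDownloader

# Downloads enwiki-latest-pages-articles.xml.bz2 into ./datasets/wikicorpus_en
# and decompresses it next to the archive.
WikiDownloader(language='en', save_path='./datasets').download()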
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import glob import os class WikicorpusTextFormatting: def __init__(self, wiki_path, output_filename, recursive = False): self.wiki_path = wiki_path self.recursive = recursive self.output_filename = output_filename # This puts one article per line def merge(self): with open(self.output_filename, mode='w', newline='\n') as ofile: for dirname in glob.glob(self.wiki_path + '/*/', recursive=False): for filename in glob.glob(dirname + 'wiki_*', recursive=self.recursive): print(filename) article_lines = [] article_open = False with open(filename, mode='r', newline='\n') as file: for line in file: if '<doc id=' in line: article_open = True elif '</doc>' in line: article_open = False for oline in article_lines[1:]: if oline != '\n': ofile.write(oline.rstrip() + " ") ofile.write("\n\n") article_lines = [] else: if article_open: article_lines.append(line)
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/data/WikicorpusTextFormatting.py
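A hedged example of collapsing wikiextractor output with the formatter above; the wiki_path is assumed to contain the extracted */wiki_* files produced by the text_formatting step, and the output directory must already exist.

from WikicorpusTextFormatting import WikicorpusTextFormatting

formatter = WikicorpusTextFormatting(
    wiki_path='./datasets/extracted/wikicorpus_en',
    output_filename='./datasets/formatted/wikicorpus_en_one_article_per_line.txt',
    recursive=True)
formatter.merge()  # one article per line, blank line between articles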
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import glob

import pubmed_parser as pmp


class PubMedTextFormatting:
    def __init__(self, pubmed_path, output_filename, recursive=False):
        self.pubmed_path = pubmed_path
        self.recursive = recursive
        self.output_filename = output_filename

    # This puts one article per line
    def merge(self):
        print('PubMed path:', self.pubmed_path)
        with open(self.output_filename, mode='w', newline='\n') as ofile:
            for filename in glob.glob(self.pubmed_path + '/*.xml*', recursive=self.recursive):
                print('file:', filename)
                dicts_out = pmp.parse_medline_xml(filename)
                for dict_out in dicts_out:
                    if not dict_out['abstract']:
                        continue
                    try:
                        for line in dict_out['abstract'].splitlines():
                            if len(line) < 30:
                                continue
                            ofile.write(line.strip() + " ")
                        ofile.write("\n\n")
                    except Exception:
                        # Catch Exception rather than using a bare except so that
                        # KeyboardInterrupt/SystemExit still propagate; keep the
                        # article separator so downstream sharding stays aligned.
                        ofile.write("\n\n")
                        continue
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/data/PubMedTextFormatting.py
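A minimal sketch of flattening downloaded PubMed XML with the class above; it assumes the pubmed_parser package is installed and uses placeholder paths whose parent directories already exist.

from PubMedTextFormatting import PubMedTextFormatting

formatter = PubMedTextFormatting(
    pubmed_path='./datasets/download/pubmed/baseline',
    output_filename='./datasets/formatted/pubmed_baseline_one_article_per_line.txt')
formatter.merge()  # writes one abstract per line, skipping very short lines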
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys import wget from pathlib import Path def mkdir(path): Path(path).mkdir(parents=True, exist_ok=True) class GLUEDownloader: def __init__(self, save_path): self.save_path = save_path + '/glue' def download(self, task_name): mkdir(self.save_path) if task_name in {'mrpc', 'mnli'}: task_name = task_name.upper() elif task_name == 'cola': task_name = 'CoLA' else: # SST-2 assert task_name == 'sst-2' task_name = 'SST' wget.download( 'https://gist.githubusercontent.com/W4ngatang/60c2bdb54d156a41194446737ce03e2e/raw/17b8dd0d724281ed7c3b2aeeda662b92809aadd5/download_glue_data.py', out=self.save_path, ) sys.path.append(self.save_path) import download_glue_data download_glue_data.main( ['--data_dir', self.save_path, '--tasks', task_name]) sys.path.pop()
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/data/GLUEDownloader.py
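An illustrative GLUE download; the class above relies on the community download_glue_data.py gist and the wget package, so network access is required and the save path is a placeholder.

from GLUEDownloader import GLUEDownloader

# Accepted task names are 'mrpc', 'mnli', 'cola' and 'sst-2'.
GLUEDownloader(save_path='./datasets').download('mrpc')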
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import BookscorpusTextFormatting import Downloader import TextSharding import WikicorpusTextFormatting import PubMedTextFormatting import argparse import itertools import multiprocessing import os import pprint import subprocess def main(args): working_dir = os.environ['BERT_PREP_WORKING_DIR'] print('Working Directory:', working_dir) print('Action:', args.action) print('Dataset Name:', args.dataset) if args.input_files: args.input_files = args.input_files.split(',') hdf5_tfrecord_folder_prefix = "/lower_case_" + str(args.do_lower_case) + "_seq_len_" + str(args.max_seq_length) \ + "_max_pred_" + str(args.max_predictions_per_seq) + "_masked_lm_prob_" + str(args.masked_lm_prob) \ + "_random_seed_" + str(args.random_seed) + "_dupe_factor_" + str(args.dupe_factor) \ + "_shard_" + str(args.n_training_shards) + "_test_split_" + str(int(args.fraction_test_set * 100)) directory_structure = { 'download' : working_dir + '/download', # Downloaded and decompressed 'extracted' : working_dir +'/extracted', # Extracted from whatever the initial format is (e.g., wikiextractor) 'formatted' : working_dir + '/formatted_one_article_per_line', # This is the level where all sources should look the same 'sharded' : working_dir + '/sharded', 'tfrecord' : working_dir + '/tfrecord' + hdf5_tfrecord_folder_prefix, 'hdf5': working_dir + '/hdf5'+ hdf5_tfrecord_folder_prefix, } print('\nDirectory Structure:') pp = pprint.PrettyPrinter(indent=2) pp.pprint(directory_structure) print('') if args.action == 'download': if not os.path.exists(directory_structure['download']): os.makedirs(directory_structure['download']) downloader = Downloader.Downloader(args.dataset, directory_structure['download']) downloader.download() elif args.action == 'text_formatting': assert args.dataset != 'google_pretrained_weights' and args.dataset != 'nvidia_pretrained_weights' \ and args.dataset != 'squad' and args.dataset != 'mrpc' and args.dataset != 'cola' and \ args.dataset != 'mnli' and args.dataset != 'sst-2', 'Cannot perform text_formatting on pretrained weights' if not os.path.exists(directory_structure['extracted']): os.makedirs(directory_structure['extracted']) if not os.path.exists(directory_structure['formatted']): os.makedirs(directory_structure['formatted']) if args.dataset == 'bookscorpus': books_path = directory_structure['download'] + '/bookscorpus' #books_path = directory_structure['download'] output_filename = directory_structure['formatted'] + '/bookscorpus_one_book_per_line.txt' books_formatter = BookscorpusTextFormatting.BookscorpusTextFormatting(books_path, output_filename, recursive=True) books_formatter.merge() elif args.dataset == 'wikicorpus_en': if args.skip_wikiextractor == 0: path_to_wikiextractor_in_container = 'python -m wikiextractor.WikiExtractor' wikiextractor_command = path_to_wikiextractor_in_container + ' ' + directory_structure['download'] + '/' + args.dataset + '/wikicorpus_en.xml ' + '-b 100M --processes ' + 
str(args.n_processes) + ' -o ' + directory_structure['extracted'] + '/' + args.dataset print('WikiExtractor Command:', wikiextractor_command) wikiextractor_process = subprocess.run(wikiextractor_command, shell=True, check=True) wiki_path = directory_structure['extracted'] + '/wikicorpus_en' output_filename = directory_structure['formatted'] + '/wikicorpus_en_one_article_per_line.txt' wiki_formatter = WikicorpusTextFormatting.WikicorpusTextFormatting(wiki_path, output_filename, recursive=True) wiki_formatter.merge() elif args.dataset == 'wikicorpus_zh': assert False, 'wikicorpus_zh not fully supported at this time. The simplified/tradition Chinese data needs to be translated and properly segmented still, and should work once this step is added.' if args.skip_wikiextractor == 0: path_to_wikiextractor_in_container = '/workspace/wikiextractor/WikiExtractor.py' wikiextractor_command = path_to_wikiextractor_in_container + ' ' + directory_structure['download'] + '/' + args.dataset + '/wikicorpus_zh.xml ' + '-b 100M --processes ' + str(args.n_processes) + ' -o ' + directory_structure['extracted'] + '/' + args.dataset print('WikiExtractor Command:', wikiextractor_command) wikiextractor_process = subprocess.run(wikiextractor_command, shell=True, check=True) wiki_path = directory_structure['extracted'] + '/wikicorpus_zh' output_filename = directory_structure['formatted'] + '/wikicorpus_zh_one_article_per_line.txt' wiki_formatter = WikicorpusTextFormatting.WikicorpusTextFormatting(wiki_path, output_filename, recursive=True) wiki_formatter.merge() elif args.dataset == 'pubmed_baseline': pubmed_path = directory_structure['download'] + '/pubmed' + '/baseline' output_filename = directory_structure['formatted'] + '/pubmed_baseline_one_article_per_line.txt' pubmed_formatter = PubMedTextFormatting.PubMedTextFormatting(pubmed_path, output_filename, recursive=True) pubmed_formatter.merge() elif args.action == 'sharding': # Note: books+wiki requires user to provide list of input_files (comma-separated with no spaces) if args.dataset == 'bookscorpus' or 'wikicorpus' in args.dataset or 'books_wiki' in args.dataset or 'pubmed' in args.dataset: if args.input_files is None: if args.dataset == 'bookscorpus': args.input_files = [directory_structure['formatted'] + '/bookscorpus_one_book_per_line.txt'] elif args.dataset == 'wikicorpus_en': args.input_files = [directory_structure['formatted'] + '/wikicorpus_en_one_article_per_line.txt'] elif args.dataset == 'wikicorpus_zh': args.input_files = [directory_structure['formatted'] + '/wikicorpus_zh_one_article_per_line.txt'] elif args.dataset == 'books_wiki_en_corpus': args.input_files = [directory_structure['formatted'] + '/bookscorpus_one_book_per_line.txt', directory_structure['formatted'] + '/wikicorpus_en_one_article_per_line.txt'] elif args.dataset == 'pubmed_baseline': args.input_files = [directory_structure['formatted'] + '/pubmed_baseline_one_article_per_line.txt'] output_file_prefix = directory_structure['sharded'] + '/' + args.dataset + '/' + args.dataset if not os.path.exists(directory_structure['sharded']): os.makedirs(directory_structure['sharded']) if not os.path.exists(directory_structure['sharded'] + '/' + args.dataset): os.makedirs(directory_structure['sharded'] + '/' + args.dataset) if not os.path.exists(directory_structure['sharded'] + '/' + args.dataset + '/training'): os.makedirs(directory_structure['sharded'] + '/' + args.dataset + '/training') if not os.path.exists(directory_structure['sharded'] + '/' + args.dataset + '/test'): 
os.makedirs(directory_structure['sharded'] + '/' + args.dataset + '/test') # Segmentation is here because all datasets look the same in one article/book/whatever per line format, and # it seemed unnecessarily complicated to add an additional preprocessing step to call just for this. # Different languages (e.g., Chinese simplified/traditional) may require translation and # other packages to be called from here -- just add a conditional branch for those extra steps segmenter = TextSharding.NLTKSegmenter() sharding = TextSharding.Sharding(args.input_files, output_file_prefix, args.n_training_shards, args.n_test_shards, args.fraction_test_set) sharding.load_articles() sharding.segment_articles_into_sentences(segmenter) sharding.distribute_articles_over_shards() sharding.write_shards_to_disk() else: assert False, 'Unsupported dataset for sharding' elif args.action == 'create_tfrecord_files': if not os.path.exists(directory_structure['tfrecord'] + "/" + args.dataset): os.makedirs(directory_structure['tfrecord'] + "/" + args.dataset) if not os.path.exists(directory_structure['tfrecord'] + "/" + args.dataset + '/training'): os.makedirs(directory_structure['tfrecord'] + "/" + args.dataset + '/training') if not os.path.exists(directory_structure['tfrecord'] + "/" + args.dataset + '/test'): os.makedirs(directory_structure['tfrecord'] + "/" + args.dataset + '/test') last_process = None def create_record_worker(filename_prefix, shard_id, output_format='tfrecord', split='training'): bert_preprocessing_command = 'python /workspace/bert_tf2/create_pretraining_data.py' bert_preprocessing_command += ' --input_file=' + directory_structure['sharded'] + '/' + args.dataset + '/' + split + '/' + filename_prefix + '_' + str(shard_id) + '.txt' bert_preprocessing_command += ' --output_file=' + directory_structure['tfrecord'] + '/' + args.dataset + '/' + split + '/' + filename_prefix + '_' + str(shard_id) + '.' + output_format bert_preprocessing_command += ' --vocab_file=' + args.vocab_file bert_preprocessing_command += ' --do_lower_case' if args.do_lower_case else '' bert_preprocessing_command += ' --max_seq_length=' + str(args.max_seq_length) bert_preprocessing_command += ' --max_predictions_per_seq=' + str(args.max_predictions_per_seq) bert_preprocessing_command += ' --masked_lm_prob=' + str(args.masked_lm_prob) bert_preprocessing_command += ' --random_seed=' + str(args.random_seed) bert_preprocessing_command += ' --dupe_factor=' + str(args.dupe_factor) bert_preprocessing_process = subprocess.Popen(bert_preprocessing_command, shell=True) last_process = bert_preprocessing_process # This could be better optimized (fine if all take equal time) if shard_id % args.n_processes == 0 and shard_id > 0: bert_preprocessing_process.wait() return last_process output_file_prefix = args.dataset for i in range(args.n_training_shards): last_process = create_record_worker(output_file_prefix + '_training', i, 'tfrecord', 'training') last_process.wait() for i in range(args.n_test_shards): last_process = create_record_worker(output_file_prefix + '_test', i, 'tfrecord', 'test') last_process.wait() elif args.action == 'create_hdf5_files': assert False, 'HDF5 format not fully supported in this release.' 
if not os.path.exists(directory_structure['hdf5'] + "/" + args.dataset): os.makedirs(directory_structure['hdf5'] + "/" + args.dataset) last_process = None def create_record_worker(filename_prefix, shard_id, output_format='hdf5'): bert_preprocessing_command = 'python /workspace/bert_tf2/create_pretraining_data.py' bert_preprocessing_command += ' --input_file=' + directory_structure['sharded'] + '/' + args.dataset + '/' + filename_prefix + '_' + str(shard_id) + '.txt' bert_preprocessing_command += ' --output_file=' + directory_structure['hdf5'] + '/' + args.dataset + '/' + filename_prefix + '_' + str(shard_id) + '.' + output_format bert_preprocessing_command += ' --vocab_file=' + args.vocab_file bert_preprocessing_command += ' --do_lower_case' if args.do_lower_case else '' bert_preprocessing_command += ' --max_seq_length=' + args.max_seq_length bert_preprocessing_command += ' --max_predictions_per_seq=' + args.max_predictions_per_seq bert_preprocessing_command += ' --masked_lm_prob=' + args.masked_lm_prob bert_preprocessing_command += ' --random_seed=' + args.random_seed bert_preprocessing_command += ' --dupe_factor=' + args.dupe_factor bert_preprocessing_process = subprocess.Popen(bert_preprocessing_command, shell=True) last_process = bert_preprocessing_process # This could be better optimized (fine if all take equal time) if shard_id % args.n_processes == 0 and shard_id > 0: bert_preprocessing_process.wait() for i in range(args.n_training_shards): create_record_worker(args.output_file_prefix + '_training', i) last_process.wait() for i in range(args.n_test_shards): create_record_worker(args.output_file_prefix + '_test', i) last_process.wait() if __name__ == "__main__": parser = argparse.ArgumentParser( description='Preprocessing Application for Everything BERT-related' ) parser.add_argument( '--action', type=str, help='Specify the action you want the app to take. 
e.g., generate vocab, segment, create tfrecords', choices={ 'download', # Download and verify mdf5/sha sums 'text_formatting', # Convert into a file that contains one article/book per line 'sharding', # Convert previous formatted text into shards containing one sentence per line 'create_tfrecord_files', # Turn each shard into a TFrecord with masking and next sentence prediction info 'create_hdf5_files' # Turn each shard into a HDF5 file with masking and next sentence prediction info } ) parser.add_argument( '--dataset', type=str, help='Specify the dataset to perform --action on', choices={ 'bookscorpus', 'wikicorpus_en', 'wikicorpus_zh', 'books_wiki_en_corpus', 'pubmed_baseline', 'pubmed_daily_update', 'pubmed_fulltext', 'pubmed_open_access', 'google_pretrained_weights', 'nvidia_pretrained_weights', 'squad', 'mrpc', 'sst-2', 'mnli', 'cola', 'all' } ) parser.add_argument( '--input_files', type=str, help='Specify the input files in a comma-separated list (no spaces)' ) parser.add_argument( '--n_training_shards', type=int, help='Specify the number of training shards to generate', default=1472 ) parser.add_argument( '--n_test_shards', type=int, help='Specify the number of test shards to generate', default=1472 ) parser.add_argument( '--fraction_test_set', type=float, help='Specify the fraction (0..1) of the data to withhold for the test data split (based on number of sequences)', default=0.1 ) parser.add_argument( '--segmentation_method', type=str, help='Specify your choice of sentence segmentation', choices={ 'nltk' }, default='nltk' ) parser.add_argument( '--n_processes', type=int, help='Specify the max number of processes to allow at one time', default=4 ) parser.add_argument( '--random_seed', type=int, help='Specify the base seed to use for any random number generation', default=12345 ) parser.add_argument( '--dupe_factor', type=int, help='Specify the duplication factor', default=5 ) parser.add_argument( '--masked_lm_prob', type=float, help='Specify the probability for masked lm', default=0.15 ) parser.add_argument( '--max_seq_length', type=int, help='Specify the maximum sequence length', default=512 ) parser.add_argument( '--max_predictions_per_seq', type=int, help='Specify the maximum number of masked words per sequence', default=20 ) parser.add_argument( '--do_lower_case', type=int, help='Specify whether it is cased (0) or uncased (1) (any number greater than 0 will be treated as uncased)', default=1 ) parser.add_argument( '--vocab_file', type=str, help='Specify absolute path to vocab file to use)' ) parser.add_argument( '--skip_wikiextractor', type=int, help='Specify whether to skip wikiextractor step 0=False, 1=True', default=0 ) parser.add_argument( '--interactive_json_config_generator', type=str, help='Specify the action you want the app to take. e.g., generate vocab, segment, create tfrecords' ) args = parser.parse_args() main(args)
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/data/bertPrep.py
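A hypothetical end-to-end run of bertPrep.py, assuming the NGC BERT container layout (wikiextractor installed, create_pretraining_data.py under /workspace/bert_tf2) and a writable scratch directory; all flag values and paths below are illustrative, not recommendations.

import os
import subprocess

# bertPrep.py reads the working directory from this environment variable.
os.environ['BERT_PREP_WORKING_DIR'] = '/workspace/bert_tf2/data'

for action in ('download', 'text_formatting', 'sharding', 'create_tfrecord_files'):
    subprocess.run(
        ['python', 'bertPrep.py',
         '--action', action,
         '--dataset', 'wikicorpus_en',
         '--vocab_file', '/workspace/bert_tf2/vocab.txt',
         '--max_seq_length', '128',
         '--max_predictions_per_seq', '20'],
        check=True)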
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import subprocess class BooksDownloader: def __init__(self, save_path): self.save_path = save_path pass def download(self): bookscorpus_download_command = 'python3 /workspace/bookcorpus/download_files.py --list /workspace/bookcorpus/url_list.jsonl --out' bookscorpus_download_command += ' ' + self.save_path + '/bookscorpus' bookscorpus_download_command += ' --trash-bad-count' bookscorpus_download_process = subprocess.run(bookscorpus_download_command, shell=True, check=True)
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/data/BooksDownloader.py
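The BookCorpus downloader above is a thin wrapper around the crawler shipped at /workspace/bookcorpus inside the container; the sketch below assumes that layout and an illustrative save path.

from BooksDownloader import BooksDownloader

BooksDownloader(save_path='./datasets/download').download()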
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from collections import defaultdict from itertools import islice import multiprocessing import os import statistics class Sharding: def __init__(self, input_files, output_name_prefix, n_training_shards, n_test_shards, fraction_test_set): assert len(input_files) > 0, 'The input file list must contain at least one file.' assert n_training_shards > 0, 'There must be at least one output shard.' assert n_test_shards > 0, 'There must be at least one output shard.' self.n_training_shards = n_training_shards self.n_test_shards = n_test_shards self.fraction_test_set = fraction_test_set self.input_files = input_files self.output_name_prefix = output_name_prefix self.output_training_identifier = '_training' self.output_test_identifier = '_test' self.output_file_extension = '.txt' self.articles = {} # key: integer identifier, value: list of articles self.sentences = {} # key: integer identifier, value: list of sentences self.output_training_files = {} # key: filename, value: list of articles to go into file self.output_test_files = {} # key: filename, value: list of articles to go into file self.init_output_files() # Remember, the input files contain one article per line (the whitespace check is to skip extraneous blank lines) def load_articles(self): print('Start: Loading Articles') global_article_count = 0 for input_file in self.input_files: print('input file:', input_file) with open(input_file, mode='r', newline='\n') as f: for i, line in enumerate(f): if line.strip(): self.articles[global_article_count] = line.rstrip() global_article_count += 1 print('End: Loading Articles: There are', len(self.articles), 'articles.') def segment_articles_into_sentences(self, segmenter): print('Start: Sentence Segmentation') if len(self.articles) == 0: self.load_articles() assert len(self.articles) != 0, 'Please check that input files are present and contain data.' 
# TODO: WIP: multiprocessing (create independent ranges and spawn processes) use_multiprocessing = 'serial' def chunks(data, size=len(self.articles)): it = iter(data) for i in range(0, len(data), size): yield {k: data[k] for k in islice(it, size)} if use_multiprocessing == 'manager': manager = multiprocessing.Manager() return_dict = manager.dict() jobs = [] n_processes = 7 # in addition to the main process, total = n_proc+1 def work(articles, return_dict): sentences = {} for i, article in enumerate(articles): sentences[i] = segmenter.segment_string(articles[article]) if i % 5000 == 0: print('Segmenting article', i) return_dict.update(sentences) for item in chunks(self.articles, len(self.articles)): p = multiprocessing.Process(target=work, args=(item, return_dict)) # Busy wait while len(jobs) >= n_processes: pass jobs.append(p) p.start() for proc in jobs: proc.join() elif use_multiprocessing == 'queue': work_queue = multiprocessing.Queue() jobs = [] for item in chunks(self.articles, len(self.articles)): pass else: # serial option for i, article in enumerate(self.articles): self.sentences[i] = segmenter.segment_string(self.articles[article]) if i % 5000 == 0: print('Segmenting article', i) print('End: Sentence Segmentation') def init_output_files(self): print('Start: Init Output Files') assert len(self.output_training_files) == 0, 'Internal storage self.output_files already contains data. This function is intended to be used by the constructor only.' assert len(self.output_test_files) == 0, 'Internal storage self.output_files already contains data. This function is intended to be used by the constructor only.' for i in range(self.n_training_shards): name = self.output_name_prefix + self.output_training_identifier + '_' + str(i) + self.output_file_extension self.output_training_files[name] = [] for i in range(self.n_test_shards): name = self.output_name_prefix + self.output_test_identifier + '_' + str(i) + self.output_file_extension self.output_test_files[name] = [] print('End: Init Output Files') def get_sentences_per_shard(self, shard): result = 0 for article_id in shard: result += len(self.sentences[article_id]) return result def distribute_articles_over_shards(self): print('Start: Distribute Articles Over Shards') assert len(self.articles) >= self.n_training_shards + self.n_test_shards, 'There are fewer articles than shards. Please add more data or reduce the number of shards requested.' 
# Create dictionary with - key: sentence count per article, value: article id number sentence_counts = defaultdict(lambda: []) max_sentences = 0 total_sentences = 0 for article_id in self.sentences: current_length = len(self.sentences[article_id]) sentence_counts[current_length].append(article_id) max_sentences = max(max_sentences, current_length) total_sentences += current_length n_sentences_assigned_to_training = int((1 - self.fraction_test_set) * total_sentences) nominal_sentences_per_training_shard = n_sentences_assigned_to_training // self.n_training_shards nominal_sentences_per_test_shard = (total_sentences - n_sentences_assigned_to_training) // self.n_test_shards consumed_article_set = set({}) unused_article_set = set(self.articles.keys()) # Make first pass and add one article worth of lines per file for file in self.output_training_files: current_article_id = sentence_counts[max_sentences][-1] sentence_counts[max_sentences].pop(-1) self.output_training_files[file].append(current_article_id) consumed_article_set.add(current_article_id) unused_article_set.remove(current_article_id) # Maintain the max sentence count while len(sentence_counts[max_sentences]) == 0 and max_sentences > 0: max_sentences -= 1 if len(self.sentences[current_article_id]) > nominal_sentences_per_training_shard: nominal_sentences_per_training_shard = len(self.sentences[current_article_id]) print('Warning: A single article contains more than the nominal number of sentences per training shard.') for file in self.output_test_files: current_article_id = sentence_counts[max_sentences][-1] sentence_counts[max_sentences].pop(-1) self.output_test_files[file].append(current_article_id) consumed_article_set.add(current_article_id) unused_article_set.remove(current_article_id) # Maintain the max sentence count while len(sentence_counts[max_sentences]) == 0 and max_sentences > 0: max_sentences -= 1 if len(self.sentences[current_article_id]) > nominal_sentences_per_test_shard: nominal_sentences_per_test_shard = len(self.sentences[current_article_id]) print('Warning: A single article contains more than the nominal number of sentences per test shard.') training_counts = [] test_counts = [] for shard in self.output_training_files: training_counts.append(self.get_sentences_per_shard(self.output_training_files[shard])) for shard in self.output_test_files: test_counts.append(self.get_sentences_per_shard(self.output_test_files[shard])) training_median = statistics.median(training_counts) test_median = statistics.median(test_counts) # Make subsequent passes over files to find articles to add without going over limit history_remaining = [] n_history_remaining = 4 while len(consumed_article_set) < len(self.articles): for fidx, file in enumerate(self.output_training_files): nominal_next_article_size = min(nominal_sentences_per_training_shard - training_counts[fidx], max_sentences) # Maintain the max sentence count while len(sentence_counts[max_sentences]) == 0 and max_sentences > 0: max_sentences -= 1 while len(sentence_counts[nominal_next_article_size]) == 0 and nominal_next_article_size > 0: nominal_next_article_size -= 1 if nominal_next_article_size not in sentence_counts or nominal_next_article_size == 0 or training_counts[fidx] > training_median: continue # skip adding to this file, will come back later if no file can accept unused articles current_article_id = sentence_counts[nominal_next_article_size][-1] sentence_counts[nominal_next_article_size].pop(-1) self.output_training_files[file].append(current_article_id) 
consumed_article_set.add(current_article_id) unused_article_set.remove(current_article_id) for fidx, file in enumerate(self.output_test_files): nominal_next_article_size = min(nominal_sentences_per_test_shard - test_counts[fidx], max_sentences) # Maintain the max sentence count while len(sentence_counts[max_sentences]) == 0 and max_sentences > 0: max_sentences -= 1 while len(sentence_counts[nominal_next_article_size]) == 0 and nominal_next_article_size > 0: nominal_next_article_size -= 1 if nominal_next_article_size not in sentence_counts or nominal_next_article_size == 0 or test_counts[fidx] > test_median: continue # skip adding to this file, will come back later if no file can accept unused articles current_article_id = sentence_counts[nominal_next_article_size][-1] sentence_counts[nominal_next_article_size].pop(-1) self.output_test_files[file].append(current_article_id) consumed_article_set.add(current_article_id) unused_article_set.remove(current_article_id) # If unable to place articles a few times, bump up nominal sizes by fraction until articles get placed if len(history_remaining) == n_history_remaining: history_remaining.pop(0) history_remaining.append(len(unused_article_set)) history_same = True for i in range(1, len(history_remaining)): history_same = history_same and (history_remaining[i-1] == history_remaining[i]) if history_same: nominal_sentences_per_training_shard += 1 # nominal_sentences_per_test_shard += 1 training_counts = [] test_counts = [] for shard in self.output_training_files: training_counts.append(self.get_sentences_per_shard(self.output_training_files[shard])) for shard in self.output_test_files: test_counts.append(self.get_sentences_per_shard(self.output_test_files[shard])) training_median = statistics.median(training_counts) test_median = statistics.median(test_counts) print('Distributing data over shards:', len(unused_article_set), 'articles remaining.') if len(unused_article_set) != 0: print('Warning: Some articles did not make it into output files.') for shard in self.output_training_files: print('Training shard:', self.get_sentences_per_shard(self.output_training_files[shard])) for shard in self.output_test_files: print('Test shard:', self.get_sentences_per_shard(self.output_test_files[shard])) print('End: Distribute Articles Over Shards') def write_shards_to_disk(self): print('Start: Write Shards to Disk') for shard in self.output_training_files: self.write_single_shard(shard, self.output_training_files[shard], 'training') for shard in self.output_test_files: self.write_single_shard(shard, self.output_test_files[shard], 'test') print('End: Write Shards to Disk') def write_single_shard(self, shard_name, shard, split): shard_split = os.path.split(shard_name) shard_name = shard_split[0] + '/' + split + '/' + shard_split[1] with open(shard_name, mode='w', newline='\n') as f: for article_id in shard: for line in self.sentences[article_id]: f.write(line + '\n') f.write('\n') # Line break between articles import nltk nltk.download('punkt') class NLTKSegmenter: def __init(self): pass def segment_string(self, article): return nltk.tokenize.sent_tokenize(article)
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/data/TextSharding.py
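A hedged sketch of using the sharding utilities above directly, mirroring the 'sharding' action in bertPrep.py. The paths and shard counts are placeholders; importing the module downloads the NLTK punkt model, and the training/ and test/ subdirectories under the shard folder must already exist because write_single_shard writes into them.

import TextSharding

sharding = TextSharding.Sharding(
    input_files=['./datasets/formatted/wikicorpus_en_one_article_per_line.txt'],
    output_name_prefix='./datasets/sharded/wikicorpus_en/wikicorpus_en',
    n_training_shards=256,
    n_test_shards=256,
    fraction_test_set=0.1)
sharding.load_articles()
sharding.segment_articles_into_sentences(TextSharding.NLTKSegmenter())
sharding.distribute_articles_over_shards()
sharding.write_shards_to_disk()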
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import hashlib import os import urllib.request import tarfile class GooglePretrainedWeightDownloader: def __init__(self, save_path): self.save_path = save_path + '/google_pretrained_weights' if not os.path.exists(self.save_path): os.makedirs(self.save_path) # Download urls self.model_urls = { 'bert_base_uncased': ('https://storage.googleapis.com/cloud-tpu-checkpoints/bert/keras_bert/uncased_L-12_H-768_A-12.tar.gz', 'uncased_L-12_H-768_A-12.tar.gz'), 'bert_large_uncased': ('https://storage.googleapis.com/cloud-tpu-checkpoints/bert/keras_bert/uncased_L-24_H-1024_A-16.tar.gz', 'uncased_L-24_H-1024_A-16.tar.gz'), # 'bert_base_cased': ('https://storage.googleapis.com/cloud-tpu-checkpoints/bert/keras_bert/cased_L-12_H-768_A-12.tar.gz', 'cased_L-12_H-768_A-12.tar.gz'), # 'bert_large_cased': ('https://storage.googleapis.com/cloud-tpu-checkpoints/bert/keras_bert/cased_L-24_H-1024_A-16.tar.gz', 'cased_L-24_H-1024_A-16.tar.gz'), # 'bert_base_multilingual_cased': ('https://storage.googleapis.com/bert_models/2018_11_23/multi_cased_L-12_H-768_A-12.zip', 'multi_cased_L-12_H-768_A-12.zip'), # 'bert_large_multilingual_uncased': ('https://storage.googleapis.com/bert_models/2018_11_03/multilingual_L-12_H-768_A-12.zip', 'multilingual_L-12_H-768_A-12.zip'), # 'bert_base_chinese': ('https://storage.googleapis.com/bert_models/2018_11_03/chinese_L-12_H-768_A-12.zip', 'chinese_L-12_H-768_A-12.zip') } # SHA256sum verification for file download integrity (and checking for changes from the download source over time) self.bert_base_uncased_sha = { 'bert_config.json': '7b4e5f53efbd058c67cda0aacfafb340113ea1b5797d9ce6ee411704ba21fcbc', 'bert_model.ckpt.data-00000-of-00001': 'f8d2e9873133ea4d252662be01a074fb6b9e115d5fd1e3678d385cf65cf5210f', 'bert_model.ckpt.index': '06a6b8cdff0e61f62f8f24946a607aa6f5ad9b969c1b85363541ab144f80c767', # 'checkpoint': 'da4c827756174a576abc3490e385fa8a36600cf5eb7bbea29315cf1f4ad59639', 'vocab.txt': '07eced375cec144d27c900241f3e339478dec958f92fddbc551f295c992038a3', } self.bert_large_uncased_sha = { 'bert_config.json': 'bfa42236d269e2aeb3a6d30412a33d15dbe8ea597e2b01dc9518c63cc6efafcb', 'bert_model.ckpt.data-00000-of-00001': '9aa66efcbbbfd87fc173115c4f906a42a70d26ca4ca1e318358e4de81dbddb0b', 'bert_model.ckpt.index': '1811d5b68b2fd1a8c5d2961b2691eb626d75c4e789079eb1ba3649aa3fff7336', # 'checkpoint': 'da4c827756174a576abc3490e385fa8a36600cf5eb7bbea29315cf1f4ad59639', 'vocab.txt': '07eced375cec144d27c900241f3e339478dec958f92fddbc551f295c992038a3', } self.bert_base_cased_sha = { 'bert_config.json': 'f11dfb757bea16339a33e1bf327b0aade6e57fd9c29dc6b84f7ddb20682f48bc', 'bert_model.ckpt.data-00000-of-00001': 'ed0febc0fbcd2b7ef9f02112e00cb26c5de2086bca26c07b48b09c723446bc85', 'bert_model.ckpt.index': 'af085a027ef3686466c9b662f9174129401bb4bc49856c917c02322ab7ca26d5', 'checkpoint': 'da4c827756174a576abc3490e385fa8a36600cf5eb7bbea29315cf1f4ad59639', 'vocab.txt': 
'eeaa9875b23b04b4c54ef759d03db9d1ba1554838f8fb26c5d96fa551df93d02', } self.bert_large_cased_sha = { 'bert_config.json': '7adb2125c8225da495656c982fd1c5f64ba8f20ad020838571a3f8a954c2df57', 'bert_model.ckpt.data-00000-of-00001': '1f96efeac7c8728e2bacb8ec6230f5ed42a26f5aa6b6b0a138778c190adf2a0b', 'bert_model.ckpt.index': '373ed159af87775ce549239649bfc4df825bffab0da31620575dab44818443c3', 'checkpoint': 'da4c827756174a576abc3490e385fa8a36600cf5eb7bbea29315cf1f4ad59639', 'vocab.txt': 'eeaa9875b23b04b4c54ef759d03db9d1ba1554838f8fb26c5d96fa551df93d02', } self.bert_base_multilingual_cased_sha = { 'bert_config.json': 'e76c3964bc14a8bb37a5530cdc802699d2f4a6fddfab0611e153aa2528f234f0', 'bert_model.ckpt.data-00000-of-00001': '55b8a2df41f69c60c5180e50a7c31b7cdf6238909390c4ddf05fbc0d37aa1ac5', 'bert_model.ckpt.index': '7d8509c2a62b4e300feb55f8e5f1eef41638f4998dd4d887736f42d4f6a34b37', 'bert_model.ckpt.meta': '95e5f1997e8831f1c31e5cf530f1a2e99f121e9cd20887f2dce6fe9e3343e3fa', 'vocab.txt': 'fe0fda7c425b48c516fc8f160d594c8022a0808447475c1a7c6d6479763f310c', } self.bert_large_multilingual_uncased_sha = { 'bert_config.json': '49063bb061390211d2fdd108cada1ed86faa5f90b80c8f6fdddf406afa4c4624', 'bert_model.ckpt.data-00000-of-00001': '3cd83912ebeb0efe2abf35c9f1d5a515d8e80295e61c49b75c8853f756658429', 'bert_model.ckpt.index': '87c372c1a3b1dc7effaaa9103c80a81b3cbab04c7933ced224eec3b8ad2cc8e7', 'bert_model.ckpt.meta': '27f504f34f02acaa6b0f60d65195ec3e3f9505ac14601c6a32b421d0c8413a29', 'vocab.txt': '87b44292b452f6c05afa49b2e488e7eedf79ea4f4c39db6f2f4b37764228ef3f', } self.bert_base_chinese_sha = { 'bert_config.json': '7aaad0335058e2640bcb2c2e9a932b1cd9da200c46ea7b8957d54431f201c015', 'bert_model.ckpt.data-00000-of-00001': '756699356b78ad0ef1ca9ba6528297bcb3dd1aef5feadd31f4775d7c7fc989ba', 'bert_model.ckpt.index': '46315546e05ce62327b3e2cd1bed22836adcb2ff29735ec87721396edb21b82e', 'bert_model.ckpt.meta': 'c0f8d51e1ab986604bc2b25d6ec0af7fd21ff94cf67081996ec3f3bf5d823047', 'vocab.txt': '45bbac6b341c319adc98a532532882e91a9cefc0329aa57bac9ae761c27b291c', } # Relate SHA to urls for loop below self.model_sha = { 'bert_base_uncased': self.bert_base_uncased_sha, 'bert_large_uncased': self.bert_large_uncased_sha, # 'bert_base_cased': self.bert_base_cased_sha, # 'bert_large_cased': self.bert_large_cased_sha, # 'bert_base_multilingual_cased': self.bert_base_multilingual_cased_sha, # 'bert_large_multilingual_uncased': self.bert_large_multilingual_uncased_sha, # 'bert_base_chinese': self.bert_base_chinese_sha } # Helper to get sha256sum of a file def sha256sum(self, filename): h = hashlib.sha256() b = bytearray(128*1024) mv = memoryview(b) with open(filename, 'rb', buffering=0) as f: for n in iter(lambda : f.readinto(mv), 0): h.update(mv[:n]) return h.hexdigest() def download(self): # Iterate over urls: download, unzip, verify sha256sum found_mismatch_sha = False for model in self.model_urls: url = self.model_urls[model][0] file = self.save_path + '/' + self.model_urls[model][1] print('Downloading', url) response = urllib.request.urlopen(url) with open(file, 'wb') as handle: handle.write(response.read()) print('Unzipping', file) tf = tarfile.open(file) tf.extractall(self.save_path) sha_dict = self.model_sha[model] for extracted_file in sha_dict: sha = sha_dict[extracted_file] if sha != self.sha256sum(file[:-7] + '/' + extracted_file): found_mismatch_sha = True print('SHA256sum does not match on file:', extracted_file, 'from download url:', url) else: print(file[:-7] + '/' + extracted_file, '\t', 'verified') if not 
found_mismatch_sha: print("All downloads pass sha256sum verification.") def serialize(self): pass def deserialize(self): pass def listAvailableWeights(self): print("Available Weight Datasets") for item in self.model_urls: print(item) def listLocallyStoredWeights(self): pass
DeepLearningExamples-master
TensorFlow2/LanguageModeling/BERT/data/GooglePretrainedWeightDownloader.py
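As a quick orientation for the downloader above, here is a minimal, hypothetical driver; the save path is a placeholder, and it assumes the module is run from the BERT data directory with network access to the storage buckets listed in model_urls.

# Hypothetical usage of GooglePretrainedWeightDownloader (save path is an example).
from GooglePretrainedWeightDownloader import GooglePretrainedWeightDownloader

downloader = GooglePretrainedWeightDownloader('/workspace/bert/data')
downloader.listAvailableWeights()  # prints the keys of model_urls
downloader.download()              # downloads, extracts and sha256-verifies each archive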
# Copyright 2020 Google Research. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tool to inspect a model.""" import os from absl import app from absl import flags from absl import logging import numpy as np from PIL import Image import tensorflow as tf from dllogger import StdOutBackend, JSONStreamBackend, Verbosity import dllogger as DLLogger from model import inference from utils import hparams_config from utils import model_utils from utils import setup flags.DEFINE_string('model_name', 'efficientdet-d0', 'Model.') flags.DEFINE_string('mode', 'benchmark', 'Run mode: {dry, export, benchmark}') flags.DEFINE_string('trace_filename', None, 'Trace file name.') flags.DEFINE_integer('bm_runs', 100, 'Number of benchmark runs.') flags.DEFINE_string('tensorrt', None, 'TensorRT mode: {None, FP32, FP16, INT8}') flags.DEFINE_integer('batch_size', 1, 'Batch size for inference.') flags.DEFINE_string('ckpt_path', '_', 'checkpoint dir used for eval.') flags.DEFINE_string('export_ckpt', None, 'Output model ckpt path.') flags.DEFINE_string( 'hparams', '', 'Comma separated k=v pairs of hyperparameters or a module' ' containing attributes to use as hyperparameters.') flags.DEFINE_bool('amp', True, 'Enable mixed precision training') flags.DEFINE_bool('use_xla', True, 'Use XLA') flags.DEFINE_string('input_image', None, 'Input image path for inference.') flags.DEFINE_string('output_image_dir', None, 'Output dir for inference.') flags.DEFINE_string('dllogger_path', '/tmp/time_log.txt', 'Filepath for dllogger logs') # For video. flags.DEFINE_string('input_video', None, 'Input video path for inference.') flags.DEFINE_string('output_video', None, 'Output video path. If None, play it online instead.') # For visualization. flags.DEFINE_integer('max_boxes_to_draw', 100, 'Max number of boxes to draw.') flags.DEFINE_float('min_score_thresh', 0.4, 'Score threshold to show box.') flags.DEFINE_string('nms_method', 'hard', 'nms method, hard or gaussian.') # For saved model. flags.DEFINE_string('saved_model_dir', None, 'Folder path for saved model.') flags.DEFINE_string('tflite_path', None, 'Path for exporting tflite file.') flags.DEFINE_bool('debug', False, 'Debug mode.') FLAGS = flags.FLAGS def main(_): model_config = hparams_config.get_detection_config(FLAGS.model_name) model_config.override(FLAGS.hparams) # Add custom overrides model_config.is_training_bn = False model_config.image_size = model_utils.parse_image_size(model_config.image_size) # A hack to make flag consistent with nms configs. 
if FLAGS.min_score_thresh: model_config.nms_configs.score_thresh = FLAGS.min_score_thresh if FLAGS.nms_method: model_config.nms_configs.method = FLAGS.nms_method if FLAGS.max_boxes_to_draw: model_config.nms_configs.max_output_size = FLAGS.max_boxes_to_draw model_config.mixed_precision = FLAGS.amp setup.set_flags(FLAGS, model_config, training=False) model_params = model_config.as_dict() ckpt_path_or_file = FLAGS.ckpt_path if tf.io.gfile.isdir(ckpt_path_or_file): ckpt_path_or_file = tf.train.latest_checkpoint(ckpt_path_or_file) driver = inference.ServingDriver(FLAGS.model_name, ckpt_path_or_file, FLAGS.batch_size or None, FLAGS.min_score_thresh, FLAGS.max_boxes_to_draw, model_params) # dllogger setup backends = [] backends+=[ JSONStreamBackend(verbosity=Verbosity.VERBOSE, filename=FLAGS.dllogger_path), StdOutBackend(verbosity=Verbosity.DEFAULT)] DLLogger.init(backends=backends) DLLogger.metadata('inference_fps', {'unit': 'images/s'}) DLLogger.metadata('inference_latency_ms', {'unit': 'ms'}) DLLogger.metadata('latency_avg', {'unit': 's'}) DLLogger.metadata('latency_90', {'unit': 's'}) DLLogger.metadata('latency_95', {'unit': 's'}) DLLogger.metadata('latency_99', {'unit': 's'}) if FLAGS.mode == 'export': if tf.io.gfile.exists(FLAGS.saved_model_dir): tf.io.gfile.rmtree(FLAGS.saved_model_dir) driver.export(FLAGS.saved_model_dir, FLAGS.tflite_path, FLAGS.tensorrt) elif FLAGS.mode == 'benchmark': if FLAGS.saved_model_dir: driver.load(FLAGS.saved_model_dir) batch_size = FLAGS.batch_size or 1 if FLAGS.input_image: image_file = tf.io.read_file(FLAGS.input_image) image_arrays = tf.image.decode_image(image_file) image_arrays.set_shape((None, None, 3)) image_arrays = tf.expand_dims(image_arrays, 0) if batch_size > 1: image_arrays = tf.tile(image_arrays, [batch_size, 1, 1, 1]) else: # use synthetic data if no image is provided. image_arrays = tf.ones((batch_size, *model_config.image_size, 3), dtype=tf.uint8) driver.benchmark(image_arrays, FLAGS.bm_runs, FLAGS.trace_filename) elif FLAGS.mode == 'dry': # transfer to tf2 format ckpt driver.build() if FLAGS.export_ckpt: driver.model.save_weights(FLAGS.export_ckpt) elif FLAGS.mode == 'video': import cv2 # pylint: disable=g-import-not-at-top if tf.saved_model.contains_saved_model(FLAGS.saved_model_dir): driver.load(FLAGS.saved_model_dir) cap = cv2.VideoCapture(FLAGS.input_video) if not cap.isOpened(): print('Error opening input video: {}'.format(FLAGS.input_video)) out_ptr = None if FLAGS.output_video: frame_width, frame_height = int(cap.get(3)), int(cap.get(4)) out_ptr = cv2.VideoWriter(FLAGS.output_video, cv2.VideoWriter_fourcc('m', 'p', '4', 'v'), 25, (frame_width, frame_height)) while cap.isOpened(): # Capture frame-by-frame ret, frame = cap.read() if not ret: break raw_frames = np.array([frame]) detections_bs = driver.serve(raw_frames) boxes, scores, classes, _ = tf.nest.map_structure(np.array, detections_bs) new_frame = driver.visualize( raw_frames[0], boxes[0], scores[0], classes[0], min_score_thresh=model_config.nms_configs.score_thresh, max_boxes_to_draw=model_config.nms_configs.max_output_size) if out_ptr: # write frame into output file. out_ptr.write(new_frame) else: # show the frame online, mainly used for real-time speed test. cv2.imshow('Frame', new_frame) # Press Q on keyboard to exit if cv2.waitKey(1) & 0xFF == ord('q'): break if __name__ == '__main__': logging.set_verbosity(logging.ERROR) app.run(main)
DeepLearningExamples-master
TensorFlow2/Detection/Efficientdet/inspector.py
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
DeepLearningExamples-master
TensorFlow2/Detection/Efficientdet/__init__.py
# Copyright 2020 Google Research. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """The main training script.""" import os import time from mpi4py import MPI from absl import app from absl import flags from absl import logging import tensorflow as tf import horovod.tensorflow.keras as hvd from dllogger import StdOutBackend, JSONStreamBackend, Verbosity import dllogger as DLLogger from model import anchors, callback_builder, coco_metric, dataloader from model import efficientdet_keras, label_util, optimizer_builder, postprocess from utils import hparams_config, model_utils, setup, train_lib, util_keras from utils.horovod_utils import is_main_process, get_world_size, get_rank # Model specific paramenters flags.DEFINE_string('training_mode', 'traineval', '(train/train300/traineval)') flags.DEFINE_string( 'training_file_pattern', None, 'Glob for training data files (e.g., COCO train - minival set)') flags.DEFINE_string('model_name', 'efficientdet-d0', 'Model name.') flags.DEFINE_string('model_dir', None, 'Location of model_dir') flags.DEFINE_integer('batch_size', 64, 'training local batch size') flags.DEFINE_integer('eval_batch_size', 64, 'evaluation local batch size') flags.DEFINE_integer('num_examples_per_epoch', 120000, 'Number of examples in one epoch (coco default is 117266)') flags.DEFINE_integer('num_epochs', None, 'Number of epochs for training') flags.DEFINE_bool('benchmark', False, 'Train for a fixed number of steps for performance') flags.DEFINE_integer('benchmark_steps', 100, 'Train for these many steps to benchmark training performance') flags.DEFINE_bool('use_fake_data', False, 'Use fake input.') flags.DEFINE_bool('use_xla', True, 'Use XLA') flags.DEFINE_bool('amp', True, 'Enable mixed precision training') flags.DEFINE_bool('set_num_threads', True, 'Set inter-op and intra-op parallelism threads') flags.DEFINE_string('log_filename', 'time_log.txt', 'Filename for dllogger logs') flags.DEFINE_integer('log_steps', 1, 'Interval of steps between logging of batch level stats') flags.DEFINE_bool('lr_tb', False, 'Log learning rate at each step to TB') flags.DEFINE_bool('enable_map_parallelization', True, 'Parallelize stateless map transformations in dataloader') flags.DEFINE_integer('checkpoint_period', 10, 'Save ema model weights after every X epochs for eval') flags.DEFINE_string('pretrained_ckpt', None, 'Start training from this EfficientDet checkpoint.') flags.DEFINE_string('backbone_init', None, 'Initialize backbone weights from checkpoint in this directory.') flags.DEFINE_string( 'hparams', '', 'Comma separated k=v pairs of hyperparameters or a module' ' containing attributes to use as hyperparameters.') flags.DEFINE_float('lr', None, 'Learning rate') flags.DEFINE_float('warmup_value', 0.0001, 'Initial warmup value') flags.DEFINE_float('warmup_epochs', None, 'Number of warmup epochs') flags.DEFINE_integer('seed', None, 'Random seed') flags.DEFINE_bool('debug', False, 'Enable debug mode') 
flags.DEFINE_bool('time_history', True, 'Get time history') flags.DEFINE_bool('validate', False, 'Get validation loss after each epoch') flags.DEFINE_string('val_file_pattern', None, 'Glob for eval tfrecords, e.g. coco/val-*.tfrecord.') flags.DEFINE_string( 'val_json_file', None, 'COCO validation JSON containing golden bounding boxes. If None, use the ' 'ground truth from the dataloader. Ignored if testdev_dir is not None.') flags.DEFINE_string('testdev_dir', None, 'COCO testdev dir. If not None, ignorer val_json_file.') flags.DEFINE_integer('eval_samples', 5000, 'The number of samples for ' 'evaluation.') FLAGS = flags.FLAGS def main(_): # get e2e training time begin = time.time() logging.info("Training started at: {}".format(time.asctime())) hvd.init() # Parse and override hparams config = hparams_config.get_detection_config(FLAGS.model_name) config.override(FLAGS.hparams) if FLAGS.num_epochs: # NOTE: remove this flag after updating all docs. config.num_epochs = FLAGS.num_epochs if FLAGS.lr: config.learning_rate = FLAGS.lr if FLAGS.warmup_value: config.lr_warmup_init = FLAGS.warmup_value if FLAGS.warmup_epochs: config.lr_warmup_epoch = FLAGS.warmup_epochs config.backbone_init = FLAGS.backbone_init config.mixed_precision = FLAGS.amp config.image_size = model_utils.parse_image_size(config.image_size) # get eval config eval_config = hparams_config.get_detection_config(FLAGS.model_name) eval_config.override(FLAGS.hparams) eval_config.val_json_file = FLAGS.val_json_file eval_config.val_file_pattern = FLAGS.val_file_pattern eval_config.nms_configs.max_nms_inputs = anchors.MAX_DETECTION_POINTS eval_config.drop_remainder = False # eval all examples w/o drop. eval_config.image_size = model_utils.parse_image_size(eval_config['image_size']) # setup setup.set_flags(FLAGS, config, training=True) if FLAGS.debug: tf.config.experimental_run_functions_eagerly(True) tf.debugging.set_log_device_placement(True) tf.random.set_seed(111111) logging.set_verbosity(logging.DEBUG) # Check data path if FLAGS.training_file_pattern is None or FLAGS.val_file_pattern is None or FLAGS.val_json_file is None: raise RuntimeError('You must specify --training_file_pattern, --val_file_pattern and --val_json_file for training.') steps_per_epoch = (FLAGS.num_examples_per_epoch + (FLAGS.batch_size * get_world_size()) - 1) // (FLAGS.batch_size * get_world_size()) if FLAGS.benchmark == True: # For ci perf training runs, run for a fixed number of iterations per epoch steps_per_epoch = FLAGS.benchmark_steps params = dict( config.as_dict(), model_name=FLAGS.model_name, model_dir=FLAGS.model_dir, steps_per_epoch=steps_per_epoch, checkpoint_period=FLAGS.checkpoint_period, batch_size=FLAGS.batch_size, num_shards=get_world_size(), val_json_file=FLAGS.val_json_file, testdev_dir=FLAGS.testdev_dir, mode='train') logging.info('Training params: {}'.format(params)) # make output dir if it does not exist tf.io.gfile.makedirs(FLAGS.model_dir) # dllogger setup backends = [] if is_main_process(): log_path = os.path.join(FLAGS.model_dir, FLAGS.log_filename) backends+=[ JSONStreamBackend(verbosity=Verbosity.VERBOSE, filename=log_path), StdOutBackend(verbosity=Verbosity.DEFAULT)] DLLogger.init(backends=backends) DLLogger.metadata('avg_fps_training', {'unit': 'images/s'}) DLLogger.metadata('avg_fps_training_per_GPU', {'unit': 'images/s'}) DLLogger.metadata('avg_latency_training', {'unit': 's'}) DLLogger.metadata('training_loss', {'unit': None}) DLLogger.metadata('e2e_training_time', {'unit': 's'}) def get_dataset(is_training, params): file_pattern = ( 
FLAGS.training_file_pattern if is_training else FLAGS.val_file_pattern) if not file_pattern: raise ValueError('No matching files.') return dataloader.InputReader( file_pattern, is_training=is_training, use_fake_data=FLAGS.use_fake_data, max_instances_per_image=config.max_instances_per_image, enable_map_parallelization=FLAGS.enable_map_parallelization)( params) num_samples = (FLAGS.eval_samples + get_world_size() - 1) // get_world_size() num_samples = (num_samples + FLAGS.eval_batch_size - 1) // FLAGS.eval_batch_size eval_config.num_samples = num_samples def get_eval_dataset(eval_config): dataset = dataloader.InputReader( FLAGS.val_file_pattern, is_training=False, max_instances_per_image=eval_config.max_instances_per_image)( eval_config, batch_size=FLAGS.eval_batch_size) dataset = dataset.shard(get_world_size(), get_rank()) dataset = dataset.take(num_samples) return dataset eval_dataset = get_eval_dataset(eval_config) # pick focal loss implementation focal_loss = train_lib.StableFocalLoss( params['alpha'], params['gamma'], label_smoothing=params['label_smoothing'], reduction=tf.keras.losses.Reduction.NONE) model = train_lib.EfficientDetNetTrain(params['model_name'], config) model.build((None, *config.image_size, 3)) model.compile( optimizer=optimizer_builder.get_optimizer(params), loss={ 'box_loss': train_lib.BoxLoss( params['delta'], reduction=tf.keras.losses.Reduction.NONE), 'box_iou_loss': train_lib.BoxIouLoss( params['iou_loss_type'], params['min_level'], params['max_level'], params['num_scales'], params['aspect_ratios'], params['anchor_scale'], params['image_size'], reduction=tf.keras.losses.Reduction.NONE), 'class_loss': focal_loss, 'seg_loss': tf.keras.losses.SparseCategoricalCrossentropy( from_logits=True, reduction=tf.keras.losses.Reduction.NONE) }) train_from_epoch = util_keras.restore_ckpt(model, params['model_dir'], config.moving_average_decay, steps_per_epoch=steps_per_epoch) print("training_mode: {}".format(FLAGS.training_mode)) callbacks = callback_builder.get_callbacks(params, FLAGS.training_mode, eval_config, eval_dataset, DLLogger, FLAGS.time_history, FLAGS.log_steps, FLAGS.lr_tb, FLAGS.benchmark) history = model.fit( get_dataset(True, params=params), epochs=params['num_epochs'], steps_per_epoch=steps_per_epoch, initial_epoch=train_from_epoch, callbacks=callbacks, verbose=1 if is_main_process() else 0, validation_data=get_dataset(False, params=params) if FLAGS.validate else None, validation_steps=(FLAGS.eval_samples // FLAGS.eval_batch_size) if FLAGS.validate else None) if is_main_process(): model.save_weights(os.path.join(FLAGS.model_dir, 'ckpt-final')) # log final stats stats = {} for callback in callbacks: if isinstance(callback, callback_builder.TimeHistory): if callback.epoch_runtime_log: stats['avg_fps_training'] = callback.average_examples_per_second stats['avg_fps_training_per_GPU'] = callback.average_examples_per_second / get_world_size() stats['avg_latency_training'] = callback.average_time_per_iteration if history and history.history: train_hist = history.history #Gets final loss from training. 
stats['training_loss'] = float(hvd.allreduce(tf.constant(train_hist['loss'][-1], dtype=tf.float32), average=True)) if os.path.exists(os.path.join(FLAGS.model_dir,'ema_weights')): ckpt_epoch = "%02d" % sorted(set([int(f.rsplit('.')[0].rsplit('-')[1]) for f in os.listdir(os.path.join(FLAGS.model_dir,'ema_weights')) if 'emackpt' in f]), reverse=True)[0] ckpt = os.path.join(FLAGS.model_dir, 'ema_weights', 'emackpt-' + str(ckpt_epoch)) util_keras.restore_ckpt(model, ckpt, eval_config.moving_average_decay, steps_per_epoch=0, skip_mismatch=False, expect_partial=True) if is_main_process(): model.save(os.path.join(FLAGS.model_dir, 'emackpt-final')) else: ckpt_epoch = 'final' ckpt = os.path.join(FLAGS.model_dir, 'ckpt-' + ckpt_epoch) if is_main_process(): model.save(os.path.join(FLAGS.model_dir, 'ckpt-' + ckpt_epoch)) # Start evaluation of final ema checkpoint logging.set_verbosity(logging.WARNING) @tf.function def model_fn(images, labels): cls_outputs, box_outputs = model(images, training=False) detections = postprocess.generate_detections(eval_config, cls_outputs, box_outputs, labels['image_scales'], labels['source_ids']) tf.numpy_function(evaluator.update_state, [labels['groundtruth_data'], postprocess.transform_detections(detections)], []) if FLAGS.benchmark == False and (FLAGS.training_mode == 'train' or FLAGS.num_epochs < 200): # Evaluator for AP calculation. label_map = label_util.get_label_map(eval_config.label_map) evaluator = coco_metric.EvaluationMetric( filename=eval_config.val_json_file, label_map=label_map) evaluator.reset_states() # evaluate all images. pbar = tf.keras.utils.Progbar(num_samples) for i, (images, labels) in enumerate(eval_dataset): model_fn(images, labels) if is_main_process(): pbar.update(i) # gather detections from all ranks evaluator.gather() if is_main_process(): # compute the final eval results. metrics = evaluator.result() metric_dict = {} for i, name in enumerate(evaluator.metric_names): metric_dict[name] = metrics[i] if label_map: for i, cid in enumerate(sorted(label_map.keys())): name = 'AP_/%s' % label_map[cid] metric_dict[name] = metrics[i + len(evaluator.metric_names)] # csv format csv_metrics = ['AP','AP50','AP75','APs','APm','APl'] csv_format = ",".join([str(ckpt_epoch)] + [str(round(metric_dict[key] * 100, 2)) for key in csv_metrics]) print(FLAGS.model_name, metric_dict, "csv format:", csv_format) DLLogger.log(step=(), data={'epoch': ckpt_epoch, 'validation_accuracy_mAP': round(metric_dict['AP'] * 100, 2)}) DLLogger.flush() MPI.COMM_WORLD.Barrier() if is_main_process(): stats['e2e_training_time'] = time.time() - begin DLLogger.log(step=(), data=stats) DLLogger.flush() if __name__ == '__main__': logging.set_verbosity(logging.INFO) app.run(main)
DeepLearningExamples-master
TensorFlow2/Detection/Efficientdet/train.py
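One detail of train.py worth calling out is how steps_per_epoch is derived: it is a ceiling division of the per-epoch example count by the global batch size (local batch size times Horovod world size). A small standalone sketch of that arithmetic, using the script's default flag values as illustrative inputs:

# Ceiling division used for steps_per_epoch in train.py, reproduced in isolation.
def steps_per_epoch(num_examples, local_batch_size, world_size):
  global_batch = local_batch_size * world_size
  return (num_examples + global_batch - 1) // global_batch

assert steps_per_epoch(120000, 64, 8) == 235    # 120000 / 512 rounds up to 235
assert steps_per_epoch(120000, 64, 1) == 1875   # exact division, no rounding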
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A simple example of how to use the Keras model for inference."""
import os

from absl import app
from absl import flags
from absl import logging
import numpy as np
from PIL import Image
import tensorflow as tf

from utils import hparams_config
from model import inference
from model import efficientdet_keras

flags.DEFINE_string('image_path', None, 'Location of test image.')
flags.DEFINE_string('output_dir', None, 'Directory of annotated output images.')
flags.DEFINE_string('model_dir', None, 'Location of the checkpoint to run.')
flags.DEFINE_string('model_name', 'efficientdet-d0', 'Model name to use.')
flags.DEFINE_string('hparams', '', 'Comma separated k=v pairs or a yaml file')
flags.DEFINE_bool('debug', False, 'If true, run function in eager for debug.')
flags.DEFINE_string('saved_model_dir', None, 'Saved model directory')
FLAGS = flags.FLAGS


def main(_):
  imgs = [np.array(Image.open(FLAGS.image_path))] * 2

  # Create model config.
  config = hparams_config.get_efficientdet_config('efficientdet-d0')
  config.is_training_bn = False
  config.image_size = '1920x1280'
  config.nms_configs.score_thresh = 0.4
  config.nms_configs.max_output_size = 100
  config.override(FLAGS.hparams)

  # Use 'mixed_float16' if running on GPUs.
  policy = tf.keras.mixed_precision.experimental.Policy('float32')
  tf.keras.mixed_precision.experimental.set_policy(policy)
  tf.config.experimental_run_functions_eagerly(FLAGS.debug)

  # Create and run the model.
  model = efficientdet_keras.EfficientDetModel(config=config)
  model.build((None, None, None, 3))
  model.load_weights(tf.train.latest_checkpoint(FLAGS.model_dir))
  model.summary()

  class ExportModel(tf.Module):

    def __init__(self, model):
      super().__init__()
      self.model = model

    @tf.function
    def f(self, imgs):
      return self.model(imgs, training=False, post_mode='global')

  imgs = tf.convert_to_tensor(imgs, dtype=tf.uint8)
  export_model = ExportModel(model)
  if FLAGS.saved_model_dir:
    tf.saved_model.save(
        export_model, FLAGS.saved_model_dir,
        signatures=export_model.f.get_concrete_function(
            tf.TensorSpec(shape=(None, None, None, 3), dtype=tf.uint8)))
    export_model = tf.saved_model.load(FLAGS.saved_model_dir)
  boxes, scores, classes, valid_len = export_model.f(imgs)

  # Visualize results.
  for i, img in enumerate(imgs):
    length = valid_len[i]
    img = inference.visualize_image(
        img,
        boxes[i].numpy()[:length],
        # np.int was removed in recent NumPy releases; the builtin int is the
        # equivalent dtype here.
        classes[i].numpy().astype(int)[:length],
        scores[i].numpy()[:length],
        label_map=config.label_map,
        min_score_thresh=config.nms_configs.score_thresh,
        max_boxes_to_draw=config.nms_configs.max_output_size)
    output_image_path = os.path.join(FLAGS.output_dir, str(i) + '.jpg')
    Image.fromarray(img).save(output_image_path)
    print('writing annotated image to %s' % output_image_path)


if __name__ == '__main__':
  flags.mark_flag_as_required('image_path')
  flags.mark_flag_as_required('output_dir')
  flags.mark_flag_as_required('model_dir')
  logging.set_verbosity(logging.ERROR)
  app.run(main)
DeepLearningExamples-master
TensorFlow2/Detection/Efficientdet/infer.py
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Eval libraries."""
import os

from mpi4py import MPI
from absl import app
from absl import flags
from absl import logging
import tensorflow as tf
import horovod.tensorflow.keras as hvd

from model import anchors
from model import coco_metric
from model import dataloader
from model import efficientdet_keras
from model import label_util
from model import postprocess
from utils import hparams_config
from utils import model_utils
from utils import util_keras
from utils.horovod_utils import get_rank, get_world_size, is_main_process

flags.DEFINE_integer('eval_samples', 5000, 'Number of eval samples.')
flags.DEFINE_string('val_file_pattern', None,
                    'Glob for eval tfrecords, e.g. coco/val-*.tfrecord.')
flags.DEFINE_string('val_json_file', None,
                    'Ground truth, e.g. annotations/instances_val2017.json.')
flags.DEFINE_string('model_name', 'efficientdet-d0', 'Model name to use.')
flags.DEFINE_string('ckpt_path', None, 'Checkpoint path to evaluate')
flags.DEFINE_integer('batch_size', 8, 'Local batch size.')
flags.DEFINE_string('only_this_epoch', None, 'Evaluate only this epoch checkpoint.')
flags.DEFINE_bool('enable_map_parallelization', True,
                  'Parallelize stateless map transformations in dataloader')
flags.DEFINE_bool('amp', True, 'Use mixed precision for eval.')
flags.DEFINE_string('hparams', '', 'Comma separated k=v pairs or a yaml file.')
FLAGS = flags.FLAGS


def main(_):
  hvd.init()

  gpus = tf.config.experimental.list_physical_devices('GPU')
  for gpu in gpus:
    tf.config.experimental.set_memory_growth(gpu, True)
  if gpus:
    tf.config.experimental.set_visible_devices(gpus[hvd.local_rank()], 'GPU')

  if FLAGS.amp:
    policy = tf.keras.mixed_precision.experimental.Policy('mixed_float16')
    tf.keras.mixed_precision.experimental.set_policy(policy)
  else:
    os.environ['TF_ENABLE_AUTO_MIXED_PRECISION'] = '0'

  config = hparams_config.get_efficientdet_config(FLAGS.model_name)
  config.override(FLAGS.hparams)
  config.val_json_file = FLAGS.val_json_file
  config.nms_configs.max_nms_inputs = anchors.MAX_DETECTION_POINTS
  config.drop_remainder = False  # eval all examples w/o drop.
  config.image_size = model_utils.parse_image_size(config['image_size'])

  @tf.function
  def model_fn(images, labels):
    cls_outputs, box_outputs = model(images, training=False)
    detections = postprocess.generate_detections(config, cls_outputs,
                                                 box_outputs,
                                                 labels['image_scales'],
                                                 labels['source_ids'])
    tf.numpy_function(evaluator.update_state,
                      [labels['groundtruth_data'],
                       postprocess.transform_detections(detections)], [])

  # Network
  model = efficientdet_keras.EfficientDetNet(config=config)
  model.build((None, *config.image_size, 3))

  # dataset
  batch_size = FLAGS.batch_size  # local batch size.
  ds = dataloader.InputReader(
      FLAGS.val_file_pattern,
      is_training=False,
      max_instances_per_image=config.max_instances_per_image,
      enable_map_parallelization=FLAGS.enable_map_parallelization)(
          config, batch_size=batch_size)
  ds = ds.shard(get_world_size(), get_rank())

  # Evaluator for AP calculation.
  label_map = label_util.get_label_map(config.label_map)
  evaluator = coco_metric.EvaluationMetric(
      filename=config.val_json_file, label_map=label_map)

  util_keras.restore_ckpt(model, FLAGS.ckpt_path, config.moving_average_decay,
                          steps_per_epoch=0, skip_mismatch=False,
                          expect_partial=True)

  if FLAGS.eval_samples:
    num_samples = (FLAGS.eval_samples + get_world_size() - 1) // get_world_size()
    num_samples = (num_samples + batch_size - 1) // batch_size
    ds = ds.take(num_samples)

  evaluator.reset_states()
  # evaluate all images.
  pbar = tf.keras.utils.Progbar(num_samples)
  for i, (images, labels) in enumerate(ds):
    model_fn(images, labels)
    if is_main_process():
      pbar.update(i)

  # gather detections from all ranks
  evaluator.gather()

  if is_main_process():
    # compute the final eval results.
    metrics = evaluator.result()
    metric_dict = {}
    for i, name in enumerate(evaluator.metric_names):
      metric_dict[name] = metrics[i]

    if label_map:
      for i, cid in enumerate(sorted(label_map.keys())):
        name = 'AP_/%s' % label_map[cid]
        metric_dict[name] = metrics[i + len(evaluator.metric_names)]

    # csv format
    csv_metrics = ['AP', 'AP50', 'AP75', 'APs', 'APm', 'APl']
    csv_format = ",".join(
        [str(round(metric_dict[key] * 100, 2)) for key in csv_metrics])
    print(FLAGS.model_name, metric_dict, "csv format:", csv_format)

  MPI.COMM_WORLD.Barrier()


if __name__ == '__main__':
  flags.mark_flag_as_required('val_file_pattern')
  flags.mark_flag_as_required('val_json_file')
  flags.mark_flag_as_required('ckpt_path')
  logging.set_verbosity(logging.WARNING)
  app.run(main)
DeepLearningExamples-master
TensorFlow2/Detection/Efficientdet/eval.py
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# This library is mostly based on tensorflow object detection API
# https://github.com/tensorflow/models/blob/master/research/object_detection/dataset_tools/create_coco_tf_record.py
DeepLearningExamples-master
TensorFlow2/Detection/Efficientdet/dataset/__init__.py
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Label map utility functions."""
from absl import logging
from six.moves import range


def _validate_label_map(label_map):
  """Checks if a label map is valid.

  Args:
    label_map: StringIntLabelMap to validate.

  Raises:
    ValueError: if label map is invalid.
  """
  for item in label_map.item:
    if item.id < 0:
      raise ValueError('Label map ids should be >= 0.')
    if (item.id == 0 and item.name != 'background' and
        item.display_name != 'background'):
      raise ValueError('Label map id 0 is reserved for the background label')


def create_category_index(categories):
  """Creates dictionary of COCO compatible categories keyed by category id.

  Args:
    categories: a list of dicts, each of which has the following keys:
      'id': (required) an integer id uniquely identifying this category.
      'name': (required) string representing category name
        e.g., 'cat', 'dog', 'pizza'.

  Returns:
    category_index: a dict containing the same entries as categories, but keyed
      by the 'id' field of each category.
  """
  category_index = {}
  for cat in categories:
    category_index[cat['id']] = cat
  return category_index


def get_max_label_map_index(label_map):
  """Get maximum index in label map.

  Args:
    label_map: a StringIntLabelMapProto

  Returns:
    an integer
  """
  return max([item.id for item in label_map.item])


def convert_label_map_to_categories(label_map,
                                    max_num_classes,
                                    use_display_name=True):
  """Given label map proto returns categories list compatible with eval.

  This function converts label map proto and returns a list of dicts, each of
  which has the following keys:
    'id': (required) an integer id uniquely identifying this category.
    'name': (required) string representing category name
      e.g., 'cat', 'dog', 'pizza'.
    'keypoints': (optional) a dictionary of keypoint string 'label' to integer
      'id'.
  We only allow a class into the list if its id-label_id_offset is
  between 0 (inclusive) and max_num_classes (exclusive).
  If there are several items mapping to the same id in the label map,
  we will only keep the first one in the categories list.

  Args:
    label_map: a StringIntLabelMapProto or None. If None, a default categories
      list is created with max_num_classes categories.
    max_num_classes: maximum number of (consecutive) label indices to include.
    use_display_name: (boolean) choose whether to load 'display_name' field as
      category name. If False or if the display_name field does not exist,
      uses 'name' field as category names instead.

  Returns:
    categories: a list of dictionaries representing all possible categories.
  """
  categories = []
  list_of_ids_already_added = []
  if not label_map:
    label_id_offset = 1
    for class_id in range(max_num_classes):
      categories.append({
          'id': class_id + label_id_offset,
          'name': 'category_{}'.format(class_id + label_id_offset)
      })
    return categories
  for item in label_map.item:
    if not 0 < item.id <= max_num_classes:
      logging.info(
          'Ignore item %d since it falls outside of requested '
          'label range.', item.id)
      continue
    if use_display_name and item.HasField('display_name'):
      name = item.display_name
    else:
      name = item.name
    if item.id not in list_of_ids_already_added:
      list_of_ids_already_added.append(item.id)
      category = {'id': item.id, 'name': name}
      if item.keypoints:
        keypoints = {}
        list_of_keypoint_ids = []
        for kv in item.keypoints:
          if kv.id in list_of_keypoint_ids:
            raise ValueError('Duplicate keypoint ids are not allowed. '
                             'Found {} more than once'.format(kv.id))
          keypoints[kv.label] = kv.id
          list_of_keypoint_ids.append(kv.id)
        category['keypoints'] = keypoints
      categories.append(category)
  return categories


def create_class_agnostic_category_index():
  """Creates a category index with a single `object` class."""
  return {1: {'id': 1, 'name': 'object'}}
DeepLearningExamples-master
TensorFlow2/Detection/Efficientdet/dataset/label_map_util.py
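A short, self-contained illustration of create_category_index and create_class_agnostic_category_index from label_map_util.py above, using hand-written category dicts instead of a real StringIntLabelMap proto; the category names are arbitrary examples.

# Plain-dict categories in the form expected by create_category_index.
import label_map_util

categories = [
    {'id': 1, 'name': 'person'},
    {'id': 2, 'name': 'bicycle'},
]
index = label_map_util.create_category_index(categories)
assert index[2]['name'] == 'bicycle'

# Class-agnostic fallback: a single generic 'object' category.
assert label_map_util.create_class_agnostic_category_index() == {
    1: {'id': 1, 'name': 'object'}}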
# Copyright 2020 Google Research. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Convert raw COCO 2017 dataset to TFRecord. Example usage: python create_coco_tfrecord.py --logtostderr \ --image_dir="${TRAIN_IMAGE_DIR}" \ --image_info_file="${TRAIN_IMAGE_INFO_FILE}" \ --object_annotations_file="${TRAIN_ANNOTATIONS_FILE}" \ --caption_annotations_file="${CAPTION_ANNOTATIONS_FILE}" \ --output_file_prefix="${OUTPUT_DIR/FILE_PREFIX}" \ --num_shards=100 """ import collections import hashlib import io import json import multiprocessing import os from absl import app from absl import flags from absl import logging import numpy as np import PIL.Image from pycocotools import mask import tensorflow.compat.v1 as tf import label_map_util import tfrecord_util flags.DEFINE_boolean( 'include_masks', False, 'Whether to include instance segmentations masks ' '(PNG encoded) in the result. default: False.') flags.DEFINE_string('image_dir', '', 'Directory containing images.') flags.DEFINE_string( 'image_info_file', '', 'File containing image information. ' 'Tf Examples in the output files correspond to the image ' 'info entries in this file. If this file is not provided ' 'object_annotations_file is used if present. Otherwise, ' 'caption_annotations_file is used to get image info.') flags.DEFINE_string( 'object_annotations_file', '', 'File containing object ' 'annotations - boxes and instance masks.') flags.DEFINE_string('caption_annotations_file', '', 'File containing image ' 'captions.') flags.DEFINE_string('output_file_prefix', '/tmp/train', 'Path to output file') flags.DEFINE_integer('num_shards', 32, 'Number of shards for output file.') flags.DEFINE_integer('num_threads', None, 'Number of threads to run.') FLAGS = flags.FLAGS def create_tf_example(image, image_dir, bbox_annotations=None, category_index=None, caption_annotations=None, include_masks=False): """Converts image and annotations to a tf.Example proto. Args: image: dict with keys: [u'license', u'file_name', u'coco_url', u'height', u'width', u'date_captured', u'flickr_url', u'id'] image_dir: directory containing the image files. bbox_annotations: list of dicts with keys: [u'segmentation', u'area', u'iscrowd', u'image_id', u'bbox', u'category_id', u'id'] Notice that bounding box coordinates in the official COCO dataset are given as [x, y, width, height] tuples using absolute coordinates where x, y represent the top-left (0-indexed) corner. This function converts to the format expected by the Tensorflow Object Detection API (which is which is [ymin, xmin, ymax, xmax] with coordinates normalized relative to image size). category_index: a dict containing COCO category information keyed by the 'id' field of each category. See the label_map_util.create_category_index function. caption_annotations: list of dict with keys: [u'id', u'image_id', u'str']. include_masks: Whether to include instance segmentations masks (PNG encoded) in the result. default: False. 
Returns: example: The converted tf.Example num_annotations_skipped: Number of (invalid) annotations that were ignored. Raises: ValueError: if the image pointed to by data['filename'] is not a valid JPEG """ image_height = image['height'] image_width = image['width'] filename = image['file_name'] image_id = image['id'] full_path = os.path.join(image_dir, filename) with tf.gfile.GFile(full_path, 'rb') as fid: encoded_jpg = fid.read() encoded_jpg_io = io.BytesIO(encoded_jpg) image = PIL.Image.open(encoded_jpg_io) key = hashlib.sha256(encoded_jpg).hexdigest() feature_dict = { 'image/height': tfrecord_util.int64_feature(image_height), 'image/width': tfrecord_util.int64_feature(image_width), 'image/filename': tfrecord_util.bytes_feature(filename.encode('utf8')), 'image/source_id': tfrecord_util.bytes_feature(str(image_id).encode('utf8')), 'image/key/sha256': tfrecord_util.bytes_feature(key.encode('utf8')), 'image/encoded': tfrecord_util.bytes_feature(encoded_jpg), 'image/format': tfrecord_util.bytes_feature('jpeg'.encode('utf8')), } num_annotations_skipped = 0 if bbox_annotations: xmin = [] xmax = [] ymin = [] ymax = [] is_crowd = [] category_names = [] category_ids = [] area = [] encoded_mask_png = [] for object_annotations in bbox_annotations: (x, y, width, height) = tuple(object_annotations['bbox']) if width <= 0 or height <= 0: num_annotations_skipped += 1 continue if x + width > image_width or y + height > image_height: num_annotations_skipped += 1 continue xmin.append(float(x) / image_width) xmax.append(float(x + width) / image_width) ymin.append(float(y) / image_height) ymax.append(float(y + height) / image_height) is_crowd.append(object_annotations['iscrowd']) category_id = int(object_annotations['category_id']) category_ids.append(category_id) category_names.append(category_index[category_id]['name'].encode('utf8')) area.append(object_annotations['area']) if include_masks: run_len_encoding = mask.frPyObjects(object_annotations['segmentation'], image_height, image_width) binary_mask = mask.decode(run_len_encoding) if not object_annotations['iscrowd']: binary_mask = np.amax(binary_mask, axis=2) pil_image = PIL.Image.fromarray(binary_mask) output_io = io.BytesIO() pil_image.save(output_io, format='PNG') encoded_mask_png.append(output_io.getvalue()) feature_dict.update({ 'image/object/bbox/xmin': tfrecord_util.float_list_feature(xmin), 'image/object/bbox/xmax': tfrecord_util.float_list_feature(xmax), 'image/object/bbox/ymin': tfrecord_util.float_list_feature(ymin), 'image/object/bbox/ymax': tfrecord_util.float_list_feature(ymax), 'image/object/class/text': tfrecord_util.bytes_list_feature(category_names), 'image/object/class/label': tfrecord_util.int64_list_feature(category_ids), 'image/object/is_crowd': tfrecord_util.int64_list_feature(is_crowd), 'image/object/area': tfrecord_util.float_list_feature(area), }) if include_masks: feature_dict['image/object/mask'] = ( tfrecord_util.bytes_list_feature(encoded_mask_png)) if caption_annotations: captions = [] for caption_annotation in caption_annotations: captions.append(caption_annotation['caption'].encode('utf8')) feature_dict.update( {'image/caption': tfrecord_util.bytes_list_feature(captions)}) example = tf.train.Example(features=tf.train.Features(feature=feature_dict)) return key, example, num_annotations_skipped def _pool_create_tf_example(args): return create_tf_example(*args) def _load_object_annotations(object_annotations_file): """Loads object annotation JSON file.""" with tf.gfile.GFile(object_annotations_file, 'r') as fid: 
obj_annotations = json.load(fid) images = obj_annotations['images'] category_index = label_map_util.create_category_index( obj_annotations['categories']) img_to_obj_annotation = collections.defaultdict(list) logging.info('Building bounding box index.') for annotation in obj_annotations['annotations']: image_id = annotation['image_id'] img_to_obj_annotation[image_id].append(annotation) missing_annotation_count = 0 for image in images: image_id = image['id'] if image_id not in img_to_obj_annotation: missing_annotation_count += 1 logging.info('%d images are missing bboxes.', missing_annotation_count) return img_to_obj_annotation, category_index def _load_caption_annotations(caption_annotations_file): """Loads caption annotation JSON file.""" with tf.gfile.GFile(caption_annotations_file, 'r') as fid: caption_annotations = json.load(fid) img_to_caption_annotation = collections.defaultdict(list) logging.info('Building caption index.') for annotation in caption_annotations['annotations']: image_id = annotation['image_id'] img_to_caption_annotation[image_id].append(annotation) missing_annotation_count = 0 images = caption_annotations['images'] for image in images: image_id = image['id'] if image_id not in img_to_caption_annotation: missing_annotation_count += 1 logging.info('%d images are missing captions.', missing_annotation_count) return img_to_caption_annotation def _load_images_info(image_info_file): with tf.gfile.GFile(image_info_file, 'r') as fid: info_dict = json.load(fid) return info_dict['images'] def _create_tf_record_from_coco_annotations(image_info_file, image_dir, output_path, num_shards, object_annotations_file=None, caption_annotations_file=None, include_masks=False): """Loads COCO annotation json files and converts to tf.Record format. Args: image_info_file: JSON file containing image info. The number of tf.Examples in the output tf Record files is exactly equal to the number of image info entries in this file. This can be any of train/val/test annotation json files Eg. 'image_info_test-dev2017.json', 'instance_annotations_train2017.json', 'caption_annotations_train2017.json', etc. image_dir: Directory containing the image files. output_path: Path to output tf.Record file. num_shards: Number of output files to create. object_annotations_file: JSON file containing bounding box annotations. caption_annotations_file: JSON file containing caption annotations. include_masks: Whether to include instance segmentations masks (PNG encoded) in the result. default: False. 
""" logging.info('writing to output path: %s', output_path) writers = [ tf.python_io.TFRecordWriter(output_path + '-%05d-of-%05d.tfrecord' % (i, num_shards)) for i in range(num_shards) ] images = _load_images_info(image_info_file) img_to_obj_annotation = None img_to_caption_annotation = None category_index = None if object_annotations_file: img_to_obj_annotation, category_index = ( _load_object_annotations(object_annotations_file)) if caption_annotations_file: img_to_caption_annotation = ( _load_caption_annotations(caption_annotations_file)) def _get_object_annotation(image_id): if img_to_obj_annotation: return img_to_obj_annotation[image_id] else: return None def _get_caption_annotation(image_id): if img_to_caption_annotation: return img_to_caption_annotation[image_id] else: return None pool = multiprocessing.Pool(FLAGS.num_threads) total_num_annotations_skipped = 0 for idx, (_, tf_example, num_annotations_skipped) in enumerate( pool.imap( _pool_create_tf_example, [(image, image_dir, _get_object_annotation(image['id']), category_index, _get_caption_annotation(image['id']), include_masks) for image in images])): if idx % 100 == 0: logging.info('On image %d of %d', idx, len(images)) total_num_annotations_skipped += num_annotations_skipped writers[idx % num_shards].write(tf_example.SerializeToString()) pool.close() pool.join() for writer in writers: writer.close() logging.info('Finished writing, skipped %d annotations.', total_num_annotations_skipped) def main(_): assert FLAGS.image_dir, '`image_dir` missing.' assert (FLAGS.image_info_file or FLAGS.object_annotations_file or FLAGS.caption_annotations_file), ('All annotation files are ' 'missing.') if FLAGS.image_info_file: image_info_file = FLAGS.image_info_file elif FLAGS.object_annotations_file: image_info_file = FLAGS.object_annotations_file else: image_info_file = FLAGS.caption_annotations_file directory = os.path.dirname(FLAGS.output_file_prefix) if not tf.gfile.IsDirectory(directory): tf.gfile.MakeDirs(directory) _create_tf_record_from_coco_annotations(image_info_file, FLAGS.image_dir, FLAGS.output_file_prefix, FLAGS.num_shards, FLAGS.object_annotations_file, FLAGS.caption_annotations_file, FLAGS.include_masks) if __name__ == '__main__': app.run(main)
DeepLearningExamples-master
TensorFlow2/Detection/Efficientdet/dataset/create_coco_tfrecord.py
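To sanity-check the shards written by create_coco_tfrecord.py, the records can be parsed back with a subset of the feature keys that create_tf_example writes; the shard filename below assumes the default --output_file_prefix=/tmp/train and --num_shards=32 and is otherwise a placeholder.

# Read one example back from a generated shard and inspect a few fields.
import tensorflow as tf

features = {
    'image/encoded': tf.io.FixedLenFeature([], tf.string),
    'image/source_id': tf.io.FixedLenFeature([], tf.string),
    'image/object/bbox/xmin': tf.io.VarLenFeature(tf.float32),
    'image/object/class/label': tf.io.VarLenFeature(tf.int64),
}

dataset = tf.data.TFRecordDataset('/tmp/train-00000-of-00032.tfrecord')
for serialized in dataset.take(1):
  example = tf.io.parse_single_example(serialized, features)
  print(example['image/source_id'].numpy(),
        tf.sparse.to_dense(example['image/object/class/label']).numpy())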
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""TFRecord related utilities."""
from six.moves import range
import tensorflow.compat.v1 as tf


def int64_feature(value):
  return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))


def int64_list_feature(value):
  return tf.train.Feature(int64_list=tf.train.Int64List(value=value))


def bytes_feature(value):
  return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))


def bytes_list_feature(value):
  return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))


def float_list_feature(value):
  return tf.train.Feature(float_list=tf.train.FloatList(value=value))


def read_examples_list(path):
  """Read list of training or validation examples.

  The file is assumed to contain a single example per line where the first
  token in the line is an identifier that allows us to find the image and
  annotation xml for that example.

  For example, the line:
  xyz 3
  would allow us to find files xyz.jpg and xyz.xml (the 3 would be ignored).

  Args:
    path: absolute path to examples list file.

  Returns:
    list of example identifiers (strings).
  """
  with tf.gfile.GFile(path) as fid:
    lines = fid.readlines()
  return [line.strip().split(' ')[0] for line in lines]


def recursive_parse_xml_to_dict(xml):
  """Recursively parses XML contents to python dict.

  We assume that `object` tags are the only ones that can appear
  multiple times at the same level of a tree.

  Args:
    xml: xml tree obtained by parsing XML file contents using lxml.etree

  Returns:
    Python dictionary holding XML contents.
  """
  if not xml:
    return {xml.tag: xml.text}
  result = {}
  for child in xml:
    child_result = recursive_parse_xml_to_dict(child)
    if child.tag != 'object':
      result[child.tag] = child_result[child.tag]
    else:
      if child.tag not in result:
        result[child.tag] = []
      result[child.tag].append(child_result[child.tag])
  return {xml.tag: result}


def open_sharded_output_tfrecords(exit_stack, base_path, num_shards):
  """Opens all TFRecord shards for writing and adds them to an exit stack.

  Args:
    exit_stack: A contextlib.ExitStack used to automatically close the
      TFRecords opened in this function.
    base_path: The base path for all shards
    num_shards: The number of shards

  Returns:
    The list of opened TFRecords. Position k in the list corresponds to shard k.
  """
  tf_record_output_filenames = [
      '{}-{:05d}-of-{:05d}'.format(base_path, idx, num_shards)
      for idx in range(num_shards)
  ]

  tfrecords = [
      exit_stack.enter_context(tf.python_io.TFRecordWriter(file_name))
      for file_name in tf_record_output_filenames
  ]

  return tfrecords
DeepLearningExamples-master
TensorFlow2/Detection/Efficientdet/dataset/tfrecord_util.py
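A minimal sketch of the helpers in tfrecord_util.py being used to assemble and serialize a tf.train.Example; the field names and values are arbitrary examples, and the module is assumed to be importable from the dataset directory (as create_coco_tfrecord.py imports it).

# Build a toy tf.train.Example with the feature helpers above.
import tensorflow.compat.v1 as tf

import tfrecord_util

feature_dict = {
    'image/height': tfrecord_util.int64_feature(480),
    'image/width': tfrecord_util.int64_feature(640),
    'image/filename': tfrecord_util.bytes_feature('example.jpg'.encode('utf8')),
    'image/object/bbox/xmin': tfrecord_util.float_list_feature([0.1, 0.5]),
}
example = tf.train.Example(features=tf.train.Features(feature=feature_dict))
serialized = example.SerializeToString()  # bytes ready for a TFRecordWriter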
# Copyright 2020 Google Research. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Common utils.""" import contextlib import os from typing import Text, Tuple, Union from absl import logging import numpy as np import tensorflow.compat.v1 as tf import tensorflow.compat.v2 as tf2 # pylint: disable=logging-format-interpolation def get_ema_vars(): """Get all exponential moving average (ema) variables.""" ema_vars = tf.trainable_variables() + \ tf.get_collection(tf.GraphKeys.MOVING_AVERAGE_VARIABLES) for v in tf.global_variables(): # We maintain mva for batch norm moving mean and variance as well. if 'moving_mean' in v.name or 'moving_variance' in v.name: ema_vars.append(v) return list(set(ema_vars)) def get_ckpt_var_map(ckpt_path, ckpt_scope, var_scope, skip_mismatch=None): """Get a var map for restoring from pretrained checkpoints. Args: ckpt_path: string. A pretrained checkpoint path. ckpt_scope: string. Scope name for checkpoint variables. var_scope: string. Scope name for model variables. skip_mismatch: skip variables if shape mismatch. Returns: var_map: a dictionary from checkpoint name to model variables. """ logging.info('Init model from checkpoint {}'.format(ckpt_path)) if not ckpt_scope.endswith('/') or not var_scope.endswith('/'): raise ValueError('Please specific scope name ending with /') if ckpt_scope.startswith('/'): ckpt_scope = ckpt_scope[1:] if var_scope.startswith('/'): var_scope = var_scope[1:] var_map = {} # Get the list of vars to restore. model_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=var_scope) reader = tf.train.load_checkpoint(ckpt_path) ckpt_var_name_to_shape = reader.get_variable_to_shape_map() ckpt_var_names = set(reader.get_variable_to_shape_map().keys()) for i, v in enumerate(model_vars): if not v.op.name.startswith(var_scope): logging.info('skip {} -- does not match scope {}'.format( v.op.name, var_scope)) ckpt_var = ckpt_scope + v.op.name[len(var_scope):] if (ckpt_var not in ckpt_var_names and v.op.name.endswith('/ExponentialMovingAverage')): ckpt_var = ckpt_scope + v.op.name[:-len('/ExponentialMovingAverage')] if ckpt_var not in ckpt_var_names: if 'Momentum' in ckpt_var or 'RMSProp' in ckpt_var: # Skip optimizer variables. continue if skip_mismatch: logging.info('skip {} ({}) -- not in ckpt'.format(v.op.name, ckpt_var)) continue raise ValueError('{} is not in ckpt {}'.format(v.op, ckpt_path)) if v.shape != ckpt_var_name_to_shape[ckpt_var]: if skip_mismatch: logging.info('skip {} ({} vs {}) -- shape mismatch'.format( v.op.name, v.shape, ckpt_var_name_to_shape[ckpt_var])) continue raise ValueError('shape mismatch {} ({} vs {})'.format( v.op.name, v.shape, ckpt_var_name_to_shape[ckpt_var])) if i < 5: # Log the first few elements for sanity check. 
logging.info('Init {} from ckpt var {}'.format(v.op.name, ckpt_var)) var_map[ckpt_var] = v return var_map def drop_connect(inputs, is_training, survival_prob): """Drop the entire conv with given survival probability.""" # "Deep Networks with Stochastic Depth", https://arxiv.org/pdf/1603.09382.pdf if not is_training: return inputs # Compute tensor. batch_size = tf.shape(inputs)[0] random_tensor = survival_prob random_tensor += tf.random.uniform([batch_size, 1, 1, 1], dtype=inputs.dtype) binary_tensor = tf.floor(random_tensor) # Unlike conventional way that multiply survival_prob at test time, here we # divide survival_prob at training time, such that no addition compute is # needed at test time. output = inputs / survival_prob * binary_tensor return output def num_params_flops(readable_format=True): """Return number of parameters and flops.""" nparams = np.sum( [np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()]) options = tf.profiler.ProfileOptionBuilder.float_operation() options['output'] = 'none' flops = tf.profiler.profile( tf.get_default_graph(), options=options).total_float_ops # We use flops to denote multiply-adds, which is counted as 2 ops in tfprof. flops = flops // 2 if readable_format: nparams = float(nparams) * 1e-6 flops = float(flops) * 1e-9 return nparams, flops conv_kernel_initializer = tf.initializers.variance_scaling() dense_kernel_initializer = tf.initializers.variance_scaling() class Pair(tuple): def __new__(cls, name, value): return super().__new__(cls, (name, value)) def __init__(self, name, _): # pylint: disable=super-init-not-called self.name = name def scalar(name, tensor, is_tpu=True): """Stores a (name, Tensor) tuple in a custom collection.""" logging.info('Adding scale summary {}'.format(Pair(name, tensor))) if is_tpu: tf.add_to_collection('scalar_summaries', Pair(name, tf.reduce_mean(tensor))) else: tf.summary.scalar(name, tf.reduce_mean(tensor)) def image(name, tensor, is_tpu=True): logging.info('Adding image summary {}'.format(Pair(name, tensor))) if is_tpu: tf.add_to_collection('image_summaries', Pair(name, tensor)) else: tf.summary.image(name, tensor) def get_tpu_host_call(global_step, params): """Get TPU host call for summaries.""" scalar_summaries = tf.get_collection('scalar_summaries') if params['img_summary_steps']: image_summaries = tf.get_collection('image_summaries') else: image_summaries = [] if not scalar_summaries and not image_summaries: return None # No summaries to write. model_dir = params['model_dir'] iterations_per_loop = params.get('iterations_per_loop', 100) img_steps = params['img_summary_steps'] def host_call_fn(global_step, *args): """Training host call. Creates summaries for training metrics.""" gs = global_step[0] with tf2.summary.create_file_writer( model_dir, max_queue=iterations_per_loop).as_default(): with tf2.summary.record_if(True): for i, _ in enumerate(scalar_summaries): name = scalar_summaries[i][0] tensor = args[i][0] tf2.summary.scalar(name, tensor, step=gs) if img_steps: with tf2.summary.record_if(lambda: tf.math.equal(gs % img_steps, 0)): # Log images every 1k steps. 
for i, _ in enumerate(image_summaries): name = image_summaries[i][0] tensor = args[i + len(scalar_summaries)] tf2.summary.image(name, tensor, step=gs) return tf.summary.all_v2_summary_ops() reshaped_tensors = [tf.reshape(t, [1]) for _, t in scalar_summaries] reshaped_tensors += [t for _, t in image_summaries] global_step_t = tf.reshape(global_step, [1]) return host_call_fn, [global_step_t] + reshaped_tensors def archive_ckpt(ckpt_eval, ckpt_objective, ckpt_path): """Archive a checkpoint if the metric is better.""" ckpt_dir, ckpt_name = os.path.split(ckpt_path) saved_objective_path = os.path.join(ckpt_dir, 'best_objective.txt') saved_objective = float('-inf') if tf.io.gfile.exists(saved_objective_path): with tf.io.gfile.GFile(saved_objective_path, 'r') as f: saved_objective = float(f.read()) if saved_objective > ckpt_objective: logging.info('Ckpt {} is worse than {}'.format(ckpt_objective, saved_objective)) return False filenames = tf.io.gfile.glob(ckpt_path + '.*') if filenames is None: logging.info('No files to copy for checkpoint {}'.format(ckpt_path)) return False # clear up the backup folder. backup_dir = os.path.join(ckpt_dir, 'backup') if tf.io.gfile.exists(backup_dir): tf.io.gfile.rmtree(backup_dir) # rename the old checkpoints to backup folder. dst_dir = os.path.join(ckpt_dir, 'archive') if tf.io.gfile.exists(dst_dir): logging.info('mv {} to {}'.format(dst_dir, backup_dir)) tf.io.gfile.rename(dst_dir, backup_dir) # Write checkpoints. tf.io.gfile.makedirs(dst_dir) for f in filenames: dest = os.path.join(dst_dir, os.path.basename(f)) tf.io.gfile.copy(f, dest, overwrite=True) ckpt_state = tf.train.generate_checkpoint_state_proto( dst_dir, model_checkpoint_path=os.path.join(dst_dir, ckpt_name)) with tf.io.gfile.GFile(os.path.join(dst_dir, 'checkpoint'), 'w') as f: f.write(str(ckpt_state)) with tf.io.gfile.GFile(os.path.join(dst_dir, 'best_eval.txt'), 'w') as f: f.write('%s' % ckpt_eval) # Update the best objective. with tf.io.gfile.GFile(saved_objective_path, 'w') as f: f.write('%f' % ckpt_objective) logging.info('Copying checkpoint {} to {}'.format(ckpt_path, dst_dir)) return True def parse_image_size(image_size: Union[Text, int, Tuple[int, int]]): """Parse the image size and return (height, width). Args: image_size: A integer, a tuple (H, W), or a string with HxW format. Returns: A tuple of integer (height, width). """ if isinstance(image_size, int): # image_size is integer, with the same width and height. return (image_size, image_size) if isinstance(image_size, str): # image_size is a string with format WxH width, height = image_size.lower().split('x') return (int(height), int(width)) if isinstance(image_size, tuple): return image_size raise ValueError('image_size must be an int, WxH string, or (height, width)' 'tuple. Was %r' % image_size) def get_feat_sizes(image_size: Union[Text, int, Tuple[int, int]], max_level: int): """Get feat widths and heights for all levels. Args: image_size: A integer, a tuple (H, W), or a string with HxW format. max_level: maximum feature level. Returns: feat_sizes: a list of tuples (height, width) for each level. 
""" image_size = parse_image_size(image_size) feat_sizes = [{'height': image_size[0], 'width': image_size[1]}] feat_size = image_size for _ in range(1, max_level + 1): feat_size = ((feat_size[0] - 1) // 2 + 1, (feat_size[1] - 1) // 2 + 1) feat_sizes.append({'height': feat_size[0], 'width': feat_size[1]}) return feat_sizes def verify_feats_size(feats, feat_sizes, min_level, max_level, data_format='channels_last'): """Verify the feature map sizes.""" expected_output_size = feat_sizes[min_level:max_level + 1] for cnt, size in enumerate(expected_output_size): h_id, w_id = (2, 3) if data_format == 'channels_first' else (1, 2) if feats[cnt].shape[h_id] != size['height']: raise ValueError( 'feats[{}] has shape {} but its height should be {}.' '(input_height: {}, min_level: {}, max_level: {}.)'.format( cnt, feats[cnt].shape, size['height'], feat_sizes[0]['height'], min_level, max_level)) if feats[cnt].shape[w_id] != size['width']: raise ValueError( 'feats[{}] has shape {} but its width should be {}.' '(input_width: {}, min_level: {}, max_level: {}.)'.format( cnt, feats[cnt].shape, size['width'], feat_sizes[0]['width'], min_level, max_level)) @contextlib.contextmanager def float16_scope(): """Scope class for float16.""" def _custom_getter(getter, *args, **kwargs): """Returns a custom getter that methods must be called under.""" cast_to_float16 = False requested_dtype = kwargs['dtype'] if requested_dtype == tf.float16: kwargs['dtype'] = tf.float32 cast_to_float16 = True var = getter(*args, **kwargs) if cast_to_float16: var = tf.cast(var, tf.float16) return var with tf.variable_scope('', custom_getter=_custom_getter) as varscope: yield varscope def set_precision_policy(policy_name: Text = None, loss_scale: bool = False): """Set precision policy according to the name. Args: policy_name: precision policy name, one of 'float32', 'mixed_float16', 'mixed_bfloat16', or None. loss_scale: whether to use loss scale (only for training). """ if not policy_name: return assert policy_name in ('mixed_float16', 'mixed_bfloat16', 'float32') logging.info('use mixed precision policy name %s', policy_name) # TODO(tanmingxing): use tf.keras.layers.enable_v2_dtype_behavior() when it # available in stable TF release. from tensorflow.python.keras.engine import base_layer_utils # pylint: disable=g-import-not-at-top,g-direct-tensorflow-import base_layer_utils.enable_v2_dtype_behavior() # mixed_float16 training is not supported for now, so disable loss_scale. # float32 and mixed_bfloat16 do not need loss scale for training. if loss_scale: policy = tf2.keras.mixed_precision.experimental.Policy(policy_name) else: policy = tf2.keras.mixed_precision.experimental.Policy( policy_name, loss_scale=None) tf2.keras.mixed_precision.experimental.set_policy(policy) def build_model_with_precision(pp, mm, ii, tt, *args, **kwargs): """Build model with its inputs/params for a specified precision context. This is highly specific to this codebase, and not intended to be general API. Advanced users only. DO NOT use it if you don't know what it does. NOTE: short argument names are intended to avoid conficts with kwargs. Args: pp: A string, precision policy name, such as "mixed_float16". mm: A function, for rmodel builder. ii: A tensor, for model inputs. tt: A bool, If true, it is for training; otherwise, it is for eval. *args: A list of model arguments. **kwargs: A dict, extra model parameters. Returns: the output of mm model. 
""" if pp == 'mixed_bfloat16': set_precision_policy(pp) inputs = tf.cast(ii, tf.bfloat16) with tf.tpu.bfloat16_scope(): outputs = mm(inputs, *args, **kwargs) set_precision_policy('float32') elif pp == 'mixed_float16': set_precision_policy(pp, loss_scale=tt) inputs = tf.cast(ii, tf.float16) with float16_scope(): outputs = mm(inputs, *args, **kwargs) set_precision_policy('float32') elif not pp or pp == 'float32': outputs = mm(ii, *args, **kwargs) else: raise ValueError('Unknow precision name {}'.format(pp)) # Users are responsible to convert the dtype of all outputs. return outputs
DeepLearningExamples-master
TensorFlow2/Detection/Efficientdet/utils/model_utils.py
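# A minimal, self-contained sketch (not part of the repository) illustrating the
# feature-size arithmetic used by get_feat_sizes above: each level halves the
# previous resolution with ceiling division, so a 640x640 input yields
# 320, 160, 80, 40, 20, 10, 5 for levels 1..7. The sketch_* names are
# illustrative only, not repository APIs.

def _halve_with_ceiling(size):
  """Ceiling-divide (height, width) by 2, matching (x - 1) // 2 + 1."""
  return ((size[0] - 1) // 2 + 1, (size[1] - 1) // 2 + 1)


def sketch_feat_sizes(image_size=(640, 640), max_level=7):
  sizes = [image_size]
  for _ in range(1, max_level + 1):
    sizes.append(_halve_with_ceiling(sizes[-1]))
  return sizes


if __name__ == '__main__':
  # Expected: [(640, 640), (320, 320), (160, 160), (80, 80), (40, 40),
  #            (20, 20), (10, 10), (5, 5)]
  print(sketch_feat_sizes())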
# Copyright 2020 Google Research. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """WBF for test-time augmentation.""" import tensorflow as tf def vectorized_iou(clusters, detection): """Calculates the ious for box with each element of clusters.""" x11, y11, x12, y12 = tf.split(clusters[:, 1:5], 4, axis=1) x21, y21, x22, y22 = tf.split(detection[1:5], 4) xa = tf.maximum(x11, x21) ya = tf.maximum(y11, y21) xb = tf.minimum(x12, x22) yb = tf.minimum(y12, y22) inter_area = tf.maximum((xb - xa), 0) * tf.maximum((yb - ya), 0) boxa_area = (x12 - x11) * (y12 - y11) boxb_area = (x22 - x21) * (y22 - y21) iou = inter_area / (boxa_area + boxb_area - inter_area) return iou def find_matching_cluster(clusters, detection): """Returns the index of the highest iou matching cluster for detection.""" if not clusters: return -1 ious = vectorized_iou(tf.stack(clusters), detection) ious = tf.reshape(ious, [len(clusters)]) if tf.math.reduce_max(ious) < 0.55: # returns -1 if no iou is higher than 0.55. return -1 return tf.argmax(ious) def weighted_average(samples, weights): return tf.math.reduce_sum(samples * weights) / tf.math.reduce_sum(weights) def average_detections(detections, num_models): """Takes a list of detections and returns the average, both in box co-ordinates and confidence.""" num_detections = len(detections) detections = tf.stack(detections) return [ detections[0][0], weighted_average(detections[:, 1], detections[:, 5]), weighted_average(detections[:, 2], detections[:, 5]), weighted_average(detections[:, 3], detections[:, 5]), weighted_average(detections[:, 4], detections[:, 5]), tf.math.reduce_mean(detections[:, 5]) * min(1, num_detections/num_models), detections[0][6], ] def ensemble_detections(params, detections, num_models): """Ensembles a group of detections by clustering the detections and returning the average of the clusters.""" all_clusters = [] for cid in range(params['num_classes']): indices = tf.where(tf.equal(detections[:, 6], cid)) if indices.shape[0] == 0: continue class_detections = tf.gather_nd(detections, indices) clusters = [] cluster_averages = [] for d in class_detections: cluster_index = find_matching_cluster(cluster_averages, d) if cluster_index == -1: clusters.append([d]) cluster_averages.append(average_detections([d], num_models)) else: clusters[cluster_index].append(d) cluster_averages[cluster_index] = average_detections( clusters[cluster_index], num_models) all_clusters.extend(cluster_averages) all_clusters.sort(reverse=True, key=lambda d: d[5]) return tf.stack(all_clusters)
DeepLearningExamples-master
TensorFlow2/Detection/Efficientdet/utils/wbf.py
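# A small self-contained sketch (not part of the repository) of the pairwise IoU
# computation that vectorized_iou above applies over a whole cluster. The box
# layout here is simply (min_a, min_b, max_a, max_b) in any consistent axis
# order, and the numbers are made up for illustration.

def sketch_iou(box_a, box_b):
  """Intersection-over-union of two axis-aligned boxes."""
  xa = max(box_a[0], box_b[0])
  ya = max(box_a[1], box_b[1])
  xb = min(box_a[2], box_b[2])
  yb = min(box_a[3], box_b[3])
  inter = max(xb - xa, 0.0) * max(yb - ya, 0.0)
  area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
  area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
  return inter / (area_a + area_b - inter)


if __name__ == '__main__':
  # Two unit squares offset by half a side overlap with IoU = 1/3.
  print(sketch_iou((0., 0., 1., 1.), (0.5, 0., 1.5, 1.)))  # ~0.3333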
# Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import horovod.tensorflow.keras as hvd


def get_rank():
  try:
    return hvd.rank()
  except Exception:  # Horovod is not initialized; fall back to single-process defaults.
    return 0


def get_world_size():
  try:
    return hvd.size()
  except Exception:
    return 1


def is_main_process():
  return get_rank() == 0
DeepLearningExamples-master
TensorFlow2/Detection/Efficientdet/utils/horovod_utils.py
import os
import multiprocessing
import numpy as np
import tensorflow as tf
import horovod.tensorflow as hvd

from utils.horovod_utils import get_world_size


def set_flags(params, hps, training=True):
  if training and params.set_num_threads:
    os.environ['TF_NUM_INTRAOP_THREADS'] = '1'
    os.environ['TF_NUM_INTEROP_THREADS'] = str(max(2, (multiprocessing.cpu_count() // hvd.size()) - 2))

  if params.use_xla:
    # tf_xla_enable_lazy_compilation must be set before TensorFlow is imported for
    # the first time, so setting it in this function would have no effect; it is
    # therefore already set in the Dockerfile. The remaining XLA flags are set here.
    TF_XLA_FLAGS = os.environ['TF_XLA_FLAGS']  # contains tf_xla_enable_lazy_compilation
    os.environ['TF_XLA_FLAGS'] = TF_XLA_FLAGS + " --tf_xla_auto_jit=1"
    os.environ['TF_EXTRA_PTXAS_OPTIONS'] = "-sw200428197=true"
    tf.keras.backend.clear_session()
    tf.config.optimizer.set_jit(True)

  gpus = tf.config.list_physical_devices('GPU')
  for gpu in gpus:
    tf.config.experimental.set_memory_growth(gpu, True)
    assert tf.config.experimental.get_memory_growth(gpu)
  tf.config.experimental.set_visible_devices(gpus, 'GPU')
  if gpus and training:
    tf.config.experimental.set_visible_devices(gpus[hvd.local_rank()], 'GPU')

  if training:
    np.random.seed(params.seed)
    tf.random.set_seed(params.seed)

  if hps.mixed_precision:
    policy = tf.keras.mixed_precision.experimental.Policy('mixed_float16')
    tf.keras.mixed_precision.experimental.set_policy(policy)
  else:
    os.environ['TF_ENABLE_AUTO_MIXED_PRECISION'] = '0'
DeepLearningExamples-master
TensorFlow2/Detection/Efficientdet/utils/setup.py
# Copyright 2020 Google Research. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Common keras utils.""" from typing import Text from absl import logging import tensorflow as tf from model import normalization_builder def build_batch_norm(is_training_bn: bool, beta_initializer: Text = 'zeros', gamma_initializer: Text = 'ones', data_format: Text = 'channels_last', momentum: float = 0.99, epsilon: float = 1e-3, name: Text = 'tpu_batch_normalization'): """Build a batch normalization layer. Args: is_training_bn: `bool` for whether the model is training. beta_initializer: `str`, beta initializer. gamma_initializer: `str`, gamma initializer. data_format: `str` either "channels_first" for `[batch, channels, height, width]` or "channels_last for `[batch, height, width, channels]`. momentum: `float`, momentume of batch norm. epsilon: `float`, small value for numerical stability. name: the name of the batch normalization layer Returns: A normalized `Tensor` with the same `data_format`. """ axis = 1 if data_format == 'channels_first' else -1 batch_norm_class = normalization_builder.batch_norm_class(is_training_bn) bn_layer = batch_norm_class( axis=axis, momentum=momentum, epsilon=epsilon, center=True, scale=True, beta_initializer=beta_initializer, gamma_initializer=gamma_initializer, name=name) return bn_layer def get_ema_vars(model): """Get all exponential moving average (ema) variables.""" ema_vars = model.trainable_weights for v in model.weights: # We maintain mva for batch norm moving mean and variance as well. if 'moving_mean' in v.name or 'moving_variance' in v.name: ema_vars.append(v) ema_vars_dict = dict() # Remove duplicate vars for var in ema_vars: ema_vars_dict[var.ref()] = var return ema_vars_dict def average_name(ema, var): """Returns the name of the `Variable` holding the average for `var`. A hacker for tf2. Args: ema: A `ExponentialMovingAverage` object. var: A `Variable` object. Returns: A string: The name of the variable that will be used or was used by the `ExponentialMovingAverage class` to hold the moving average of `var`. """ if var.ref() in ema._averages: # pylint: disable=protected-access return ema._averages[var.ref()].name.split(':')[0] # pylint: disable=protected-access return tf.compat.v1.get_default_graph().unique_name( var.name.split(':')[0] + '/' + ema.name, mark_as_used=False) def restore_ckpt(model, ckpt_path_or_file, ema_decay=0.9998, steps_per_epoch=0, skip_mismatch=True, expect_partial=False): """Restore variables from a given checkpoint. Args: model: the keras model to be restored. ckpt_path_or_file: the path or file for checkpoint. ema_decay: ema decay rate. If None or zero or negative value, disable ema. steps_per_epoch: number of iterations in each training epoch skip_mismatch: whether to skip variables if shape mismatch. 
    expect_partial: this will suppress warnings when variables mismatch
  """
  if ckpt_path_or_file == '_':
    logging.info('Running test: do not load any ckpt.')
    return
  if tf.io.gfile.isdir(ckpt_path_or_file):
    ckpt_path_or_file = tf.train.latest_checkpoint(ckpt_path_or_file)
    if not ckpt_path_or_file:
      return 0

  if (tf.train.list_variables(ckpt_path_or_file)[0][0] ==
      '_CHECKPOINTABLE_OBJECT_GRAPH'):
    if expect_partial:
      model.load_weights(ckpt_path_or_file).expect_partial()
    else:
      model.load_weights(ckpt_path_or_file)
    logging.info('Restored checkpoint with load_weights method!')
  else:
    if ema_decay > 0:
      ema = tf.train.ExponentialMovingAverage(decay=0.0)
      ema_vars = get_ema_vars(model)
      var_dict = {
          average_name(ema, var): var for (ref, var) in ema_vars.items()
      }
    else:
      ema_vars = get_ema_vars(model)
      var_dict = {
          var.name.split(':')[0]: var for (ref, var) in ema_vars.items()
      }
    # add variables that are not in var_dict
    for v in model.weights:
      if v.ref() not in ema_vars:
        var_dict[v.name.split(':')[0]] = v
    # try to load graph-based checkpoint with ema support,
    # else load checkpoint via keras.load_weights which doesn't support ema.
    for i, (key, var) in enumerate(var_dict.items()):
      try:
        var.assign(tf.train.load_variable(ckpt_path_or_file, key))
        if i < 10:
          logging.info('Init %s from %s (%s)', var.name, key, ckpt_path_or_file)
      except tf.errors.NotFoundError as e:
        if skip_mismatch:
          logging.warning('Not found %s in %s', key, ckpt_path_or_file)
        else:
          raise e
      except ValueError as e:
        if skip_mismatch:
          logging.warning('%s: %s', key, e)
        else:
          raise e

  if steps_per_epoch > 0:
    last_iteration = model.optimizer.iterations
    ckpt_epoch = last_iteration // steps_per_epoch
    logging.info("Restored checkpoint at epoch: {}".format(ckpt_epoch + 1))
    return ckpt_epoch


def get_mixed_precision_policy():
  current_version = tuple(map(int, tf.__version__.split('.')))[:3]
  threshold_version = (2, 4, 0)  # The threshold TensorFlow version is 2.4.0.
  return tf.keras.mixed_precision.global_policy() if current_version >= threshold_version \
    else tf.keras.mixed_precision.experimental.global_policy()
DeepLearningExamples-master
TensorFlow2/Detection/Efficientdet/utils/util_keras.py
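# A tiny illustration (not part of the repository) of the version gate used by
# get_mixed_precision_policy above: versions compare correctly once parsed into
# integer tuples, with (2, 4, 0) as the threshold the helper uses. The
# sketch_* name is illustrative only.

def sketch_is_at_least(version_str, threshold=(2, 4, 0)):
  return tuple(map(int, version_str.split('.')))[:3] >= threshold


if __name__ == '__main__':
  print(sketch_is_at_least('2.3.1'))   # False -> experimental mixed-precision API
  print(sketch_is_at_least('2.4.0'))   # True  -> non-experimental API
  print(sketch_is_at_least('2.10.1'))  # True (tuple compare, unlike a string compare)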
# Copyright 2020 Google Research. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Training related libraries.""" import re import numpy as np import tensorflow as tf import horovod.tensorflow as hvd from model import iou_utils from model import anchors from model import efficientdet_keras from utils.util_keras import get_mixed_precision_policy class FocalLoss(tf.keras.losses.Loss): """Compute the focal loss between `logits` and the golden `target` values. Focal loss = -(1-pt)^gamma * log(pt) where pt is the probability of being classified to the true class. """ def __init__(self, alpha, gamma, label_smoothing=0.0, **kwargs): """Initialize focal loss. Args: alpha: A float32 scalar multiplying alpha to the loss from positive examples and (1-alpha) to the loss from negative examples. gamma: A float32 scalar modulating loss from hard and easy examples. label_smoothing: Float in [0, 1]. If > `0` then smooth the labels. **kwargs: other params. """ super().__init__(**kwargs) self.alpha = alpha self.gamma = gamma self.label_smoothing = label_smoothing @tf.autograph.experimental.do_not_convert def call(self, y, y_pred): """Compute focal loss for y and y_pred. Args: y: A tuple of (normalizer, y_true), where y_true is the target class. y_pred: A float32 tensor [batch, height_in, width_in, num_predictions]. Returns: the focal loss. """ normalizer, y_true = y alpha = tf.convert_to_tensor(self.alpha, dtype=y_pred.dtype) gamma = tf.convert_to_tensor(self.gamma, dtype=y_pred.dtype) # compute focal loss multipliers before label smoothing, such that it will # not blow up the loss. pred_prob = tf.sigmoid(y_pred) p_t = (y_true * pred_prob) + ((1 - y_true) * (1 - pred_prob)) alpha_factor = y_true * alpha + (1 - y_true) * (1 - alpha) modulating_factor = (1.0 - p_t)**gamma # apply label smoothing for cross_entropy for each entry. y_true = y_true * (1.0 - self.label_smoothing) + 0.5 * self.label_smoothing ce = tf.nn.sigmoid_cross_entropy_with_logits(labels=y_true, logits=y_pred) # compute the final loss and return return alpha_factor * modulating_factor * ce / normalizer class StableFocalLoss(tf.keras.losses.Loss): """Compute the focal loss between `logits` and the golden `target` values. Focal loss = -(1-pt)^gamma * log(pt) where pt is the probability of being classified to the true class. Below are comments/derivations for computing modulator. For brevity, let x = logits, z = targets, r = gamma, and p_t = sigmod(x) for positive samples and 1 - sigmoid(x) for negative examples. The modulator, defined as (1 - P_t)^r, is a critical part in focal loss computation. For r > 0, it puts more weights on hard examples, and less weights on easier ones. However if it is directly computed as (1 - P_t)^r, its back-propagation is not stable when r < 1. The implementation here resolves the issue. 
For positive samples (labels being 1), (1 - p_t)^r = (1 - sigmoid(x))^r = (1 - (1 / (1 + exp(-x))))^r = (exp(-x) / (1 + exp(-x)))^r = exp(log((exp(-x) / (1 + exp(-x)))^r)) = exp(r * log(exp(-x)) - r * log(1 + exp(-x))) = exp(- r * x - r * log(1 + exp(-x))) For negative samples (labels being 0), (1 - p_t)^r = (sigmoid(x))^r = (1 / (1 + exp(-x)))^r = exp(log((1 / (1 + exp(-x)))^r)) = exp(-r * log(1 + exp(-x))) Therefore one unified form for positive (z = 1) and negative (z = 0) samples is: (1 - p_t)^r = exp(-r * z * x - r * log(1 + exp(-x))). """ def __init__(self, alpha, gamma, label_smoothing=0.0, **kwargs): """Initialize focal loss. Args: alpha: A float32 scalar multiplying alpha to the loss from positive examples and (1-alpha) to the loss from negative examples. gamma: A float32 scalar modulating loss from hard and easy examples. label_smoothing: Float in [0, 1]. If > `0` then smooth the labels. **kwargs: other params. """ super().__init__(**kwargs) self.alpha = alpha self.gamma = gamma self.label_smoothing = label_smoothing @tf.autograph.experimental.do_not_convert def call(self, y, y_pred): """Compute focal loss for y and y_pred. Args: y: A tuple of (normalizer, y_true), where y_true is the target class. y_pred: A float32 tensor [batch, height_in, width_in, num_predictions]. Returns: the focal loss. """ normalizer, y_true = y alpha = tf.convert_to_tensor(self.alpha, dtype=y_pred.dtype) gamma = tf.convert_to_tensor(self.gamma, dtype=y_pred.dtype) positive_label_mask = tf.equal(y_true, 1.0) negative_pred = -1.0 * y_pred modulator = tf.exp(gamma * y_true * negative_pred - gamma * tf.math.log1p(tf.exp(negative_pred))) # apply label smoothing for cross_entropy for each entry. y_true = y_true * (1.0 - self.label_smoothing) + 0.5 * self.label_smoothing ce = tf.nn.sigmoid_cross_entropy_with_logits(labels=y_true, logits=y_pred) loss = modulator * ce weighted_loss = tf.where(positive_label_mask, alpha * loss, (1.0 - alpha) * loss) weighted_loss /= normalizer return weighted_loss class BoxLoss(tf.keras.losses.Loss): """L2 box regression loss.""" def __init__(self, delta=0.1, **kwargs): """Initialize box loss. Args: delta: `float`, the point where the huber loss function changes from a quadratic to linear. It is typically around the mean value of regression target. For instances, the regression targets of 512x512 input with 6 anchors on P3-P7 pyramid is about [0.1, 0.1, 0.2, 0.2]. **kwargs: other params. 
""" super().__init__(**kwargs) self.huber = tf.keras.losses.Huber( delta, reduction=tf.keras.losses.Reduction.NONE) @tf.autograph.experimental.do_not_convert def call(self, y_true, box_outputs): num_positives, box_targets = y_true normalizer = num_positives * 4.0 mask = tf.cast(box_targets != 0.0, tf.float32) box_targets = tf.expand_dims(box_targets, axis=-1) box_outputs = tf.expand_dims(box_outputs, axis=-1) box_loss = self.huber(box_targets, box_outputs) * mask box_loss = tf.reduce_sum(box_loss) box_loss /= normalizer return box_loss class BoxIouLoss(tf.keras.losses.Loss): """Box iou loss.""" def __init__(self, iou_loss_type, min_level, max_level, num_scales, aspect_ratios, anchor_scale, image_size, **kwargs): super().__init__(**kwargs) self.iou_loss_type = iou_loss_type self.input_anchors = anchors.Anchors(min_level, max_level, num_scales, aspect_ratios, anchor_scale, image_size) @tf.autograph.experimental.do_not_convert def call(self, y_true, box_outputs): anchor_boxes = tf.tile( self.input_anchors.boxes, [box_outputs.shape[0] // self.input_anchors.boxes.shape[0], 1]) num_positives, box_targets = y_true mask = tf.cast(box_targets != 0.0, box_targets.dtype) box_outputs = anchors.decode_box_outputs(box_outputs, anchor_boxes) * mask box_targets = anchors.decode_box_outputs(box_targets, anchor_boxes) * mask normalizer = num_positives * 4.0 box_iou_loss = iou_utils.iou_loss(box_outputs, box_targets, self.iou_loss_type) box_iou_loss = tf.reduce_sum(box_iou_loss) / normalizer return box_iou_loss class EfficientDetNetTrain(efficientdet_keras.EfficientDetNet): """A customized trainer for EfficientDet. see https://www.tensorflow.org/guide/keras/customizing_what_happens_in_fit """ def _freeze_vars(self): if self.config.var_freeze_expr: return [ v for v in self.trainable_variables if not re.match(self.config.var_freeze_expr, v.name) ] return self.trainable_variables def _reg_l2_loss(self, weight_decay, regex=r'.*(kernel|weight):0$'): """Return regularization l2 loss loss.""" var_match = re.compile(regex) return weight_decay * tf.add_n([ tf.nn.l2_loss(v) for v in self._freeze_vars() if var_match.match(v.name) ]) def _detection_loss(self, cls_outputs, box_outputs, labels, loss_vals): """Computes total detection loss. Computes total detection loss including box and class loss from all levels. Args: cls_outputs: an OrderDict with keys representing levels and values representing logits in [batch_size, height, width, num_anchors]. box_outputs: an OrderDict with keys representing levels and values representing box regression targets in [batch_size, height, width, num_anchors * 4]. labels: the dictionary that returned from dataloader that includes groundtruth targets. loss_vals: A dict of loss values. Returns: total_loss: an integer tensor representing total loss reducing from class and box losses from all levels. cls_loss: an integer tensor representing total class loss. box_loss: an integer tensor representing total box regression loss. box_iou_loss: an integer tensor representing total box iou loss. """ # convert to float32 for loss computing. cls_outputs = [tf.cast(i, tf.float32) for i in cls_outputs] box_outputs = [tf.cast(i, tf.float32) for i in box_outputs] # Sum all positives in a batch for normalization and avoid zero # num_positives_sum, which would lead to inf loss during training num_positives_sum = tf.reduce_sum(labels['mean_num_positives']) + 1.0 levels = range(len(cls_outputs)) cls_losses = [] box_losses = [] for level in levels: # Onehot encoding for classification labels. 
cls_targets_at_level = tf.one_hot(labels['cls_targets_%d' % (level + 3)], self.config.num_classes) if self.config.data_format == 'channels_first': targets_shape = tf.shape(cls_targets_at_level) bs = targets_shape[0] width = targets_shape[2] height = targets_shape[3] cls_targets_at_level = tf.reshape(cls_targets_at_level, [bs, -1, width, height]) else: targets_shape = tf.shape(cls_targets_at_level) bs = targets_shape[0] width = targets_shape[1] height = targets_shape[2] cls_targets_at_level = tf.reshape(cls_targets_at_level, [bs, width, height, -1]) box_targets_at_level = labels['box_targets_%d' % (level + 3)] class_loss_layer = self.loss.get('class_loss', None) if class_loss_layer: cls_loss = class_loss_layer([num_positives_sum, cls_targets_at_level], cls_outputs[level]) if self.config.data_format == 'channels_first': cls_loss = tf.reshape( cls_loss, [bs, -1, width, height, self.config.num_classes]) else: cls_loss = tf.reshape( cls_loss, [bs, width, height, -1, self.config.num_classes]) cls_loss *= tf.cast( tf.expand_dims( tf.not_equal(labels['cls_targets_%d' % (level + 3)], -2), -1), tf.float32) cls_losses.append(tf.reduce_sum(cls_loss)) if self.config.box_loss_weight and self.loss.get('box_loss', None): box_loss_layer = self.loss['box_loss'] box_losses.append( box_loss_layer([num_positives_sum, box_targets_at_level], box_outputs[level])) if self.config.iou_loss_type: box_outputs = tf.concat([tf.reshape(v, [-1, 4]) for v in box_outputs], axis=0) box_targets = tf.concat([ tf.reshape(labels['box_targets_%d' % (level + 3)], [-1, 4]) for level in levels ], axis=0) box_iou_loss_layer = self.loss['box_iou_loss'] box_iou_loss = box_iou_loss_layer([num_positives_sum, box_targets], box_outputs) loss_vals['box_iou_loss'] = box_iou_loss else: box_iou_loss = 0 cls_loss = tf.add_n(cls_losses) if cls_losses else 0 box_loss = tf.add_n(box_losses) if box_losses else 0 total_loss = ( cls_loss + self.config.box_loss_weight * box_loss + self.config.iou_loss_weight * box_iou_loss) loss_vals['det_loss'] = total_loss loss_vals['cls_loss'] = cls_loss loss_vals['box_loss'] = box_loss return total_loss def train_step(self, data): """Train step. Args: data: Tuple of (images, labels). Image tensor with shape [batch_size, height, width, 3]. The height and width are fixed and equal.Input labels in a dictionary. The labels include class targets and box targets which are dense label maps. The labels are generated from get_input_fn function in data/dataloader.py. Returns: A dict record loss info. 
""" images, labels = data with tf.GradientTape() as tape: if len(self.config.heads) == 2: cls_outputs, box_outputs, seg_outputs = self(images, training=True) elif 'object_detection' in self.config.heads: cls_outputs, box_outputs = self(images, training=True) elif 'segmentation' in self.config.heads: seg_outputs, = self(images, training=True) total_loss = 0 loss_vals = {} if 'object_detection' in self.config.heads: det_loss = self._detection_loss(cls_outputs, box_outputs, labels, loss_vals) total_loss += det_loss if 'segmentation' in self.config.heads: seg_loss_layer = self.loss['seg_loss'] seg_loss = seg_loss_layer(labels['image_masks'], seg_outputs) total_loss += seg_loss loss_vals['seg_loss'] = seg_loss reg_l2_loss = self._reg_l2_loss(self.config.weight_decay) loss_vals['reg_l2_loss'] = reg_l2_loss total_loss += reg_l2_loss if isinstance(self.optimizer, tf.keras.mixed_precision.LossScaleOptimizer): scaled_loss = self.optimizer.get_scaled_loss(total_loss) optimizer = self.optimizer._optimizer else: scaled_loss = total_loss optimizer = self.optimizer compress = get_mixed_precision_policy().compute_dtype == 'float16' tape = hvd.DistributedGradientTape(tape, compression=hvd.Compression.fp16 \ if compress else hvd.Compression.none) loss_vals['loss'] = total_loss loss_vals['learning_rate'] = optimizer.learning_rate(optimizer.iterations) trainable_vars = self._freeze_vars() scaled_gradients = tape.gradient(scaled_loss, trainable_vars) if isinstance(self.optimizer, tf.keras.mixed_precision.LossScaleOptimizer): gradients = self.optimizer.get_unscaled_gradients(scaled_gradients) else: gradients = scaled_gradients if self.config.clip_gradients_norm > 0: clip_norm = abs(self.config.clip_gradients_norm) gradients = [ tf.clip_by_norm(g, clip_norm) if g is not None else None for g in gradients ] gradients, _ = tf.clip_by_global_norm(gradients, clip_norm) loss_vals['gradient_norm'] = tf.linalg.global_norm(gradients) self.optimizer.apply_gradients(zip(gradients, trainable_vars)) return loss_vals def test_step(self, data): """Test step. Args: data: Tuple of (images, labels). Image tensor with shape [batch_size, height, width, 3]. The height and width are fixed and equal.Input labels in a dictionary. The labels include class targets and box targets which are dense label maps. The labels are generated from get_input_fn function in data/dataloader.py. Returns: A dict record loss info. """ images, labels = data if len(self.config.heads) == 2: cls_outputs, box_outputs, seg_outputs = self(images, training=False) elif 'object_detection' in self.config.heads: cls_outputs, box_outputs = self(images, training=False) elif 'segmentation' in self.config.heads: seg_outputs, = self(images, training=False) reg_l2loss = self._reg_l2_loss(self.config.weight_decay) total_loss = reg_l2loss loss_vals = {} if 'object_detection' in self.config.heads: det_loss = self._detection_loss(cls_outputs, box_outputs, labels, loss_vals) total_loss += det_loss if 'segmentation' in self.config.heads: seg_loss_layer = self.loss['seg_loss'] seg_loss = seg_loss_layer(labels['image_masks'], seg_outputs) total_loss += seg_loss loss_vals['seg_loss'] = seg_loss loss_vals['loss'] = total_loss return loss_vals
DeepLearningExamples-master
TensorFlow2/Detection/Efficientdet/utils/train_lib.py
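# A short numerical check (not part of the repository) of the StableFocalLoss
# derivation above: the unified modulator
#   (1 - p_t)^gamma = exp(-gamma * z * x - gamma * log(1 + exp(-x)))
# should match the direct computation for both positive (z = 1) and negative
# (z = 0) labels. gamma = 1.5 mirrors the default config; the logits are
# arbitrary values chosen for illustration.

import numpy as np


def direct_modulator(x, z, gamma):
  p = 1.0 / (1.0 + np.exp(-x))          # sigmoid(x)
  p_t = z * p + (1 - z) * (1 - p)       # probability of the true class
  return (1.0 - p_t) ** gamma


def stable_modulator(x, z, gamma):
  return np.exp(-gamma * z * x - gamma * np.log1p(np.exp(-x)))


if __name__ == '__main__':
  x = np.array([-3.0, -0.5, 0.0, 0.5, 3.0])
  for z in (0.0, 1.0):
    assert np.allclose(direct_modulator(x, z, 1.5), stable_modulator(x, z, 1.5))
  print('stable and direct focal-loss modulators agree')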
# Copyright 2020 Google Research. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Hparams for model architecture and trainer.""" import ast import collections import copy from typing import Any, Dict, Text import six import tensorflow as tf import yaml def eval_str_fn(val): if val in {'true', 'false'}: return val == 'true' try: return ast.literal_eval(val) except (ValueError, SyntaxError): return val # pylint: disable=protected-access class Config(object): """A config utility class.""" def __init__(self, config_dict=None): self.update(config_dict) def __setattr__(self, k, v): self.__dict__[k] = Config(v) if isinstance(v, dict) else copy.deepcopy(v) def __getattr__(self, k): return self.__dict__[k] def __getitem__(self, k): return self.__dict__[k] def __repr__(self): return repr(self.as_dict()) def __str__(self): try: return yaml.dump(self.as_dict(), indent=4) except TypeError: return str(self.as_dict()) def _update(self, config_dict, allow_new_keys=True): """Recursively update internal members.""" if not config_dict: return for k, v in six.iteritems(config_dict): if k not in self.__dict__: if allow_new_keys: self.__setattr__(k, v) else: raise KeyError('Key `{}` does not exist for overriding. 
'.format(k)) else: if isinstance(self.__dict__[k], Config) and isinstance(v, dict): self.__dict__[k]._update(v, allow_new_keys) elif isinstance(self.__dict__[k], Config) and isinstance(v, Config): self.__dict__[k]._update(v.as_dict(), allow_new_keys) else: self.__setattr__(k, v) def get(self, k, default_value=None): return self.__dict__.get(k, default_value) def update(self, config_dict): """Update members while allowing new keys.""" self._update(config_dict, allow_new_keys=True) def keys(self): return self.__dict__.keys() def override(self, config_dict_or_str, allow_new_keys=False): """Update members while disallowing new keys.""" if isinstance(config_dict_or_str, str): if not config_dict_or_str: return elif '=' in config_dict_or_str: config_dict = self.parse_from_str(config_dict_or_str) elif config_dict_or_str.endswith('.yaml'): config_dict = self.parse_from_yaml(config_dict_or_str) else: raise ValueError( 'Invalid string {}, must end with .yaml or contains "=".'.format( config_dict_or_str)) elif isinstance(config_dict_or_str, dict): config_dict = config_dict_or_str else: raise ValueError('Unknown value type: {}'.format(config_dict_or_str)) self._update(config_dict, allow_new_keys) def parse_from_yaml(self, yaml_file_path: Text) -> Dict[Any, Any]: """Parses a yaml file and returns a dictionary.""" with tf.io.gfile.GFile(yaml_file_path, 'r') as f: config_dict = yaml.load(f, Loader=yaml.FullLoader) return config_dict def save_to_yaml(self, yaml_file_path): """Write a dictionary into a yaml file.""" with tf.io.gfile.GFile(yaml_file_path, 'w') as f: yaml.dump(self.as_dict(), f, default_flow_style=False) def parse_from_str(self, config_str: Text) -> Dict[Any, Any]: """Parse a string like 'x.y=1,x.z=2' to nested dict {x: {y: 1, z: 2}}.""" if not config_str: return {} config_dict = {} try: for kv_pair in config_str.split(','): if not kv_pair: # skip empty string continue key_str, value_str = kv_pair.split('=') key_str = key_str.strip() def add_kv_recursive(k, v): """Recursively parse x.y.z=tt to {x: {y: {z: tt}}}.""" if '.' not in k: if '*' in v: # we reserve * to split arrays. return {k: [eval_str_fn(vv) for vv in v.split('*')]} return {k: eval_str_fn(v)} pos = k.index('.') return {k[:pos]: add_kv_recursive(k[pos + 1:], v)} def merge_dict_recursive(target, src): """Recursively merge two nested dictionary.""" for k in src.keys(): if ((k in target and isinstance(target[k], dict) and isinstance(src[k], collections.abc.Mapping))): merge_dict_recursive(target[k], src[k]) else: target[k] = src[k] merge_dict_recursive(config_dict, add_kv_recursive(key_str, value_str)) return config_dict except ValueError: raise ValueError('Invalid config_str: {}'.format(config_str)) def as_dict(self): """Returns a dict representation.""" config_dict = {} for k, v in six.iteritems(self.__dict__): if isinstance(v, Config): config_dict[k] = v.as_dict() else: config_dict[k] = copy.deepcopy(v) return config_dict # pylint: enable=protected-access def default_detection_configs(): """Returns a default detection configs.""" h = Config() # model name. h.name = 'efficientdet-d1' # activation type: see activation_fn in model/activation_builder.py. h.act_type = 'swish' # input preprocessing parameters h.image_size = 640 # An integer or a string WxH such as 640x320. 
h.target_size = None h.input_rand_hflip = True h.jitter_min = 0.1 h.jitter_max = 2.0 h.sample_image = None h.shuffle_buffer = 5000 # dataset specific parameters h.num_classes = 91 h.seg_num_classes = 3 # segmentation classes h.heads = ['object_detection'] # 'object_detection', 'segmentation' h.skip_crowd_during_training = True h.label_map = None # a dict or a string of 'coco', 'voc', 'waymo'. h.max_instances_per_image = 100 # Default to 100 for COCO. h.regenerate_source_id = False # model architecture h.min_level = 3 h.max_level = 7 h.num_scales = 3 h.aspect_ratios = [(1.0, 1.0), (1.4, 0.7), (0.7, 1.4)] h.anchor_scale = 4.0 # is batchnorm training mode h.is_training_bn = True # optimization h.momentum = 0.9 h.optimizer = 'sgd' h.learning_rate = 0.08 # 0.008 for adam. h.lr_warmup_init = 0.008 # 0.0008 for adam. h.lr_warmup_epoch = 1.0 h.clip_gradients_norm = 10.0 h.num_epochs = 300 h.data_format = 'channels_last' # classification loss h.label_smoothing = 0.0 # 0.1 is a good default # Behold the focal loss parameters h.alpha = 0.25 h.gamma = 1.5 # localization loss h.delta = 0.1 # regularization parameter of huber loss. # total loss = box_loss * box_loss_weight + iou_loss * iou_loss_weight h.box_loss_weight = 50.0 h.iou_loss_type = None h.iou_loss_weight = 1.0 # regularization l2 loss. h.weight_decay = 4e-5 h.mixed_precision = False # If False, use float32. h.mixed_precision_on_inputs = False h.loss_scale = 2**15 # For detection. h.box_class_repeats = 3 h.fpn_cell_repeats = 3 h.fpn_num_filters = 88 h.separable_conv = True h.apply_bn_for_resampling = True h.conv_after_downsample = False h.conv_bn_act_pattern = False h.drop_remainder = True # drop remainder for the final batch eval. # For post-processing nms, must be a dict. h.nms_configs = { 'method': 'gaussian', 'iou_thresh': None, # use the default value based on method. 'score_thresh': 0., 'sigma': None, 'pyfunc': True, 'max_nms_inputs': 5000, 'max_output_size': 100, } # version. h.fpn_name = None h.fpn_weight_method = None h.fpn_config = None # No stochastic depth in default. h.survival_prob = None h.img_summary_steps = None h.lr_decay_method = 'cosine' h.moving_average_decay = 0.9998 h.ckpt_var_scope = None # ckpt variable scope. # If true, skip loading pretrained weights if shape mismatches. h.skip_mismatch = True h.backbone_name = 'efficientnet-b0' h.backbone_config = None h.backbone_init = None h.var_freeze_expr = None # A temporary flag to switch between legacy and keras models. 
h.use_keras_model = True h.dataset_type = None h.positives_momentum = None h.grad_checkpoint = False return h efficientdet_model_param_dict = { 'efficientdet-d0': dict( name='efficientdet-d0', backbone_name='efficientnet-b0', image_size=512, fpn_num_filters=64, fpn_cell_repeats=3, box_class_repeats=3, ), 'efficientdet-d1': dict( name='efficientdet-d1', backbone_name='efficientnet-b1', image_size=640, fpn_num_filters=88, fpn_cell_repeats=4, box_class_repeats=3, ), 'efficientdet-d2': dict( name='efficientdet-d2', backbone_name='efficientnet-b2', image_size=768, fpn_num_filters=112, fpn_cell_repeats=5, box_class_repeats=3, ), 'efficientdet-d3': dict( name='efficientdet-d3', backbone_name='efficientnet-b3', image_size=896, fpn_num_filters=160, fpn_cell_repeats=6, box_class_repeats=4, ), 'efficientdet-d4': dict( name='efficientdet-d4', backbone_name='efficientnet-b4', image_size=1024, fpn_num_filters=224, fpn_cell_repeats=7, box_class_repeats=4, ), 'efficientdet-d5': dict( name='efficientdet-d5', backbone_name='efficientnet-b5', image_size=1280, fpn_num_filters=288, fpn_cell_repeats=7, box_class_repeats=4, ), 'efficientdet-d6': dict( name='efficientdet-d6', backbone_name='efficientnet-b6', image_size=1280, fpn_num_filters=384, fpn_cell_repeats=8, box_class_repeats=5, fpn_weight_method='sum', # Use unweighted sum for stability. ), 'efficientdet-d7': dict( name='efficientdet-d7', backbone_name='efficientnet-b6', image_size=1536, fpn_num_filters=384, fpn_cell_repeats=8, box_class_repeats=5, anchor_scale=5.0, fpn_weight_method='sum', # Use unweighted sum for stability. ), 'efficientdet-d7x': dict( name='efficientdet-d7x', backbone_name='efficientnet-b7', image_size=1536, fpn_num_filters=384, fpn_cell_repeats=8, box_class_repeats=5, anchor_scale=4.0, max_level=8, fpn_weight_method='sum', # Use unweighted sum for stability. ), } def get_efficientdet_config(model_name='efficientdet-d1'): """Get the default config for EfficientDet based on model name.""" h = default_detection_configs() if model_name in efficientdet_model_param_dict: h.override(efficientdet_model_param_dict[model_name]) else: raise ValueError('Unknown model name: {}'.format(model_name)) return h def get_detection_config(model_name): if model_name.startswith('efficientdet'): return get_efficientdet_config(model_name) else: raise ValueError('model name must start with efficientdet.')
DeepLearningExamples-master
TensorFlow2/Detection/Efficientdet/utils/hparams_config.py
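# A self-contained sketch (not part of the repository) of the dotted-key parsing
# idea behind Config.parse_from_str above. It only mirrors the "k.x.y=v"
# recursion; type coercion via eval_str_fn and the '*'-separated list syntax
# are omitted for brevity, so values stay strings. The sketch_* name is
# illustrative only.

def sketch_parse_overrides(config_str):
  """Parse 'a.b=1,a.c=hi' into {'a': {'b': '1', 'c': 'hi'}}."""
  result = {}
  for kv_pair in config_str.split(','):
    if not kv_pair:
      continue
    key, value = kv_pair.split('=')
    node = result
    parts = key.strip().split('.')
    for part in parts[:-1]:
      node = node.setdefault(part, {})
    node[parts[-1]] = value
  return result


if __name__ == '__main__':
  # Expected: {'nms_configs': {'method': 'hard', 'iou_thresh': '0.5'}, 'num_classes': '91'}
  print(sketch_parse_overrides('nms_configs.method=hard,nms_configs.iou_thresh=0.5,num_classes=91'))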
# Lint as: python3 # Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Contains definitions for EfficientNet model. [1] Mingxing Tan, Quoc V. Le EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks. ICML'19, https://arxiv.org/abs/1905.11946 """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import math import os from typing import Any, Dict, Optional, Text, Tuple import copy import tensorflow as tf from efficientnet.layers import simple_swish, hard_swish, identity, gelu, get_activation from efficientnet.blocks import conv2d_block, mb_conv_block from efficientnet.common_modules import round_filters, round_repeats, load_weights from model import dataloader def build_dict(name, args=None): if name == "ModelConfig": return_dict = copy.deepcopy(ModelConfig) elif name == "BlockConfig": return_dict = copy.deepcopy(BlockConfig) else: raise ValueError("Name of requested dictionary not found!") if args is None: return return_dict if isinstance(args, dict): return_dict.update(args) elif isinstance(args, tuple): return_dict.update( {a: p for a, p in zip(list(return_dict.keys()), args)} ) else: raise ValueError("Expected tuple or dict!") return return_dict # Config for a single MB Conv Block. BlockConfig = { 'input_filters': 0, 'output_filters': 0, 'kernel_size': 3, 'num_repeat': 1, 'expand_ratio': 1, 'strides': (1, 1), 'se_ratio': None, 'id_skip': True, 'fused_conv': False, 'conv_type': 'depthwise' } # Default Config for Efficientnet-B0. 
ModelConfig = { 'width_coefficient': 1.0, 'depth_coefficient': 1.0, 'resolution': 224, 'dropout_rate': 0.2, 'blocks': ( # (input_filters, output_filters, kernel_size, num_repeat, # expand_ratio, strides, se_ratio) # pylint: disable=bad-whitespace build_dict(name="BlockConfig", args=(32, 16, 3, 1, 1, (1, 1), 0.25)), build_dict(name="BlockConfig", args=(16, 24, 3, 2, 6, (2, 2), 0.25)), build_dict(name="BlockConfig", args=(24, 40, 5, 2, 6, (2, 2), 0.25)), build_dict(name="BlockConfig", args=(40, 80, 3, 3, 6, (2, 2), 0.25)), build_dict(name="BlockConfig", args=(80, 112, 5, 3, 6, (1, 1), 0.25)), build_dict(name="BlockConfig", args=(112, 192, 5, 4, 6, (2, 2), 0.25)), build_dict(name="BlockConfig", args=(192, 320, 3, 1, 6, (1, 1), 0.25)), # pylint: enable=bad-whitespace ), 'stem_base_filters': 32, 'top_base_filters': 1280, 'activation': 'swish', 'batch_norm': 'default', 'bn_momentum': 0.99, 'bn_epsilon': 1e-3, # While the original implementation used a weight decay of 1e-5, # tf.nn.l2_loss divides it by 2, so we halve this to compensate in Keras 'weight_decay': 5e-6, 'drop_connect_rate': 0.0, 'depth_divisor': 8, 'min_depth': None, 'use_se': True, 'input_channels': 3, 'num_classes': 1000, 'model_name': 'efficientnet', 'rescale_input': True, 'data_format': 'channels_last', 'dtype': 'float32', 'weight_init': 'fan_in', } MODEL_CONFIGS = { # (width, depth, resolution, dropout) 'efficientnet-b0': build_dict(name="ModelConfig", args=(1.0, 1.0, 224, 0.2)), 'efficientnet-b1': build_dict(name="ModelConfig", args=(1.0, 1.1, 240, 0.2)), 'efficientnet-b2': build_dict(name="ModelConfig", args=(1.1, 1.2, 260, 0.3)), 'efficientnet-b3': build_dict(name="ModelConfig", args=(1.2, 1.4, 300, 0.3)), 'efficientnet-b4': build_dict(name="ModelConfig", args=(1.4, 1.8, 380, 0.4)), 'efficientnet-b5': build_dict(name="ModelConfig", args=(1.6, 2.2, 456, 0.4)), 'efficientnet-b6': build_dict(name="ModelConfig", args=(1.8, 2.6, 528, 0.5)), 'efficientnet-b7': build_dict(name="ModelConfig", args=(2.0, 3.1, 600, 0.5)), 'efficientnet-b8': build_dict(name="ModelConfig", args=(2.2, 3.6, 672, 0.5)), 'efficientnet-l2': build_dict(name="ModelConfig", args=(4.3, 5.3, 800, 0.5)), } DENSE_KERNEL_INITIALIZER = { 'class_name': 'VarianceScaling', 'config': { 'scale': 1 / 3.0, 'mode': 'fan_in', 'distribution': 'uniform' } } def efficientnet(image_input: tf.keras.layers.Input, config: dict, features_only: bool): """Creates an EfficientNet graph given the model parameters. This function is wrapped by the `EfficientNet` class to make a tf.keras.Model. Args: image_input: the input batch of images config: the model config features_only: build only feature network Returns: the output of efficientnet """ depth_coefficient = config['depth_coefficient'] blocks = config['blocks'] stem_base_filters = config['stem_base_filters'] top_base_filters = config['top_base_filters'] activation = get_activation(config['activation']) dropout_rate = config['dropout_rate'] drop_connect_rate = config['drop_connect_rate'] num_classes = config['num_classes'] input_channels = config['input_channels'] rescale_input = config['rescale_input'] data_format = tf.keras.backend.image_data_format() dtype = config['dtype'] weight_decay = config['weight_decay'] weight_init = config['weight_init'] endpoints = {} reduction_idx = 0 x = image_input if data_format == 'channels_first': # Happens on GPU/TPU if available. 
x = tf.keras.layers.Permute((3, 1, 2))(x) if rescale_input: processor = dataloader.InputProcessor(image=x, output_size=x.shape) processor.normalize_image(dtype=dtype) x = processor.get_image() # Build stem x = conv2d_block(x, round_filters(stem_base_filters, config), config, kernel_size=[3, 3], strides=[2, 2], activation=activation, name='stem') # Build blocks num_blocks_total = sum( round_repeats(block['num_repeat'], depth_coefficient) for block in blocks) block_num = 0 for stack_idx, block in enumerate(blocks): assert block['num_repeat'] > 0 is_reduction = False # reduction flag for blocks after the stem layer # If the first block has super-pixel (space-to-depth) layer, then stem is # the first reduction point. if (block['strides'] == (2,2) and stack_idx == 0): reduction_idx += 1 endpoints['reduction_%s' % reduction_idx] = x elif ((stack_idx == len(blocks) - 1) or blocks[stack_idx + 1]['strides'][0] > 1): is_reduction = True reduction_idx += 1 # Update block input and output filters based on depth multiplier block.update({ 'input_filters':round_filters(block['input_filters'], config), 'output_filters':round_filters(block['output_filters'], config), 'num_repeat':round_repeats(block['num_repeat'], depth_coefficient)}) # The first block needs to take care of stride and filter size increase drop_rate = drop_connect_rate * float(block_num) / num_blocks_total config.update({'drop_connect_rate': drop_rate}) # TODO(Sugh) replace block_prefix = 'stack_{}/block_0/'.format(stack_idx) x = mb_conv_block(x, block, config, block_prefix) block_num += 1 if block['num_repeat'] > 1: block.update({ 'input_filters':block['output_filters'], 'strides':(1, 1) }) for block_idx in range(block['num_repeat'] - 1): drop_rate = drop_connect_rate * float(block_num) / num_blocks_total config.update({'drop_connect_rate': drop_rate}) block_prefix = 'stack_{}/block_{}/'.format(stack_idx, block_idx + 1) x = mb_conv_block(x, block, config, prefix=block_prefix) block_num += 1 if is_reduction: endpoints['reduction_%s' % reduction_idx] = x # Build top if not features_only: x = conv2d_block(x, round_filters(top_base_filters, config), config, activation=activation, name='top') # Build classifier DENSE_KERNEL_INITIALIZER['config']['mode'] = weight_init x = tf.keras.layers.GlobalAveragePooling2D(name='top_pool')(x) if dropout_rate and dropout_rate > 0: x = tf.keras.layers.Dropout(dropout_rate, name='top_dropout')(x) x = tf.keras.layers.Dense( num_classes, kernel_initializer=DENSE_KERNEL_INITIALIZER, kernel_regularizer=tf.keras.regularizers.l2(weight_decay), bias_regularizer=tf.keras.regularizers.l2(weight_decay), name='logits')(x) x = tf.keras.layers.Activation('softmax', name='probs', dtype=tf.float32)(x) return [x] + list( filter(lambda endpoint: endpoint is not None, [ endpoints.get('reduction_1'), endpoints.get('reduction_2'), endpoints.get('reduction_3'), endpoints.get('reduction_4'), endpoints.get('reduction_5'), ])) @tf.keras.utils.register_keras_serializable(package='Vision') class EfficientNet(tf.keras.Model): """Wrapper class for an EfficientNet Keras model. Contains helper methods to build, manage, and save metadata about the model. """ def __init__(self, config: Dict[Text, Any] = None, features_only: bool = None, overrides: Dict[Text, Any] = None): """Create an EfficientNet model. 
Args: config: (optional) the main model parameters to create the model features_only: (optional) build the base feature network only overrides: (optional) a dict containing keys that can override config """ overrides = overrides or {} config = config or build_dict(name="ModelConfig") self.config = config self.config.update(overrides) input_channels = self.config['input_channels'] model_name = self.config['model_name'] input_shape = (None, None, input_channels) # Should handle any size image image_input = tf.keras.layers.Input(shape=input_shape) output = efficientnet(image_input, self.config, features_only) # Cast to float32 in case we have a different model dtype # output = tf.cast(output, tf.float32) super(EfficientNet, self).__init__( inputs=image_input, outputs=output, name=model_name) @classmethod def from_name(cls, model_name: Text, features_only: bool = None, model_weights_path: Text = None, weights_format: Text = 'saved_model', overrides: Dict[Text, Any] = None): """Construct an EfficientNet model from a predefined model name. E.g., `EfficientNet.from_name('efficientnet-b0')`. Args: model_name: the predefined model name features_only: (optional) build the base feature network only model_weights_path: the path to the weights (h5 file or saved model dir) weights_format: the model weights format. One of 'saved_model', 'h5', or 'checkpoint'. overrides: (optional) a dict containing keys that can override config Returns: A constructed EfficientNet instance. """ model_configs = dict(MODEL_CONFIGS) overrides = dict(overrides) if overrides else {} # One can define their own custom models if necessary model_configs.update(overrides.pop('model_config', {})) if model_name not in model_configs: raise ValueError('Unknown model name {}'.format(model_name)) config = model_configs[model_name] model = cls(config=config, overrides=overrides, features_only=features_only) if model_weights_path: if weights_format == 'checkpoint' and tf.io.gfile.isdir(model_weights_path): model_weights_path = tf.train.latest_checkpoint(model_weights_path) load_weights(model, model_weights_path, weights_format=weights_format) return model
DeepLearningExamples-master
TensorFlow2/Detection/Efficientdet/efficientnet/efficientnet_model.py
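# A minimal sketch (not part of the repository) of the positional-tuple-to-dict
# trick used by build_dict above: a tuple of args is zipped against the
# template's key order, so (32, 16, 3, 1, 1, (1, 1), 0.25) fills
# input_filters, output_filters, kernel_size, num_repeat, expand_ratio,
# strides and se_ratio in that order. Like the helper above, this relies on
# dicts preserving insertion order (Python 3.7+). Names here are illustrative.

if __name__ == '__main__':
  template = {
      'input_filters': 0,
      'output_filters': 0,
      'kernel_size': 3,
      'num_repeat': 1,
      'expand_ratio': 1,
      'strides': (1, 1),
      'se_ratio': None,
  }
  args = (32, 16, 3, 1, 1, (1, 1), 0.25)
  block = dict(template)
  block.update({k: v for k, v in zip(template.keys(), args)})
  print(block)  # {'input_filters': 32, 'output_filters': 16, ..., 'se_ratio': 0.25}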
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Common modeling utilities.""" from __future__ import absolute_import from __future__ import division # from __future__ import google_type_annotations from __future__ import print_function import numpy as np import math import tensorflow as tf from typing import Text, Optional __all__ = ['count_params', 'load_weights', 'round_filters', 'round_repeats'] def count_params(model, trainable_only=True): """Returns the count of all model parameters, or just trainable ones.""" if not trainable_only: return model.count_params() else: return int(np.sum([tf.keras.backend.count_params(p) for p in model.trainable_weights])) def load_weights(model: tf.keras.Model, model_weights_path: Text, weights_format: Text = 'saved_model'): """Load model weights from the given file path. Args: model: the model to load weights into model_weights_path: the path of the model weights weights_format: the model weights format. One of 'saved_model', 'h5', or 'checkpoint'. """ if weights_format == 'saved_model': loaded_model = tf.keras.models.load_model(model_weights_path) model.set_weights(loaded_model.get_weights()) else: model.load_weights(model_weights_path).expect_partial() def round_filters(filters: int, config: dict) -> int: """Round number of filters based on width coefficient.""" width_coefficient = config['width_coefficient'] min_depth = config['min_depth'] divisor = config['depth_divisor'] orig_filters = filters if not width_coefficient: return filters filters *= width_coefficient min_depth = min_depth or divisor new_filters = max(min_depth, int(filters + divisor / 2) // divisor * divisor) # Make sure that round down does not go down by more than 10%. if new_filters < 0.9 * filters: new_filters += divisor return int(new_filters) def round_repeats(repeats: int, depth_coefficient: float) -> int: """Round number of repeats based on depth coefficient.""" return int(math.ceil(depth_coefficient * repeats))
DeepLearningExamples-master
TensorFlow2/Detection/Efficientdet/efficientnet/common_modules.py
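# A tiny worked example (not part of the repository) of the round_filters
# arithmetic defined above: filters are scaled by the width coefficient,
# snapped to a multiple of the depth divisor, and bumped up one divisor if
# rounding down lost more than 10%. The sketch_* name and the sample numbers
# are illustrative only.

def sketch_round_filters(filters, width_coefficient, divisor=8, min_depth=None):
  filters *= width_coefficient
  min_depth = min_depth or divisor
  new_filters = max(min_depth, int(filters + divisor / 2) // divisor * divisor)
  if new_filters < 0.9 * filters:
    new_filters += divisor
  return int(new_filters)


if __name__ == '__main__':
  # EfficientNet-B2 style width coefficient 1.1:
  print(sketch_round_filters(32, 1.1))  # 32 * 1.1 = 35.2 -> 32 (within 10% after snapping to 8)
  print(sketch_round_filters(40, 1.1))  # 40 * 1.1 = 44.0 -> 48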
from efficientnet.blocks.conv2d_block import conv2d_block from efficientnet.blocks.mb_conv_block import mb_conv_block __all__ = ['conv2d_block', 'mb_conv_block']
DeepLearningExamples-master
TensorFlow2/Detection/Efficientdet/efficientnet/blocks/__init__.py
import tensorflow as tf from typing import Any, Dict, Optional, Text, Tuple from model import normalization_builder __all__ = ['conv2d_block'] CONV_KERNEL_INITIALIZER = { 'class_name': 'VarianceScaling', 'config': { 'scale': 2.0, 'mode': 'fan_in', # Note: this is a truncated normal distribution 'distribution': 'normal' } } def conv2d_block(inputs: tf.Tensor, conv_filters: Optional[int], config: dict, kernel_size: Any = (1, 1), strides: Any = (1, 1), use_batch_norm: bool = True, use_bias: bool = False, activation: Any = None, depthwise: bool = False, name: Text = None): """A conv2d followed by batch norm and an activation.""" batch_norm = normalization_builder.batch_norm_class() bn_momentum = config['bn_momentum'] bn_epsilon = config['bn_epsilon'] data_format = tf.keras.backend.image_data_format() weight_decay = config['weight_decay'] name = name or '' # Collect args based on what kind of conv2d block is desired init_kwargs = { 'kernel_size': kernel_size, 'strides': strides, 'use_bias': use_bias, 'padding': 'same', 'name': name + '_conv2d', 'kernel_regularizer': tf.keras.regularizers.l2(weight_decay), 'bias_regularizer': tf.keras.regularizers.l2(weight_decay), } CONV_KERNEL_INITIALIZER['config']['mode'] = config['weight_init'] if depthwise: conv2d = tf.keras.layers.DepthwiseConv2D init_kwargs.update({'depthwise_initializer': CONV_KERNEL_INITIALIZER}) else: conv2d = tf.keras.layers.Conv2D init_kwargs.update({'filters': conv_filters, 'kernel_initializer': CONV_KERNEL_INITIALIZER}) x = conv2d(**init_kwargs)(inputs) if use_batch_norm: bn_axis = 1 if data_format == 'channels_first' else -1 x = batch_norm(axis=bn_axis, momentum=bn_momentum, epsilon=bn_epsilon, name=name + '_bn')(x) if activation is not None: x = tf.keras.layers.Activation(activation, name=name + '_activation')(x) return x
DeepLearningExamples-master
TensorFlow2/Detection/Efficientdet/efficientnet/blocks/conv2d_block.py
import tensorflow as tf from typing import Any, Dict, Optional, Text, Tuple from efficientnet.layers import get_activation from efficientnet.blocks import conv2d_block __all__ = ['mb_conv_block'] def mb_conv_block(inputs: tf.Tensor, block: dict, config: dict, prefix: Text = None): """Mobile Inverted Residual Bottleneck. Args: inputs: the Keras input to the block block: BlockConfig, arguments to create a Block config: ModelConfig, a set of model parameters prefix: prefix for naming all layers Returns: the output of the block """ use_se = config['use_se'] activation = get_activation(config['activation']) drop_connect_rate = config['drop_connect_rate'] data_format = tf.keras.backend.image_data_format() use_depthwise = block['conv_type'] != 'no_depthwise' prefix = prefix or '' filters = block['input_filters'] * block['expand_ratio'] x = inputs if block['fused_conv']: # If we use fused mbconv, skip expansion and use regular conv. x = conv2d_block(x, filters, config, kernel_size=block['kernel_size'], strides=block['strides'], activation=activation, name=prefix + 'fused') else: if block['expand_ratio'] != 1: # Expansion phase kernel_size = (1, 1) if use_depthwise else (3, 3) x = conv2d_block(x, filters, config, kernel_size=kernel_size, activation=activation, name=prefix + 'expand') # Depthwise Convolution if use_depthwise: x = conv2d_block(x, conv_filters=None, config=config, kernel_size=block['kernel_size'], strides=block['strides'], activation=activation, depthwise=True, name=prefix + 'depthwise') # Squeeze and Excitation phase if use_se: assert block['se_ratio'] is not None assert 0 < block['se_ratio'] <= 1 num_reduced_filters = max(1, int( block['input_filters'] * block['se_ratio'] )) if data_format == 'channels_first': se_shape = (filters, 1, 1) else: se_shape = (1, 1, filters) se = tf.keras.layers.GlobalAveragePooling2D(name=prefix + 'se_squeeze')(x) se = tf.keras.layers.Reshape(se_shape, name=prefix + 'se_reshape')(se) se = conv2d_block(se, num_reduced_filters, config, use_bias=True, use_batch_norm=False, activation=activation, name=prefix + 'se_reduce') se = conv2d_block(se, filters, config, use_bias=True, use_batch_norm=False, activation='sigmoid', name=prefix + 'se_expand') x = tf.keras.layers.multiply([x, se], name=prefix + 'se_excite') # Output phase x = conv2d_block(x, block['output_filters'], config, activation=None, name=prefix + 'project') # Add identity so that quantization-aware training can insert quantization # ops correctly. x = tf.keras.layers.Activation(get_activation('identity'), name=prefix + 'id')(x) if (block['id_skip'] and all(s == 1 for s in block['strides']) and block['input_filters'] == block['output_filters']): if drop_connect_rate and drop_connect_rate > 0: # Apply dropconnect # The only difference between dropout and dropconnect in TF is scaling by # drop_connect_rate during training. See: # https://github.com/keras-team/keras/pull/9898#issuecomment-380577612 x = tf.keras.layers.Dropout(drop_connect_rate, noise_shape=(None, 1, 1, 1), name=prefix + 'drop')(x) x = tf.keras.layers.add([x, inputs], name=prefix + 'add') return x
DeepLearningExamples-master
TensorFlow2/Detection/Efficientdet/efficientnet/blocks/mb_conv_block.py
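
Usage sketch (illustrative, not from the repository): the dict keys below mirror what `mb_conv_block` reads from its `block` and `config` arguments; the values are placeholders chosen only to exercise the depthwise + squeeze-and-excite path.

# Illustrative only: block/config values are placeholders, not the
# repository's block definitions.
import tensorflow as tf
from efficientnet.blocks import mb_conv_block

config = {
    'use_se': True,
    'activation': 'simple_swish',
    'drop_connect_rate': 0.2,
    'bn_momentum': 0.99,
    'bn_epsilon': 1e-3,
    'weight_decay': 5e-6,
    'weight_init': 'fan_out',
}
block = {
    'input_filters': 32,
    'output_filters': 16,
    'expand_ratio': 1,
    'kernel_size': (3, 3),
    'strides': (1, 1),
    'conv_type': 'depthwise',
    'fused_conv': False,
    'se_ratio': 0.25,
    'id_skip': True,
}

inputs = tf.keras.Input(shape=(112, 112, 32))
outputs = mb_conv_block(inputs, block, config, prefix='block1a_')
model = tf.keras.Model(inputs, outputs)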
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Customized Swish activation."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import six
import math
import tensorflow as tf

__all__ = ['simple_swish', 'hard_swish', 'identity', 'gelu', 'get_activation']


@tf.keras.utils.register_keras_serializable(package='Text')
def simple_swish(features):
  """Computes the Swish activation function.

  The tf.nn.swish operation uses a custom gradient to reduce memory usage.
  Since saving custom gradients in SavedModel is currently not supported, and
  one would not be able to use an exported TF-Hub module for fine-tuning, we
  provide this wrapper that allows selecting whether to use the native
  TensorFlow swish operation or a customized operation that uses the default
  TensorFlow gradient computation.

  Args:
    features: A `Tensor` representing preactivation values.

  Returns:
    The activation value.
  """
  features = tf.convert_to_tensor(features)
  return features * tf.nn.sigmoid(features)


@tf.keras.utils.register_keras_serializable(package='Text')
def hard_swish(features):
  """Computes a hard version of the swish function.

  This operation can be used to reduce computational cost and improve
  quantization for edge devices.

  Args:
    features: A `Tensor` representing preactivation values.

  Returns:
    The activation value.
  """
  features = tf.convert_to_tensor(features)
  return features * tf.nn.relu6(features + tf.constant(3.)) * (1. / 6.)


@tf.keras.utils.register_keras_serializable(package='Text')
def identity(features):
  """Computes the identity function.

  Useful for helping in quantization.

  Args:
    features: A `Tensor` representing preactivation values.

  Returns:
    The activation value.
  """
  features = tf.convert_to_tensor(features)
  return tf.identity(features)


@tf.keras.utils.register_keras_serializable(package='Text')
def gelu(x):
  """Gaussian Error Linear Unit.

  This is a smoother version of the ReLU.
  Original paper: https://arxiv.org/abs/1606.08415

  Args:
    x: float Tensor to perform activation.

  Returns:
    `x` with the GELU activation applied.
  """
  cdf = 0.5 * (1.0 + tf.tanh(
      (math.sqrt(2 / math.pi) * (x + 0.044715 * tf.pow(x, 3)))))
  return x * cdf


# TODO(hongkuny): consider moving custom string-map lookup to keras api.
def get_activation(identifier):
  """Maps an identifier to a Python function, e.g., "relu" => `tf.nn.relu`.

  String identifiers are checked first: if the name is one of the customized
  activations not in TF, the corresponding activation is returned. For
  non-customized activation names and callable identifiers, it falls back to
  tf.keras.activations.get.

  Args:
    identifier: String name of the activation function or callable.

  Returns:
    A Python function corresponding to the activation function.
  """
  if isinstance(identifier, six.string_types):
    name_to_fn = {
        "gelu": gelu,
        "simple_swish": simple_swish,
        "hard_swish": hard_swish,
        "identity": identity,
    }
    identifier = str(identifier).lower()
    if identifier in name_to_fn:
      return tf.keras.activations.get(name_to_fn[identifier])
  return tf.keras.activations.get(identifier)
DeepLearningExamples-master
TensorFlow2/Detection/Efficientdet/efficientnet/layers/activations.py
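
Usage sketch (illustrative): `get_activation` resolves the custom names registered above and falls back to `tf.keras.activations.get` for everything else.

# Illustrative only: assumes the repository's efficientnet package is
# importable and TF 2.x eager execution.
import tensorflow as tf
from efficientnet.layers import get_activation

x = tf.constant([-1.0, 0.0, 1.0])
swish_fn = get_activation('simple_swish')  # resolved via the custom map above
relu_fn = get_activation('relu')           # falls back to tf.keras.activations.get
print(swish_fn(x).numpy(), relu_fn(x).numpy())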
from efficientnet.layers.activations import simple_swish, hard_swish, identity, gelu, get_activation

__all__ = ['simple_swish', 'hard_swish', 'identity', 'gelu', 'get_activation']
DeepLearningExamples-master
TensorFlow2/Detection/Efficientdet/efficientnet/layers/__init__.py
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils used to manipulate tensor shapes."""

from six.moves import zip
import tensorflow.compat.v1 as tf

from visualize import static_shape


def _is_tensor(t):
  """Returns a boolean indicating whether the input is a tensor.

  Args:
    t: the input to be tested.

  Returns:
    a boolean that indicates whether t is a tensor.
  """
  return isinstance(t, (tf.Tensor, tf.SparseTensor, tf.Variable))


def _set_dim_0(t, d0):
  """Sets the 0-th dimension of the input tensor.

  Args:
    t: the input tensor, assuming the rank is at least 1.
    d0: an integer indicating the 0-th dimension of the input tensor.

  Returns:
    the tensor t with the 0-th dimension set.
  """
  t_shape = t.get_shape().as_list()
  t_shape[0] = d0
  t.set_shape(t_shape)
  return t


def pad_tensor(t, length):
  """Pads the input tensor with 0s along the first dimension up to the length.

  Args:
    t: the input tensor, assuming the rank is at least 1.
    length: a tensor of shape [1] or an integer, indicating the first dimension
      of the input tensor t after padding, assuming length <= t.shape[0].

  Returns:
    padded_t: the padded tensor, whose first dimension is length. If the length
      is an integer, the first dimension of padded_t is set to length
      statically.
  """
  t_rank = tf.rank(t)
  t_shape = tf.shape(t)
  t_d0 = t_shape[0]
  pad_d0 = tf.expand_dims(length - t_d0, 0)
  pad_shape = tf.cond(
      tf.greater(t_rank, 1), lambda: tf.concat([pad_d0, t_shape[1:]], 0),
      lambda: tf.expand_dims(length - t_d0, 0))
  padded_t = tf.concat([t, tf.zeros(pad_shape, dtype=t.dtype)], 0)
  if not _is_tensor(length):
    padded_t = _set_dim_0(padded_t, length)
  return padded_t


def clip_tensor(t, length):
  """Clips the input tensor along the first dimension up to the length.

  Args:
    t: the input tensor, assuming the rank is at least 1.
    length: a tensor of shape [1] or an integer, indicating the first dimension
      of the input tensor t after clipping, assuming length <= t.shape[0].

  Returns:
    clipped_t: the clipped tensor, whose first dimension is length. If the
      length is an integer, the first dimension of clipped_t is set to length
      statically.
  """
  clipped_t = tf.gather(t, tf.range(length))
  if not _is_tensor(length):
    clipped_t = _set_dim_0(clipped_t, length)
  return clipped_t


def pad_or_clip_tensor(t, length):
  """Pad or clip the input tensor along the first dimension.

  Args:
    t: the input tensor, assuming the rank is at least 1.
    length: a tensor of shape [1] or an integer, indicating the first dimension
      of the input tensor t after processing.

  Returns:
    processed_t: the processed tensor, whose first dimension is length. If the
      length is an integer, the first dimension of the processed tensor is set
      to length statically.
  """
  return pad_or_clip_nd(t, [length] + t.shape.as_list()[1:])


def pad_or_clip_nd(tensor, output_shape):
  """Pad or clip given tensor to the output shape.

  Args:
    tensor: Input tensor to pad or clip.
    output_shape: A list of integers / scalar tensors (or None for dynamic dim)
      representing the size to pad or clip each dimension of the input tensor.

  Returns:
    Input tensor padded and clipped to the output shape.
  """
  tensor_shape = tf.shape(tensor)
  clip_size = [
      tf.where(tensor_shape[i] - shape > 0, shape, -1)
      if shape is not None else -1 for i, shape in enumerate(output_shape)
  ]
  clipped_tensor = tf.slice(
      tensor,
      begin=tf.zeros(len(clip_size), dtype=tf.int32),
      size=clip_size)

  # Pad tensor if the shape of clipped tensor is smaller than the expected
  # shape.
  clipped_tensor_shape = tf.shape(clipped_tensor)
  trailing_paddings = [
      shape - clipped_tensor_shape[i] if shape is not None else 0
      for i, shape in enumerate(output_shape)
  ]
  paddings = tf.stack(
      [
          tf.zeros(len(trailing_paddings), dtype=tf.int32),
          trailing_paddings
      ],
      axis=1)
  padded_tensor = tf.pad(clipped_tensor, paddings=paddings)
  output_static_shape = [
      dim if not isinstance(dim, tf.Tensor) else None for dim in output_shape
  ]
  padded_tensor.set_shape(output_static_shape)
  return padded_tensor


def combined_static_and_dynamic_shape(tensor):
  """Returns a list containing static and dynamic values for the dimensions.

  Returns a list of static and dynamic values for shape dimensions. This is
  useful to preserve static shapes when available in reshape operation.

  Args:
    tensor: A tensor of any type.

  Returns:
    A list of size tensor.shape.ndims containing integers or a scalar tensor.
  """
  static_tensor_shape = tensor.shape.as_list()
  dynamic_tensor_shape = tf.shape(tensor)
  combined_shape = []
  for index, dim in enumerate(static_tensor_shape):
    if dim is not None:
      combined_shape.append(dim)
    else:
      combined_shape.append(dynamic_tensor_shape[index])
  return combined_shape


def static_or_dynamic_map_fn(fn, elems, dtype=None,
                             parallel_iterations=32, back_prop=True):
  """Runs map_fn as a (static) for loop when possible.

  This function rewrites the map_fn as an explicit unstack input -> for loop
  over function calls -> stack result combination. This allows our graphs to be
  acyclic when the batch size is static. For comparison, see
  https://www.tensorflow.org/api_docs/python/tf/map_fn.

  Note that `static_or_dynamic_map_fn` currently is not *fully* interchangeable
  with the default tf.map_fn function as it does not accept nested inputs (only
  Tensors or lists of Tensors). Likewise, the output of `fn` can only be a
  Tensor or list of Tensors.

  TODO(jonathanhuang): make this function fully interchangeable with tf.map_fn.

  Args:
    fn: The callable to be performed. It accepts one argument, which will have
      the same structure as elems. Its output must have the same structure as
      elems.
    elems: A tensor or list of tensors, each of which will be unpacked along
      their first dimension. The sequence of the resulting slices will be
      applied to fn.
    dtype: (optional) The output type(s) of fn. If fn returns a structure of
      Tensors differing from the structure of elems, then dtype is not optional
      and must have the same structure as the output of fn.
    parallel_iterations: (optional) number of batch items to process in
      parallel. This flag is only used if the native tf.map_fn is used and
      defaults to 32 instead of 10 (unlike the standard tf.map_fn default).
    back_prop: (optional) True enables support for back propagation.
      This flag is only used if the native tf.map_fn is used.

  Returns:
    A tensor or sequence of tensors. Each tensor packs the results of applying
    fn to tensors unpacked from elems along the first dimension, from first
    to last.

  Raises:
    ValueError: if `elems` is not a Tensor or a list of Tensors.
    ValueError: if `fn` does not return a Tensor or list of Tensors.
  """
  if isinstance(elems, list):
    for elem in elems:
      if not isinstance(elem, tf.Tensor):
        raise ValueError('`elems` must be a Tensor or list of Tensors.')
    elem_shapes = [elem.shape.as_list() for elem in elems]
    # Fall back on tf.map_fn if shapes of each entry of `elems` are None or
    # fail to all be the same size along the batch dimension.
    for elem_shape in elem_shapes:
      if (not elem_shape or not elem_shape[0] or
          elem_shape[0] != elem_shapes[0][0]):
        return tf.map_fn(fn, elems, dtype, parallel_iterations, back_prop)
    arg_tuples = zip(*[tf.unstack(elem) for elem in elems])
    outputs = [fn(arg_tuple) for arg_tuple in arg_tuples]
  else:
    if not isinstance(elems, tf.Tensor):
      raise ValueError('`elems` must be a Tensor or list of Tensors.')
    elems_shape = elems.shape.as_list()
    if not elems_shape or not elems_shape[0]:
      return tf.map_fn(fn, elems, dtype, parallel_iterations, back_prop)
    outputs = [fn(arg) for arg in tf.unstack(elems)]
  # Stack `outputs`, which is a list of Tensors or list of lists of Tensors
  if all([isinstance(output, tf.Tensor) for output in outputs]):
    return tf.stack(outputs)
  else:
    if all([isinstance(output, list) for output in outputs]):
      if all([all([isinstance(entry, tf.Tensor) for entry in output_list])
              for output_list in outputs]):
        return [tf.stack(output_tuple) for output_tuple in zip(*outputs)]
  raise ValueError('`fn` should return a Tensor or a list of Tensors.')


def check_min_image_dim(min_dim, image_tensor):
  """Checks that the image width/height are greater than some number.

  This function is used to check that the width and height of an image are
  above a certain value. If the image shape is static, this function will
  perform the check at graph construction time. Otherwise, if the image shape
  varies, an Assertion control dependency will be added to the graph.

  Args:
    min_dim: The minimum number of pixels along the width and height of the
      image.
    image_tensor: The image tensor to check size for.

  Returns:
    If `image_tensor` has dynamic size, return `image_tensor` with an Assert
    control dependency. Otherwise returns image_tensor.

  Raises:
    ValueError: if `image_tensor`'s width or height is smaller than `min_dim`.
  """
  image_shape = image_tensor.get_shape()
  image_height = static_shape.get_height(image_shape)
  image_width = static_shape.get_width(image_shape)
  if image_height is None or image_width is None:
    shape_assert = tf.Assert(
        tf.logical_and(tf.greater_equal(tf.shape(image_tensor)[1], min_dim),
                       tf.greater_equal(tf.shape(image_tensor)[2], min_dim)),
        ['image size must be >= {} in both height and width.'.format(min_dim)])
    with tf.control_dependencies([shape_assert]):
      return tf.identity(image_tensor)

  if image_height < min_dim or image_width < min_dim:
    raise ValueError(
        'image size must be >= %d in both height and width; image dim = %d,%d'
        % (min_dim, image_height, image_width))
  return image_tensor


def assert_shape_equal(shape_a, shape_b):
  """Asserts that shape_a and shape_b are equal.

  If the shapes are static, raises a ValueError when the shapes
  mismatch.

  If the shapes are dynamic, raises a tf InvalidArgumentError when the shapes
  mismatch.

  Args:
    shape_a: a list containing shape of the first tensor.
    shape_b: a list containing shape of the second tensor.

  Returns:
    Either a tf.no_op() when shapes are all static or a tf.assert_equal() op
    when the shapes are dynamic.

  Raises:
    ValueError: When shapes are both static and unequal.
""" if (all(isinstance(dim, int) for dim in shape_a) and all(isinstance(dim, int) for dim in shape_b)): if shape_a != shape_b: raise ValueError('Unequal shapes {}, {}'.format(shape_a, shape_b)) else: return tf.no_op() else: return tf.assert_equal(shape_a, shape_b) def assert_shape_equal_along_first_dimension(shape_a, shape_b): """Asserts that shape_a and shape_b are the same along the 0th-dimension. If the shapes are static, raises a ValueError when the shapes mismatch. If the shapes are dynamic, raises a tf InvalidArgumentError when the shapes mismatch. Args: shape_a: a list containing shape of the first tensor. shape_b: a list containing shape of the second tensor. Returns: Either a tf.no_op() when shapes are all static and a tf.assert_equal() op when the shapes are dynamic. Raises: ValueError: When shapes are both static and unequal. """ if isinstance(shape_a[0], int) and isinstance(shape_b[0], int): if shape_a[0] != shape_b[0]: raise ValueError('Unequal first dimension {}, {}'.format( shape_a[0], shape_b[0])) else: return tf.no_op() else: return tf.assert_equal(shape_a[0], shape_b[0]) def assert_box_normalized(boxes, maximum_normalized_coordinate=1.1): """Asserts the input box tensor is normalized. Args: boxes: a tensor of shape [N, 4] where N is the number of boxes. maximum_normalized_coordinate: Maximum coordinate value to be considered as normalized, default to 1.1. Returns: a tf.Assert op which fails when the input box tensor is not normalized. Raises: ValueError: When the input box tensor is not normalized. """ box_minimum = tf.reduce_min(boxes) box_maximum = tf.reduce_max(boxes) return tf.Assert( tf.logical_and( tf.less_equal(box_maximum, maximum_normalized_coordinate), tf.greater_equal(box_minimum, 0)), [boxes]) def flatten_dimensions(inputs, first, last): """Flattens `K-d` tensor along [first, last) dimensions. Converts `inputs` with shape [D0, D1, ..., D(K-1)] into a tensor of shape [D0, D1, ..., D(first) * D(first+1) * ... * D(last-1), D(last), ..., D(K-1)]. Example: `inputs` is a tensor with initial shape [10, 5, 20, 20, 3]. new_tensor = flatten_dimensions(inputs, first=1, last=3) new_tensor.shape -> [10, 100, 20, 3]. Args: inputs: a tensor with shape [D0, D1, ..., D(K-1)]. first: first value for the range of dimensions to flatten. last: last value for the range of dimensions to flatten. Note that the last dimension itself is excluded. Returns: a tensor with shape [D0, D1, ..., D(first) * D(first + 1) * ... * D(last - 1), D(last), ..., D(K-1)]. Raises: ValueError: if first and last arguments are incorrect. """ if first >= inputs.shape.ndims or last > inputs.shape.ndims: raise ValueError('`first` and `last` must be less than inputs.shape.ndims. ' 'found {} and {} respectively while ndims is {}'.format( first, last, inputs.shape.ndims)) shape = combined_static_and_dynamic_shape(inputs) flattened_dim_prod = tf.reduce_prod(shape[first:last], keepdims=True) new_shape = tf.concat([shape[:first], flattened_dim_prod, shape[last:]], axis=0) return tf.reshape(inputs, new_shape) def flatten_first_n_dimensions(inputs, n): """Flattens `K-d` tensor along first n dimension to be a `(K-n+1)-d` tensor. Converts `inputs` with shape [D0, D1, ..., D(K-1)] into a tensor of shape [D0 * D1 * ... * D(n-1), D(n), ... D(K-1)]. Example: `inputs` is a tensor with initial shape [10, 5, 20, 20, 3]. new_tensor = flatten_first_n_dimensions(inputs, 2) new_tensor.shape -> [50, 20, 20, 3]. Args: inputs: a tensor with shape [D0, D1, ..., D(K-1)]. n: The number of dimensions to flatten. 

  Returns:
    a tensor with shape [D0 * D1 * ... * D(n-1), D(n), ... D(K-1)].
  """
  return flatten_dimensions(inputs, first=0, last=n)


def expand_first_dimension(inputs, dims):
  """Expands `K-d` tensor along first dimension to be a `(K+n-1)-d` tensor.

  Converts `inputs` with shape [D0, D1, ..., D(K-1)] into a tensor of shape
  [dims[0], dims[1], ..., dims[-1], D1, ..., D(k-1)].

  Example:
  `inputs` is a tensor with shape [50, 20, 20, 3].
  new_tensor = expand_first_dimension(inputs, [10, 5]).
  new_tensor.shape -> [10, 5, 20, 20, 3].

  Args:
    inputs: a tensor with shape [D0, D1, ..., D(K-1)].
    dims: List with new dimensions to expand first axis into. The length of
      `dims` is typically 2 or larger.

  Returns:
    a tensor with shape [dims[0], dims[1], ..., dims[-1], D1, ..., D(k-1)].
  """
  inputs_shape = combined_static_and_dynamic_shape(inputs)
  expanded_shape = tf.stack(dims + inputs_shape[1:])

  # Verify that it is possible to expand the first axis of inputs.
  assert_op = tf.assert_equal(
      inputs_shape[0], tf.reduce_prod(tf.stack(dims)),
      message=('First dimension of `inputs` cannot be expanded into provided '
               '`dims`'))

  with tf.control_dependencies([assert_op]):
    inputs_reshaped = tf.reshape(inputs, expanded_shape)

  return inputs_reshaped


def resize_images_and_return_shapes(inputs, image_resizer_fn):
  """Resizes images using the given function and returns their true shapes.

  Args:
    inputs: a float32 Tensor representing a batch of inputs of shape
      [batch_size, height, width, channels].
    image_resizer_fn: a function which takes in a single image and outputs
      a resized image and its original shape.

  Returns:
    resized_inputs: The inputs resized according to image_resizer_fn.
    true_image_shapes: An integer tensor of shape [batch_size, 3] representing
      the height, width and number of channels in inputs.
  """
  if inputs.dtype is not tf.float32:
    raise ValueError('`resize_images_and_return_shapes` expects a '
                     'tf.float32 tensor')

  # TODO(jonathanhuang): revisit whether to always use batch size as
  # the number of parallel iterations vs allow for dynamic batching.
  outputs = static_or_dynamic_map_fn(
      image_resizer_fn,
      elems=inputs,
      dtype=[tf.float32, tf.int32])
  resized_inputs = outputs[0]
  true_image_shapes = outputs[1]

  return resized_inputs, true_image_shapes
DeepLearningExamples-master
TensorFlow2/Detection/Efficientdet/visualize/shape_utils.py
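
Usage sketch (illustrative, not from the repository): `pad_or_clip_tensor` pads with zeros or clips along the first dimension, and `combined_static_and_dynamic_shape` keeps static dimensions where they are known. The import path assumes the repository's `visualize` package is on the Python path; exact eager vs. graph behavior follows the module's tf.compat.v1 usage.

# Illustrative shapes only; assumes the repo's visualize package is importable.
import tensorflow.compat.v1 as tf
from visualize import shape_utils

boxes = tf.zeros([3, 4])
padded = shape_utils.pad_or_clip_tensor(boxes, 5)    # first dim padded to 5 -> [5, 4]
clipped = shape_utils.pad_or_clip_tensor(boxes, 2)   # first dim clipped to 2 -> [2, 4]
combined = shape_utils.combined_static_and_dynamic_shape(boxes)  # [3, 4]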