#!/usr/bin/env python3
# coding=utf-8
# Copyright 2021 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
# Copyright (C) 2021-2022 Habana Labs, Ltd. an Intel Company
###############################################################################
# Changes:
# - renamed from t2t_decoder.py to decoder.py
# - added shebang
# - organized imports
# - added support for HPU
# - renamed t2t_trainer to trainer
# - added use_hpu hparam
# - added workarounds to run on HPU
# - added support for recipe cache
# - added support for fast inference
# - added support for horovod
r"""Decode from trained T2T models. | |
This binary performs inference using the Estimator API. | |
Example usage to decode from dataset: | |
./decoder.py \ | |
--data_dir ~/data \ | |
--problem=algorithmic_identity_binary40 \ | |
--model=transformer | |
--hparams_set=transformer_base | |
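
To decode from a file instead, pass --decode_from_file (and optionally
--decode_to_file); the paths here are illustrative:

  ./decoder.py \
      --data_dir ~/data \
      --problem=algorithmic_identity_binary40 \
      --model=transformer \
      --hparams_set=transformer_base \
      --decode_from_file=inputs.txt \
      --decode_to_file=outputs.txt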

Set FLAGS.decode_interactive or FLAGS.decode_from_file for alternative decode
sources.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os

from TensorFlow.nlp.transformer import trainer
from TensorFlow.nlp.transformer.data_generators import problem  # pylint: disable=unused-import
from TensorFlow.nlp.transformer.data_generators import text_encoder
from TensorFlow.nlp.transformer.utils import decoding
from TensorFlow.nlp.transformer.utils import registry
from TensorFlow.nlp.transformer.utils import trainer_lib
from TensorFlow.nlp.transformer.utils import usr_dir

import tensorflow.compat.v1 as tf

flags = tf.flags
FLAGS = flags.FLAGS

# Additional flags in trainer.py and utils/flags.py
flags.DEFINE_string("checkpoint_path", None,
                    "Path to the model checkpoint. Overrides output_dir.")
flags.DEFINE_bool("keep_timestamp", False,
                  "Set the mtime of the decoded file to the "
                  "checkpoint_path+'.index' mtime.")
flags.DEFINE_bool("decode_interactive", False,
                  "Interactive local inference mode.")
flags.DEFINE_integer("decode_shards", 1, "Number of decoding replicas.")
flags.DEFINE_string("score_file", "", "File to score. Each line in the file "
                    "must be in the format input \t target.")
flags.DEFINE_bool("decode_in_memory", False, "Decode in memory.") | |
flags.DEFINE_bool("disable_grappler_optimizations", False, | |
"Disable Grappler if need be to avoid tensor format errors.") | |
flags.DEFINE_bool("use_fast_inference", True, "Use fast inference with static shapes") | |


def create_hparams():
  """Create hparams, loading hparams.json from output_dir when present."""
  hparams_path = None
  if FLAGS.output_dir:
    hparams_path = os.path.join(FLAGS.output_dir, "hparams.json")
  return trainer_lib.create_hparams(
      FLAGS.hparams_set,
      FLAGS.hparams,
      data_dir=os.path.expanduser(FLAGS.data_dir),
      problem_name=FLAGS.problem,
      hparams_path=hparams_path)


def create_decode_hparams():
  """Build decode hparams from --decode_hparams and related flags."""
  decode_hp = decoding.decode_hparams(FLAGS.decode_hparams)
  decode_hp.shards = FLAGS.decode_shards
  decode_hp.shard_id = FLAGS.worker_id
  decode_in_memory = FLAGS.decode_in_memory or decode_hp.decode_in_memory
  decode_hp.decode_in_memory = decode_in_memory
  decode_hp.decode_to_file = FLAGS.decode_to_file
  decode_hp.decode_reference = FLAGS.decode_reference
  return decode_hp
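
# --decode_hparams is a comma-separated hparams string parsed by
# decoding.decode_hparams; the values here are illustrative:
#   ./decoder.py ... --decode_hparams='beam_size=4,alpha=0.6'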


def decode(estimator, hparams, decode_hp):
  """Decode from estimator. Interactive, from file, or from dataset."""
  if FLAGS.decode_interactive:
    if estimator.config.use_tpu:
      raise ValueError("TPU can only decode from dataset.")
    decoding.decode_interactively(estimator, hparams, decode_hp,
                                  checkpoint_path=FLAGS.checkpoint_path)
  elif FLAGS.decode_from_file:
    decoding.decode_from_file(estimator, FLAGS.decode_from_file, hparams,
                              decode_hp, FLAGS.decode_to_file,
                              checkpoint_path=FLAGS.checkpoint_path)
    if FLAGS.checkpoint_path and FLAGS.keep_timestamp:
      ckpt_time = os.path.getmtime(FLAGS.checkpoint_path + ".index")
      os.utime(FLAGS.decode_to_file, (ckpt_time, ckpt_time))
  else:
    decoding.decode_from_dataset(
        estimator,
        FLAGS.problem,
        hparams,
        decode_hp,
        decode_to_file=FLAGS.decode_to_file,
        dataset_split="test" if FLAGS.eval_use_test_set else None,
        checkpoint_path=FLAGS.checkpoint_path)
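
# For interactive decoding from stdin (not supported on TPU, per the check
# above):
#   ./decoder.py ... --decode_interactive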


def score_file(filename):
  """Score each line in a file and return the scores."""
  # Prepare model.
  hparams = create_hparams()
  encoders = registry.problem(FLAGS.problem).feature_encoders(FLAGS.data_dir)
  has_inputs = "inputs" in encoders

  # Prepare features for feeding into the model.
  if has_inputs:
    inputs_ph = tf.placeholder(dtype=tf.int32)  # Just length dimension.
    batch_inputs = tf.reshape(inputs_ph, [1, -1, 1, 1])  # Make it 4D.
  targets_ph = tf.placeholder(dtype=tf.int32)  # Just length dimension.
  batch_targets = tf.reshape(targets_ph, [1, -1, 1, 1])  # Make it 4D.
  if has_inputs:
    features = {"inputs": batch_inputs, "targets": batch_targets}
  else:
    features = {"targets": batch_targets}

  # Prepare the model and the graph when model runs on features.
  model = registry.model(FLAGS.model)(hparams, tf.estimator.ModeKeys.EVAL)
  _, losses = model(features)
  saver = tf.train.Saver()

  with tf.Session() as sess:
    # Load weights from checkpoint.
    if FLAGS.checkpoint_path is None:
      ckpts = tf.train.get_checkpoint_state(FLAGS.output_dir)
      ckpt = ckpts.model_checkpoint_path
    else:
      ckpt = FLAGS.checkpoint_path
    saver.restore(sess, ckpt)
    # Run on each line.
    with tf.gfile.Open(filename) as f:
      lines = f.readlines()
    results = []
    for line in lines:
      tab_split = line.split("\t")
      if len(tab_split) > 2:
        raise ValueError("Each line must have at most one tab separator.")
      if len(tab_split) == 1:
        targets = tab_split[0].strip()
      else:
        targets = tab_split[1].strip()
        inputs = tab_split[0].strip()
      # Run encoders and append EOS symbol.
      targets_numpy = encoders["targets"].encode(
          targets) + [text_encoder.EOS_ID]
      if has_inputs:
        inputs_numpy = encoders["inputs"].encode(inputs) + [text_encoder.EOS_ID]
      # Prepare the feed.
      if has_inputs:
        feed = {inputs_ph: inputs_numpy, targets_ph: targets_numpy}
      else:
        feed = {targets_ph: targets_numpy}
      # Get the score.
      np_loss = sess.run(losses["training"], feed)
      results.append(np_loss)
  return results
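
# A minimal sketch of the scoring path end to end (flag values are
# illustrative, not defaults):
#   ./decoder.py \
#       --problem=algorithmic_identity_binary40 --model=transformer \
#       --hparams_set=transformer_base --data_dir=~/data \
#       --output_dir=~/train --score_file=pairs.tsv --decode_to_file=scores.txt
# This writes one log-loss per line of pairs.tsv to scores.txt (see main()).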


def get_workaround_flag(name):
  """Return the environment variable name that controls workaround `name`."""
  return f'WA_{name}'


def is_workaround_enabled(name):
  """Check whether a workaround is enabled (default: enabled)."""
  flag = get_workaround_flag(name)
  is_enabled = os.environ.get(flag, 'true') == 'true'
  if is_enabled:
    print(f"Warning! Workaround {flag} is enabled. "
          f"Run with {flag}=false to disable it.")
  return is_enabled
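
# For example, to disable the dynamic-shapes workaround used in main() below:
#   WA_DISABLE_DYNAMIC_SHAPES=false ./decoder.py ...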


def main(_):
  tf.disable_v2_behavior()
  tf.enable_resource_variables()
  tf.logging.set_verbosity(tf.logging.INFO)
  trainer_lib.set_random_seed(FLAGS.random_seed)
  usr_dir.import_usr_dir(FLAGS.t2t_usr_dir)

  if FLAGS.use_hpu:
    from habana_frameworks.tensorflow import load_habana_module
    load_habana_module()

  hvd = trainer.init_multinode()

  if FLAGS.use_hpu:
    if FLAGS.recipe_cache:
      trainer.prepare_recipe_cache()
    if FLAGS.use_bf16:
      if not is_workaround_enabled('FORCE_FP32'):
        os.environ['TF_BF16_CONVERSION'] = FLAGS.bf16_config_path
      else:
        print("Warning! BF16 precision is not supported in inference mode. "
              "Switching back to fp32...")
    if is_workaround_enabled('DISABLE_DYNAMIC_SHAPES'):
      os.environ['TF_ENABLE_DYNAMIC_SHAPES'] = 'false'

  if FLAGS.score_file:
    filename = os.path.expanduser(FLAGS.score_file)
    if not tf.gfile.Exists(filename):
      raise ValueError("The file to score doesn't exist: %s" % filename)
    if not FLAGS.decode_to_file:
      raise ValueError("To score a file, specify --decode_to_file for results.")
    results = score_file(filename)
    write_file = tf.gfile.Open(os.path.expanduser(FLAGS.decode_to_file), "w")
    for score in results:
      write_file.write("%.6f\n" % score)
    write_file.close()
    return

  hp = create_hparams()
  hp.add_hparam("use_hpu", FLAGS.use_hpu)
  hp.add_hparam("use_horovod", FLAGS.use_horovod)

  decode_hp = create_decode_hparams()
  decode_hp.add_hparam("use_horovod", hp.use_horovod)
  if FLAGS.use_horovod:
    hp.add_hparam("hvd_worker_id", hvd.rank())
    hp.add_hparam("hvd_size", hvd.size())
    decode_hp.add_hparam("hvd_worker_id", hp.hvd_worker_id)
    decode_hp.add_hparam("hvd_size", hp.hvd_size)

  run_config = trainer.create_run_config(hp)
  if FLAGS.disable_grappler_optimizations:
    run_config.session_config.graph_options.rewrite_options.disable_meta_optimizer = True

  assert FLAGS.use_fast_inference or not FLAGS.use_horovod, \
      "Multinode inference is only supported with use_fast_inference=True"

  # summary-hook in tf.estimator.EstimatorSpec requires
  # hparams.model_dir to be set.
  hp.add_hparam("model_dir", run_config.model_dir)

  estimator = trainer_lib.create_estimator(
      FLAGS.model,
      hp,
      run_config,
      decode_hparams=decode_hp,
      use_tpu=FLAGS.use_tpu)

  decode(estimator, hp, decode_hp)


if __name__ == "__main__":
  tf.logging.set_verbosity(tf.logging.INFO)
  tf.app.run()